diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..bc36aac5f099aa620b8b88faa3bdc6656fece9ca --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +*.ts text eol=lf +*.py text eol=lf +*.sh text eol=lf diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000000000000000000000000000000000..718e752b647bbe43a0d1fc41bc2729e0e2b95990 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,40 @@ +--- +name: Create an issue +about: Report an issue or question while using nni instance (deployment). + + +--- + +**Describe the issue**: + + + +**Environment**: +- NNI version: +- Training service (local|remote|pai|aml|etc): +- Client OS: +- Server OS (for remote mode only): +- Python version: +- PyTorch/TensorFlow version: +- Is conda/virtualenv/venv used?: +- Is running in Docker?: + + +**Configuration**: + - Experiment config (remember to remove secrets!): + - Search space: + + +**Log message**: + - nnimanager.log: + - dispatcher.log: + - nnictl stdout and stderr: + + + + +**How to reproduce it?**: \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 0000000000000000000000000000000000000000..e93d10844f509d2075621c845881edd43bd69357 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,17 @@ +--- +name: Enhancement Request +about: Suggest an enhancement to the nni project + +--- + + + +**What would you like to be added**: + +**Why is this needed**: + +**Without this feature, how does current nni work**: + +**Components that may involve changes**: + +**Brief description of your proposal if any**: diff --git a/.github/ISSUE_TEMPLATE/studentProgram.md b/.github/ISSUE_TEMPLATE/studentProgram.md new file mode 100644 index 0000000000000000000000000000000000000000..52abbcc2d6b74ec5ef171db24c040c22a6c3e68c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/studentProgram.md @@ -0,0 +1,44 @@ +--- +name: Question for NNI Student Program China / NNI 学生项目问题表单 +about: NNI Student Program China issue template on Github + +--- + + +## NNI 学生项目问题概述 / General Question of Student Program + +**请简要概述您的问题 / 观点 :** +**Short summary about the question / idea :** + +**请提供 NNI 环境信息 :** +**nni Environment :** +- nni version: +- nni mode(local|pai|remote): +- OS: +- python version: +- is conda or virtualenv used?: +- is running in docker?: + +## 其他建议 / Other Advice + +**是否需要更新文档(是 / 否):** +**Need to update document ( yes / no ) :** + +**其他分享内容 :** +**Anything else we need to know :** + +**Log message / 日志信息 :** + - [nnimanager.log and dispatcher.log](https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/HowToDebug.md#experiment-root-directory) : + + - [nnictl stdout and stderr](https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/Nnictl.md#nnictl%20log%20stdout) : + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000000000000000000000000000000000..2a769e29bccceef2f89c59e4b5369f3b26c24f1a --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,10 @@ +### Description ### + + +### Checklist ### + - [ ] test case + - [ ] doc + +### How to test ### + + diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..5079c0575ead927e080ea0e41c23f962cddd38e9 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,88 @@ +# This 
is a basic workflow to help you get started with Actions

name: CI

# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
  push:
    branches: [ master ]
    paths:
    - '**.md'
    - '**.rst'
    - '/docs/en_US/**'
    - '/crowdin.yml'
  schedule:
  - cron: '*/30 * * * *'


# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "translation-syncup"
  translation-syncup:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
    # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
    - uses: actions/checkout@v2

    # Get the current branch name
    - name: Get the current branch name
      shell: bash
      run: echo "::set-output name=branch::${GITHUB_REF#refs/heads/}"
      id: branchname

    # Crowdin translation sync
    - name: crowdin-action
      uses: crowdin/github-action@1.0.8
      with:
        # Upload sources to Crowdin
        upload_sources: true # optional, default is true
        # Upload translations to Crowdin
        upload_translations: false # optional, default is false
        # Automatically approve uploaded translations
        auto_approve_imported: false # optional, default is false
        # Defines whether to add a translation if it is equal to the source string in the Crowdin project
        import_eq_suggestions: false # optional, default is false
        # Make a pull request with Crowdin translations
        download_translations: true # optional, default is false
        # Use this option to download translations for a single specified language
        # download_language: # optional
        # Skip untranslated strings in exported files (does not work with .docx, .html, .md and other document files)
        # skip_untranslated_strings: # optional, default is false
        # Omit files that are not fully translated when downloading
        # skip_untranslated_files: # optional, default is false
        # Include approved translations only in exported files.
If not combined with --skip-untranslated-strings option, strings without approval are fulfilled with the source language + export_only_approved: false # optional, default is false + # Download translations with pushing to branch + push_translations: true # optional, default is true + # To download translations to the specified version branch + localization_branch_name: l10n_${{ steps.branchname.outputs.branch }} # optional, default is l10n_crowdin_action + # Create pull request after pushing to branch + create_pull_request: false # optional, default is true + # Option to upload or download files to the specified version branch in your Crowdin project + crowdin_branch_name: ${{ steps.branchname.outputs.branch }} # optional + # Option to specify a path to user-specific credentials + # identity: # optional + # Option to specify a path to the configuration file + config: crowdin.yml # optional + # Option to preview the list of managed files + # dryrun_action: true # optional, default is false + # Numerical ID of the project + project_id: 304950 # optional + # Personal access token required for authentication + token: ${{ secrets.CROWDIN_TOKEN }} # optional + # Base URL of Crowdin server for API requests execution + # base_url: # optional + # Path to your project directory on a local machine + # base_path: # optional + # Path to the source files + # source: # optional + # Path to the translation files + # translation: # optional + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CROWDIN_PROJECT_ID: 304950 + CROWDIN_PERSONAL_TOKEN: ${{ secrets.CROWDIN_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..eb9945b84cde66156884911642cfbb850b2d5e67 --- /dev/null +++ b/.gitignore @@ -0,0 +1,109 @@ +/nni/version.py +/nni_node/ +/toolchain/ + +# unit test generated files +/test/model_path/ +/test/temp.json +/test/ut/sdk/*.pth +/test/ut/tools/annotation/_generated/ +/ts/nni_manager/exp_profile.json +/ts/nni_manager/metrics.json +/ts/nni_manager/trial_jobs.json +/test/ut/retiarii/_debug_graph_data.json +/test/ut/retiarii/out.tmp + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Build package +dist/ + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +junit/ +coverage.xml +test-*.xml +.coverage.* +htmlcov/ +.coverage + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ +**/package-lock.json + +# TypeScript v1 declaration files +typings/ + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# next.js build output +.next + +# Pycharm Project files +.idea + +# Python cache files +__pycache__ +build +*.egg-info +.eggs/ +setup.pye +**/__init__.pye +**/.ipynb_checkpoints + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# VSCode +.vscode +.vs +.history diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 
0000000000000000000000000000000000000000..d067b4a4dfd3cdceb4d161c53d393fa591d057ad
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,8 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+- name: "Microsoft"
+title: "Neural Network Intelligence"
+date-released: 2021-01-14
+url: "https://github.com/microsoft/nni"
+version: 2.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ce61868689091e8231f0c697bd1fa4a1ab12ee7
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,68 @@
+# Contributing to NNI
+
+Welcome, and thank you for your interest in contributing to NNI!
+
+There are many ways in which you can contribute, beyond writing code. The goal of this document is to provide a high-level overview of how you can get involved.
+
+# Provide feedback or ask a question
+
+* [File an issue](https://github.com/microsoft/nni/issues/new/choose) on GitHub.
+* Ask a question with NNI tags on [Stack Overflow](https://stackoverflow.com/questions/tagged/nni?sort=Newest&edited=true).
+* Discuss in the NNI [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) channel.
+
+Join IM discussion groups:
+|Gitter||WeChat|
+|----|----|----|
+|![image](https://user-images.githubusercontent.com/39592018/80665738-e0574a80-8acc-11ea-91bc-0836dc4cbf89.png)| OR |![image](https://github.com/scarlett2018/nniutil/raw/master/wechat.png)|
+
+# Look for an existing issue
+Before you create a new issue, please do a search in [open issues](https://github.com/microsoft/nni/issues) to see if the issue or feature request has already been filed.
+
+Be sure to scan through the [most popular](https://github.com/microsoft/nni/issues?q=is%3Aopen+is%3Aissue+label%3AFAQ+sort%3Areactions-%2B1-desc) feature requests.
+
+If you find that your issue already exists, make relevant comments and add your [reaction](https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments). Use a reaction in place of a "+1" comment:
+
+* 👍 - upvote
+* 👎 - downvote
+
+If you cannot find an existing issue that describes your bug or feature, create a new issue using the guidelines below.
+
+# Writing good bug reports or feature requests
+File a single issue per problem or feature request. Do not enumerate multiple bugs or feature requests in the same issue.
+
+Provide as much information as you think might be relevant to the context (imagine the issue were assigned to you: what information would you need to debug it?). To give you a general idea of what kinds of information are useful to developers digging into an issue, we have provided an issue template for you.
+
+Once you have submitted an issue, be sure to follow it for questions and discussions.
+
+Once the bug is fixed or the feature is addressed, be sure to close the issue.
+
+# Contributing fixes or examples
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+When you submit a pull request, a CLA bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+# Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+# How to Contribute
+
+After getting familiar with the contribution agreements, you are ready to create your first PR =). Follow the NNI developer tutorials to get started:
+
+* We recommend that new contributors start with simple issues: ['good first issue'](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or ['help-wanted'](https://github.com/microsoft/nni/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22).
+* [NNI developer environment installation tutorial](docs/en_US/Tutorial/SetupNniDeveloperEnvironment.rst)
+* [How to debug](docs/en_US/Tutorial/HowToDebug.rst)
+* If you have any questions on usage, review the [FAQ](https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/FAQ.rst) first; if no existing issue answers your question, try contacting the NNI dev team and users on [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) or [file an issue](https://github.com/microsoft/nni/issues/new/choose) on GitHub.
+* [Customize your own Tuner](docs/en_US/Tuner/CustomizeTuner.rst)
+* [Implement a customized TrainingService](docs/en_US/TrainingService/HowToImplementTrainingService.rst)
+* [Implement a new NAS trainer on NNI](docs/en_US/NAS/Advanced.rst)
+* [Customize your own Advisor](docs/en_US/Tuner/CustomizeAdvisor.rst)
diff --git a/CONTRIBUTING_zh_CN.md b/CONTRIBUTING_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..1626a8524e2c98bfa5957c326a22fcce00b1fcdb
--- /dev/null
+++ b/CONTRIBUTING_zh_CN.md
@@ -0,0 +1,62 @@
+# 贡献代码
+
+非常感谢您有兴趣对 NNI 做出贡献!
+ +除了编写代码外,您还可以通过多种方式参与, 本文档的目的是提供一个如何参与贡献的高层次概述。 + +# 反馈或提问 + +* 在 Github 上创建 [issue](https://github.com/microsoft/nni/issues/new/choose)。 +* 在 [Stack Overflow](https://stackoverflow.com/questions/tagged/nni?sort=Newest&edited=true) 上使用 nni 标签提问。 +* 在 [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 中参与讨论。 + +加入聊天组: +| Gitter | | 微信 | +| -------------------------------------------------------------------------------------------------------------- | - | ----------------------------------------------------------------------- | +| ![image](https://user-images.githubusercontent.com/39592018/80665738-e0574a80-8acc-11ea-91bc-0836dc4cbf89.png) | 或 | ![image](https://github.com/scarlett2018/nniutil/raw/master/wechat.png) | + + +# 查找现有问题 +在创建新 issue 之前,请在 [open issues](https://github.com/microsoft/nni/issues) 中进行搜索,以查看问题或功能请求是否已经存在。 + +确保已经浏览了 [最热门](https://github.com/microsoft/nni/issues?q=is%3Aopen+is%3Aissue+label%3AFAQ+sort%3Areactions-%2B1-desc) 的功能请求。 + +如果您的问题已经存在,请在下方发表评论或添加[回应](https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments)。 通过回应来代替“+1”评论: + +* 👍 - 赞成 +* 👎 - 反对 + +如果未能找到描述您 Bug 或功能的现有问题,请使用以下指南创建一个新问题。 + +# 编写良好的错误报告或功能请求 +针对每个错误和功能请求提交一个问题, 不要在同一问题中列举多个 Bug 或功能请求。 + +尽可能多地提供您认为与上下文相关的信息(思考问题如果分配给您,您需要什么样的信息来调试它)。 为了让您大致了解哪些信息对开发人员解决问题有帮助,我们为您提供了问题模板。 + +提交问题后,请务必跟进问题并参与讨论。 + +修正 Bug 或实现功能后,请务必关闭此问题。 + +# 贡献修复或示例 + +此项目欢迎任何贡献和建议。 大多数贡献需要您同意参与者许可协议(CLA),来声明您有权并授予我们使用您贡献的权利。 有关详细信息,请访问 https://cla.opensource.microsoft.com。 + +当你提交拉取请求时,CLA 机器人会自动检查你是否需要提供 CLA,并修饰这个拉取请求(例如标签、注释等)。 只需要按照机器人提供的说明进行操作即可。 CLA 只需要同意一次,就能应用到所有的代码仓库上。 + +# 行为准则 + +该项目采用了 [ Microsoft 开源行为准则 ](https://opensource.microsoft.com/codeofconduct/)。 有关详细信息,请参阅[行为守则常见问题解答](https://opensource.microsoft.com/codeofconduct/faq/)或联系 opencode@microsoft.com 咨询问题或评论。 + +# 参与贡献 + +熟悉贡献协议后,即可按照 NNI 开发人员教程,创建第一个 PR =): + +* 推荐新贡献者先从简单的问题开始:['good first issue'](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) 或 ['help-wanted'](https://github.com/microsoft/nni/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)。 +* [NNI 开发环境安装教程](docs/zh_CN/Tutorial/SetupNniDeveloperEnvironment.rst) +* [如何调试](docs/zh_CN/Tutorial/HowToDebug.rst) +* 如果有使用上的问题,可先查看[常见问题解答](https://github.com/microsoft/nni/blob/master/docs/zh_CN/Tutorial/FAQ.rst)。如果没能解决问题,可通过 [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 联系 NNI 开发团队或在 GitHub 上 [报告问题](https://github.com/microsoft/nni/issues/new/choose)。 +* [自定义 Tuner](docs/zh_CN/Tuner/CustomizeTuner.rst) +* [实现定制的训练平台](docs/zh_CN/TrainingService/HowToImplementTrainingService.rst) +* [在 NNI 上实现新的 NAS Trainer](docs/zh_CN/NAS/Advanced.rst) +* [自定义 Advisor](docs/zh_CN/Tuner/CustomizeAdvisor.rst) + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..df45803ef29b345f368b5d32dde16392e3ca4a8d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+FROM nvidia/cuda:10.2-cudnn8-runtime-ubuntu18.04
+
+ARG NNI_RELEASE
+
+LABEL maintainer='Microsoft NNI Team'
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get -y update
+RUN apt-get -y install \
+    sudo \
+    apt-utils \
+    git \
+    curl \
+    vim \
+    unzip \
+    wget \
+    build-essential \
+    cmake \
+    libopenblas-dev \
+    automake \
+    openssh-client \
+    openssh-server \
+    lsof \
+    python3.6 \
+    python3-dev \
+    python3-pip \
+    python3-tk \
+    libcupti-dev
+RUN apt-get clean
+RUN rm -rf /var/lib/apt/lists/*
+
+#
+# make `python` point to python3
+#
+RUN ln -s python3 /usr/bin/python
+
+#
+# update pip
+#
+RUN python3 -m pip install --upgrade pip==20.2.4 setuptools==50.3.2
+
+# numpy 1.19.5 scipy 1.5.4
+RUN python3 -m pip --no-cache-dir install numpy==1.19.5 scipy==1.5.4
+
+#
+# TensorFlow
+#
+RUN python3 -m pip --no-cache-dir install tensorflow==2.3.1
+
+#
+# Keras
+#
+RUN python3 -m pip --no-cache-dir install Keras==2.4.3
+
+#
+# PyTorch
+#
+RUN python3 -m pip --no-cache-dir install torch==1.7.1 torchvision==0.8.2 pytorch-lightning==1.3.3
+
+#
+# sklearn 0.24.1
+#
+RUN python3 -m pip --no-cache-dir install scikit-learn==0.24.1
+
+#
+# pandas==1.1 lightgbm==2.2.2
+#
+RUN python3 -m pip --no-cache-dir install pandas==1.1 lightgbm==2.2.2
+
+#
+# Install NNI
+#
+COPY dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl .
+RUN python3 -m pip install nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl
+
+#
+# Vision patch. To be removed later.
+#
+COPY test/vso_tools/interim_patch.py .
+RUN python3 interim_patch.py
+
+#
+# install AML packages
+#
+RUN python3 -m pip --no-cache-dir install azureml
+RUN python3 -m pip --no-cache-dir install azureml-sdk
+
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/root/.local/bin:/usr/bin:/bin:/sbin
+
+WORKDIR /root
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..b2f52a2bad4e27e2d9c68a755abb74cb8943f2fa
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) Microsoft Corporation.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0a6714c0912d72305422223b04c14381a9d38284
--- /dev/null
+++ b/README.md
@@ -0,0 +1,367 @@
+[![MIT licensed](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE)
+[![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=62&branchName=master)
+[![Issues](https://img.shields.io/github/issues-raw/Microsoft/nni.svg)](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen)
+[![Bugs](https://img.shields.io/github/issues/Microsoft/nni/bug.svg)](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
+[![Pull Requests](https://img.shields.io/github/issues-pr-raw/Microsoft/nni.svg)](https://github.com/Microsoft/nni/pulls?q=is%3Apr+is%3Aopen)
+[![Version](https://img.shields.io/github/release/Microsoft/nni.svg)](https://github.com/Microsoft/nni/releases) [![Join the chat at https://gitter.im/Microsoft/nni](https://badges.gitter.im/Microsoft/nni.svg)](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Documentation Status](https://readthedocs.org/projects/nni/badge/?version=stable)](https://nni.readthedocs.io/en/stable/?badge=stable)
+
+[NNI Doc](https://nni.readthedocs.io/) | [简体中文](README_zh_CN.md)
+
+**NNI (Neural Network Intelligence)** is a lightweight but powerful toolkit to help users **automate** Feature Engineering, Neural Architecture Search, Hyperparameter Tuning and Model Compression.
+
+The tool manages automated machine learning (AutoML) experiments, and **dispatches and runs** the experiments' trial jobs generated by tuning algorithms to search for the best neural architecture and/or hyper-parameters in **different training environments**, such as Local Machine, Remote Servers, OpenPAI, Kubeflow, FrameworkController on K8S (AKS etc.), DLWorkspace (aka. DLTS), AML (Azure Machine Learning), AdaptDL (aka. ADL), other cloud options, and even hybrid mode.
+
+## **Who should consider using NNI**
+
+* Those who want to **try different AutoML algorithms** in their training code/model.
+* Those who want to run AutoML trial jobs **in different environments** to speed up search.
+* Researchers and data scientists who want to easily **implement and experiment with new AutoML algorithms**, be it a hyperparameter tuning, neural architecture search, or model compression algorithm.
+* ML platform owners who want to **support AutoML in their platform**.
+
+## **What's NEW!**
+
+* **New release**: [v2.6.1 is available](https://github.com/microsoft/nni/releases/tag/v2.6.1) - _released on Feb-18-2022_
+* **New demo available**: [Youtube entry](https://www.youtube.com/channel/UCKcafm6861B2mnYhPbZHavw) | [Bilibili 入口](https://space.bilibili.com/1649051673) - _last updated on May-26-2021_
+* **New webinar**: [Introducing Retiarii: A deep learning exploratory-training framework on NNI](https://note.microsoft.com/MSR-Webinar-Retiarii-Registration-Live.html) - _scheduled on June-24-2021_
+* **New community channel**: [Discussions](https://github.com/microsoft/nni/discussions)
+* **New emoticons release**: [nnSpider](./docs/en_US/Tutorial/NNSpider.md)
+
+## **NNI capabilities in a glance**
+
+NNI provides a command-line tool as well as a user-friendly WebUI to manage training experiments. With the extensible API, you can customize your own AutoML algorithms and training services. To make things easy for new users, NNI also provides a set of built-in, state-of-the-art AutoML algorithms and out-of-the-box support for popular training platforms.
+
+The following table summarizes NNI's current capabilities; we are gradually adding new capabilities and we'd love to have your contribution.
+
+| | Frameworks & Libraries | Algorithms | Training Services |
+| --- | --- | --- | --- |
+| **Built-in** | **Supported Frameworks**<br>• PyTorch<br>• Keras<br>• TensorFlow<br>• MXNet<br>• Caffe2<br>• More... <br><br>**Supported Libraries**<br>• Scikit-learn<br>• XGBoost<br>• LightGBM<br>• More... | • Hyperparameter Tuning<br>• Neural Architecture Search (Retiarii)<br>• Model Compression<br>• Feature Engineering (Beta)<br>• Early Stop Algorithms | |
+| **References** | | | |
+
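+To make the trial workflow behind this table concrete, here is a minimal sketch of how a trial script talks to NNI through its Python API. `nni.get_next_parameter`, `nni.report_intermediate_result` and `nni.report_final_result` are NNI's actual trial APIs; `train_one_epoch` is a stand-in for your own training code, and a search space with a single `lr` parameter is assumed for illustration.
+
+```python
+import nni
+
+def train_one_epoch(lr, epoch):
+    """Stand-in for real training code; returns a dummy accuracy."""
+    return min(0.99, lr * 10 + 0.05 * epoch)
+
+def main():
+    # The tuner supplies one hyperparameter set drawn from the search space.
+    params = nni.get_next_parameter() or {}   # e.g. {"lr": 0.01}
+    lr = params.get("lr", 0.01)
+
+    accuracy = 0.0
+    for epoch in range(10):
+        accuracy = train_one_epoch(lr, epoch)
+        # Intermediate results are what assessors use for early stopping.
+        nni.report_intermediate_result(accuracy)
+
+    # The final result is the metric the tuner optimizes.
+    nni.report_final_result(accuracy)
+
+if __name__ == '__main__':
+    main()
+```
+
+The tuner generates a new parameter set for every trial it dispatches, so the same script is run many times with different hyperparameters.
+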
+
+## **Installation**
+
+### **Install**
+
+NNI supports and is tested on Ubuntu >= 16.04, macOS >= 10.14.1, and Windows 10 >= 1809. Simply run the following `pip install` in an environment that has 64-bit Python >= 3.6.
+
+Linux or macOS
+
+```bash
+python3 -m pip install --upgrade nni
+```
+
+Windows
+
+```bash
+python -m pip install --upgrade nni
+```
+
+If you want to try the latest code, please [install NNI](https://nni.readthedocs.io/en/stable/installation.html) from source code.
+
+For detailed system requirements of NNI, please refer to [here](https://nni.readthedocs.io/en/stable/Tutorial/InstallationLinux.html#system-requirements) for Linux & macOS, and [here](https://nni.readthedocs.io/en/stable/Tutorial/InstallationWin.html#system-requirements) for Windows.
+
+Note:
+
+* If there is any privilege issue, add `--user` to install NNI in the user directory.
+* Currently, NNI on Windows supports local, remote and pai mode. Anaconda or Miniconda is highly recommended for installing [NNI on Windows](https://nni.readthedocs.io/en/stable/Tutorial/InstallationWin.html).
+* If there is any error like `Segmentation fault`, please refer to the [FAQ](https://nni.readthedocs.io/en/stable/Tutorial/FAQ.html). For FAQ on Windows, please refer to [NNI on Windows](https://nni.readthedocs.io/en/stable/Tutorial/InstallationWin.html#faq).
+
+### **Verify installation**
+
+* Download the examples by cloning the source code.
+
+  ```bash
+  git clone -b v2.6 https://github.com/Microsoft/nni.git
+  ```
+
+* Run the MNIST example.
+
+  Linux or macOS
+
+  ```bash
+  nnictl create --config nni/examples/trials/mnist-pytorch/config.yml
+  ```
+
+  Windows
+
+  ```powershell
+  nnictl create --config nni\examples\trials\mnist-pytorch\config_windows.yml
+  ```
+
+* Wait for the message `INFO: Successfully started experiment!` in the command line. This message indicates that your experiment has been started successfully. You can explore the experiment using the `Web UI url`.
+
+```text
+INFO: Starting restful server...
+INFO: Successfully started Restful server!
+INFO: Setting local config...
+INFO: Successfully set local config!
+INFO: Starting experiment...
+INFO: Successfully started experiment!
+-----------------------------------------------------------------------
+The experiment id is egchD4qy
+The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
+-----------------------------------------------------------------------
+
+You can use these commands to get more information about the experiment
+-----------------------------------------------------------------------
+         commands                       description
+1. nnictl experiment show        show the information of experiments
+2. nnictl trial ls               list all of trial jobs
+3. nnictl top                    monitor the status of running experiments
+4. nnictl log stderr             show stderr log content
+5. nnictl log stdout             show stdout log content
+6. nnictl stop                   stop an experiment
+7. nnictl trial kill             kill a trial job by id
+8. nnictl --help                 get help information about nnictl
+-----------------------------------------------------------------------
+```
+
+* Open the `Web UI url` in your browser; you can view detailed information about the experiment and all the submitted trial jobs as shown below. [Here](https://nni.readthedocs.io/en/stable/Tutorial/WebUI.html) are more Web UI pages.
+
+*(webui screenshot)*
+
+## **Releases and Contributing**
+
+NNI has a monthly release cycle (major releases). Please let us know if you encounter a bug by [filing an issue](https://github.com/microsoft/nni/issues/new/choose).
+
+We appreciate all contributions. If you are planning to contribute bug fixes, please do so without further discussion.
+
+If you plan to contribute new features, new tuners, new training services, etc., please first open a new issue or reuse an existing one, and discuss the feature with us. We will respond on the issue in a timely manner, or set up conference calls if needed.
+
+To learn more about making a contribution to NNI, please refer to our [How-to contribution page](https://nni.readthedocs.io/en/stable/contribution.html).
+
+We appreciate all contributions and thank all the contributors!
+
+## **Feedback**
+
+* [File an issue](https://github.com/microsoft/nni/issues/new/choose) on GitHub.
+* Open or participate in a [discussion](https://github.com/microsoft/nni/discussions).
+* Discuss in the NNI [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) channel.
+
+Join IM discussion groups:
+|Gitter||WeChat|
+|----|----|----|
+|![image](https://user-images.githubusercontent.com/39592018/80665738-e0574a80-8acc-11ea-91bc-0836dc4cbf89.png)| OR |![image](https://github.com/scarlett2018/nniutil/raw/master/wechat.png)|
+
+## Test status
+
+### Essentials
+
+| Type | Status |
+| :---: | :---: |
+| Fast test | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/fast%20test?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=54&branchName=master) |
+| Full linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20linux?repoName=microsoft%2Fnni&branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=62&repoName=microsoft%2Fnni&branchName=master) |
+| Full windows | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20windows?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=63&branchName=master) |
+
+### Training services
+
+| Type | Status |
+| :---: | :---: |
+| Remote - linux to linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20linux%20to%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=64&branchName=master) |
+| Remote - linux to windows | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20linux%20to%20windows?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=67&branchName=master) |
+| Remote - windows to linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20windows%20to%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=68&branchName=master) |
+| OpenPAI | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20openpai%20-%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=65&branchName=master) |
+| Frameworkcontroller | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20frameworkcontroller?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=70&branchName=master) |
+| Kubeflow | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20kubeflow?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=69&branchName=master) |
+| Hybrid | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20hybrid?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=79&branchName=master) |
+| AzureML | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20aml?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=78&branchName=master) |
+
+## Related Projects
+
+Aiming at openness and advancing state-of-the-art technology, [Microsoft Research (MSR)](https://www.microsoft.com/en-us/research/group/systems-and-networking-research-group-asia/) has also released a few other open-source projects.
+
+* [OpenPAI](https://github.com/Microsoft/pai): an open-source platform that provides complete AI model training and resource management capabilities; it is easy to extend and supports on-premise, cloud, and hybrid environments at various scales.
+* [FrameworkController](https://github.com/Microsoft/frameworkcontroller): an open-source, general-purpose Kubernetes Pod Controller that orchestrates all kinds of applications on Kubernetes with a single controller.
+* [MMdnn](https://github.com/Microsoft/MMdnn): a comprehensive, cross-framework solution to convert, visualize and diagnose deep neural network models. The "MM" in MMdnn stands for model management and "dnn" is an acronym for deep neural network.
+* [SPTAG](https://github.com/Microsoft/SPTAG): Space Partition Tree And Graph (SPTAG) is an open-source library for large-scale vector approximate nearest neighbor search scenarios.
+* [nn-Meter](https://github.com/microsoft/nn-Meter): an accurate inference latency predictor for DNN models on diverse edge devices.
+
+We encourage researchers and students to leverage these projects to accelerate AI development and research.
+
+## **License**
+
+The entire codebase is under the [MIT license](LICENSE).
diff --git a/README_zh_CN.md b/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..21e93c1d913a72e6fefcebe085cf74ea64b20a89
--- /dev/null
+++ b/README_zh_CN.md
@@ -0,0 +1,364 @@
+
+* * *
+
+[![MIT 许可证](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE) [![生成状态](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=62&branchName=master) [![问题](https://img.shields.io/github/issues-raw/Microsoft/nni.svg)](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen) [![Bug](https://img.shields.io/github/issues/Microsoft/nni/bug.svg)](https://github.com/Microsoft/nni/issues?q=is%3Aissue+is%3Aopen+label%3Abug) [![拉取请求](https://img.shields.io/github/issues-pr-raw/Microsoft/nni.svg)](https://github.com/Microsoft/nni/pulls?q=is%3Apr+is%3Aopen) [![版本](https://img.shields.io/github/release/Microsoft/nni.svg)](https://github.com/Microsoft/nni/releases) [![进入 https://gitter.im/Microsoft/nni 聊天室提问](https://badges.gitter.im/Microsoft/nni.svg)](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![文档状态](https://readthedocs.org/projects/nni/badge/?version=stable)](https://nni.readthedocs.io/zh/stable/?badge=stable)
+
+[NNI 文档](https://nni.readthedocs.io/zh/stable/) | [English](README.md)
+
+**NNI (Neural Network Intelligence)** 是一个帮助用户**自动**进行[特征工程](docs/zh_CN/FeatureEngineering/Overview.rst)，[神经网络架构搜索](docs/zh_CN/NAS/Overview.rst)，[超参调优](docs/zh_CN/Tuner/BuiltinTuner.rst)以及[模型压缩](docs/zh_CN/Compression/Overview.rst)的轻量且强大的工具包。
+
+NNI 管理自动机器学习 (AutoML) 的 Experiment，**调度运行**由调优算法生成的 Trial 任务来找到最好的神经网络架构和/或超参，支持**各种训练环境**，如[本机](docs/zh_CN/TrainingService/LocalMode.rst)，[远程服务器](docs/zh_CN/TrainingService/RemoteMachineMode.rst)，[OpenPAI](docs/zh_CN/TrainingService/PaiMode.rst)，[Kubeflow](docs/zh_CN/TrainingService/KubeflowMode.rst)，[基于 K8S 的 FrameworkController（如，AKS 等）](docs/zh_CN/TrainingService/FrameworkControllerMode.rst)，[DLWorkspace (又称 DLTS)](docs/zh_CN/TrainingService/DLTSMode.rst)，[AML (Azure Machine Learning)](docs/zh_CN/TrainingService/AMLMode.rst)，[AdaptDL（又称 ADL）](docs/zh_CN/TrainingService/AdaptDLMode.rst)，和其他的云平台甚至[混合模式](docs/zh_CN/TrainingService/HybridMode.rst)。
+
+## **使用场景**
+
+* 想要在自己的代码、模型中试验**不同的自动机器学习算法**。
+* 想要在**不同的环境中**加速运行自动机器学习。
+* 想要更容易**实现或试验新的自动机器学习算法**的研究员或数据科学家，包括：超参调优算法，神经网络搜索算法以及模型压缩算法。
+* 在机器学习平台中**支持自动机器学习**。
+
+## **最新消息!**
+
+* **最新版本**：[v2.6.1 已发布](https://github.com/microsoft/nni/releases/tag/v2.6.1) - *2022年2月18日*
+* **最新视频 demo**：[Youtube 入口](https://www.youtube.com/channel/UCKcafm6861B2mnYhPbZHavw) | [Bilibili 入口](https://space.bilibili.com/1649051673) - *上次更新：2021年5月26日*
+* **最新网络研讨会**：[介绍 Retiarii：NNI 上的深度学习探索性训练框架](https://note.microsoft.com/MSR-Webinar-Retiarii-Registration-Live.html) - *2021年6月24日*
+* **最新互动渠道**：[Discussions](https://github.com/microsoft/nni/discussions)
+* **最新粉丝福利表情包上线**：[nnSpider](./docs/en_US/Tutorial/NNSpider.md)
+
+## **NNI 功能一览**
+
+NNI 提供命令行工具以及友好的 WebUI 来管理训练的 Experiment。 通过可扩展的 API，可定制自动机器学习算法和训练平台。 为了方便新用户，NNI 内置了最新的自动机器学习算法，并为流行的训练平台提供了开箱即用的支持。
+
+下表包含了 NNI 当前的功能，同时我们在不断地增添新功能，也非常希望您能贡献其中。
+
+| | 支持的框架和库 | 算法 | 训练平台 |
+| --- | --- | --- | --- |
+| **内置** | **支持的框架**<br>• PyTorch<br>• Keras<br>• TensorFlow<br>• MXNet<br>• Caffe2<br>• 更多... <br><br>**支持的库**<br>• Scikit-learn<br>• XGBoost<br>• LightGBM<br>• 更多... | • 超参调优<br>• 神经网络架构搜索<br>• 模型压缩<br>• 特征工程（测试版）<br>• 提前终止算法 | |
+| **参考** | | | |
+
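+为了让上表背后的 Trial 工作流程更直观，下面给出一个极简的 Trial 脚本示意，展示 Trial 如何通过 NNI 的 Python API 与 NNI 交互。`nni.get_next_parameter`、`nni.report_intermediate_result` 和 `nni.report_final_result` 是 NNI 实际提供的 Trial API；`train_one_epoch` 只是占位的示例训练函数，并假定搜索空间仅包含一个 `lr` 参数。
+
+```python
+import nni
+
+def train_one_epoch(lr, epoch):
+    """Stand-in for real training code; returns a dummy accuracy."""
+    return min(0.99, lr * 10 + 0.05 * epoch)
+
+def main():
+    # The tuner supplies one hyperparameter set drawn from the search space.
+    params = nni.get_next_parameter() or {}   # e.g. {"lr": 0.01}
+    lr = params.get("lr", 0.01)
+
+    accuracy = 0.0
+    for epoch in range(10):
+        accuracy = train_one_epoch(lr, epoch)
+        # Intermediate results are what assessors use for early stopping.
+        nni.report_intermediate_result(accuracy)
+
+    # The final result is the metric the tuner optimizes.
+    nni.report_final_result(accuracy)
+
+if __name__ == '__main__':
+    main()
+```
+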
+
+## **安装**
+
+### **安装**
+
+NNI 支持并在 Ubuntu >= 16.04, macOS >= 10.14.1, 和 Windows 10 >= 1809 通过了测试。 在 64 位 Python >= 3.6 的环境中，只需要运行 `pip install` 即可完成安装。
+
+Linux 或 macOS
+
+```bash
+python3 -m pip install --upgrade nni
+```
+
+Windows
+
+```bash
+python -m pip install --upgrade nni
+```
+
+如果想试试最新代码，可参考从源代码[安装 NNI](https://nni.readthedocs.io/zh/latest/installation.html)。
+
+Linux 和 macOS 下 NNI 系统需求[参考这里](https://nni.readthedocs.io/zh/latest/Tutorial/InstallationLinux.html#system-requirements)，Windows [参考这里](https://nni.readthedocs.io/zh/latest/Tutorial/InstallationWin.html#system-requirements)。
+
+注意：
+
+* 如果遇到任何权限问题，可添加 `--user` 在用户目录中安装 NNI。
+* 目前，Windows 上的 NNI 支持本机、远程和 OpenPAI 模式。 强烈推荐使用 Anaconda 或 Miniconda [在 Windows 上安装 NNI](https://nni.readthedocs.io/zh/stable/Tutorial/InstallationWin.html)。
+* 如果遇到如 `Segmentation fault` 等错误，参考[常见问题](https://nni.readthedocs.io/zh/stable/Tutorial/FAQ.html)。 Windows 上的 FAQ 参考[在 Windows 上使用 NNI](https://nni.readthedocs.io/zh/stable/Tutorial/InstallationWin.html#faq)。
+
+### **验证安装**
+
+* 通过克隆源代码下载示例。
+
+  ```bash
+  git clone -b v2.6 https://github.com/Microsoft/nni.git
+  ```
+
+* 运行 MNIST 示例。
+
+  Linux 或 macOS
+
+  ```bash
+  nnictl create --config nni/examples/trials/mnist-pytorch/config.yml
+  ```
+
+  Windows
+
+  ```powershell
+  nnictl create --config nni\examples\trials\mnist-pytorch\config_windows.yml
+  ```
+
+* 在命令行中等待输出 `INFO: Successfully started experiment!`。 此消息表明 Experiment 已成功启动。 通过命令行输出的 `Web UI url` 来访问 Experiment 的界面。
+
+```text
+INFO: Starting restful server...
+INFO: Successfully started Restful server!
+INFO: Setting local config...
+INFO: Successfully set local config!
+INFO: Starting experiment...
+INFO: Successfully started experiment!
+-----------------------------------------------------------------------
+The experiment id is egchD4qy
+The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
+-----------------------------------------------------------------------
+
+You can use these commands to get more information about the experiment
+-----------------------------------------------------------------------
+         commands                       description
+1. nnictl experiment show        show the information of experiments
+2. nnictl trial ls               list all of trial jobs
+3. nnictl top                    monitor the status of running experiments
+4. nnictl log stderr             show stderr log content
+5. nnictl log stdout             show stdout log content
+6. nnictl stop                   stop an experiment
+7. nnictl trial kill             kill a trial job by id
+8. nnictl --help                 get help information about nnictl
+-----------------------------------------------------------------------
+```
+
+* 在浏览器中打开 `Web UI url`，可看到下图的 Experiment 详细信息，以及所有的 Trial 任务。 查看[这里](https://nni.readthedocs.io/zh/stable/Tutorial/WebUI.html)的更多页面。
+
+*(WebUI 截图)*
+
+## **发布和贡献**
+
+NNI 有一个月度发布周期（主要发布）。 如果您遇到问题可以通过 [创建 issue](https://github.com/microsoft/nni/issues/new/choose) 来报告。
+
+我们感谢所有的贡献。 如果您计划提供任何 Bug 修复，请放手去做，不需要任何顾虑。
+
+如果您计划提供新的功能、新的 Tuner 和新的训练平台等，请先创建一个新的 issue 或重用现有 issue，并与我们讨论该功能。 我们会及时与您讨论这个问题，如有需要会安排电话会议。
+
+再次感谢所有的贡献者！
+
+## **反馈**
+
+* [在 GitHub 上提交问题](https://github.com/microsoft/nni/issues/new/choose)。
+* 打开或参与 [Discussions](https://github.com/microsoft/nni/discussions) 中的讨论。
+* 在 [Gitter](https://gitter.im/Microsoft/nni?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 中参与讨论。
+
+加入聊天组：
+
+| Gitter | | 微信 |
+| ---- | - | ---- |
+| ![image](https://user-images.githubusercontent.com/39592018/80665738-e0574a80-8acc-11ea-91bc-0836dc4cbf89.png) | 或 | ![image](https://github.com/scarlett2018/nniutil/raw/master/wechat.png) |
+
+## 测试状态
+
+### 必需
+
+| 类型 | 状态 |
+| :---: | :---: |
+| Fast test | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/fast%20test?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=54&branchName=master) |
+| Full linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20linux?repoName=microsoft%2Fnni&branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=62&repoName=microsoft%2Fnni&branchName=master) |
+| Full windows | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/full%20test%20-%20windows?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=63&branchName=master) |
+
+### 训练平台
+
+| 类型 | 状态 |
+| :---: | :---: |
+| Remote - linux to linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20linux%20to%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=64&branchName=master) |
+| Remote - linux to windows | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20linux%20to%20windows?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=67&branchName=master) |
+| Remote - windows to linux | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20remote%20-%20windows%20to%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=68&branchName=master) |
+| OpenPAI | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20openpai%20-%20linux?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=65&branchName=master) |
+| Frameworkcontroller | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20frameworkcontroller?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=70&branchName=master) |
+| Kubeflow | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20kubeflow?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=69&branchName=master) |
+| Hybrid | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20hybrid?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=79&branchName=master) |
+| AzureML | [![Build Status](https://msrasrg.visualstudio.com/NNIOpenSource/_apis/build/status/integration%20test%20-%20aml?branchName=master)](https://msrasrg.visualstudio.com/NNIOpenSource/_build/latest?definitionId=78&branchName=master) |
+
+## 相关项目
+
+针对开放性和推进最先进的技术，[微软研究院（MSR）](https://www.microsoft.com/en-us/research/group/systems-and-networking-research-group-asia/) 还发布了其他几个开源项目。
+
+* [OpenPAI](https://github.com/Microsoft/pai)：作为开源平台，提供了完整的 AI 模型训练和资源管理能力，能轻松扩展，并支持各种规模的私有部署、云和混合环境。
+* [FrameworkController](https://github.com/Microsoft/frameworkcontroller)：开源的通用 Kubernetes Pod 控制器，通过单个控制器来编排 Kubernetes 上所有类型的应用。
+* [MMdnn](https://github.com/Microsoft/MMdnn)：一个完整、跨框架的解决方案，能够转换、可视化、诊断深度神经网络模型。 MMdnn 中的 "MM" 表示 model management（模型管理），而 "dnn" 是 deep neural network（深度神经网络）的缩写。
+* [SPTAG](https://github.com/Microsoft/SPTAG)：Space Partition Tree And Graph (SPTAG) 是用于大规模向量的最近邻搜索场景的开源库。
+
+我们鼓励研究人员和学生利用这些项目来加速 AI 开发和研究。
+
+## **许可协议**
+
+代码库遵循 [MIT 许可协议](LICENSE)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000000000000000000000000000000000..926b8ae4059a79c6a5f3aea7691480c5b6588269
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,41 @@
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g.
buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). + + diff --git a/SECURITY_zh_CN.md b/SECURITY_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..bfd7da66aa9b56a490e5590ba3d6503c1a01bf94 --- /dev/null +++ b/SECURITY_zh_CN.md @@ -0,0 +1,41 @@ + + +## 安全 + +微软非常重视软件产品和服务的安全性,包括通过我们的 GitHub 组织管理的所有源代码库,其中涵盖 [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin),和 [我们 GitHub 的组织](https://opensource.microsoft.com/)。 + +如果你在任何微软拥有的资源库中发现了安全漏洞,并且符合 [微软对安全漏洞的定义](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)),请按照下文所述向我们报告。 + +## 报告安全问题 + +**请不要通过公开的 GitHub 问题报告安全漏洞。** + +相反,请向微软安全响应中心(MSRC)报告,链接是 [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report)。 + +如果您希望在不登录的情况下提交,请发送电子邮件至 [secure@microsoft.com](mailto:secure@microsoft.com)。 如果可能的话,请用我们的 PGP 密钥对您的信息进行加密;请从以下网站下载该密钥 [微软安全响应中心 PGP 密钥页面](https://www.microsoft.com/en-us/msrc/pgp-key-msrc)。 + +你应该在24小时内收到回复。 如果由于某些原因你没有收到,请通过电子邮件跟进,以确保我们收到你的原始信息。 其他信息可以在以下网站找到 [microsoft.com/msrc](https://www.microsoft.com/msrc)。 + +请包括以下所要求的信息(尽可能多地提供),以帮助我们更好地了解可能的问题的性质和范围。 + + * 问题类型(如缓冲区溢出、SQL 注入、跨站脚本等) + * 与问题表现有关的源文件的完整路径 + * 受影响的源代码位置(标签/分支/提交或 URL) + * 重现该问题所需的任何特殊配置 + * 重现该问题的分步骤说明 + * 概念证明或漏洞代码(如果可能的话) + * 该问题的影响,包括攻击者如何利用该问题 + +这些信息将帮助我们更快地对你的报告进行分流。 + +如果您需要报告错误赏金,更完整的报告可有助于获得更高的赏金奖励。 请访问我们的[微软漏洞赏金计划](https://microsoft.com/msrc/bounty)页面,以了解有关我们活动计划的更多详细信息。 + +## 首选语言 + +我们希望所有的交流都是用英语进行的。 + +## 政策 + +微软遵循[协调漏洞披露](https://www.microsoft.com/en-us/msrc/cvd)的原则。 + + diff --git a/crowdin.yml b/crowdin.yml new file mode 100644 index 0000000000000000000000000000000000000000..2ff696f276997e13968feab10febb73b0e5e05ac --- /dev/null +++ b/crowdin.yml @@ -0,0 +1,15 @@ +project_id_env: CROWDIN_PROJECT_ID +api_token_env: CROWDIN_PERSONAL_TOKEN +preserve_hierarchy: true +files: + - source: /docs/en_US/**/* + ignore: + - /docs/zh_CN/**/* + translation: /docs/%locale_with_underscore%/**/%original_file_name% + - source: '/**/*.[mM][dD]' + ignore: + - '/**/*_%locale_with_underscore%.md' + - /docs + - /%locale_with_underscore% + - /.github + translation: /%original_path%/%file_name%_%locale_with_underscore%.md diff --git a/dependencies/develop.txt b/dependencies/develop.txt new file mode 100644 index 0000000000000000000000000000000000000000..27e676d1832d21a14ccf781a1657e443f4bfab10 --- /dev/null +++ b/dependencies/develop.txt @@ -0,0 +1,13 @@ +pylint +flake8 +sphinx +sphinx-argparse +sphinx-rtd-theme +sphinxcontrib-websupport +nbsphinx +pytest 
+pytest-cov
+pytest-azurepipelines
+coverage
+ipython
+jupyterlab
diff --git a/dependencies/recommended.txt b/dependencies/recommended.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9724f81cfa7e846f7ec84653174751f9fd95ed3d
--- /dev/null
+++ b/dependencies/recommended.txt
@@ -0,0 +1,17 @@
+# Recommended because some non-commonly-used modules/examples depend on those packages.
+
+-f https://download.pytorch.org/whl/torch_stable.html
+tensorflow == 2.7.0
+tensorboard == 2.7.0
+torch == 1.10.0+cpu ; sys_platform != "darwin"
+torch == 1.10.0 ; sys_platform == "darwin"
+torchvision == 0.11.1+cpu ; sys_platform != "darwin"
+torchvision == 0.11.1 ; sys_platform == "darwin"
+pytorch-lightning >= 1.5.0
+torchmetrics
+onnx
+peewee
+graphviz
+gym
+tianshou >= 0.4.1
+matplotlib
diff --git a/dependencies/recommended_gpu.txt b/dependencies/recommended_gpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c63425353b224c8e91f1796ea98b0f0f2ec00e49
--- /dev/null
+++ b/dependencies/recommended_gpu.txt
@@ -0,0 +1,13 @@
+# Recommended because some non-commonly-used modules/examples depend on those packages.
+
+-f https://download.pytorch.org/whl/torch_stable.html
+tensorflow
+keras == 2.4.3
+torch == 1.10.0+cu111
+torchvision == 0.11.1+cu111
+pytorch-lightning >= 1.5.0
+onnx
+peewee
+graphviz
+gym
+tianshou >= 0.4.1
diff --git a/dependencies/recommended_legacy.txt b/dependencies/recommended_legacy.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e4a2401b5491b25e50c2e279d259c17d36370b0c
--- /dev/null
+++ b/dependencies/recommended_legacy.txt
@@ -0,0 +1,17 @@
+-f https://download.pytorch.org/whl/torch_stable.html
+tensorflow == 1.15.4
+torch == 1.7.1+cpu
+torchvision == 0.8.2+cpu
+
+# This will install pytorch-lightning 0.8.x, with which the unit tests won't work.
+# The latest version conflicts with tensorboard and tensorflow 1.x.
+pytorch-lightning
+torchmetrics
+
+keras == 2.1.6
+onnx
+peewee
+graphviz
+gym
+tianshou >= 0.4.1
+matplotlib < 3.4
diff --git a/dependencies/required.txt b/dependencies/required.txt
new file mode 100644
index 0000000000000000000000000000000000000000..322d2fcc734e20f26dac28caa9224faf461fa79a
--- /dev/null
+++ b/dependencies/required.txt
@@ -0,0 +1,29 @@
+astor
+hyperopt == 0.1.2
+json_tricks >= 3.15.5
+psutil
+pyyaml >= 5.4
+requests
+responses ; python_version >= "3.7"
+responses < 0.18 ; python_version < "3.7"
+schema
+typeguard
+PythonWebHDFS
+colorama
+scikit-learn >= 0.24.1 ; python_version >= "3.7"
+scikit-learn < 1.0 ; python_version < "3.7"
+websockets >= 10.1 ; python_version >= "3.7"
+websockets <= 10.0 ; python_version < "3.7"
+filelock ; python_version >= "3.7"
+filelock < 3.4 ; python_version < "3.7"
+prettytable
+cloudpickle
+dataclasses ; python_version < "3.7"
+typing_extensions ; python_version < "3.8"
+numpy < 1.19.4 ; sys_platform == "win32"
+numpy < 1.20 ; sys_platform != "win32" and python_version < "3.7"
+numpy ; sys_platform != "win32" and python_version >= "3.7"
+scipy < 1.6 ; python_version < "3.7"
+scipy ; python_version >= "3.7"
+pandas < 1.2 ; python_version < "3.7"
+pandas ; python_version >= "3.7"
diff --git a/dependencies/required_extra.txt b/dependencies/required_extra.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ec68e1246235b9dfdca5b6aaf6f9ac32f9c9942
--- /dev/null
+++ b/dependencies/required_extra.txt
@@ -0,0 +1,16 @@
+# The following content will be read by setup.py.
+# Please follow the logic in setup.py.
+
+# SMAC
+ConfigSpaceNNI
+smac4nni
+
+# BOHB
+ConfigSpace>=0.4.11
+statsmodels>=0.12.0
+
+# PPOTuner
+gym
+
+# DNGO
+pybnn
diff --git a/dependencies/setup.txt b/dependencies/setup.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aab6943b049c1cf96eeb59f7346dff618e123070
--- /dev/null
+++ b/dependencies/setup.txt
@@ -0,0 +1,3 @@
+pip
+wheel
+setuptools
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..68a1b67065d6a6a20c76764550a9d475006d63d1
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1 @@
+**/_build
diff --git a/docs/en_US/Assessor/BuiltinAssessor.rst b/docs/en_US/Assessor/BuiltinAssessor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6b85253a73613c5424abb2e4b27b26fb328e7ea3
--- /dev/null
+++ b/docs/en_US/Assessor/BuiltinAssessor.rst
@@ -0,0 +1,101 @@
+.. role:: raw-html(raw)
+   :format: html
+
+
+Built-in Assessors
+==================
+
+NNI provides state-of-the-art early stopping algorithms as built-in assessors and makes them easy to use. Below is a brief overview of NNI's current built-in assessors.
+
+Note: Click the **Assessor's name** to get each assessor's installation requirements, suggested usage scenario, and a config example. A link to a detailed description of each algorithm is provided at the end of the suggested scenario for each assessor.
+
+Currently, we support the following assessors:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Assessor
+     - Brief Introduction of Algorithm
+   * - `Medianstop <#MedianStop>`__
+     - Medianstop is a simple early stopping rule. It stops a pending trial X at step S if the trial's best objective value by step S is strictly worse than the median value of the running averages of all completed trials' objectives reported up to step S. `Reference Paper `__
+   * - `Curvefitting <#Curvefitting>`__
+     - The Curve Fitting Assessor is an LPA (learning, predicting, assessing) algorithm. It stops a pending trial X at step S if the prediction of the final epoch's performance is worse than the best final performance in the trial history. In this algorithm, we use 12 curves to fit the accuracy curve. `Reference Paper `__
+
+
+Usage of Built-in Assessors
+---------------------------
+
+Using a built-in assessor provided by the NNI SDK requires declaring the **builtinAssessorName** and **classArgs** in the ``config.yml`` file. In this part, we introduce the usage details, the suggested scenarios, the classArgs requirements, and an example for each assessor.
+
+Note: Please follow the provided format when writing your ``config.yml`` file.
+
+:raw-html:`<a name="MedianStop"></a>`
+
+Median Stop Assessor
+^^^^^^^^^^^^^^^^^^^^
+
+..
+
+   Builtin Assessor Name: **Medianstop**
+
+
+**Suggested scenario**
+
+It's applicable to a wide range of performance curves, and thus can be used in various scenarios to speed up the tuning progress. `Detailed Description <./MedianstopAssessor.rst>`__
+
+**classArgs requirements:**
+
+
+* **optimize_mode** (*maximize or minimize, optional, default = maximize*\ ) - If 'maximize', the assessor will **stop** trials with smaller expectation. If 'minimize', the assessor will **stop** trials with larger expectation.
+* **start_step** (*int, optional, default = 0*\ ) - A trial is judged for early stopping only after it has reported start_step intermediate results.
+
+**Usage example:**
+
code-block:: yaml + + # config.yml + assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize + start_step: 5 + +:raw-html:`
<br>` + +:raw-html:`<a name="Curvefitting"></a>` + +Curve Fitting Assessor +^^^^^^^^^^^^^^^^^^^^^^ + +.. + + Builtin Assessor Name: **Curvefitting** + + +**Suggested scenario** + +It is applicable to a wide range of performance curves, so it can be used in various scenarios to speed up the tuning progress. Even better, it is able to handle and assess curves with similar performance. `Detailed Description <./CurvefittingAssessor.rst>`__ + +**Note**\ : according to the original paper, only incremental functions are supported, so this assessor can only be used to maximize optimization metrics. For example, it can be used for accuracy, but not for loss. + +**classArgs requirements:** + + +* **epoch_num** (*int, required*\ ) - The total number of epochs. We need to know the number of epochs to determine which point to predict. +* **start_step** (*int, optional, default = 6*\ ) - A trial is judged for early stopping only after it has reported at least start_step intermediate results. +* **threshold** (*float, optional, default = 0.95*\ ) - The threshold used to decide whether to early stop the worst performance curves. For example: if threshold = 0.95 and the best performance in the history is 0.9, then we will stop any trial whose predicted value is lower than 0.95 * 0.9 = 0.855. +* **gap** (*int, optional, default = 1*\ ) - The interval between Assessor judgements. For example: if gap = 2 and start_step = 6, then we will assess the result upon receiving the 6th, 8th, 10th, 12th... intermediate results. + +**Usage example:** + +.. code-block:: yaml + + # config.yml + assessor: + builtinAssessorName: Curvefitting + classArgs: + epoch_num: 20 + start_step: 6 + threshold: 0.95 + gap: 1 diff --git a/docs/en_US/Assessor/CurvefittingAssessor.rst b/docs/en_US/Assessor/CurvefittingAssessor.rst new file mode 100644 index 0000000000000000000000000000000000000000..41c6d2c1478754e5e74af48ea9f222141b2e1ecd --- /dev/null +++ b/docs/en_US/Assessor/CurvefittingAssessor.rst @@ -0,0 +1,101 @@ +Curve Fitting Assessor on NNI +============================= + +Introduction +------------ + +The Curve Fitting Assessor is an LPA (learning, predicting, assessing) algorithm. It stops a pending trial X at step S if the prediction of the final epoch's performance is worse than the best final performance in the trial history. + +In this algorithm, we use 12 curves to fit the learning curve. The set of parametric curve models is chosen from this `reference paper `__. The shape of these curves coincides with our prior knowledge about the form of learning curves: they are typically increasing, saturating functions. + + +.. image:: ../../img/curvefitting_learning_curve.PNG + :target: ../../img/curvefitting_learning_curve.PNG + :alt: learning_curve + + +We combine all learning curve models into a single, more powerful model. This combined model is given by a weighted linear combination: + + +.. image:: ../../img/curvefitting_f_comb.gif + :target: ../../img/curvefitting_f_comb.gif + :alt: f_comb + + +with the new combined parameter vector + + +.. image:: ../../img/curvefitting_expression_xi.gif + :target: ../../img/curvefitting_expression_xi.gif + :alt: expression_xi + + +We assume additive Gaussian noise, with the noise parameter initialized to its maximum likelihood estimate. + +We determine the maximum-probability value of the combined parameter vector by learning from the historical data, use it to predict future trial performance, and stop the unpromising trials to save computing resources.
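To make the combined model concrete, below is a minimal sketch of the weighted-combination idea. It uses two representative saturating curve families with purely illustrative weights and parameters; the actual implementation combines 12 curve models and obtains the weights via MCMC sampling.

.. code-block:: python

   import numpy as np

   # Two representative saturating curve families (illustrative; the real model combines 12).
   def log_power(x, a, b, c):
       return a / (1. + (x / np.exp(b)) ** c)

   def weibull(x, alpha, beta, kappa, delta):
       return alpha - (alpha - beta) * np.exp(-(kappa * x) ** delta)

   def f_comb(x, weights, thetas):
       # Weighted linear combination of the base curves.
       curves = np.array([log_power(x, *thetas[0]), weibull(x, *thetas[1])])
       return float(np.dot(weights, curves))

   # Predict the final-epoch performance from (hypothetical) fitted weights/parameters.
   epoch_num = 20
   weights = [0.6, 0.4]                                  # would come from MCMC sampling
   thetas = [(0.9, 1.0, -2.0), (0.95, 0.1, 0.1, 1.0)]    # would come from least-squares fitting
   predicted_final = f_comb(epoch_num, weights, thetas)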
+ +Concretely, this algorithm goes through three stages of learning, predicting, and assessing. + + +* + Step 1: Learning. We learn from the trial history of the current trial and determine \xi from a Bayesian perspective. First, we fit each curve using the least-squares method, implemented by ``fit_theta``. After obtaining the parameters, we filter the curves and remove outliers, implemented by ``filter_curve``. Finally, we use the MCMC sampling method, implemented by ``mcmc_sampling``\ , to adjust the weight of each curve. At this point, all the parameters in \xi are determined. + +* + Step 2: Predicting. We calculate the expected final accuracy, implemented by ``f_comb``\ , at the target position (i.e., the total number of epochs) using \xi and the formula of the combined model. + +* + Step 3: Assessing. If the fitting result doesn't converge, the predicted value is ``None``\ ; in this case, we return ``AssessResult.Good`` to ask for more accuracy information and predict again later. Otherwise, the ``predict()`` function returns a concrete value: if this value is strictly greater than the best final performance in history * ``THRESHOLD``\  (default value = 0.95), we return ``AssessResult.Good``\ ; otherwise, we return ``AssessResult.Bad``. + +The figure below shows the result of our algorithm on MNIST trial history data, where the green points represent the data obtained by the Assessor, the blue points represent future but unknown data, and the red line is the curve predicted by the Curve Fitting Assessor. + + +.. image:: ../../img/curvefitting_example.PNG + :target: ../../img/curvefitting_example.PNG + :alt: examples + + +Usage +----- + +To use the Curve Fitting Assessor, add the following spec to your experiment's YAML config file: + +.. code-block:: yaml + + assessor: + builtinAssessorName: Curvefitting + classArgs: + # (required) The total number of epochs. + # We need to know the number of epochs to determine which point to predict. + epoch_num: 20 + # (optional) In order to save computing resources, we start to predict only after receiving start_step reported intermediate results. + # The default value of start_step is 6. + start_step: 6 + # (optional) The threshold used to decide whether to early stop the worst performance curves. + # For example: if threshold = 0.95 and the best performance in the history is 0.9, then we will stop any trial whose predicted value is lower than 0.95 * 0.9 = 0.855. + # The default value of threshold is 0.95. + threshold: 0.95 + # (optional) The interval between Assessor judgements. + # For example: if gap = 2 and start_step = 6, then we will assess the result upon receiving the 6th, 8th, 10th, 12th... intermediate results. + # The default value of gap is 1. + gap: 1 + +Limitation +---------- + +According to the original paper, only incremental functions are supported, so this assessor can only be used to maximize optimization metrics. For example, it can be used for accuracy, but not for loss. + +File Structure +-------------- + +The assessor consists of a number of files, functions, and classes. Here we briefly describe a few of them. + + +* ``curvefunctions.py`` includes all the function expressions and default parameters. +* ``modelfactory.py`` includes learning and predicting; the corresponding calculation part is also implemented here. +* ``curvefitting_assessor.py`` is the assessor which receives the trial history and assesses whether to early stop the trial.
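As a rough illustration of how these pieces fit together, the assessing decision described above can be sketched as follows; the ``predict`` argument here is a hypothetical stand-in for the learning and predicting stages (``fit_theta`` / ``filter_curve`` / ``mcmc_sampling`` / ``f_comb``), not the actual interface:

.. code-block:: python

   from nni.assessor import AssessResult

   THRESHOLD = 0.95  # same default as the threshold classArg

   def assess_trial(trial_history, best_history, epoch_num, predict):
       # predict() fits the combined curve model to the history and
       # extrapolates to the final epoch; it returns None if the fit
       # has not converged yet.
       predicted = predict(trial_history, target=epoch_num)
       if predicted is None:
           # Not enough information yet; keep the trial running.
           return AssessResult.Good
       if predicted > best_history * THRESHOLD:
           return AssessResult.Good
       return AssessResult.Bad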
+ +TODO +---- + + +* Further improve the accuracy of the prediction and test it on more models. diff --git a/docs/en_US/Assessor/CustomizeAssessor.rst b/docs/en_US/Assessor/CustomizeAssessor.rst new file mode 100644 index 0000000000000000000000000000000000000000..34cef8ebf46e25a3c9f19b634f4b3efc46d800b4 --- /dev/null +++ b/docs/en_US/Assessor/CustomizeAssessor.rst @@ -0,0 +1,65 @@ +Customize Assessor +================== + +NNI supports building your own assessor to meet your tuning needs. + +If you want to implement a customized Assessor, there are three things to do: + + +#. Inherit the base Assessor class +#. Implement the assess_trial function +#. Configure your customized Assessor in the experiment YAML config file + +**1. Inherit the base Assessor class** + +.. code-block:: python + + from nni.assessor import Assessor + + class CustomizedAssessor(Assessor): + def __init__(self, ...): + ... + +**2. Implement the assess_trial function** + +.. code-block:: python + + from nni.assessor import Assessor, AssessResult + + class CustomizedAssessor(Assessor): + def __init__(self, ...): + ... + + def assess_trial(self, trial_history): + """ + Determines whether a trial should be killed. Must override. + trial_history: a list of intermediate result objects. + Returns AssessResult.Good or AssessResult.Bad. + """ + # your code goes here. + ... + +**3. Configure your customized Assessor in the experiment YAML config file** + +NNI needs to locate your customized Assessor class and instantiate it, so you need to specify the location of the customized Assessor class and pass literal values as parameters to its __init__ constructor. + +.. code-block:: yaml + + assessor: + codeDir: /home/abc/myassessor + classFileName: my_customized_assessor.py + className: CustomizedAssessor + # Any parameter that needs to be passed to your Assessor class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + arg1: value1 + +Please note that in step **2**, the ``trial_history`` object is exactly the object that the trial sends to the Assessor via the SDK function ``report_intermediate_result``. + +The working directory of your assessor is ``/nni-experiments//log``\ , which can be retrieved with the environment variable ``NNI_LOG_DIRECTORY``. + +For more detailed examples, see: + +* :githublink:`medianstop-assessor ` +* :githublink:`curvefitting-assessor ` + diff --git a/docs/en_US/Assessor/MedianstopAssessor.rst b/docs/en_US/Assessor/MedianstopAssessor.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a307bf0d3405c4979ea2cddc55f36aae0dc3a49 --- /dev/null +++ b/docs/en_US/Assessor/MedianstopAssessor.rst @@ -0,0 +1,7 @@ +Medianstop Assessor on NNI +========================== + +Median Stop +----------- + +Medianstop is a simple early stopping rule mentioned in this `paper `__. It stops a pending trial X after step S if the trial's best objective value by step S is strictly worse than the median value of the running averages of all completed trials' objectives reported up to step S. diff --git a/docs/en_US/CommunitySharings/HpoComparison.rst b/docs/en_US/CommunitySharings/HpoComparison.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a95e42f875c47577b4b0b625c4e11e7568a25a0 --- /dev/null +++ b/docs/en_US/CommunitySharings/HpoComparison.rst @@ -0,0 +1,385 @@ +Hyper Parameter Optimization Comparison +======================================= + +*Posted by Anonymous Author* + +A comparison of Hyperparameter Optimization (HPO) algorithms on several problems.
+ +Hyperparameter Optimization algorithms are list below: + + +* `Random Search <../Tuner/BuiltinTuner.rst>`__ +* `Grid Search <../Tuner/BuiltinTuner.rst>`__ +* `Evolution <../Tuner/BuiltinTuner.rst>`__ +* `Anneal <../Tuner/BuiltinTuner.rst>`__ +* `Metis <../Tuner/BuiltinTuner.rst>`__ +* `TPE <../Tuner/BuiltinTuner.rst>`__ +* `SMAC <../Tuner/BuiltinTuner.rst>`__ +* `HyperBand <../Tuner/BuiltinTuner.rst>`__ +* `BOHB <../Tuner/BuiltinTuner.rst>`__ + +All algorithms run in NNI local environment. + +Machine Environment: + +.. code-block:: bash + + OS: Linux Ubuntu 16.04 LTS + CPU: Intel(R) Xeon(R) CPU E5-2690 v3 @ 2.60GHz 2600 MHz + Memory: 112 GB + NNI Version: v0.7 + NNI Mode(local|pai|remote): local + Python version: 3.6 + Is conda or virtualenv used?: Conda + is running in docker?: no + +AutoGBDT Example +---------------- + +Problem Description +^^^^^^^^^^^^^^^^^^^ + +Nonconvex problem on the hyper-parameter search of `AutoGBDT <../TrialExample/GbdtExample.rst>`__ example. + +Search Space +^^^^^^^^^^^^ + +.. code-block:: json + + { + "num_leaves": { + "_type": "choice", + "_value": [10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 48, 64, 96, 128] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.00001, 0.0001, 0.001, 0.01, 0.05, 0.1, 0.2, 0.5] + }, + "max_depth": { + "_type": "choice", + "_value": [-1, 2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 48, 64, 96, 128] + }, + "feature_fraction": { + "_type": "choice", + "_value": [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] + }, + "bagging_fraction": { + "_type": "choice", + "_value": [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] + }, + "bagging_freq": { + "_type": "choice", + "_value": [1, 2, 4, 8, 10, 12, 14, 16] + } + } + +The total search space is 1,204,224, we set the number of maximum trial to 1000. The time limitation is 48 hours. + +Results +^^^^^^^ + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Algorithm + - Best loss + - Average of Best 5 Losses + - Average of Best 10 Losses + * - Random Search + - 0.418854 + - 0.420352 + - 0.421553 + * - Random Search + - 0.417364 + - 0.420024 + - 0.420997 + * - Random Search + - 0.417861 + - 0.419744 + - 0.420642 + * - Grid Search + - 0.498166 + - 0.498166 + - 0.498166 + * - Evolution + - 0.409887 + - 0.409887 + - 0.409887 + * - Evolution + - 0.413620 + - 0.413875 + - 0.414067 + * - Evolution + - 0.409887 + - 0.409887 + - 0.409887 + * - Anneal + - 0.414877 + - 0.417289 + - 0.418281 + * - Anneal + - 0.409887 + - 0.409887 + - 0.410118 + * - Anneal + - 0.413683 + - 0.416949 + - 0.417537 + * - Metis + - 0.416273 + - 0.420411 + - 0.422380 + * - Metis + - 0.420262 + - 0.423175 + - 0.424816 + * - Metis + - 0.421027 + - 0.424172 + - 0.425714 + * - TPE + - 0.414478 + - 0.414478 + - 0.414478 + * - TPE + - 0.415077 + - 0.417986 + - 0.418797 + * - TPE + - 0.415077 + - 0.417009 + - 0.418053 + * - SMAC + - **0.408386** + - **0.408386** + - **0.408386** + * - SMAC + - 0.414012 + - 0.414012 + - 0.414012 + * - SMAC + - **0.408386** + - **0.408386** + - **0.408386** + * - BOHB + - 0.410464 + - 0.415319 + - 0.417755 + * - BOHB + - 0.418995 + - 0.420268 + - 0.422604 + * - BOHB + - 0.415149 + - 0.418072 + - 0.418932 + * - HyperBand + - 0.414065 + - 0.415222 + - 0.417628 + * - HyperBand + - 0.416807 + - 0.417549 + - 0.418828 + * - HyperBand + - 0.415550 + - 0.415977 + - 0.417186 + * - GP + - 0.414353 + - 0.418563 + - 0.420263 + * - GP + - 0.414395 + - 0.418006 + - 0.420431 + * - GP + - 0.412943 + - 0.416566 + - 0.418443 + + +In this example, all the algorithms are used with default parameters. 
For Metis, there are about 300 trials because it runs slowly due to its high time complexity O(n^3) in Gaussian Process. + +RocksDB Benchmark 'fillrandom' and 'readrandom' +----------------------------------------------- + +Problem Description +^^^^^^^^^^^^^^^^^^^ + +`DB_Bench `__ is the main tool that is used to benchmark `RocksDB `__\ 's performance. It has so many hapermeter to tune. + +The performance of ``DB_Bench`` is associated with the machine configuration and installation method. We run the ``DB_Bench``\ in the Linux machine and install the Rock in shared library. + +Machine configuration +^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + RocksDB: version 6.1 + CPU: 6 * Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz + CPUCache: 35840 KB + Keys: 16 bytes each + Values: 100 bytes each (50 bytes after compression) + Entries: 1000000 + +Storage performance +^^^^^^^^^^^^^^^^^^^ + +**Latency**\ : each IO request will take some time to complete, this is called the average latency. There are several factors that would affect this time including network connection quality and hard disk IO performance. + +**IOPS**\ : **IO operations per second**\ , which means the amount of *read or write operations* that could be done in one seconds time. + +**IO size**\ : **the size of each IO request**. Depending on the operating system and the application/service that needs disk access it will issue a request to read or write a certain amount of data at the same time. + +**Throughput (in MB/s) = Average IO size x IOPS** + +IOPS is related to online processing ability and we use the IOPS as the metric in my experiment. + +Search Space +^^^^^^^^^^^^ + +.. code-block:: json + + { + "max_background_compactions": { + "_type": "quniform", + "_value": [1, 256, 1] + }, + "block_size": { + "_type": "quniform", + "_value": [1, 500000, 1] + }, + "write_buffer_size": { + "_type": "quniform", + "_value": [1, 130000000, 1] + }, + "max_write_buffer_number": { + "_type": "quniform", + "_value": [1, 128, 1] + }, + "min_write_buffer_number_to_merge": { + "_type": "quniform", + "_value": [1, 32, 1] + }, + "level0_file_num_compaction_trigger": { + "_type": "quniform", + "_value": [1, 256, 1] + }, + "level0_slowdown_writes_trigger": { + "_type": "quniform", + "_value": [1, 1024, 1] + }, + "level0_stop_writes_trigger": { + "_type": "quniform", + "_value": [1, 1024, 1] + }, + "cache_size": { + "_type": "quniform", + "_value": [1, 30000000, 1] + }, + "compaction_readahead_size": { + "_type": "quniform", + "_value": [1, 30000000, 1] + }, + "new_table_reader_for_compaction_inputs": { + "_type": "randint", + "_value": [1] + } + } + +The search space is enormous (about 10^40) and we set the maximum number of trial to 100 to limit the computation resource. + +Results +^^^^^^^ + +fillrandom' Benchmark +^^^^^^^^^^^^^^^^^^^^^ + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Model + - Best IOPS (Repeat 1) + - Best IOPS (Repeat 2) + - Best IOPS (Repeat 3) + * - Random + - 449901 + - 427620 + - 477174 + * - Anneal + - 461896 + - 467150 + - 437528 + * - Evolution + - 436755 + - 389956 + - 389790 + * - TPE + - 378346 + - 482316 + - 468989 + * - SMAC + - 491067 + - 490472 + - **491136** + * - Metis + - 444920 + - 457060 + - 454438 + + +Figure: + + +.. image:: ../../img/hpo_rocksdb_fillrandom.png + :target: ../../img/hpo_rocksdb_fillrandom.png + :alt: + + +'readrandom' Benchmark +^^^^^^^^^^^^^^^^^^^^^^ + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Model + - Best IOPS (Repeat 1) + - Best IOPS (Repeat 2) + - Best IOPS (Repeat 3) + * - Random + - 2276157 + - 2285301 + - 2275142 + * - Anneal + - 2286330 + - 2282229 + - 2284012 + * - Evolution + - 2286524 + - 2283673 + - 2283558 + * - TPE + - 2287366 + - 2282865 + - 2281891 + * - SMAC + - 2270874 + - 2284904 + - 2282266 + * - Metis + - **2287696** + - 2283496 + - 2277701 + + +Figure: + + +.. image:: ../../img/hpo_rocksdb_readrandom.png + :target: ../../img/hpo_rocksdb_readrandom.png + :alt: + diff --git a/docs/en_US/CommunitySharings/ModelCompressionComparison.rst b/docs/en_US/CommunitySharings/ModelCompressionComparison.rst new file mode 100644 index 0000000000000000000000000000000000000000..de22b8639028824c9bda6352f5b95396955ac7e8 --- /dev/null +++ b/docs/en_US/CommunitySharings/ModelCompressionComparison.rst @@ -0,0 +1,133 @@ +Comparison of Filter Pruning Algorithms +======================================= + +To provide an initial insight into the performance of various filter pruning algorithms, +we conduct extensive experiments with various pruning algorithms on some benchmark models and datasets. +We present the experiment result in this document. +In addition, we provide friendly instructions on the re-implementation of these experiments to facilitate further contributions to this effort. + +Experiment Setting +------------------ + +The experiments are performed with the following pruners/datasets/models: + + +* + Models: :githublink:`VGG16, ResNet18, ResNet50 ` + +* + Datasets: CIFAR-10 + +* + Pruners: + + + * These pruners are included: + + * Pruners with scheduling : ``SimulatedAnnealing Pruner``\ , ``NetAdapt Pruner``\ , ``AutoCompress Pruner``. + Given the overal sparsity requirement, these pruners can automatically generate a sparsity distribution among different layers. + * One-shot pruners: ``L1Filter Pruner``\ , ``L2Filter Pruner``\ , ``FPGM Pruner``. + The sparsity of each layer is set the same as the overall sparsity in this experiment. + + * + Only **filter pruning** performances are compared here. + + For the pruners with scheduling, ``L1Filter Pruner`` is used as the base algorithm. That is to say, after the sparsities distribution is decided by the scheduling algorithm, ``L1Filter Pruner`` is used to performn real pruning. + + * + All the pruners listed above are implemented in :githublink:`nni `. + +Experiment Result +----------------- + +For each dataset/model/pruner combination, we prune the model to different levels by setting a series of target sparsities for the pruner. + +Here we plot both **Number of Weights - Performances** curve and **FLOPs - Performance** curve. +As a reference, we also plot the result declared in the paper `AutoCompress: An Automatic DNN Structured Pruning Framework for Ultra-High Compression Rates `__ for models VGG16 and ResNet18 on CIFAR-10. + +The experiment result are shown in the following figures: + +CIFAR-10, VGG16: + + +.. image:: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_vgg16.png + :target: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_vgg16.png + :alt: + + +CIFAR-10, ResNet18: + + +.. image:: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet18.png + :target: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet18.png + :alt: + + +CIFAR-10, ResNet50: + + +.. 
image:: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet50.png + :target: ../../../examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet50.png + :alt: + + +Analysis +-------- + +From the experiment results, we draw the following conclusions: + + +* Given a constraint on the number of parameters, the pruners with scheduling ( ``AutoCompress Pruner`` , ``SimulatedAnnealing Pruner`` ) perform better than the others when the constraint is strict. However, they have no such advantage in the FLOPs/Performance comparison, since only the number-of-parameters constraint is considered in the optimization process; +* The basic algorithms ``L1Filter Pruner`` , ``L2Filter Pruner`` , ``FPGM Pruner`` perform very similarly in these experiments; +* ``NetAdapt Pruner`` cannot achieve a very high compression rate. This is caused by its mechanism of pruning only one layer in each pruning iteration, which leads to unacceptable complexity if the sparsity per iteration is much lower than the overall sparsity constraint. + +Experiments Reproduction +------------------------ + +Implementation Details +^^^^^^^^^^^^^^^^^^^^^^ + + +* + The experiment results are all collected with the default configuration of the pruners in nni, which means that when we call a pruner class in nni, we don't change any default class arguments. + +* + Both FLOPs and the number of parameters are counted with :githublink:`Model FLOPs/Parameters Counter ` after :githublink:`model speed up `. + This avoids potential issues of counting them on masked models. + +* + The experiment code can be found :githublink:`here `. + +Experiment Result Rendering +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* + If you follow the practice in the :githublink:`example `\ , for every single pruning experiment, the experiment result will be saved in JSON format as follows: + + .. code-block:: json + + { + "performance": {"original": 0.9298, "pruned": 0.1, "speedup": 0.1, "finetuned": 0.7746}, + "params": {"original": 14987722.0, "speedup": 167089.0}, + "flops": {"original": 314018314.0, "speedup": 38589922.0} + } + +* + The experiment results are saved :githublink:`here `. + You can refer to :githublink:`analyze ` to plot new performance comparison figures. + +Contribution +------------ + +TODO Items +^^^^^^^^^^ + + +* Pruners constrained by FLOPS/latency +* More pruning algorithms/datasets/models + +Issues +^^^^^^ + +For algorithm implementation & experiment issues, please `create an issue `__. diff --git a/docs/en_US/CommunitySharings/NNI_AutoFeatureEng.rst b/docs/en_US/CommunitySharings/NNI_AutoFeatureEng.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c16f3cb3f15fd7fbe1ccb65dbe2b21e66cba409 --- /dev/null +++ b/docs/en_US/CommunitySharings/NNI_AutoFeatureEng.rst @@ -0,0 +1,142 @@ +.. role:: raw-html(raw) + :format: html + + +NNI review article from Zhihu - By Garvin Li +======================================================================================================================== + +This article is by an NNI user on the Zhihu forum. In the article, Garvin shared his experience of using NNI for automatic feature engineering. We think this article is very useful for users who are interested in using NNI for feature engineering. With the author's permission, we translated the original article into English.
**source**\ : `How to evaluate Microsoft's newly released AutoML platform NNI? (如何看待微软最新发布的AutoML平台NNI?) By Garvin Li `__ + +01 Overview of AutoML +--------------------- + +In the author's opinion, AutoML is not only about hyperparameter optimization, but a process that can target various stages of the machine learning pipeline, including feature engineering, NAS, HPO, etc. + +02 Overview of NNI +------------------ + +NNI (Neural Network Intelligence) is an open source AutoML toolkit from Microsoft that helps users design and tune machine learning models, neural network architectures, or a complex system's parameters in an efficient and automatic way. + +Link: `https://github.com/Microsoft/nni `__ + +In general, most Microsoft tools share one prominent characteristic: a highly reasonable design (regardless of the degree of technical innovation). NNI's AutoFeatureENG meets users' feature-engineering requirements with a very reasonable underlying framework design. + +03 Details of NNI-AutoFeatureENG +-------------------------------- + +.. + + The article follows the GitHub project: `https://github.com/SpongebBob/tabular_automl_NNI `__. + + +New users can do automatic feature engineering with NNI easily and efficiently. To explore the AutoFeatureENG capability, download the required files from the project above, then install NNI through pip. + + +.. image:: https://pic3.zhimg.com/v2-8886eea730cad25f5ac06ef1897cd7e4_r.jpg + :target: https://pic3.zhimg.com/v2-8886eea730cad25f5ac06ef1897cd7e4_r.jpg + :alt: + +NNI treats AutoFeatureENG as a two-step task: feature generation exploration and feature selection. Feature generation exploration is mainly about feature derivation and high-order feature combination. + +04 Feature Exploration +---------------------- + +For feature derivation, NNI offers many operations which can automatically generate new features, listed \ `as follows `__\ : + +**count**\ : Count encoding is based on replacing categories with their counts computed on the train set, also named frequency encoding. + +**target**\ : Target encoding is based on encoding categorical variable values with the mean of the target variable per value. + +**embedding**\ : Regard features as sentences and generate vectors using *Word2Vec*. + +**crosscout**\ : Count encoding on more than one dimension, similar to CTR (Click Through Rate). + +**aggregete**\ : Decide the aggregation functions of the features, including min/max/mean/var. + +**nunique**\ : Statistics of the number of unique features. + +**histsta**\ : Statistics of feature buckets, like histogram statistics. + +The search space can be defined in a **JSON file**\ : it specifies how features intersect, which two columns intersect, and how features are generated from the corresponding columns. + + +.. image:: https://pic1.zhimg.com/v2-3c3eeec6eea9821e067412725e5d2317_r.jpg + :target: https://pic1.zhimg.com/v2-3c3eeec6eea9821e067412725e5d2317_r.jpg + :alt: + + +The picture shows the procedure of defining a search space. NNI provides count encoding as a 1-order operation, as well as cross count encoding and aggregate statistics (min, max, var, mean, median, nunique) as 2-order operations. + +For example, if we want to search for frequency encoding (value count) features on the columns named {"C1", ..., "C26"}, we can define it in the following way: + + +.. 
image:: https://github.com/JSong-Jia/Pic/blob/master/images/pic%203.jpg + :target: https://github.com/JSong-Jia/Pic/blob/master/images/pic%203.jpg + :alt: + + +we can define a cross frequency encoding (value count on cross dims) method on columns {"C1",...,"C26"} x {"C1",...,"C26"} in the following way: + + +.. image:: https://github.com/JSong-Jia/Pic/blob/master/images/pic%204.jpg + :target: https://github.com/JSong-Jia/Pic/blob/master/images/pic%204.jpg + :alt: + + +The purpose of Exploration is to generate new features. You can use **get_next_parameter** function to get received feature candidates of one trial. + +.. + + RECEIVED_PARAMS = nni.get_next_parameter() + + +05 Feature selection +-------------------- + +To avoid feature explosion and overfitting, feature selection is necessary. In the feature selection of NNI-AutoFeatureENG, LightGBM (Light Gradient Boosting Machine), a gradient boosting framework developed by Microsoft, is mainly promoted. + + +.. image:: https://pic2.zhimg.com/v2-7bf9c6ae1303692101a911def478a172_r.jpg + :target: https://pic2.zhimg.com/v2-7bf9c6ae1303692101a911def478a172_r.jpg + :alt: + + +If you have used **XGBoost** or **GBDT**\ , you would know the algorithm based on tree structure can easily calculate the importance of each feature on results. LightGBM is able to make feature selection naturally. + +The issue is that selected features might be applicable to *GBDT* (Gradient Boosting Decision Tree), but not to the linear algorithm like *LR* (Logistic Regression). + + +.. image:: https://pic4.zhimg.com/v2-d2f919497b0ed937acad0577f7a8df83_r.jpg + :target: https://pic4.zhimg.com/v2-d2f919497b0ed937acad0577f7a8df83_r.jpg + :alt: + + +06 Summary +---------- + +NNI's AutoFeatureEng sets a well-established standard, showing us the operation procedure, available modules, which is highly convenient to use. However, a simple model is probably not enough for good results. + +Suggestions to NNI +------------------ + +About Exploration: If consider using DNN (like xDeepFM) to extract high-order feature would be better. + +About Selection: There could be more intelligent options, such as automatic selection system based on downstream models. + +Conclusion: NNI could offer users some inspirations of design and it is a good open source project. I suggest researchers leverage it to accelerate the AI research. + +Tips: Because the scripts of open source projects are compiled based on gcc7, Mac system may encounter problems of gcc (GNU Compiler Collection). The solution is as follows: + +.. code-block:: bash + + brew install libomp diff --git a/docs/en_US/CommunitySharings/NNI_colab_support.rst b/docs/en_US/CommunitySharings/NNI_colab_support.rst new file mode 100644 index 0000000000000000000000000000000000000000..438f66bb2684c2123d8eba67d6021727074e73fb --- /dev/null +++ b/docs/en_US/CommunitySharings/NNI_colab_support.rst @@ -0,0 +1,47 @@ +Use NNI on Google Colab +======================= + +NNI can easily run on Google Colab platform. However, Colab doesn't expose its public IP and ports, so by default you can not access NNI's Web UI on Colab. To solve this, you need a reverse proxy software like ``ngrok`` or ``frp``. This tutorial will show you how to use ngrok to access NNI's Web UI on Colab. + +How to Open NNI's Web UI on Google Colab +---------------------------------------- + + +#. Install required packages and softwares. + +.. code-block:: bash + + ! pip install nni # install nni + ! 
wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip # download ngrok and unzip it + ! unzip ngrok-stable-linux-amd64.zip + ! mkdir -p nni_repo + ! git clone https://github.com/microsoft/nni.git nni_repo/nni # clone NNI's offical repo to get examples + + +#. Register a ngrok account `here `__\ , then connect to your account using your authtoken. + +.. code-block:: bash + + ! ./ngrok authtoken + + +#. Start an NNI example on a port bigger than 1024, then start ngrok with the same port. If you want to use gpu, make sure gpuNum >= 1 in config.yml. Use ``get_ipython()`` to start ngrok since it will be stuck if you use ``! ngrok http 5000 &``. + +.. code-block:: bash + + ! nnictl create --config nni_repo/nni/examples/trials/mnist-pytorch/config.yml --port 5000 & + get_ipython().system_raw('./ngrok http 5000 &') + + +#. Check the public url. + +.. code-block:: bash + + ! curl -s http://localhost:4040/api/tunnels # don't change the port number 4040 + +You will see an url like http://xxxx.ngrok.io after step 4, open this url and you will find NNI's Web UI. Have fun :) + +Access Web UI with frp +---------------------- + +frp is another reverse proxy software with similar functions. However, frp doesn't provide free public urls, so you may need an server with public IP as a frp server. See `here `__ to know more about how to deploy frp. diff --git a/docs/en_US/CommunitySharings/NasComparison.rst b/docs/en_US/CommunitySharings/NasComparison.rst new file mode 100644 index 0000000000000000000000000000000000000000..1c155194c6684ce13c53e41e6e7a97826aa89c6e --- /dev/null +++ b/docs/en_US/CommunitySharings/NasComparison.rst @@ -0,0 +1,165 @@ +Neural Architecture Search Comparison +===================================== + +*Posted by Anonymous Author* + +Train and Compare NAS (Neural Architecture Search) models including Autokeras, DARTS, ENAS and NAO. + +Their source code link is as below: + + +* + Autokeras: `https://github.com/jhfjhfj1/autokeras `__ + +* + DARTS: `https://github.com/quark0/darts `__ + +* + ENAS: `https://github.com/melodyguan/enas `__ + +* + NAO: `https://github.com/renqianluo/NAO `__ + +Experiment Description +---------------------- + +To avoid over-fitting in **CIFAR-10**\ , we also compare the models in the other five datasets including Fashion-MNIST, CIFAR-100, OUI-Adience-Age, ImageNet-10-1 (subset of ImageNet), ImageNet-10-2 (another subset of ImageNet). We just sample a subset with 10 different labels from ImageNet to make ImageNet-10-1 or ImageNet-10-2. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Dataset + - Training Size + - Numer of Classes + - Descriptions + * - `Fashion-MNIST `__ + - 60,000 + - 10 + - T-shirt/top, trouser, pullover, dress, coat, sandal, shirt, sneaker, bag and ankle boot. + * - `CIFAR-10 `__ + - 50,000 + - 10 + - Airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships and trucks. + * - `CIFAR-100 `__ + - 50,000 + - 100 + - Similar to CIFAR-10 but with 100 classes and 600 images each. + * - `OUI-Adience-Age `__ + - 26,580 + - 8 + - 8 age groups/labels (0-2, 4-6, 8-13, 15-20, 25-32, 38-43, 48-53, 60-). + * - `ImageNet-10-1 `__ + - 9,750 + - 10 + - Coffee mug, computer keyboard, dining table, wardrobe, lawn mower, microphone, swing, sewing machine, odometer and gas pump. + * - `ImageNet-10-2 `__ + - 9,750 + - 10 + - Drum, banj, whistle, grand piano, violin, organ, acoustic guitar, trombone, flute and sax. + + +We do not change the default fine-tuning technique in their source code. 
In order to match each task, the codes of input image shape and output numbers are changed. + +Search phase time for all NAS methods is **two days** as well as the retrain time. Average results are reported based on **three repeat times**. Our evaluation machines have one Nvidia Tesla P100 GPU, 112GB of RAM and one 2.60GHz CPU (Intel E5-2690). + +For NAO, it requires too much computing resources, so we only use NAO-WS which provides the pipeline script. + +For AutoKeras, we used 0.2.18 version because it was the latest version when we started the experiment. + +NAS Performance +--------------- + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - NAS + - AutoKeras (%) + - ENAS (macro) (%) + - ENAS (micro) (%) + - DARTS (%) + - NAO-WS (%) + * - Fashion-MNIST + - 91.84 + - 95.44 + - 95.53 + - **95.74** + - 95.20 + * - CIFAR-10 + - 75.78 + - 95.68 + - **96.16** + - 94.23 + - 95.64 + * - CIFAR-100 + - 43.61 + - 78.13 + - 78.84 + - **79.74** + - 75.75 + * - OUI-Adience-Age + - 63.20 + - **80.34** + - 78.55 + - 76.83 + - 72.96 + * - ImageNet-10-1 + - 61.80 + - 77.07 + - 79.80 + - **80.48** + - 77.20 + * - ImageNet-10-2 + - 37.20 + - 58.13 + - 56.47 + - 60.53 + - **61.20** + + +Unfortunately, we cannot reproduce all the results in the paper. + +The best or average results reported in the paper: + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - NAS + - AutoKeras(%) + - ENAS (macro) (%) + - ENAS (micro) (%) + - DARTS (%) + - NAO-WS (%) + * - CIFAR- 10 + - 88.56(best) + - 96.13(best) + - 97.11(best) + - 97.17(average) + - 96.47(best) + + +For AutoKeras, it has relatively worse performance across all datasets due to its random factor on network morphism. + +For ENAS, ENAS (macro) shows good results in OUI-Adience-Age and ENAS (micro) shows good results in CIFAR-10. + +For DARTS, it has a good performance on some datasets but we found its high variance in other datasets. The difference among three runs of benchmarks can be up to 5.37% in OUI-Adience-Age and 4.36% in ImageNet-10-1. + +For NAO-WS, it shows good results in ImageNet-10-2 but it can perform very poorly in OUI-Adience-Age. + +Reference +--------- + + +#. + Jin, Haifeng, Qingquan Song, and Xia Hu. "Efficient neural architecture search with network morphism." *arXiv preprint arXiv:1806.10282* (2018). + +#. + Liu, Hanxiao, Karen Simonyan, and Yiming Yang. "Darts: Differentiable architecture search." arXiv preprint arXiv:1806.09055 (2018). + +#. + Pham, Hieu, et al. "Efficient Neural Architecture Search via Parameters Sharing." international conference on machine learning (2018): 4092-4101. + +#. + Luo, Renqian, et al. "Neural Architecture Optimization." neural information processing systems (2018): 7827-7838. diff --git a/docs/en_US/CommunitySharings/ParallelizingTpeSearch.rst b/docs/en_US/CommunitySharings/ParallelizingTpeSearch.rst new file mode 100644 index 0000000000000000000000000000000000000000..80b7913269b5eba2b0d64f900d7bd42183cd286f --- /dev/null +++ b/docs/en_US/CommunitySharings/ParallelizingTpeSearch.rst @@ -0,0 +1,183 @@ +.. role:: raw-html(raw) + :format: html + + +Parallelizing a Sequential Algorithm TPE +======================================== + +TPE approaches were actually run asynchronously in order to make use of multiple compute nodes and to avoid wasting time waiting for trial evaluations to complete. 
For the TPE approach, the so-called constant liar approach was used: each time a candidate point x∗ was proposed, a fake fitness evaluation of the y was assigned temporarily, until the evaluation completed and reported the actual loss f(x∗). + +Introduction and Problems +------------------------- + +Sequential Model-based Global Optimization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sequential Model-Based Global Optimization (SMBO) algorithms have been used in many applications where evaluation of the fitness function is expensive. In an application where the true fitness function f: X → R is costly to evaluate, model-based algorithms approximate f with a surrogate that is cheaper to evaluate. Typically the inner loop in an SMBO algorithm is the numerical optimization of this surrogate, or some transformation of the surrogate. The point x∗ that maximizes the surrogate (or its transformation) becomes the proposal for where the true function f should be evaluated. This active-learning-like algorithm template is summarized in the figure below. SMBO algorithms differ in what criterion they optimize to obtain x∗ given a model (or surrogate) of f, and in they model f via observation history H. + + +.. image:: ../../img/parallel_tpe_search4.PNG + :target: ../../img/parallel_tpe_search4.PNG + :alt: + + +The algorithms in this work optimize the criterion of Expected Improvement (EI). Other criteria have been suggested, such as Probability of Improvement and Expected Improvement, minimizing the Conditional Entropy of the Minimizer, and the bandit-based criterion. We chose to use the EI criterion in TPE because it is intuitive, and has been shown to work well in a variety of settings. Expected improvement is the expectation under some model M of f : X → RN that f(x) will exceed (negatively) some threshold y∗: + + +.. image:: ../../img/parallel_tpe_search_ei.PNG + :target: ../../img/parallel_tpe_search_ei.PNG + :alt: + + +Since calculation of p(y|x) is expensive, TPE approach modeled p(y|x) by p(x|y) and p(y).The TPE defines p(x|y) using two such densities: + + +.. image:: ../../img/parallel_tpe_search_tpe.PNG + :target: ../../img/parallel_tpe_search_tpe.PNG + :alt: + + +where l(x) is the density formed by using the observations {x(i)} such that corresponding loss +f(x(i)) was less than y∗ and g(x) is the density formed by using the remaining observations. TPE algorithm depends on a y∗ that is larger than the best observed f(x) so that some points can be used to form l(x). The TPE algorithm chooses y∗ to be some quantile γ of the observed y values, so that p(y<\ ``y∗``\ ) = γ, but no specific model for p(y) is necessary. The tree-structured form of l and g makes it easy to draw many candidates according to l and evaluate them according to g(x)/l(x). On each iteration, the algorithm returns the candidate x∗ with the greatest EI. + +Here is a simulation of the TPE algorithm in a two-dimensional search space. The difference of background color represents different values. It can be seen that TPE combines exploration and exploitation very well. (Black indicates the points of this round samples, and yellow indicates the points has been taken in the history.) + + +.. image:: ../../img/parallel_tpe_search1.gif + :target: ../../img/parallel_tpe_search1.gif + :alt: + + +**Since EI is a continuous function, the highest x of EI is determined at a certain status.** As shown in the figure below, the blue triangle is the point that is most likely to be sampled in this state. + + +.. 
image:: ../../img/parallel_tpe_search_ei2.PNG + :target: ../../img/parallel_tpe_search_ei2.PNG + :alt: + + +TPE performs well when we use it in sequential, but if we provide a larger concurrency, then **there will be a large number of points produced in the same EI state**\ , too concentrated points will reduce the exploration ability of the tuner, resulting in resources waste. + +Here is the simulation figure when we set ``concurrency=60``\ , It can be seen that this phenomenon is obvious. + + +.. image:: ../../img/parallel_tpe_search2.gif + :target: ../../img/parallel_tpe_search2.gif + :alt: + + +Research solution +----------------- + +Approximated q-EI Maximization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The multi-points criterion that we have presented below can potentially be used to deliver an additional design of experiments in one step through the resolution of the optimization problem. + + +.. image:: ../../img/parallel_tpe_search_qEI.PNG + :target: ../../img/parallel_tpe_search_qEI.PNG + :alt: + + +However, the computation of q-EI becomes intensive as q increases. After our research, there are four popular greedy strategies that approach the result of problem while avoiding its numerical cost. + +Solution 1: Believing the OK Predictor: The KB(Kriging Believer) Heuristic Strategy +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Kriging Believer strategy replaces the conditional knowledge about the responses at the sites chosen within the last iterations by deterministic values equal to the expectation of the Kriging predictor. Keeping the same notations as previously, the strategy can be summed up as follows: + + +.. image:: ../../img/parallel_tpe_search_kb.PNG + :target: ../../img/parallel_tpe_search_kb.PNG + :alt: + + +This sequential strategy delivers a q-points design and is computationally affordable since it relies on the analytically known EI, optimized in d dimensions. However, there is a risk of failure, since believing an OK predictor that overshoots the observed data may lead to a sequence that gets trapped in a non-optimal region for many iterations. We now propose a second strategy that reduces this risk. + +Solution 2: The CL(Constant Liar) Heuristic Strategy +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Let us now consider a sequential strategy in which the metamodel is updated (still without hyperparameter re-estimation) at each iteration with a value L exogenously fixed by the user, here called a ”lie”. The strategy referred to as the Constant Liar consists in lying with the same value L at every iteration: maximize EI (i.e. find xn+1), actualize the model as if y(xn+1) = L, and so on always with the same L ∈ R: + + +.. image:: ../../img/parallel_tpe_search_cl.PNG + :target: ../../img/parallel_tpe_search_cl.PNG + :alt: + + +L should logically be determined on the basis of the values taken by y at X. Three values, min{Y}, mean{Y}, and max{Y} are considered here. **The larger L is, the more explorative the algorithm will be, and vice versa.** + +We have simulated the method above. The following figure shows the result of using mean value liars to maximize q-EI. We find that the points we have taken have begun to be scattered. + + +.. 
image:: ../../img/parallel_tpe_search3.gif + :target: ../../img/parallel_tpe_search3.gif + :alt: + + +Experiment +---------- + +Branin-Hoo +^^^^^^^^^^ + +The four optimization strategies presented in the last section are now compared on the Branin-Hoo function which is a classical test-case in global optimization. + + +.. image:: ../../img/parallel_tpe_search_branin.PNG + :target: ../../img/parallel_tpe_search_branin.PNG + :alt: + + +The recommended values of a, b, c, r, s and t are: a = 1, b = 5.1 ⁄ (4π2), c = 5 ⁄ π, r = 6, s = 10 and t = 1 ⁄ (8π). This function has three global minimizers(-3.14, 12.27), (3.14, 2.27), (9.42, 2.47). + +Next is the comparison of the q-EI associated with the q first points (q ∈ [1,10]) given by the constant liar strategies (min and max), 2000 q-points designs uniformly drawn for every q, and 2000 q-points LHS designs taken at random for every q. + + +.. image:: ../../img/parallel_tpe_search_result.PNG + :target: ../../img/parallel_tpe_search_result.PNG + :alt: + + +As we can seen on figure, CL[max] and CL[min] offer very good q-EI results compared to random designs, especially for small values of q. + +Gaussian Mixed Model function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We also compared the case of using parallel optimization and not using parallel optimization. A two-dimensional multimodal Gaussian Mixed distribution is used to simulate, the following is our result: + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - + - concurrency=80 + - concurrency=60 + - concurrency=40 + - concurrency=20 + - concurrency=10 + * - Without parallel optimization + - avg = 0.4841 :raw-html:`
<br>` var = 0.1953 + - avg = 0.5155 :raw-html:`<br>` var = 0.2219 + - avg = 0.5773 :raw-html:`<br>` var = 0.2570 + - avg = 0.4680 :raw-html:`<br>` var = 0.1994 + - avg = 0.2774 :raw-html:`<br>` var = 0.1217 + * - With parallel optimization + - avg = 0.2132 :raw-html:`<br>` var = 0.0700 + - avg = 0.2177 :raw-html:`<br>` var = 0.0796 + - avg = 0.1835 :raw-html:`<br>` var = 0.0533 + - avg = 0.1671 :raw-html:`<br>` var = 0.0413 + - avg = 0.1918 :raw-html:`<br>
` var = 0.0697 + + +Note: The total number of samples per test is 240 (ensure that the budget is equal). The trials in each form were repeated 1000 times, the value is the average and variance of the best results in 1000 trials. + +References +---------- + +[1] James Bergstra, Remi Bardenet, Yoshua Bengio, Balazs Kegl. `Algorithms for Hyper-Parameter Optimization. `__ + +[2] Meng-Hiot Lim, Yew-Soon Ong. `Computational Intelligence in Expensive Optimization Problems. `__ + +[3] M. Jordan, J. Kleinberg, B. Scho¨lkopf. `Pattern Recognition and Machine Learning. `__ diff --git a/docs/en_US/CommunitySharings/RecommendersSvd.rst b/docs/en_US/CommunitySharings/RecommendersSvd.rst new file mode 100644 index 0000000000000000000000000000000000000000..080142d06ca48f76c9f02ec7ed259608c5cd74c3 --- /dev/null +++ b/docs/en_US/CommunitySharings/RecommendersSvd.rst @@ -0,0 +1,15 @@ +Automatically tuning SVD (NNI in Recommenders) +============================================== + +In this tutorial, we first introduce a github repo `Recommenders `__. It is a repository that provides examples and best practices for building recommendation systems, provided as Jupyter notebooks. It has various models that are popular and widely deployed in recommendation systems. To provide a complete end-to-end experience, they present each example in five key tasks, as shown below: + + +* `Prepare Data `__\ : Preparing and loading data for each recommender algorithm. +* Model(`collaborative filtering algorithms `__\ , `content-based filtering algorithms `__\ , `hybrid algorithms `__\ ): Building models using various classical and deep learning recommender algorithms such as Alternating Least Squares (\ `ALS `__\ ) or eXtreme Deep Factorization Machines (\ `xDeepFM `__\ ). +* `Evaluate `__\ : Evaluating algorithms with offline metrics. +* `Model Select and Optimize `__\ : Tuning and optimizing hyperparameters for recommender models. +* `Operationalize `__\ : Operationalizing models in a production environment on Azure. + +The fourth task is tuning and optimizing the model's hyperparameters, this is where NNI could help. To give a concrete example that NNI tunes the models in Recommenders, let's demonstrate with the model `SVD `__\ , and data Movielens100k. There are more than 10 hyperparameters to be tuned in this model. + +This `Jupyter notebook `__ provided by Recommenders is a very detailed step-by-step tutorial for this example. It uses different built-in tuning algorithms in NNI, including ``Annealing``\ , ``SMAC``\ , ``Random Search``\ , ``TPE``\ , ``Hyperband``\ , ``Metis`` and ``Evolution``. Finally, the results of different tuning algorithms are compared. Please go through this notebook to learn how to use NNI to tune SVD model, then you could further use NNI to tune other models in Recommenders. diff --git a/docs/en_US/CommunitySharings/SptagAutoTune.rst b/docs/en_US/CommunitySharings/SptagAutoTune.rst new file mode 100644 index 0000000000000000000000000000000000000000..70f7be67a3c86ed349da65c556e0da09792666d6 --- /dev/null +++ b/docs/en_US/CommunitySharings/SptagAutoTune.rst @@ -0,0 +1,9 @@ +Automatically tuning SPTAG with NNI +=================================== + +`SPTAG `__ (Space Partition Tree And Graph) is a library for large scale vector approximate nearest neighbor search scenario released by `Microsoft Research (MSR) `__ and `Microsoft Bing `__. + +This library assumes that the samples are represented as vectors and that the vectors can be compared by L2 distances or cosine distances. 
Vectors returned for a query vector are those with the smallest L2 distance or cosine distance to the query vector. +SPTAG provides two methods: kd-tree and relative neighborhood graph (SPTAG-KDT), and balanced k-means tree and relative neighborhood graph (SPTAG-BKT). SPTAG-KDT is advantageous in index-building cost, while SPTAG-BKT is advantageous in search accuracy for very high-dimensional data. + +In SPTAG, there are tens of parameters that can be tuned for specific scenarios or datasets. NNI is a great tool for automatically tuning those parameters. The authors of SPTAG tried NNI for the auto tuning and found good-performing parameters easily, so they shared their practice of tuning SPTAG with NNI in their document `here `__. Please refer to it for a detailed tutorial. diff --git a/docs/en_US/CommunitySharings/automodel.rst b/docs/en_US/CommunitySharings/automodel.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4cc7e7fd84cafb7f94dbf02b9c6bd37652d487a --- /dev/null +++ b/docs/en_US/CommunitySharings/automodel.rst @@ -0,0 +1,13 @@ +###################### +Automatic Model Tuning +###################### + +NNI can be applied to various model tuning tasks. Some state-of-the-art model search algorithms, such as EfficientNet, can be easily built on NNI. Popular models, e.g., recommendation models, can be tuned with NNI. The following are some use cases that illustrate how to leverage NNI in your model tuning tasks and how to build your own pipeline with NNI. + +.. toctree:: + :maxdepth: 1 + + Tuning SVD automatically + EfficientNet on NNI <../TrialExample/EfficientNet> + Automatic Model Architecture Search for Reading Comprehension <../TrialExample/SquadEvolutionExamples> + Parallelizing Optimization for TPE \ No newline at end of file diff --git a/docs/en_US/CommunitySharings/autosys.rst b/docs/en_US/CommunitySharings/autosys.rst new file mode 100644 index 0000000000000000000000000000000000000000..81ec1f559b08728d1120290f03286e5fe6c1c260 --- /dev/null +++ b/docs/en_US/CommunitySharings/autosys.rst @@ -0,0 +1,12 @@ +####################### +Automatic System Tuning +####################### + +The performance of systems, such as databases and tensor operator implementations, often needs to be tuned to adapt to specific hardware configurations, targeted workloads, etc. Manually tuning a system is complicated and often requires a detailed understanding of the hardware and workload. NNI can make such tasks much easier and help system owners find the best configuration for the system automatically. The detailed design philosophy of automatic system tuning can be found in this `paper `__\ . The following are some typical cases where NNI can help. + +.. 
toctree:: + :maxdepth: 1 + + Tuning SPTAG (Space Partition Tree And Graph) automatically + Tuning the performance of RocksDB <../TrialExample/RocksdbExamples> + Tuning Tensor Operators automatically <../TrialExample/OpEvoExamples> \ No newline at end of file diff --git a/docs/en_US/CommunitySharings/community_sharings.rst b/docs/en_US/CommunitySharings/community_sharings.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5fc0a841d5d04e23cfc1ef15b32fdb9f28ed1e0 --- /dev/null +++ b/docs/en_US/CommunitySharings/community_sharings.rst @@ -0,0 +1,39 @@ +####################### +Use Cases and Solutions +####################### + +Different from the tutorials and examples in the rest of the document which show the usage of a feature, this part mainly introduces end-to-end scenarios and use cases to help users further understand how NNI can help them. NNI can be widely adopted in various scenarios. We also encourage community contributors to share their AutoML practices especially the NNI usage practices from their experience. + +Use Cases and Solutions +======================= +.. toctree:: + :maxdepth: 2 + + Automatic Model Tuning (HPO/NAS) + Automatic System Tuning (AutoSys) + Model Compression + Feature Engineering + Performance measurement, comparison and analysis + Use NNI on Google Colab + +External Repositories and References +==================================== +With authors' permission, we listed a set of NNI usage examples and relevant articles. + +External Repositories +===================== + * `Hyperparameter Tuning for Matrix Factorization `__ with NNI + * `scikit-nni `__ Hyper-parameter search for scikit-learn pipelines using NNI + +Relevant Articles +================= + * `Cost-effective Hyper-parameter Tuning using AdaptDL with NNI - Feb 23, 2021 `__ + * `(in Chinese) A summary of NNI new capabilities in NNI 2.0 - Jan 21, 2021 `__ + * `(in Chinese) A summary of NNI new capabilities in 2019 - Dec 26, 2019 `__ + * `Find thy hyper-parameters for scikit-learn pipelines using Microsoft NNI - Nov 6, 2019 `__ + * `(in Chinese) AutoML tools (Advisor, NNI and Google Vizier) comparison - Aug 05, 2019 `__ + * `Hyper Parameter Optimization Comparison <./HpoComparison.rst>`__ + * `Neural Architecture Search Comparison <./NasComparison.rst>`__ + * `Parallelizing a Sequential Algorithm TPE <./ParallelizingTpeSearch.rst>`__ + * `Automatically tuning SVD with NNI <./RecommendersSvd.rst>`__ + * `Automatically tuning SPTAG with NNI <./SptagAutoTune.rst>`__ diff --git a/docs/en_US/CommunitySharings/feature_engineering.rst b/docs/en_US/CommunitySharings/feature_engineering.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0beecf78fe515536eb94947a46d425bb202e580 --- /dev/null +++ b/docs/en_US/CommunitySharings/feature_engineering.rst @@ -0,0 +1,10 @@ +################### +Feature Engineering +################### + +The following is an article about how NNI helps in auto feature engineering shared by a community contributor. More use cases and solutions will be added in the future. + +.. 
toctree:: + :maxdepth: 1 + + NNI review article from Zhihu - By Garvin Li \ No newline at end of file diff --git a/docs/en_US/CommunitySharings/model_compression.rst b/docs/en_US/CommunitySharings/model_compression.rst new file mode 100644 index 0000000000000000000000000000000000000000..ee569b08438a62fb9a3a345ae76ab5237418605f --- /dev/null +++ b/docs/en_US/CommunitySharings/model_compression.rst @@ -0,0 +1,10 @@ +################# +Model Compression +################# + +The following shows how to apply knowledge distillation with NNI model compression. More use cases and solutions will be added in the future. + +.. toctree:: + :maxdepth: 1 + + Knowledge distillation with NNI model compression <../TrialExample/KDExample> \ No newline at end of file diff --git a/docs/en_US/CommunitySharings/perf_compare.rst b/docs/en_US/CommunitySharings/perf_compare.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b80ccdc6cc2fa49fd4b2832d7a123069dc94ae6 --- /dev/null +++ b/docs/en_US/CommunitySharings/perf_compare.rst @@ -0,0 +1,12 @@ +################################################ +Performance Measurement, Comparison and Analysis +################################################ + +Performance comparison and analysis can help users choose a proper algorithm (e.g., tuner, NAS algorithm) for their scenario. The following are some measurement and comparison data for users' reference. + +.. toctree:: + :maxdepth: 1 + + Neural Architecture Search Comparison + Hyper-parameter Tuning Algorithm Comparison + Model Compression Algorithm Comparison \ No newline at end of file diff --git a/docs/en_US/Compression/AutoCompression.rst b/docs/en_US/Compression/AutoCompression.rst new file mode 100644 index 0000000000000000000000000000000000000000..9705c05161e41805f3272e0cd9892834d497cae4 --- /dev/null +++ b/docs/en_US/Compression/AutoCompression.rst @@ -0,0 +1,122 @@ +Auto Compression with NNI Experiment +==================================== + +If you want to compress your model but don't know which compression algorithm to choose, don't know what sparsity is suitable for your model, or just want to try more possibilities, auto compression may help you. +Users can choose different compression algorithms and define the algorithms' search space, and auto compression will launch an NNI experiment and automatically try different compression algorithms with varying sparsity. +Of course, in addition to the sparsity rate, users can also introduce other related parameters into the search space. +If you don't know what a search space is or how to write one, `this <./Tutorial/SearchSpaceSpec.rst>`__ is for your reference. +Using auto compression is similar to launching an NNI experiment from Python. +The main differences are as follows: + +* Use a generator to help generate the search space object. +* You need to provide the model to be compressed, and the model should already be pre-trained. +* There is no need to set ``trial_command``; instead, you additionally need to pass ``auto_compress_module`` as the ``AutoCompressionExperiment`` input. + +.. note:: + Auto compression only supports TPE Tuner, Random Search Tuner, Anneal Tuner, and Evolution Tuner right now. + +Generate search space +--------------------- + +Due to the extensive use of nested search spaces, we recommend using a generator to configure the search space. +The following is an example. Use ``add_config()`` to add a sub-config, then ``dumps()`` to dump the search space dict. + +.. 
code-block:: python + + from nni.algorithms.compression.pytorch.auto_compress import AutoCompressionSearchSpaceGenerator + + generator = AutoCompressionSearchSpaceGenerator() + generator.add_config('level', [ + { + "sparsity": { + "_type": "uniform", + "_value": [0.01, 0.99] + }, + 'op_types': ['default'] + } + ]) + generator.add_config('qat', [ + { + 'quant_types': ['weight', 'output'], + 'quant_bits': { + 'weight': 8, + 'output': 8 + }, + 'op_types': ['Conv2d', 'Linear'] + }]) + + search_space = generator.dumps() + +Now we support the following pruners and quantizers: + +.. code-block:: python + + PRUNER_DICT = { + 'level': LevelPruner, + 'slim': SlimPruner, + 'l1': L1FilterPruner, + 'l2': L2FilterPruner, + 'fpgm': FPGMPruner, + 'taylorfo': TaylorFOWeightFilterPruner, + 'apoz': ActivationAPoZRankFilterPruner, + 'mean_activation': ActivationMeanRankFilterPruner + } + + QUANTIZER_DICT = { + 'naive': NaiveQuantizer, + 'qat': QAT_Quantizer, + 'dorefa': DoReFaQuantizer, + 'bnn': BNNQuantizer + } + +Provide user model for compression +---------------------------------- + +Users need to inherit ``AbstractAutoCompressionModule`` and override its abstract class functions. + +.. code-block:: python + + from nni.algorithms.compression.pytorch.auto_compress import AbstractAutoCompressionModule + + class AutoCompressionModule(AbstractAutoCompressionModule): + @classmethod + def model(cls) -> nn.Module: + ... + return _model + + @classmethod + def evaluator(cls) -> Callable[[nn.Module], float]: + ... + return _evaluator + +Users need to implement at least ``model()`` and ``evaluator()``. +If you use an iterative pruner, you additionally need to implement ``optimizer_factory()``, ``criterion()`` and ``sparsifying_trainer()``. +If you want to finetune the model after compression, you need to implement ``optimizer_factory()``, ``criterion()``, ``post_compress_finetuning_trainer()`` and ``post_compress_finetuning_epochs()``. +``optimizer_factory()`` should return a factory function whose input is an iterable of parameters (i.e., your ``model.parameters()``) and whose output is an optimizer instance. +The two kinds of ``trainer()`` should return a trainer function that takes ``model, optimizer, criterion, current_epoch`` as input. +The full abstract interface refers to :githublink:`interface.py `. +An example of ``AutoCompressionModule`` implementation refers to :githublink:`auto_compress_module.py `.
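+ +For illustration, a minimal sketch of these optional members is shown below; the contract follows the description above, while the concrete optimizer/criterion choices and the ``train_loader`` used inside the trainer are assumptions rather than part of the interface: + +.. code-block:: python + +   import torch +   from torch import nn + +   class AutoCompressionModule(AbstractAutoCompressionModule): +       ...  # model() and evaluator() as shown above + +       @classmethod +       def optimizer_factory(cls): +           # factory function: iterable of parameters -> optimizer instance +           return lambda params: torch.optim.SGD(params, lr=0.01, momentum=0.9) + +       @classmethod +       def criterion(cls): +           return nn.CrossEntropyLoss() + +       @classmethod +       def post_compress_finetuning_trainer(cls): +           def trainer(model, optimizer, criterion, current_epoch): +               model.train() +               for data, target in train_loader:  # assumed user-provided loader +                   optimizer.zero_grad() +                   loss = criterion(model(data), target) +                   loss.backward() +                   optimizer.step() +           return trainer + +       @classmethod +       def post_compress_finetuning_epochs(cls): +           return 2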
+ +Launch NNI experiment +--------------------- + +This is similar to launching from Python; the difference is that there is no need to set ``trial_command``, and the user-provided ``AutoCompressionModule`` is passed as the ``AutoCompressionExperiment`` input. + +.. code-block:: python + + from pathlib import Path + from nni.algorithms.compression.pytorch.auto_compress import AutoCompressionExperiment + + from auto_compress_module import AutoCompressionModule + + experiment = AutoCompressionExperiment(AutoCompressionModule, 'local') + experiment.config.experiment_name = 'auto compression torch example' + experiment.config.trial_concurrency = 1 + experiment.config.max_trial_number = 10 + experiment.config.search_space = search_space + experiment.config.trial_code_directory = Path(__file__).parent + experiment.config.tuner.name = 'TPE' + experiment.config.tuner.class_args['optimize_mode'] = 'maximize' + experiment.config.training_service.use_active_gpu = True + + experiment.run(8088) diff --git a/docs/en_US/Compression/CompressionReference.rst b/docs/en_US/Compression/CompressionReference.rst new file mode 100644 index 0000000000000000000000000000000000000000..df2e278726c356dda8f4453be1b27af1ec71cc2f --- /dev/null +++ b/docs/en_US/Compression/CompressionReference.rst @@ -0,0 +1,160 @@ +Model Compression API Reference +=============================== + +.. contents:: + +Compressors +----------- + +Compressor +^^^^^^^^^^ + +.. autoclass:: nni.compression.pytorch.compressor.Compressor + :members: + +.. autoclass:: nni.compression.pytorch.compressor.Pruner + :members: + +.. autoclass:: nni.compression.pytorch.compressor.Quantizer + :members: + + +Module Wrapper +^^^^^^^^^^^^^^ + +.. autoclass:: nni.compression.pytorch.compressor.PrunerModuleWrapper + :members: + + +.. autoclass:: nni.compression.pytorch.compressor.QuantizerModuleWrapper + :members: + +Weight Masker +^^^^^^^^^^^^^ +.. autoclass:: nni.algorithms.compression.pytorch.pruning.weight_masker.WeightMasker + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.structured_pruning_masker.StructuredWeightMasker + :members: + + +Pruners +^^^^^^^ +.. autoclass:: nni.algorithms.compression.pytorch.pruning.sensitivity_pruner.SensitivityPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.one_shot_pruner.OneshotPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.one_shot_pruner.LevelPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.one_shot_pruner.L1FilterPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.one_shot_pruner.L2FilterPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.one_shot_pruner.FPGMPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.IterativePruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.SlimPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.TaylorFOWeightFilterPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.ActivationAPoZRankFilterPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.ActivationMeanRankFilterPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.AGPPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.iterative_pruner.ADMMPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.auto_compress_pruner.AutoCompressPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.net_adapt_pruner.NetAdaptPruner + :members: + +.. 
autoclass:: nni.algorithms.compression.pytorch.pruning.simulated_annealing_pruner.SimulatedAnnealingPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.lottery_ticket.LotteryTicketPruner + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.transformer_pruner.TransformerHeadPruner + :members: + +Quantizers +^^^^^^^^^^ +.. autoclass:: nni.algorithms.compression.pytorch.quantization.NaiveQuantizer + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.quantization.QAT_Quantizer + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.quantization.DoReFaQuantizer + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.quantization.BNNQuantizer + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.quantization.LsqQuantizer + :members: + +.. autoclass:: nni.algorithms.compression.pytorch.quantization.ObserverQuantizer + :members: + +Model Speedup +------------- + +Quantization Speedup +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: nni.compression.pytorch.quantization_speedup.backend.BaseModelSpeedup + :members: + +.. autoclass:: nni.compression.pytorch.quantization_speedup.integrated_tensorrt.ModelSpeedupTensorRT + :members: + +.. autoclass:: nni.compression.pytorch.quantization_speedup.calibrator.Calibrator + :members: + + +Compression Utilities +--------------------- + +Sensitivity Utilities +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: nni.compression.pytorch.utils.sensitivity_analysis.SensitivityAnalysis + :members: + +Topology Utilities +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: nni.compression.pytorch.utils.shape_dependency.ChannelDependency + :members: + +.. autoclass:: nni.compression.pytorch.utils.shape_dependency.GroupDependency + :members: + +.. autoclass:: nni.compression.pytorch.utils.mask_conflict.GroupMaskConflict + :members: + +.. autoclass:: nni.compression.pytorch.utils.mask_conflict.ChannelMaskConflict + :members: + +Model FLOPs/Parameters Counter +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autofunction:: nni.compression.pytorch.utils.counter.count_flops_params diff --git a/docs/en_US/Compression/CompressionUtils.rst b/docs/en_US/Compression/CompressionUtils.rst new file mode 100644 index 0000000000000000000000000000000000000000..90215d5f60096253b8c1a68d13104826165ed827 --- /dev/null +++ b/docs/en_US/Compression/CompressionUtils.rst @@ -0,0 +1,191 @@ +Analysis Utils for Model Compression +==================================== + +.. contents:: + +We provide several easy-to-use tools for users to analyze their model during model compression. + +Sensitivity Analysis +-------------------- + +First, we provide a sensitivity analysis tool (\ **SensitivityAnalysis**\ ) for users to analyze the sensitivity of each convolutional layer in their model. Specifically, SensitivityAnalysis gradually prunes each layer of the model and tests the accuracy of the model at the same time. Note that SensitivityAnalysis prunes only one layer at a time, while the other layers keep their original weights. Based on the accuracies of different convolutional layers under different sparsities, we can easily find out which layers the model accuracy is most sensitive to. + +Usage +^^^^^ + +The following code shows the basic usage of SensitivityAnalysis. + +.. 
code-block:: python + + import os + + import torch + from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis + + def val(model): + model.eval() + total = 0 + correct = 0 + with torch.no_grad(): + for batchid, (data, label) in enumerate(val_loader): + data, label = data.cuda(), label.cuda() + out = model(data) + _, predicted = out.max(1) + total += data.size(0) + correct += predicted.eq(label).sum().item() + return correct / total + + s_analyzer = SensitivityAnalysis(model=net, val_func=val) + sensitivity = s_analyzer.analysis(val_args=[net]) + os.makedirs(outdir, exist_ok=True) + s_analyzer.export(os.path.join(outdir, filename)) + +Two key parameters of SensitivityAnalysis are ``model`` and ``val_func``. ``model`` is the neural network to be analyzed, and ``val_func`` is the validation function that returns the model accuracy, loss, or another metric on the validation dataset. Because different scenarios may calculate the loss/accuracy in different ways, users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. +SensitivityAnalysis can export the sensitivity results as a CSV file; the usage is shown in the example above. + +Furthermore, users can specify the sparsity values used to prune each layer via the optional parameter ``sparsities``. + +.. code-block:: python + + s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75]) + +SensitivityAnalysis will gradually prune 25%, 50% and 75% of the weights for each layer and record the model's accuracy at the same time (SensitivityAnalysis prunes only one layer at a time; the other layers keep their original weights). If ``sparsities`` is not set, SensitivityAnalysis uses ``numpy.arange(0.1, 1.0, 0.1)`` as the default sparsity values. + +Users can also speed up the sensitivity analysis via the ``early_stop_mode`` and ``early_stop_value`` options. By default, SensitivityAnalysis tests the accuracy under all sparsities for each layer. In contrast, when ``early_stop_mode`` and ``early_stop_value`` are set, the sensitivity analysis for a layer stops as soon as the accuracy/loss meets the threshold set by ``early_stop_value``. We support four early-stop modes: minimize, maximize, dropped, raised. + +minimize: The analysis stops when the validation metric returned by the val_func is lower than ``early_stop_value``. + +maximize: The analysis stops when the validation metric returned by the val_func is larger than ``early_stop_value``. + +dropped: The analysis stops when the validation metric has dropped by ``early_stop_value``. + +raised: The analysis stops when the validation metric has risen by ``early_stop_value``. + +.. code-block:: python + + s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop_mode='dropped', early_stop_value=0.1) + +If users only want to analyze several specified convolutional layers, they can specify the target conv layers via ``specified_layers`` in the ``analysis`` function. ``specified_layers`` is a list that consists of the PyTorch module names of the conv layers. For example: + +.. code-block:: python + + sensitivity = s_analyzer.analysis(val_args=[net], specified_layers=['Conv1']) + +In this example, only the ``Conv1`` layer is analyzed. In addition, users can easily parallelize the analysis by launching multiple processes and assigning different conv layers of the same model to each process, as sketched below.
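+ +The snippet below sketches one possible way to do this with the standard ``multiprocessing`` module. It is only a sketch: the layer names are hypothetical, and in practice each worker process should construct its own copy of the model, data loader and validation function: + +.. code-block:: python + +   from multiprocessing import Process + +   def analyze_layers(layers, out_file): +       # each worker analyzes its own subset of conv layers +       analyzer = SensitivityAnalysis(model=net, val_func=val) +       analyzer.analysis(val_args=[net], specified_layers=layers) +       analyzer.export(out_file) + +   if __name__ == '__main__': +       subsets = [['features.0', 'features.3'], ['features.6', 'features.8']] +       workers = [Process(target=analyze_layers, args=(s, 'sensitivity_%d.csv' % i)) +                  for i, s in enumerate(subsets)] +       for w in workers: +           w.start() +       for w in workers: +           w.join()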
+ +Output example +^^^^^^^^^^^^^^ + +The following lines are an example CSV file exported from SensitivityAnalysis. The first line consists of 'layername' and the sparsity list. Here a sparsity value means how much of the weight SensitivityAnalysis prunes for each layer. Each line below records the model accuracy when the corresponding layer is pruned under different sparsities. Note that, due to the early-stop option, some layers may +not have model accuracies/losses under all sparsities, for example, because the accuracy drop already exceeded the threshold set by the user. + +.. code-block:: bash + + layername,0.05,0.1,0.2,0.3,0.4,0.5,0.7,0.85,0.95 + features.0,0.54566,0.46308,0.06978,0.0374,0.03024,0.01512,0.00866,0.00492,0.00184 + features.3,0.54878,0.51184,0.37978,0.19814,0.07178,0.02114,0.00438,0.00442,0.00142 + features.6,0.55128,0.53566,0.4887,0.4167,0.31178,0.19152,0.08612,0.01258,0.00236 + features.8,0.55696,0.54194,0.48892,0.42986,0.33048,0.2266,0.09566,0.02348,0.0056 + features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 + +Topology Analysis +----------------- + +We also provide several tools for topology analysis during model compression. These tools help users compress their model better. Because of the complex topology of a network, users often need to spend a lot of effort checking whether a compression configuration is reasonable. We provide these topology analysis tools to reduce that burden. + +ChannelDependency +^^^^^^^^^^^^^^^^^ + +Complicated models may have residual connection/concat operations. When users prune these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. Take the following residual block in ResNet-18 as an example. The output features of ``layer2.0.conv2`` and ``layer2.0.downsample.0`` are added together, so the numbers of output channels of ``layer2.0.conv2`` and ``layer2.0.downsample.0`` should be the same, or there may be a tensor shape conflict. + + +.. image:: ../../img/channel_dependency_example.jpg + :target: ../../img/channel_dependency_example.jpg + :alt: + + +If layers that have a channel dependency are assigned different sparsities (here we only discuss structured pruning by L1FilterPruner/L2FilterPruner), there will be a shape conflict between these layers. Even if the pruned model with masks works fine, the pruned model cannot be directly sped up to the final model that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool finds the layers that have channel-count dependencies to help users better prune their model. + +Usage +""""" + +.. code-block:: python + + from nni.compression.pytorch.utils.shape_dependency import ChannelDependency + data = torch.ones(1, 3, 224, 224).cuda() + channel_depen = ChannelDependency(net, data) + channel_depen.export('dependency.csv') + +Output Example +"""""""""""""" + +The following lines are example output for torchvision.models.resnet18 exported by ChannelDependency. The layers on the same line have output channel dependencies with each other. For example, layer1.1.conv2, conv1, and layer1.0.conv2 have output channel dependencies with each other, which means the numbers of output channels (filters) of these three layers should be the same; otherwise, the model may have a shape conflict. + +.. 
code-block:: bash + + Dependency Set,Convolutional Layers + Set 1,layer1.1.conv2,layer1.0.conv2,conv1 + Set 2,layer1.0.conv1 + Set 3,layer1.1.conv1 + Set 4,layer2.0.conv1 + Set 5,layer2.1.conv2,layer2.0.conv2,layer2.0.downsample.0 + Set 6,layer2.1.conv1 + Set 7,layer3.0.conv1 + Set 8,layer3.0.downsample.0,layer3.1.conv2,layer3.0.conv2 + Set 9,layer3.1.conv1 + Set 10,layer4.0.conv1 + Set 11,layer4.0.downsample.0,layer4.1.conv2,layer4.0.conv2 + Set 12,layer4.1.conv1 + +MaskConflict +^^^^^^^^^^^^ + +When the masks of different layers in a model conflict (for example, when different sparsities are assigned to layers that have a channel dependency), we can fix the mask conflict with MaskConflict. Specifically, MaskConflict loads the masks exported by the pruners (L1FilterPruner, etc.) and checks whether there is a mask conflict; if so, MaskConflict sets the conflicting masks to the same value. + +.. code-block:: python + + from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict + fixed_mask = fix_mask_conflict('./resnet18_mask', net, data) + +not_safe_to_prune +^^^^^^^^^^^^^^^^^ + +If we try to prune a layer whose output tensor is taken as input by a shape-constraint op (for example, view, reshape), such pruning may not be safe. For example, suppose we have a convolutional layer followed by a view function. + +.. code-block:: python + + x = self.conv(x) # output shape is (batch, 1024, 3, 3) + x = x.view(-1, 1024) + +If the number of elements in the pruned conv layer's output is no longer divisible by the hard-coded 1024 (for example, with an output shape of (batch, 500, 3, 3)), we may hit a shape error. We cannot replace such a function that directly operates on the tensor, so we need to be careful when pruning such layers. The function not_safe_to_prune finds all the layers followed by a shape-constraint function. Here is a usage example. If you hit a shape error when running forward inference on the sped-up model, you can exclude the layers returned by not_safe_to_prune and try again. + +.. code-block:: python + + not_safe = not_safe_to_prune(model, dummy_input) + +Model FLOPs/Parameters Counter +------------------------------ + +We provide a model counter for calculating the model FLOPs and parameters. This counter supports calculating the FLOPs/parameters of a normal model without masks, and it can also calculate the FLOPs/parameters of a model with mask wrappers, which helps users easily check model complexity during model compression on NNI. Note that for structured pruning, we only identify the remaining filters according to the mask and do not take the pruned input channels into consideration, so the calculated FLOPs will be larger than the real number (i.e., the number calculated after Model Speedup). + +We support two modes to collect information about modules. The first mode is ``default``\ , which only collects the information of convolution and linear modules. The second mode is ``full``\ , which also collects the information of other operations. Users can easily use the collected ``results`` for further analysis. + +Usage +^^^^^ + +.. 
code-block:: python + + from nni.compression.pytorch.utils.counter import count_flops_params + + # Given input size (1, 1, 28, 28) + flops, params, results = count_flops_params(model, (1, 1, 28, 28)) + + # Given input tensor with size (1, 1, 28, 28) and switch to full mode + x = torch.randn(1, 1, 28, 28) + + flops, params, results = count_flops_params(model, (x,), mode='full') # tuple of tensor as input + + # Format output size to M (i.e., 10^6) + print(f'FLOPs: {flops/1e6:.3f}M, Params: {params/1e6:.3f}M') + print(results) + { + 'conv': {'flops': [60], 'params': [20], 'weight_size': [(5, 3, 1, 1)], 'input_size': [(1, 3, 2, 2)], 'output_size': [(1, 5, 2, 2)], 'module_type': ['Conv2d']}, + 'conv2': {'flops': [100], 'params': [30], 'weight_size': [(5, 5, 1, 1)], 'input_size': [(1, 5, 2, 2)], 'output_size': [(1, 5, 2, 2)], 'module_type': ['Conv2d']} + } diff --git a/docs/en_US/Compression/CustomizeCompressor.rst b/docs/en_US/Compression/CustomizeCompressor.rst new file mode 100644 index 0000000000000000000000000000000000000000..55efd5e88c1ba4ef46b6e32c943321704ffedade --- /dev/null +++ b/docs/en_US/Compression/CustomizeCompressor.rst @@ -0,0 +1,179 @@ +Customize New Compression Algorithm +=================================== + +.. contents:: + +In order to simplify the process of writing new compression algorithms, we have designed a simple and flexible programming interface that covers pruning and quantization. Below, we first demonstrate how to customize a new pruning algorithm and then demonstrate how to customize a new quantization algorithm. + +**Important Note** To better understand how to customize new pruning/quantization algorithms, users should first understand the framework that supports various pruning algorithms in NNI. See the `Framework overview of model compression <../Compression/Framework.rst>`__. + +Customize a new pruning algorithm +--------------------------------- + +Implementing a new pruning algorithm requires implementing a ``weight masker`` class, which should be a subclass of ``WeightMasker``\ , and a ``pruner`` class, which should be a subclass of ``Pruner``. + +An implementation of ``weight masker`` may look like this: + +.. code-block:: python + + class MyMasker(WeightMasker): + def __init__(self, model, pruner): + super().__init__(model, pruner) + # You can do some initialization here, such as collecting some statistics data + # if it is necessary for your algorithm to calculate the masks. + + def calc_mask(self, sparsity, wrapper, wrapper_idx=None): + # calculate the masks based on the wrapper.weight, the sparsity, + # and anything else + # mask = ... + return {'weight_mask': mask} + +You can refer to the NNI-provided :githublink:`weight masker ` implementations to implement your own weight masker.
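+ +For instance, a simple magnitude-based masker could be sketched as follows. This is an illustrative sketch rather than one of the built-in maskers: + +.. code-block:: python + +   import torch + +   class MagnitudeMasker(WeightMasker): +       def calc_mask(self, sparsity, wrapper, wrapper_idx=None): +           weight = wrapper.module.weight.data +           num_prune = int(weight.numel() * sparsity) +           if num_prune == 0: +               return {'weight_mask': torch.ones_like(weight)} +           # threshold at the num_prune-th smallest absolute weight value +           w_abs = weight.abs().view(-1) +           threshold = torch.topk(w_abs, num_prune, largest=False)[0].max() +           mask = torch.gt(weight.abs(), threshold).type_as(weight) +           return {'weight_mask': mask}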
+ +A basic ``pruner`` looks like this: + +.. code-block:: python + + class MyPruner(Pruner): + def __init__(self, model, config_list, optimizer): + super().__init__(model, config_list, optimizer) + self.set_wrappers_attribute("if_calculated", False) + # construct a weight masker instance + self.masker = MyMasker(model, self) + + def calc_mask(self, wrapper, wrapper_idx=None): + sparsity = wrapper.config['sparsity'] + if wrapper.if_calculated: + # Already pruned, do not prune again as a one-shot pruner + return None + else: + # call your masker to actually calculate the mask for this layer + masks = self.masker.calc_mask(sparsity=sparsity, wrapper=wrapper, wrapper_idx=wrapper_idx) + wrapper.if_calculated = True + return masks + +Refer to the NNI-provided :githublink:`pruner ` implementations to implement your own pruner class. + +---- + +Customize a new quantization algorithm +-------------------------------------- + +To write a new quantization algorithm, you can write a class that inherits ``nni.compression.pytorch.Quantizer``. Then, override the member functions with the logic of your algorithm. The member function to override is ``quantize_weight``. ``quantize_weight`` directly returns the quantized weights rather than a mask, because for quantization the quantized weights cannot be obtained by applying a mask. + +.. code-block:: python + + from nni.compression.pytorch import Quantizer + + class YourQuantizer(Quantizer): + def __init__(self, model, config_list): + """ + We suggest using the NNI-defined spec for config + """ + super().__init__(model, config_list) + + def quantize_weight(self, weight, config, **kwargs): + """ + Subclasses should overload this method to quantize weight tensors. + This method is effectively hooked to :meth:`forward` of the model. + + Parameters + ---------- + weight : Tensor + weight that needs to be quantized + config : dict + the configuration for weight quantization + """ + + # Put your code to generate `new_weight` here + + return new_weight + + def quantize_output(self, output, config, **kwargs): + """ + Subclasses should overload this method to quantize output. + This method is effectively hooked to :meth:`forward` of the model. + + Parameters + ---------- + output : Tensor + output that needs to be quantized + config : dict + the configuration for output quantization + """ + + # Put your code to generate `new_output` here + + return new_output + + def quantize_input(self, *inputs, config, **kwargs): + """ + Subclasses should overload this method to quantize input. + This method is effectively hooked to :meth:`forward` of the model. + + Parameters + ---------- + inputs : Tensor + inputs that need to be quantized + config : dict + the configuration for inputs quantization + """ + + # Put your code to generate `new_input` here + + return new_input + + def update_epoch(self, epoch_num): + pass + + def step(self): + """ + Can do some processing based on the model or the weights bound + in the func bind_model + """ + pass + +Customize backward function +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes it's necessary for a quantization operation to have a customized backward function, such as the `Straight-Through Estimator `__\ ; users can customize a backward function as follows: + +.. 
code-block:: python + + import torch + from nni.compression.pytorch.compressor import Quantizer, QuantGrad, QuantType + + class ClipGrad(QuantGrad): + @staticmethod + def quant_backward(tensor, grad_output, quant_type): + """ + This method should be overridden by subclasses to provide a customized backward function; + the default implementation is the Straight-Through Estimator + Parameters + ---------- + tensor : Tensor + input of quantization operation + grad_output : Tensor + gradient of the output of quantization operation + quant_type : QuantType + the type of quantization, it can be `QuantType.INPUT`, `QuantType.WEIGHT`, `QuantType.OUTPUT`, + you can define different behavior for different types. + Returns + ------- + tensor + gradient of the input of quantization operation + """ + + # for the quantize_output function, set grad to zero if the absolute value of tensor is larger than 1 + if quant_type == QuantType.OUTPUT: + grad_output[torch.abs(tensor) > 1] = 0 + return grad_output + + + class YourQuantizer(Quantizer): + def __init__(self, model, config_list): + super().__init__(model, config_list) + # set your customized backward function to overwrite the default backward function + self.quant_grad = ClipGrad + +If you do not customize ``QuantGrad``\ , the default backward function is the Straight-Through Estimator. +*Coming Soon* ... diff --git a/docs/en_US/Compression/DependencyAware.rst b/docs/en_US/Compression/DependencyAware.rst new file mode 100644 index 0000000000000000000000000000000000000000..05daafe7f22f7c9cf323aff56442ea55876593d8 --- /dev/null +++ b/docs/en_US/Compression/DependencyAware.rst @@ -0,0 +1,77 @@ +Dependency-aware Mode for Filter Pruning +======================================== + +Currently, we have several filter pruning algorithms for convolutional layers: FPGM Pruner, L1Filter Pruner, L2Filter Pruner, Activation APoZ Rank Filter Pruner, Activation Mean Rank Filter Pruner, and Taylor FO On Weight Pruner. In these filter pruning algorithms, the pruner prunes each convolutional layer separately. While pruning a convolution layer, the algorithm quantifies the importance of each filter based on some specific rule (such as the L1 norm) and prunes the less important filters. + +As the `dependency analysis utils <./CompressionUtils.rst>`__ show, if the output channels of two convolutional layers (conv1, conv2) are added together, then these two conv layers have a channel dependency with each other (for more details, please see `Compression Utils <./CompressionUtils.rst>`__\ ). Take the following figure as an example. + + +.. image:: ../../img/mask_conflict.jpg + :target: ../../img/mask_conflict.jpg + :alt: + + +Suppose we prune the first 50% of the output channels (filters) of conv1 and the last 50% of the output channels of conv2. Although both layers have 50% of their filters pruned, the speedup module still needs to add zeros to align the output channels. In this case, we cannot harvest the speed benefit from the model pruning. + +To better gain the speed benefit of model pruning, we added a dependency-aware mode for the filter pruners. In the dependency-aware mode, the pruner prunes the model based not only on the L1 norm of each filter, but also on the topology of the whole network architecture. + +In the dependency-aware mode (\ ``dependency_aware`` is set to ``True``\ ), the pruner tries to prune the same output channels for the layers that have channel dependencies with each other, as shown in the following figure. + + +.. 
image:: ../../img/dependency-aware.jpg + :target: ../../img/dependency-aware.jpg + :alt: + + +Take the dependency-aware mode of the L1Filter Pruner as an example. Specifically, for each channel, the pruner calculates the sum of the L1 norms (taking the L1 metric as an example) over all the layers in the dependency set. Obviously, the number of channels of this dependency set that can actually be pruned in the end is determined by the minimum sparsity among the layers in this dependency set (denoted by ``min_sparsity``\ ). According to the L1 norm sum of each channel, the pruner prunes the same ``min_sparsity`` share of channels for all the layers. Next, the pruner additionally prunes ``sparsity`` - ``min_sparsity`` channels for each convolutional layer based on that layer's own L1 norm of each channel. For example, suppose the output channels of ``conv1`` and ``conv2`` are added together and the configured sparsities of ``conv1`` and ``conv2`` are 0.3 and 0.2 respectively. In this case, the ``dependency-aware pruner`` will: + +* First, prune the same 20% of channels for ``conv1`` and ``conv2`` according to the L1 norm sum of ``conv1`` and ``conv2``. +* Second, additionally prune 10% of the channels for ``conv1`` according to the L1 norm of each channel of ``conv1``. + + +In addition, for convolutional layers that have more than one filter group, the ``dependency-aware pruner`` will also try to prune the same number of channels for each filter group. Overall, this pruner prunes the model according to the L1 norm of each filter and tries to meet the topological constraints (channel dependency, etc.) to improve the final speed gain after the speedup process. + +In the dependency-aware mode, the pruner thus provides a better speed gain from the model pruning. + +Usage +----- + +In this section, we show how to enable the dependency-aware mode for the filter pruner. Currently, only the one-shot pruners, such as FPGM Pruner, L1Filter Pruner, L2Filter Pruner, Activation APoZ Rank Filter Pruner, Activation Mean Rank Filter Pruner and Taylor FO On Weight Pruner, support the dependency-aware mode. + +To enable the dependency-aware mode for ``L1FilterPruner``\ : + +.. 
code-block:: python + + from nni.algorithms.compression.pytorch.pruning import L1FilterPruner + config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }] + # dummy_input is necessary for the dependency_aware mode + dummy_input = torch.ones(1, 3, 224, 224).cuda() + pruner = L1FilterPruner(model, config_list, dependency_aware=True, dummy_input=dummy_input) + # for L2FilterPruner + # pruner = L2FilterPruner(model, config_list, dependency_aware=True, dummy_input=dummy_input) + # for FPGMPruner + # pruner = FPGMPruner(model, config_list, dependency_aware=True, dummy_input=dummy_input) + # for ActivationAPoZRankFilterPruner + # pruner = ActivationAPoZRankFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1, dependency_aware=True, dummy_input=dummy_input) + # for ActivationMeanRankFilterPruner + # pruner = ActivationMeanRankFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1, dependency_aware=True, dummy_input=dummy_input) + # for TaylorFOWeightFilterPruner + # pruner = TaylorFOWeightFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1, dependency_aware=True, dummy_input=dummy_input) + + pruner.compress() + +Evaluation +---------- + +To compare the performance of the pruner with and without the dependency-aware mode, we use L1FilterPruner to prune MobileNetV2 with the dependency-aware mode turned on and off respectively. To simplify the experiment, we use uniform pruning, which means we allocate the same sparsity to all convolutional layers in the model. +We trained a MobileNetV2 model on the CIFAR-10 dataset and pruned the model based on this pretrained checkpoint. The following figure shows the accuracy and FLOPs of the model pruned by the different pruners. + + +.. image:: ../../img/mobilev2_l1_cifar.jpg + :target: ../../img/mobilev2_l1_cifar.jpg + :alt: + + +In the figure, ``Dependency-aware`` represents the L1FilterPruner with the dependency-aware mode enabled, ``L1 Filter`` is the normal ``L1FilterPruner`` without the dependency-aware mode, and ``No-Dependency`` means the pruner only prunes the layers that have no channel dependency with other layers. As we can see in the figure, with the dependency-aware mode enabled, the pruner achieves higher accuracy under the same FLOPs. diff --git a/docs/en_US/Compression/Framework.rst b/docs/en_US/Compression/Framework.rst new file mode 100644 index 0000000000000000000000000000000000000000..453163b5efe2c29ffbd3763fec2b139417982f32 --- /dev/null +++ b/docs/en_US/Compression/Framework.rst @@ -0,0 +1,209 @@ +Framework overview of model compression +======================================= + +.. contents:: + +The picture below shows an overview of the components of the model compression framework. + + +.. image:: ../../img/compressor_framework.jpg + :target: ../../img/compressor_framework.jpg + :alt: + + +There are three major components/classes in the NNI model compression framework: ``Compressor``\ , ``Pruner`` and ``Quantizer``. Let's look at them in detail one by one: + +Compressor +---------- + +Compressor is the base class for pruners and quantizers; it provides a unified interface for end users, so that pruners and quantizers can be used in the same way. For example, to use a pruner: + +.. 
code-block:: python + + from nni.algorithms.compression.pytorch.pruning import LevelPruner + + # load a pretrained model or train a model before using a pruner + + configure_list = [{ + 'sparsity': 0.7, + 'op_types': ['Conv2d', 'Linear'], + }] + + pruner = LevelPruner(model, configure_list) + model = pruner.compress() + + # the model is now ready for pruning; start finetuning the model, + # and it will be pruned during training automatically + +To use a quantizer: + +.. code-block:: python + + from nni.algorithms.compression.pytorch.quantization import DoReFaQuantizer + + configure_list = [{ + 'quant_types': ['weight'], + 'quant_bits': { + 'weight': 8, + }, + 'op_types':['Conv2d', 'Linear'] + }] + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4) + quantizer = DoReFaQuantizer(model, configure_list, optimizer) + quantizer.compress() + +View the :githublink:`example code ` for more information. + +The ``Compressor`` class provides some utility methods for subclasses and users: + +Set wrapper attribute +^^^^^^^^^^^^^^^^^^^^^ + +Sometimes ``calc_mask`` must save some state data, so users can use the ``set_wrappers_attribute`` API to register attributes, just like how buffers are registered in PyTorch modules. These buffers will be registered to the ``module wrapper``, and users can access them through the ``module wrapper``. +In the above example, we use ``set_wrappers_attribute`` to set a buffer ``if_calculated``, which is used as a flag indicating whether the mask of a layer has already been calculated. + +Collect data during forward +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes users want to collect some data during the modules' forward method, for example, the mean value of the activation. This can be done by adding a customized collector to the module. + +.. code-block:: python + + class MyMasker(WeightMasker): + def __init__(self, model, pruner): + super().__init__(model, pruner) + # Set attribute `collected_activation` for all wrappers to store + # activations for each layer + self.pruner.set_wrappers_attribute("collected_activation", []) + self.activation = torch.nn.functional.relu + + def collector(wrapper, input_, output): + # The collected activation can be accessed via each wrapper's collected_activation + # attribute + wrapper.collected_activation.append(self.activation(output.detach().cpu())) + + self.pruner.hook_id = self.pruner.add_activation_collector(collector) + +The collector function will be called each time the forward method runs. + +Users can also remove this collector like this: + +.. code-block:: python + + # Save the collector identifier + collector_id = self.pruner.add_activation_collector(collector) + + # When the collector is no longer needed, it can be removed using + # the saved collector identifier + self.pruner.remove_activation_collector(collector_id) + +---- + +Pruner +------ + +A pruner receives ``model`` and ``config_list`` as arguments. +Some pruners, like the ``TaylorFOWeightFilter Pruner``, prune the model per the ``config_list`` during the training loop by adding a hook on ``optimizer.step()``. + +The Pruner class is a subclass of Compressor, so it contains everything in the Compressor class plus some additional components used only for pruning: + +Weight masker +^^^^^^^^^^^^^ + +A ``weight masker`` is the implementation of a pruning algorithm; it can prune a specified layer wrapped by a ``module wrapper`` with a specified sparsity. + +Pruning module wrapper +^^^^^^^^^^^^^^^^^^^^^^ + +A ``pruning module wrapper`` is a module containing: + + +#. the original module +#. 
some buffers used by ``calc_mask`` +#. a new forward method that applies masks before running the original forward method. + +The reasons to use a ``module wrapper``\ : + + +#. some buffers are needed by ``calc_mask`` to calculate masks, and these buffers should be registered in the ``module wrapper`` so that the original modules are not contaminated. +#. a new ``forward`` method is needed to apply masks to the weight before calling the real ``forward`` method. + +Pruning hook +^^^^^^^^^^^^ + +A pruning hook is installed on a pruner when the pruner is constructed; it is used to call the pruner's ``calc_mask`` method when ``optimizer.step()`` is invoked. + +---- + +Quantizer +--------- + +The Quantizer class is also a subclass of ``Compressor``\ ; it is used to compress models by reducing the number of bits required to represent weights or activations, which can reduce the computations and the inference time. It contains: + +Quantization module wrapper +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Each module/layer of the model to be quantized is wrapped by a quantization module wrapper, which provides a new ``forward`` method to quantize the original module's weight, input and output. + +Quantization hook +^^^^^^^^^^^^^^^^^ + +A quantization hook is installed on a quantizer when it is constructed; it is called when ``optimizer.step()`` is invoked. + +Quantization methods +^^^^^^^^^^^^^^^^^^^^ + +The ``Quantizer`` class provides the following methods for subclasses to implement quantization algorithms: + +.. code-block:: python + + class Quantizer(Compressor): + """ + Base quantizer for pytorch quantizer + """ + def quantize_weight(self, weight, wrapper, **kwargs): + """ + Subclasses should overload this method to quantize weight. + This method is effectively hooked to :meth:`forward` of the model. + Parameters + ---------- + weight : Tensor + weight that needs to be quantized + wrapper : QuantizerModuleWrapper + the wrapper for the original module + """ + raise NotImplementedError('Quantizer must overload quantize_weight()') + + def quantize_output(self, output, wrapper, **kwargs): + """ + Subclasses should overload this method to quantize output. + This method is effectively hooked to :meth:`forward` of the model. + Parameters + ---------- + output : Tensor + output that needs to be quantized + wrapper : QuantizerModuleWrapper + the wrapper for the original module + """ + raise NotImplementedError('Quantizer must overload quantize_output()') + + def quantize_input(self, *inputs, wrapper, **kwargs): + """ + Subclasses should overload this method to quantize input. + This method is effectively hooked to :meth:`forward` of the model. + Parameters + ---------- + inputs : Tensor + inputs that need to be quantized + wrapper : QuantizerModuleWrapper + the wrapper for the original module + """ + raise NotImplementedError('Quantizer must overload quantize_input()') + +---- + +Multi-GPU support +----------------- + +In multi-GPU training, buffers and parameters are copied to each GPU every time the ``forward`` method runs. If buffers and parameters are updated in the ``forward`` method, an in-place update is needed to ensure the update is effective. +Since ``calc_mask`` is called in the ``optimizer.step`` method, which happens after the ``forward`` method and happens only on one GPU, it supports multi-GPU naturally.
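+ +For example, a state update inside ``forward`` should modify the registered buffer in place rather than rebind the attribute. A minimal sketch of the pattern (``some_buffer`` is a hypothetical buffer registered on the wrapper): + +.. code-block:: python + +   # inside a module wrapper's forward method: +   # not effective: rebinding the attribute creates a new tensor object +   # self.some_buffer = torch.zeros(10) +   # effective: the in-place update modifies the registered buffer itself +   self.some_buffer.copy_(torch.zeros(10)) +   # in-place arithmetic such as add_ / mul_ works the same way +   self.some_buffer.add_(1)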
diff --git a/docs/en_US/Compression/ModelSpeedup.rst b/docs/en_US/Compression/ModelSpeedup.rst new file mode 100644 index 0000000000000000000000000000000000000000..86f099ac9df76a1217df92581b58c98b66d53cb8 --- /dev/null +++ b/docs/en_US/Compression/ModelSpeedup.rst @@ -0,0 +1,208 @@ +Speed up Masked Model +===================== + +*This feature is in Beta version.* + +Introduction +------------ + +Pruning algorithms usually use weight masks to simulate the real pruning. Masks can be used +to check the model performance of a specific pruning (or sparsity), but there is no real speedup. +Since model speedup is the ultimate goal of model pruning, we provide a tool to convert a model to a smaller one based on user-provided masks (the masks come from the +pruning algorithms). + +There are two types of pruning. One is fine-grained pruning, which does not change the shape of weights or input/output tensors; a sparse kernel is required to speed up a fine-grained pruned layer. The other is coarse-grained pruning (e.g., channels), where the shapes of weights and input/output tensors usually change due to such pruning. To speed up this kind of pruning, there is no need to use a sparse kernel; we can just replace the pruned layer with a smaller one. Since community support for sparse kernels is limited, we currently only support the speedup of coarse-grained pruning and leave the support of fine-grained pruning for the future. + +Design and Implementation +------------------------- + +To speed up a model, the pruned layers should be replaced, either with a smaller layer for a coarse-grained mask, or with a sparse kernel for a fine-grained mask. A coarse-grained mask usually changes the shape of weights or input/output tensors, so we should do shape inference to check whether other unpruned layers should be replaced as well due to the shape change. Therefore, in our design, there are two main steps: first, do shape inference to find out all the modules that should be replaced; second, replace the modules. The first step requires the topology (i.e., connections) of the model; we use ``jit.trace`` to obtain the model graph for PyTorch. + +For each module, we should prepare four functions, three for shape inference and one for module replacement. The three shape inference functions are: given the weight shape, infer the input/output shape; given the input shape, infer the weight/output shape; given the output shape, infer the weight/input shape. The module replacement function returns a newly created module which is smaller. + +Usage +----- + +.. code-block:: python + + from nni.compression.pytorch import ModelSpeedup + # model: the model you want to speed up + # dummy_input: dummy input of the model, given to `jit.trace` + # masks_file: the mask file created by pruning algorithms + m_speedup = ModelSpeedup(model, dummy_input.to(device), masks_file) + m_speedup.speedup_model() + dummy_input = dummy_input.to(device) + start = time.time() + out = model(dummy_input) + print('elapsed time: ', time.time() - start) + +For complete examples, please refer to :githublink:`the code `. + +NOTE: The current implementation supports PyTorch 1.3.1 or newer. + +Limitations +----------- + +Since every module requires four functions for shape inference and module replacement, this is a large amount of work, and we have only implemented the ones required by the examples. If you want to speed up your own model that is not supported by the current implementation, you are welcome to contribute; a rough sketch of a module replacement function is shown below.
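+ +As a rough illustration, the sketch below builds a smaller ``Conv2d`` (assuming ``groups=1``) from channel masks and copies the surviving weights into it. The signature and mask format here are hypothetical and do not reflect NNI's internal interface; they only show the idea: + +.. code-block:: python + +   import torch +   import torch.nn as nn + +   def replace_conv2d(conv, in_mask, out_mask): +       # in_mask/out_mask: hypothetical boolean masks over input/output channels +       kept_in = in_mask.nonzero().view(-1) +       kept_out = out_mask.nonzero().view(-1) +       new_conv = nn.Conv2d(len(kept_in), len(kept_out), +                            kernel_size=conv.kernel_size, stride=conv.stride, +                            padding=conv.padding, bias=conv.bias is not None) +       # copy the surviving weights (and bias) into the smaller module +       new_conv.weight.data = conv.weight.data[kept_out][:, kept_in].clone() +       if conv.bias is not None: +           new_conv.bias.data = conv.bias.data[kept_out].clone() +       return new_conv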
+ +For PyTorch, we can only replace modules; if functions in ``forward`` need to be replaced, our current implementation does not work. One workaround is to make the function a PyTorch module. + +Speedup Results of Examples +--------------------------- + +The code of these experiments can be found :githublink:`here `. + +slim pruner example +^^^^^^^^^^^^^^^^^^^ + +on one V100 GPU, +input tensor: ``torch.randn(64, 3, 32, 32)`` + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Times + - Mask Latency + - Speedup Latency + * - 1 + - 0.01197 + - 0.005107 + * - 2 + - 0.02019 + - 0.008769 + * - 4 + - 0.02733 + - 0.014809 + * - 8 + - 0.04310 + - 0.027441 + * - 16 + - 0.07731 + - 0.05008 + * - 32 + - 0.14464 + - 0.10027 + + +fpgm pruner example +^^^^^^^^^^^^^^^^^^^ + +on CPU, +input tensor: ``torch.randn(64, 1, 28, 28)``\ , +with too large a variance across measurements + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Times + - Mask Latency + - Speedup Latency + * - 1 + - 0.01383 + - 0.01839 + * - 2 + - 0.01167 + - 0.003558 + * - 4 + - 0.01636 + - 0.01088 + * - 40 + - 0.14412 + - 0.08268 + * - 40 + - 1.29385 + - 0.14408 + * - 40 + - 0.41035 + - 0.46162 + * - 400 + - 6.29020 + - 5.82143 + + +l1filter pruner example +^^^^^^^^^^^^^^^^^^^^^^^ + +on one V100 GPU, +input tensor: ``torch.randn(64, 3, 32, 32)`` + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Times + - Mask Latency + - Speedup Latency + * - 1 + - 0.01026 + - 0.003677 + * - 2 + - 0.01657 + - 0.008161 + * - 4 + - 0.02458 + - 0.020018 + * - 8 + - 0.03498 + - 0.025504 + * - 16 + - 0.06757 + - 0.047523 + * - 32 + - 0.10487 + - 0.086442 + + +APoZ pruner example +^^^^^^^^^^^^^^^^^^^ + +on one V100 GPU, +input tensor: ``torch.randn(64, 3, 32, 32)`` + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Times + - Mask Latency + - Speedup Latency + * - 1 + - 0.01389 + - 0.004208 + * - 2 + - 0.01628 + - 0.008310 + * - 4 + - 0.02521 + - 0.014008 + * - 8 + - 0.03386 + - 0.023923 + * - 16 + - 0.06042 + - 0.046183 + * - 32 + - 0.12421 + - 0.087113 + + +SimulatedAnnealing pruner example +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In this experiment, we use the SimulatedAnnealing pruner to prune ResNet-18 on the CIFAR-10 dataset. +We measure the latencies and accuracies of the pruned model under different sparsity ratios, as shown in the following figure. +The latency is measured on one V100 GPU and the input tensor is ``torch.randn(128, 3, 32, 32)``. + + +.. image:: ../../img/SA_latency_accuracy.png + + +User configuration for ModelSpeedup +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**PyTorch** + +.. autoclass:: nni.compression.pytorch.ModelSpeedup diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..b33ec148edbaf35de10151779a3de5899ae1f078 --- /dev/null +++ b/docs/en_US/Compression/Overview.rst @@ -0,0 +1,131 @@ +Model Compression with NNI +========================== + +.. contents:: + +As larger neural networks with more layers and nodes are considered, reducing their storage and computational cost becomes critical, especially for some real-time applications. Model compression can be used to address this problem. + +NNI provides a model compression toolkit to help users compress and speed up their models with state-of-the-art compression algorithms and strategies. There are several core features supported by NNI model compression: + + +* Support many popular pruning and quantization algorithms. 
+* Automate the model pruning and quantization process with state-of-the-art strategies and NNI's auto-tuning power. +* Speed up a compressed model to give it lower inference latency and also make it smaller. +* Provide friendly and easy-to-use compression utilities for users to dive into the compression process and results. +* Provide a concise interface for users to customize their own compression algorithms. + + +Compression Pipeline +-------------------- + +.. image:: ../../img/compression_flow.jpg + :target: ../../img/compression_flow.jpg + :alt: + +The overall compression pipeline in NNI: for compressing a pretrained model, pruning and quantization can be used alone or in combination. + +.. note:: + NNI's compression algorithms simulate compression with masks rather than actually shrinking the model, while the NNI speedup tool can truly compress the model and reduce latency. To obtain a truly compact model, users should conduct `model speedup <./ModelSpeedup.rst>`__. The interface and APIs are unified for both PyTorch and TensorFlow; currently only the PyTorch version is supported, and the TensorFlow version will be supported in the future. + +Supported Algorithms +-------------------- + +The algorithms include pruning algorithms and quantization algorithms. + +Pruning Algorithms +^^^^^^^^^^^^^^^^^^ + +Pruning algorithms compress the original network by removing redundant weights or channels of layers, which can reduce model complexity and mitigate the over-fitting issue. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name + - Brief Introduction of Algorithm + * - `Level Pruner `__ + - Pruning the specified ratio of weights based on the absolute values of weights + * - `AGP Pruner <../Compression/Pruner.rst#agp-pruner>`__ + - Automated gradual pruning (To prune, or not to prune: exploring the efficacy of pruning for model compression) `Reference Paper `__ + * - `Lottery Ticket Pruner <../Compression/Pruner.rst#lottery-ticket-hypothesis>`__ + - The pruning process used by "The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks". It prunes a model iteratively. `Reference Paper `__ + * - `FPGM Pruner <../Compression/Pruner.rst#fpgm-pruner>`__ + - Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration `Reference Paper `__ + * - `L1Filter Pruner <../Compression/Pruner.rst#l1filter-pruner>`__ + - Pruning filters with the smallest L1 norm of weights in convolution layers (Pruning Filters for Efficient Convnets) `Reference Paper `__ + * - `L2Filter Pruner <../Compression/Pruner.rst#l2filter-pruner>`__ + - Pruning filters with the smallest L2 norm of weights in convolution layers + * - `ActivationAPoZRankFilterPruner <../Compression/Pruner.rst#activationapozrankfilter-pruner>`__ + - Pruning filters based on the metric APoZ (average percentage of zeros), which measures the percentage of zeros in the activations of (convolutional) layers. 
`Reference Paper `__ + * - `ActivationMeanRankFilterPruner <../Compression/Pruner.rst#activationmeanrankfilter-pruner>`__ + - Pruning filters based on the metric that calculates the smallest mean value of output activations + * - `Slim Pruner <../Compression/Pruner.rst#slim-pruner>`__ + - Pruning channels in convolution layers by pruning the scaling factors in BN layers (Learning Efficient Convolutional Networks through Network Slimming) `Reference Paper `__ + * - `TaylorFO Pruner <../Compression/Pruner.rst#taylorfoweightfilter-pruner>`__ + - Pruning filters based on the first-order Taylor expansion of weights (Importance Estimation for Neural Network Pruning) `Reference Paper `__ + * - `ADMM Pruner <../Compression/Pruner.rst#admm-pruner>`__ + - Pruning based on the ADMM optimization technique `Reference Paper `__ + * - `NetAdapt Pruner <../Compression/Pruner.rst#netadapt-pruner>`__ + - Automatically simplify a pretrained network to meet the resource budget by iterative pruning `Reference Paper `__ + * - `SimulatedAnnealing Pruner <../Compression/Pruner.rst#simulatedannealing-pruner>`__ + - Automatic pruning with a guided heuristic search method, the Simulated Annealing algorithm `Reference Paper `__ + * - `AutoCompress Pruner <../Compression/Pruner.rst#autocompress-pruner>`__ + - Automatic pruning by iteratively calling SimulatedAnnealing Pruner and ADMM Pruner `Reference Paper `__ + * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ + - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ + * - `Transformer Head Pruner <../Compression/Pruner.rst#transformer-head-pruner>`__ + - Pruning attention heads from transformer models either in one shot or iteratively. + + +You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. + +Quantization Algorithms +^^^^^^^^^^^^^^^^^^^^^^^ + +Quantization algorithms compress the original network by reducing the number of bits required to represent weights or activations, which can reduce the computations and the inference time. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name + - Brief Introduction of Algorithm + * - `Naive Quantizer <../Compression/Quantizer.rst#naive-quantizer>`__ + - Quantizes weights to 8 bits by default + * - `QAT Quantizer <../Compression/Quantizer.rst#qat-quantizer>`__ + - Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. `Reference Paper `__ + * - `DoReFa Quantizer <../Compression/Quantizer.rst#dorefa-quantizer>`__ + - DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. `Reference Paper `__ + * - `BNN Quantizer <../Compression/Quantizer.rst#bnn-quantizer>`__ + - Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1. `Reference Paper `__ + * - `LSQ Quantizer <../Compression/Quantizer.rst#lsq-quantizer>`__ + - Learned step size quantization. `Reference Paper `__ + * - `Observer Quantizer <../Compression/Quantizer.rst#observer-quantizer>`__ + - Post-training quantization. Collects quantization information during calibration with observers. + + +Model Speedup +------------- + +The final goal of model compression is to reduce inference latency and model size. 
However, existing model compression algorithms mainly use simulation to check the performance (e.g., accuracy) of the compressed model; for example, pruning algorithms use masks, and quantization algorithms still store quantized values in float32. Given the output masks and quantization bits produced by those algorithms, NNI can really speed up the model. The detailed tutorial of Masked Model Speedup can be found `here <./ModelSpeedup.rst>`__, and the detailed tutorial of Mixed Precision Quantization Model Speedup can be found `here <./QuantizationSpeedup.rst>`__. + + +Compression Utilities +--------------------- + +Compression utilities include some useful tools for users to understand and analyze the model they want to compress. For example, users could check the sensitivity of each layer to pruning. Users could easily calculate the FLOPs and parameter size of a model. Please refer to `here <./CompressionUtils.rst>`__ for a complete list of compression utilities. + +Advanced Usage +-------------- + +NNI model compression provides a simple interface for users to customize a new compression algorithm. The design philosophy of the interface is to let users focus on the compression logic while hiding framework-specific implementation details from users. Users can learn more about our compression framework and customize a new compression algorithm (pruning algorithm or quantization algorithm) based on our framework. Moreover, users could leverage NNI's auto-tuning power to automatically compress a model. Please refer to `here <./advanced.rst>`__ for more details. + + +Reference and Feedback +---------------------- + +* To `report a bug `__ for this feature on GitHub; +* To `file a feature or improvement request `__ for this feature on GitHub; +* To know more about `Feature Engineering with NNI <../FeatureEngineering/Overview.rst>`__\ ; +* To know more about `NAS with NNI <../NAS/Overview.rst>`__\ ; +* To know more about `Hyperparameter Tuning with NNI <../Tuner/BuiltinTuner.rst>`__\ ; diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst new file mode 100644 index 0000000000000000000000000000000000000000..8af360f03dcff573f3403479e55dd00516de766e --- /dev/null +++ b/docs/en_US/Compression/Pruner.rst @@ -0,0 +1,824 @@ +Supported Pruning Algorithms on NNI +=================================== + +We provide several pruning algorithms that support fine-grained weight pruning and structural filter pruning. **Fine-grained Pruning** generally results in unstructured models, which need specialized hardware or software to speed up the sparse network. **Filter Pruning** achieves acceleration by removing entire filters. Some pruning algorithms use a one-shot method that prunes weights at once based on an importance metric (it is then necessary to finetune the model to compensate for the loss of accuracy); a minimal sketch of this prune-then-finetune flow is shown below. Other pruning algorithms **iteratively** prune weights during optimization, controlling the pruning schedule; this includes some automatic pruning algorithms.
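+ +The following sketch shows the typical one-shot flow, taking L1FilterPruner as an example (``train_one_epoch``, ``optimizer``, ``criterion`` and ``finetune_epochs`` are assumed user-provided training pieces, not part of the pruner API): + +.. code-block:: python + +   from nni.algorithms.compression.pytorch.pruning import L1FilterPruner + +   config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}] +   pruner = L1FilterPruner(model, config_list) +   model = pruner.compress()  # weights are masked, not yet physically removed + +   # finetune to compensate for the accuracy loss +   for epoch in range(finetune_epochs): +       train_one_epoch(model, optimizer, criterion)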
+
+
+**One-shot Pruning**
+
+* `Level Pruner <#level-pruner>`__ (fine-grained pruning)
+* `Slim Pruner <#slim-pruner>`__
+* `FPGM Pruner <#fpgm-pruner>`__
+* `L1Filter Pruner <#l1filter-pruner>`__
+* `L2Filter Pruner <#l2filter-pruner>`__
+* `Activation APoZ Rank Filter Pruner <#activationapozrankfilter-pruner>`__
+* `Activation Mean Rank Filter Pruner <#activationmeanrankfilter-pruner>`__
+* `Taylor FO On Weight Pruner <#taylorfoweightfilter-pruner>`__
+
+**Iterative Pruning**
+
+* `AGP Pruner <#agp-pruner>`__
+* `NetAdapt Pruner <#netadapt-pruner>`__
+* `SimulatedAnnealing Pruner <#simulatedannealing-pruner>`__
+* `AutoCompress Pruner <#autocompress-pruner>`__
+* `AMC Pruner <#amc-pruner>`__
+* `Sensitivity Pruner <#sensitivity-pruner>`__
+* `ADMM Pruner <#admm-pruner>`__
+
+**Others**
+
+* `Lottery Ticket Hypothesis <#lottery-ticket-hypothesis>`__
+* `Transformer Head Pruner <#transformer-head-pruner>`__
+
+Level Pruner
+------------
+
+This is a basic one-shot pruner: you set a target sparsity level (expressed as a fraction; 0.6 means 60% of the weight parameters will be pruned).
+
+We first sort the weights in the specified layer by their absolute values, and then mask the smallest-magnitude weights to zero until the desired sparsity level is reached.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import LevelPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
+   pruner = LevelPruner(model, config_list)
+   pruner.compress()
+
+User configuration for Level Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.LevelPruner
+
+**TensorFlow**
+
+.. autoclass:: nni.algorithms.compression.tensorflow.pruning.LevelPruner
+
+
+Slim Pruner
+-----------
+
+This is a one-shot pruner, which adds sparsity regularization on the scaling factors of batch normalization (BN) layers during training to identify unimportant channels. The channels with small scaling-factor values will be pruned. For more details, please refer to `'Learning Efficient Convolutional Networks through Network Slimming' `__\.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import SlimPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['BatchNorm2d'] }]
+   pruner = SlimPruner(model, config_list, optimizer, trainer, criterion)
+   pruner.compress()
+
+User configuration for Slim Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SlimPruner
+
+Reproduced Experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We implemented one of the experiments in `Learning Efficient Convolutional Networks through Network Slimming `__: we pruned ``70%`` of the channels in the **VGGNet** for CIFAR-10, which prunes ``88.5%`` of the parameters. Our experiment results are as follows:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Model
+     - Error (paper/ours)
+     - Parameters
+     - Pruned
+   * - VGGNet
+     - 6.34/6.69
+     - 20.04M
+     -
+   * - Pruned-VGGNet
+     - 6.20/6.34
+     - 2.03M
+     - 88.5%
+
+
+The experiment code can be found at :githublink:`examples/model_compress/pruning/basic_pruners_torch.py `
+
+.. code-block:: bash
+
+   python basic_pruners_torch.py --pruner slim --model vgg19 --sparsity 0.7 --speed-up
+
+
+----
+
+FPGM Pruner
+-----------
+
+This is a one-shot pruner, which prunes the filters with the smallest geometric median.
FPGM chooses the filters with the most replaceable contribution.
+For more details, please refer to `Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration `__.
+
+We also provide a dependency-aware mode for this pruner to get a better speedup from the pruning. Please refer to `dependency-aware <./DependencyAware.rst>`__ for more details.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import FPGMPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = FPGMPruner(model, config_list)
+   pruner.compress()
+
+User configuration for FPGM Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.FPGMPruner
+
+L1Filter Pruner
+---------------
+
+This is a one-shot pruner, which prunes the filters in the **convolution layers**.
+
+..
+
+   The procedure of pruning :math:`m` filters from the :math:`i`-th convolutional layer is as follows:
+
+   #. For each filter :math:`F_{i,j}`, calculate the sum of its absolute kernel weights :math:`s_j=\sum_{l=1}^{n_i}\sum|K_l|`.
+
+   #. Sort the filters by :math:`s_j`.
+
+   #. Prune :math:`m` filters with the smallest sum values and their corresponding feature maps. The
+      kernels in the next convolutional layer corresponding to the pruned feature maps are also removed.
+
+   #. A new kernel matrix is created for both the :math:`i`-th and :math:`i+1`-th layers, and the remaining kernel
+      weights are copied to the new model.
+
+For more details, please refer to `PRUNING FILTERS FOR EFFICIENT CONVNETS `__\.
+
+
+
+In addition, we also provide a dependency-aware mode for the L1FilterPruner. For more details about the dependency-aware mode, please refer to `dependency-aware mode <./DependencyAware.rst>`__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = L1FilterPruner(model, config_list)
+   pruner.compress()
+
+User configuration for L1Filter Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.L1FilterPruner
+
+Reproduced Experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We implemented one of the experiments in `PRUNING FILTERS FOR EFFICIENT CONVNETS `__ with **L1FilterPruner**: we pruned **VGG-16** for CIFAR-10 to **VGG-16-pruned-A** as in the paper, in which ``64%`` of the parameters are pruned. Our experiment results are as follows:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Model
+     - Error (paper/ours)
+     - Parameters
+     - Pruned
+   * - VGG-16
+     - 6.75/6.49
+     - 1.5x10^7
+     -
+   * - VGG-16-pruned-A
+     - 6.60/6.47
+     - 5.4x10^6
+     - 64.0%
+
+
+The experiment code can be found at :githublink:`examples/model_compress/pruning/basic_pruners_torch.py `
+
+.. code-block:: bash
+
+   python basic_pruners_torch.py --pruner l1filter --model vgg16 --speed-up
+
+----
+
+L2Filter Pruner
+---------------
+
+This is a structured pruning algorithm that prunes the filters with the smallest L2 norm of the weights. It is implemented as a one-shot pruner.
+
+We also provide a dependency-aware mode for this pruner to get a better speedup from the pruning. Please refer to `dependency-aware <./DependencyAware.rst>`__ for more details.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import L2FilterPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = L2FilterPruner(model, config_list)
+   pruner.compress()
+
+User configuration for L2Filter Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.L2FilterPruner
+
+----
+
+ActivationAPoZRankFilter Pruner
+-------------------------------
+
+ActivationAPoZRankFilter Pruner prunes the filters with the smallest value of the importance criterion ``APoZ``, calculated from the output activations of convolution layers, to achieve a preset level of network sparsity. The pruning criterion ``APoZ`` is explained in the paper `Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures `__.
+
+The APoZ is defined as:
+
+:math:`APoZ_{c}^{(i)} = APoZ\left(O_{c}^{(i)}\right)=\frac{\sum_{k}^{N} \sum_{j}^{M} f\left(O_{c, j}^{(i)}(k)=0\right)}{N \times M}`
+
+
+We also provide a dependency-aware mode for this pruner to get a better speedup from the pruning. Please refer to `dependency-aware <./DependencyAware.rst>`__ for more details.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import ActivationAPoZRankFilterPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = ActivationAPoZRankFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1)
+   pruner.compress()
+
+Note: ActivationAPoZRankFilterPruner is used to prune convolutional layers within deep neural networks; therefore, the ``op_types`` field supports only convolutional layers.
+
+You can view :githublink:`example ` for more information.
+
+User configuration for ActivationAPoZRankFilter Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ActivationAPoZRankFilterPruner
+
+----
+
+ActivationMeanRankFilter Pruner
+-------------------------------
+
+ActivationMeanRankFilterPruner prunes the filters with the smallest value of the importance criterion ``mean activation``, calculated from the output activations of convolution layers, to achieve a preset level of network sparsity. The pruning criterion ``mean activation`` is explained in section 2.2 of the paper `Pruning Convolutional Neural Networks for Resource Efficient Inference `__. Other pruning criteria mentioned in this paper will be supported in a future release.
+
+We also provide a dependency-aware mode for this pruner to get a better speedup from the pruning. Please refer to `dependency-aware <./DependencyAware.rst>`__ for more details.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import ActivationMeanRankFilterPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = ActivationMeanRankFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1)
+   pruner.compress()
+
+Note: ActivationMeanRankFilterPruner is used to prune convolutional layers within deep neural networks; therefore, the ``op_types`` field supports only convolutional layers.
+
+You can view :githublink:`example ` for more information.
+
+User configuration for ActivationMeanRankFilterPruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ActivationMeanRankFilterPruner
+
+----
+
+TaylorFOWeightFilter Pruner
+---------------------------
+
+TaylorFOWeightFilter Pruner prunes convolutional layers based on an estimated importance calculated from the first-order Taylor expansion on the weights, to achieve a preset level of network sparsity. The estimated importance of filters is defined in the paper `Importance Estimation for Neural Network Pruning `__. Other pruning criteria mentioned in this paper will be supported in a future release.
+
+..
+
+:math:`\widehat{\mathcal{I}}_{\mathcal{S}}^{(1)}(\mathbf{W}) \triangleq \sum_{s \in \mathcal{S}} \mathcal{I}_{s}^{(1)}(\mathbf{W})=\sum_{s \in \mathcal{S}}\left(g_{s} w_{s}\right)^{2}`
+
+
+We also provide a dependency-aware mode for this pruner to get a better speedup from the pruning. Please refer to `dependency-aware <./DependencyAware.rst>`__ for more details.
+
+In addition, we provide a global-sort mode for this pruner, which is aligned with the paper's implementation. Please set the parameter ``global_sort`` to True when instantiating TaylorFOWeightFilterPruner.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import TaylorFOWeightFilterPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = TaylorFOWeightFilterPruner(model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1)
+   pruner.compress()
+
+User configuration for TaylorFOWeightFilter Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.TaylorFOWeightFilterPruner
+
+----
+
+AGP Pruner
+----------
+
+This is an iterative pruner, in which the sparsity is increased from an initial sparsity value :math:`s_i` (usually 0) to a final sparsity value :math:`s_f` over a span of :math:`n` pruning steps, starting at training step :math:`t_{0}` and with pruning frequency :math:`\Delta t`:
+
+:math:`s_{t}=s_{f}+\left(s_{i}-s_{f}\right)\left(1-\frac{t-t_{0}}{n \Delta t}\right)^{3} \text { for } t \in\left\{t_{0}, t_{0}+\Delta t, \ldots, t_{0} + n \Delta t\right\}`
+
+For more details, please refer to `To prune, or not to prune: exploring the efficacy of pruning for model compression `__\.
+
+
+Usage
+^^^^^
+
+You can prune all weights from 0% to 80% sparsity in 10 epochs with the code below.
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import AGPPruner
+   config_list = [{
+       'sparsity': 0.8,
+       'op_types': ['default']
+   }]
+
+   # load a pretrained model or train a model before using a pruner
+   # model = MyModel()
+   # model.load_state_dict(torch.load('mycheckpoint.pth'))
+
+   # The AGP pruner prunes the model while fine-tuning it by adding a hook on
+   # optimizer.step(), so an optimizer is required to prune the model.
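+   # The `trainer` and `criterion` passed below are placeholders for your own
+   # training logic; a minimal sketch (assuming a `train_loader` is defined)
+   # could look like:
+   #   criterion = torch.nn.CrossEntropyLoss()
+   #   def trainer(model, optimizer, criterion, epoch):
+   #       for data, target in train_loader:
+   #           optimizer.zero_grad()
+   #           loss = criterion(model(data), target)
+   #           loss.backward()
+   #           optimizer.step()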
+   optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)
+
+   pruner = AGPPruner(model, config_list, optimizer, trainer, criterion, pruning_algorithm='level')
+   pruner.compress()
+
+The AGP pruner uses the ``LevelPruner`` algorithm to prune the weights by default; however, you can set the ``pruning_algorithm`` parameter to other values to use other pruning algorithms:
+
+
+* ``level``\ : LevelPruner
+* ``slim``\ : SlimPruner
+* ``l1``\ : L1FilterPruner
+* ``l2``\ : L2FilterPruner
+* ``fpgm``\ : FPGMPruner
+* ``taylorfo``\ : TaylorFOWeightFilterPruner
+* ``apoz``\ : ActivationAPoZRankFilterPruner
+* ``mean_activation``\ : ActivationMeanRankFilterPruner
+
+
+User configuration for AGP Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AGPPruner
+
+----
+
+NetAdapt Pruner
+---------------
+
+NetAdapt allows a user to automatically simplify a pretrained network to meet the resource budget.
+Given the overall sparsity, NetAdapt will automatically generate the sparsity distribution among different layers by iterative pruning.
+
+For more details, please refer to `NetAdapt: Platform-Aware Neural Network Adaptation for Mobile Applications `__.
+
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import NetAdaptPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = NetAdaptPruner(model, config_list, short_term_fine_tuner=short_term_fine_tuner, evaluator=evaluator, base_algo='l1', experiment_data_dir='./')
+   pruner.compress()
+
+You can view :githublink:`example ` for more information.
+
+User configuration for NetAdapt Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.NetAdaptPruner
+
+SimulatedAnnealing Pruner
+-------------------------
+
+We implement a guided heuristic search method, the Simulated Annealing (SA) algorithm, enhanced with guided search based on prior experience.
+The enhanced SA technique is based on the observation that a DNN layer with a larger number of weights often tolerates a higher degree of compression with less impact on overall accuracy.
+
+
+* Randomly initialize a pruning rate distribution (sparsities).
+* While current_temperature < stop_temperature:
+
+  #. Generate a perturbation of the current distribution
+  #. Perform a fast evaluation on the perturbed distribution
+  #. Accept the perturbation according to the performance and the acceptance probability; if not accepted, return to step 1
+  #. Cool down: current_temperature <- current_temperature * cool_down_rate
+
+For more details, please refer to `AutoCompress: An Automatic DNN Structured Pruning Framework for Ultra-High Compression Rates `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = SimulatedAnnealingPruner(model, config_list, evaluator=evaluator, base_algo='l1', cool_down_rate=0.9, experiment_data_dir='./')
+   pruner.compress()
+
+You can view :githublink:`example ` for more information.
+
+User configuration for SimulatedAnnealing Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SimulatedAnnealingPruner
+
+AutoCompress Pruner
+-------------------
+
+In each round, AutoCompressPruner prunes the model with the same sparsity to achieve the overall target sparsity:
+
+.. code-block:: bash
+
+   1. Generate the sparsity distribution using SimulatedAnnealingPruner
+   2. Perform ADMM-based structured pruning to generate the pruning result for the next round.
+      Here we use `speedup` to perform real pruning.
+
+
+For more details, please refer to `AutoCompress: An Automatic DNN Structured Pruning Framework for Ultra-High Compression Rates `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import AutoCompressPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = AutoCompressPruner(
+       model, config_list, trainer=trainer, evaluator=evaluator,
+       dummy_input=dummy_input, num_iterations=3, optimize_mode='maximize', base_algo='l1',
+       cool_down_rate=0.9, admm_num_iterations=30, admm_training_epochs=5, experiment_data_dir='./')
+   pruner.compress()
+
+You can view :githublink:`example ` for more information.
+
+User configuration for AutoCompress Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AutoCompressPruner
+
+AMC Pruner
+----------
+
+The AMC pruner leverages reinforcement learning to provide the model compression policy.
+This learning-based compression policy outperforms conventional rule-based compression policies by achieving a higher compression ratio, better preserving accuracy, and freeing human labor.
+
+
+For more details, please refer to `AMC: AutoML for Model Compression and Acceleration on Mobile Devices `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import AMCPruner
+   config_list = [{
+       'op_types': ['Conv2d', 'Linear']
+   }]
+   pruner = AMCPruner(model, config_list, evaluator, val_loader, flops_ratio=0.5)
+   pruner.compress()
+
+You can view :githublink:`example ` for more information.
+
+User configuration for AMC Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AMCPruner
+
+Reproduced Experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We implemented one of the experiments in `AMC: AutoML for Model Compression and Acceleration on Mobile Devices `__: we pruned **MobileNet** to 50% FLOPs on ImageNet as in the paper. Our experiment results are as follows:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Model
+     - Top 1 acc. (paper/ours)
+     - Top 5 acc. (paper/ours)
+     - FLOPs
+   * - MobileNet
+     - 70.5% / 69.9%
+     - 89.3% / 89.1%
+     - 50%
+
+
+The experiment code can be found at :githublink:`examples/model_compress/pruning/ `
+
+ADMM Pruner
+-----------
+
+Alternating Direction Method of Multipliers (ADMM) is a mathematical optimization technique that decomposes the original nonconvex problem into two subproblems that can be solved iteratively. In the weight pruning problem, these two subproblems are solved via 1) a gradient descent algorithm and 2) a Euclidean projection, respectively.
+
+During the process of solving these two subproblems, the weights of the original model will be changed. A one-shot pruner will then be applied to prune the model according to the given config list.
+
+This solution framework applies both to non-structured pruning and to different variations of structured pruning schemes.
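+
+To make the decomposition concrete, the weight-pruning problem can be viewed (in simplified form, paraphrasing the paper) as:
+
+.. math::
+
+   \min_{W} \; f(W) + g(W), \quad
+   g(W) = \begin{cases} 0 & \text{if } W \text{ satisfies the sparsity constraint} \\ +\infty & \text{otherwise} \end{cases}
+
+ADMM introduces an auxiliary variable :math:`Z` with the constraint :math:`W = Z`, then alternates between minimizing the loss :math:`f` over :math:`W` (gradient descent) and projecting :math:`Z` onto the constraint set (Euclidean projection), which matches the two subproblems described above.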
+
+For more details, please refer to `A Systematic DNN Weight Pruning Framework using Alternating Direction Method of Multipliers `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import ADMMPruner
+   config_list = [{
+       'sparsity': 0.8,
+       'op_types': ['Conv2d'],
+       'op_names': ['conv1']
+   }, {
+       'sparsity': 0.92,
+       'op_types': ['Conv2d'],
+       'op_names': ['conv2']
+   }]
+   pruner = ADMMPruner(model, config_list, trainer, num_iterations=30, epochs_per_iteration=5)
+   pruner.compress()
+
+You can view :githublink:`example ` for more information.
+
+User configuration for ADMM Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ADMMPruner
+
+Lottery Ticket Hypothesis
+-------------------------
+
+`The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks `__, by Jonathan Frankle and Michael Carbin, provides comprehensive measurement and analysis and articulates the *lottery ticket hypothesis*\ : dense, randomly-initialized, feed-forward networks contain subnetworks (*winning tickets*\ ) that -- when trained in isolation -- reach test accuracy comparable to the original network in a similar number of iterations.
+
+In this paper, the authors use the following process, called *iterative pruning*, to prune a model:
+
+..
+
+   #. Randomly initialize a neural network :math:`f(x;\theta_0)` (where :math:`\theta_0 \sim \mathcal{D}_{\theta}`).
+   #. Train the network for :math:`j` iterations, arriving at parameters :math:`\theta_j`.
+   #. Prune :math:`p\%` of the parameters in :math:`\theta_j`, creating a mask :math:`m`.
+   #. Reset the remaining parameters to their values in :math:`\theta_0`, creating the winning ticket :math:`f(x;m \odot \theta_0)`.
+   #. Repeat steps 2, 3, and 4.
+
+
+If the configured final sparsity is :math:`P` (e.g., 0.8) and there are :math:`n` pruning iterations, each iteration prunes :math:`1-(1-P)^{1/n}` of the weights that survive the previous round. For example, with :math:`P=0.8` and :math:`n=10`, each round prunes roughly :math:`14.9\%` of the surviving weights.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import LotteryTicketPruner
+   config_list = [{
+       'prune_iterations': 5,
+       'sparsity': 0.8,
+       'op_types': ['default']
+   }]
+   pruner = LotteryTicketPruner(model, config_list, optimizer)
+   pruner.compress()
+   for _ in pruner.get_prune_iterations():
+       pruner.prune_iteration_start()
+       for epoch in range(epoch_num):
+           ...
+
+The above configuration means that there are 5 pruning iterations. As the 5 iterations are executed in the same run, LotteryTicketPruner needs ``model`` and ``optimizer`` (\ **note that the ``lr_scheduler`` should also be added if used**\ ) to reset their states every time a new pruning iteration starts. Please use ``get_prune_iterations`` to get the pruning iterations, and invoke ``prune_iteration_start`` at the beginning of each iteration. ``epoch_num`` should be large enough for model convergence, because the hypothesis is that the performance (accuracy) obtained in later rounds with high sparsity can be comparable to that obtained in the first round.
+
+
+User configuration for LotteryTicket Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.LotteryTicketPruner
+
+Reproduced Experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We try to reproduce the experiment result of the fully connected network on MNIST using the same configuration as in the paper. The code can be found :githublink:`here `. In this experiment, we prune 10 times; for each pruning iteration, we train the pruned model for 50 epochs.
+
+
+.. image:: ../../img/lottery_ticket_mnist_fc.png
+   :target: ../../img/lottery_ticket_mnist_fc.png
+   :alt:
+
+
+The above figure shows the result of the fully connected network. ``round0-sparsity-0.0`` is the performance without pruning. Consistent with the paper, pruning around 80% of the weights also obtains performance similar to no pruning, and it converges a little faster. If we prune too much, e.g., more than 94%, the accuracy becomes lower and convergence becomes a little slower. Slightly different from our result, the trend of the data in the paper is clearer.
+
+Sensitivity Pruner
+------------------
+
+In each round, SensitivityPruner prunes the model based on each layer's sensitivity to accuracy, until the configured final sparsity of the whole model is met:
+
+.. code-block:: bash
+
+   1. Analyze the sensitivity of each layer in the current state of the model.
+   2. Prune each layer according to the sensitivity.
+
+
+For more details, please refer to `Learning both Weights and Connections for Efficient Neural Networks `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import SensitivityPruner
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['Conv2d']
+   }]
+   pruner = SensitivityPruner(model, config_list, finetuner=fine_tuner, evaluator=evaluator)
+   # eval_args and finetune_args are the parameters passed to the evaluator and finetuner respectively
+   pruner.compress(eval_args=[model], finetune_args=[model])
+
+User configuration for Sensitivity Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SensitivityPruner
+
+Transformer Head Pruner
+-----------------------
+
+Transformer Head Pruner is a tool designed for pruning attention heads from models belonging to the `Transformer family `__. The following image from `Efficient Transformers: A Survey `__ gives a good overview of the general structure of the Transformer.
+
+.. image:: ../../img/transformer_structure.png
+   :target: ../../img/transformer_structure.png
+   :alt:
+
+Typically, each attention layer in a Transformer model consists of four weights: three projection matrices for query, key, and value, plus an output projection matrix. The outputs of the former three matrices contain the projected results for all heads. Normally, the results are then reshaped so that each head performs the attention computation independently. The final results are concatenated back before being fed into the output projection. Therefore, when an attention head is pruned, the weights corresponding to that head in the three projection matrices are pruned, as are the weights in the output projection corresponding to the head's output. In our implementation, we calculate and apply masks to the four matrices together.
+
+Note: currently, the pruner can only handle models with projection weights written as separate ``Linear`` modules, i.e., it expects four ``Linear`` modules corresponding to the query, key, value, and output projections. Therefore, in the ``config_list``, you should either write ``['Linear']`` for the ``op_types`` field, or write names corresponding to ``Linear`` modules for the ``op_names`` field. For instance, the `Huggingface transformers `_ are supported, but ``torch.nn.Transformer`` is not.
+
+The pruner implements the following algorithm:
+
+.. code-block:: bash
+
+   Repeat for each pruning iteration (1 for one-shot pruning):
+     1. Calculate importance scores for each head in each specified layer using a specific criterion.
+     2. Sort heads locally or globally, and prune the heads with the lowest scores. The number of pruned heads is determined according to the sparsity specified in the config.
+     3. If the specified number of pruning iterations is larger than 1 (iterative pruning), finetune the model for a while before the next pruning iteration.
+
+Currently, the following head sorting criteria are supported:
+
+* "l1_weight": rank heads by the L1 norm of the weights of the query, key, and value projection matrices.
+* "l2_weight": rank heads by the L2 norm of the weights of the query, key, and value projection matrices.
+* "l1_activation": rank heads by the L1 norm of their attention computation output.
+* "l2_activation": rank heads by the L2 norm of their attention computation output.
+* "taylorfo": rank heads by the L1 norm of the attention computation output multiplied by the gradient of that output. Check more details in `this paper `__ and `this one `__.
+
+We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control this by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list. However, this does not mean that each layer will be pruned to the same sparsity as specified: this sparsity value will be interpreted as a global sparsity, and each layer is likely to have a different sparsity after pruning by global sort. As a reminder, we found that when global sorting is used, it is usually helpful to use an iterative pruning scheme, interleaving pruning with intermediate finetuning, since global sorting often results in non-uniform sparsity distributions, which makes the model more susceptible to forgetting.
+
+In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules as one of the pruner's initialization parameters (usage below), or simply pass a dummy input instead, in which case the pruner will run ``torch.jit.trace`` to group the weights (experimental feature). However, if you would like to assign a different sparsity to each layer, you can only use the first option, i.e., passing the names of the weights to the pruner (see usage below). Also, note that we require the weights belonging to the same layer to have the same sparsity.
+
+Usage
+^^^^^
+
+Suppose we want to prune a BERT with the Huggingface implementation, which has the following architecture (obtained by calling ``print(model)``). Note that we only show the first of the repeated layers in the encoder's ``ModuleList``.
+
+.. image:: ../../img/huggingface_bert_architecture.png
+   :target: ../../img/huggingface_bert_architecture.png
+   :alt:
+
+**Usage Example: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that
+
+* Here we specify ``op_names`` in the config list to assign different sparsities to different layers.
+* Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer.
+* Since in this example we want to do one-shot pruning, the ``num_iterations`` parameter is set to 1, and the parameter ``epochs_per_iteration`` is ignored.
If you would like to do iterative pruning instead, set the ``num_iterations`` parameter to the number of pruning iterations and the ``epochs_per_iteration`` parameter to the number of finetuning epochs between two iterations.
+* The arguments ``trainer`` and ``optimizer`` are only used when we want to do iterative pruning, or when the ranking criterion is ``taylorfo``. Here these two parameters are ignored by the pruner.
+* The argument ``forward_runner`` is only used when the ranking criterion is ``l1_activation`` or ``l2_activation``. Here this parameter is ignored by the pruner.
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
+   attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)],
+                                    ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)],
+                                    ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)],
+                                    ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)]))
+
+   kwargs = {"ranking_criterion": "l1_weight",
+             "global_sort": False,
+             "num_iterations": 1,
+             "epochs_per_iteration": 1,  # this is ignored when num_iterations = 1
+             "head_hidden_dim": 64,
+             "attention_name_groups": attention_name_groups,
+             "trainer": trainer,
+             "optimizer": optimizer,
+             "forward_runner": forward_runner
+             }
+   config_list = [{
+       "sparsity": 0.5,
+       "op_types": ["Linear"],
+       "op_names": [x for layer in attention_name_groups[:6] for x in layer]   # first six layers
+   }, {
+       "sparsity": 0.25,
+       "op_types": ["Linear"],
+       "op_names": [x for layer in attention_name_groups[6:] for x in layer]   # last six layers
+   }]
+
+   pruner = TransformerHeadPruner(model, config_list, **kwargs)
+   pruner.compress()
+
+In addition to this usage guide, we provide a more detailed example of pruning BERT (Huggingface implementation) for transfer learning on the tasks from the `GLUE benchmark `_. Please find it on this :githublink:`page `. To run the example, first make sure that you have installed the packages ``transformers`` and ``datasets``. Then, you may start by running the following command:
+
+.. code-block:: bash
+
+   ./run.sh gpu_id glue_task
+
+By default, the code will download a pretrained BERT language model and then finetune it for several epochs on the downstream GLUE task. Then, the ``TransformerHeadPruner`` will be used to prune heads from each layer by a certain criterion (by default, the code lets the pruner use magnitude ranking and prune out 50% of the heads in each layer in a one-shot manner). Finally, the pruned model will be finetuned on the downstream task for several epochs. You can check the details of pruning in the logs printed out by the example. You can also experiment with different pruning settings by changing the parameters in ``run.sh``, or by directly changing the ``config_list`` in ``transformer_pruning.py``.
+
+User configuration for Transformer Head Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner
diff --git a/docs/en_US/Compression/QuantizationSpeedup.rst b/docs/en_US/Compression/QuantizationSpeedup.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d355382b3c0787c367d035d2014e02d02d4aed68
--- /dev/null
+++ b/docs/en_US/Compression/QuantizationSpeedup.rst
@@ -0,0 +1,142 @@
+Speed up Mixed Precision Quantization Model (experimental)
+==========================================================
+
+
+Introduction
+------------
+
+Deep learning networks are computation-intensive and memory-intensive, which increases the difficulty of deploying deep neural network models. Quantization is a
+fundamental technology widely used to reduce the memory footprint and speed up the inference
+process. Many frameworks have begun to support quantization, but few of them support mixed-precision
+quantization and achieve real speedup. Frameworks like `HAQ: Hardware-Aware Automated Quantization with Mixed Precision `__ only support simulated mixed-precision quantization, which does
+not speed up the inference process. To achieve real speedup from mixed-precision quantization and to
+give users real feedback from hardware, we designed a general framework with a simple interface that allows NNI quantization algorithms to connect to different
+DL model optimization backends (e.g., TensorRT, NNFusion). This gives users an end-to-end experience: after quantizing their model
+with a quantization algorithm, the quantized model can be directly sped up with the connected optimization backend. NNI connects to
+TensorRT at this stage, and will support more backends in the future.
+
+
+Design and Implementation
+-------------------------
+
+To support speeding up mixed-precision quantization, we divide the framework into two parts: frontend and backend.
+The frontend can be a popular training framework such as PyTorch or TensorFlow, and the backend can be an inference
+framework for different hardware, such as TensorRT. At present, we support PyTorch as the frontend and
+TensorRT as the backend. To convert a PyTorch model to a TensorRT engine, we leverage ONNX as the intermediate graph
+representation: we convert the PyTorch model to an ONNX model, and then TensorRT parses the ONNX
+model to generate the inference engine.
+
+
+Quantization-aware training combines the NNI quantization algorithm 'QAT' and the NNI quantization speedup tool.
+Users should set a config to train a quantized model using the QAT algorithm (please refer to `NNI Quantization Algorithms `__\ ).
+After quantization-aware training, users get a new config with calibration parameters and a model with quantized weights. By passing the new config and model to the quantization speedup tool, users get a real mixed-precision speedup engine for inference.
+
+
+After getting the mixed-precision engine, users can run inference with their input data.
+
+
+Note
+
+
+* We recommend using "cpu" (host) as the data device (for both inference data and calibration data) since the data should be on the host initially and will be transferred to the device before inference. If the data device is not "cpu" (host), this tool will transfer the data to "cpu", which may add unnecessary overhead.
+* Users can also do post-training quantization by leveraging TensorRT directly (a calibration dataset must be provided).
+* Not all op types are supported right now. At present, NNI supports Conv, Linear, Relu and MaxPool. More op types will be supported in a future release.
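+
+As a rough illustration of the frontend-to-backend handoff described above (an illustrative sketch with a hypothetical input shape, not the tool's internal code), the PyTorch-to-ONNX step looks like:
+
+.. code-block:: python
+
+   import torch
+
+   # Export the PyTorch model to an ONNX graph; TensorRT later parses the
+   # ONNX file to build the inference engine.
+   dummy_input = torch.randn(1, 3, 224, 224)  # hypothetical input shape
+   torch.onnx.export(model, dummy_input, 'model.onnx')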
+
+
+Prerequisite
+------------
+CUDA version >= 11.0
+
+TensorRT version >= 7.2
+
+Note
+
+* If you haven't installed TensorRT before, or are using an old version, please refer to the `TensorRT Installation Guide `__\
+
+Usage
+-----
+Quantization-aware training:
+
+.. code-block:: python
+
+   # arrange bit config for QAT algorithm
+   configure_list = [{
+       'quant_types': ['weight', 'output'],
+       'quant_bits': {'weight':8, 'output':8},
+       'op_names': ['conv1']
+   }, {
+       'quant_types': ['output'],
+       'quant_bits': {'output':8},
+       'op_names': ['relu1']
+   }
+   ]
+
+   quantizer = QAT_Quantizer(model, configure_list, optimizer)
+   quantizer.compress()
+   calibration_config = quantizer.export_model(model_path, calibration_path)
+
+   engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=batch_size)
+   # build tensorrt inference engine
+   engine.compress()
+   # data should be a pytorch tensor
+   output, time = engine.inference(data)
+
+
+Note that NNI also supports post-training quantization directly; please refer to the complete examples for details.
+
+
+For complete examples please refer to :githublink:`the code `.
+
+
+For more parameters of the class ``ModelSpeedupTensorRT``, you can refer to `Model Compression API Reference `__\.
+
+
+MNIST test
+^^^^^^^^^^
+
+On one GTX 2080 GPU,
+input tensor: ``torch.randn(128, 1, 28, 28)``
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Quantization strategy
+     - Latency
+     - Accuracy
+   * - all in 32bit
+     - 0.001199961
+     - 96%
+   * - mixed precision (average bit 20.4)
+     - 0.000753688
+     - 96%
+   * - all in 8bit
+     - 0.000229869
+     - 93.7%
+
+
+CIFAR-10 ResNet-18 test (train one epoch)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+On one GTX 2080 GPU,
+input tensor: ``torch.randn(128, 3, 32, 32)``
+
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Quantization strategy
+     - Latency
+     - Accuracy
+   * - all in 32bit
+     - 0.003286268
+     - 54.21%
+   * - mixed precision (average bit 11.55)
+     - 0.001358022
+     - 54.78%
+   * - all in 8bit
+     - 0.000859139
+     - 52.81%
\ No newline at end of file
diff --git a/docs/en_US/Compression/Quantizer.rst b/docs/en_US/Compression/Quantizer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..79607d2f79603fcb70ccb336532d22e5bfd216c3
--- /dev/null
+++ b/docs/en_US/Compression/Quantizer.rst
@@ -0,0 +1,385 @@
+Supported Quantization Algorithms on NNI
+========================================
+
+Index of supported quantization algorithms
+
+
+* `Naive Quantizer <#naive-quantizer>`__
+* `QAT Quantizer <#qat-quantizer>`__
+* `DoReFa Quantizer <#dorefa-quantizer>`__
+* `BNN Quantizer <#bnn-quantizer>`__
+* `LSQ Quantizer <#lsq-quantizer>`__
+* `Observer Quantizer <#observer-quantizer>`__
+
+Naive Quantizer
+---------------
+
+We provide the Naive Quantizer to quantize weights to 8 bits by default. You can use it to test a quantization algorithm without any configuration.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   model = nni.algorithms.compression.pytorch.quantization.NaiveQuantizer(model).compress()
+
+----
+
+QAT Quantizer
+-------------
+
+In `Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference `__\ , authors Benoit Jacob and Skirmantas Kligys provide an algorithm to quantize the model with training.
+
+..
+
+   We propose an approach that simulates quantization effects in the forward pass of training. Backpropagation still happens as usual, and all weights and biases are stored in floating point so that they can be easily nudged by small amounts.
The forward propagation pass, however, simulates quantized inference as it will happen in the inference engine, by implementing in floating-point arithmetic the rounding behavior of the quantization scheme.
+
+
+   * Weights are quantized before they are convolved with the input. If batch normalization (see [17]) is used for the layer, the batch normalization parameters are "folded into" the weights before quantization.
+   * Activations are quantized at points where they would be during inference, e.g. after the activation function is applied to a convolutional or fully connected layer's output, or after a bypass connection adds or concatenates the outputs of several layers together such as in ResNets.
+
+
+Usage
+^^^^^
+
+You can quantize your model to 8 bits with the code below before your training code.
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
+   model = Mnist()
+
+   config_list = [{
+       'quant_types': ['weight'],
+       'quant_bits': {
+           'weight': 8,
+       }, # you can just use `int` here because all `quant_types` share the same bit length, see config for `ReLu6` below.
+       'op_types':['Conv2d', 'Linear']
+   }, {
+       'quant_types': ['output'],
+       'quant_bits': 8,
+       'quant_start_step': 7000,
+       'op_types':['ReLU6']
+   }]
+   quantizer = QAT_Quantizer(model, config_list)
+   quantizer.compress()
+
+You can view the example for more information.
+
+User configuration for QAT Quantizer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Common configuration needed by compression algorithms can be found at `Specification of config_list <./QuickStart.rst>`__.
+
+Configuration needed by this algorithm:
+
+
+* **quant_start_step:** int
+
+Disable quantization until the model has run for a certain number of steps. This allows the network to enter a more stable
+state, where activation quantization ranges do not exclude a significant fraction of values. The default value is 0.
+
+Batch normalization folding
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Batch normalization folding is supported in the QAT quantizer. It can be easily enabled by passing an argument ``dummy_input`` to
+the quantizer, like:
+
+.. code-block:: python
+
+   # assume your model takes an input of shape (1, 1, 28, 28)
+   # and dummy_input must be on the same device as the model
+   dummy_input = torch.randn(1, 1, 28, 28)
+
+   # pass the dummy_input to the quantizer
+   quantizer = QAT_Quantizer(model, config_list, dummy_input=dummy_input)
+
+
+The quantizer will automatically detect Conv-BN patterns and simulate the batch normalization folding process in the training
+graph. Note that when the quantization-aware training process is finished, the folded weight/bias will be restored after calling
+``quantizer.export_model``.
+
+Quantization dtype and scheme customization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Different backends on different devices use different quantization strategies, i.e. dtype (int or uint) and
+scheme (per-tensor or per-channel, symmetric or affine). The QAT quantizer supports customization of mainstream dtypes and schemes.
+There are two ways to set them. One way is to set them globally through a function named ``set_quant_scheme_dtype``, like:
+
+.. code-block:: python
+
+   from nni.compression.pytorch.quantization.settings import set_quant_scheme_dtype
+
+   # This will set all the quantization of 'input' in 'per_tensor_affine' and 'uint' manner
+   set_quant_scheme_dtype('input', 'per_tensor_affine', 'uint')
+   # This will set all the quantization of 'output' in 'per_tensor_symmetric' and 'int' manner
+   set_quant_scheme_dtype('output', 'per_tensor_symmetric', 'int')
+   # This will set all the quantization of 'weight' in 'per_channel_symmetric' and 'int' manner
+   set_quant_scheme_dtype('weight', 'per_channel_symmetric', 'int')
+
+
+The other way is more detailed. You can customize the dtype and scheme in each quantization config list, like:
+
+.. code-block:: python
+
+   config_list = [{
+       'quant_types': ['weight'],
+       'quant_bits': 8,
+       'op_types':['Conv2d', 'Linear'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_channel_symmetric'
+   }, {
+       'quant_types': ['output'],
+       'quant_bits': 8,
+       'quant_start_step': 7000,
+       'op_types':['ReLU6'],
+       'quant_dtype': 'uint',
+       'quant_scheme': 'per_tensor_affine'
+   }]
+
+Multi-GPU training
+^^^^^^^^^^^^^^^^^^
+The QAT quantizer natively supports multi-GPU training (DataParallel and DistributedDataParallel). Note that the quantizer
+instantiation should happen before you wrap your model with DataParallel or DistributedDataParallel. For example:
+
+.. code-block:: python
+
+   from torch.nn.parallel import DistributedDataParallel as DDP
+   from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
+
+   model = define_your_model()
+
+   quantizer = QAT_Quantizer(model, config_list)  # <--- QAT_Quantizer instantiation before DDP wrapping
+   model = quantizer.compress()
+
+   model = DDP(model)
+
+   for i in range(epochs):
+       train(model)
+       eval(model)
+
+
+----
+
+LSQ Quantizer
+-------------
+
+In `LEARNED STEP SIZE QUANTIZATION `__\ , authors Steven K. Esser and Jeffrey L. McKinstry provide an algorithm to train the scales with gradients.
+
+..
+
+   The authors introduce a novel means to estimate and scale the task loss gradient at each weight and activation layer's quantizer step size, such that it can be learned in conjunction with other network parameters.
+
+
+Usage
+^^^^^
+You can add the code below before your training code. Three things must be done:
+
+
+1. Configure which layers are to be quantized and which tensors (input/output/weight) of those layers are to be quantized.
+2. Construct the LSQ quantizer.
+3. Call the ``compress`` API.
+
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import LsqQuantizer
+   model = Mnist()
+
+   configure_list = [{
+       'quant_types': ['weight', 'input'],
+       'quant_bits': {
+           'weight': 8,
+           'input': 8,
+       },
+       'op_names': ['conv1']
+   }, {
+       'quant_types': ['output'],
+       'quant_bits': {'output': 8,},
+       'op_names': ['relu1']
+   }]
+
+   quantizer = LsqQuantizer(model, configure_list, optimizer)
+   quantizer.compress()
+
+You can view the example :githublink:`examples/model_compress/quantization/LSQ_torch_quantizer.py ` for more information.
+
+User configuration for LSQ Quantizer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Common configuration needed by compression algorithms can be found at `Specification of config_list <./QuickStart.rst>`__.
+
+Configuration needed by this algorithm:
+
+
+----
+
+DoReFa Quantizer
+----------------
+
+In `DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients `__\ , authors Shuchang Zhou and Yuxin Wu provide an algorithm named DoReFa to quantize the weights, activations and gradients with training.
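+
+As a sketch of the idea (paraphrasing the paper's k-bit weight quantization; notation simplified here), DoReFa quantizes a value :math:`x \in [0, 1]` to :math:`k` bits as:
+
+.. math::
+
+   \text{quantize}_k(x) = \frac{1}{2^k - 1} \operatorname{round}\left(\left(2^k - 1\right) x\right)
+
+where weights are first squashed into :math:`[0, 1]` via :math:`\frac{\tanh(w)}{2 \max |\tanh(w)|} + \frac{1}{2}` before quantization.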
+
+Usage
+^^^^^
+
+To use the DoReFa Quantizer, you can add the code below before your training code:
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import DoReFaQuantizer
+   config_list = [{
+       'quant_types': ['weight'],
+       'quant_bits': 8,
+       'op_types': ['default']
+   }]
+   quantizer = DoReFaQuantizer(model, config_list)
+   quantizer.compress()
+
+You can view the example for more information.
+
+User configuration for DoReFa Quantizer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Common configuration needed by compression algorithms can be found at `Specification of config_list <./QuickStart.rst>`__.
+
+Configuration needed by this algorithm:
+
+----
+
+BNN Quantizer
+-------------
+
+In `Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1 `__\ ,
+
+..
+
+   We introduce a method to train Binarized Neural Networks (BNNs) - neural networks with binary weights and activations at run-time. At training-time the binary weights and activations are used for computing the parameters gradients. During the forward pass, BNNs drastically reduce memory size and accesses, and replace most arithmetic operations with bit-wise operations, which is expected to substantially improve power-efficiency.
+
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import BNNQuantizer
+   model = VGG_Cifar10(num_classes=10)
+
+   configure_list = [{
+       'quant_bits': 1,
+       'quant_types': ['weight'],
+       'op_types': ['Conv2d', 'Linear'],
+       'op_names': ['features.0', 'features.3', 'features.7', 'features.10', 'features.14', 'features.17', 'classifier.0', 'classifier.3']
+   }, {
+       'quant_bits': 1,
+       'quant_types': ['output'],
+       'op_types': ['Hardtanh'],
+       'op_names': ['features.6', 'features.9', 'features.13', 'features.16', 'features.20', 'classifier.2', 'classifier.5']
+   }]
+
+   quantizer = BNNQuantizer(model, configure_list)
+   model = quantizer.compress()
+
+You can view the example :githublink:`examples/model_compress/quantization/BNN_quantizer_cifar10.py ` for more information.
+
+User configuration for BNN Quantizer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Common configuration needed by compression algorithms can be found at `Specification of config_list <./QuickStart.rst>`__.
+
+Configuration needed by this algorithm:
+
+Experiment
+^^^^^^^^^^
+
+We implemented one of the experiments in `Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1 `__: we quantized the **VGGNet** for CIFAR-10 as in the paper. Our experiment results are as follows:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Model
+     - Accuracy
+   * - VGGNet
+     - 86.93%
+
+
+The experiment code can be found at :githublink:`examples/model_compress/quantization/BNN_quantizer_cifar10.py `
+
+
+Observer Quantizer
+------------------
+
+..
+
+   Observer quantizer is a framework for post-training quantization. It inserts observers into the places where quantization will happen. During quantization calibration, each observer records all the tensors it 'sees'. These tensors are used to calculate the quantization statistics after calibration.
+
+Usage
+^^^^^
+
+1. Configure which layers are to be quantized and which tensors (input/output/weight) of those layers are to be quantized.
+2. Construct the observer quantizer.
+3. Do quantization calibration.
+4. Call the ``compress`` API to calculate the scale and zero point for each tensor and switch the model to evaluation mode.
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import ObserverQuantizer
+
+   def calibration(model, calib_loader):
+       model.eval()
+       with torch.no_grad():
+           for data, _ in calib_loader:
+               model(data)
+
+   model = Mnist()
+
+   configure_list = [{
+       'quant_bits': 8,
+       'quant_types': ['weight', 'input'],
+       'op_names': ['conv1', 'conv2'],
+   }, {
+       'quant_bits': 8,
+       'quant_types': ['output'],
+       'op_names': ['relu1', 'relu2'],
+   }]
+
+   quantizer = ObserverQuantizer(model, configure_list)
+   calibration(model, calib_loader)
+   model = quantizer.compress()
+
+You can view the example :githublink:`examples/model_compress/quantization/observer_quantizer.py ` for more information.
+
+User configuration for Observer Quantizer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Common configuration needed by compression algorithms can be found at `Specification of config_list <./QuickStart.rst>`__.
+
+
+.. note::
+   This quantizer is still under development for now. Some quantizer settings are hard-coded:
+
+   - weight observer: per_tensor_symmetric, qint8
+   - output observer: per_tensor_affine, quint8, reduce_range=True
+
+   Other settings (such as quant_types and op_names) can be configured.
+
+About the compress API
+^^^^^^^^^^^^^^^^^^^^^^
+Before the ``compress`` API is called, the model only records tensor statistics and no quantization process is executed.
+After the ``compress`` API is called, the model no longer records tensor statistics. The quantization scale and zero point are
+generated for each tensor and are used to quantize each tensor during inference (we call this evaluation mode).
+
+About calibration
+^^^^^^^^^^^^^^^^^
+Usually we pick about 100 training/evaluation examples for calibration. If you find that the accuracy is a bit low, try reducing
+the number of calibration examples.
+
diff --git a/docs/en_US/Compression/QuickStart.rst b/docs/en_US/Compression/QuickStart.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c35473dfcda40e980b87910799d62a8651eecf3
--- /dev/null
+++ b/docs/en_US/Compression/QuickStart.rst
@@ -0,0 +1,124 @@
+Quick Start
+===========
+
+.. toctree::
+   :hidden:
+
+   Notebook Example
+
+
+Model compression usually consists of three stages: 1) pre-training a model, 2) compressing the model, 3) fine-tuning the model. NNI mainly focuses on the second stage and provides very simple APIs for compressing a model. Follow this guide for a quick look at how easy it is to use NNI to compress a model.
+
+A `compression pipeline example <./compression_pipeline_example.rst>`__ with a Jupyter notebook is provided; refer to the code :githublink:`here `.
+
+Model Pruning
+-------------
+
+Here we use the `level pruner <../Compression/Pruner.rst#level-pruner>`__ as an example to show the usage of pruning in NNI.
+
+Step1. Write configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Write a configuration to specify the layers that you want to prune. The following configuration means pruning all the ``default``\ ops to sparsity 0.5 while keeping other layers unpruned.
+
+.. code-block:: python
+
+   config_list = [{
+       'sparsity': 0.5,
+       'op_types': ['default'],
+   }]
+
+The specification of the configuration can be found `here <./Tutorial.rst#specify-the-configuration>`__. Note that different pruners may have their own defined fields in the configuration.
Please refer to each pruner's `usage <./Pruner.rst>`__ for details, and adjust the configuration accordingly.
+
+Step2. Choose a pruner and compress the model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+First instantiate the chosen pruner with your model and configuration as arguments, then invoke ``compress()`` to compress your model. Note that some algorithms may need gradients for compressing, so we may also need to define a trainer, an optimizer, and a criterion, and pass them to the pruner.
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import LevelPruner
+
+   pruner = LevelPruner(model, config_list)
+   model = pruner.compress()
+
+Some pruners (e.g., L1FilterPruner, FPGMPruner) prune once; others (e.g., AGPPruner) prune your model iteratively, adjusting the masks epoch by epoch during training.
+
+So if a pruner prunes your model iteratively, or needs training or inference to get gradients, you need to pass the finetuning logic to the pruner.
+
+For example:
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.pruning import AGPPruner
+
+   pruner = AGPPruner(model, config_list, optimizer, trainer, criterion, num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level')
+   model = pruner.compress()
+
+Step3. Export compression result
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+After training, you can export the model weights to a file, and the generated masks to a file as well. Exporting an ONNX model is also supported.
+
+.. code-block:: python
+
+   pruner.export_model(model_path='pruned_vgg19_cifar10.pth', mask_path='mask_vgg19_cifar10.pth')
+
+Please refer to the :githublink:`mnist example ` for example code.
+
+More examples of pruning algorithms can be found in :githublink:`basic_pruners_torch ` and :githublink:`auto_pruners_torch `.
+
+
+Model Quantization
+------------------
+
+Here we use the `QAT Quantizer <../Compression/Quantizer.rst#qat-quantizer>`__ as an example to show the usage of quantization in NNI.
+
+Step1. Write configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   config_list = [{
+       'quant_types': ['weight', 'input'],
+       'quant_bits': {
+           'weight': 8,
+           'input': 8,
+       }, # you can just use `int` here because all `quant_types` share the same bit length, see config for `ReLu6` below.
+       'op_types':['Conv2d', 'Linear'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_channel_symmetric'
+   }, {
+       'quant_types': ['output'],
+       'quant_bits': 8,
+       'quant_start_step': 7000,
+       'op_types':['ReLU6'],
+       'quant_dtype': 'uint',
+       'quant_scheme': 'per_tensor_affine'
+   }]
+
+The specification of the configuration can be found `here <./Tutorial.rst#quantization-specific-keys>`__.
+
+Step2. Choose a quantizer and compress the model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
+
+   quantizer = QAT_Quantizer(model, config_list)
+   quantizer.compress()
+
+
+Step3. Export compression result
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+After training and calibration, you can export the model weights to a file, and the generated calibration parameters to a file as well. Exporting an ONNX model is also supported.
+
+.. code-block:: python
+
+   calibration_config = quantizer.export_model(model_path, calibration_path, onnx_path, input_shape, device)
+
+Please refer to the :githublink:`mnist example ` for example code.
+
+Congratulations! You've compressed your first model via NNI. To go a bit more in depth about model compression in NNI, check out the `Tutorial <./Tutorial.rst>`__.
\ No newline at end of file
diff --git a/docs/en_US/Compression/Tutorial.rst b/docs/en_US/Compression/Tutorial.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9c2a8982d0306de68b16a2b97ac808077604c745
--- /dev/null
+++ b/docs/en_US/Compression/Tutorial.rst
@@ -0,0 +1,257 @@
+Tutorial
+========
+
+.. contents::
+
+In this tutorial, we explain the usage of model compression in NNI in more detail.
+
+Setup compression goal
+----------------------
+
+Specify the configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Users can specify the configuration (i.e., ``config_list``\ ) for a compression algorithm. For example, when compressing a model, users may want to specify the sparsity ratio, to specify different ratios for different types of operations, to exclude certain types of operations, or to compress only certain types of operations. For users to express these kinds of requirements, we define a configuration specification. It can be seen as a Python ``list`` object, where each element is a ``dict`` object.
+
+The ``dict``\ s in the ``list`` are applied one by one, that is, the configurations in a latter ``dict`` overwrite the configurations in former ones for the operations that are within the scope of both of them.
+
+There are different keys in a ``dict``. Some of them are common keys supported by all the compression algorithms:
+
+* **op_types**\ : This specifies the types of operations to be compressed. 'default' means following the algorithm's default setting. All supported module types are defined in :githublink:`default_layers.py ` for PyTorch.
+* **op_names**\ : This specifies, by name, the operations to be compressed. If this field is omitted, operations are not filtered by it.
+* **exclude**\ : Default is False. If this field is True, the operations with the specified types and names are excluded from the compression.
+
+Some other keys are often specific to a certain algorithm; users can refer to `pruning algorithms <./Pruner.rst>`__ and `quantization algorithms <./Quantizer.rst>`__ for the keys allowed by each algorithm.
+
+To prune all ``Conv2d`` layers with a sparsity of 0.6, the configuration can be written as:
+
+.. code-block:: python
+
+   [{
+       'sparsity': 0.6,
+       'op_types': ['Conv2d']
+   }]
+
+To control the sparsity of specific layers, the configuration can be written as:
+
+.. code-block:: python
+
+   [{
+       'sparsity': 0.8,
+       'op_types': ['default']
+   },
+   {
+       'sparsity': 0.6,
+       'op_names': ['op_name1', 'op_name2']
+   },
+   {
+       'exclude': True,
+       'op_names': ['op_name3']
+   }]
+
+This means: follow the algorithm's default setting for compressed operations with sparsity 0.8, use sparsity 0.6 for ``op_name1`` and ``op_name2``, and do not compress ``op_name3``.
+
+Quantization specific keys
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Besides the keys explained above, if you use quantization algorithms you need to specify more keys in ``config_list``\ , which are explained below.
+
+* **quant_types** : list of string.
+
+The types of quantization you want to apply; currently supported values are 'weight', 'input' and 'output'. 'weight' means applying the quantization operation to the weight parameter of modules. 'input' means applying the quantization operation to the input of the module's forward method. 'output' means applying the quantization operation to the output of the module's forward method, which is often called 'activation' in some papers.
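+
+For instance, a minimal ``config_list`` entry that quantizes both the weights and the outputs of all ``Conv2d`` layers to 8 bits could look like the following sketch (``quant_bits`` and the other keys are explained below):
+
+.. code-block:: python
+
+   [{
+       'quant_types': ['weight', 'output'],
+       'quant_bits': 8,
+       'op_types': ['Conv2d']
+   }]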
+
+
+* **quant_bits** : int or dict of {str : int}
+
+The bit length of quantization; when a dict is used, the key is the quantization type and the value is the quantization bit length, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_bits': {
+           'weight': 8,
+           'output': 4,
+       },
+   }
+
+When the value is of ``int`` type, all quantization types share the same bit length, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_bits': 8,  # weight and output quantization both use 8 bits
+   }
+
+* **quant_dtype** : str or dict of {str : str}
+
+The quantization dtype, used to determine the range of quantized values. Two choices can be used:
+
+- int: the range is signed
+- uint: the range is unsigned
+
+There are two ways to set it. One is a dict where the key is the quantization type and the value is the quantization dtype, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_dtype': {
+           'weight': 'int',
+           'output': 'uint',
+       },
+   }
+
+The other is a plain str value, so that all quantization types share the same dtype, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_dtype': 'int',  # the dtype of weight and output quantization are both 'int'
+   }
+
+* **quant_scheme** : str or dict of {str : str}
+
+The quantization scheme, used to determine the quantization manner. Four choices can be used:
+
+- per_tensor_affine: per tensor, asymmetric quantization
+- per_tensor_symmetric: per tensor, symmetric quantization
+- per_channel_affine: per channel, asymmetric quantization
+- per_channel_symmetric: per channel, symmetric quantization
+
+There are two ways to set it. One is a dict where the key is the quantization type and the value is the quantization scheme, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_scheme': {
+           'weight': 'per_channel_symmetric',
+           'output': 'per_tensor_affine',
+       },
+   }
+
+The other is a plain str value, so that all quantization types share the same quant_scheme, e.g.,
+
+.. code-block:: python
+
+   {
+       'quant_scheme': 'per_channel_symmetric',  # the quant_scheme of weight and output quantization are both 'per_channel_symmetric'
+   }
+
+The following example shows a more complete ``config_list``\ . It uses ``op_names`` (or ``op_types``\ ) to specify the target layers along with the quantization bits for those layers.
+
+.. code-block:: python
+
+   config_list = [{
+       'quant_types': ['weight'],
+       'quant_bits': 8,
+       'op_names': ['conv1'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_channel_symmetric'
+   },
+   {
+       'quant_types': ['weight'],
+       'quant_bits': 4,
+       'quant_start_step': 0,
+       'op_names': ['conv2'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_tensor_symmetric'
+   },
+   {
+       'quant_types': ['weight'],
+       'quant_bits': 3,
+       'op_names': ['fc1'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_tensor_symmetric'
+   },
+   {
+       'quant_types': ['weight'],
+       'quant_bits': 2,
+       'op_names': ['fc2'],
+       'quant_dtype': 'int',
+       'quant_scheme': 'per_channel_symmetric'
+   }]
+
+In this example, ``op_names`` specifies the layer names, and the four layers will be quantized with different ``quant_bits``.
+
+
+Export compression result
+-------------------------
+
+Export the pruned model
+^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are pruning your model, you can easily export the pruned model using the following API. The ``state_dict`` of the sparse model weights will be stored in ``model.pth``\ , which can be loaded by ``torch.load('model.pth')``.
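+
+For example, a minimal loading sketch might look like this (``MyModel`` here is a hypothetical placeholder for your own model class):
+
+.. code-block:: python
+
+   import torch
+
+   # re-create the model structure, then load the exported sparse weights
+   model = MyModel()  # hypothetical model class, replace with your own
+   model.load_state_dict(torch.load('model.pth'))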
Note that the exported ``model.pth`` has the same parameters as the original model, except that the masked weights are zero. ``mask_dict`` stores the binary masks produced by the pruning algorithm, which can be further used to speed up the model.
+
+.. code-block:: python
+
+   # export model weights and mask
+   pruner.export_model(model_path='model.pth', mask_path='mask.pth')
+
+   # apply mask to model
+   from nni.compression.pytorch import apply_compression_results
+
+   # mask_file is the path to the exported mask file
+   apply_compression_results(model, mask_file, device)
+
+
+To export the model in ``onnx`` format (``input_shape`` needs to be specified):
+
+.. code-block:: python
+
+   pruner.export_model(model_path='model.pth', mask_path='mask.pth', onnx_path='model.onnx', input_shape=[1, 1, 28, 28])
+
+
+Export the quantized model
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can export the quantized model directly by using the ``torch.save`` API, and the quantized model can be loaded by ``torch.load`` without any extra modification. The following example shows the normal procedure of saving and loading a quantized model, and getting the related parameters, in QAT.
+
+.. code-block:: python
+
+   # Save quantized model which is generated by using NNI QAT algorithm
+   torch.save(model.state_dict(), "quantized_model.pth")
+
+   # Simulate model loading procedure
+   # Have to init new model and compress it before loading
+   qmodel_load = Mnist()
+   optimizer = torch.optim.SGD(qmodel_load.parameters(), lr=0.01, momentum=0.5)
+   quantizer = QAT_Quantizer(qmodel_load, config_list, optimizer)
+   quantizer.compress()
+
+   # Load quantized model
+   qmodel_load.load_state_dict(torch.load("quantized_model.pth"))
+
+   # Get scale, zero_point and weight of conv1 in loaded model
+   conv1 = qmodel_load.conv1
+   scale = conv1.module.scale
+   zero_point = conv1.module.zero_point
+   weight = conv1.module.weight
+
+
+Speed up the model
+------------------
+
+Masks alone do not provide a real speedup of your model. The model should be sped up based on the exported masks; thus, we provide an API to speed up your model, as shown below. After invoking ``speedup_model`` on your model, it becomes a smaller one with shorter inference latency.
+
+.. code-block:: python
+
+   from nni.compression.pytorch import apply_compression_results, ModelSpeedup
+
+   dummy_input = torch.randn(config['input_shape']).to(device)
+   m_speedup = ModelSpeedup(model, dummy_input, masks_file, device)
+   m_speedup.speedup_model()
+
+
+Please refer to `here `__ for a detailed description. The example code for model speedup can be found :githublink:`here `
+
+
+Control the Fine-tuning process
+-------------------------------
+
+Enhance the fine-tuning process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Knowledge distillation effectively learns a small student model from a large teacher model. Users can enhance the fine-tuning process by utilizing knowledge distillation to improve the performance of the compressed model. Example code can be found :githublink:`here `
diff --git a/docs/en_US/Compression/advanced.rst b/docs/en_US/Compression/advanced.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e0df64214f55d421e21a39d4ddece85a0c614dec
--- /dev/null
+++ b/docs/en_US/Compression/advanced.rst
@@ -0,0 +1,9 @@
+Advanced Usage
+==============
+
+.. 
toctree:: + :maxdepth: 2 + + Framework <./Framework> + Customize a new algorithm <./CustomizeCompressor> + Automatic Model Compression (Beta) <./AutoCompression> diff --git a/docs/en_US/Compression/compression_pipeline_example.ipynb b/docs/en_US/Compression/compression_pipeline_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..bae093e0f21391321d11c1ed501633bf7e90e72b --- /dev/null +++ b/docs/en_US/Compression/compression_pipeline_example.ipynb @@ -0,0 +1,1281 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 1. Prepare model" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "class NaiveModel(torch.nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.conv1 = torch.nn.Conv2d(1, 20, 5, 1)\n", + " self.conv2 = torch.nn.Conv2d(20, 50, 5, 1)\n", + " self.fc1 = torch.nn.Linear(4 * 4 * 50, 500)\n", + " self.fc2 = torch.nn.Linear(500, 10)\n", + " self.relu1 = torch.nn.ReLU6()\n", + " self.relu2 = torch.nn.ReLU6()\n", + " self.relu3 = torch.nn.ReLU6()\n", + " self.max_pool1 = torch.nn.MaxPool2d(2, 2)\n", + " self.max_pool2 = torch.nn.MaxPool2d(2, 2)\n", + "\n", + " def forward(self, x):\n", + " x = self.relu1(self.conv1(x))\n", + " x = self.max_pool1(x)\n", + " x = self.relu2(self.conv2(x))\n", + " x = self.max_pool2(x)\n", + " x = x.view(-1, x.size()[1:].numel())\n", + " x = self.relu3(self.fc1(x))\n", + " x = self.fc2(x)\n", + " return F.log_softmax(x, dim=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# define model, optimizer, criterion, data_loader, trainer, evaluator.\n", + "\n", + "import torch.optim as optim\n", + "from torchvision import datasets, transforms\n", + "from torch.optim.lr_scheduler import StepLR\n", + "\n", + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", + "\n", + "model = NaiveModel().to(device)\n", + "\n", + "optimizer = optim.Adadelta(model.parameters(), lr=1)\n", + "\n", + "criterion = torch.nn.NLLLoss()\n", + "\n", + "transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n", + "train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)\n", + "test_dataset = datasets.MNIST('./data', train=False, transform=transform)\n", + "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64)\n", + "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000)\n", + "\n", + "def trainer(model, optimizer, criterion, epoch):\n", + " model.train()\n", + " for batch_idx, (data, target) in enumerate(train_loader):\n", + " data, target = data.to(device), target.to(device)\n", + " optimizer.zero_grad()\n", + " output = model(data)\n", + " loss = criterion(output, target)\n", + " loss.backward()\n", + " optimizer.step()\n", + " if batch_idx % 100 == 0:\n", + " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", + " epoch, batch_idx * len(data), len(train_loader.dataset),\n", + " 100. 
* batch_idx / len(train_loader), loss.item()))\n", + "\n", + "def evaluator(model):\n", + " model.eval()\n", + " test_loss = 0\n", + " correct = 0\n", + " with torch.no_grad():\n", + " for data, target in test_loader:\n", + " data, target = data.to(device), target.to(device)\n", + " output = model(data)\n", + " test_loss += F.nll_loss(output, target, reduction='sum').item()\n", + " pred = output.argmax(dim=1, keepdim=True)\n", + " correct += pred.eq(target.view_as(pred)).sum().item()\n", + "\n", + " test_loss /= len(test_loader.dataset)\n", + " acc = 100 * correct / len(test_loader.dataset)\n", + "\n", + " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", + " test_loss, correct, len(test_loader.dataset), acc))\n", + "\n", + " return acc" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0 [0/60000 (0%)]\tLoss: 2.313423\n", + "Train Epoch: 0 [6400/60000 (11%)]\tLoss: 0.091786\n", + "Train Epoch: 0 [12800/60000 (21%)]\tLoss: 0.087317\n", + "Train Epoch: 0 [19200/60000 (32%)]\tLoss: 0.036397\n", + "Train Epoch: 0 [25600/60000 (43%)]\tLoss: 0.008173\n", + "Train Epoch: 0 [32000/60000 (53%)]\tLoss: 0.047565\n", + "Train Epoch: 0 [38400/60000 (64%)]\tLoss: 0.122448\n", + "Train Epoch: 0 [44800/60000 (75%)]\tLoss: 0.036732\n", + "Train Epoch: 0 [51200/60000 (85%)]\tLoss: 0.150135\n", + "Train Epoch: 0 [57600/60000 (96%)]\tLoss: 0.109684\n", + "\n", + "Test set: Average loss: 0.0457, Accuracy: 9857/10000 (99%)\n", + "\n", + "Train Epoch: 1 [0/60000 (0%)]\tLoss: 0.020650\n", + "Train Epoch: 1 [6400/60000 (11%)]\tLoss: 0.091525\n", + "Train Epoch: 1 [12800/60000 (21%)]\tLoss: 0.019602\n", + "Train Epoch: 1 [19200/60000 (32%)]\tLoss: 0.027827\n", + "Train Epoch: 1 [25600/60000 (43%)]\tLoss: 0.019414\n", + "Train Epoch: 1 [32000/60000 (53%)]\tLoss: 0.007640\n", + "Train Epoch: 1 [38400/60000 (64%)]\tLoss: 0.051296\n", + "Train Epoch: 1 [44800/60000 (75%)]\tLoss: 0.012038\n", + "Train Epoch: 1 [51200/60000 (85%)]\tLoss: 0.121057\n", + "Train Epoch: 1 [57600/60000 (96%)]\tLoss: 0.015796\n", + "\n", + "Test set: Average loss: 0.0302, Accuracy: 9902/10000 (99%)\n", + "\n", + "Train Epoch: 2 [0/60000 (0%)]\tLoss: 0.009903\n", + "Train Epoch: 2 [6400/60000 (11%)]\tLoss: 0.062256\n", + "Train Epoch: 2 [12800/60000 (21%)]\tLoss: 0.013844\n", + "Train Epoch: 2 [19200/60000 (32%)]\tLoss: 0.014133\n", + "Train Epoch: 2 [25600/60000 (43%)]\tLoss: 0.001051\n", + "Train Epoch: 2 [32000/60000 (53%)]\tLoss: 0.006128\n", + "Train Epoch: 2 [38400/60000 (64%)]\tLoss: 0.032162\n", + "Train Epoch: 2 [44800/60000 (75%)]\tLoss: 0.007687\n", + "Train Epoch: 2 [51200/60000 (85%)]\tLoss: 0.092295\n", + "Train Epoch: 2 [57600/60000 (96%)]\tLoss: 0.006266\n", + "\n", + "Test set: Average loss: 0.0259, Accuracy: 9920/10000 (99%)\n", + "\n" + ] + } + ], + "source": [ + "# pre-train model for 3 epoches.\n", + "\n", + "scheduler = StepLR(optimizer, step_size=1, gamma=0.7)\n", + "\n", + "for epoch in range(0, 3):\n", + " trainer(model, optimizer, criterion, epoch)\n", + " evaluator(model)\n", + " scheduler.step()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "op_name: \n", + "op_type: \n", + "\n", + "op_name: conv1\n", + "op_type: \n", + "\n", + "op_name: conv2\n", + "op_type: \n", + "\n", + "op_name: fc1\n", + "op_type: \n", + "\n", + "op_name: fc2\n", + "op_type: \n", 
+ "\n", + "op_name: relu1\n", + "op_type: \n", + "\n", + "op_name: relu2\n", + "op_type: \n", + "\n", + "op_name: relu3\n", + "op_type: \n", + "\n", + "op_name: max_pool1\n", + "op_type: \n", + "\n", + "op_name: max_pool2\n", + "op_type: \n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "[None, None, None, None, None, None, None, None, None, None]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# show all op_name and op_type in the model.\n", + "\n", + "[print('op_name: {}\\nop_type: {}\\n'.format(name, type(module))) for name, module in model.named_modules()]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([20, 1, 5, 5])\n" + ] + } + ], + "source": [ + "# show the weight size of `conv1`.\n", + "\n", + "print(model.conv1.weight.data.size())" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[[[ 1.5338e-01, -1.1766e-01, -2.6654e-01, -2.9445e-02, -1.4650e-01],\n", + " [-1.8796e-01, -2.9882e-01, 6.9725e-02, 2.1561e-01, 6.5688e-02],\n", + " [ 1.5274e-01, -9.8471e-03, 3.2303e-01, 1.3472e-03, 1.7235e-01],\n", + " [ 1.1804e-01, 2.2535e-01, -8.3370e-02, -3.4553e-02, -1.2529e-01],\n", + " [-6.6012e-02, -2.0272e-02, -1.8797e-01, -4.6882e-02, -8.3206e-02]]],\n", + "\n", + "\n", + " [[[-1.2112e-01, 7.0756e-02, 5.0446e-02, 1.5156e-01, -2.7929e-02],\n", + " [-1.9744e-01, -2.1336e-03, 7.2534e-02, 6.2336e-02, 1.6039e-01],\n", + " [-6.7510e-02, 1.4636e-01, 7.1972e-02, -8.9118e-02, -4.0895e-02],\n", + " [ 2.9499e-02, 2.0788e-01, -1.4989e-01, 1.1668e-01, -2.8503e-01],\n", + " [ 8.1894e-02, -1.4489e-01, -4.2038e-02, -1.2794e-01, -5.0379e-02]]],\n", + "\n", + "\n", + " [[[ 3.8332e-02, -1.4270e-01, -1.9585e-01, 2.2653e-01, 1.0104e-01],\n", + " [-2.7956e-03, -1.4108e-01, -1.4694e-01, -1.3525e-01, 2.6959e-01],\n", + " [ 1.9522e-01, -1.2281e-01, -1.9173e-01, -1.8910e-02, 3.1572e-03],\n", + " [-1.0580e-01, -2.5239e-02, -5.8266e-02, -6.5815e-02, 6.6433e-02],\n", + " [ 8.9601e-02, 7.1189e-02, -2.4255e-01, 1.5746e-01, -1.4708e-01]]],\n", + "\n", + "\n", + " [[[-1.1963e-01, -1.7243e-01, -3.5174e-02, 1.4651e-01, -1.1675e-01],\n", + " [-1.3518e-01, 1.2830e-02, 7.7188e-02, 2.1060e-01, 4.0924e-02],\n", + " [-4.3364e-02, -1.9579e-01, -3.6559e-02, -6.9803e-02, 1.2380e-01],\n", + " [ 7.7321e-02, 3.7590e-02, 8.2935e-02, 2.2878e-01, 2.7859e-03],\n", + " [-1.3601e-01, -2.1167e-01, -2.3195e-01, -1.2524e-01, 1.0073e-01]]],\n", + "\n", + "\n", + " [[[-2.7300e-01, 6.8470e-02, 2.8405e-02, -4.5879e-03, -1.3735e-01],\n", + " [-8.9789e-02, -2.0209e-03, 5.0950e-03, 2.1633e-01, 2.5554e-01],\n", + " [ 5.4389e-02, 1.2262e-01, -1.5514e-01, -1.0416e-01, 1.3606e-01],\n", + " [-1.6794e-01, -2.8876e-02, 2.5900e-02, -2.4261e-02, 1.0923e-01],\n", + " [ 5.2524e-03, -4.4625e-02, -2.1327e-01, -1.7211e-01, -4.4819e-04]]],\n", + "\n", + "\n", + " [[[ 7.2378e-02, 1.5122e-01, -1.2964e-01, 4.9105e-02, -2.1639e-01],\n", + " [ 3.6547e-02, -1.5518e-02, 3.2059e-02, -3.2820e-02, 6.1231e-02],\n", + " [ 1.2514e-01, 8.0623e-02, 1.2686e-02, -1.0074e-01, 2.2836e-02],\n", + " [-2.6842e-02, 2.5578e-02, -2.5877e-01, -1.7808e-01, 7.6966e-02],\n", + " [-4.2424e-02, 4.7006e-02, -1.5486e-02, -4.2686e-02, 4.8482e-02]]],\n", + "\n", + "\n", + " [[[ 1.3081e-01, 9.9530e-02, -1.4729e-01, -1.7665e-01, -1.9757e-01],\n", + " [ 9.6603e-02, 2.2783e-02, 7.8402e-02, -2.8679e-02, 
8.5252e-02],\n", + " [-1.5310e-02, 1.1605e-01, -5.8300e-02, 2.4563e-02, 1.7488e-01],\n", + " [ 6.5576e-02, -1.6325e-01, -1.1318e-01, -2.9251e-02, 6.2352e-02],\n", + " [-1.9084e-03, -1.4005e-01, -1.2363e-01, -9.7985e-02, -2.0562e-01]]],\n", + "\n", + "\n", + " [[[ 4.0772e-02, -8.2086e-02, -2.7555e-01, -3.2547e-01, -1.2226e-01],\n", + " [-5.9877e-02, 9.8567e-02, 2.5186e-01, -1.0280e-01, -2.3416e-01],\n", + " [ 8.5760e-02, 1.0896e-01, 1.4898e-01, 2.1579e-01, 8.5297e-02],\n", + " [ 5.4720e-02, -1.7226e-01, -7.2518e-02, 6.7099e-03, -1.6011e-03],\n", + " [-8.9944e-02, 1.7404e-01, -3.6985e-02, 1.8602e-01, 7.2353e-02]]],\n", + "\n", + "\n", + " [[[ 1.6276e-02, -9.6439e-02, -9.6085e-02, -2.4267e-01, -1.8521e-01],\n", + " [ 6.3310e-02, 1.7866e-01, 1.1694e-01, -1.4464e-01, -2.7711e-01],\n", + " [-2.4514e-02, 2.2222e-01, 2.1053e-01, -1.4271e-01, 8.7045e-02],\n", + " [-1.9207e-01, -5.4719e-02, -5.7775e-03, -1.0034e-05, -1.0923e-01],\n", + " [-2.4006e-02, 2.3780e-02, 1.8988e-01, 2.4734e-01, 4.8097e-02]]],\n", + "\n", + "\n", + " [[[ 1.1335e-01, -5.8451e-02, 5.2440e-02, -1.3223e-01, -2.5534e-02],\n", + " [ 9.1323e-02, -6.0707e-02, 2.3524e-01, 2.4992e-01, 8.7842e-02],\n", + " [ 2.9002e-02, 3.5379e-02, -5.9689e-02, -2.8363e-03, 1.8618e-01],\n", + " [-2.9671e-01, 8.1830e-03, 1.1076e-01, -5.4118e-02, -6.1685e-02],\n", + " [-1.7580e-01, -3.4534e-01, -3.9250e-01, -2.7569e-01, -2.6131e-01]]],\n", + "\n", + "\n", + " [[[ 1.1586e-01, -7.5997e-02, -1.4614e-01, 4.8750e-02, 1.8097e-01],\n", + " [-6.7027e-02, -1.4901e-01, -1.5614e-02, -1.0379e-02, 9.5526e-02],\n", + " [-3.2333e-02, -1.5107e-01, -1.9498e-01, 1.0083e-01, 2.2328e-01],\n", + " [-2.0692e-01, -6.3798e-02, -1.2524e-01, 1.9549e-01, 1.9682e-01],\n", + " [-2.1494e-01, 1.0475e-01, -2.4858e-02, -9.7831e-02, 1.1551e-01]]],\n", + "\n", + "\n", + " [[[ 6.3785e-02, -1.8044e-01, -1.0190e-01, -1.3588e-01, 8.5433e-02],\n", + " [ 2.0675e-01, 3.3238e-02, 9.2437e-02, 1.1799e-01, 2.1111e-01],\n", + " [-5.2138e-02, 1.5790e-01, 1.8151e-01, 8.0470e-02, 1.0131e-01],\n", + " [-4.4786e-02, 1.1771e-01, 2.1706e-02, -1.2563e-01, -2.1142e-01],\n", + " [-2.3589e-01, -2.1154e-01, -1.7890e-01, -2.7769e-01, -1.2512e-01]]],\n", + "\n", + "\n", + " [[[ 1.9133e-01, 2.4711e-01, 1.0413e-01, -1.9187e-01, -3.0991e-01],\n", + " [-1.2382e-01, 8.3641e-03, -5.6734e-02, 5.8376e-02, 2.2880e-02],\n", + " [-3.1734e-01, -1.0637e-02, -5.5974e-02, 1.0676e-01, -1.1080e-02],\n", + " [-2.2980e-01, 2.0486e-01, 1.0147e-01, 1.4484e-01, 5.2265e-02],\n", + " [ 7.4410e-02, 2.2806e-02, 8.5137e-02, -2.1809e-01, 3.1704e-02]]],\n", + "\n", + "\n", + " [[[-1.1006e-01, -2.5311e-01, 1.8925e-02, 1.0399e-02, 1.1951e-01],\n", + " [-2.1116e-01, 1.8409e-01, 3.2172e-02, 1.5962e-01, -7.9457e-02],\n", + " [ 1.1059e-01, 9.1966e-02, 1.0777e-01, -9.9132e-02, -4.4586e-02],\n", + " [-8.7919e-02, -3.7283e-02, 9.1275e-02, -3.7412e-02, 3.8875e-02],\n", + " [-4.3558e-02, 1.6196e-01, -4.7944e-03, -1.7560e-02, -1.2593e-01]]],\n", + "\n", + "\n", + " [[[ 7.6976e-02, -3.8627e-02, 1.2610e-01, 1.1994e-01, 2.1706e-03],\n", + " [ 7.4357e-02, 6.7929e-02, 3.1386e-02, 1.4606e-01, 2.1429e-01],\n", + " [-2.6569e-01, -4.2631e-04, -3.6654e-02, -3.0967e-02, -9.4961e-02],\n", + " [-2.0192e-01, -3.5423e-01, -2.5246e-01, -3.5092e-01, -2.4159e-01],\n", + " [ 1.7636e-02, 1.3744e-01, -1.0306e-01, 8.8370e-02, 7.3258e-02]]],\n", + "\n", + "\n", + " [[[ 2.0016e-01, 1.0956e-01, -5.9223e-02, 6.4871e-03, -2.4165e-01],\n", + " [ 5.6283e-02, 1.7276e-01, -2.2316e-01, -1.6699e-01, -7.0742e-02],\n", + " [ 2.6179e-01, -2.5102e-01, -2.0774e-01, -9.6413e-02, 
3.4367e-02],\n", + " [-9.1882e-02, -2.9195e-01, -8.7432e-02, 1.0144e-01, -2.0559e-02],\n", + " [-2.5668e-01, -9.8016e-02, 1.1103e-01, -3.0233e-02, 1.1076e-01]]],\n", + "\n", + "\n", + " [[[ 1.0027e-03, -5.7955e-02, -2.1339e-01, -1.6729e-01, -2.0870e-01],\n", + " [ 4.2464e-02, 2.3177e-01, -6.1459e-02, -1.0905e-01, 1.7613e-02],\n", + " [-1.2282e-01, 2.1762e-01, -1.3553e-02, 2.7476e-01, 1.6703e-01],\n", + " [-5.6282e-02, 1.2731e-02, 1.0944e-01, -1.7347e-01, 4.4497e-02],\n", + " [ 5.7346e-02, -5.4657e-02, 4.8718e-02, -2.6221e-02, -2.6933e-02]]],\n", + "\n", + "\n", + " [[[ 6.7697e-02, 1.5692e-01, 2.7050e-01, 1.5936e-02, 1.7659e-01],\n", + " [-2.8899e-02, -1.4866e-01, 3.1838e-02, 1.0903e-01, 1.2292e-01],\n", + " [-1.3608e-01, -4.3198e-03, -9.8925e-02, -4.5599e-02, 1.3452e-01],\n", + " [-5.1435e-02, -2.3815e-01, -2.4151e-01, -4.8556e-02, 1.3825e-01],\n", + " [-1.2823e-01, 8.9324e-03, -1.5313e-01, -2.2933e-01, -3.4081e-02]]],\n", + "\n", + "\n", + " [[[-1.8396e-01, -6.8774e-03, -1.6675e-01, 7.1980e-03, 1.9922e-02],\n", + " [ 1.3416e-01, -1.1450e-01, -1.5277e-01, -6.5713e-02, -9.5435e-02],\n", + " [ 1.5406e-01, -9.1235e-02, -1.0880e-01, -7.1603e-02, -9.5575e-02],\n", + " [ 2.1772e-01, 8.4073e-02, -2.5264e-01, -2.1428e-01, 1.9537e-01],\n", + " [ 1.3124e-01, 7.9532e-02, -2.4044e-01, -1.5717e-01, 1.6562e-01]]],\n", + "\n", + "\n", + " [[[ 1.1849e-01, -5.0517e-03, -1.8900e-01, 1.8093e-02, 6.4660e-02],\n", + " [-1.5309e-01, -2.0106e-01, -8.6551e-02, 5.2692e-03, 1.5448e-01],\n", + " [-3.0727e-01, 4.9703e-02, -4.7637e-02, 2.9111e-01, -1.3173e-01],\n", + " [-8.5167e-02, -1.3540e-01, 2.9235e-01, 3.7895e-03, -9.4651e-02],\n", + " [-6.0694e-02, 9.6936e-02, 1.0533e-01, -6.1769e-02, -1.8086e-01]]]],\n", + " device='cuda:0')\n" + ] + } + ], + "source": [ + "# show the weight of `conv1`.\n", + "\n", + "print(model.conv1.weight.data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Prepare config_list for pruning" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# we will prune 50% weights in `conv1`.\n", + "\n", + "config_list = [{\n", + " 'sparsity': 0.5,\n", + " 'op_types': ['Conv2d'],\n", + " 'op_names': ['conv1']\n", + "}]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. 
Choose a pruner and pruning" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# use l1filter pruner to prune the model\n", + "\n", + "from nni.algorithms.compression.pytorch.pruning import L1FilterPruner\n", + "\n", + "# Note that if you use a compressor that need you to pass a optimizer,\n", + "# you need a new optimizer instead of you have used above, because NNI might modify the optimizer.\n", + "# And of course this modified optimizer can not be used in finetuning.\n", + "pruner = L1FilterPruner(model, config_list)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "op_name: \n", + "op_type: \n", + "\n", + "op_name: conv1\n", + "op_type: \n", + "\n", + "op_name: conv1.module\n", + "op_type: \n", + "\n", + "op_name: conv2\n", + "op_type: \n", + "\n", + "op_name: fc1\n", + "op_type: \n", + "\n", + "op_name: fc2\n", + "op_type: \n", + "\n", + "op_name: relu1\n", + "op_type: \n", + "\n", + "op_name: relu2\n", + "op_type: \n", + "\n", + "op_name: relu3\n", + "op_type: \n", + "\n", + "op_name: max_pool1\n", + "op_type: \n", + "\n", + "op_name: max_pool2\n", + "op_type: \n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "[None, None, None, None, None, None, None, None, None, None, None]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# we can find the `conv1` has been wrapped, the origin `conv1` changes to `conv1.module`.\n", + "# the weight of conv1 will modify by `weight * mask` in `forward()`. The initial mask is a `ones_like(weight)` tensor.\n", + "\n", + "[print('op_name: {}\\nop_type: {}\\n'.format(name, type(module))) for name, module in model.named_modules()]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "NaiveModel(\n", + " (conv1): PrunerModuleWrapper(\n", + " (module): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))\n", + " )\n", + " (conv2): Conv2d(20, 50, kernel_size=(5, 5), stride=(1, 1))\n", + " (fc1): Linear(in_features=800, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=10, bias=True)\n", + " (relu1): ReLU6()\n", + " (relu2): ReLU6()\n", + " (relu3): ReLU6()\n", + " (max_pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + " (max_pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + ")" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# compress the model, the mask will be updated.\n", + "\n", + "pruner.compress()" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([20, 1, 5, 5])\n" + ] + } + ], + "source": [ + "# show the mask size of `conv1`\n", + "\n", + "print(model.conv1.weight_mask.size())" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 
0.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]],\n", + "\n", + "\n", + " [[[1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.],\n", + " [1., 1., 1., 1., 1.]]],\n", + "\n", + "\n", + " [[[0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.],\n", + " [0., 0., 0., 0., 0.]]]], device='cuda:0')\n" + ] + } + ], + "source": [ + "# show the mask of `conv1`\n", + "\n", + "print(model.conv1.weight_mask)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[[[ 1.5338e-01, -1.1766e-01, -2.6654e-01, -2.9445e-02, -1.4650e-01],\n", + " [-1.8796e-01, -2.9882e-01, 6.9725e-02, 2.1561e-01, 6.5688e-02],\n", + " [ 1.5274e-01, -9.8471e-03, 3.2303e-01, 
1.3472e-03, 1.7235e-01],\n", + " [ 1.1804e-01, 2.2535e-01, -8.3370e-02, -3.4553e-02, -1.2529e-01],\n", + " [-6.6012e-02, -2.0272e-02, -1.8797e-01, -4.6882e-02, -8.3206e-02]]],\n", + "\n", + "\n", + " [[[-0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 3.8332e-02, -1.4270e-01, -1.9585e-01, 2.2653e-01, 1.0104e-01],\n", + " [-2.7956e-03, -1.4108e-01, -1.4694e-01, -1.3525e-01, 2.6959e-01],\n", + " [ 1.9522e-01, -1.2281e-01, -1.9173e-01, -1.8910e-02, 3.1572e-03],\n", + " [-1.0580e-01, -2.5239e-02, -5.8266e-02, -6.5815e-02, 6.6433e-02],\n", + " [ 8.9601e-02, 7.1189e-02, -2.4255e-01, 1.5746e-01, -1.4708e-01]]],\n", + "\n", + "\n", + " [[[-0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00]]],\n", + "\n", + "\n", + " [[[-0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 4.0772e-02, -8.2086e-02, -2.7555e-01, -3.2547e-01, -1.2226e-01],\n", + " [-5.9877e-02, 9.8567e-02, 2.5186e-01, -1.0280e-01, -2.3416e-01],\n", + " [ 8.5760e-02, 1.0896e-01, 1.4898e-01, 2.1579e-01, 8.5297e-02],\n", + " [ 5.4720e-02, -1.7226e-01, -7.2518e-02, 6.7099e-03, -1.6011e-03],\n", + " [-8.9944e-02, 1.7404e-01, -3.6985e-02, 1.8602e-01, 7.2353e-02]]],\n", + "\n", + "\n", + " [[[ 1.6276e-02, -9.6439e-02, -9.6085e-02, -2.4267e-01, -1.8521e-01],\n", + " [ 6.3310e-02, 1.7866e-01, 1.1694e-01, -1.4464e-01, -2.7711e-01],\n", + " [-2.4514e-02, 2.2222e-01, 2.1053e-01, -1.4271e-01, 8.7045e-02],\n", + " [-1.9207e-01, -5.4719e-02, -5.7775e-03, -1.0034e-05, -1.0923e-01],\n", + " [-2.4006e-02, 2.3780e-02, 1.8988e-01, 2.4734e-01, 4.8097e-02]]],\n", + "\n", + "\n", + " [[[ 1.1335e-01, -5.8451e-02, 5.2440e-02, -1.3223e-01, -2.5534e-02],\n", + " [ 9.1323e-02, -6.0707e-02, 2.3524e-01, 2.4992e-01, 8.7842e-02],\n", + " [ 2.9002e-02, 3.5379e-02, -5.9689e-02, -2.8363e-03, 1.8618e-01],\n", + " [-2.9671e-01, 8.1830e-03, 1.1076e-01, 
-5.4118e-02, -6.1685e-02],\n", + " [-1.7580e-01, -3.4534e-01, -3.9250e-01, -2.7569e-01, -2.6131e-01]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 6.3785e-02, -1.8044e-01, -1.0190e-01, -1.3588e-01, 8.5433e-02],\n", + " [ 2.0675e-01, 3.3238e-02, 9.2437e-02, 1.1799e-01, 2.1111e-01],\n", + " [-5.2138e-02, 1.5790e-01, 1.8151e-01, 8.0470e-02, 1.0131e-01],\n", + " [-4.4786e-02, 1.1771e-01, 2.1706e-02, -1.2563e-01, -2.1142e-01],\n", + " [-2.3589e-01, -2.1154e-01, -1.7890e-01, -2.7769e-01, -1.2512e-01]]],\n", + "\n", + "\n", + " [[[ 1.9133e-01, 2.4711e-01, 1.0413e-01, -1.9187e-01, -3.0991e-01],\n", + " [-1.2382e-01, 8.3641e-03, -5.6734e-02, 5.8376e-02, 2.2880e-02],\n", + " [-3.1734e-01, -1.0637e-02, -5.5974e-02, 1.0676e-01, -1.1080e-02],\n", + " [-2.2980e-01, 2.0486e-01, 1.0147e-01, 1.4484e-01, 5.2265e-02],\n", + " [ 7.4410e-02, 2.2806e-02, 8.5137e-02, -2.1809e-01, 3.1704e-02]]],\n", + "\n", + "\n", + " [[[-0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 7.6976e-02, -3.8627e-02, 1.2610e-01, 1.1994e-01, 2.1706e-03],\n", + " [ 7.4357e-02, 6.7929e-02, 3.1386e-02, 1.4606e-01, 2.1429e-01],\n", + " [-2.6569e-01, -4.2631e-04, -3.6654e-02, -3.0967e-02, -9.4961e-02],\n", + " [-2.0192e-01, -3.5423e-01, -2.5246e-01, -3.5092e-01, -2.4159e-01],\n", + " [ 1.7636e-02, 1.3744e-01, -1.0306e-01, 8.8370e-02, 7.3258e-02]]],\n", + "\n", + "\n", + " [[[ 2.0016e-01, 1.0956e-01, -5.9223e-02, 6.4871e-03, -2.4165e-01],\n", + " [ 5.6283e-02, 1.7276e-01, -2.2316e-01, -1.6699e-01, -7.0742e-02],\n", + " [ 2.6179e-01, -2.5102e-01, -2.0774e-01, -9.6413e-02, 3.4367e-02],\n", + " [-9.1882e-02, -2.9195e-01, -8.7432e-02, 1.0144e-01, -2.0559e-02],\n", + " [-2.5668e-01, -9.8016e-02, 1.1103e-01, -3.0233e-02, 1.1076e-01]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00],\n", + " [ 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00]]],\n", + "\n", + "\n", + " [[[-1.8396e-01, -6.8774e-03, -1.6675e-01, 7.1980e-03, 1.9922e-02],\n", + " [ 1.3416e-01, -1.1450e-01, -1.5277e-01, -6.5713e-02, -9.5435e-02],\n", + " [ 1.5406e-01, -9.1235e-02, -1.0880e-01, -7.1603e-02, -9.5575e-02],\n", + " [ 2.1772e-01, 8.4073e-02, -2.5264e-01, -2.1428e-01, 1.9537e-01],\n", + " [ 1.3124e-01, 7.9532e-02, -2.4044e-01, 
-1.5717e-01, 1.6562e-01]]],\n", + "\n", + "\n", + " [[[ 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, -0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, -0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00],\n", + " [-0.0000e+00, 0.0000e+00, 0.0000e+00, -0.0000e+00, -0.0000e+00]]]],\n", + " device='cuda:0')\n" + ] + } + ], + "source": [ + "# use a dummy input to apply the sparsify.\n", + "\n", + "model(torch.rand(1, 1, 28, 28).to(device))\n", + "\n", + "# the weights of `conv1` have been sparsified.\n", + "\n", + "print(model.conv1.module.weight.data)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:26:05] INFO (nni.compression.pytorch.compressor/MainThread) Model state_dict saved to pruned_naive_mnist_l1filter.pth\n", + "[2021-07-26 22:26:05] INFO (nni.compression.pytorch.compressor/MainThread) Mask dict saved to mask_naive_mnist_l1filter.pth\n" + ] + } + ], + "source": [ + "# export the sparsified model state to './pruned_naive_mnist_l1filter.pth'.\n", + "# export the mask to './mask_naive_mnist_l1filter.pth'.\n", + "\n", + "pruner.export_model(model_path='pruned_naive_mnist_l1filter.pth', mask_path='mask_naive_mnist_l1filter.pth')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. Speed Up" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NaiveModel(\n", + " (conv1): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))\n", + " (conv2): Conv2d(20, 50, kernel_size=(5, 5), stride=(1, 1))\n", + " (fc1): Linear(in_features=800, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=10, bias=True)\n", + " (relu1): ReLU6()\n", + " (relu2): ReLU6()\n", + " (relu3): ReLU6()\n", + " (max_pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + " (max_pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + ")\n" + ] + } + ], + "source": [ + "# If you use a wrapped model, don't forget to unwrap it.\n", + "\n", + "pruner._unwrap_model()\n", + "\n", + "# the model has been unwrapped.\n", + "\n", + "print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":22: TracerWarning: Converting a tensor to a Python index might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. 
This means that the trace might not generalize to other inputs!\n", + " x = x.view(-1, x.size()[1:].numel())\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) start to speed up the model\n", + "[2021-07-26 22:26:18] INFO (FixMaskConflict/MainThread) {'conv1': 1, 'conv2': 1}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:26:18] INFO (FixMaskConflict/MainThread) dim0 sparsity: 0.500000\n", + "[2021-07-26 22:26:18] INFO (FixMaskConflict/MainThread) dim1 sparsity: 0.000000\n", + "[2021-07-26 22:26:18] INFO (FixMaskConflict/MainThread) Dectected conv prune dim\" 0\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) infer module masks...\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for conv1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for relu1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for max_pool1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for conv2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for relu2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for max_pool2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for .aten::view.9\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.jit_translate/MainThread) View Module output size: [-1, 800]\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for fc1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for relu3\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for fc2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update mask for .aten::log_softmax.10\n", + "[2021-07-26 22:26:18] ERROR (nni.compression.pytorch.speedup.jit_translate/MainThread) aten::log_softmax is not Supported! Please report an issue at https://github.com/microsoft/nni. 
Thanks~\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for .aten::log_softmax.10\n", + "[2021-07-26 22:26:18] WARNING (nni.compression.pytorch.speedup.compressor/MainThread) Note: .aten::log_softmax.10 does not have corresponding mask inference object\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for fc2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the fc2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for relu3\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the relu3\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for fc1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the fc1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for .aten::view.9\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the .aten::view.9\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for max_pool2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the max_pool2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for relu2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the relu2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for conv2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the conv2\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for max_pool1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the max_pool1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for relu1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the relu1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update indirect sparsity for conv1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Update the indirect sparsity for the conv1\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) resolve the mask conflict\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace compressed modules...\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: conv1, op_type: Conv2d)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: 
relu1, op_type: ReLU6)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: max_pool1, op_type: MaxPool2d)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: conv2, op_type: Conv2d)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: relu2, op_type: ReLU6)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: max_pool2, op_type: MaxPool2d)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Warning: cannot replace (name: .aten::view.9, op_type: aten::view) which is func type\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: fc1, op_type: Linear)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compress_modules/MainThread) replace linear with new in_features: 800, out_features: 500\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: relu3, op_type: ReLU6)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) replace module (name: fc2, op_type: Linear)\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compress_modules/MainThread) replace linear with new in_features: 500, out_features: 10\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) Warning: cannot replace (name: .aten::log_softmax.10, op_type: aten::log_softmax) which is func type\n", + "[2021-07-26 22:26:18] INFO (nni.compression.pytorch.speedup.compressor/MainThread) speedup done\n" + ] + } + ], + "source": [ + "from nni.compression.pytorch import ModelSpeedup\n", + "\n", + "m_speedup = ModelSpeedup(model, dummy_input=torch.rand(10, 1, 28, 28).to(device), masks_file='mask_naive_mnist_l1filter.pth')\n", + "m_speedup.speedup_model()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NaiveModel(\n", + " (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))\n", + " (conv2): Conv2d(10, 50, kernel_size=(5, 5), stride=(1, 1))\n", + " (fc1): Linear(in_features=800, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=10, bias=True)\n", + " (relu1): ReLU6()\n", + " (relu2): ReLU6()\n", + " (relu3): ReLU6()\n", + " (max_pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + " (max_pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + ")\n" + ] + } + ], + "source": [ + "# the `conv1` has been replace from `Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))` to `Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))`\n", + "# and the following layer `conv2` has also changed because the input channel of `conv2` should aware the output channel of `conv1`.\n", + "\n", + "print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0 [0/60000 (0%)]\tLoss: 0.306930\n", + "Train Epoch: 0 [6400/60000 (11%)]\tLoss: 0.045807\n", + "Train Epoch: 0 [12800/60000 (21%)]\tLoss: 0.049293\n", + "Train Epoch: 0 [19200/60000 (32%)]\tLoss: 0.031464\n", + "Train Epoch: 0 [25600/60000 (43%)]\tLoss: 0.005392\n", + "Train Epoch: 0 [32000/60000 
(53%)]\tLoss: 0.005652\n", + "Train Epoch: 0 [38400/60000 (64%)]\tLoss: 0.040619\n", + "Train Epoch: 0 [44800/60000 (75%)]\tLoss: 0.016515\n", + "Train Epoch: 0 [51200/60000 (85%)]\tLoss: 0.092886\n", + "Train Epoch: 0 [57600/60000 (96%)]\tLoss: 0.041380\n", + "\n", + "Test set: Average loss: 0.0257, Accuracy: 9917/10000 (99%)\n", + "\n" + ] + } + ], + "source": [ + "# finetune the model to recover the accuracy.\n", + "\n", + "optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n", + "\n", + "for epoch in range(0, 1):\n", + " trainer(model, optimizer, criterion, epoch)\n", + " evaluator(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 5. Prepare config_list for quantization" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "config_list = [{\n", + " 'quant_types': ['weight', 'input'],\n", + " 'quant_bits': {'weight': 8, 'input': 8},\n", + " 'op_names': ['conv1', 'conv2']\n", + "}]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 6. Choose a quantizer and quantizing" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "NaiveModel(\n", + " (conv1): QuantizerModuleWrapper(\n", + " (module): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))\n", + " )\n", + " (conv2): QuantizerModuleWrapper(\n", + " (module): Conv2d(10, 50, kernel_size=(5, 5), stride=(1, 1))\n", + " )\n", + " (fc1): Linear(in_features=800, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=10, bias=True)\n", + " (relu1): ReLU6()\n", + " (relu2): ReLU6()\n", + " (relu3): ReLU6()\n", + " (max_pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + " (max_pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", + ")" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer\n", + "\n", + "quantizer = QAT_Quantizer(model, config_list, optimizer)\n", + "quantizer.compress()" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0 [0/60000 (0%)]\tLoss: 0.004960\n", + "Train Epoch: 0 [6400/60000 (11%)]\tLoss: 0.036269\n", + "Train Epoch: 0 [12800/60000 (21%)]\tLoss: 0.018744\n", + "Train Epoch: 0 [19200/60000 (32%)]\tLoss: 0.021916\n", + "Train Epoch: 0 [25600/60000 (43%)]\tLoss: 0.003095\n", + "Train Epoch: 0 [32000/60000 (53%)]\tLoss: 0.003947\n", + "Train Epoch: 0 [38400/60000 (64%)]\tLoss: 0.032094\n", + "Train Epoch: 0 [44800/60000 (75%)]\tLoss: 0.017358\n", + "Train Epoch: 0 [51200/60000 (85%)]\tLoss: 0.083886\n", + "Train Epoch: 0 [57600/60000 (96%)]\tLoss: 0.040433\n", + "\n", + "Test set: Average loss: 0.0247, Accuracy: 9917/10000 (99%)\n", + "\n" + ] + } + ], + "source": [ + "# finetune the model for calibration.\n", + "\n", + "for epoch in range(0, 1):\n", + " trainer(model, optimizer, criterion, epoch)\n", + " evaluator(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-07-26 22:34:41] INFO (nni.compression.pytorch.compressor/MainThread) Model state_dict saved to quantized_naive_mnist_l1filter.pth\n", + "[2021-07-26 22:34:41] INFO 
(nni.compression.pytorch.compressor/MainThread) Mask dict saved to calibration_naive_mnist_l1filter.pth\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{'conv1': {'weight_bit': 8,\n",
+       "  'tracked_min_input': -0.42417848110198975,\n",
+       "  'tracked_max_input': 2.8212687969207764},\n",
+       " 'conv2': {'weight_bit': 8,\n",
+       "  'tracked_min_input': 0.0,\n",
+       "  'tracked_max_input': 4.246923446655273}}"
+      ]
+     },
+     "execution_count": 22,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# export the quantized model state to './quantized_naive_mnist_l1filter.pth'.\n",
+    "# export the calibration config to './calibration_naive_mnist_l1filter.pth'.\n",
+    "\n",
+    "calibration_config = quantizer.export_model(model_path='quantized_naive_mnist_l1filter.pth', calibration_path='calibration_naive_mnist_l1filter.pth')\n",
+    "calibration_config"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 7. Speed Up"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# speed up with TensorRT\n",
+    "\n",
+    "from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT\n",
+    "\n",
+    "engine = ModelSpeedupTensorRT(model, (32, 1, 28, 28), config=calibration_config, batchsize=32)\n",
+    "engine.compress()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/en_US/Compression/pruning.rst b/docs/en_US/Compression/pruning.rst
new file mode 100644
index 0000000000000000000000000000000000000000..023b0feeb5a85fea17ee1b63e512edb70d353138
--- /dev/null
+++ b/docs/en_US/Compression/pruning.rst
@@ -0,0 +1,25 @@
+#################
+Pruning
+#################
+
+Pruning is a common technique to compress neural network models.
+Pruning methods explore the redundancy in the model weights (parameters) and try to remove/prune the redundant and uncritical weights.
+The redundant elements are pruned from the model: their values are zeroed, and we make sure they do not take part in the back-propagation process.
+
+From the perspective of pruning granularity, fine-grained pruning (or unstructured pruning) refers to pruning each individual weight separately.
+Coarse-grained pruning (or structured pruning) prunes an entire group of weights, such as a convolutional filter.
+
+NNI provides multiple unstructured pruning and structured pruning algorithms.
+It supports TensorFlow and PyTorch with a unified interface.
+To prune their models, users only need to add several lines to their code.
+For structured filter pruning, NNI also provides a dependency-aware mode, in which the
+filter pruner achieves a better speed gain after the speedup.
+
+For details, please refer to the following tutorials:
+
+.. 
+.. toctree::
+   :maxdepth: 2
+
+   Pruners
+   Dependency Aware Mode
+   Model Speedup
diff --git a/docs/en_US/Compression/quantization.rst b/docs/en_US/Compression/quantization.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d909415f253cc73a6b21bee13c429dc545c6f5ca
--- /dev/null
+++ b/docs/en_US/Compression/quantization.rst
@@ -0,0 +1,18 @@
+#################
+Quantization
+#################
+
+Quantization refers to compressing models by reducing the number of bits required to represent weights or activations,
+which can reduce the computations and the inference time. In the context of deep neural networks, the major numerical
+format for model weights is 32-bit float, or FP32. Many research works have demonstrated that weights and activations
+can be represented using 8-bit integers without significant loss in accuracy. Even lower bit-widths, such as 4/2/1 bits,
+are an active field of research.
+
+A quantizer is the implementation of a quantization algorithm in NNI. NNI provides the quantizers listed below; you can also
+create your own quantizer using the NNI model compression interface.
+
+.. toctree::
+   :maxdepth: 2
+
+   Quantizers
+   Quantization Speedup
diff --git a/docs/en_US/Compression/v2_pruning.rst b/docs/en_US/Compression/v2_pruning.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e422eae7952ce573d1336d3852d7cf16b1908c27
--- /dev/null
+++ b/docs/en_US/Compression/v2_pruning.rst
@@ -0,0 +1,26 @@
+Pruning V2
+==========
+
+Pruning V2 is a refactoring of the old version and provides more powerful functionality.
+Compared with the old version, the iterative pruning process is detached from the pruner, so the pruner is only responsible for pruning and generating the masks once.
+What's more, pruning V2 unifies the pruning process and allows a more flexible combination of pruning components.
+The task generator only cares about the pruning effect that should be achieved in each round, and uses a config list to express how to prune in the next step.
+The pruner resets itself with the model and config list given by the task generator, then generates the masks for the current step.
+
+For a clearer view of the structure, please refer to the figure below.
+
+.. image:: ../../img/pruning_process.png
+   :target: ../../img/pruning_process.png
+   :alt:
+
+In V2, a pruning process is usually driven by a pruning scheduler, which contains a specific pruner and a task generator.
+But users can also use a pruner directly, as in pruning V1.
+
+For details, please refer to the following tutorials:
+
+.. toctree::
+   :maxdepth: 2
+
+   Pruning Algorithms
+   Pruning Scheduler
+   Pruning Config List
diff --git a/docs/en_US/Compression/v2_pruning_algo.rst b/docs/en_US/Compression/v2_pruning_algo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23b711894522e797437dfb801e394e96daf3e54c
--- /dev/null
+++ b/docs/en_US/Compression/v2_pruning_algo.rst
@@ -0,0 +1,587 @@
+Supported Pruning Algorithms in NNI
+===================================
+
+NNI provides several pruning algorithms reproduced from papers. In pruning v2, NNI splits pruning algorithms into more fine-grained components.
+This means users can freely combine components from different algorithms,
+or easily use a component of their own implementation to replace a step in the original algorithm to implement their own pruning algorithm.
+
+Right now, the logic of generating masks in one step is implemented as pruners,
+and the logic of scheduling sparsity across iterations is implemented as iterative pruners.
+
+**Pruner**
+
+* `Level Pruner <#level-pruner>`__
+* `L1 Norm Pruner <#l1-norm-pruner>`__
+* `L2 Norm Pruner <#l2-norm-pruner>`__
+* `FPGM Pruner <#fpgm-pruner>`__
+* `Slim Pruner <#slim-pruner>`__
+* `Activation APoZ Rank Pruner <#activation-apoz-rank-pruner>`__
+* `Activation Mean Rank Pruner <#activation-mean-rank-pruner>`__
+* `Taylor FO Weight Pruner <#taylor-fo-weight-pruner>`__
+* `ADMM Pruner <#admm-pruner>`__
+* `Movement Pruner <#movement-pruner>`__
+
+**Iterative Pruner**
+
+* `Linear Pruner <#linear-pruner>`__
+* `AGP Pruner <#agp-pruner>`__
+* `Lottery Ticket Pruner <#lottery-ticket-pruner>`__
+* `Simulated Annealing Pruner <#simulated-annealing-pruner>`__
+* `Auto Compress Pruner <#auto-compress-pruner>`__
+* `AMC Pruner <#amc-pruner>`__
+
+Level Pruner
+------------
+
+This is a basic pruner; some papers call it magnitude pruning or fine-grained pruning.
+
+It masks the weights with smaller absolute values in each specified layer, by the sparsity ratio configured in the config list.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import LevelPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
+   pruner = LevelPruner(model, config_list)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/level_pruning_torch.py `
+
+User configuration for Level Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.LevelPruner
+
+L1 Norm Pruner
+--------------
+
+L1 norm pruner computes the L1 norm of the layer weight on the first dimension,
+then prunes the weight blocks on this dimension with smaller L1 norm values.
+That is, it computes the L1 norm of the filters in a convolution layer as metric values,
+and the L1 norm of the weight rows in a linear layer as metric values.
+
+For more details, please refer to `PRUNING FILTERS FOR EFFICIENT CONVNETS `__\.
+
+In addition, L1 norm pruner also supports dependency-aware mode.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = L1NormPruner(model, config_list)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/norm_pruning_torch.py `
+
+User configuration for L1 Norm Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.L1NormPruner
+
+L2 Norm Pruner
+--------------
+
+L2 norm pruner is a variant of L1 norm pruner. It uses the L2 norm as the metric to determine which weight elements should be pruned.
+
+L2 norm pruner also supports dependency-aware mode.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import L2NormPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = L2NormPruner(model, config_list)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/norm_pruning_torch.py `
+
+User configuration for L2 Norm Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.L2NormPruner
+
+FPGM Pruner
+-----------
+
+FPGM pruner prunes the blocks of the weight on the first dimension with the smallest geometric median.
+FPGM chooses the weight blocks with the most replaceable contribution. + +For more details, please refer to `Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration `__. + +FPGM pruner also supports dependency-aware mode. + +Usage +^^^^^^ + +.. code-block:: python + + from nni.algorithms.compression.v2.pytorch.pruning import FPGMPruner + config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }] + pruner = FPGMPruner(model, config_list) + masked_model, masks = pruner.compress() + +For detailed example please refer to :githublink:`examples/model_compress/pruning/v2/fpgm_pruning_torch.py ` + +User configuration for FPGM Pruner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**PyTorch** + +.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.FPGMPruner + +Slim Pruner +----------- + +Slim pruner adds sparsity regularization on the scaling factors of batch normalization (BN) layers during training to identify unimportant channels. +The channels with small scaling factor values will be pruned. + +For more details, please refer to `Learning Efficient Convolutional Networks through Network Slimming `__\. + +Usage +^^^^^^ + +.. code-block:: python + + import nni + from nni.algorithms.compression.v2.pytorch.pruning import SlimPruner + + # make sure you have used nni.trace to wrap the optimizer class before initialize + traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()) + + config_list = [{ 'sparsity': 0.8, 'op_types': ['BatchNorm2d'] }] + pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1) + masked_model, masks = pruner.compress() + +For detailed example please refer to :githublink:`examples/model_compress/pruning/v2/slim_pruning_torch.py ` + +User configuration for Slim Pruner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**PyTorch** + +.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.SlimPruner + +Activation APoZ Rank Pruner +--------------------------- + +Activation APoZ rank pruner is a pruner which prunes on the first weight dimension, +with the smallest importance criterion ``APoZ`` calculated from the output activations of convolution layers to achieve a preset level of network sparsity. +The pruning criterion ``APoZ`` is explained in the paper `Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures `__. + +The APoZ is defined as: + +:math:`APoZ_{c}^{(i)} = APoZ\left(O_{c}^{(i)}\right)=\frac{\sum_{k}^{N} \sum_{j}^{M} f\left(O_{c, j}^{(i)}(k)=0\right)}{N \times M}` + +Activation APoZ rank pruner also supports dependency-aware mode. + +Usage +^^^^^^ + +.. code-block:: python + + import nni + from nni.algorithms.compression.v2.pytorch.pruning import ActivationAPoZRankPruner + + # make sure you have used nni.trace to wrap the optimizer class before initialize + traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()) + + config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }] + pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20) + masked_model, masks = pruner.compress() + +For detailed example please refer to :githublink:`examples/model_compress/pruning/v2/activation_pruning_torch.py ` + +User configuration for Activation APoZ Rank Pruner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**PyTorch** + +.. 
autoclass:: nni.algorithms.compression.v2.pytorch.pruning.ActivationAPoZRankPruner
+
+Activation Mean Rank Pruner
+---------------------------
+
+Activation mean rank pruner is a pruner that prunes the blocks on the first weight dimension
+with the smallest importance criterion ``mean activation``, calculated from the output activations of convolution layers, to achieve a preset level of network sparsity.
+The pruning criterion ``mean activation`` is explained in section 2.2 of the paper `Pruning Convolutional Neural Networks for Resource Efficient Inference `__.
+
+Activation mean rank pruner also supports dependency-aware mode.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   import nni
+   from nni.algorithms.compression.v2.pytorch.pruning import ActivationMeanRankPruner
+
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
+
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/activation_pruning_torch.py `
+
+User configuration for Activation Mean Rank Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.ActivationMeanRankPruner
+
+Taylor FO Weight Pruner
+-----------------------
+
+Taylor FO weight pruner is a pruner that prunes on the first weight dimension,
+based on the estimated importance calculated from the first-order Taylor expansion on the weights, to achieve a preset level of network sparsity.
+The estimated importance is defined in the paper `Importance Estimation for Neural Network Pruning `__.
+
+:math:`\widehat{\mathcal{I}}_{\mathcal{S}}^{(1)}(\mathbf{W}) \triangleq \sum_{s \in \mathcal{S}} \mathcal{I}_{s}^{(1)}(\mathbf{W})=\sum_{s \in \mathcal{S}}\left(g_{s} w_{s}\right)^{2}`
+
+Taylor FO weight pruner also supports dependency-aware mode.
+
+What's more, we provide a global-sort mode for this pruner, which is aligned with the paper's implementation.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   import nni
+   from nni.algorithms.compression.v2.pytorch.pruning import TaylorFOWeightPruner
+
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
+
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/taylorfo_pruning_torch.py `
+
+User configuration for Taylor FO Weight Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.TaylorFOWeightPruner
+
+ADMM Pruner
+-----------
+
+Alternating Direction Method of Multipliers (ADMM) is a mathematical optimization technique
+that decomposes the original nonconvex problem into two subproblems which can be solved iteratively.
+In the weight pruning problem, these two subproblems are solved via 1) a gradient descent algorithm and 2) Euclidean projection, respectively.
+
+During the process of solving these two subproblems, the weights of the original model will be changed.
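+
+As a sketch of this standard ADMM decomposition (the notation is ours, not taken verbatim from the paper): with :math:`f` the training loss, :math:`\mathcal{S}` the sparsity constraint set, and an auxiliary variable :math:`Z` tied to the weights by the constraint :math:`W = Z`, the method alternates
+
+.. math::
+
+   W^{k+1} &= \arg\min_{W} \; f(W) + \frac{\rho}{2}\left\|W - Z^{k} + U^{k}\right\|^{2} \\
+   Z^{k+1} &= \Pi_{\mathcal{S}}\left(W^{k+1} + U^{k}\right) \\
+   U^{k+1} &= U^{k} + W^{k+1} - Z^{k+1}
+
+where the :math:`W`-update is the subproblem solved by gradient descent, and the :math:`Z`-update is the Euclidean projection onto :math:`\mathcal{S}`.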
+
+After the two subproblems are solved, fine-grained pruning is applied to prune the model according to the given config list.
+
+This solution framework applies to both unstructured pruning and different variations of structured pruning schemes.
+
+For more details, please refer to `A Systematic DNN Weight Pruning Framework using Alternating Direction Method of Multipliers `__.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   import nni
+   from nni.algorithms.compression.v2.pytorch.pruning import ADMMPruner
+
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
+
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=10, training_epochs=1)
+   masked_model, masks = pruner.compress()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/admm_pruning_torch.py `
+
+User configuration for ADMM Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.ADMMPruner
+
+Movement Pruner
+---------------
+
+Movement pruner is an implementation of movement pruning.
+This is a "fine-pruning" algorithm, which means the masks may change during each fine-tuning step.
+Each weight element is scored by the opposite of the sum of the products of the weight and its gradient over the fine-tuning steps.
+This means the weight elements moving towards zero accumulate negative scores, while the weight elements moving away from zero accumulate positive scores.
+The weight elements with low scores will be masked during inference.
+
+The following figure from the paper illustrates weight pruning by movement pruning.
+
+.. image:: ../../img/movement_pruning.png
+   :target: ../../img/movement_pruning.png
+   :alt:
+
+For more details, please refer to `Movement Pruning: Adaptive Sparsity by Fine-Tuning `__.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   import nni
+   from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
+
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
+
+   config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
+   pruner = MovementPruner(model, config_list, trainer, traced_optimizer, criterion, 10, 3000, 27000)
+   masked_model, masks = pruner.compress()
+
+User configuration for Movement Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.MovementPruner
+
+Reproduced Experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Model
+     - Dataset
+     - Remaining Weights
+     - MaP acc. (paper/ours)
+     - MvP acc. (paper/ours)
+   * - Bert base
+     - MNLI - Dev
+     - 10%
+     - 77.8% / 73.6%
+     - 79.3% / 78.8%
+
+Linear Pruner
+-------------
+
+Linear pruner is an iterative pruner that increases sparsity evenly from scratch across iterations.
+For example, if the final sparsity is set as 0.5 and the iteration number is 5, the sparsity values used in the iterations are ``[0, 0.1, 0.2, 0.3, 0.4, 0.5]``.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import LinearPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = LinearPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner)
+   pruner.compress()
+   _, model, masks, _, _ = pruner.get_best_result()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py `
+
+User configuration for Linear Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.LinearPruner
+
+AGP Pruner
+----------
+
+This is an iterative pruner, in which the sparsity is increased from an initial sparsity value :math:`s_{i}` (usually 0) to a final sparsity value :math:`s_{f}` over a span of :math:`n` pruning iterations,
+starting at training step :math:`t_{0}` and with pruning frequency :math:`\Delta t`:
+
+:math:`s_{t}=s_{f}+\left(s_{i}-s_{f}\right)\left(1-\frac{t-t_{0}}{n \Delta t}\right)^{3} \text { for } t \in\left\{t_{0}, t_{0}+\Delta t, \ldots, t_{0} + n \Delta t\right\}`
+
+For more details, please refer to `To prune, or not to prune: exploring the efficacy of pruning for model compression `__\.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import AGPPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = AGPPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner)
+   pruner.compress()
+   _, model, masks, _, _ = pruner.get_best_result()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py `
+
+User configuration for AGP Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.AGPPruner
+
+Lottery Ticket Pruner
+---------------------
+
+In `The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks `__\ ,
+the authors Jonathan Frankle and Michael Carbin provide comprehensive measurement and analysis,
+and articulate the *lottery ticket hypothesis*\ : dense, randomly-initialized, feed-forward networks contain subnetworks (*winning tickets*\ ) that
+-- when trained in isolation -- reach test accuracy comparable to the original network in a similar number of iterations.
+
+In this paper, the authors use the following process to prune a model, called *iterative pruning*\ :
+
+..
+
+   #. Randomly initialize a neural network :math:`f(x; \theta_0)` (where :math:`\theta_0 \sim \mathcal{D}_{\theta}`).
+   #. Train the network for :math:`j` iterations, arriving at parameters :math:`\theta_j`.
+   #. Prune :math:`p\%` of the parameters in :math:`\theta_j`, creating a mask :math:`m`.
+   #. Reset the remaining parameters to their values in :math:`\theta_0`, creating the winning ticket :math:`f(x; m \odot \theta_0)`.
+   #. Repeat steps 2, 3, and 4.
+
+If the configured final sparsity is :math:`P` (e.g., 0.8) and there are :math:`n` pruning iterations,
+each iteration prunes :math:`1-(1-P)^{1/n}` of the weights that survive the previous round.
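+
+As a concrete check of this formula: with :math:`P = 0.8` and :math:`n = 5`, each round prunes :math:`1 - 0.2^{1/5} \approx 27.5\%` of the surviving weights,
+so the remaining weight ratio after round :math:`k` is :math:`0.2^{k/5}`, reaching exactly :math:`20\%` remaining (sparsity 0.8) after the fifth round.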
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import LotteryTicketPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = LotteryTicketPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner, reset_weight=True)
+   pruner.compress()
+   _, model, masks, _, _ = pruner.get_best_result()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py `
+
+User configuration for Lottery Ticket Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.LotteryTicketPruner
+
+Simulated Annealing Pruner
+--------------------------
+
+We implement a guided heuristic search method, the Simulated Annealing (SA) algorithm. As mentioned in the paper, this method enhances guided search based on prior experience.
+The enhanced SA technique is based on the observation that a DNN layer with more weights often tolerates a higher degree of model compression with less impact on overall accuracy.
+
+* Randomly initialize a pruning rate distribution (sparsities).
+* While current_temperature > stop_temperature:
+
+  #. Generate a perturbation to the current distribution.
+  #. Perform a fast evaluation on the perturbed distribution.
+  #. Accept the perturbation according to the performance and the acceptance probability; if not accepted, return to step 1.
+  #. Cool down: current_temperature <- current_temperature * cool_down_rate.
+
+For more details, please refer to `AutoCompress: An Automatic DNN Structured Pruning Framework for Ultra-High Compression Rates `__.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import SimulatedAnnealingPruner
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   pruner = SimulatedAnnealingPruner(model, config_list, pruning_algorithm='l1', evaluator=evaluator, cool_down_rate=0.9, finetuner=finetuner)
+   pruner.compress()
+   _, model, masks, _, _ = pruner.get_best_result()
+
+For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py `
+
+User configuration for Simulated Annealing Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.SimulatedAnnealingPruner
+
+Auto Compress Pruner
+--------------------
+
+For a total iteration number :math:`N`, AutoCompressPruner prunes the model that survives the previous iteration with a fixed sparsity ratio (e.g., :math:`1-{(1-0.8)}^{(1/N)}`) to achieve the overall sparsity (e.g., :math:`0.8`):
+
+.. code-block:: bash
+
+   1. Generate a sparsity distribution using SimulatedAnnealingPruner
+   2. Perform ADMM-based pruning to generate the pruning result for the next iteration.
+
+For more details, please refer to `AutoCompress: An Automatic DNN Structured Pruning Framework for Ultra-High Compression Rates `__.
+
+Usage
+^^^^^^
+
+.. code-block:: python
+
+   import nni
+   from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
+
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
+
+   config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
+   admm_params = {
+       'trainer': trainer,
+       'traced_optimizer': traced_optimizer,
+       'criterion': criterion,
+       'iterations': 10,
+       'training_epochs': 1
+   }
+   sa_params = {
+       'evaluator': evaluator
+   }
+   pruner = AutoCompressPruner(model, config_list, 10, admm_params, sa_params, finetuner=finetuner)
+   pruner.compress()
+   _, model, masks, _, _ = pruner.get_best_result()
+
+The full script can be found :githublink:`here `.
+
+User configuration for Auto Compress Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.AutoCompressPruner
+
+AMC Pruner
+----------
+
+AMC pruner leverages reinforcement learning to provide the model compression policy.
+According to the authors, this learning-based compression policy outperforms conventional rule-based policies: it achieves a higher compression ratio, better preserves the accuracy, and saves human labor.
+
+For more details, please refer to `AMC: AutoML for Model Compression and Acceleration on Mobile Devices `__.
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import AMCPruner
+   config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5, 'max_sparsity_per_layer': 0.8}]
+   pruner = AMCPruner(400, model, config_list, dummy_input, evaluator, finetuner=finetuner)
+   pruner.compress()
+
+The full script can be found :githublink:`here `.
+
+User configuration for AMC Pruner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**PyTorch**
+
+.. autoclass:: nni.algorithms.compression.v2.pytorch.pruning.AMCPruner
diff --git a/docs/en_US/Compression/v2_pruning_config_list.rst b/docs/en_US/Compression/v2_pruning_config_list.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2f2f7b43e8d1f83d18679f325394060281487813
--- /dev/null
+++ b/docs/en_US/Compression/v2_pruning_config_list.rst
@@ -0,0 +1,67 @@
+Pruning Config Specification
+============================
+
+The Keys in Config List
+-----------------------
+
+Each sub-config in the config list is a dict, and the scope of each setting (key) is internal to its own sub-config.
+If multiple sub-configs are configured for the same layer, the later ones overwrite the previous ones.
+
+op_types
+^^^^^^^^
+
+The types of the layers targeted by this sub-config.
+If ``op_names`` is not set in this sub-config, all layers in the model that match one of the types will be selected.
+If ``op_names`` is set in this sub-config, the selected layers should satisfy both type and name.
+
+op_names
+^^^^^^^^
+
+The names of the layers targeted by this sub-config.
+If ``op_types`` is set in this sub-config, the selected layers should satisfy both type and name.
+
+op_partial_names
+^^^^^^^^^^^^^^^^
+
+This key selects layers to be pruned whose names share a common sub-string. NNI will iterate over all layer names in the model,
+find the names that contain one of the ``op_partial_names``, and append them to ``op_names``.
+
+sparsity_per_layer
+^^^^^^^^^^^^^^^^^^
+
+The sparsity ratio of each selected layer.
+
+For example, a ``sparsity_per_layer`` of 0.8 means each selected layer will have 80% of its weight values masked.
+If ``layer_1`` (500 parameters) and ``layer_2`` (1000 parameters) are selected in this sub-config,
+then 400 parameters of ``layer_1`` and 800 parameters of ``layer_2`` will be masked.
+
+total_sparsity
+^^^^^^^^^^^^^^
+
+The overall sparsity ratio of all selected layers taken together, which means the sparsity ratio may no longer be even between layers.
+
+For example, a ``total_sparsity`` of 0.8 means 80% of the parameters covered by this sub-config will be masked.
+If ``layer_1`` (500 parameters) and ``layer_2`` (1000 parameters) are selected in this sub-config,
+then a total of 1200 parameters will be masked across ``layer_1`` and ``layer_2``;
+how these parameters are distributed between the two layers is determined by the pruning algorithm.
+
+sparsity
+^^^^^^^^
+
+``sparsity`` is an old config key from pruning v1; it has the same meaning as ``sparsity_per_layer``.
+You can still use ``sparsity`` right now, but it will be deprecated in the future.
+
+max_sparsity_per_layer
+^^^^^^^^^^^^^^^^^^^^^^
+
+This key is usually used together with ``total_sparsity``. It limits the maximum sparsity ratio of each layer.
+
+In the ``total_sparsity`` example above, 1200 parameters need to be masked, and all parameters in ``layer_1`` might be masked entirely.
+To avoid this situation, ``max_sparsity_per_layer`` can be set to 0.9, which means at most 450 parameters of ``layer_1``
+and at most 900 parameters of ``layer_2`` can be masked.
+
+exclude
+^^^^^^^
+
+The ``exclude`` and ``sparsity`` keywords are mutually exclusive and cannot appear in the same sub-config.
+If ``exclude`` is set in a sub-config, the layers selected by this sub-config will not be pruned.
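+
+Putting these keys together, here is a hypothetical config list (the layer names are placeholders for illustration):
+it prunes all ``Conv2d`` layers to an overall sparsity of 0.8 with a per-layer cap, prunes the linear layers under a ``classifier`` sub-module by half, and excludes one layer from pruning.
+
+.. code-block:: python
+
+   config_list = [{
+       'op_types': ['Conv2d'],
+       'total_sparsity': 0.8,
+       'max_sparsity_per_layer': 0.9
+   }, {
+       'op_types': ['Linear'],
+       'op_partial_names': ['classifier'],
+       'sparsity_per_layer': 0.5
+   }, {
+       'op_names': ['fc_out'],
+       'exclude': True
+   }]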
diff --git a/docs/en_US/Compression/v2_scheduler.rst b/docs/en_US/Compression/v2_scheduler.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f37af836587eb650198ba1a741523e11a21cc6c
--- /dev/null
+++ b/docs/en_US/Compression/v2_scheduler.rst
@@ -0,0 +1,74 @@
+Pruning Scheduler
+=================
+
+The pruning scheduler is a new feature in pruning v2. It brings more flexibility for pruning the model iteratively.
+All the built-in iterative pruners (e.g., AGPPruner, SimulatedAnnealingPruner) are based on three abstract components: the pruning scheduler, pruners, and task generators.
+In addition to using the NNI built-in iterative pruners,
+users can directly use the pruning scheduler to customize their own iterative pruning logic.
+
+Workflow of Pruning Scheduler
+-----------------------------
+
+In iterative pruning, the final goal is broken down into small goals, one of which is completed in each iteration.
+For example, each iteration may increase the sparsity ratio a little, so that after several pruning iterations the continuously pruned model reaches the final overall sparsity;
+or the overall sparsity may be fixed while each iteration tries a different way to allocate sparsity between layers, in order to find the best allocation.
+
+We define a small goal as a ``Task``; it usually includes state inherited from previous iterations (e.g., the pruned model and masks) and a description of the current goal (e.g., a config list that describes how to allocate sparsity).
+Details about ``Task`` can be found in this :githublink:`file `.
+
+The pruning scheduler handles two main components: a basic pruner and a task generator. The logic of generating a ``Task`` is encapsulated in the task generator.
+In each iteration (one pruning step), the pruning scheduler parses the ``Task`` obtained from the task generator,
+and resets the pruner with the ``model``, ``masks``, and ``config_list`` parsed from the ``Task``.
+Then the pruning scheduler generates the new masks with the pruner. During an iteration, the new masked model may also undergo speed-up, fine-tuning, and evaluation.
+After one iteration is done, the pruning scheduler collects the compact model, new masks, and evaluation score, packages them into a ``TaskResult``, and passes it to the task generator.
+The iteration process ends when the task generator has no more ``Task`` to provide.
+
+How to Customize Iterative Pruning
+----------------------------------
+
+We use AGP pruning as an example to explain how to implement iterative pruning with the scheduler in NNI.
+
+.. code-block:: python
+
+   from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner, PruningScheduler
+   from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator
+
+   pruner = L1NormPruner(model=None, config_list=None, mode='dependency_aware', dummy_input=torch.rand(10, 3, 224, 224).to(device))
+   task_generator = AGPTaskGenerator(total_iteration=10, origin_model=model, origin_config_list=config_list, log_dir='.', keep_intermediate_result=True)
+   scheduler = PruningScheduler(pruner, task_generator, finetuner=finetuner, speed_up=True, dummy_input=dummy_input, evaluator=None, reset_weight=False)
+
+   scheduler.compress()
+   _, model, masks, _, _ = scheduler.get_best_result()
+
+The full script can be found :githublink:`here `.
+
+In this example, we use an L1 Norm Pruner in ``dependency_aware`` mode as the basic pruner in each iteration.
+Note that we do not need to pass ``model`` and ``config_list`` to the pruner, because in each iteration the ``model`` and ``config_list`` used by the pruner are received from the task generator.
+Then we can use ``scheduler`` as an iterative pruner directly. In fact, this is the implementation of ``AGPPruner`` in NNI.
+
+More about Task Generator
+-------------------------
+
+The task generator provides the model that needs to be pruned in each iteration and the corresponding config list.
+For example, ``AGPTaskGenerator`` will provide the model pruned in the previous iteration and compute the sparsity to use in the current iteration.
+The ``TaskGenerator`` puts all this pruning information into a ``Task``, and the pruning scheduler gets the ``Task`` and runs it.
+The pruning result is returned to the ``TaskGenerator`` at the end of each iteration, and the ``TaskGenerator`` decides whether and how to generate the next ``Task``.
+
+The information included in the ``Task`` and ``TaskResult`` can be found :githublink:`here `.
+
+A clearer iterative pruning flow chart can be found `here `__.
+
+If you want to implement your own task generator, please follow the ``TaskGenerator`` :githublink:`interface `.
+Two main functions should be implemented: ``init_pending_tasks(self) -> List[Task]`` and ``generate_tasks(self, task_result: TaskResult) -> List[Task]``.
+
+Why Use Pruning Scheduler
+-------------------------
+
+One of the benefits of using a scheduler to do iterative pruning is that users can access more functionality of the NNI pruning components.
+For simplicity of the interface and faithfulness to the original papers, NNI does not fully expose all the low-level interfaces to the upper layer.
+For example, resetting the weights to their values in the original model in each iteration is a key point of the lottery ticket pruning algorithm, and this is implemented in ``LotteryTicketPruner``.
+To reduce the complexity of the interface, we only support this function in ``LotteryTicketPruner``, not in other pruners.
+If users want to reset weights in each iteration of AGP pruning, ``AGPPruner`` cannot do this, but users can easily set ``reset_weight=True`` in ``PruningScheduler`` to achieve it.
+
+What's more, for a customized pruner or task generator, using the scheduler can easily enhance the algorithm.
+In addition, users can also customize the scheduling process to implement their own scheduler.
diff --git a/docs/en_US/FeatureEngineering/GBDTSelector.rst b/docs/en_US/FeatureEngineering/GBDTSelector.rst
new file mode 100644
index 0000000000000000000000000000000000000000..daded470b02a02fede002c9e079e0536c500c6d3
--- /dev/null
+++ b/docs/en_US/FeatureEngineering/GBDTSelector.rst
@@ -0,0 +1,70 @@
+GBDTSelector
+------------
+
+GBDTSelector is based on `LightGBM `__\ , which is a gradient boosting framework that uses tree-based learning algorithms.
+
+When the data is passed into the GBDT model, the model constructs the boosting trees. The feature importance comes from the scores computed during construction, which indicate how useful or valuable each feature was in building the boosted decision trees within the model.
+
+We could use this method as a strong baseline for feature selection, especially when using a GBDT model as a classifier or regressor.
+
+For now, the supported ``importance_type`` values are ``split`` and ``gain``. We will support customized ``importance_type`` in the future, which means the user will be able to define how the feature score is calculated.
+
+Usage
+^^^^^
+
+First, you need to install the dependency:
+
+.. code-block:: bash
+
+   pip install lightgbm
+
+Then
+
+.. code-block:: python
+
+   from nni.algorithms.feature_engineering.gbdt_selector import GBDTSelector
+
+   # load data
+   ...
+   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+   # initialize a selector
+   fgs = GBDTSelector()
+   # fit data
+   fgs.fit(X_train, y_train, ...)
+   # get important features
+   # this returns the indices of the important features.
+   print(fgs.get_selected_features(10))
+
+   ...
+
+You could also refer to the examples in ``/examples/feature_engineering/gbdt_selector/``.
+
+**Requirement of fit FuncArgs**
+
+
+*
+  **X** (array-like, required) - The training input samples with shape [n_samples, n_features].
+
+*
+  **y** (array-like, required) - The target values (class labels in classification, real numbers in regression) with shape [n_samples].
+
+*
+  **lgb_params** (dict, required) - The parameters for the LightGBM model. For details, you could refer `here `__.
+
+*
+  **eval_ratio** (float, required) - The ratio of the data size, used to split the evaluation data and training data from self.X.
+
+*
+  **early_stopping_rounds** (int, required) - The early-stopping setting in LightGBM. For details, you could refer `here `__.
+
+*
+  **importance_type** (str, required) - Could be 'split' or 'gain'. 'split' means the result contains the number of times the feature is used in the model, and 'gain' means the result contains the total gain of the splits that use the feature. For details, you could refer `here `__.
+
+*
+  **num_boost_round** (int, required) - The number of boosting rounds. For details, you could refer `here `__.
+
+**Requirement of get_selected_features FuncArgs**
+
+
+* **topk** (int, required) - The number of top important features you want to select.
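+
+To make the required ``fit`` arguments above concrete, here is a minimal end-to-end sketch; the synthetic dataset and the LightGBM parameter values are illustrative assumptions, not tuned recommendations:
+
+.. code-block:: python
+
+   from sklearn.datasets import make_classification
+   from sklearn.model_selection import train_test_split
+   from nni.algorithms.feature_engineering.gbdt_selector import GBDTSelector
+
+   # a synthetic binary classification task stands in for real data
+   X, y = make_classification(n_samples=1000, n_features=50, random_state=42)
+   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+   selector = GBDTSelector()
+   selector.fit(X_train, y_train,
+                lgb_params={'objective': 'binary', 'num_leaves': 31, 'learning_rate': 0.05},
+                eval_ratio=0.2,
+                early_stopping_rounds=10,
+                importance_type='gain',
+                num_boost_round=100)
+
+   # indices of the 10 most important features
+   print(selector.get_selected_features(10))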
diff --git a/docs/en_US/FeatureEngineering/GradientFeatureSelector.rst b/docs/en_US/FeatureEngineering/GradientFeatureSelector.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c3e44226ee498ef021b24c939c4d9b8440dcbd1
--- /dev/null
+++ b/docs/en_US/FeatureEngineering/GradientFeatureSelector.rst
@@ -0,0 +1,107 @@
+GradientFeatureSelector
+-----------------------
+
+The algorithm in GradientFeatureSelector comes from `Feature Gradients: Scalable Feature Selection via Discrete Relaxation `__.
+
+GradientFeatureSelector is a gradient-based search algorithm
+for feature selection.
+
+1) This approach extends a recent result on the estimation of
+learnability in the sublinear data regime by showing that the calculation can be performed iteratively (i.e., in mini-batches) and in **linear time and space** with respect to both the number of features D and the sample size N.
+
+2) This, along with a discrete-to-continuous relaxation of the search domain, allows for an **efficient, gradient-based** search algorithm among feature subsets for very **large datasets**.
+
+3) Crucially, this algorithm is capable of finding **higher-order correlations** between features and targets for both the N > D and N < D regimes, as opposed to approaches that do not consider such interactions and/or only consider one regime.
+
+Usage
+^^^^^
+
+.. code-block:: python
+
+   from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector
+
+   # load data
+   ...
+   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+   # initialize a selector
+   fgs = FeatureGradientSelector(n_features=10)
+   # fit data
+   fgs.fit(X_train, y_train)
+   # get important features
+   # this returns the indices of the important features.
+   print(fgs.get_selected_features())
+
+   ...
+
+You could also refer to the examples in ``/examples/feature_engineering/gradient_feature_selector/``.
+
+**Parameters of class FeatureGradientSelector constructor**
+
+
+*
+  **order** (int, optional, default = 4) - What order of interactions to include. Higher orders may be more accurate but increase the run time. 12 is the maximum allowed order.
+
+*
+  **penalty** (int, optional, default = 1) - Constant that multiplies the regularization term.
+
+*
+  **n_features** (int, optional, default = None) - If None, automatically chooses the number of features based on the search. Otherwise, the number of top features to select.
+
+*
+  **max_features** (int, optional, default = None) - If not None, will use the 'elbow method' to determine the number of features, with max_features as the upper limit.
+
+*
+  **learning_rate** (float, optional, default = 1e-1) - The learning rate.
+
+*
+  **init** (*zero, on, off, onhigh, offhigh, or sklearn, optional, default = zero*\ ) - How to initialize the vector of scores. 'zero' is the default.
+
+*
+  **n_epochs** (int, optional, default = 1) - The number of epochs to run.
+
+*
+  **shuffle** (bool, optional, default = True) - Shuffle the "rows" prior to an epoch.
+
+*
+  **batch_size** (int, optional, default = 1000) - Number of "rows" to process at a time.
+
+*
+  **target_batch_size** (int, optional, default = 1000) - Number of "rows" to accumulate gradients over. Useful when many rows will not fit into memory but are needed for accurate estimation.
+
+*
+  **classification** (bool, optional, default = True) - If True, the problem is classification; otherwise, regression.
+
+*
+  **ordinal** (bool, optional, default = True) - If True, the problem is ordinal classification. Requires classification to be True.
+
+*
+  **balanced** (bool, optional, default = True) - If True, each class is weighted equally in the optimization; otherwise, weighting is done via the support of each class. Requires classification to be True.
+
+*
+  **preprocess** (str, optional, default = 'zscore') - 'zscore', which centers and normalizes the data to unit variance, or 'center', which only centers the data to zero mean.
+
+*
+  **soft_grouping** (bool, optional, default = True) - If True, groups represent features that come from the same source. Used to encourage sparsity of groups and of features within groups.
+
+*
+  **verbose** (int, optional, default = 0) - Controls the verbosity when fitting. Set to 0 for no printing; 1 or higher prints every ``verbose`` number of gradient steps.
+
+*
+  **device** (str, optional, default = 'cpu') - 'cpu' to run on CPU and 'cuda' to run on GPU. It runs much faster on GPU.
+
+**Requirement of fit FuncArgs**
+
+
+*
+  **X** (array-like, required) - The training input samples with shape [n_samples, n_features]. `np.ndarray` recommended.
+
+*
+  **y** (array-like, required) - The target values (class labels in classification, real numbers in regression) with shape [n_samples]. `np.ndarray` recommended.
+
+*
+  **groups** (array-like, optional, default = None) - Groups of columns that must be selected as a unit. E.g., [0, 0, 1, 2] specifies that the first two columns are part of the same group. Its shape is [n_features].
+
+**Requirement of get_selected_features FuncArgs**
+
+  For now, the ``get_selected_features`` function has no parameters.
diff --git a/docs/en_US/FeatureEngineering/Overview.rst b/docs/en_US/FeatureEngineering/Overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e6bba3af87fed7fbecfd7744279dba65d7e28801
--- /dev/null
+++ b/docs/en_US/FeatureEngineering/Overview.rst
@@ -0,0 +1,320 @@
+Feature Engineering with NNI
+============================
+
+We are glad to announce the alpha release of the Feature Engineering toolkit on top of NNI. It is still in the experimental phase and might evolve based on user feedback. We'd like to invite you to use it, give feedback, and even contribute.
+
+For now, we support the following feature selectors:
+
+
+* `GradientFeatureSelector <./GradientFeatureSelector.rst>`__
+* `GBDTSelector <./GBDTSelector.rst>`__
+
+These selectors are suitable for tabular data (which means they do not cover image, speech, or text data).
+
+In addition, these selectors only perform feature selection. If you want to:
+1) generate high-order combined features on NNI while doing feature selection;
+2) leverage your distributed resources;
+you could try this :githublink:`example `.
+
+How to use?
+-----------
+
+.. code-block:: python
+
+   from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector
+   # from nni.algorithms.feature_engineering.gbdt_selector import GBDTSelector
+
+   # load data
+   ...
+   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+   # initialize a selector
+   fgs = FeatureGradientSelector(...)
+   # fit data
+   fgs.fit(X_train, y_train)
+   # get important features
+   # this returns the indices of the important features.
+   print(fgs.get_selected_features(...))
+
+   ...
+
+When using a built-in selector, you first need to ``import`` the feature selector and ``initialize`` it. You can then call ``fit`` on the selector to pass in the data. After that, you can use ``get_selected_features`` to get the important features.
+The function parameters differ between selectors, so you need to check the docs before using them.
+
+How to customize?
+-----------------
+
+NNI provides *state-of-the-art* feature selection algorithms as built-in selectors. NNI also supports building a feature selector by yourself.
+
+If you want to implement a customized feature selector, you need to:
+
+
+#. Inherit the base FeatureSelector class
+#. Implement the ``fit`` and ``get_selected_features`` functions
+#. Integrate with sklearn (Optional)
+
+Here is an example:
+
+**1. Inherit the base FeatureSelector Class**
+
+.. code-block:: python
+
+   from nni.feature_engineering.feature_selector import FeatureSelector
+
+   class CustomizedSelector(FeatureSelector):
+       def __init__(self, ...):
+           ...
+
+**2. Implement the fit and get_selected_features Functions**
+
+.. code-block:: python
+
+   from nni.feature_engineering.feature_selector import FeatureSelector
+
+   class CustomizedSelector(FeatureSelector):
+       def __init__(self, ...):
+           ...
+
+       def fit(self, X, y, **kwargs):
+           """
+           Fit the training data to FeatureSelector
+
+           Parameters
+           ------------
+           X : array-like numpy matrix
+               The training input samples, with shape [n_samples, n_features].
+           y : array-like numpy matrix
+               The target values (class labels in classification, real numbers in regression), with shape [n_samples].
+           """
+           self.X = X
+           self.y = y
+           ...
+
+       def get_selected_features(self):
+           """
+           Get the important features
+
+           Returns
+           -------
+           list :
+               Returns the indices of the important features.
+           """
+           ...
+           return self.selected_features_
+
+       ...
+
+**3. Integrate with Sklearn**
+
+``sklearn.pipeline.Pipeline`` can connect models in series, such as a feature selector, normalization, and classification/regression, to form a typical machine learning workflow.
+The following steps help us integrate better with sklearn, which means we can treat the customized feature selector as a module of a pipeline.
+
+
+#. Inherit the class ``sklearn.base.BaseEstimator``
+#. Implement the ``get_params`` and ``set_params`` functions of ``BaseEstimator``
+#. Inherit the class ``sklearn.feature_selection.base.SelectorMixin``
+#. Implement the ``get_support``, ``transform`` and ``inverse_transform`` functions of ``SelectorMixin``
+
+Here is an example:
+
+**1. Inherit the BaseEstimator Class and Implement its Functions**
+
+.. code-block:: python
+
+   from sklearn.base import BaseEstimator
+   from nni.feature_engineering.feature_selector import FeatureSelector
+
+   class CustomizedSelector(FeatureSelector, BaseEstimator):
+       def __init__(self, ...):
+           ...
+
+       def get_params(self, ...):
+           """
+           Get parameters for this estimator.
+           """
+           params = self.__dict__
+           params = {key: val for (key, val) in params.items()
+                     if not key.endswith('_')}
+           return params
+
+       def set_params(self, **params):
+           """
+           Set the parameters of this estimator.
+           """
+           for param in params:
+               if hasattr(self, param):
+                   setattr(self, param, params[param])
+           return self
+
+**2. Inherit the SelectorMixin Class and Implement its Functions**
+
+.. code-block:: python
+
+   from sklearn.base import BaseEstimator
+   from sklearn.feature_selection.base import SelectorMixin
+
+   from nni.feature_engineering.feature_selector import FeatureSelector
+
+   class CustomizedSelector(FeatureSelector, BaseEstimator, SelectorMixin):
+       def __init__(self, ...):
+           ...
+
+       def get_params(self, ...):
+           """
+           Get parameters for this estimator.
+ """ + params = self.__dict__ + params = {key: val for (key, val) in params.items() + if not key.endswith('_')} + return params + + def set_params(self, **params): + """ + Set the parameters of this estimator. + """ + for param in params: + if hasattr(self, param): + setattr(self, param, params[param]) + return self + + def get_support(self, indices=False): + """ + Get a mask, or integer index, of the features selected. + + Parameters + ---------- + indices : bool + Default False. If True, the return value will be an array of integers, rather than a boolean mask. + + Returns + ------- + list : + returns support: An index that selects the retained features from a feature vector. + If indices are False, this is a boolean array of shape [# input features], in which an element is True iff its corresponding feature is selected for retention. + If indices are True, this is an integer array of shape [# output features] whose values + are indices into the input feature vector. + """ + ... + return mask + + + def transform(self, X): + """Reduce X to the selected features. + + Parameters + ---------- + X : array + which shape is [n_samples, n_features] + + Returns + ------- + X_r : array + which shape is [n_samples, n_selected_features] + The input samples with only the selected features. + """ + ... + return X_r + + + def inverse_transform(self, X): + """ + Reverse the transformation operation + + Parameters + ---------- + X : array + shape is [n_samples, n_selected_features] + + Returns + ------- + X_r : array + shape is [n_samples, n_original_features] + """ + ... + return X_r + +After integrating with Sklearn, we could use the feature selector as follows: + +.. code-block:: python + + from sklearn.linear_model import LogisticRegression + + # load data + ... + X_train, y_train = ... + + # build a ppipeline + pipeline = make_pipeline(XXXSelector(...), LogisticRegression()) + pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression()) + pipeline.fit(X_train, y_train) + + # score + print("Pipeline Score: ", pipeline.score(X_train, y_train)) + +Benchmark +--------- + +``Baseline`` means without any feature selection, we directly pass the data to LogisticRegression. For this benchmark, we only use 10% data from the train as test data. For the GradientFeatureSelector, we only take the top20 features. The metric is the mean accuracy on the given test data and labels. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Dataset + - All Features + LR (acc, time, memory) + - GradientFeatureSelector + LR (acc, time, memory) + - TreeBasedClassifier + LR (acc, time, memory) + - #Train + - #Feature + * - colon-cancer + - 0.7547, 890ms, 348MiB + - 0.7368, 363ms, 286MiB + - 0.7223, 171ms, 1171 MiB + - 62 + - 2,000 + * - gisette + - 0.9725, 215ms, 584MiB + - 0.89416, 446ms, 397MiB + - 0.9792, 911ms, 234MiB + - 6,000 + - 5,000 + * - avazu + - 0.8834, N/A, N/A + - N/A, N/A, N/A + - N/A, N/A, N/A + - 40,428,967 + - 1,000,000 + * - rcv1 + - 0.9644, 557ms, 241MiB + - 0.7333, 401ms, 281MiB + - 0.9615, 752ms, 284MiB + - 20,242 + - 47,236 + * - news20.binary + - 0.9208, 707ms, 361MiB + - 0.6870, 565ms, 371MiB + - 0.9070, 904ms, 364MiB + - 19,996 + - 1,355,191 + * - real-sim + - 0.9681, 433ms, 274MiB + - 0.7969, 251ms, 274MiB + - 0.9591, 643ms, 367MiB + - 72,309 + - 20,958 + + +The dataset of benchmark could be download in `here `__ + +The code could be refenrence ``/examples/feature_engineering/gradient_feature_selector/benchmark_test.py``. 
+ +Reference and Feedback +---------------------- + + +* To `report a bug `__ for this feature in GitHub; +* To `file a feature or improvement request `__ for this feature in GitHub; +* To know more about :githublink:`Neural Architecture Search with NNI `\ ; +* To know more about :githublink:`Model Compression with NNI `\ ; +* To know more about :githublink:`Hyperparameter Tuning with NNI `\ ; diff --git a/docs/en_US/Makefile b/docs/en_US/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..51285967a7d9722c5bdee4f6a81c154a56aa0846 --- /dev/null +++ b/docs/en_US/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en_US/NAS/ApiReference.rst b/docs/en_US/NAS/ApiReference.rst new file mode 100644 index 0000000000000000000000000000000000000000..73e37608fb4a4b76e29b59d4d559192a0fb574e9 --- /dev/null +++ b/docs/en_US/NAS/ApiReference.rst @@ -0,0 +1,123 @@ +Retiarii API Reference +====================== + +.. contents:: + +Inline Mutation APIs +-------------------- + +.. autoclass:: nni.retiarii.nn.pytorch.LayerChoice + :members: + +.. autoclass:: nni.retiarii.nn.pytorch.InputChoice + :members: + +.. autoclass:: nni.retiarii.nn.pytorch.ValueChoice + :members: + +.. autoclass:: nni.retiarii.nn.pytorch.ChosenInputs + :members: + +.. autoclass:: nni.retiarii.nn.pytorch.Repeat + :members: + +.. autoclass:: nni.retiarii.nn.pytorch.Cell + :members: + +Graph Mutation APIs +------------------- + +.. autoclass:: nni.retiarii.Mutator + :members: + +.. autoclass:: nni.retiarii.Model + :members: + +.. autoclass:: nni.retiarii.Graph + :members: + +.. autoclass:: nni.retiarii.Node + :members: + +.. autoclass:: nni.retiarii.Edge + :members: + +.. autoclass:: nni.retiarii.Operation + :members: + +Evaluators +---------- + +.. autoclass:: nni.retiarii.evaluator.FunctionalEvaluator + :members: + +.. autoclass:: nni.retiarii.evaluator.pytorch.lightning.LightningModule + :members: + +.. autoclass:: nni.retiarii.evaluator.pytorch.lightning.Classification + :members: + +.. autoclass:: nni.retiarii.evaluator.pytorch.lightning.Regression + :members: + +Oneshot Trainers +---------------- + +.. autoclass:: nni.retiarii.oneshot.pytorch.DartsTrainer + :members: + +.. autoclass:: nni.retiarii.oneshot.pytorch.EnasTrainer + :members: + +.. autoclass:: nni.retiarii.oneshot.pytorch.ProxylessTrainer + :members: + +.. autoclass:: nni.retiarii.oneshot.pytorch.SinglePathTrainer + :members: + +Exploration Strategies +---------------------- + +.. autoclass:: nni.retiarii.strategy.Random + :members: + +.. autoclass:: nni.retiarii.strategy.GridSearch + :members: + +.. autoclass:: nni.retiarii.strategy.RegularizedEvolution + :members: + +.. autoclass:: nni.retiarii.strategy.TPEStrategy + :members: + +.. autoclass:: nni.retiarii.strategy.PolicyBasedRL + :members: + +Retiarii Experiments +-------------------- + +.. autoclass:: nni.retiarii.experiment.pytorch.RetiariiExperiment + :members: + +.. 
autoclass:: nni.retiarii.experiment.pytorch.RetiariiExeConfig
+   :members:
+
+CGO Execution
+-------------
+
+.. autofunction:: nni.retiarii.evaluator.pytorch.cgo.evaluator.MultiModelSupervisedLearningModule
+
+.. autofunction:: nni.retiarii.evaluator.pytorch.cgo.evaluator.Classification
+
+.. autofunction:: nni.retiarii.evaluator.pytorch.cgo.evaluator.Regression
+
+Utilities
+---------
+
+.. autofunction:: nni.retiarii.basic_unit
+
+.. autofunction:: nni.retiarii.model_wrapper
+
+.. autofunction:: nni.retiarii.fixed_arch
+
+
diff --git a/docs/en_US/NAS/Benchmarks.rst b/docs/en_US/NAS/Benchmarks.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c59fa0b68cf4abf5763ef9c7a7c987e0390ec59
--- /dev/null
+++ b/docs/en_US/NAS/Benchmarks.rst
@@ -0,0 +1,179 @@
+NAS Benchmarks
+==============
+
+.. toctree::
+   :hidden:
+
+   Example Usages
+
+Introduction
+------------
+
+To improve the reproducibility of NAS algorithms as well as to reduce computing resource requirements, researchers proposed a series of NAS benchmarks such as `NAS-Bench-101 `__\ , `NAS-Bench-201 `__\ , `NDS `__\ , etc. NNI provides a query interface for users to acquire these benchmarks. Within just a few lines of code, researchers are able to evaluate their NAS algorithms easily and fairly by utilizing these benchmarks.
+
+Prerequisites
+-------------
+
+
+* Please prepare a folder to hold all the benchmark databases. By default, it can be found at ``${HOME}/.cache/nni/nasbenchmark``. You can also place it anywhere you like, and specify it in ``NASBENCHMARK_DIR`` via ``export NASBENCHMARK_DIR=/path/to/your/nasbenchmark`` before importing NNI.
+* Please install ``peewee`` via ``pip3 install peewee``\ , which NNI uses to connect to the databases.
+
+Data Preparation
+----------------
+
+Option 1 (Recommended)
+^^^^^^^^^^^^^^^^^^^^^^
+
+You can download the preprocessed benchmark files via ``python -m nni.nas.benchmarks.download ``, where ```` can be ``nasbench101``, ``nasbench201``, etc. Add ``--help`` to the command for the supported command line arguments.
+
+Option 2
+^^^^^^^^
+
+.. note:: If you have files that were processed before v2.5, it is recommended that you delete them and try option 1.
+
+#.
+   Clone NNI to your machine and enter the ``examples/nas/benchmarks`` directory.
+
+   .. code-block:: bash
+
+      git clone -b ${NNI_VERSION} https://github.com/microsoft/nni
+      cd nni/examples/nas/benchmarks
+
+   Replace ``${NNI_VERSION}`` with a released version name or branch name, e.g., ``v2.4``.
+
+#.
+   Install dependencies via ``pip3 install -r xxx.requirements.txt``. ``xxx`` can be ``nasbench101``\ , ``nasbench201`` or ``nds``.
+
+#. Generate the database via ``./xxx.sh``. The directory that stores the benchmark file can be configured with the ``NASBENCHMARK_DIR`` environment variable, which defaults to ``~/.nni/nasbenchmark``. Note that the NAS-Bench-201 dataset will be downloaded from Google Drive.
+
+Please make sure there is at least 10 GB of free disk space, and note that the conversion process can take up to several hours to complete.
+
+Example Usages
+--------------
+
+Please refer to the `example usages of the Benchmarks API <./BenchmarksExample.rst>`__.
+
+NAS-Bench-101
+-------------
+
+* `Paper link `__
+* `Open-source `__
+
+NAS-Bench-101 contains 423,624 unique neural networks, combined with 4 variations in the number of epochs (4, 12, 36, 108), each of which is trained 3 times. It is a cell-wise search space, which constructs and stacks cells by enumerating DAGs with at most 7 operators and no more than 9 connections.
All operators can be chosen from ``CONV3X3_BN_RELU``\ , ``CONV1X1_BN_RELU`` and ``MAXPOOL3X3``\ , except the first operator (always ``INPUT``\ ) and the last operator (always ``OUTPUT``\ ). + +Notably, NAS-Bench-101 eliminates invalid cells (e.g., there is no path from input to output, or there is redundant computation). Furthermore, isomorphic cells are de-duplicated, i.e., all the remaining cells are computationally unique. + +API Documentation +^^^^^^^^^^^^^^^^^ + +.. autofunction:: nni.nas.benchmarks.nasbench101.query_nb101_trial_stats + +.. autoattribute:: nni.nas.benchmarks.nasbench101.INPUT + +.. autoattribute:: nni.nas.benchmarks.nasbench101.OUTPUT + +.. autoattribute:: nni.nas.benchmarks.nasbench101.CONV3X3_BN_RELU + +.. autoattribute:: nni.nas.benchmarks.nasbench101.CONV1X1_BN_RELU + +.. autoattribute:: nni.nas.benchmarks.nasbench101.MAXPOOL3X3 + +.. autoclass:: nni.nas.benchmarks.nasbench101.Nb101TrialConfig + +.. autoclass:: nni.nas.benchmarks.nasbench101.Nb101TrialStats + +.. autoclass:: nni.nas.benchmarks.nasbench101.Nb101IntermediateStats + +.. autofunction:: nni.nas.benchmarks.nasbench101.graph_util.nasbench_format_to_architecture_repr + +.. autofunction:: nni.nas.benchmarks.nasbench101.graph_util.infer_num_vertices + +.. autofunction:: nni.nas.benchmarks.nasbench101.graph_util.hash_module + +NAS-Bench-201 +------------- + +* `Paper link `__ +* `Open-source API `__ +* `Implementations `__ + +NAS-Bench-201 is a cell-wise search space that views nodes as tensors and edges as operators. The search space contains all possible densely-connected DAGs with 4 nodes, resulting in 15,625 candidates in total. Each operator (i.e., edge) is selected from a pre-defined operator set (\ ``NONE``\ , ``SKIP_CONNECT``\ , ``CONV_1X1``\ , ``CONV_3X3`` and ``AVG_POOL_3X3``\ ). Training approaches vary in the dataset used (CIFAR-10, CIFAR-100, ImageNet) and the number of epochs scheduled (12 and 200). Each combination of architecture and training approach is repeated 1 to 3 times with different random seeds. + +API Documentation +^^^^^^^^^^^^^^^^^ + +.. autofunction:: nni.nas.benchmarks.nasbench201.query_nb201_trial_stats + +.. autoattribute:: nni.nas.benchmarks.nasbench201.NONE + +.. autoattribute:: nni.nas.benchmarks.nasbench201.SKIP_CONNECT + +.. autoattribute:: nni.nas.benchmarks.nasbench201.CONV_1X1 + +.. autoattribute:: nni.nas.benchmarks.nasbench201.CONV_3X3 + +.. autoattribute:: nni.nas.benchmarks.nasbench201.AVG_POOL_3X3 + +.. autoclass:: nni.nas.benchmarks.nasbench201.Nb201TrialConfig + +.. autoclass:: nni.nas.benchmarks.nasbench201.Nb201TrialStats + +.. autoclass:: nni.nas.benchmarks.nasbench201.Nb201IntermediateStats + +NDS +--- + +* `Paper link `__ +* `Open-source `__ + +*On Network Design Spaces for Visual Recognition* released trial statistics of over 100,000 configurations (models + hyper-parameters) sampled from multiple model families, including vanilla (feedforward network loosely inspired by VGG), ResNet and ResNeXt (residual basic block and residual bottleneck block) and NAS cells (following popular designs from NASNet, Amoeba, PNAS, ENAS and DARTS). Most configurations are trained only once with a fixed seed, except a few that are trained twice or three times. + +Instead of storing results obtained with different configurations in separate files, we dump them into one single database to enable comparison in multiple dimensions. Specifically, we use ``model_family`` to distinguish model types, ``model_spec`` for all hyper-parameters needed to build this model, ``cell_spec`` for detailed information on operators and connections if it is a NAS cell, and ``generator`` to denote the sampling policy through which this configuration is generated. Refer to the API documentation for details.
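+For instance, once the database is prepared, a query against NDS can be issued as in the sketch below. This is a minimal example modeled on the calls in the example notebook; the ``model_spec`` values are illustrative only, and ``None`` acts as a wildcard for any field: + +.. code-block:: python + + import pprint + + from nni.nas.benchmarks.nds import query_nds_trial_stats + + # positional filters: model_family, proposer, generator, model_spec, cell_spec, dataset + model_spec = {'ds': [1, 12, 12, 12], 'ss': [1, 1, 2, 2], 'ws': [16, 24, 24, 40]} + for t in query_nds_trial_stats('residual_basic', None, None, model_spec, None, 'cifar10'): + pprint.pprint(t)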
+Available Operators +------------------- + +Here is a list of available operators used in NDS. + +.. autoattribute:: nni.nas.benchmarks.nds.constants.NONE + +.. autoattribute:: nni.nas.benchmarks.nds.constants.SKIP_CONNECT + +.. autoattribute:: nni.nas.benchmarks.nds.constants.AVG_POOL_3X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.MAX_POOL_3X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.MAX_POOL_5X5 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.MAX_POOL_7X7 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.CONV_1X1 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.CONV_3X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.CONV_3X1_1X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.CONV_7X1_1X7 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.DIL_CONV_3X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.DIL_CONV_5X5 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.SEP_CONV_3X3 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.SEP_CONV_5X5 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.SEP_CONV_7X7 + +.. autoattribute:: nni.nas.benchmarks.nds.constants.DIL_SEP_CONV_3X3 + +API Documentation +^^^^^^^^^^^^^^^^^ + +.. autofunction:: nni.nas.benchmarks.nds.query_nds_trial_stats + +.. autoclass:: nni.nas.benchmarks.nds.NdsTrialConfig + +.. autoclass:: nni.nas.benchmarks.nds.NdsTrialStats + +.. autoclass:: nni.nas.benchmarks.nds.NdsIntermediateStats diff --git a/docs/en_US/NAS/BenchmarksExample.ipynb b/docs/en_US/NAS/BenchmarksExample.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4c0c8d6c2d86893000925b71978376b33d53010b --- /dev/null +++ b/docs/en_US/NAS/BenchmarksExample.ipynb @@ -0,0 +1,396 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example Usages of NAS Benchmarks" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import pprint\n", + "import time\n", + "\n", + "from nni.nas.benchmarks.nasbench101 import query_nb101_trial_stats\n", + "from nni.nas.benchmarks.nasbench201 import query_nb201_trial_stats\n", + "from nni.nas.benchmarks.nds import query_nds_trial_stats\n", + "\n", + "ti = time.time()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NAS-Bench-101" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the following architecture as an example:\n", + "\n", + "![nas-101](../../img/nas-bench-101-example.png)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "arch = {\n", + " 'op1': 'conv3x3-bn-relu',\n", + " 'op2': 'maxpool3x3',\n", + " 'op3': 'conv3x3-bn-relu',\n", + " 'op4': 'conv3x3-bn-relu',\n", + " 'op5': 'conv1x1-bn-relu',\n", + " 'input1': [0],\n", + " 'input2': [1],\n", + " 'input3': [2],\n", + " 'input4': [0],\n", + " 'input5': [0, 3, 4],\n", + " 'input6': [2, 5]\n", + "}\n", + "for t in query_nb101_trial_stats(arch, 108, include_intermediates=True):\n", + " pprint.pprint(t)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An architecture of NAS-Bench-101 
could be trained more than once. Each element of the returned generator is a dict which contains one of the training results of this trial config (architecture + hyper-parameters) including train/valid/test accuracy, training time, number of epochs, etc. The results of NAS-Bench-201 and NDS follow similar formats." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NAS-Bench-201" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the following architecture as an example:\n", + "\n", + "![nas-201](../../img/nas-bench-201-example.png)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "arch = {\n", + " '0_1': 'avg_pool_3x3',\n", + " '0_2': 'conv_1x1',\n", + " '1_2': 'skip_connect',\n", + " '0_3': 'conv_1x1',\n", + " '1_3': 'skip_connect',\n", + " '2_3': 'skip_connect'\n", + "}\n", + "for t in query_nb201_trial_stats(arch, 200, 'cifar100'):\n", + " pprint.pprint(t)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Intermediate results are also available." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "for t in query_nb201_trial_stats(arch, None, 'imagenet16-120', include_intermediates=True):\n", + " print(t['config'])\n", + " print('Intermediates:', len(t['intermediates']))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NDS" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the following architecture as an example:
\n", + "![nds](../../img/nas-bench-nds-example.png)\n", + "\n", + "Here, `bot_muls`, `ds`, `num_gs`, `ss` and `ws` stand for \"bottleneck multipliers\", \"depths\", \"number of groups\", \"strides\" and \"widths\" respectively." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "model_spec = {\n", + " 'bot_muls': [0.0, 0.25, 0.25, 0.25],\n", + " 'ds': [1, 16, 1, 4],\n", + " 'num_gs': [1, 2, 1, 2],\n", + " 'ss': [1, 1, 2, 2],\n", + " 'ws': [16, 64, 128, 16]\n", + "}\n", + "# Use none as a wildcard\n", + "for t in query_nds_trial_stats('residual_bottleneck', None, None, model_spec, None, 'cifar10'):\n", + " pprint.pprint(t)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "model_spec = {\n", + " 'bot_muls': [0.0, 0.25, 0.25, 0.25],\n", + " 'ds': [1, 16, 1, 4],\n", + " 'num_gs': [1, 2, 1, 2],\n", + " 'ss': [1, 1, 2, 2],\n", + " 'ws': [16, 64, 128, 16]\n", + "}\n", + "for t in query_nds_trial_stats('residual_bottleneck', None, None, model_spec, None, 'cifar10', include_intermediates=True):\n", + " pprint.pprint(t['intermediates'][:10])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "model_spec = {'ds': [1, 12, 12, 12], 'ss': [1, 1, 2, 2], 'ws': [16, 24, 24, 40]}\n", + "for t in query_nds_trial_stats('residual_basic', 'resnet', 'random', model_spec, {}, 'cifar10'):\n", + " pprint.pprint(t)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get the first one\n", + "pprint.pprint(next(query_nds_trial_stats('vanilla', None, None, None, None, None)))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# count number\n", + "model_spec = {'num_nodes_normal': 5, 'num_nodes_reduce': 5, 'depth': 12, 'width': 32, 'aux': False, 'drop_prob': 0.0}\n", + "cell_spec = {\n", + " 'normal_0_op_x': 'avg_pool_3x3',\n", + " 'normal_0_input_x': 0,\n", + " 'normal_0_op_y': 'conv_7x1_1x7',\n", + " 'normal_0_input_y': 1,\n", + " 'normal_1_op_x': 'sep_conv_3x3',\n", + " 'normal_1_input_x': 2,\n", + " 'normal_1_op_y': 'sep_conv_5x5',\n", + " 'normal_1_input_y': 0,\n", + " 'normal_2_op_x': 'dil_sep_conv_3x3',\n", + " 'normal_2_input_x': 2,\n", + " 'normal_2_op_y': 'dil_sep_conv_3x3',\n", + " 'normal_2_input_y': 2,\n", + " 'normal_3_op_x': 'skip_connect',\n", + " 'normal_3_input_x': 4,\n", + " 'normal_3_op_y': 'dil_sep_conv_3x3',\n", + " 'normal_3_input_y': 4,\n", + " 'normal_4_op_x': 'conv_7x1_1x7',\n", + " 'normal_4_input_x': 2,\n", + " 'normal_4_op_y': 'sep_conv_3x3',\n", + " 'normal_4_input_y': 4,\n", + " 'normal_concat': [3, 5, 6],\n", + " 'reduce_0_op_x': 'avg_pool_3x3',\n", + " 'reduce_0_input_x': 0,\n", + " 'reduce_0_op_y': 'dil_sep_conv_3x3',\n", + " 'reduce_0_input_y': 1,\n", + " 'reduce_1_op_x': 'sep_conv_3x3',\n", + " 'reduce_1_input_x': 0,\n", + " 'reduce_1_op_y': 'sep_conv_3x3',\n", + " 'reduce_1_input_y': 0,\n", + " 'reduce_2_op_x': 'skip_connect',\n", + " 'reduce_2_input_x': 2,\n", + " 'reduce_2_op_y': 'sep_conv_7x7',\n", + " 'reduce_2_input_y': 0,\n", + " 'reduce_3_op_x': 'conv_7x1_1x7',\n", + " 'reduce_3_input_x': 4,\n", + " 'reduce_3_op_y': 'skip_connect',\n", + " 'reduce_3_input_y': 4,\n", + " 'reduce_4_op_x': 'conv_7x1_1x7',\n", + " 'reduce_4_input_x': 0,\n", + " 'reduce_4_op_y': 'conv_7x1_1x7',\n", + " 
'reduce_4_input_y': 5,\n", + " 'reduce_concat': [3, 6]\n", + "}\n", + "\n", + "for t in query_nds_trial_stats('nas_cell', None, None, model_spec, cell_spec, 'cifar10'):\n", + " assert t['config']['model_spec'] == model_spec\n", + " assert t['config']['cell_spec'] == cell_spec\n", + " pprint.pprint(t)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# count number\n", + "print('NDS (amoeba) count:', len(list(query_nds_trial_stats(None, 'amoeba', None, None, None, None, None))))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NLP" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "metadata": false + } + }, + "source": [ + "Use the following two architectures as examples. \n", + "In the paper, an architecture is called a \"recipe\" and has a nested structure; it is un-nested (flattened) in the NNI benchmarks.\n", + "An architecture has multiple nodes; each node has `node_*_input_*` and `node_*_op` entries. You can refer to the doc for more details.\n", + "\n", + "arch1 : \n", + "\n", + "\n", + "arch2 : \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'config': {'arch': {'h_new_0_input_0': 'node_3',\n 'h_new_0_input_1': 'node_2',\n 'h_new_0_input_2': 'node_1',\n 'h_new_0_op': 'blend',\n 'node_0_input_0': 'x',\n 'node_0_input_1': 'h_prev_0',\n 'node_0_op': 'linear',\n 'node_1_input_0': 'node_0',\n 'node_1_op': 'activation_tanh',\n 'node_2_input_0': 'h_prev_0',\n 'node_2_input_1': 'node_1',\n 'node_2_input_2': 'x',\n 'node_2_op': 'linear',\n 'node_3_input_0': 'node_2',\n 'node_3_op': 'activation_leaky_relu'},\n 'dataset': 'ptb',\n 'id': 20003},\n 'id': 16291,\n 'test_loss': 4.680262297102549,\n 'train_loss': 4.132040537087838,\n 'training_time': 177.05208373069763,\n 'val_loss': 4.707944253177966}\n" + ] + } + ], + "source": [ + "import pprint\n", + "from nni.nas.benchmarks.nlp import query_nlp_trial_stats\n", + "\n", + "arch1 = {'h_new_0_input_0': 'node_3', 'h_new_0_input_1': 'node_2', 'h_new_0_input_2': 'node_1', 'h_new_0_op': 'blend', 'node_0_input_0': 'x', 'node_0_input_1': 'h_prev_0', 'node_0_op': 'linear','node_1_input_0': 'node_0', 'node_1_op': 'activation_tanh', 'node_2_input_0': 'h_prev_0', 'node_2_input_1': 'node_1', 'node_2_input_2': 'x', 'node_2_op': 'linear', 'node_3_input_0': 'node_2', 'node_3_op': 'activation_leaky_relu'}\n", + "for i in query_nlp_trial_stats(arch=arch1, dataset=\"ptb\"):\n", + " pprint.pprint(i)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[{'current_epoch': 46,\n 'id': 1796,\n 'test_loss': 6.233430054978619,\n 'train_loss': 6.4866799231542664,\n 'training_time': 146.5680329799652,\n 'val_loss': 6.326836978687959},\n {'current_epoch': 47,\n 'id': 1797,\n 'test_loss': 6.2402057403023825,\n 'train_loss': 6.485401405247535,\n 'training_time': 146.05511450767517,\n 'val_loss': 6.3239741605870865},\n {'current_epoch': 48,\n 'id': 1798,\n 'test_loss': 6.351145308363877,\n 'train_loss': 6.611281181173992,\n 'training_time': 145.8849437236786,\n 'val_loss': 6.436160816865809},\n {'current_epoch': 49,\n 'id': 1799,\n 'test_loss': 6.227155079159031,\n 'train_loss': 6.473414458249545,\n 'training_time': 145.51414465904236,\n 'val_loss': 6.313294354607077}]\n" + ] + } + ], + "source": [ + "arch2 = 
{\"h_new_0_input_0\":\"node_0\",\"h_new_0_input_1\":\"node_1\",\"h_new_0_op\":\"elementwise_sum\",\"node_0_input_0\":\"x\",\"node_0_input_1\":\"h_prev_0\",\"node_0_op\":\"linear\",\"node_1_input_0\":\"node_0\",\"node_1_op\":\"activation_tanh\"}\n", + "for i in query_nlp_trial_stats(arch=arch2, dataset='wikitext-2', include_intermediates=True):\n", + " pprint.pprint(i['intermediates'][45:49])" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "pycharm": {}, + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Elapsed time: 5.60982608795166 seconds\n" + ] + } + ], + "source": [ + "print('Elapsed time: ', time.time() - ti, 'seconds')" + ] + } + ], + "metadata": { + "file_extension": ".py", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "name": "python", + "version": "3.8.5-final" + }, + "mimetype": "text/x-python", + "name": "python", + "npconvert_exporter": "python", + "orig_nbformat": 2, + "pygments_lexer": "ipython3", + "version": 3 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/en_US/NAS/DARTS.rst b/docs/en_US/NAS/DARTS.rst new file mode 100644 index 0000000000000000000000000000000000000000..20dfef7ba435109ae19a06f6eba8a6d89937657b --- /dev/null +++ b/docs/en_US/NAS/DARTS.rst @@ -0,0 +1,66 @@ +DARTS +===== + +Introduction +------------ + +The paper `DARTS: Differentiable Architecture Search `__ addresses the scalability challenge of architecture search by formulating the task in a differentiable manner. Their method is based on the continuous relaxation of the architecture representation, allowing efficient search of the architecture using gradient descent. + +Authors' code optimizes the network weights and architecture weights alternatively in mini-batches. They further explore the possibility that uses second order optimization (unroll) instead of first order, to improve the performance. + +Implementation on NNI is based on the `official implementation `__ and a `popular 3rd-party repo `__. DARTS on NNI is designed to be general for arbitrary search space. A CNN search space tailored for CIFAR10, same as the original paper, is implemented as a use case of DARTS. + +Reproduction Results +-------------------- + +The above-mentioned example is meant to reproduce the results in the paper, we do experiments with first and second order optimization. Due to the time limit, we retrain *only the best architecture* derived from the search phase and we repeat the experiment *only once*. Our results is currently on par with the results reported in paper. We will add more results later when ready. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - + - In paper + - Reproduction + * - First order (CIFAR10) + - 3.00 +/- 0.14 + - 2.78 + * - Second order (CIFAR10) + - 2.76 +/- 0.09 + - 2.80 + + +Examples +-------- + +CNN Search Space +^^^^^^^^^^^^^^^^ + +:githublink:`Example code ` + +.. code-block:: bash + + # In case NNI code is not cloned. If the code is cloned already, ignore this line and enter code folder. + git clone https://github.com/Microsoft/nni.git + + # search the best architecture + cd examples/nas/oneshot/darts + python3 search.py + + # train the best architecture + python3 retrain.py --arc-checkpoint ./checkpoints/epoch_49.json + +Reference +--------- + +PyTorch +^^^^^^^ + +.. 
autoclass:: nni.retiarii.oneshot.pytorch.DartsTrainer + :noindex: + +Limitations +----------- + + +* DARTS doesn't support DataParallel and needs to be customized in order to support DistributedDataParallel. diff --git a/docs/en_US/NAS/ENAS.rst b/docs/en_US/NAS/ENAS.rst new file mode 100644 index 0000000000000000000000000000000000000000..60415e54503f8e227436fbce340e93866554d769 --- /dev/null +++ b/docs/en_US/NAS/ENAS.rst @@ -0,0 +1,43 @@ +ENAS +==== + +Introduction +------------ + +The paper `Efficient Neural Architecture Search via Parameter Sharing `__ uses parameter sharing between child models to accelerate the NAS process. In ENAS, a controller learns to discover neural network architectures by searching for an optimal subgraph within a large computational graph. The controller is trained with policy gradient to select a subgraph that maximizes the expected reward on the validation set. Meanwhile, the model corresponding to the selected subgraph is trained to minimize a canonical cross-entropy loss. + +The implementation on NNI is based on the `official implementation in TensorFlow `__\ , including a general-purpose reinforcement-learning controller and a trainer that trains the target network and this controller alternately. Following the paper, we have also implemented the macro and micro search spaces on CIFAR10 to demonstrate how to use these trainers. Since code to train from scratch on NNI is not ready yet, reproduction results are currently unavailable. + +Examples +-------- + +CIFAR10 Macro/Micro Search Space +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:githublink:`Example code ` + +.. code-block:: bash + + # In case NNI code is not cloned. If the code is cloned already, ignore this line and enter code folder. + git clone https://github.com/Microsoft/nni.git + + # search the best architecture + cd examples/nas/oneshot/enas + + # search in macro search space + python3 search.py --search-for macro + + # search in micro search space + python3 search.py --search-for micro + + # view more options for search + python3 search.py -h + +Reference +--------- + +PyTorch +^^^^^^^ + +.. autoclass:: nni.retiarii.oneshot.pytorch.EnasTrainer + :noindex: diff --git a/docs/en_US/NAS/ExecutionEngines.rst b/docs/en_US/NAS/ExecutionEngines.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0e2bb5f5694359dd2f7faac35cc257659050ab6 --- /dev/null +++ b/docs/en_US/NAS/ExecutionEngines.rst @@ -0,0 +1,106 @@ +Execution Engines +================= + +An execution engine is responsible for running a Retiarii experiment. NNI supports three execution engines; users can choose a specific engine according to the type of their model mutation definition and their requirements for cross-model optimizations. + +* **Pure-python execution engine** is the default engine; it supports model spaces expressed by the `inline mutation APIs <./MutationPrimitives.rst>`__. + +* **Graph-based execution engine** supports the use of `inline mutation APIs <./MutationPrimitives.rst>`__ and model spaces represented by `mutators <./Mutators.rst>`__. It requires the user's model to be parsed by `TorchScript `__. + +* **CGO execution engine** has the same requirements and capabilities as the graph-based execution engine, but it further enables cross-model optimizations, which makes model space exploration faster. + +Pure-python Execution Engine +---------------------------- + +The pure-python execution engine is the default engine. We recommend that users new to NNI NAS keep using this execution engine.
The pure-python execution engine works within the scope of the inline mutation APIs and does not touch the rest of the user's model; thus, it places minimal requirements on the user's model. + +Only one step is needed to use this engine: + +1. Add the ``@nni.retiarii.model_wrapper`` decorator outside the whole PyTorch model. + +.. note:: You should always use ``super().__init__()`` instead of ``super(MyNetwork, self).__init__()`` in the PyTorch model, because the latter one has issues with the model wrapper. + +Graph-based Execution Engine +---------------------------- + +The graph-based execution engine converts the user-defined model to a graph representation (called graph IR) using `TorchScript `__; each instantiated module in the model is converted to a subgraph. Then mutations are applied to the graph to generate new graphs. Each new graph is then converted back to PyTorch code and executed on the user-specified training service. + +Users may find ``@basic_unit`` helpful in some cases. ``@basic_unit`` here means the module will not be converted to a subgraph; instead, it is converted to a single graph node as a basic unit. + +``@basic_unit`` is usually used in the following cases: + +* When users want to tune initialization parameters of a module using ``ValueChoice``, decorate the module with ``@basic_unit``. For example, in ``self.conv = MyConv(kernel_size=nn.ValueChoice([1, 3, 5]))``, ``MyConv`` should be decorated. + +* When a module cannot be successfully parsed to a subgraph, decorate the module with ``@basic_unit``. The parse failure could be due to complex control flow. Currently, Retiarii does not support ad-hoc loops; if there is an ad-hoc loop in a module's ``forward``, the class should be decorated as a basic unit. For example, the following ``MyModule`` should be decorated. + + .. code-block:: python + + @basic_unit + class MyModule(nn.Module): + def __init__(self): + ... + def forward(self, x): + for i in range(10): # <- ad-hoc loop + ... + +* Some inline mutation APIs require the modules they handle to be decorated with ``@basic_unit``. For example, a user-defined module that is provided to ``LayerChoice`` as a candidate op should be decorated. + +Three steps are needed to use the graph-based execution engine. + +1. Remove ``@nni.retiarii.model_wrapper`` if there is any in your model. +2. Add ``config.execution_engine = 'base'`` to ``RetiariiExeConfig``. The default value of ``execution_engine`` is ``'py'``, which means the pure-python execution engine. +3. Add ``@basic_unit`` when necessary, following the above guidelines. + +For exporting top models, the graph-based execution engine supports exporting source code for top models by running ``exp.export_top_models(formatter='code')``. + +CGO Execution Engine (experimental) +----------------------------------- + +The CGO (Cross-Graph Optimization) execution engine performs cross-model optimizations on top of the graph-based execution engine. In the CGO execution engine, multiple models can be merged and trained together in one trial. +Currently, it only supports ``DedupInputOptimizer``, which merges graphs sharing the same dataset so that each batch of data is loaded and pre-processed only once, avoiding a bottleneck on data loading. + +.. note :: To use the CGO engine, PyTorch Lightning 1.4.2 or above is required. + +To enable the CGO execution engine, you need to follow these steps: + +1. Create a RetiariiExeConfig with the remote training service. The CGO execution engine currently only supports the remote training service. +2. Add configurations for the remote training service +3.
Add configurations for the CGO engine + + .. code-block:: python + + exp = RetiariiExperiment(base_model, trainer, mutators, strategy) + config = RetiariiExeConfig('remote') + + # ... + # other configurations of RetiariiExeConfig + + config.execution_engine = 'cgo' # set execution engine to CGO + config.max_concurrency_cgo = 3 # the maximum number of concurrent models to merge + config.batch_waiting_time = 10 # how many seconds CGO execution engine should wait before optimizing a new batch of models + + rm_conf = RemoteMachineConfig() + + # ... + # server configuration in rm_conf + rm_conf.gpu_indices = [0, 1, 2, 3] # gpu_indices must be set in RemoteMachineConfig for CGO execution engine + + config.training_service.machine_list = [rm_conf] + exp.run(config, 8099) + +The CGO execution engine only supports PyTorch Lightning trainers that inherit :class:`nni.retiarii.evaluator.pytorch.cgo.evaluator.MultiModelSupervisedLearningModule`. +For a trial running multiple models, the trainers inheriting :class:`nni.retiarii.evaluator.pytorch.cgo.evaluator.MultiModelSupervisedLearningModule` can handle the multiple outputs from the merged model for training, testing and validation. +We have already implemented two trainers: :class:`nni.retiarii.evaluator.pytorch.cgo.evaluator.Classification` and :class:`nni.retiarii.evaluator.pytorch.cgo.evaluator.Regression`. + +.. code-block:: python + + from nni.retiarii.evaluator.pytorch.cgo.evaluator import Classification + + trainer = Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2) + +Advanced users can also implement their own trainers by inheriting ``MultiModelSupervisedLearningModule``. + +Sometimes, a mutated model cannot be executed (e.g., due to shape mismatch). When a trial running multiple models contains +a bad model, the CGO execution engine will re-run each model independently in separate trials without cross-model optimizations. diff --git a/docs/en_US/NAS/ExplorationStrategies.rst b/docs/en_US/NAS/ExplorationStrategies.rst new file mode 100644 index 0000000000000000000000000000000000000000..68f0114eff3bdb2d7024ec88978a2225d68fb1c5 --- /dev/null +++ b/docs/en_US/NAS/ExplorationStrategies.rst @@ -0,0 +1,74 @@ +Exploration Strategies for Multi-trial NAS +========================================== + +Usage of Exploration Strategy +----------------------------- + +To use an exploration strategy, users simply instantiate an exploration strategy and pass the instantiated object to ``RetiariiExperiment``. Below is a simple example. + +.. code-block:: python + + import nni.retiarii.strategy as strategy + + exploration_strategy = strategy.Random(dedup=True) # dedup=False if deduplication is not wanted + +Supported Exploration Strategies +-------------------------------- + +NNI provides the following exploration strategies for multi-trial NAS. + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name + - Brief Introduction of Algorithm + * - `Random Strategy <./ApiReference.rst#nni.retiarii.strategy.Random>`__ + - Randomly sampling new model(s) from the user-defined model space. (``nni.retiarii.strategy.Random``) + * - `Grid Search <./ApiReference.rst#nni.retiarii.strategy.GridSearch>`__ + - Sampling new model(s) from the user-defined model space using the grid search algorithm.
(``nni.retiarii.strategy.GridSearch``) + * - `Regularized Evolution <./ApiReference.rst#nni.retiarii.strategy.RegularizedEvolution>`__ + - Generating new model(s) from generated models using the `regularized evolution algorithm `__ . (``nni.retiarii.strategy.RegularizedEvolution``) + * - `TPE Strategy <./ApiReference.rst#nni.retiarii.strategy.TPEStrategy>`__ + - Sampling new model(s) from the user-defined model space using the `TPE algorithm `__ . (``nni.retiarii.strategy.TPEStrategy``) + * - `RL Strategy <./ApiReference.rst#nni.retiarii.strategy.PolicyBasedRL>`__ + - It uses the `PPO algorithm `__ to sample new model(s) from the user-defined model space. (``nni.retiarii.strategy.PolicyBasedRL``) + +Customize Exploration Strategy +------------------------------ + +If users want to implement a new exploration strategy, they can easily customize one following the interface provided by NNI. Specifically, users should inherit the base strategy class ``BaseStrategy``, then implement the member function ``run``. This member function takes ``base_model`` and ``applied_mutators`` as its input arguments. It can simply apply the user-specified mutators in ``applied_mutators`` onto ``base_model`` to generate a new model. When a mutator is applied, it should be bound with a sampler (e.g., ``RandomSampler``). Every sampler implements the ``choice`` function, which chooses value(s) from candidate values. The ``choice`` functions invoked in mutators are executed with the sampler. + +Below is a very simple random strategy, which makes the choices completely random. + +.. code-block:: python + + import logging + import random + import time + + from nni.retiarii import Sampler + # NNI-provided strategy APIs used below (import paths may vary across NNI versions) + from nni.retiarii.strategy.base import BaseStrategy + from nni.retiarii.execution import query_available_resources, submit_models + + _logger = logging.getLogger(__name__) + + class RandomSampler(Sampler): + def choice(self, candidates, mutator, model, index): + return random.choice(candidates) + + class RandomStrategy(BaseStrategy): + def __init__(self): + self.random_sampler = RandomSampler() + + def run(self, base_model, applied_mutators): + _logger.info('strategy start...') + while True: + avail_resource = query_available_resources() + if avail_resource > 0: + model = base_model + _logger.info('apply mutators...') + _logger.info('mutators: %s', str(applied_mutators)) + for mutator in applied_mutators: + mutator.bind_sampler(self.random_sampler) + model = mutator.apply(model) + # run models + submit_models(model) + else: + time.sleep(2) + +Note that this strategy does not know the search space beforehand; it passively makes decisions every time ``choice`` is invoked from mutators. If a strategy wants to know the whole search space before making any decision (e.g., TPE, SMAC), it can use the ``dry_run`` function provided by ``Mutator`` to obtain the space. An example strategy can be found :githublink:`here `. + +After generating a new model, the strategy can use our provided APIs (e.g., ``submit_models``, ``is_stopped_exec``) to submit the model and get its reported results. More APIs can be found in `API References <./ApiReference.rst>`__. diff --git a/docs/en_US/NAS/FBNet.rst b/docs/en_US/NAS/FBNet.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e92b1cd4bcc4f4fa45ad6f50a02878b97d0e068 --- /dev/null +++ b/docs/en_US/NAS/FBNet.rst @@ -0,0 +1,153 @@ +FBNet +====== + +.. note:: This one-shot NAS is still implemented under NNI NAS 1.0, and will `be migrated to Retiarii framework in v2.4 `__. + +For the mobile application of facial landmarks, based on the basic architecture of the PFLD model, we have applied FBNet (block-wise DNAS) to design a concise model with a good trade-off between latency and accuracy.
References are listed below: + + +* `FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search `__ +* `PFLD: A Practical Facial Landmark Detector `__ + +FBNet is a block-wise differentiable NAS method (block-wise DNAS), where the best candidate building blocks can be chosen by using Gumbel Softmax random sampling and differentiable training. At each layer (or stage) to be searched, the diverse candidate blocks are placed side by side (similar in effect to structural re-parameterization), leading to sufficient pre-training of the supernet. The pre-trained supernet is then sampled to fine-tune the subnet for better performance. + +.. image:: ../../img/fbnet.png + :target: ../../img/fbnet.png + :alt: + + +PFLD is a lightweight facial landmark model for real-time applications. The architecture of PFLD is first simplified for acceleration by using the stem block of PeleeNet, average pooling with depthwise convolution, and the eSE module. + +To achieve a better trade-off between latency and accuracy, FBNet is further applied to the simplified PFLD to search for the best block at each specific layer. The search space is based on the FBNet space, and optimized for mobile deployment by using average pooling with depthwise convolution, the eSE module, etc. + + +Experiments +------------ + +To verify the effectiveness of FBNet applied to PFLD, we choose the open-source dataset with 106 landmark points as the benchmark: + +* `Grand Challenge of 106-Point Facial Landmark Localization `__ + +The baseline model is denoted as MobileNet-V3 PFLD (`Reference baseline `__), and the searched model is denoted as Subnet. The experimental results are listed below, where the latency is tested on a Qualcomm 625 CPU (ARMv8): + + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Model + - Size + - Latency + - Validation NME + * - MobileNet-V3 PFLD + - 1.01MB + - 10ms + - 6.22% + * - Subnet + - 693KB + - 1.60ms + - 5.58% + + +Example +-------- + +`Example code `__ + +Please run the following scripts in the example directory. + +The Python dependencies used here are listed below: + +.. code-block:: bash + + numpy==1.18.5 + opencv-python==4.5.1.48 + torch==1.6.0 + torchvision==0.7.0 + onnx==1.8.1 + onnx-simplifier==0.3.5 + onnxruntime==1.7.0 + +Data Preparation +----------------- + +First, download the `106points dataset `__ to the path ``./data/106points``. The dataset includes the train set and test set: + +.. code-block:: bash + + ./data/106points/train_data/imgs + ./data/106points/train_data/list.txt + ./data/106points/test_data/imgs + ./data/106points/test_data/list.txt + + +Quick Start +----------- + +1. Search +^^^^^^^^^^ + +Based on the architecture of the simplified PFLD, the multi-stage search space and the hyper-parameters for searching should first be configured to construct the supernet. For example: + ..
code-block:: python + + from lib.builder import search_space + from lib.ops import PRIMITIVES + from lib.supernet import PFLDInference, AuxiliaryNet + from nni.algorithms.nas.pytorch.fbnet import LookUpTable, NASConfig + + # configuration of hyper-parameters + # search_space defines the multi-stage search space + nas_config = NASConfig( + model_dir="./ckpt_save", + nas_lr=0.01, + mode="mul", + alpha=0.25, + beta=0.6, + search_space=search_space, + ) + # lookup table to manage the information + lookup_table = LookUpTable(config=nas_config, primitives=PRIMITIVES) + # create the supernet + pfld_backbone = PFLDInference(lookup_table) + + +After creating the supernet with the specified search space and hyper-parameters, we can run the command below to start searching and training the supernet: + +.. code-block:: bash + + python train.py --dev_id "0,1" --snapshot "./ckpt_save" --data_root "./data/106points" + +The validation accuracy will be shown during training, and the model with the best accuracy will be saved as ``./ckpt_save/supernet/checkpoint_best.pth``. + + +2. Finetune +^^^^^^^^^^^^ + +After pre-training the supernet, we can run the command below to sample the subnet and conduct fine-tuning: + +.. code-block:: bash + + python retrain.py --dev_id "0,1" --snapshot "./ckpt_save" --data_root "./data/106points" \ + --supernet "./ckpt_save/supernet/checkpoint_best.pth" + +The validation accuracy will be shown during training, and the model with the best accuracy will be saved as ``./ckpt_save/subnet/checkpoint_best.pth``. + + +3. Export +^^^^^^^^^^ + +After fine-tuning the subnet, we can run the command below to export the ONNX model: + +.. code-block:: bash + + python export.py --supernet "./ckpt_save/supernet/checkpoint_best.pth" \ + --resume "./ckpt_save/subnet/checkpoint_best.pth" + +The ONNX model is saved as ``./output/subnet.onnx``, which can be further converted to a mobile inference engine by using `MNN `__ . + +The checkpoints of the pre-trained supernet and subnet are offered as below: + +* `Supernet `__ +* `Subnet `__ +* `ONNX model `__ \ No newline at end of file diff --git a/docs/en_US/NAS/HardwareAwareNAS.rst b/docs/en_US/NAS/HardwareAwareNAS.rst new file mode 100644 index 0000000000000000000000000000000000000000..afcadb8c6cd0006db31262f0668bb2b4eb0b1068 --- /dev/null +++ b/docs/en_US/NAS/HardwareAwareNAS.rst @@ -0,0 +1,79 @@ +Hardware-aware NAS +================== + +.. contents:: + +End-to-end Multi-trial SPOS Demo +-------------------------------- + +To empower affordable DNNs on edge and mobile devices, hardware-aware NAS searches for models with both high accuracy and low latency. In particular, the search algorithm only considers the models within the target latency constraints during the search process. + +To run this demo, first install nn-Meter by running: + +.. code-block:: bash + + pip install nn-meter + +Then run the multi-trial SPOS demo: + +.. code-block:: bash + + python ${NNI_ROOT}/examples/nas/oneshot/spos/multi_trial.py + + +How the demo works +^^^^^^^^^^^^^^^^^^ + +To support hardware-aware NAS, you first need a ``Strategy`` that supports filtering the models by latency. We provide such a filter named ``LatencyFilter`` in NNI and initialize a ``Random`` strategy with the filter: + ..
code-block:: python + + simple_strategy = strategy.Random(model_filter=LatencyFilter(threshold=100, predictor=base_predictor)) + +``LatencyFilter`` will predict the models' latency by using nn-Meter and filter out the models whose latency is larger than the threshold (i.e., ``100`` in this example). +You can also build your own strategies and filters to support more flexible NAS, such as sorting the models according to latency. + +Then, pass this strategy to ``RetiariiExperiment``: + +.. code-block:: python + + exp = RetiariiExperiment(base_model, trainer, strategy=simple_strategy) + + exp_config = RetiariiExeConfig('local') + ... + exp_config.dummy_input = [1, 3, 32, 32] + + exp.run(exp_config, port) + +In ``exp_config``, ``dummy_input`` is required for tracing shape info. + + +End-to-end ProxylessNAS with Latency Constraints +------------------------------------------------ + +`ProxylessNAS `__ is a hardware-aware one-shot NAS algorithm. ProxylessNAS applies the expected latency of the model to build a differentiable metric and design efficient neural network architectures for hardware. The latency loss is added as a regularization term for architecture parameter optimization. In this example, nn-Meter provides a latency estimator to predict the expected latency of the mixed operation on other types of mobile and edge hardware. + +To run the one-shot ProxylessNAS demo, first install nn-Meter by running: + +.. code-block:: bash + + pip install nn-meter + +Then run the one-shot ProxylessNAS demo: + +.. code-block:: bash + + python ${NNI_ROOT}/examples/nas/oneshot/proxylessnas/main.py --applied_hardware --reference_latency + +How the demo works +^^^^^^^^^^^^^^^^^^ + +In the implementation of the ProxylessNAS ``trainer``, we provide a ``HardwareLatencyEstimator``, which currently builds a lookup table that stores the measured latency of each candidate building block in the search space. The latency sum of all building blocks in a candidate model will be treated as the model inference latency. The latency prediction is obtained by ``nn-Meter``. ``HardwareLatencyEstimator`` predicts the expected latency of the mixed operation based on the path weight of `ProxylessLayerChoice`. By leveraging ``nn-Meter`` in NNI, users can apply ProxylessNAS to search for efficient DNN models on more types of edge devices. + +Besides ``applied_hardware`` and ``reference_latency``, there are some other parameters related to hardware-aware ProxylessNAS training in this :githublink:`example `: + +* ``grad_reg_loss_type``: Regularization type to add hardware-related loss. Allowed types include ``"mul#log"`` and ``"add#linear"``. The ``"mul#log"`` type is calculated as ``(torch.log(expected_latency) / math.log(reference_latency)) ** beta``. The ``"add#linear"`` type is calculated as ``reg_lambda * (expected_latency - reference_latency) / reference_latency`` (see the sketch after this list). +* ``grad_reg_loss_lambda``: Regularization parameter, set to ``0.1`` by default. +* ``grad_reg_loss_alpha``: Regularization parameter, set to ``0.2`` by default. +* ``grad_reg_loss_beta``: Regularization parameter, set to ``0.3`` by default. +* ``dummy_input``: The dummy input shape when applied to the target hardware. This parameter is set to ``(1, 3, 224, 224)`` by default.
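+To make the two regularization types concrete, the sketch below spells out how a task loss could be combined with the latency term, following the formulas above and the multiplicative/additive forms suggested by the type names. It is a hand-written illustration rather than the trainer's actual code; ``task_loss`` and ``expected_latency`` are assumed to be scalar tensors, and the defaults mirror the parameters listed above: + +.. code-block:: python + + import math + + import torch + + def hardware_regularized_loss(task_loss, expected_latency, reference_latency, + loss_type='mul#log', beta=0.3, reg_lambda=0.1): + if loss_type == 'mul#log': + # multiplicative, logarithmic latency term + return task_loss * (torch.log(expected_latency) / math.log(reference_latency)) ** beta + if loss_type == 'add#linear': + # additive, linear latency penalty + return task_loss + reg_lambda * (expected_latency - reference_latency) / reference_latency + raise ValueError('unknown regularization type: ' + loss_type)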
diff --git a/docs/en_US/NAS/Hypermodules.rst b/docs/en_US/NAS/Hypermodules.rst new file mode 100644 index 0000000000000000000000000000000000000000..e87bf347255a48d22fdac7cd4b69abbd25377382 --- /dev/null +++ b/docs/en_US/NAS/Hypermodules.rst @@ -0,0 +1,9 @@ +Hypermodules +============ + +A hypermodule is a (PyTorch) module that contains many architecture/hyperparameter candidates for this module. By using hypermodules in a user-defined model, NNI helps users automatically find the best architecture/hyperparameters of the hypermodules for this model. This follows the design philosophy of Retiarii that users write a DNN model as a space. + +Several hypermodules have been proposed in the NAS community, such as AutoActivation and AutoDropout. Some of them are implemented in the Retiarii framework. + +.. autoclass:: nni.retiarii.nn.pytorch.AutoActivation + :members: \ No newline at end of file diff --git a/docs/en_US/NAS/ModelEvaluators.rst b/docs/en_US/NAS/ModelEvaluators.rst new file mode 100644 index 0000000000000000000000000000000000000000..670014d9711af6c18cdc071cfd3122c1d13c8d8d --- /dev/null +++ b/docs/en_US/NAS/ModelEvaluators.rst @@ -0,0 +1,147 @@ +Model Evaluators +================ + +A model evaluator trains and validates each generated model; it is necessary to evaluate the performance of newly explored models. + +Customize Evaluator with Any Function +------------------------------------- + +The simplest way to customize a new evaluator is with functional APIs, which is very easy when training code is already available. Users only need to write a fit function that wraps everything, which usually includes training, validating and testing of a single model. This function takes one positional argument (``model_cls``) and possible keyword arguments. The keyword arguments (other than ``model_cls``) are fed to ``FunctionalEvaluator`` as its initialization parameters (note that they will be `serialized <./Serialization.rst>`__). In this way, users get everything under their control, but expose less information to the framework; as a result, further optimizations like `CGO <./ExecutionEngines.rst#cgo-execution-engine-experimental>`__ might not be feasible. An example is shown below: + +.. code-block:: python + + from nni.retiarii.evaluator import FunctionalEvaluator + from nni.retiarii.experiment.pytorch import RetiariiExperiment + + def fit(model_cls, dataloader): + model = model_cls() + train(model, dataloader) + acc = test(model, dataloader) + nni.report_final_result(acc) + + # The dataloader will be serialized, thus ``nni.trace`` is needed here. + # See serialization tutorial for more details. + evaluator = FunctionalEvaluator(fit, dataloader=nni.trace(DataLoader)(foo, bar)) + experiment = RetiariiExperiment(base_model, evaluator, mutators, strategy) + +.. tip:: + + When using customized evaluators, if you want to visualize models, you need to export your model and save it into ``$NNI_OUTPUT_DIR/model.onnx`` in your evaluator. An example here: + + .. code-block:: python + + def fit(model_cls): + model = model_cls() + onnx_path = Path(os.environ.get('NNI_OUTPUT_DIR', '.')) / 'model.onnx' + onnx_path.parent.mkdir(exist_ok=True) + dummy_input = torch.randn(10, 3, 224, 224) + torch.onnx.export(model, dummy_input, onnx_path) + # the rest of training code here + + If the conversion is successful, the model can then be visualized with the powerful tool `Netron `__.
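+Periodical metrics can be reported from a functional evaluator as well, using ``nni.report_intermediate_result``. Below is a minimal sketch; ``train_one_epoch`` and ``evaluate`` are hypothetical helpers standing in for your own training loop: + +.. code-block:: python + + import nni + + def fit(model_cls): + model = model_cls() + acc = 0.0 + for epoch in range(10): + train_one_epoch(model) # hypothetical training helper + acc = evaluate(model) # hypothetical validation helper + nni.report_intermediate_result(acc) # periodical metric, once per epoch + nni.report_final_result(acc) # final metric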
+ +Evaluators with PyTorch-Lightning +--------------------------------- + +Use Built-in Evaluators +^^^^^^^^^^^^^^^^^^^^^^^ + +NNI provides some commonly used model evaluators for users' convenience. These evaluators are built upon the awesome library PyTorch-Lightning. + +We recommend reading the `serialization tutorial <./Serialization.rst>`__ before using these evaluators. A few notes to summarize the tutorial: + +1. ``pl.DataLoader`` should be used in place of ``torch.utils.data.DataLoader``. +2. The datasets used in the data loader should be decorated with ``nni.trace`` recursively. + +For example, + +.. code-block:: python + + import nni.retiarii.evaluator.pytorch.lightning as pl + from torchvision import transforms + + transform = nni.trace(transforms.Compose, [nni.trace(transforms.ToTensor()), nni.trace(transforms.Normalize, (0.1307,), (0.3081,))]) + train_dataset = nni.trace(MNIST, root='data/mnist', train=True, download=True, transform=transform) + test_dataset = nni.trace(MNIST, root='data/mnist', train=False, download=True, transform=transform) + + # pl.DataLoader and pl.Classification are already traced and support serialization. + evaluator = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=10) + +.. autoclass:: nni.retiarii.evaluator.pytorch.lightning.Classification + :noindex: + +.. autoclass:: nni.retiarii.evaluator.pytorch.lightning.Regression + :noindex: + +Customize Evaluator with PyTorch-Lightning +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Another approach is to write training code in PyTorch-Lightning style, that is, to write a LightningModule that defines all elements needed for training (e.g., loss function, optimizer) and to define a trainer that takes (optional) dataloaders to execute the training. Before that, please read the `document of PyTorch-lightning `__ to learn the basic concepts and components provided by PyTorch-lightning. + +In practice, a new training module in Retiarii should inherit ``nni.retiarii.evaluator.pytorch.lightning.LightningModule``, which has a ``set_model`` method that will be called after ``__init__`` to save the candidate model (generated by a strategy) as ``self.model``. The rest of the process (like ``training_step``) should be the same as writing any other lightning module. Evaluators should also communicate with strategies via two API calls (``nni.report_intermediate_result`` for periodical metrics and ``nni.report_final_result`` for final metrics), added in ``on_validation_epoch_end`` and ``teardown`` respectively. + +An example is as follows: + +.. code-block:: python + + from nni.retiarii.evaluator.pytorch.lightning import LightningModule # please import this one + + @nni.trace + class AutoEncoder(LightningModule): + def __init__(self): + super().__init__() + self.decoder = nn.Sequential( + nn.Linear(3, 64), + nn.ReLU(), + nn.Linear(64, 28*28) + ) + + def forward(self, x): + embedding = self.model(x) # let's search for encoder + return embedding + + def training_step(self, batch, batch_idx): + # training_step defines the train loop.
+ # It is independent of forward + x, y = batch + x = x.view(x.size(0), -1) + z = self.model(x) # model is the one that is searched for + x_hat = self.decoder(z) + loss = F.mse_loss(x_hat, x) + # Logging to TensorBoard by default + self.log('train_loss', loss) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + x = x.view(x.size(0), -1) + z = self.model(x) + x_hat = self.decoder(z) + loss = F.mse_loss(x_hat, x) + self.log('val_loss', loss) + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) + return optimizer + + def on_validation_epoch_end(self): + nni.report_intermediate_result(self.trainer.callback_metrics['val_loss'].item()) + + def teardown(self, stage): + if stage == 'fit': + nni.report_final_result(self.trainer.callback_metrics['val_loss'].item()) + +Then, users need to wrap everything (including LightningModule, trainer and dataloaders) into a ``Lightning`` object, and pass this object into a Retiarii experiment. + +.. code-block:: python + + import nni.retiarii.evaluator.pytorch.lightning as pl + from nni.retiarii.experiment.pytorch import RetiariiExperiment + + lightning = pl.Lightning(AutoEncoder(), + pl.Trainer(max_epochs=10), + train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100)) + experiment = RetiariiExperiment(base_model, lightning, mutators, strategy) diff --git a/docs/en_US/NAS/MutationPrimitives.rst b/docs/en_US/NAS/MutationPrimitives.rst new file mode 100644 index 0000000000000000000000000000000000000000..f04315a251566af77156a42e4dbec6b0a855130a --- /dev/null +++ b/docs/en_US/NAS/MutationPrimitives.rst @@ -0,0 +1,51 @@ +Mutation Primitives +=================== + +To let users easily express a model space within their PyTorch/TensorFlow model, NNI provides the inline mutation APIs shown below. + +* `nn.LayerChoice <./ApiReference.rst#nni.retiarii.nn.pytorch.LayerChoice>`__. It allows users to put several candidate operations (e.g., PyTorch modules); one of them is chosen in each explored model. + + .. code-block:: python + + # import nni.retiarii.nn.pytorch as nn + # declared in `__init__` method + self.layer = nn.LayerChoice([ + ops.PoolBN('max', channels, 3, stride, 1), + ops.SepConv(channels, channels, 3, stride, 1), + nn.Identity() + ]) + # invoked in `forward` method + out = self.layer(x) + +* `nn.InputChoice <./ApiReference.rst#nni.retiarii.nn.pytorch.InputChoice>`__. It is mainly for choosing (or trying) different connections. It takes several tensors and chooses ``n_chosen`` tensors from them. + + .. code-block:: python + + # import nni.retiarii.nn.pytorch as nn + # declared in `__init__` method + self.input_switch = nn.InputChoice(n_chosen=1) + # invoked in `forward` method, choose one from the three + out = self.input_switch([tensor1, tensor2, tensor3]) + +* `nn.ValueChoice <./ApiReference.rst#nni.retiarii.nn.pytorch.ValueChoice>`__. It is for choosing one value from some candidate values. It can only be used as the input argument of basic units, that is, modules in ``nni.retiarii.nn.pytorch`` and user-defined modules decorated with ``@basic_unit``. + + .. code-block:: python + + # import nni.retiarii.nn.pytorch as nn + # used in `__init__` method + self.conv = nn.Conv2d(XX, XX, kernel_size=nn.ValueChoice([1, 3, 5])) + self.op = MyOp(nn.ValueChoice([0, 1]), nn.ValueChoice([-1, 1])) + +* `nn.Repeat <./ApiReference.rst#nni.retiarii.nn.pytorch.Repeat>`__. Repeats a block a variable number of times, as shown in the sketch below.
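+ For instance, a variable-depth stack of blocks might be declared as in this sketch (``MyBlock`` is a user-defined module; the sketch assumes a block and a ``(min, max)`` depth range, see the API reference for the exact signature): + + .. code-block:: python + + # import nni.retiarii.nn.pytorch as nn + # declared in `__init__` method, repeating the block 1 to 3 times + self.blocks = nn.Repeat(MyBlock(), (1, 3)) + # invoked in `forward` method + out = self.blocks(x)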
+ +* `nn.Cell <./ApiReference.rst#nni.retiarii.nn.pytorch.Cell>`__. `This cell structure is popularly used in NAS literature `__. Specifically, the cell consists of multiple "nodes". Each node is a sum of multiple operators. Each operator is chosen from user-specified candidates and takes one input from previous nodes or predecessors. A predecessor is an input of the cell. The output of the cell is the concatenation of some of the nodes in the cell (currently, all the nodes). + + +All the APIs have an optional argument called ``label``; mutations with the same label will share the same choice. A typical example is: + + .. code-block:: python + + self.net = nn.Sequential( + nn.Linear(10, nn.ValueChoice([32, 64, 128], label='hidden_dim')), + nn.Linear(nn.ValueChoice([32, 64, 128], label='hidden_dim'), 3) + ) diff --git a/docs/en_US/NAS/Mutators.rst b/docs/en_US/NAS/Mutators.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e02f89d467025256491c0471076638205e7ad9b --- /dev/null +++ b/docs/en_US/NAS/Mutators.rst @@ -0,0 +1,64 @@ +Express Mutations with Mutators +=============================== + +Besides the inline mutation APIs demonstrated `here <./MutationPrimitives.rst>`__, NNI provides a more general approach to express a model space, i.e., *Mutator*, to cover more complex model spaces. Those inline mutation APIs are also implemented with mutators in the underlying system and can be seen as a special case of model mutation. + +.. note:: Mutators and inline mutation APIs cannot be used together. + +A mutator is a piece of logic to express how to mutate a given model. Users are free to write their own mutators. A model space is then expressed with a base model and a list of mutators. A model in the model space is sampled by applying the mutators to the base model one after another. An example is shown below. + +.. code-block:: python + + applied_mutators = [] + applied_mutators.append(BlockMutator('mutable_0')) + applied_mutators.append(BlockMutator('mutable_1')) + +``BlockMutator`` is defined by users to express how to mutate the base model. + +Write a mutator +--------------- + +A user-defined mutator should inherit the ``Mutator`` class and implement the mutation logic in the member function ``mutate``. + +.. code-block:: python + + from typing import List + + from nni.retiarii import Mutator + + class BlockMutator(Mutator): + def __init__(self, target: str, candidates: List): + super(BlockMutator, self).__init__() + self.target = target + self.candidate_op_list = candidates + + def mutate(self, model): + nodes = model.get_nodes_by_label(self.target) + for node in nodes: + chosen_op = self.choice(self.candidate_op_list) + node.update_operation(chosen_op.type, chosen_op.params) + +The input of ``mutate`` is the graph IR (intermediate representation) of the base model (please refer to `here <./ApiReference.rst>`__ for the format and APIs of the IR); users can mutate the graph using the graph's member functions (e.g., ``get_nodes_by_label``, ``update_operation``). The mutation operations can be combined with the API ``self.choice``, in order to express a set of possible mutations. In the above example, the node's operation can be changed to any operation from ``candidate_op_list``. + +Use a placeholder to make mutation easier: ``nn.Placeholder``. If you want to mutate a subgraph or node of your model, you can define a placeholder in the model to represent the subgraph or node. Then, use a mutator to replace this placeholder with real modules. + ..
code-block:: python + + ph = nn.Placeholder( + label='mutable_0', + kernel_size_options=[1, 3, 5], + n_layer_options=[1, 2, 3, 4], + exp_ratio=exp_ratio, + stride=stride + ) + +``label`` is used by the mutator to identify this placeholder. The other parameters are the information required by the mutator; they can be accessed from ``node.operation.parameters`` as a dict and can include any information that users want to pass to the user-defined mutator. The complete example code can be found in :githublink:`Mnasnet base model `. + +Starting an experiment is almost the same as using inline mutation APIs. The only difference is that the applied mutators should be passed to ``RetiariiExperiment``. Below is a simple example. + +.. code-block:: python + + exp = RetiariiExperiment(base_model, trainer, applied_mutators, simple_strategy) + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'mnasnet_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 10 + exp_config.training_service.use_active_gpu = False + exp.run(exp_config, 8081) diff --git a/docs/en_US/NAS/OneshotTrainer.rst b/docs/en_US/NAS/OneshotTrainer.rst new file mode 100644 index 0000000000000000000000000000000000000000..e276a48125097af5f6aa1f6ffc865598ae53c046 --- /dev/null +++ b/docs/en_US/NAS/OneshotTrainer.rst @@ -0,0 +1,43 @@ +One-shot NAS +============ + +Before reading this tutorial, we highly recommend that you first go through the tutorial on how to `define a model space <./QuickStart.rst#define-your-model-space>`__. + +Model Search with One-shot Trainer +---------------------------------- + +With a defined model space, users can explore the space in two ways. One is using a strategy and a single-arch evaluator as demonstrated `here <./QuickStart.rst#explore-the-defined-model-space>`__. The other is using a one-shot trainer, which consumes far fewer computational resources than the first one. In this tutorial we focus on this one-shot approach. The principle of the one-shot approach is to combine all the models in a model space into one big model (usually called a super-model or super-graph). Search, training and testing are all handled by training and evaluating this big model. + +We list the supported one-shot trainers here: + +* DARTS trainer +* ENAS trainer +* ProxylessNAS trainer +* Single-path (random) trainer + +See `API reference <./ApiReference.rst>`__ for detailed usages. Here, we show an example of using the DARTS trainer manually. + +.. code-block:: python + + from nni.retiarii.oneshot.pytorch import DartsTrainer + trainer = DartsTrainer( + model=model, + loss=criterion, + metrics=lambda output, target: accuracy(output, target, topk=(1,)), + optimizer=optim, + num_epochs=args.epochs, + dataset=dataset_train, + batch_size=args.batch_size, + log_frequency=args.log_frequency, + unrolled=args.unrolled + ) + trainer.fit() + final_architecture = trainer.export() + +After the search is done, we can use the exported architecture to instantiate the full network for retraining. Here is an example: + +.. code-block:: python + + from nni.retiarii import fixed_arch + with fixed_arch('/path/to/checkpoint.json'): + model = Model() diff --git a/docs/en_US/NAS/Overview.rst b/docs/en_US/NAS/Overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2e48c3a4e869b0f62955ecf4f1f5982afa1eff2 --- /dev/null +++ b/docs/en_US/NAS/Overview.rst @@ -0,0 +1,82 @@ +Retiarii for Neural Architecture Search +======================================= + +.. 
diff --git a/docs/en_US/NAS/Overview.rst b/docs/en_US/NAS/Overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a2e48c3a4e869b0f62955ecf4f1f5982afa1eff2
--- /dev/null
+++ b/docs/en_US/NAS/Overview.rst
@@ -0,0 +1,82 @@
+Retiarii for Neural Architecture Search
+=======================================
+
+.. attention:: NNI's latest NAS support is all based on the Retiarii framework; users who are still on the `early version using NNI NAS v1.0 `__ should migrate their work to Retiarii as soon as possible.
+
+.. contents::
+
+Motivation
+----------
+
+Automatic neural architecture search is playing an increasingly important role in finding better models. Recent research has proven the feasibility of automatic NAS and has led to models that beat many manually designed and tuned models. Representative works include `NASNet `__\ , `ENAS `__\ , `DARTS `__\ , `Network Morphism `__\ , and `Evolution `__. In addition, new innovations continue to emerge.
+
+However, it is pretty hard to use existing NAS work to help develop common DNN models. Therefore, we designed `Retiarii `__, a novel NAS/HPO framework, and implemented it in NNI. It helps users easily construct a model space (or search space, tuning space) and utilize existing NAS algorithms. The framework also facilitates NAS innovation and can be used to design new NAS algorithms.
+
+Overview
+--------
+
+There are three key characteristics of the Retiarii framework:
+
+* Simple APIs are provided for defining a model search space within a PyTorch/TensorFlow model.
+* SOTA NAS algorithms are built in for exploring the model search space.
+* System-level optimizations are implemented for speeding up the exploration.
+
+There are two types of model space exploration approach: **Multi-trial NAS** and **One-shot NAS**. Multi-trial NAS trains each sampled model in the model space independently, while One-shot NAS samples models from a super model. After constructing the model space, users can use either exploration approach to explore the model space.
+
+
+Multi-trial NAS
+---------------
+
+Multi-trial NAS means each sampled model from the model space is trained independently. A typical multi-trial NAS is `NASNet `__. The algorithm that samples models from the model space is called an exploration strategy. NNI supports the following exploration strategies for multi-trial NAS.
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Exploration Strategy Name
+     - Brief Introduction of Algorithm
+   * - Random Strategy
+     - Randomly sampling new model(s) from user-defined model space. (``nni.retiarii.strategy.Random``)
+   * - Grid Search
+     - Sampling new model(s) from user-defined model space using grid search algorithm. (``nni.retiarii.strategy.GridSearch``)
+   * - Regularized Evolution
+     - Generating new model(s) from generated models using `regularized evolution algorithm `__ . (``nni.retiarii.strategy.RegularizedEvolution``)
+   * - TPE Strategy
+     - Sampling new model(s) from user-defined model space using `TPE algorithm `__ . (``nni.retiarii.strategy.TPEStrategy``)
+   * - RL Strategy
+     - Sampling new model(s) from user-defined model space using `PPO algorithm `__ . (``nni.retiarii.strategy.PolicyBasedRL``)
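+
+For example, picking a strategy from the table above takes one line. The sketch below uses ``Random``; the other strategies are instantiated the same way from ``nni.retiarii.strategy`` (their constructor arguments differ, see the strategy reference):
+
+.. code-block:: python
+
+   import nni.retiarii.strategy as strategy
+
+   # any strategy class listed in the table above can be used here
+   exploration_strategy = strategy.Random(dedup=True)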
+
+
+Please refer to `here <./multi_trial_nas.rst>`__ for detailed usage of multi-trial NAS.
+
+One-shot NAS
+------------
+
+One-shot NAS means building the model space into a super-model, training the super-model with weight sharing, and then sampling models from the super-model to find the best one. `DARTS `__ is a typical one-shot NAS algorithm.
+Below are the supported one-shot NAS algorithms. More one-shot NAS algorithms will be supported soon.
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - One-shot Algorithm Name
+     - Brief Introduction of Algorithm
+   * - `ENAS `__
+     - `Efficient Neural Architecture Search via Parameter Sharing `__. In ENAS, a controller learns to discover neural network architectures by searching for an optimal subgraph within a large computational graph. It uses parameter sharing between child models to achieve fast speed and excellent performance.
+   * - `DARTS `__
+     - `DARTS: Differentiable Architecture Search `__ introduces a novel algorithm for differentiable network architecture search based on bilevel optimization.
+   * - `SPOS `__
+     - `Single Path One-Shot Neural Architecture Search with Uniform Sampling `__ constructs a simplified supernet trained with a uniform path sampling method and applies an evolutionary algorithm to efficiently search for the best-performing architectures.
+   * - `ProxylessNAS `__
+     - `ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware `__. It removes proxies and directly learns the architectures for large-scale target tasks and target hardware platforms.
+
+Please refer to `here `__ for detailed usage of one-shot NAS algorithms.
+
+Reference and Feedback
+----------------------
+
+* `Quick Start <./QuickStart.rst>`__
+* `Construct Your Model Space <./construct_space.rst>`__
+* `Retiarii: A Deep Learning Exploratory-Training Framework `__
+* To `report a bug `__ for this feature in GitHub
+* To `file a feature or improvement request `__ for this feature in GitHub
diff --git a/docs/en_US/NAS/Proxylessnas.rst b/docs/en_US/NAS/Proxylessnas.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3700d3192c0ad347c8d4024897cac2d4d400541d
--- /dev/null
+++ b/docs/en_US/NAS/Proxylessnas.rst
@@ -0,0 +1,74 @@
+ProxylessNAS on NNI
+===================
+
+Introduction
+------------
+
+The paper `ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware `__ removes the proxy and directly learns the architectures for large-scale target tasks and target hardware platforms. It addresses the high memory consumption issue of differentiable NAS and reduces the computational cost to the same level as regular training, while still allowing a large candidate set. Please refer to the paper for details.
+
+Usage
+-----
+
+To use the ProxylessNAS training/searching approach, users need to specify the search space in their model with the `NNI NAS interface <./MutationPrimitives.rst>`__\ , e.g., ``LayerChoice``\ , ``InputChoice``. After defining and instantiating the model, the remaining work can be left to ProxylessNasTrainer by instantiating the trainer and passing the model to it.
+
+.. code-block:: python
+
+   trainer = ProxylessTrainer(model,
+                              loss=LabelSmoothingLoss(),
+                              dataset=None,
+                              optimizer=optimizer,
+                              metrics=lambda output, target: accuracy(output, target, topk=(1, 5,)),
+                              num_epochs=120,
+                              log_frequency=10,
+                              grad_reg_loss_type=args.grad_reg_loss_type,
+                              grad_reg_loss_params=grad_reg_loss_params,
+                              applied_hardware=args.applied_hardware, dummy_input=(1, 3, 224, 224),
+                              ref_latency=args.reference_latency)
+   trainer.train()
+   trainer.export(args.arch_path)
+
+The complete example code can be found :githublink:`here `.
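+
+``LabelSmoothingLoss`` and ``accuracy`` in the snippet above are helper functions from the example code rather than NNI APIs. For reference, a label-smoothing cross-entropy could be sketched as below; this is an illustrative sketch, not the example's exact implementation:
+
+.. code-block:: python
+
+   import torch
+   import torch.nn as nn
+   import torch.nn.functional as F
+
+   class LabelSmoothingLoss(nn.Module):
+       def __init__(self, smoothing=0.1):
+           super().__init__()
+           self.smoothing = smoothing
+
+       def forward(self, logits, target):
+           log_probs = F.log_softmax(logits, dim=-1)
+           n_classes = logits.size(-1)
+           # smoothed target distribution: 1 - smoothing on the true class,
+           # the rest spread uniformly over the remaining classes
+           true_dist = torch.full_like(log_probs, self.smoothing / (n_classes - 1))
+           true_dist.scatter_(1, target.unsqueeze(1), 1.0 - self.smoothing)
+           return torch.mean(torch.sum(-true_dist * log_probs, dim=-1))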
+
+**Input arguments of ProxylessNasTrainer**
+
+
+* **model** (*PyTorch model, required*\ ) - The model that users want to tune/search. It has mutables to specify the search space.
+* **loss** (*PyTorch module, required*\ ) - The main term of the loss function for model training. It receives logits and ground truth labels, and returns a loss tensor.
+* **optimizer** (*PyTorch Optimizer, required*\ ) - The optimizer used for optimizing the model.
+* **num_epochs** (*int, optional, default = 120*\ ) - The number of epochs to train/search.
+* **dataset** (*PyTorch dataset, required*\ ) - Dataset for training. It will be split for training weights and architecture weights.
+* **warmup_epochs** (*int, optional, default = 0*\ ) - The number of warmup epochs.
+* **batch_size** (*int, optional, default = 64*\ ) - Batch size.
+* **workers** (*int, optional, default = 4*\ ) - Workers for data loading.
+* **device** (*device, optional, default = 'cpu'*\ ) - The devices that users provide to do the train/search. The trainer applies data parallel on the model for users.
+* **log_frequency** (*int, optional, default = None*\ ) - Step count per logging.
+* **arc_learning_rate** (*float, optional, default = 1e-3*\ ) - The learning rate of the architecture parameters optimizer.
+* **grad_reg_loss_type** (*'mul#log', 'add#linear', or None, optional, default = 'add#linear'*\ ) - Regularization type for adding hardware-related loss. The trainer will not apply loss regularization when ``grad_reg_loss_type`` is set to None.
+* **grad_reg_loss_params** (*dict, optional, default = None*\ ) - Regularization params. 'alpha' and 'beta' are required when ``grad_reg_loss_type`` is 'mul#log'; 'lambda' is required when ``grad_reg_loss_type`` is 'add#linear'.
+* **applied_hardware** (*string, optional, default = None*\ ) - The hardware used to constrain the model's latency. Latency is predicted by Microsoft nn-Meter (https://github.com/microsoft/nn-Meter).
+* **dummy_input** (*tuple, optional, default = (1, 3, 224, 224)*\ ) - The dummy input shape when applied to the target hardware.
+* **ref_latency** (*float, optional, default = 65.0*\ ) - The reference latency value on the applied hardware (ms).
+
+
+Implementation
+--------------
+
+The implementation on NNI is based on the `official implementation `__. The official implementation supports two training approaches: gradient descent and RL based. Our current implementation on NNI supports the gradient descent training approach; complete support of ProxylessNAS is ongoing.
+
+The official implementation supports different targeted hardware, including 'mobile', 'cpu', 'gpu8', 'flops'. In the NNI repo, hardware latency prediction is supported by `Microsoft nn-Meter `__. nn-Meter is an accurate inference latency predictor for DNN models on diverse edge devices. nn-Meter currently supports four hardware platforms, including *'cortexA76cpu_tflite21'*, *'adreno640gpu_tflite21'*, *'adreno630gpu_tflite21'*, and *'myriadvpu_openvino2019r2'*. Users can find more information about nn-Meter on its website. More hardware will be supported in the future. Users can find more details about applying ``nn-Meter`` `here <./HardwareAwareNAS.rst>`__ .
+
+Below we describe the implementation details. Like other one-shot NAS algorithms on NNI, ProxylessNAS is composed of two parts: *search space* and *training approach*. For users to flexibly define their own search space and use the built-in ProxylessNAS training approach, we put the specified search space in :githublink:`example code ` using the :githublink:`NNI NAS interface `.
+
+.. image:: ../../img/proxylessnas.png
+   :target: ../../img/proxylessnas.png
+   :alt: 
+
+
+The ProxylessNAS training approach is composed of ProxylessLayerChoice and ProxylessNasTrainer. ProxylessLayerChoice instantiates a MixedOp for each mutable (i.e., LayerChoice) and manages the architecture weights in the MixedOp. **For DataParallel**\ , architecture weights should be included in the user model.
Specifically, in the ProxylessNAS implementation, we add the MixedOp to the corresponding mutable (i.e., LayerChoice) as a member variable. The ProxylessLayerChoice class also exposes two member functions, i.e., ``resample`` and ``finalize_grad``, for the trainer to control the training of the architecture weights.
+
+ProxylessNasMutator also implements the forward logic of the mutables (i.e., LayerChoice).
+
+Reproduce Results
+-----------------
+
+To reproduce the result, we first ran the search. We found that although the search runs for many epochs, the chosen architecture converges within the first several epochs. This is probably caused by hyper-parameters or the implementation; we are working on it.
\ No newline at end of file
diff --git a/docs/en_US/NAS/QuickStart.rst b/docs/en_US/NAS/QuickStart.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01df0cc0e5d9d9eb1809ec4eb2d11bc3e7b44fe8
--- /dev/null
+++ b/docs/en_US/NAS/QuickStart.rst
@@ -0,0 +1,203 @@
+Quick Start of Retiarii on NNI
+==============================
+
+
+.. contents::
+
+In this quick start, we use multi-trial NAS as an example to show how to construct and explore a model space. There are mainly three crucial components for a neural architecture search task, namely:
+
+* A model search space that defines the set of models to explore.
+* A proper strategy as the method to explore this model space.
+* A model evaluator that reports the performance of every model in the space.
+
+The tutorial for one-shot NAS can be found `here <./OneshotTrainer.rst>`__.
+
+Currently, PyTorch is the only framework supported by Retiarii, and we have only tested with **PyTorch 1.7 to 1.10**. This documentation assumes a PyTorch context, but it should also apply to other frameworks, which is in our future plan.
+
+Define your Model Space
+-----------------------
+
+A model space is defined by users to express a set of models that users want to explore, which contains potentially good-performing models. In this framework, a model space is defined with two parts: a base model and possible mutations on the base model.
+
+Define Base Model
+^^^^^^^^^^^^^^^^^
+
+Defining a base model is almost the same as defining a PyTorch (or TensorFlow) model. Usually, you only need to replace the code ``import torch.nn as nn`` with ``import nni.retiarii.nn.pytorch as nn`` to use our wrapped PyTorch modules.
+
+Below is a very simple example of defining a base model.
+
+.. code-block:: python
+
+   import torch
+   import torch.nn.functional as F
+   import nni.retiarii.nn.pytorch as nn
+   from nni.retiarii import model_wrapper
+
+   @model_wrapper      # this decorator should be put on the outermost module
+   class Net(nn.Module):
+       def __init__(self):
+           super().__init__()
+           self.conv1 = nn.Conv2d(1, 32, 3, 1)
+           self.conv2 = nn.Conv2d(32, 64, 3, 1)
+           self.dropout1 = nn.Dropout(0.25)
+           self.dropout2 = nn.Dropout(0.5)
+           self.fc1 = nn.Linear(9216, 128)
+           self.fc2 = nn.Linear(128, 10)
+
+       def forward(self, x):
+           x = F.relu(self.conv1(x))
+           x = F.max_pool2d(self.conv2(x), 2)
+           x = torch.flatten(self.dropout1(x), 1)
+           x = self.fc2(self.dropout2(F.relu(self.fc1(x))))
+           output = F.log_softmax(x, dim=1)
+           return output
+
+.. tip:: Always keep in mind that you should use ``import nni.retiarii.nn.pytorch as nn`` and :meth:`nni.retiarii.model_wrapper`. Many mistakes are a result of forgetting one of those. Also, please use ``torch.nn`` for submodules of ``nn.init``, e.g., ``torch.nn.init`` instead of ``nn.init``.
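+
+The model space defined in the next section will use a custom ``DepthwiseSeparableConv`` module as one of the candidate layers. Its definition is not part of the snippets in this tutorial; a minimal sketch that is consistent with the tensor shapes used here (the full version lives in the complete example linked below) could be:
+
+.. code-block:: python
+
+   class DepthwiseSeparableConv(nn.Module):
+       def __init__(self, in_ch, out_ch):
+           super().__init__()
+           # depthwise 3x3 convolution (one filter per input channel) ...
+           self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, groups=in_ch)
+           # ... followed by a pointwise 1x1 convolution to mix channels
+           self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1)
+
+       def forward(self, x):
+           return self.pointwise(self.depthwise(x))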
+
+Define Model Mutations
+^^^^^^^^^^^^^^^^^^^^^^
+
+A base model is only one concrete model, not a model space. We provide `APIs and primitives <./MutationPrimitives.rst>`__ for users to express how the base model can be mutated, that is, to build a model space which includes many models.
+
+Based on the above base model, we can define a model space as below.
+
+.. code-block:: diff
+
+   import torch
+   import torch.nn.functional as F
+   import nni.retiarii.nn.pytorch as nn
+   from nni.retiarii import model_wrapper
+
+   @model_wrapper
+   class Net(nn.Module):
+       def __init__(self):
+           super().__init__()
+           self.conv1 = nn.Conv2d(1, 32, 3, 1)
+   -       self.conv2 = nn.Conv2d(32, 64, 3, 1)
+   +       self.conv2 = nn.LayerChoice([
+   +           nn.Conv2d(32, 64, 3, 1),
+   +           DepthwiseSeparableConv(32, 64)
+   +       ])
+   -       self.dropout1 = nn.Dropout(0.25)
+   +       self.dropout1 = nn.Dropout(nn.ValueChoice([0.25, 0.5, 0.75]))
+           self.dropout2 = nn.Dropout(0.5)
+   -       self.fc1 = nn.Linear(9216, 128)
+   -       self.fc2 = nn.Linear(128, 10)
+   +       feature = nn.ValueChoice([64, 128, 256])
+   +       self.fc1 = nn.Linear(9216, feature)
+   +       self.fc2 = nn.Linear(feature, 10)
+
+       def forward(self, x):
+           x = F.relu(self.conv1(x))
+           x = F.max_pool2d(self.conv2(x), 2)
+           x = torch.flatten(self.dropout1(x), 1)
+           x = self.fc2(self.dropout2(F.relu(self.fc1(x))))
+           output = F.log_softmax(x, dim=1)
+           return output
+
+This example uses two mutation APIs, ``nn.LayerChoice`` and ``nn.ValueChoice``. ``nn.LayerChoice`` takes a list of candidate modules (two in this example), one of which will be chosen for each sampled model. It can be used like a normal PyTorch module. ``nn.ValueChoice`` takes a list of candidate values, one of which will take effect for each sampled model.
+
+More detailed API description and usage can be found `here <./construct_space.rst>`__ .
+
+.. note:: We are actively enriching the mutation APIs to facilitate easy construction of model spaces. If the currently supported mutation APIs cannot express your model space, please refer to `this doc <./Mutators.rst>`__ for customizing mutators.
+
+Explore the Defined Model Space
+-------------------------------
+
+There are basically two exploration approaches: (1) searching by evaluating each sampled model independently, which is the search approach in multi-trial NAS, and (2) one-shot weight-sharing based search, which is used in one-shot NAS. We demonstrate the first approach in this tutorial. Users can refer to `here <./OneshotTrainer.rst>`__ for the second approach.
+
+First, users need to pick a proper exploration strategy to explore the defined model space. Second, users need to pick or customize a model evaluator to evaluate the performance of each explored model.
+
+Pick an exploration strategy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Retiarii supports many `exploration strategies <./ExplorationStrategies.rst>`__.
+
+Simply choose (i.e., instantiate) an exploration strategy as below.
+
+.. code-block:: python
+
+   import nni.retiarii.strategy as strategy
+
+   search_strategy = strategy.Random(dedup=True)  # dedup=False if deduplication is not wanted
+
+Pick or customize a model evaluator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the exploration process, the exploration strategy repeatedly generates new models. A model evaluator is for training and validating each generated model to obtain its performance. The performance is sent back to the exploration strategy so that the strategy can generate better models.
+
+Retiarii provides `built-in model evaluators <./ModelEvaluators.rst>`__, but to start with, it is recommended to use ``FunctionalEvaluator``, that is, to wrap your own training and evaluation code in one single function. This function should receive one single model class and use ``nni.report_final_result`` to report the final score of this model.
+
+The example here creates a simple evaluator that runs on the MNIST dataset, trains for three epochs, and reports the validation accuracy.
+
+.. code-block:: python
+
+   import nni
+   import torch
+   from torch.utils.data import DataLoader
+   from torchvision import transforms
+   from torchvision.datasets import MNIST
+
+   def evaluate_model(model_cls):
+       # "model_cls" is a class, need to instantiate
+       model = model_cls()
+
+       optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+       transf = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+       train_loader = DataLoader(MNIST('data/mnist', download=True, transform=transf), batch_size=64, shuffle=True)
+       test_loader = DataLoader(MNIST('data/mnist', download=True, train=False, transform=transf), batch_size=64)
+
+       device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+       for epoch in range(3):
+           # train the model for one epoch
+           train_epoch(model, device, train_loader, optimizer, epoch)
+           # test the model for one epoch
+           accuracy = test_epoch(model, device, test_loader)
+           # call report intermediate result. Result can be float or dict
+           nni.report_intermediate_result(accuracy)
+
+       # report final test result
+       nni.report_final_result(accuracy)
+
+   # Create the evaluator
+   evaluator = nni.retiarii.evaluator.FunctionalEvaluator(evaluate_model)
+
+The ``train_epoch`` and ``test_epoch`` here can be any customized functions, in which users can write their own training recipe; a possible sketch is shown right below. See :githublink:`examples/nas/multi-trial/mnist/search.py` for the full example.
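+
+The following is one possible shape for these two functions, assuming the model's log-softmax output as defined earlier; it is an illustrative sketch rather than the exact code of the linked example:
+
+.. code-block:: python
+
+   import torch
+   import torch.nn.functional as F
+
+   def train_epoch(model, device, train_loader, optimizer, epoch):
+       model.to(device)
+       model.train()
+       for data, target in train_loader:
+           data, target = data.to(device), target.to(device)
+           optimizer.zero_grad()
+           # the model outputs log-probabilities, so nll_loss applies
+           loss = F.nll_loss(model(data), target)
+           loss.backward()
+           optimizer.step()
+
+   def test_epoch(model, device, test_loader):
+       model.eval()
+       correct = 0
+       with torch.no_grad():
+           for data, target in test_loader:
+               data, target = data.to(device), target.to(device)
+               pred = model(data).argmax(dim=1)
+               correct += (pred == target).sum().item()
+       # return plain accuracy in [0, 1]
+       return correct / len(test_loader.dataset)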
+
+It is recommended that ``evaluate_model`` accepts no additional arguments other than ``model_cls``. However, in the `advanced tutorial <./ModelEvaluators.rst>`__, we show how to use additional arguments in case you actually need them. In the future, we will support mutation on the arguments of evaluators, which is commonly called "hyper-parameter tuning".
+
+Launch an Experiment
+--------------------
+
+After all the above are prepared, it is time to start an experiment to do the model search. An example is shown below.
+
+.. code-block:: python
+
+   exp = RetiariiExperiment(base_model, evaluator, [], search_strategy)
+   exp_config = RetiariiExeConfig('local')
+   exp_config.experiment_name = 'mnist_search'
+   exp_config.trial_concurrency = 2
+   exp_config.max_trial_number = 20
+   exp_config.training_service.use_active_gpu = False
+   exp.run(exp_config, 8081)
+
+The complete code of this example can be found :githublink:`here `. Users can also run a Retiarii experiment with `different training services <../training_services.rst>`__ besides the ``local`` training service.
+
+Visualize the Experiment
+------------------------
+
+Users can visualize their experiment in the same way as visualizing a normal hyper-parameter tuning experiment. For example, open ``localhost:8081`` in your browser, where 8081 is the port you set in ``exp.run``. Please refer to `here <../Tutorial/WebUI.rst>`__ for details.
+
+We support visualizing models with third-party visualization engines (like `Netron `__). This can be used by clicking ``Visualization`` in the detail panel of each trial. Note that the current visualization is based on `onnx `__ , so visualization is not feasible if the model cannot be exported into onnx. Built-in evaluators (e.g., Classification) will automatically export the model into a file. For your own evaluator, you need to save your model into ``$NNI_OUTPUT_DIR/model.onnx`` to make this work.
+
+Export Top Models
+-----------------
+
+Users can export the top models after the exploration is done using ``export_top_models``.
+
+.. code-block:: python
+
+   for model_code in exp.export_top_models(formatter='dict'):
+       print(model_code)
+
+The output is a JSON object which records the mutation actions of the top model. If users want to output the source code of the top model, they can use the graph-based execution engine for the experiment by simply adding the following two lines.
+
+.. code-block:: python
+
+   exp_config.execution_engine = 'base'
+   export_formatter = 'code'
diff --git a/docs/en_US/NAS/SPOS.rst b/docs/en_US/NAS/SPOS.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c3ba8853100935b94192ea6f232ada4f494966f4
--- /dev/null
+++ b/docs/en_US/NAS/SPOS.rst
@@ -0,0 +1,102 @@
+Single Path One-Shot (SPOS)
+===========================
+
+Introduction
+------------
+
+`Single Path One-Shot Neural Architecture Search with Uniform Sampling `__ proposes a one-shot NAS method that addresses the difficulties of training one-shot NAS models by constructing a simplified supernet trained with a uniform path sampling method, so that all underlying architectures (and their weights) get trained fully and equally. An evolutionary algorithm is then applied to efficiently search for the best-performing architectures without any fine-tuning.
+
+The implementation on NNI is based on the `official repo `__. We implement a trainer that trains the supernet, and an evolution tuner that leverages the power of the NNI framework to speed up the evolutionary search phase.
+
+Examples
+--------
+
+Here is a use case, which is the search space in the paper. However, we apply a latency limit instead of a FLOPs limit in the architecture search phase.
+
+:githublink:`Example code `
+
+Requirements
+^^^^^^^^^^^^
+
+Prepare ImageNet in the standard format (follow the script `here `__\ ). Linking it to ``data/imagenet`` will be more convenient.
+
+Download the checkpoint file from `here `__ (maintained by `Megvii `__\ ) if you don't want to retrain the supernet. Put ``checkpoint-150000.pth.tar`` under the ``data`` directory.
+
+
+After preparation, it's expected to have the following code structure:
+
+.. code-block:: bash
+
+   spos
+   ├── architecture_final.json
+   ├── blocks.py
+   ├── data
+   │   ├── imagenet
+   │   │   ├── train
+   │   │   └── val
+   │   └── checkpoint-150000.pth.tar
+   ├── network.py
+   ├── readme.md
+   ├── supernet.py
+   ├── evaluation.py
+   ├── search.py
+   └── utils.py
+
+Step 1. Train Supernet
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+   python supernet.py
+
+This will export the checkpoint to the ``checkpoints`` directory, for use in the next step.
+
+NOTE: The data loading used in the official repo is `slightly different from usual `__\ , as they use BGR tensors and intentionally keep the values between 0 and 255 to align with their own DL framework. The option ``--spos-preprocessing`` will simulate the original behavior and enable you to use the pretrained checkpoints.
+
+Step 2. Evolution Search
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Single Path One-Shot leverages an evolutionary algorithm to search for the best architecture.
In the paper, the search module, which is responsible for testing the sampled architecture, recalculates all the batch norm statistics on a subset of training images, and evaluates the architecture on the full validation set.
+
+In this example, we have an incomplete implementation of the evolution search. The example only supports training from scratch; inheriting weights from a pretrained supernet is not supported yet. To search with the regularized evolution strategy, run
+
+.. code-block:: bash
+
+   python search.py
+
+The final architecture exported from every epoch of evolution can be found in ``trials`` under the working directory of your tuner, which, by default, is ``$HOME/nni-experiments/your_experiment_id/trials``.
+
+Step 3. Train for Evaluation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+   python evaluation.py
+
+By default, it will use ``architecture_final.json``. This architecture is provided by the official repo (converted into NNI format). You can use any architecture (e.g., the architecture found in step 2) with the ``--fixed-arc`` option.
+
+Reference
+---------
+
+PyTorch
+^^^^^^^
+
+.. autoclass:: nni.retiarii.oneshot.pytorch.SinglePathTrainer
+    :noindex:
+
+Known Limitations
+-----------------
+
+
+* Block search only. Channel search is not supported yet.
+* In the search phase, training from scratch is required. Inheriting weights from the supernet is not supported yet.
+
+Current Reproduction Results
+----------------------------
+
+Reproduction is still in progress. Due to the gap between the official release and the original paper, we compare our current results with the official repo (our own run) and the paper.
+
+
+* The evolution phase is almost aligned with the official repo. Our evolution algorithm shows a converging trend and reaches ~65% accuracy at the end of the search. Nevertheless, this result is not on par with the paper. For details, please refer to `this issue `__.
+* The retrain phase is not aligned. Our retraining code, which uses the architecture released by the authors, reaches 72.14% accuracy, still having a gap towards the 73.61% by the official release and the 74.3% reported in the original paper.
diff --git a/docs/en_US/NAS/Serialization.rst b/docs/en_US/NAS/Serialization.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fbe69059ee7045cf5bf100e90eedf958a467fbdc
--- /dev/null
+++ b/docs/en_US/NAS/Serialization.rst
@@ -0,0 +1,62 @@
+Serialization
+=============
+
+In multi-trial NAS, a sampled model should be able to be executed on a remote machine or a training platform (e.g., AzureML, OpenPAI). "Serialization" enables re-instantiation of the model evaluator in another process or machine; to this end, both the model and its model evaluator need to be correctly serialized. To make NNI correctly serialize a model evaluator, users should apply ``nni.trace`` on some of their functions and objects. API references can be found in :func:`nni.trace`.
+
+Serialization is implemented as a combination of `json-tricks `_ and `cloudpickle `_. Essentially, it is json-tricks, an enhanced version of Python JSON that can handle the serialization of numpy arrays, dates/times, decimals, fractions, etc. The difference lies in the handling of class instances. Json-tricks deals with class instances via ``__dict__`` and ``__class__``, which in most of our cases is not reliable (e.g., for datasets and dataloaders). Instead, our serialization deals with class instances in two ways:
+
+1. If the class / factory that creates the object is decorated with ``nni.trace``, we serialize the class / factory function along with its parameters, so that the instance can be re-instantiated.
+2. Otherwise, cloudpickle is used to serialize the object into a binary.
+
+Our recommendation is: unless you are absolutely certain that serializing the object into binary causes no problems and no extra burden, always add ``nni.trace``. In most cases, it is cleaner and neater, and it enables possibilities such as mutation of parameters (to be supported in the future).
+
+.. warning::
+
+    **What will happen if I forget to "trace" my objects?**
+
+    It is likely that the program can still run. NNI will try to serialize the untraced object into a binary. This might fail in complex cases, for example, when the object is too large. Even if it succeeds, the result might be a substantially large object. For example, if you forget to add ``nni.trace`` on ``MNIST``, the MNIST dataset object will be serialized into binary, which will be dozens of megabytes because the object has the whole 60k images stored inside. You might see warnings and even errors when running experiments. To avoid such issues, the easiest way is to always remember to add ``nni.trace`` to non-primitive objects.
+
+.. note:: In Retiarii, the serializer will throw an exception when any single object in the recursive serialization is larger than 64 KB after binary serialization. This indicates that such an object needs to be wrapped with ``nni.trace``. In rare cases, if you insist on pickling large data, the limit can be overridden by setting the environment variable ``PICKLE_SIZE_LIMIT``, whose unit is bytes. Please note that even if the experiment might be able to run, this can still cause performance issues and even crash the NNI experiment.
+
+To trace a function or class, users can use the decorator like:
+
+.. code-block:: python
+
+    @nni.trace
+    class MyClass:
+        ...
+
+Inline trace, which traces instantly on object instantiation or function invocation, is also acceptable: ``nni.trace(MyClass)(parameters)``.
+
+Assuming a class ``cls`` is already traced, when it is serialized, its class type along with its initialization parameters will be dumped. As the parameters are possibly class instances themselves (if not primitive types like ``int`` and ``str``), their serialization poses a similar problem. We recommend decorating them with ``nni.trace`` as well. In other words, ``nni.trace`` should be applied recursively if necessary.
+
+Below is an example; ``transforms.Compose``, ``transforms.Normalize``, and ``MNIST`` are serialized manually using ``nni.trace``. ``nni.trace`` takes a class / function as its argument and returns a wrapped class / function that has the same behavior as the original one. The usage of the wrapped class / function is also identical to the original one, except that the arguments are recorded. There is no need to apply ``nni.trace`` to ``pl.Classification`` and ``pl.DataLoader`` because they are already traced.
+
+.. code-block:: python
+
+    import nni
+    import nni.retiarii.evaluator.pytorch.lightning as pl
+    from torchvision import transforms
+    from torchvision.datasets import MNIST
+
+    def create_mnist_dataset(root, transform):
+        return MNIST(root=root, train=False, download=True, transform=transform)
+
+    transform = nni.trace(transforms.Compose)([nni.trace(transforms.ToTensor)(), nni.trace(transforms.Normalize)((0.1307,), (0.3081,))])
+
+    # If you write it like the following, the whole transform will be serialized into a pickle.
+    # This actually works fine, but we do NOT recommend such practice.
+    # transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+
+    train_dataset = nni.trace(MNIST)(root='data/mnist', train=True, download=True, transform=transform)
+    test_dataset = nni.trace(create_mnist_dataset)('data/mnist', transform=transform)  # a factory is also acceptable
+    evaluator = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100),
+                                  val_dataloaders=pl.DataLoader(test_dataset, batch_size=100),
+                                  max_epochs=10)
+
+.. note::
+
+    **What's the relationship between model_wrapper, basic_unit and nni.trace?**
+
+    They are fundamentally different. ``model_wrapper`` is used to wrap a base model (search space), ``basic_unit`` to annotate a module as a primitive, and ``nni.trace`` to enable serialization of general objects. Though they share similar underlying implementations, do keep in mind that you will experience errors if you mix them up.
+
+    .. seealso:: Please refer to the API references of :meth:`nni.retiarii.model_wrapper`, :meth:`nni.retiarii.basic_unit`, and :meth:`nni.trace`.
diff --git a/docs/en_US/NAS/WriteOneshot.rst b/docs/en_US/NAS/WriteOneshot.rst
new file mode 100644
index 0000000000000000000000000000000000000000..19b546b3deb7c68066a942487e25f7239f41af01
--- /dev/null
+++ b/docs/en_US/NAS/WriteOneshot.rst
@@ -0,0 +1,56 @@
+Customize a New One-shot Trainer
+================================
+
+One-shot trainers should inherit ``nni.retiarii.oneshot.BaseOneShotTrainer`` and need to implement the ``fit()`` method (used to conduct the fitting and searching process) and the ``export()`` method (used to return the searched best architecture).
+
+Writing a one-shot trainer is very different from writing a single-arch evaluator. First of all, there are no more restrictions on the init method arguments; any Python arguments are acceptable. Secondly, the model fed into a one-shot trainer might contain Retiarii-specific modules, such as LayerChoice and InputChoice. Such a model cannot forward-propagate directly, and the trainer needs to decide how to handle those modules.
+
+A typical example is DartsTrainer, where learnable parameters are used to combine multiple choices in a LayerChoice. Retiarii provides easy-to-use utility functions for module-replace purposes, namely ``replace_layer_choice`` and ``replace_input_choice``. A simplified example is as follows:
+
+.. code-block:: python
+
+    import torch
+    import torch.nn as nn
+    import torch.nn.functional as F
+
+    from nni.retiarii.oneshot import BaseOneShotTrainer
+    from nni.retiarii.oneshot.pytorch import replace_layer_choice, replace_input_choice
+
+
+    class DartsLayerChoice(nn.Module):
+        def __init__(self, layer_choice):
+            super(DartsLayerChoice, self).__init__()
+            self.name = layer_choice.label
+            self.op_choices = nn.ModuleDict(layer_choice.named_children())
+            self.alpha = nn.Parameter(torch.randn(len(self.op_choices)) * 1e-3)
+
+        def forward(self, *args, **kwargs):
+            op_results = torch.stack([op(*args, **kwargs) for op in self.op_choices.values()])
+            alpha_shape = [-1] + [1] * (len(op_results.size()) - 1)
+            return torch.sum(op_results * F.softmax(self.alpha, -1).view(*alpha_shape), 0)
+
+
+    class DartsTrainer(BaseOneShotTrainer):
+
+        def __init__(self, model, loss, metrics, optimizer):
+            self.model = model
+            self.loss = loss
+            self.metrics = metrics
+            self.num_epochs = 10
+
+            self.nas_modules = []
+            replace_layer_choice(self.model, DartsLayerChoice, self.nas_modules)
+
+            ...  # init dataloaders and optimizers
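+            # (illustrative sketch, not part of the original simplified example)
+            # a real trainer would typically create two optimizers here: one for
+            # the model weights and one for the architecture parameters, e.g.:
+            #   self.model_optim = optimizer
+            #   self.arch_optim = torch.optim.Adam(
+            #       [m.alpha for _, m in self.nas_modules], lr=3e-4)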
+
+        def fit(self):
+            for i in range(self.num_epochs):
+                for (trn_X, trn_y), (val_X, val_y) in zip(self.train_loader, self.valid_loader):
+                    self.train_architecture(val_X, val_y)
+                    self.train_model_weight(trn_X, trn_y)
+
+        @torch.no_grad()
+        def export(self):
+            result = dict()
+            for name, module in self.nas_modules:
+                if name not in result:
+                    result[name] = select_best_of_module(module)
+            return result
+
+The full code of DartsTrainer is available in the Retiarii source code. Please check :githublink:`DartsTrainer `.
diff --git a/docs/en_US/NAS/construct_space.rst b/docs/en_US/NAS/construct_space.rst
new file mode 100644
index 0000000000000000000000000000000000000000..362bb446ea6efe88dc065c8c12ab1f20a9ffc775
--- /dev/null
+++ b/docs/en_US/NAS/construct_space.rst
@@ -0,0 +1,12 @@
+#####################
+Construct Model Space
+#####################
+
+NNI provides powerful APIs for users to easily express model spaces (or search spaces). First, users can use mutation primitives (e.g., ValueChoice, LayerChoice) to inline a space in their model. Second, NNI provides a simple interface for users to customize new mutators for expressing more complicated model spaces. In most cases, the mutation primitives are enough to express users' model spaces.
+
+.. toctree::
+    :maxdepth: 1
+
+    Mutation Primitives 
+    Customize Mutators 
+    Hypermodule Lib 
\ No newline at end of file
diff --git a/docs/en_US/NAS/multi_trial_nas.rst b/docs/en_US/NAS/multi_trial_nas.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e95f446694eb7f689cc9bc3ec92db3cb56696fd3
--- /dev/null
+++ b/docs/en_US/NAS/multi_trial_nas.rst
@@ -0,0 +1,12 @@
+Multi-trial NAS
+===============
+
+In multi-trial NAS, users need a model evaluator to evaluate the performance of each sampled model, and an exploration strategy to sample models from the defined model space. Users can use the model evaluators provided by NNI or write their own model evaluator, and can simply choose an exploration strategy. Advanced users can also customize new exploration strategies. For a simple example of how to run a multi-trial NAS experiment, please refer to `Quick Start <./QuickStart.rst>`__.
+
+.. toctree::
+    :maxdepth: 2
+
+    Model Evaluators 
+    Exploration Strategies 
+    Execution Engines 
+    Serialization 
diff --git a/docs/en_US/NAS/one_shot_nas.rst b/docs/en_US/NAS/one_shot_nas.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d1082f7cbce1d252c0a226a31738c0d688afb45
--- /dev/null
+++ b/docs/en_US/NAS/one_shot_nas.rst
@@ -0,0 +1,16 @@
+One-shot NAS
+============
+
+One-shot NAS algorithms leverage weight sharing among models in the neural architecture search space to train a supernet, and use this supernet to guide the selection of better models. This type of algorithm greatly reduces the computational resources required compared to independently training each model from scratch (which we call "multi-trial NAS"). NNI supports the popular one-shot NAS algorithms listed below.
+
+
+.. toctree::
+    :maxdepth: 1
+
+    Run One-shot NAS 
+    ENAS 
+    DARTS 
+    SPOS 
+    ProxylessNAS 
+    FBNet 
+    Customize One-shot NAS 
diff --git a/docs/en_US/Overview.rst b/docs/en_US/Overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a4fe5d5dd829c9ec0471a6d19eb595df86ebc536
--- /dev/null
+++ b/docs/en_US/Overview.rst
@@ -0,0 +1,123 @@
+Overview
+========
+
+NNI (Neural Network Intelligence) is a toolkit to help users design and tune machine learning models (e.g., hyperparameters), neural network architectures, or a complex system's parameters in an efficient and automatic way. NNI has several appealing properties: ease-of-use, scalability, flexibility, and efficiency.
+
+
+* **Ease-of-use**\ : NNI can be easily installed through pip. Only a few lines need to be added to your code in order to use NNI's power. You can use both the command line tool and the WebUI to work with your experiments.
+* **Scalability**\ : Tuning hyperparameters or the neural architecture often demands a large number of computational resources, while NNI is designed to fully leverage different computation resources, such as remote machines and training platforms (e.g., OpenPAI, Kubernetes). Hundreds of trials can run in parallel, depending on the capacity of your configured training platforms.
+* **Flexibility**\ : Besides rich built-in algorithms, NNI allows users to customize various hyperparameter tuning algorithms, neural architecture search algorithms, early stopping algorithms, etc. Users can also extend NNI with more training platforms, such as virtual machines or Kubernetes services in the cloud. Moreover, NNI can connect to external environments to tune special applications/models on them.
+* **Efficiency**\ : We are intensively working on more efficient model tuning on both the system and algorithm level. For example, we leverage early feedback to speed up the tuning procedure.
+
+The figure below shows the high-level architecture of NNI.
+
+
+.. raw:: html
+
+   <!-- figure: high-level architecture of NNI (image "drawing") -->
+
+
+Key Concepts
+------------
+
+
+*
+  *Experiment*\ : One task of, for example, finding out the best hyperparameters of a model, finding out the best neural network architecture, etc. It consists of trials and AutoML algorithms.
+
+*
+  *Search Space*\ : The feasible region for tuning the model. For example, the value range of each hyperparameter.
+
+*
+  *Configuration*\ : An instance from the search space, that is, each hyperparameter has a specific value.
+
+*
+  *Trial*\ : An individual attempt at applying a new configuration (e.g., a set of hyperparameter values, a specific neural architecture, etc.). Trial code should be able to run with the provided configuration.
+
+*
+  *Tuner*\ : An AutoML algorithm, which generates a new configuration for the next try. A new trial will run with this configuration.
+
+*
+  *Assessor*\ : Analyzes a trial's intermediate results (e.g., periodically evaluated accuracy on the test dataset) to tell whether this trial can be stopped early or not.
+
+*
+  *Training Platform*\ : Where trials are executed. Depending on your experiment's configuration, it could be your local machine, remote servers, or a large-scale training platform (e.g., OpenPAI, Kubernetes).
+
+Basically, an experiment runs as follows: the Tuner receives the search space and generates configurations. These configurations are submitted to training platforms, such as the local machine, remote machines, or training clusters. Their performances are reported back to the Tuner. Then, new configurations are generated and submitted.
+
+For each experiment, the user only needs to define a search space and update a few lines of code, and then leverage NNI's built-in Tuner/Assessor and training platforms to search for the best hyperparameters and/or neural architecture. There are basically 3 steps:
+
+..
+
+   Step 1: `Define search space `__
+
+   Step 2: `Update model codes `__
+
+   Step 3: `Define Experiment `__
+
+
+
+.. raw:: html
+
+   <!-- figure: three steps to start an NNI experiment (image "drawing") -->
+
+
+For more details about how to run an experiment, please refer to `Get Started `__.
+
+Core Features
+-------------
+
+NNI provides a key capability to run multiple instances in parallel to find the best combinations of parameters. This feature can be used in various domains, like finding the best hyperparameters for a deep learning model, or finding the best configuration for databases and other complex systems with real data.
+
+NNI also provides algorithm toolkits for machine learning and deep learning, especially neural architecture search (NAS) algorithms, model compression algorithms, and feature engineering algorithms.
+
+Hyperparameter Tuning
+^^^^^^^^^^^^^^^^^^^^^
+
+This is a core and basic feature of NNI. We provide many popular `automatic tuning algorithms `__ (i.e., tuners) and `early stop algorithms `__ (i.e., assessors). You can follow `Quick Start `__ to tune your model (or system). Basically, there are the above three steps and then starting an NNI experiment.
+
+General NAS Framework
+^^^^^^^^^^^^^^^^^^^^^
+
+This NAS framework is for users to easily specify candidate neural architectures. For example, one can specify multiple candidate operations (e.g., separable conv, dilated conv) for a single layer, and specify possible skip connections. NNI will find the best candidate automatically. On the other hand, the NAS framework provides a simple interface for another type of user (e.g., NAS algorithm researchers) to implement new NAS algorithms. A detailed description of NAS and its usage can be found `here `__.
+
+NNI supports many one-shot NAS algorithms, such as ENAS and DARTS, through the NNI trial SDK. To use these algorithms you do not have to start an NNI experiment. Instead, import an algorithm in your trial code and simply run your trial code. If you want to tune the hyperparameters in the algorithms or want to run multiple instances, you can choose a tuner and start an NNI experiment.
+
+Other than one-shot NAS, NAS can also run in a classic mode where each candidate architecture runs as an independent trial job. In this mode, similar to hyperparameter tuning, users have to start an NNI experiment and choose a tuner for NAS.
+
+Model Compression
+^^^^^^^^^^^^^^^^^
+
+NNI provides an easy-to-use model compression framework to compress deep neural networks; the compressed networks typically have a much smaller model size and much faster
+inference speed without losing performance significantly. Model compression on NNI includes pruning algorithms and quantization algorithms. NNI provides many pruning and
+quantization algorithms through the NNI trial SDK. Users can directly use them in their trial code and run the trial code without starting an NNI experiment. Users can also use the NNI model compression framework to customize their own pruning and quantization algorithms.
+
+A detailed description of model compression and its usage can be found `here `__.
+
+Automatic Feature Engineering
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Automatic feature engineering is for users to find the best features for their tasks. A detailed description of automatic feature engineering and its usage can be found `here `__. It is supported through the NNI trial SDK, which means you do not have to create an NNI experiment. Instead, simply import a built-in auto-feature-engineering algorithm in your trial code and directly run your trial code.
+
+The auto-feature-engineering algorithms usually have a bunch of hyperparameters themselves. 
If you want to automatically tune those hyperparameters, you can leverage the hyperparameter tuning of NNI, that is, choose a tuning algorithm (i.e., tuner) and start an NNI experiment for it.
+
+Learn More
+----------
+
+
+* `Get started `__
+* `How to adapt your trial code on NNI? `__
+* `What are tuners supported by NNI? `__
+* `How to customize your own tuner? `__
+* `What are assessors supported by NNI? `__
+* `How to customize your own assessor? `__
+* `How to run an experiment on local? `__
+* `How to run an experiment on multiple machines? `__
+* `How to run an experiment on OpenPAI? `__
+* `Examples `__
+* `Neural Architecture Search on NNI `__
+* `Model Compression on NNI `__
+* `Automatic feature engineering on NNI `__
diff --git a/docs/en_US/Release.rst b/docs/en_US/Release.rst
new file mode 100644
index 0000000000000000000000000000000000000000..405c3371335a8e33a4109170b5aa81368a35be61
--- /dev/null
+++ b/docs/en_US/Release.rst
@@ -0,0 +1,1634 @@
+.. role:: raw-html(raw)
+   :format: html
+
+
+Change Log
+==========
+
+Release 2.6.1 - 2/18/2022
+-------------------------
+
+Bug Fixes
+^^^^^^^^^
+
+* Fix a bug that new TPE does not support dict metrics.
+* Fix a bug of a missing comma. (Thanks to @mrshu)
+
+Release 2.6 - 1/19/2022
+-----------------------
+
+**NOTE**: NNI v2.6 is the last version that supports Python 3.6. From the next release NNI will require Python 3.7+.
+
+Hyper-Parameter Optimization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Experiment
+""""""""""
+
+* The legacy experiment config format is now deprecated. `(doc of new config) `__
+
+  * If you are still using the legacy format, nnictl will show the equivalent new config on start. Please save it to replace the old one.
+
+* nnictl now uses ``nni.experiment.Experiment`` `APIs `__ as backend. The output messages of the create, resume, and view commands have changed.
+* Added Kubeflow and FrameworkController support to hybrid mode. `(doc) `__
+* The hidden tuner manifest file has been updated. This should be transparent to users, but if you encounter issues like failing to find a tuner, please try to remove ``~/.config/nni``.
+
+Algorithms
+""""""""""
+
+* Random tuner now supports classArgs ``seed``. `(doc) `__
+* TPE tuner is refactored: `(doc) `__
+
+  * Support classArgs ``seed``.
+  * Support classArgs ``tpe_args`` for expert users to customize algorithm behavior.
+  * Parallel optimization has been turned on by default. To turn it off set ``tpe_args.constant_liar_type`` to ``null`` (or ``None`` in Python).
+  * ``parallel_optimize`` and ``constant_liar_type`` have been removed. If you are using them, please update your config to use ``tpe_args.constant_liar_type`` instead.
+
+* Grid search tuner now supports all search space types, including uniform, normal, and nested choice. `(doc) `__
+
+Neural Architecture Search
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Enhancement to serialization utilities `(doc) `__ and changes to the recommended practice of customizing evaluators. `(doc) `__
+* Support latency constraint on edge device for ProxylessNAS based on nn-Meter. `(doc) `__
+* Trial parameters are shown in a more friendly way in Retiarii experiments.
+* Refactor NAS examples of ProxylessNAS and SPOS.
+
+Model Compression
+^^^^^^^^^^^^^^^^^
+
+* New Pruners Supported in Pruning V2
+
+  * Auto-Compress Pruner `(doc) `__
+  * AMC Pruner `(doc) `__
+  * Movement Pruning Pruner `(doc) `__
+
+* Support ``nni.trace`` wrapped ``Optimizer`` in Pruning V2. To affect the user experience as little as possible, only the input parameters of the optimizer are traced. 
`(doc) `__ +* Optimize Taylor Pruner, APoZ Activation Pruner, Mean Activation Pruner in V2 memory usage. +* Add more examples for Pruning V2. +* Add document for pruning config list. `(doc) `__ +* Parameter ``masks_file`` of ``ModelSpeedup`` now accepts `pathlib.Path` object. (Thanks to @dosemeion) `(doc) `__ +* Bug Fix + + * Fix Slim Pruner in V2 not sparsify the BN weight. + * Fix Simulator Annealing Task Generator generates config ignoring 0 sparsity. + +Documentation +^^^^^^^^^^^^^ + +* Supported GitHub feature "Cite this repository". +* Updated index page of readthedocs. +* Updated Chinese documentation. + + * From now on NNI only maintains translation for most import docs and ensures they are up to date. + +* Reorganized HPO tuners' doc. + +Bugfixes +^^^^^^^^ + +* Fixed a bug where numpy array is used as a truth value. (Thanks to @khituras) +* Fixed a bug in updating search space. +* Fixed a bug that HPO search space file does not support scientific notation and tab indent. + + * For now NNI does not support mixing scientific notation and YAML features. We are waiting for PyYAML to update. + +* Fixed a bug that causes DARTS 2nd order to crash. +* Fixed a bug that causes deep copy of mutation primitives (e.g., LayerChoice) to crash. +* Removed blank at bottom in Web UI overview page. + +Release 2.5 - 11/2/2021 +----------------------- + +Model Compression +^^^^^^^^^^^^^^^^^ + +* New major version of pruning framework `(doc) `__ + + * Iterative pruning is more automated, users can use less code to implement iterative pruning. + * Support exporting intermediate models in the iterative pruning process. + * The implementation of the pruning algorithm is closer to the paper. + * Users can easily customize their own iterative pruning by using ``PruningScheduler``. + * Optimize the basic pruners underlying generate mask logic, easier to extend new functions. + * Optimized the memory usage of the pruners. + +* MobileNetV2 end-to-end example `(notebook) `__ +* Improved QAT quantizer `(doc) `__ + + * support dtype and scheme customization + * support dp multi-gpu training + * support load_calibration_config + +* Model speed-up now supports directly loading the mask `(doc) `__ +* Support speed-up depth-wise convolution +* Support bn-folding for LSQ quantizer +* Support QAT and LSQ resume from PTQ +* Added doc for observer quantizer `(doc) `__ + +Neural Architecture Search +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* NAS benchmark `(doc) `__ + + * Support benchmark table lookup in experiments + * New data preparation approach + +* Improved `quick start doc `__ +* Experimental CGO execution engine `(doc) `__ + +Hyper-Parameter Optimization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* New training platform: Alibaba DSW+DLC `(doc) `__ +* Support passing ConfigSpace definition directly to BOHB `(doc) `__ (thanks to khituras) +* Reformatted `experiment config doc `__ +* Added example config files for Windows (thanks to @politecat314) +* FrameworkController now supports reuse mode + +Fixed Bugs +^^^^^^^^^^ + +* Experiment cannot start due to platform timestamp format (issue #4077 #4083) +* Cannot use ``1e-5`` in search space (issue #4080) +* Dependency version conflict caused by ConfigSpace (issue #3909) (thanks to @jexxers) +* Hardware-aware SPOS example does not work (issue #4198) +* Web UI show wrong remaining time when duration exceeds limit (issue #4015) +* cudnn.deterministic is always set in AMC pruner (#4117) thanks to @mstczuo + +And... +^^^^^^ + +* New `emoticons `__! + +.. 
image:: https://raw.githubusercontent.com/microsoft/nni/v2.5/docs/img/emoicons/Holiday.png
+
+Release 2.4 - 8/11/2021
+-----------------------
+
+Major Updates
+^^^^^^^^^^^^^
+
+Neural Architecture Search
+""""""""""""""""""""""""""
+
+* NAS visualization: visualize model graph through Netron (#3878)
+* Support NAS bench 101/201 on Retiarii framework (#3871 #3920)
+* Support hypermodule AutoActivation (#3868)
+* Support PyTorch v1.8/v1.9 (#3937)
+* Support Hardware-aware NAS with nn-Meter (#3938)
+* Enable `fixed_arch` on Retiarii (#3972)
+
+Model Compression
+"""""""""""""""""
+
+* Refactor of ModelSpeedup: auto shape/mask inference (#3462)
+* Add more examples for ModelSpeedup (#3880)
+* Support global sort for Taylor pruning (#3896)
+* Support TransformerHeadPruner (#3884)
+* Support batch normalization folding in QAT quantizer (#3911, thanks to the external contributor @chenbohua3)
+* Support post-training observer quantizer (#3915, thanks to the external contributor @chenbohua3)
+* Support ModelSpeedup for Slim Pruner (#4008)
+* Support TensorRT 8.0.0 in ModelSpeedup (#3866)
+
+Hyper-parameter Tuning
+""""""""""""""""""""""
+
+* Improve HPO benchmarks (#3925)
+* Improve type validation of user-defined search spaces (#3975)
+
+Training service & nnictl
+"""""""""""""""""""""""""
+
+* Support JupyterLab (#3668 #3954)
+* Support viewing an experiment from its experiment folder (#3870)
+* Support kubeflow in the training service reuse framework (#3919)
+* Support viewing trial logs on WebUI for an experiment launched in `view` mode (#3872)
+
+Minor Updates & Bug Fixes
+"""""""""""""""""""""""""
+
+* Fix a failure when exiting a Retiarii experiment (#3899)
+* Fix `exclude` not supported in some `config_list` cases (#3815)
+* Fix a bug in remote training service in reuse mode (#3941)
+* Improve IP address detection in a modern way (#3860)
+* Fix a bug of the search box on WebUI (#3935)
+* Fix a bug in url_prefix of WebUI (#4051)
+* Support dict format of intermediate results on WebUI (#3895)
+* Fix a bug in openpai training service induced by experiment config v2 (#4027 #4057)
+* Improve docs (#3861 #3885 #3966 #4004 #3955)
+* Improve the API `export_model` in model compression (#3968)
+* Support `UnSqueeze` in ModelSpeedup (#3960)
+* Thanks to other external contributors: @Markus92 (#3936), @thomasschmied (#3963), @twmht (#3842)
+
+
+Release 2.3 - 6/15/2021
+-----------------------
+
+Major Updates
+^^^^^^^^^^^^^
+
+Neural Architecture Search
+""""""""""""""""""""""""""
+
+* Retiarii Framework (NNI NAS 2.0) Beta Release with new features:
+
+  * Support new high-level APIs: ``Repeat`` and ``Cell`` (#3481)
+  * Support pure-python execution engine (#3605)
+  * Support policy-based RL strategy (#3650)
+  * Support nested ModuleList (#3652)
+  * Improve documentation (#3785)
+
+  **Note**: more exciting features of Retiarii are planned for future releases; please refer to `Retiarii Roadmap `__ for more information.
+
+* Add new NAS algorithm: Blockwise DNAS FBNet (#3532, thanks to the external contributor @alibaba-yiwuyao)
+
+Model Compression
+"""""""""""""""""
+
+* Support Auto Compression Framework (#3631)
+* Support slim pruner in TensorFlow (#3614)
+* Support LSQ quantizer (#3503, thanks to the external contributor @chenbohua3)
+* Improve APIs for iterative pruners (#3507 #3688)
+
+Training service & REST
+"""""""""""""""""""""""
+
+* Support 3rd-party training service (#3662 #3726)
+* Support setting prefix URL (#3625 #3674 #3672 #3643)
+* Improve NNI manager logging (#3624)
+* Remove outdated TensorBoard code from nnictl (#3613)
+
+Hyper-Parameter Optimization
+""""""""""""""""""""""""""""
+
+* Add new tuner: DNGO (#3479 #3707)
+* Add benchmark for tuners (#3644 #3720 #3689)
+
+WebUI
+"""""
+
+* Improve search parameters on the trial detail page (#3651 #3723 #3715)
+* Make selected trials consistent after auto-refresh in the detail table (#3597)
+* Add trial stdout button in local mode (#3653 #3690)
+
+Examples & Documentation
+""""""""""""""""""""""""
+
+* Convert all trial examples from config v1 to config v2 (#3721 #3733 #3711 #3600)
+* Add new Jupyter notebook examples (#3599 #3700)
+
+Dev Excellence
+""""""""""""""
+
+* Upgrade dependencies in Dockerfile (#3713 #3722)
+* Replace ``ruamel.yaml`` with PyYAML (#3702)
+* Add pipelines for AML and hybrid training service and experiment config V2 (#3477 #3648)
+* Add pipeline badge in README (#3589)
+* Update issue bug report template (#3501)
+
+
+Bug Fixes & Minor Updates
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fix a syntax error on Windows (#3634)
+* Fix a logging-related bug (#3705)
+* Fix a bug in GPU indices (#3721)
+* Fix a bug in FrameworkController (#3730)
+* Fix a bug in the ``export_data_url`` format (#3665)
+* Report version check failure as a warning (#3654)
+* Fix bugs and lints in nnictl (#3712)
+* Fix a bug of ``optimize_mode`` on WebUI (#3731)
+* Fix a bug of ``useActiveGpu`` in AML v2 config (#3655)
+* Fix a bug of ``experiment_working_directory`` in Retiarii config (#3607)
+* Fix a bug in mask conflict (#3629, thanks to the external contributor @Davidxswang)
+* Fix a bug in model speedup shape inference (#3588, thanks to the external contributor @Davidxswang)
+* Fix a bug in multithreading on Windows (#3604, thanks to the external contributor @Ivanfangsc)
+* Delete redundant code in training service (#3526, thanks to the external contributor @maxsuren)
+* Fix a typo in DoReFa compression doc (#3693, thanks to the external contributor @Erfandarzi)
+* Update docstrings in model compression (#3647, thanks to the external contributor @ichejun)
+* Fix a bug when using Kubernetes containers (#3719, thanks to the external contributor @rmfan)
+
+
+Release 2.2 - 4/26/2021
+-----------------------
+
+Major updates
+^^^^^^^^^^^^^
+
+Neural Architecture Search
+""""""""""""""""""""""""""
+
+* Improve NAS 2.0 (Retiarii) Framework (Alpha Release)
+
+  * Support local debug mode (#3476)
+  * Support nesting ``ValueChoice`` in ``LayerChoice`` (#3508)
+  * Support dict/list type in ``ValueChoice`` (#3508)
+  * Improve the format of exported architectures (#3464)
+  * Refactor of NAS examples (#3513)
+  * Refer to `here `__ for the Retiarii Roadmap
+
+Model Compression
+"""""""""""""""""
+
+* Support speedup for mixed-precision quantization models (Experimental) (#3488 #3512)
+* Support model export for quantization algorithms (#3458 #3473)
+* Support model export in model compression for TensorFlow (#3487)
+* Improve documentation (#3482)
+
+nnictl & nni.experiment
+"""""""""""""""""""""""
+
+* Add native support for experiment config V2 (#3466 #3540 #3552)
+* Add resume and view mode in Python API ``nni.experiment`` (#3490 #3524 #3545)
+
+Training Service
+""""""""""""""""
+
+* Support umount for shared storage in remote training service (#3456)
+* Support Windows as the remote training service in reuse mode (#3500)
+* Remove duplicated env folder in remote training service (#3472)
+* Add log information for GPU metric collector (#3506)
+* Enable optional Pod Spec for FrameworkController platform (#3379, thanks to the external contributor @mbu93)
+
+WebUI
+"""""
+
+* Support launching TensorBoard on WebUI (#3454 #3361 #3531)
+* Upgrade echarts-for-react to v5 (#3457)
+* Add wrap for dispatcher/nnimanager log monaco editor (#3461)
+
+Bug Fixes
+^^^^^^^^^
+
+* Fix a bug of the FLOPs counter (#3497)
+* Fix a conflict between the hyper-parameter Add/Remove axes button and the table Add/Remove columns button (#3491)
+* Fix a bug where monaco editor search text was not displayed completely (#3492)
+* Fix a bug of Cream NAS (#3498, thanks to the external contributor @AliCloud-PAI)
+* Fix typos in docs (#3448, thanks to the external contributor @OliverShang)
+* Fix a typo in NAS 1.0 (#3538, thanks to the external contributor @ankitaggarwal23)
+
+
+Release 2.1 - 3/10/2021
+-----------------------
+
+Major updates
+^^^^^^^^^^^^^
+
+Neural architecture search
+""""""""""""""""""""""""""
+
+* Improve NAS 2.0 (Retiarii) Framework (Improved Experimental)
+
+  * Improve the robustness of graph generation and code generation for PyTorch models (#3365)
+  * Support the inline mutation API ``ValueChoice`` (#3349 #3382)
+  * Improve the design and implementation of Model Evaluator (#3359 #3404)
+  * Support Random/Grid/Evolution exploration strategies (i.e., search algorithms) (#3377)
+  * Refer to `here `__ for the Retiarii Roadmap
+
+Training service
+""""""""""""""""
+
+* Support shared storage for reuse mode (#3354)
+* Support Windows as the local training service in hybrid mode (#3353)
+* Remove PAIYarn training service (#3327)
+* Add "recently-idle" scheduling algorithm (#3375)
+* Deprecate ``preCommand`` and enable ``pythonPath`` for remote training service (#3284 #3410)
+* Refactor reuse mode temp folder (#3374)
+
+nnictl & nni.experiment
+"""""""""""""""""""""""
+
+* Migrate ``nnicli`` to new Python API ``nni.experiment`` (#3334)
+* Refactor the way of specifying a tuner in the experiment Python API (\ ``nni.experiment``\ ), more aligned with ``nnictl`` (#3419)
+
+WebUI
+"""""
+
+* Support showing the assigned training service of each trial in hybrid mode on WebUI (#3261 #3391)
+* Support multiple selection for filter status on the experiments management page (#3351)
+* Improve overview page (#3316 #3317 #3352)
+* Support copying trial id in the table (#3378)
+
+Documentation
+^^^^^^^^^^^^^
+
+* Improve model compression examples and documentation (#3326 #3371)
+* Add Python API examples and documentation (#3396)
+* Add SECURITY doc (#3358)
+* Add 'What's NEW!' section in README (#3395)
+* Update English contributing doc (#3398, thanks to the external contributor @Yongxuanzhang)
+
+Bug fixes
+^^^^^^^^^
+
+* Fix AML outputs path and a Python process not being killed (#3321)
+* Fix a bug where an experiment launched from Python could not be resumed by nnictl (#3309)
+* Fix the import path of the network morphism example (#3333)
+* Fix a bug in tuple unpacking (#3340)
+* Fix a security bug of arbitrary code execution (#3311, thanks to the external contributor @huntr-helper)
+* Fix a ``NoneType`` error on Jupyter notebook (#3337, thanks to the external contributor @tczhangzhi)
+* Fix bugs in Retiarii (#3339 #3341 #3357, thanks to the external contributor @tczhangzhi)
+* Fix a bug in the AdaptDL mode example (#3381, thanks to the external contributor @ZeyaWang)
+* Fix the spelling of assessor (#3416, thanks to the external contributor @ByronCHAO)
+* Fix a bug in ruamel import (#3430, thanks to the external contributor @rushtehrani)
+
+
+Release 2.0 - 1/14/2021
+-----------------------
+
+Major updates
+^^^^^^^^^^^^^
+
+Neural architecture search
+""""""""""""""""""""""""""
+
+* Support an improved NAS framework: Retiarii (experimental)
+
+  * Feature roadmap (`issue #3301 `__)
+  * `Related issues and pull requests `__
+  * Documentation (#3221 #3282 #3287)
+
+* Support a new NAS algorithm: Cream (#2705)
+* Add a new NAS benchmark for NLP model search (#3140)
+
+Training service
+""""""""""""""""
+
+* Support hybrid training service (#3097 #3251 #3252)
+* Support AdlTrainingService, a new training service based on Kubernetes (#3022, thanks to external contributors Petuum @pw2393)
+
+
+Model compression
+"""""""""""""""""
+
+* Support pruning schedule for the fpgm pruning algorithm (#3110)
+* ModelSpeedup improvement: support torch v1.7 (updated graph_utils.py) (#3076)
+* Improve model compression utility: model FLOPs counter (#3048 #3265)
+
+
+WebUI & nnictl
+""""""""""""""
+
+* Support experiments management on WebUI, adding a web page for it (#3081 #3127)
+* Improve the layout of the overview page (#3046 #3123)
+* Add a navigation bar on the right for logs and configs; add expanded icons for the table (#3069 #3103)
+
+
+Others
+""""""
+
+* Support launching an experiment from Python code (#3111 #3210 #3263)
+* Refactor builtin/customized tuner installation (#3134)
+* Support new experiment configuration V2 (#3138 #3248 #3251)
+* Reorganize source code directory hierarchy (#2962 #2987 #3037)
+* Change SIGKILL to SIGTERM in local mode when cancelling trial jobs (#3173)
+* Refactor hyperband (#3040)
+
+
+Documentation
+^^^^^^^^^^^^^
+
+* Port markdown docs to reStructuredText docs and introduce ``githublink`` (#3107)
+* List related research and publications in doc (#3150)
+* Add a tutorial on saving and loading quantized models (#3192)
+* Remove paiYarn doc and add a description of the ``reuse`` config in remote mode (#3253)
+* Update EfficientNet doc to clarify repo versions (#3158, thanks to the external contributor @ahundt)
+
+Bug fixes
+^^^^^^^^^
+
+* Fix exp-duration pause timing under NO_MORE_TRIAL status (#3043)
+* Fix a bug in NAS SPOS trainer, apply_fixed_architecture (#3051, thanks to the external contributor @HeekangPark)
+* Fix a ``_compute_hessian`` bug in NAS DARTS (PyTorch version) (#3058, thanks to the external contributor @hroken)
+* Fix a bug of conv1d in the cdarts utils (#3073, thanks to the external contributor @athaker)
+* Fix the handling of unknown trials when resuming an experiment (#3096)
+* Fix a bug of the kill command under Windows (#3106)
+* Fix lazy logging (#3108, thanks to the external contributor @HarshCasper)
+* Fix a checkpoint load and save issue in QAT quantizer (#3124, thanks to the external contributor @eedalong)
+* Fix a quant grad function calculation error (#3160, thanks to the external contributor @eedalong)
+* Fix a device assignment bug in a quantization algorithm (#3212, thanks to the external contributor @eedalong)
+* Fix a bug in ModelSpeedup and enhance its UT (#3279)
+* and others (#3063 #3065 #3098 #3109 #3125 #3143 #3156 #3168 #3175 #3180 #3181 #3183 #3203 #3205 #3207 #3214 #3216 #3219 #3223 #3224 #3230 #3237 #3239 #3240 #3245 #3247 #3255 #3257 #3258 #3262 #3263 #3267 #3269 #3271 #3279 #3283 #3289 #3290 #3295)
+
+
+Release 1.9 - 10/22/2020
+------------------------
+
+Major updates
+^^^^^^^^^^^^^
+
+Neural architecture search
+""""""""""""""""""""""""""
+
+
+* Support regularized evolution algorithm for the NAS scenario (#2802)
+* Add NASBench201 to the search space zoo (#2766)
+
+Model compression
+"""""""""""""""""
+
+
+* AMC pruner improvement: support ResNet; support reproduction of the experiments (default parameters in our example code) in the AMC paper (#2876 #2906)
+* Support constraint-awareness on some of our pruners to improve model compression efficiency (#2657)
+* Support "tf.keras.Sequential" in model compression for TensorFlow (#2887)
+* Support customized ops in the model FLOPs counter (#2795)
+* Support quantizing bias in QAT quantizer (#2914)
+
+Training service
+""""""""""""""""
+
+
+* Support configuring the Python environment using "preCommand" in remote mode (#2875)
+* Support AML training service on Windows (#2882)
+* Support reuse mode for remote training service (#2923)
+
+WebUI & nnictl
+""""""""""""""
+
+
+* Redesign the "Overview" page on WebUI with a new layout (#2914)
+* Upgrade node, yarn and FabricUI, and enable Eslint (#2894 #2873 #2744)
+* Add/Remove columns in the hyper-parameter chart and trials table on the "Trials detail" page (#2900)
+* Beautify the JSON format utility on WebUI (#2863)
+* Support nnictl command auto-completion (#2857)
+
+UT & IT
+^^^^^^^
+
+
+* Add integration test for experiment import and export (#2878)
+* Add integration test for user-installed builtin tuners (#2859)
+* Add unit test for nnictl (#2912)
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Refactor of the document for model compression (#2919)
+
+Bug fixes
+^^^^^^^^^
+
+
+* Fix a bug of the naïve evolution tuner to correctly deal with trial failures (#2695)
+* Resolve the warning "WARNING (nni.protocol) IPC pipeline not exists, maybe you are importing tuner/assessor from trial code?" (#2864)
+* Fix a search space issue in experiment save/load (#2886)
+* Fix a bug in experiment data import (#2878)
+* Fix annotation in remote mode (Python 3.8 ast update issue) (#2881)
+* Support boolean type for "choice" hyper-parameters when customizing trial configuration on WebUI (#3003)
+
+Release 1.8 - 8/27/2020
+-----------------------
+
+Major updates
+^^^^^^^^^^^^^
+
+Training service
+""""""""""""""""
+
+
+* Access trial logs directly on WebUI (local mode only) (#2718)
+* Add OpenPAI trial job detail link (#2703)
+* Support GPU scheduler in reusable environments (#2627) (#2769)
+* Add timeout for ``web_channel`` in ``trial_runner`` (#2710)
+* Show environment error messages in AzureML mode (#2724)
+* Add more log information when copying data in OpenPAI mode (#2702)
+
+WebUI, nnictl and nnicli
+""""""""""""""""""""""""
+
+
+* Improve hyper-parameter parallel coordinates plot (#2691) (#2759)
+* Add pagination for the trial job list (#2738) (#2773)
+* Enable panel close when clicking the overlay region (#2734)
+* Remove support for Multiphase on WebUI (#2760)
+* Support saving and restoring experiments (#2750)
+* Add intermediate results to exported results (#2706)
+* Add `command `__ to list trial results with highest/lowest metrics (#2747)
+* Improve the user experience of `nnicli `__ with `examples `__ (#2713)
+
+Neural architecture search
+""""""""""""""""""""""""""
+
+
+* `Search space zoo: ENAS and DARTS `__ (#2589)
+* API to query intermediate results in NAS benchmark (#2728)
+
+Model compression
+"""""""""""""""""
+
+
+* Support the List/Tuple Construct/Unpack operation for TorchModuleGraph (#2609)
+* Model speedup improvement: add support for DenseNet and InceptionV3 (#2719)
+* Support multiple successive tuple unpack operations (#2768)
+* `Doc comparing the performance of supported pruners `__ (#2742)
+* New pruners: `Sensitivity pruner `__ (#2684) and `AMC pruner `__ (#2573) (#2786)
+* TensorFlow v2 support in model compression (#2755)
+
+Backward incompatible changes
+"""""""""""""""""""""""""""""
+
+
+* Update the default experiment folder from ``$HOME/nni/experiments`` to ``$HOME/nni-experiments``. If you want to view experiments created by previous NNI releases, you can move the experiment folders from ``$HOME/nni/experiments`` to ``$HOME/nni-experiments`` manually. (#2686) (#2753)
+* Drop support for Python 3.5 and scikit-learn 0.20 (#2778) (#2777) (#2783) (#2787) (#2788) (#2790)
+
+Others
+""""""
+
+
+* Upgrade TensorFlow version in Docker image (#2732) (#2735) (#2720)
+
+Examples
+^^^^^^^^
+
+
+* Remove gpuNum in assessor examples (#2641)
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Improve customized tuner documentation (#2628)
+* Fix several typos and grammar mistakes in documentation (#2637 #2638, thanks @tomzx)
+* Improve AzureML training service documentation (#2631)
+* Improve CI of Chinese translation (#2654)
+* Improve OpenPAI training service documentation (#2685)
+* Improve documentation of community sharing (#2640)
+* Add tutorial for Colab support (#2700)
+* Improve documentation structure for model compression (#2676)
+
+Bug fixes
+^^^^^^^^^
+
+
+* Fix mkdir error in training service (#2673)
+* Fix a bug when using chmod in remote training service (#2689)
+* Fix a dependency issue by making ``_graph_utils`` imported inline (#2675)
+* Fix a mask issue in ``SimulatedAnnealingPruner`` (#2736)
+* Fix an intermediate graph zooming issue (#2738)
+* Fix an issue with unordered dicts when querying NAS benchmarks (#2728)
+* Fix an import issue for the gradient selector dataloader iterator (#2690)
+* Fix support for adding tens of machines in remote training service (#2725)
+* Fix several styling issues in WebUI (#2762 #2737)
+* Fix support for unusual types in metrics, including NaN and Infinity (#2782)
+* Fix nnictl experiment delete (#2791)
+
+Release 1.7 - 7/8/2020
+----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+Training Service
+""""""""""""""""
+
+
+* Support AML (Azure Machine Learning) platform as an NNI training service.
+* OpenPAI jobs can be reusable: when a trial is completed, the OpenPAI job won't stop; it waits for the next trial. `refer to reuse flag in OpenPAI config `__.
+* `Support ignoring files and folders in the code directory with .nniignore when uploading the code directory to a training service `__ (a hypothetical sketch of such a file appears after the WebUI list below).
+
+Neural Architecture Search (NAS)
+""""""""""""""""""""""""""""""""
+
+
+*
+  `Provide NAS Open Benchmarks (NasBench101, NasBench201, NDS) with friendly APIs `__.
+
+*
+  `Support Classic NAS (i.e., non-weight-sharing mode) on TensorFlow 2.X `__.
+
+Model Compression
+"""""""""""""""""
+
+
+* Improve Model Speedup: track more dependencies among layers and automatically resolve mask conflicts; support the speedup of pruned ResNet.
+* Add new pruners, including auto model pruning algorithms: `NetAdapt Pruner `__\ , `SimulatedAnnealing Pruner `__\ , `AutoCompress Pruner `__\ , and `ADMM Pruner `__.
+* Add a `model sensitivity analysis tool `__ to help users find the sensitivity of each layer to pruning.
+*
+  `Easy FLOPs calculation for model compression and NAS `__.
+
+*
+  Update lottery ticket pruner to export the winning ticket.
+
+Examples
+""""""""
+
+
+* Automatically optimize tensor operators on NNI with a new `customized tuner OpEvo `__.
+
+Built-in tuners/assessors/advisors
+""""""""""""""""""""""""""""""""""
+
+
+* `Allow customized tuners/assessors/advisors to be installed as built-in algorithms `__.
+
+WebUI
+"""""
+
+
+* Support visualizing nested search spaces in a friendlier way.
+* Show trials' dict keys in the hyper-parameter graph.
+* Enhancements to trial duration display.
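+
+A minimal, hypothetical ``.nniignore`` sketch to illustrate the feature mentioned under Training Service above (NNI documents the file as using gitignore-style patterns; treat the exact pattern semantics as an assumption and check the linked doc):
+
+.. code-block:: text
+
+   # exclude bulky artifacts from the uploaded code directory (hypothetical patterns)
+   data/
+   checkpoints/
+   *.log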
+
+Others
+""""""
+
+
+* Provide a utility function to merge parameters received from NNI
+* Support setting paiStorageConfigName in pai mode
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Improve `documentation for model compression `__
+* Improve `documentation `__
+  and `examples `__ for NAS benchmarks.
+* Improve `documentation for AzureML training service `__
+* Homepage migration to readthedocs.
+
+Bug Fixes
+^^^^^^^^^
+
+
+* Fix a bug for model graphs with shared nn.Module
+* Fix nodejs OOM when running ``make build``
+* Fix NASUI bugs
+* Fix an update issue of the duration and intermediate results graphs.
+* Fix minor WebUI table style issues.
+
+Release 1.6 - 5/26/2020
+-----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+New Features and improvements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Increase the IPC limit to 1,000,000 (100W)
+* Improve the code storage upload logic among trials on non-local platforms
+* Support ``__version__`` for SDK version
+* Support Windows dev install
+
+Web UI
+^^^^^^
+
+
+* Show trial error messages
+* Finalize homepage layout
+* Refactor overview's best trials module
+* Remove multiphase from WebUI
+* Add tooltip for trial concurrency on the overview page
+* Show top trials for the hyper-parameter graph
+
+HPO Updates
+^^^^^^^^^^^
+
+
+* Improve PBT on failure handling and support experiment resume for PBT
+
+NAS Updates
+^^^^^^^^^^^
+
+
+* NAS support for TensorFlow 2.0 (preview) `TF2.0 NAS examples `__
+* Use OrderedDict for LayerChoice
+* Prettify the format of export
+* Replace layer choice with the selected module after applying a fixed architecture
+
+Model Compression Updates
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Model compression PyTorch 1.4 support
+
+Training Service Updates
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Update OpenPAI YAML merge logic
+* Support Windows as a remote machine in remote mode `Remote Mode `__
+
+Bug Fixes
+^^^^^^^^^
+
+
+* Fix dev install
+* Fix SPOS example crash when the checkpoints do not have state_dict
+* Fix a table sort issue when an experiment has failed trials
+* Support multiple Python environments (conda, pyenv, etc.)
+
+Release 1.5 - 4/13/2020
+-----------------------
+
+New Features and Documentation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Hyper-Parameter Optimizing
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* New tuner: `Population Based Training (PBT) `__
+* Trials can now report infinity and NaN as results
+
+Neural Architecture Search
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* New NAS algorithm: `TextNAS `__
+* ENAS and DARTS now support `visualization `__ through the web UI.
+
+Model Compression
+^^^^^^^^^^^^^^^^^
+
+
+* New Pruner: `GradientRankFilterPruner `__
+* Compressors will validate configuration by default
+* Refactor: add optimizer as an input argument of pruner, for easy support of DataParallel and more efficient iterative pruning. This is a breaking change for the usage of iterative pruning algorithms.
+* Model compression examples are refactored and improved
+* Add documentation for `implementing compressing algorithms `__
+
+Training Service
+^^^^^^^^^^^^^^^^
+
+
+* Kubeflow now supports pytorchjob crd v1 (thanks to the external contributor @jiapinai)
+* Experimental `DLTS `__ support
+
+Overall Documentation Improvement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Documentation is significantly improved on grammar, spelling, and wording (thanks to the external contributor @AHartNtkn)
+
+Fixed Bugs
+^^^^^^^^^^
+
+
+* ENAS cannot have more than one LSTM layer (thanks to the external contributor @marsggbo)
+* NNI manager's timers never unsubscribe (thanks to the external contributor @guilhermehn)
+* NNI manager may exhaust heap memory (thanks to the external contributor @Sundrops)
+* Batch tuner does not support customized trials (#2075)
+* Experiment cannot be killed if it fails on start (#2080)
+* Non-number type metrics break the web UI (#2278)
+* A bug in the lottery ticket pruner
+* Other minor glitches
+
+Release 1.4 - 2/19/2020
+-----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+Neural Architecture Search
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Support the `C-DARTS `__ algorithm and add `the example `__ using it
+* Support a preliminary version of `ProxylessNAS `__ and the corresponding `example `__
+* Add unit tests for the NAS framework
+
+Model Compression
+^^^^^^^^^^^^^^^^^
+
+
+* Support DataParallel for compressing models, and provide `an example `__ of using DataParallel
+* Support `model speedup `__ for compressed models, in Alpha version
+
+Training Service
+^^^^^^^^^^^^^^^^
+
+
+* Support complete PAI configurations by allowing users to specify the PAI config file path
+* Add example config YAML files for the new PAI mode (i.e., paiK8S)
+* Support deleting experiments using sshkey in remote mode (thanks to the external contributor @tyusr)
+
+WebUI
+^^^^^
+
+
+* WebUI refactor: adopt the fabric framework
+
+Others
+^^^^^^
+
+
+* Support running an `NNI experiment in the foreground `__\ , i.e., the ``--foreground`` argument in ``nnictl create/resume/view``
+* Support canceling trials in UNKNOWN state
+* Support large search spaces whose size could be up to 50 MB (thanks to the external contributor @Sundrops)
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Improve `the index structure `__ of NNI readthedocs
+* Improve `documentation for NAS `__
+* Improve documentation for `the new PAI mode `__
+* Add QuickStart guidance for `NAS `__ and `model compression `__
+* Improve documentation for `the supported EfficientNet `__
+
+Bug Fixes
+^^^^^^^^^
+
+
+* Correctly support NaN in metric data, JSON compliant
+* Fix the out-of-range bug of the ``randint`` type in search space
+* Fix the bug of wrong tensor device when exporting ONNX models in model compression
+* Fix incorrect handling of nnimanagerIP in the new PAI mode (i.e., paiK8S)
+
+Release 1.3 - 12/30/2019
+------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+Neural Architecture Search Algorithms Support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* `Single Path One Shot `__ algorithm and the example using it
+
+Model Compression Algorithms Support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* `Knowledge Distillation `__ algorithm and the example using it
+* Pruners
+
+  * `L2Filter Pruner `__
+  * `ActivationAPoZRankFilterPruner `__
+  * `ActivationMeanRankFilterPruner `__
+
+* `BNN Quantizer `__
+
+Training Service
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*
+  NFS Support for PAI
+
+  Since OpenPAI v0.11, instead of using HDFS as the default storage, OpenPAI can use NFS, AzureBlob, or other storage as its default storage. In this release, NNI extended the support for this recent change made by OpenPAI, and can integrate with OpenPAI v0.11 or later versions with various default storage.
+
+*
+  Kubeflow update adoption
+
+  Adopted Kubeflow 0.7's new support for tf-operator.
+
+Engineering (code and build automation)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Enforce `ESLint `__ on static code analysis.
+
+Small changes & Bug Fixes
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Correctly recognize built-in tuners and customized tuners
+* Add logging in dispatcher base
+* Fix the bug where a tuner/assessor failure sometimes kills the experiment.
+* Fix local system as remote machine `issue `__
+* De-duplicate trial configuration in SMAC tuner `ticket `__
+
+Release 1.2 - 12/02/2019
+------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* `Feature Engineering `__
+
+  * New feature engineering interface
+  * Feature selection algorithms: `Gradient feature selector `__ & `GBDT selector `__
+  * `Examples for feature engineering `__
+
+* Neural Architecture Search (NAS) on NNI
+
+  * `New NAS interface `__
+  * NAS algorithms: `ENAS `__\ , `DARTS `__\ , `P-DARTS `__ (in PyTorch)
+  * NAS in classic mode (each trial runs independently)
+
+* Model compression
+
+  * `New model pruning algorithms `__\ : lottery ticket pruning approach, L1Filter pruner, Slim pruner, FPGM pruner
+  * `New model quantization algorithms `__\ : QAT quantizer, DoReFa quantizer
+  * Support the API for exporting compressed models.
+
+* Training Service
+
+  * Support OpenPAI token authentication
+
+* Examples:
+
+  * `An example to automatically tune rocksdb configuration with NNI `__.
+  * `A new MNIST trial example supporting tensorflow 2.0 `__.
+
+* Engineering Improvements
+
+  * For remote training service, trial jobs that require no GPU are now scheduled with a round-robin policy instead of randomly.
+  * Pylint rules added to check pull requests; new pull requests need to comply with these `pylint rules `__.
+
+* Web Portal & User Experience
+
+  * Support users adding customized trials.
+  * Users can zoom in/out in detail graphs, except the hyper-parameter graph.
+
+* Documentation
+
+  * Improved NNI API documentation with more API docstrings.
+
+Bug fix
+^^^^^^^
+
+
+* Fix the table sort issue when failed trials have no metrics. -Issue #1773
+* Maintain selected status (Maximal/Minimal) when the page is switched. -PR #1710
+* Make the hyper-parameter graph's default metric yAxis more accurate. -PR #1736
+* Fix GPU script permission issue. -Issue #1665
+
+Release 1.1 - 10/23/2019
+------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* New tuner: `PPO Tuner `__
+* `View stopped experiments `__
+* Tuners can now use dedicated GPU resource (see ``gpuIndices`` in the `tutorial `__ for details)
+* Web UI improvements
+
+  * The trials detail page can now list the hyperparameters of each trial, as well as their start and end times (via "add column")
+  * Viewing huge experiments is now less laggy
+
+* More examples
+
+  * `EfficientNet PyTorch example `__
+  * `Cifar10 NAS example `__
+
+* `Model compression toolkit - Alpha release `__\ : We are glad to announce the alpha release of the model compression toolkit on top of NNI. It is still in the experimental phase and might evolve based on usage feedback. We'd like to invite you to use it, give feedback, and even contribute.
+
+Fixed Bugs
+^^^^^^^^^^
+
+
+* Multiphase job hangs when the search space is exhausted (issue #1204)
+* ``nnictl`` fails when logs are not available (issue #1548)
+
+Release 1.0 - 9/2/2019
+----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+*
+  Tuners and Assessors
+
+
+  * Support Auto-Feature generator & selection -Issue #877 -PR #1387
+
+    * Provide auto feature interface
+    * Tuner based on beam search
+    * `Add Pakdd example `__
+
+  * Add a parallel algorithm to improve the performance of TPE with large concurrency. -PR #1052
+  * Support multiphase for hyperband -PR #1257
+
+*
+  Training Service
+
+
+  * Support private docker registry -PR #755
+
+
+  * Engineering Improvements
+
+    * Python wrapper for the REST API, supporting retrieval of metric values in a programmatic way PR #1318
+    * New Python API: get_experiment_id(), get_trial_id() -PR #1353 -Issue #1331 & -Issue #1368
+    * Optimized NAS search space -PR #1393
+
+      * Unify NAS search space with ``_type`` -- "mutable_type"
+      * Update random search tuner
+
+    * Set gpuNum as optional -Issue #1365
+    * Remove outputDir and dataDir configuration in PAI mode -Issue #1342
+    * When creating a trial in Kubeflow mode, codeDir will no longer be copied to logDir -Issue #1224
+
+*
+  Web Portal & User Experience
+
+
+  * Show the best metric curve during the search progress in WebUI -Issue #1218
+  * Show the current number of parameters in the list in multiphase experiments -Issue #1210 -PR #1348
+  * Add "Intermediate count" option in AddColumn. -Issue #1210
+  * Support searching parameter values in WebUI -Issue #1208
+  * Enable automatic scaling of axes for metric values in the default metric graph -Issue #1360
+  * Add a detailed documentation link to the nnictl command in the command prompt -Issue #1260
+  * UX improvement for showing the error log -Issue #1173
+
+*
+  Documentation
+
+
+  * Update the docs structure -Issue #1231
+  * (deprecated) Multiphase document improvement -Issue #1233 -PR #1242
+
+    * Add configuration example
+
+  * `WebUI description improvement `__ -PR #1419
+
+Bug fix
+^^^^^^^
+
+
+* (Bug fix) Fix the broken links in the 0.9 release -Issue #1236
+* (Bug fix) Script for auto-complete
+* (Bug fix) Fix the pipeline issue that it only checks the exit code of the last command in a script. -PR #1417
+* (Bug fix) quniform for tuners -Issue #1377
+* (Bug fix) 'quniform' has a different meaning between GridSearch and other tuners. -Issue #1335
+* (Bug fix) "nnictl experiment list" gives the status of a "RUNNING" experiment as "INITIALIZED" -PR #1388
+* (Bug fix) SMAC cannot be installed if nni is installed in dev mode -Issue #1376
+* (Bug fix) The filter button of the intermediate result cannot be clicked -Issue #1263
+* (Bug fix) API "/api/v1/nni/trial-jobs/xxx" doesn't show all of a trial's parameters in a multiphase experiment -Issue #1258
+* (Bug fix) Succeeded trial doesn't have a final result but WebUI shows ×××(FINAL) -Issue #1207
+* (Bug fix) IT for nnictl stop -Issue #1298
+* (Bug fix) Fix security warning
+* (Bug fix) Hyper-parameter page broken -Issue #1332
+* (Bug fix) Run flake8 tests to find Python syntax errors and undefined names -PR #1217
+
+Release 0.9 - 7/1/2019
+----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* General NAS programming interface
+
+  * Add ``enas-mode`` and ``oneshot-mode`` for the NAS interface: `PR #1201 `__
+
+*
+  `Gaussian Process Tuner with Matern kernel `__
+
+*
+  (deprecated) Multiphase experiment support
+
+
+  * Added new training service support for multiphase experiments: PAI mode supports multiphase experiments since v0.9.
+  * Added multiphase capability for the following builtin tuners:
+
+    * TPE, Random Search, Anneal, Naïve Evolution, SMAC, Network Morphism, Metis Tuner.
+
+*
+  Web Portal
+
+
+  * Enable trial comparison in the Web Portal. For details, refer to `View trials status `__
+  * Allow users to adjust the rendering interval of the Web Portal. For details, refer to `View Summary Page `__
+  * Show intermediate results in a friendlier way. For details, refer to `View trials status `__
+
+* `Commandline Interface `__
+
+  * ``nnictl experiment delete``\ : delete one or all experiments, including logs, results, environment information and caches. Use it to delete useless experiment results or to save disk space.
+  * ``nnictl platform clean``\ : use it to clean up disk space on a target platform. The provided YAML file includes the information of the target platform, and it follows the same schema as the NNI configuration file.
+
+Bug fix and other changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Tuner installation improvements: add `sklearn `__ to nni dependencies.
+* (Bug Fix) Failed to connect to PAI http code - `Issue #1076 `__
+* (Bug Fix) Validate file name for PAI platform - `Issue #1164 `__
+* (Bug Fix) Update GMM evaluation in Metis Tuner
+* (Bug Fix) Negative time number rendering in Web Portal - `Issue #1182 `__\ , `Issue #1185 `__
+* (Bug Fix) Hyper-parameter not shown correctly in WebUI when there is only one hyper-parameter - `Issue #1192 `__
+
+Release 0.8 - 6/4/2019
+----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* Support NNI on Windows for OpenPAI/Remote mode
+
+  * NNI running on Windows in remote mode
+  * NNI running on Windows in OpenPAI mode
+
+* Advanced features for using GPUs
+
+  * Run multiple trial jobs on the same GPU in local and remote mode
+  * Run trial jobs on a GPU running non-NNI jobs
+
+* Kubeflow v1beta2 operator
+
+  * Support Kubeflow TFJob/PyTorchJob v1beta2
+
+* `General NAS programming interface `__
+
+  * Provide a NAS programming interface for users to easily express their neural architecture search space through NNI annotation
+  * Provide a new command ``nnictl trial codegen`` for debugging the NAS code
+  * Tutorial for the NAS programming interface, example of NAS on MNIST, customized random tuner for NAS
+
+* Support resuming tuner/advisor state for experiment resume
+* For experiment resume, tuner/advisor will be resumed by replaying finished trial data
+* Web Portal
+
+  * Improve the design of copying a trial's parameters
+  * Support 'randint' type in the hyper-parameter graph
+  * Use shouldComponentUpdate to avoid unnecessary rendering
+
+Bug fix and other changes
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Fix the inconsistent command styles of ``nnictl update``
+* Support import data for SMAC tuner
+* Fix the bug of experiment state transitioning from ERROR back to RUNNING
+* Fix a bug of table entries
+* Nested search space refinement
+* Refine 'randint' type and support a lower bound
+* `Comparison of different hyper-parameter tuning algorithms `__
+* `Comparison of NAS algorithms `__
+* `NNI practice on Recommenders `__
+
+Release 0.7 - 4/29/2019
+-----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* `Support NNI on Windows `__
+
+  * NNI running on Windows in local mode
+
+* `New advisor: BOHB `__
+
+  * Support a new advisor BOHB, a robust and efficient hyperparameter tuning algorithm that combines the advantages of Bayesian optimization and Hyperband
+
+* `Support import and export of experiment data through nnictl `__
+
+  * Generate an analysis results report after the experiment execution
+  * Support importing data to tuners and advisors for tuning
+
+* `Designated GPU devices for NNI trial jobs `__
+
+  * Specify GPU devices for NNI trial jobs via the gpuIndices configuration; if gpuIndices is set in the experiment configuration file, only the specified GPU devices are used for NNI trial jobs.
+
+* Web Portal enhancement
+
+  * Decimal format of metrics other than default on the Web UI
+  * Hints in WebUI about Multiphase
+  * Enable copy/paste for hyperparameters as a Python dict
+  * Enable early-stopped trials' data for tuners.
+
+* NNICTL provides better error messages
+
+  * nnictl provides more meaningful error messages for YAML file format errors
+
+Bug fix
+^^^^^^^
+
+
+* Unable to kill all Python threads after nnictl stop in async dispatcher mode
+* nnictl --version does not work with make dev-install
+* All trial jobs' status stays 'waiting' for a long time on the OpenPAI platform
+
+Release 0.6 - 4/2/2019
+----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* `Version checking `__
+
+  * Check whether the version is consistent between nniManager and trialKeeper
+
+* `Report final metrics for early-stopped jobs `__
+
+  * If includeIntermediateResults is true, the last intermediate result of a trial that is early-stopped by the assessor is sent to the tuner as the final result. The default value of includeIntermediateResults is false.
+
+* `Separate Tuner/Assessor `__
+
+  * Add two pipes to separate the message receiving channels for tuner and assessor.
+
+* Make the log collection feature configurable
+* Add an intermediate result graph for all trials
+
+Bug fix
+^^^^^^^
+
+
+* `Add shmMB config key for OpenPAI `__
+* Fix the bug that no result is shown if metrics is a dict
+* Fix a number calculation issue for float types in hyperband
+* Fix a bug in the search space conversion in SMAC tuner
+* Fix the WebUI issue when parsing experiment.json with illegal format
+* Fix a cold start issue in Metis Tuner
+
+Release 0.5.2 - 3/4/2019
+------------------------
+
+Improvements
+^^^^^^^^^^^^
+
+
+* Curve fitting assessor performance improvement.
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Chinese version document: https://nni.readthedocs.io/zh/latest/
+* Debuggability/serviceability document: https://nni.readthedocs.io/en/latest/Tutorial/HowToDebug.html
+* Tuner assessor reference: https://nni.readthedocs.io/en/latest/sdk_reference.html
+
+Bug Fixes and Other Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Fix a race condition bug that did not store trial job cancel status correctly.
+* Fix a search space parsing error when using the SMAC tuner.
+* Fix the cifar10 example broken pipe issue.
+* Add unit test cases for nnimanager and local training service.
+* Add integration test Azure pipelines for remote machine, OpenPAI and Kubeflow training services.
+* Support Pylon in the OpenPAI webhdfs client.
+
+Release 0.5.1 - 1/31/2019
+-------------------------
+
+Improvements
+^^^^^^^^^^^^
+
+
+* Make the `log directory `__ configurable
+* Support `different levels of logs `__\ , making it easier to debug
+
+Documentation
+^^^^^^^^^^^^^
+
+
+* Reorganized documentation & new homepage released: https://nni.readthedocs.io/en/latest/
+
+Bug Fixes and Other Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Fix an installation bug in Python virtualenv, and refactor the installation logic
+* Fix an HDFS access failure in OpenPAI mode after OpenPAI is upgraded.
+* Fix a bug where in-place flushed stdout sometimes makes the experiment crash
+
+Release 0.5.0 - 01/14/2019
+--------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+New tuner and assessor supports
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Support `Metis tuner `__ as a new NNI tuner. The Metis algorithm has been proven to perform well for **online** hyper-parameter tuning.
+* Support `ENAS customized tuner `__\ , a tuner contributed by a GitHub community user; it is an algorithm for neural network search that learns neural network architectures via reinforcement learning and achieves better performance than NAS.
+* Support `Curve fitting assessor `__ for early-stop policy using learning curve extrapolation.
+* Advanced support of `Weight Sharing `__\ : enable weight sharing for NAS tuners, currently through NFS.
+
+Training Service Enhancement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* `FrameworkController Training service `__\ : support running experiments using FrameworkController on Kubernetes
+
+  * FrameworkController is a controller on Kubernetes that is general enough to run (distributed) jobs with various machine learning frameworks, such as TensorFlow, PyTorch, MXNet.
+  * NNI provides a unified and simple specification for job definition.
+  * MNIST example for how to use FrameworkController.
+
+User Experience improvements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Better trial logging support for NNI experiments in OpenPAI, Kubeflow and FrameworkController mode:
+
+  * An improved logging architecture sends stdout/stderr of trials to the NNI manager via HTTP POST. The NNI manager stores the trial's stdout/stderr messages in a local log file.
+  * Show the link to the trial log file on WebUI.
+
+* Support showing all key-value pairs of the final result.
+
+Release 0.4.1 - 12/14/2018
+--------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+New tuner supports
+^^^^^^^^^^^^^^^^^^
+
+
+* Support `network morphism `__ as a new tuner
+
+Training Service improvements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+* Migrate `Kubeflow training service `__\ 's dependency from the kubectl CLI to the `Kubernetes API `__ client
+* `Pytorch-operator `__ support for Kubeflow training service
+* Improvement to local code file uploading to OpenPAI HDFS
+* Fix an OpenPAI integration WebUI bug: WebUI doesn't show the latest trial job status, which is caused by OpenPAI token expiration
+
+NNICTL improvements
+^^^^^^^^^^^^^^^^^^^
+
+
+* Show version information both in nnictl and WebUI. You can run **nnictl -v** to show your currently installed NNI version
+
+WebUI improvements
+^^^^^^^^^^^^^^^^^^
+
+
+* Enable modifying the concurrency number during an experiment
+* Add a feedback link to the NNI GitHub 'create issue' page
+* Enable customizing the top 10 trials by metric value (largest or smallest)
+* Enable downloading logs for dispatcher & nnimanager
+* Enable automatic scaling of axes for metric values
+* Update annotation to support displaying real choices in the search space
+
+New examples
+^^^^^^^^^^^^
+
+
+* `FashionMnist `__\ , working together with the network morphism tuner
+* `Distributed MNIST example `__ written in PyTorch
+
+Release 0.4 - 12/6/2018
+-----------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* `Kubeflow Training service `__
+
+  * Support tf-operator
+  * `Distributed trial example `__ on Kubeflow
+
+* `Grid search tuner `__
+* `Hyperband tuner `__
+* Support launching NNI experiments on macOS
+* WebUI
+
+  * UI support for hyperband tuner
+  * Remove tensorboard button
+  * Show experiment error message
+  * Show line numbers in search space and trial profile
+  * Support searching for a specific trial by trial number
+  * Show trial's hdfsLogPath
+  * Download experiment parameters
+
+Others
+^^^^^^
+
+
+* Asynchronous dispatcher
+* Docker file update, add pytorch library
+* Refactor 'nnictl stop' process, sending SIGTERM to the nni manager process rather than calling the stop REST API.
+* OpenPAI training service bug fixes
+
+  * Support NNI Manager IP configuration (nniManagerIp) in the OpenPAI cluster config file, to fix the issue that a user's machine has no eth0 device
+  * The file number in codeDir is now capped at 1000, to avoid users mistakenly filling in the root dir for codeDir
+  * Don't print the useless 'metrics is empty' log in an OpenPAI job's stdout; only print a useful message once new metrics are recorded, to reduce confusion when users check an OpenPAI trial's output for debugging purposes
+  * Add a timestamp at the beginning of each log entry in trial keeper.
+
+Release 0.3.0 - 11/2/2018
+-------------------------
+
+NNICTL new features and updates
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+*
+  Support running multiple experiments simultaneously.
+
+  Before v0.3, NNI only supported running a single experiment at a time. After this release, users are able to run multiple experiments simultaneously. Each experiment requires a unique port; the first experiment will be set to the default port as in previous versions. You can specify a unique port for the rest of the experiments as below:
+
+  .. code-block:: bash
+
+     nnictl create --port 8081 --config
+
+*
+  Support updating the max trial number.
+  Use ``nnictl update --help`` to learn more, or refer to `NNICTL Spec `__ for the full usage of NNICTL.
+
+API new features and updates
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+*
+  :raw-html:`**breaking change**`\ : nni.get_parameters() is refactored to nni.get_next_parameter. All examples from prior releases cannot run on v0.3; please clone the nni repo to get the new examples. If you have applied NNI to your own code, please update the API accordingly.
+
+*
+  New API **nni.get_sequence_id()**.
+  Each trial job is allocated a unique sequence number, which can be retrieved via the nni.get_sequence_id() API.
+
+  .. code-block:: bash
+
+     git clone -b v0.3 https://github.com/microsoft/nni.git
+
+*
+  The **nni.report_final_result(result)** API supports more data types for the result parameter.
+
+  It can be of the following types:
+
+
+  * int
+  * float
+  * A Python dict containing a 'default' key, whose value should be of type int or float. The dict can contain any other key-value pairs.
+
+New tuner support
+^^^^^^^^^^^^^^^^^
+
+
+* **Batch Tuner**\ , which iterates over all parameter combinations; can be used to submit batch trial jobs.
+
+New examples
+^^^^^^^^^^^^
+
+
+*
+  An NNI Docker image for public use:
+
+  .. code-block:: bash
+
+     docker pull msranni/nni:latest
+
+*
+  New trial example: `NNI Sklearn Example `__
+
+* New competition example: `Kaggle Competition TGS Salt Example `__
+
+Others
+^^^^^^
+
+
+* UI refactoring; refer to the `WebUI doc `__ for how to work with the new UI.
+* Continuous Integration: NNI has switched to Azure Pipelines
+
+Release 0.2.0 - 9/29/2018
+-------------------------
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* Support `OpenPAI `__ Training Platform (see `here `__ for instructions on how to submit NNI jobs in pai mode)
+
+  * Support training services in pai mode; NNI trials will be scheduled to run on the OpenPAI cluster
+  * NNI trials' output (including logs and model files) will be copied to OpenPAI HDFS for further debugging and checking
+
+* Support the `SMAC `__ tuner (see `here `__ for instructions on how to use the SMAC tuner)
+
+  * `SMAC `__ is based on Sequential Model-Based Optimization (SMBO). It adapts the most prominent previously used model class (Gaussian stochastic process models) and introduces the model class of random forests to SMBO to handle categorical parameters. The SMAC supported by NNI is a wrapper on `SMAC3 `__
+
+* Support NNI installation on `conda `__ and Python virtual environments
+* Others
+
+  * Update the ga_squad example and related documentation
+  * WebUI UX small enhancements and bug fixes
+
+Release 0.1.0 - 9/10/2018 (initial release)
+-------------------------------------------
+
+Initial release of Neural Network Intelligence (NNI).
+
+Major Features
+^^^^^^^^^^^^^^
+
+
+* Installation and Deployment
+
+  * Support pip install and source code install
+  * Support training services in local mode (including multi-GPU mode) as well as multi-machine mode
+
+* Tuners, Assessors and Trial
+
+  * Support AutoML algorithms including: hyperopt_tpe, hyperopt_annealing, hyperopt_random, and evolution_tuner
+  * Support assessor (early stop) algorithms including: medianstop algorithm
+  * Provide a Python API for user-defined tuners and assessors
+  * Provide a Python API for users to wrap trial code as NNI deployable code
+
+* Experiments
+
+  * Provide a command line toolkit 'nnictl' for experiment management
+  * Provide a WebUI for viewing experiment details and managing experiments
+
+* Continuous Integration
+
+  * Support CI by providing out-of-box integration with `travis-ci `__ on Ubuntu
+
+* Others
+
+  * Support simple GPU job scheduling
diff --git a/docs/en_US/Release_v1.0.md b/docs/en_US/Release_v1.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..e36ae943a3469ae928b9e13ce27cd62088e7c25e
--- /dev/null
+++ b/docs/en_US/Release_v1.0.md
@@ -0,0 +1,32 @@
+
+
+From September 2018 to September 2019, we are still moving on …
+
+
+**Great news!** With the tags of **Scalability** and **Ease of Use**, NNI v1.0 is coming. Based on the various types of [Tuning Algorithms](./Tuner/BuiltinTuner.md), NNI supports hyperparameter tuning, neural architecture search and auto-feature-engineering, which is exciting news for algorithm engineers. Besides these, NNI v1.0 has made many improvements in the optimization of tuning algorithms, [WebUI's simplicity and intuition](./Tutorial/WebUI.md) and [platform diversification](./TrainingService/SupportTrainingService.md). NNI has grown into a more intelligent automated machine learning (AutoML) toolkit.
+
+
+
+               **Step one**: Start with the [Tutorial Doc](./Tutorial/Installation.md), and install NNI v1.0 first.
+               **Step two**: Find a "Hello world" example, follow the [Tutorial Doc](./Tutorial/QuickStart.md) and have a quick start.
+               **Step three**: Get familiar with the [WebUI Tutorial](./Tutorial/WebUI.md) and let NNI better assist your tuning tour.
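+
+A minimal command sketch of steps one and two (the `nni` pip package name and the example path are assumptions based on the linked tutorials; adjust them to your setup):
+
+```bash
+# Step one: install NNI v1.0 (assumes a Python 3 environment with pip)
+python3 -m pip install --upgrade nni
+
+# Step two: fetch the examples and launch the MNIST "Hello world" experiment
+git clone -b v1.0 https://github.com/microsoft/nni.git
+nnictl create --config nni/examples/trials/mnist/config.yml
+```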
+ +The fully automated tool greatly improves the efficiency of the tuning process. For more detail about the 1.0 updates, you can refer to [Release 1.0](https://github.com/microsoft/nni/releases). More of our advance plan, you can refer to our [Roadmap](https://github.com/microsoft/nni/wiki/Roadmap). Besides, we also welcome more and more contributors to join us, there are many ways to participate, please refer to [How to contribute](./Tutorial/Contributing.md) for more details. \ No newline at end of file diff --git a/docs/en_US/ResearchPublications.rst b/docs/en_US/ResearchPublications.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ca12930e03cc787c52a38502df6560007b58585 --- /dev/null +++ b/docs/en_US/ResearchPublications.rst @@ -0,0 +1,149 @@ +Research and Publications +========================= + +We are intensively working on both tool chain and research to make automatic model design and tuning really practical and powerful. On the one hand, our main work is tool chain oriented development. On the other hand, our research works aim to improve this tool chain, rethink challenging problems in AutoML (on both system and algorithm) and propose elegant solutions. Below we list some of our research works, we encourage more research works on this topic and encourage collaboration with us. + +System Research +--------------- + + +* `Retiarii: A Deep Learning Exploratory-Training Framework `__ + +.. code-block:: bibtex + + @inproceedings{zhang2020retiarii, + title={Retiarii: A Deep Learning Exploratory-Training Framework}, + author={Zhang, Quanlu and Han, Zhenhua and Yang, Fan and Zhang, Yuge and Liu, Zhe and Yang, Mao and Zhou, Lidong}, + booktitle={14th $\{$USENIX$\}$ Symposium on Operating Systems Design and Implementation ($\{$OSDI$\}$ 20)}, + pages={919--936}, + year={2020} + } + + +* `AutoSys: The Design and Operation of Learning-Augmented Systems `__ + +.. code-block:: bibtex + + @inproceedings{liang2020autosys, + title={AutoSys: The Design and Operation of Learning-Augmented Systems}, + author={Liang, Chieh-Jan Mike and Xue, Hui and Yang, Mao and Zhou, Lidong and Zhu, Lifei and Li, Zhao Lucis and Wang, Zibo and Chen, Qi and Zhang, Quanlu and Liu, Chuanjie and others}, + booktitle={2020 $\{$USENIX$\}$ Annual Technical Conference ($\{$USENIX$\}$$\{$ATC$\}$ 20)}, + pages={323--336}, + year={2020} + } + + +* `Gandiva: Introspective Cluster Scheduling for Deep Learning `__ + +.. code-block:: bibtex + + @inproceedings{xiao2018gandiva, + title={Gandiva: Introspective cluster scheduling for deep learning}, + author={Xiao, Wencong and Bhardwaj, Romil and Ramjee, Ramachandran and Sivathanu, Muthian and Kwatra, Nipun and Han, Zhenhua and Patel, Pratyush and Peng, Xuan and Zhao, Hanyu and Zhang, Quanlu and others}, + booktitle={13th $\{$USENIX$\}$ Symposium on Operating Systems Design and Implementation ($\{$OSDI$\}$ 18)}, + pages={595--610}, + year={2018} + } + +Algorithm Research +------------------ + +New Algorithms +^^^^^^^^^^^^^^ + + +* `TextNAS: A Neural Architecture Search Space Tailored for Text Representation `__ + +.. 
code-block:: bibtex + + @inproceedings{wang2020textnas, + title={TextNAS: A Neural Architecture Search Space Tailored for Text Representation.}, + author={Wang, Yujing and Yang, Yaming and Chen, Yiren and Bai, Jing and Zhang, Ce and Su, Guinan and Kou, Xiaoyu and Tong, Yunhai and Yang, Mao and Zhou, Lidong}, + booktitle={AAAI}, + pages={9242--9249}, + year={2020} + } + + +* `Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search `__ + +.. code-block:: bibtex + + @article{peng2020cream, + title={Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search}, + author={Peng, Houwen and Du, Hao and Yu, Hongyuan and Li, Qi and Liao, Jing and Fu, Jianlong}, + journal={Advances in Neural Information Processing Systems}, + volume={33}, + year={2020} + } + + +* `Metis: Robustly tuning tail latencies of cloud systems `__ + +.. code-block:: bibtex + + @inproceedings{li2018metis, + title={Metis: Robustly tuning tail latencies of cloud systems}, + author={Li, Zhao Lucis and Liang, Chieh-Jan Mike and He, Wenjia and Zhu, Lianjie and Dai, Wenjun and Jiang, Jin and Sun, Guangzhong}, + booktitle={2018 $\{$USENIX$\}$ Annual Technical Conference ($\{$USENIX$\}$$\{$ATC$\}$ 18)}, + pages={981--992}, + year={2018} + } + + +* `OpEvo: An Evolutionary Method for Tensor Operator Optimization `__ + +.. code-block:: bibtex + + @article{Gao2021opevo, + title={OpEvo: An Evolutionary Method for Tensor Operator Optimization}, + volume={35}, + url={https://ojs.aaai.org/index.php/AAAI/article/view/17462}, + number={14}, + journal={Proceedings of the AAAI Conference on Artificial Intelligence}, + author={Gao, Xiaotian and Cui, Wei and Zhang, Lintao and Yang, Mao}, + year={2021}, month={May}, pages={12320-12327} + } + +Measurement and Understanding +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* `Deeper insights into weight sharing in neural architecture search `__ + +.. code-block:: bibtex + + @article{zhang2020deeper, + title={Deeper insights into weight sharing in neural architecture search}, + author={Zhang, Yuge and Lin, Zejun and Jiang, Junyang and Zhang, Quanlu and Wang, Yujing and Xue, Hui and Zhang, Chen and Yang, Yaming}, + journal={arXiv preprint arXiv:2001.01431}, + year={2020} + } + + +* `How Does Supernet Help in Neural Architecture Search? `__ + +.. code-block:: bibtex + + @article{zhang2020does, + title={How Does Supernet Help in Neural Architecture Search?}, + author={Zhang, Yuge and Zhang, Quanlu and Yang, Yaming}, + journal={arXiv preprint arXiv:2010.08219}, + year={2020} + } + +Applications +^^^^^^^^^^^^ + + +* `AutoADR: Automatic Model Design for Ad Relevance `__ + +.. code-block:: bibtex + + @inproceedings{chen2020autoadr, + title={AutoADR: Automatic Model Design for Ad Relevance}, + author={Chen, Yiren and Yang, Yaming and Sun, Hong and Wang, Yujing and Xu, Yu and Shen, Wei and Zhou, Rong and Tong, Yunhai and Bai, Jing and Zhang, Ruofei}, + booktitle={Proceedings of the 29th ACM International Conference on Information \& Knowledge Management}, + pages={2365--2372}, + year={2020} + } diff --git a/docs/en_US/SupportedFramework_Library.rst b/docs/en_US/SupportedFramework_Library.rst new file mode 100644 index 0000000000000000000000000000000000000000..a120573b01febcfb3839b241941a5189518c0416 --- /dev/null +++ b/docs/en_US/SupportedFramework_Library.rst @@ -0,0 +1,59 @@ +.. 
role:: raw-html(raw) + :format: html + + +Framework and Library Supports +============================== + +With the built-in Python API, NNI naturally supports the hyper parameter tuning and neural network search for all the AI frameworks and libraries who support Python models(\ ``version >= 3.6``\ ). NNI had also provided a set of examples and tutorials for some of the popular scenarios to make jump start easier. + +Supported AI Frameworks +----------------------- + + +* `PyTorch `__ + + * :githublink:`MNIST-pytorch ` + * `CIFAR-10 <./TrialExample/Cifar10Examples.rst>`__ + * :githublink:`TGS salt identification chanllenge ` + * :githublink:`Network_morphism ` + +* `TensorFlow `__ + + * :githublink:`MNIST-tensorflow ` + * :githublink:`Squad ` + +* `Keras `__ + + * :githublink:`MNIST-keras ` + * :githublink:`Network_morphism ` + + +* `MXNet `__ +* `Caffe2 `__ +* `CNTK (Python language) `__ +* `Spark MLlib `__ +* `Chainer `__ +* `Theano `__ + +You are encouraged to `contribute more examples `__ for other NNI users. + +Supported Library +----------------- + +NNI also supports all libraries written in python.Here are some common libraries, including some algorithms based on GBDT: XGBoost, CatBoost and lightGBM. + + +* `Scikit-learn `__ + + * `Scikit-learn `__ + +* `XGBoost `__ +* `CatBoost `__ +* `LightGBM `__ + + * `Auto-gbdt `__ + +Here is just a small list of libraries that supported by NNI. If you are interested in NNI, you can refer to the `tutorial `__ to complete your own hacks. + +In addition to the above examples, we also welcome more and more users to apply NNI to your own work, if you have any doubts, please refer `Write a Trial Run on NNI `__. In particular, if you want to be a contributor of NNI, whether it is the sharing of examples , writing of Tuner or otherwise, we are all looking forward to your participation.More information please refer to `here `__. diff --git a/docs/en_US/TrainingService/AMLMode.rst b/docs/en_US/TrainingService/AMLMode.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8abf00d3c251a62549455373dbcd92df7bfb277 --- /dev/null +++ b/docs/en_US/TrainingService/AMLMode.rst @@ -0,0 +1,121 @@ +**Run an Experiment on Azure Machine Learning** +=================================================== + +NNI supports running an experiment on `AML `__ , called aml mode. + +Setup environment +----------------- + +Step 1. Install NNI, follow the install guide `here <../Tutorial/QuickStart.rst>`__. + +Step 2. Create an Azure account/subscription using this `link `__. If you already have an Azure account/subscription, skip this step. + +Step 3. Install the Azure CLI on your machine, follow the install guide `here `__. + +Step 4. Authenticate to your Azure subscription from the CLI. To authenticate interactively, open a command line or terminal and use the following command: + +.. code-block:: bash + + az login + +Step 5. Log into your Azure account with a web browser and create a Machine Learning resource. You will need to choose a resource group and specific a workspace name. Then download ``config.json`` which will be used later. + +.. image:: ../../img/aml_workspace.png + :target: ../../img/aml_workspace.png + :alt: + + +Step 6. Create an AML cluster as the computeTarget. + +.. image:: ../../img/aml_cluster.png + :target: ../../img/aml_cluster.png + :alt: + + +Step 7. Open a command line and install AML package environment. + +.. 
code-block:: bash + + python3 -m pip install azureml + python3 -m pip install azureml-sdk + +Run an experiment +----------------- + +Use ``examples/trials/mnist-pytorch`` as an example. The NNI config YAML file's content is like: + +.. code-block:: yaml + + searchSpaceFile: search_space.json + trialCommand: python3 mnist.py + trialConcurrency: 1 + maxTrialNumber: 10 + tuner: + name: TPE + classArgs: + optimize_mode: maximize + trainingService: + platform: aml + dockerImage: msranni/nni + subscriptionId: ${your subscription ID} + resourceGroup: ${your resource group} + workspaceName: ${your workspace name} + computeTarget: ${your compute target} + +Note: You should set ``platform: aml`` in NNI config YAML file if you want to start experiment in aml mode. + +Compared with `LocalMode `__ training service configuration in aml mode have these additional keys: + + +* dockerImage + + * required key. The docker image name used in job. NNI support image ``msranni/nni`` for running aml jobs. + +.. Note:: This image is build based on cuda environment, may not be suitable for CPU clusters in AML. + +amlConfig: + + +* subscriptionId + + * required key, the subscriptionId of your account + +* resourceGroup + + * required key, the resourceGroup of your account + +* workspaceName + + * required key, the workspaceName of your account + +* computeTarget + + * required key, the compute cluster name you want to use in your AML workspace. `refer `__ See Step 6. + +* maxTrialNumberPerGpu + + * optional key, default 1. Used to specify the max concurrency trial number on a GPU device. + +* useActiveGpu + + * optional key, default false. Used to specify whether to use a GPU if there is another process. By default, NNI will use the GPU only if there is no other active process in the GPU. + +The required information of amlConfig could be found in the downloaded ``config.json`` in Step 5. + +Run the following commands to start the example experiment: + +.. code-block:: bash + + git clone -b ${NNI_VERSION} https://github.com/microsoft/nni + cd nni/examples/trials/mnist-pytorch + + # modify config_aml.yml ... + + nnictl create --config config_aml.yml + +Replace ``${NNI_VERSION}`` with a released version name or branch name, e.g., ``v2.4``. + +Monitor your code in the cloud by using the studio +-------------------------------------------------- + +To monitor your job's code, you need to visit your studio which you create at step 5. Once the job completes, go to the Outputs + logs tab. There you can see a 70_driver_log.txt file, This file contains the standard output from a run and can be useful when you're debugging remote runs in the cloud. Learn more about aml from `here `__. diff --git a/docs/en_US/TrainingService/AdaptDLMode.rst b/docs/en_US/TrainingService/AdaptDLMode.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7f844a3c7ab6816a04ae4adae7094d9be5c943d --- /dev/null +++ b/docs/en_US/TrainingService/AdaptDLMode.rst @@ -0,0 +1,200 @@ +Run an Experiment on AdaptDL +============================ + +Now NNI supports running experiment on `AdaptDL `__. Before starting to use NNI AdaptDL mode, you should have a Kubernetes cluster, either on-premises or `Azure Kubernetes Service(AKS) `__\ , a Ubuntu machine on which `kubeconfig `__ is setup to connect to your Kubernetes cluster. In AdaptDL mode, your trial program will run as AdaptDL job in Kubernetes cluster. 
AdaptDL aims to make distributed deep learning easy and efficient in dynamic-resource environments such as shared clusters and the cloud.

Prerequisite for Kubernetes Service
-----------------------------------

#. A **Kubernetes** cluster using Kubernetes 1.14 or later with storage. Follow this guideline to set up Kubernetes `on Azure `__\ , or `on-premise `__ with `cephfs `__\ , or `microk8s with storage add-on enabled `__.
#. Install the **AdaptDL scheduler** to your Kubernetes cluster with Helm. Follow this `guideline `__ to set up the AdaptDL scheduler.
#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use ``$(HOME)/.kube/config`` as the kubeconfig file's path. You can also specify other kubeconfig files by setting the **KUBECONFIG** environment variable. Refer to this `guideline `__ to learn more about kubeconfig.
#. If your NNI trial job needs GPU resources, you should follow this `guideline `__ to configure the **Nvidia device plugin for Kubernetes**.
#. (Optional) Prepare an **NFS server** and export a general purpose mount as external storage.
#. Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart.rst>`__.

Verify Prerequisites
^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

   nnictl --version
   # Expected:

.. code-block:: bash

   kubectl version
   # Expected that the kubectl client version matches the server version.

.. code-block:: bash

   kubectl api-versions | grep adaptdl
   # Expected: adaptdl.petuum.com/v1

Run an experiment
-----------------

We have a CIFAR10 example that fully leverages the AdaptDL scheduler under the ``examples/trials/cifar10_pytorch`` folder (\ ``main_adl.py`` and ``config_adl.yaml``\ ).

Here is a template configuration specification for using AdaptDL as a training service.

.. code-block:: yaml

   authorName: default
   experimentName: minimal_adl

   trainingServicePlatform: adl
   nniManagerIp: 10.1.10.11
   logCollection: http

   tuner:
     builtinTunerName: GridSearch
   searchSpacePath: search_space.json

   trialConcurrency: 2
   maxTrialNum: 2

   trial:
     adaptive: false # optional.
     image:
     imagePullSecrets: # optional
       - name: stagingsecret
     codeDir: .
     command: python main.py
     gpuNum: 1
     cpuNum: 1 # optional
     memorySize: 8Gi # optional
     nfs: # optional
       server: 10.20.41.55
       path: /
       containerMountPath: /nfs
     checkpoint: # optional
       storageClass: dfs
       storageSize: 1Gi

Configs not mentioned below follow the `default specs defined `__ in the NNI doc.

* **trainingServicePlatform**\ : Choose ``adl`` to use the Kubernetes cluster with the AdaptDL scheduler.
* **nniManagerIp**\ : *Required* for the ``adl`` training service to get the correct info and metrics back from the cluster. IP address of the machine with NNI manager (NNICTL) that launches the NNI experiment.
* **logCollection**\ : *Recommended* to set as ``http``. It will collect the trial logs on the cluster back to your machine via http.
* **tuner**\ : It supports the Tuun tuner and all NNI built-in tuners (except for the checkpoint feature of the NNI PBT tuners).
* **trial**\ : It defines the specs of an ``adl`` trial.

  * **namespace**\ : (*Optional*\ ) Kubernetes namespace to launch the trials. Defaults to the ``default`` namespace.
  * **adaptive**\ : (*Optional*\ ) Boolean for the AdaptDL trainer. When ``true``\ , the job is preemptible and adaptive.
  * **image**\ : Docker image for the trial
  * **imagePullSecret**\ : (*Optional*\ ) If you are using a private registry, you need to provide the secret to successfully pull the image.
  * **codeDir**\ : the working directory of the container. ``.`` means the default working directory defined by the image.
  * **command**\ : the bash command to start the trial
  * **gpuNum**\ : the number of GPUs requested for this trial. It must be a non-negative integer.
  * **cpuNum**\ : (*Optional*\ ) the number of CPUs requested for this trial. It must be a non-negative integer.
  * **memorySize**\ : (*Optional*\ ) the size of memory requested for this trial. It must follow the Kubernetes `default format `__.
  * **nfs**\ : (*Optional*\ ) mounting external storage. For more information about using NFS please check the paragraph below.
  * **checkpoint**\ : (*Optional*\ ) storage settings for model checkpoints.

    * **storageClass**\ : check the `Kubernetes storage documentation `__ for how to use the appropriate ``storageClass``.
    * **storageSize**\ : this value should be large enough to fit your model's checkpoints, or it could cause a "disk quota exceeded" error.

NFS Storage
^^^^^^^^^^^

As you may have noticed in the above configuration spec, an *optional* section is available to configure NFS external storage. It can be omitted when no external storage is required, for example when a docker image is sufficient with the code and data inside.

Note that the ``adl`` training service does NOT mount the NFS to the local dev machine; you may mount it locally yourself to manage the filesystem, copy data or code, etc.
The ``adl`` training service can then mount it into Kubernetes for every trial, given the proper configuration:

* **server**\ : NFS server address, e.g. IP address or domain
* **path**\ : NFS server export path, i.e. the absolute path in NFS that can be mounted to trials
* **containerMountPath**\ : the absolute path inside the container at which to mount the NFS **path** above, so that every trial has access to the NFS. In the trial containers, you can access the NFS at this path.

Use cases:

* If your training trials depend on a large dataset, you may want to download it onto the NFS first, and mount it so that it can be shared across multiple trials.
* The storage for containers is ephemeral, and the trial containers will be deleted after a trial's lifecycle is over. So if you want to export your trained models, you may mount the NFS to the trial to persist and export them.

In short, trials are not restricted in how they read from or write to the NFS storage, so you may use it flexibly per your needs.

Monitor via Log Stream
----------------------

Follow the log streaming of a certain trial:

.. code-block:: bash

   nnictl log trial --trial_id=

.. code-block:: bash

   nnictl log trial --trial_id=

Note that *after* a trial is done and its pod has been deleted, its logs can no longer be retrieved via this command. However, you may still be able to access past trial logs with the following approach.

Monitor via TensorBoard
-----------------------

In the context of NNI, an experiment has multiple trials. For easy comparison across trials during a model tuning process, we support TensorBoard integration. Each experiment has an independent TensorBoard logging directory, and thus its own dashboard.

You can only use the TensorBoard while the monitored experiment is running.
In other words, monitoring stopped experiments is not supported.

In the trial container you have access to two environment variables:

* ``ADAPTDL_TENSORBOARD_LOGDIR``\ : the TensorBoard logging directory for the current experiment,
* ``NNI_TRIAL_JOB_ID``\ : the ``trial`` job id for the current trial.

It is recommended to join them as the logging directory for the trial, for example in Python:

.. code-block:: python

   import os
   tensorboard_logdir = os.path.join(
       os.getenv("ADAPTDL_TENSORBOARD_LOGDIR"),
       os.getenv("NNI_TRIAL_JOB_ID")
   )

If an experiment is stopped, the data logged here (defined by *the above envs* for monitoring with the following commands) will be lost. To persist the logged data, you can use external storage (e.g. mount an NFS) to export it and view the TensorBoard locally.

With the above setting, you can easily monitor the experiment via TensorBoard by

.. code-block:: bash

   nnictl tensorboard start

If you have multiple experiments running at the same time, you may use

.. code-block:: bash

   nnictl tensorboard start

It will provide you the web URL to access the TensorBoard.

Note that you have the flexibility to set up the local ``--port`` for the TensorBoard.

diff --git a/docs/en_US/TrainingService/DLCMode.rst b/docs/en_US/TrainingService/DLCMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9dc9e3443c63f7b88877c7eb1469aa830ccd8099
--- /dev/null
+++ b/docs/en_US/TrainingService/DLCMode.rst

**Run an Experiment on Aliyun PAI-DSW + PAI-DLC**
===================================================

NNI supports running an experiment from `PAI-DSW `__ that submits trials to `PAI-DLC `__\ , called dlc mode.

The PAI-DSW server plays the role of submitting jobs, while PAI-DLC is where the training jobs run.

Setup environment
-----------------

Step 1. Install NNI, follow the install guide `here <../Tutorial/QuickStart.rst>`__.

Step 2. Create a PAI-DSW server following this `link `__. Note that since the training jobs run on PAI-DLC, the server itself does not need many resources; a PAI-DSW server with only CPU may suffice.

Step 3. Open PAI-DLC `here `__ and select the same region as your PAI-DSW server. Move to ``dataset configuration`` and mount the same NAS disk as the PAI-DSW server does. (Note that currently only the PAI-DLC public-cluster is supported.)

Step 4. Open your PAI-DSW server command line, then download and install the PAI-DLC Python SDK to submit DLC tasks; refer to `this link `__. Skip this step if the SDK is already installed.

.. code-block:: bash

   wget https://sdk-portal-cluster-prod.oss-cn-zhangjiakou.aliyuncs.com/downloads/u-3536038a-3de7-4f2e-9379-0cb309d29355-python-pai-dlc.zip
   unzip u-3536038a-3de7-4f2e-9379-0cb309d29355-python-pai-dlc.zip
   pip install ./pai-dlc-20201203 # pai-dlc-20201203 refers to the unzipped SDK folder name; replace it accordingly.

Run an experiment
-----------------

Use ``examples/trials/mnist-pytorch`` as an example. The NNI config YAML file's content is like:

.. code-block:: yaml

   # working directory on DSW, please provide the FULL path
   experimentWorkingDirectory: /home/admin/workspace/{your_working_dir}
   searchSpaceFile: search_space.json
   # the command on the trial runner (i.e., the DLC container); be aware of data_dir
   trialCommand: python mnist.py --data_dir /root/data/{your_data_dir}
   trialConcurrency: 1 # NOTE: please provide a number <= 3 due to DLC system limits.
   maxTrialNumber: 10
   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize
   # ref: https://help.aliyun.com/document_detail/203290.html?spm=a2c4g.11186623.6.727.6f9b5db6bzJh4x
   trainingService:
     platform: dlc
     type: Worker
     image: registry-vpc.cn-beijing.aliyuncs.com/pai-dlc/pytorch-training:1.6.0-gpu-py37-cu101-ubuntu18.04
     jobType: PyTorchJob # choices: [TFJob, PyTorchJob]
     podCount: 1
     ecsSpec: ecs.c6.large
     region: cn-hangzhou
     accessKeyId: ${your_ak_id}
     accessKeySecret: ${your_ak_key}
     nasDataSourceId: ${your_nas_data_source_id} # NAS datasource ID, e.g., datat56by9n1xt0a
     localStorageMountPoint: /home/admin/workspace/ # default NAS path on DSW
     containerStorageMountPoint: /root/data/ # default NAS path in the DLC container, change it according to your setting

Note: You should set ``platform: dlc`` in the NNI config YAML file if you want to start the experiment in dlc mode.

Compared with `LocalMode `__, the training service configuration in dlc mode has additional keys such as ``type/image/jobType/podCount/ecsSpec/region/nasDataSourceId/accessKeyId/accessKeySecret``; for a detailed explanation refer to this `link `__.

Also, as dlc mode requires DSW/DLC to mount the same NAS disk to share information, there are two extra keys related to this: ``localStorageMountPoint`` and ``containerStorageMountPoint``.

Run the following commands to start the example experiment:

.. code-block:: bash

   git clone -b ${NNI_VERSION} https://github.com/microsoft/nni
   cd nni/examples/trials/mnist-pytorch

   # modify config_dlc.yml ...

   nnictl create --config config_dlc.yml

Replace ``${NNI_VERSION}`` with a released version name or branch name, e.g., ``v2.3``.

Monitor your job
----------------

To monitor your job on DLC, you need to visit the `DLC `__ console to check the job status.

diff --git a/docs/en_US/TrainingService/DLTSMode.rst b/docs/en_US/TrainingService/DLTSMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..87be8befb430def7626628e6b4b4a45abae96637
--- /dev/null
+++ b/docs/en_US/TrainingService/DLTSMode.rst

**Run an Experiment on DLTS**
=================================

NNI supports running an experiment on `DLTS `__\ , called dlts mode. Before starting to use NNI dlts mode, you should have an account to access the DLTS dashboard.

Setup Environment
-----------------

Step 1. Choose a cluster from the DLTS dashboard; ask your administrator for the cluster dashboard URL.

.. image:: ../../img/dlts-step1.png
   :target: ../../img/dlts-step1.png
   :alt: Choose Cluster

Step 2. Prepare an NNI config YAML like the following:

.. code-block:: yaml

   # Set this field to "dlts"
   trainingServicePlatform: dlts
   authorName: your_name
   experimentName: auto_mnist
   trialConcurrency: 2
   maxExecDuration: 3h
   maxTrialNum: 100
   searchSpacePath: search_space.json
   useAnnotation: false
   tuner:
     builtinTunerName: TPE
     classArgs:
       optimize_mode: maximize
   trial:
     command: python3 mnist.py
     codeDir: .
     gpuNum: 1
     image: msranni/nni
   # Configuration to access DLTS
   dltsConfig:
     dashboard: # Ask administrator for the cluster dashboard URL

Remember to fill in the cluster dashboard URL on the last line.

Step 3. Open your working directory on the cluster and paste the NNI config as well as the related code into a directory.

.. image:: ../../img/dlts-step3.png
   :target: ../../img/dlts-step3.png
   :alt: Copy Config

Step 4. Submit an NNI manager job to the specified cluster.

.. image:: ../../img/dlts-step4.png
   :target: ../../img/dlts-step4.png
   :alt: Submit Job

Step 5. Go to the Endpoints tab of the newly created job and click the Port 40000 link to check the trial's information.

.. image:: ../../img/dlts-step5.png
   :target: ../../img/dlts-step5.png
   :alt: View NNI WebUI

diff --git a/docs/en_US/TrainingService/FrameworkControllerMode.rst b/docs/en_US/TrainingService/FrameworkControllerMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c4b652f7fa675ab2283e9e943d00076278bbefbc
--- /dev/null
+++ b/docs/en_US/TrainingService/FrameworkControllerMode.rst

Run an Experiment on FrameworkController
========================================

NNI supports running experiments using `FrameworkController `__\ , called frameworkcontroller mode. FrameworkController is built to orchestrate all kinds of applications on Kubernetes, so you don't need to install Kubeflow or a framework-specific operator like tf-operator or pytorch-operator. You can use FrameworkController as the training service to run NNI experiments.

Prerequisite for on-premises Kubernetes Service
-----------------------------------------------

#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes.
#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use ``$(HOME)/.kube/config`` as the kubeconfig file's path. You can also specify other kubeconfig files by setting the **KUBECONFIG** environment variable. Refer to this `guideline `__ to learn more about kubeconfig.
#. If your NNI trial job needs GPU resources, you should follow this `guideline `__ to configure the **Nvidia device plugin for Kubernetes**.
#. Prepare an **NFS server** and export a general purpose mount (we recommend mapping your NFS server path with the ``root_squash`` option, otherwise permission issues may arise when NNI copies files to NFS; refer to this `page `__ to learn what the root_squash option is), or **Azure File Storage**.
#. Install the **NFS client** on the machine where you install NNI and run nnictl to create experiments. Run this command to install the NFSv4 client:

.. code-block:: bash

   apt-get install nfs-common

7. Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart.rst>`__.

Prerequisite for Azure Kubernetes Service
-----------------------------------------

#. NNI supports FrameworkController based on Azure Kubernetes Service, follow the `guideline `__ to set up Azure Kubernetes Service.
#. Install `Azure CLI `__ and **kubectl**. Use ``az login`` to set your Azure account and connect the kubectl client to AKS, refer to this `guideline `__.
#. Follow the `guideline `__ to create an azure file storage account. If you use Azure Kubernetes Service, NNI needs the Azure Storage Service to store code files and output files.
#. To access the Azure storage service, NNI needs the access key of the storage account, and NNI uses the `Azure Key Vault `__ Service to protect your private key. Set up Azure Key Vault Service and add a secret to Key Vault to store the access key of the Azure storage account. Follow this `guideline `__ to store the access key.


Prerequisite for PVC storage mode
-----------------------------------------
In order to use persistent volume claims instead of NFS or Azure storage, the related storage must be created manually, in the namespace your trials will run in later.
This restriction is due to the fact that persistent volume claims are hard to recycle and thus can quickly mess with a cluster's storage management. Persistent volume claims can be created, e.g., using kubectl. Please refer to the official Kubernetes documentation for `further information `__.


Setup FrameworkController
-------------------------

Follow the `guideline `__ to set up FrameworkController in the Kubernetes cluster; NNI supports FrameworkController in stateful set mode. If your cluster enforces authorization, you need to create a service account with granted permissions for FrameworkController, and then pass the name of the FrameworkController service account to the NNI experiment config, `refer `__.

Design
------

Please refer to the design of the `Kubeflow training service `__\ ; the FrameworkController training service pipeline is similar.

Example
-------

The FrameworkController config file format is:

.. code-block:: yaml

   authorName: default
   experimentName: example_mnist
   trialConcurrency: 1
   maxExecDuration: 10h
   maxTrialNum: 100
   #choice: local, remote, pai, kubeflow, frameworkcontroller
   trainingServicePlatform: frameworkcontroller
   searchSpacePath: ~/nni/examples/trials/mnist-tfv1/search_space.json
   #choice: true, false
   useAnnotation: false
   tuner:
     #choice: TPE, Random, Anneal, Evolution
     builtinTunerName: TPE
     classArgs:
       #choice: maximize, minimize
       optimize_mode: maximize
   assessor:
     builtinAssessorName: Medianstop
     classArgs:
       optimize_mode: maximize
   trial:
     codeDir: ~/nni/examples/trials/mnist-tfv1
     taskRoles:
       - name: worker
         taskNum: 1
         command: python3 mnist.py
         gpuNum: 1
         cpuNum: 1
         memoryMB: 8192
         image: msranni/nni:latest
         frameworkAttemptCompletionPolicy:
           minFailedTaskCount: 1
           minSucceededTaskCount: 1
   frameworkcontrollerConfig:
     storage: nfs
     nfs:
       server: {your_nfs_server}
       path: {your_nfs_server_exported_path}

If you use Azure Kubernetes Service, you should set ``frameworkcontrollerConfig`` in your config YAML file as follows:

.. code-block:: yaml

   frameworkcontrollerConfig:
     storage: azureStorage
     serviceAccountName: {your_frameworkcontroller_service_account_name}
     keyVault:
       vaultName: {your_vault_name}
       name: {your_secret_name}
     azureStorage:
       accountName: {your_storage_account_name}
       azureShare: {your_azure_share_name}

If you set a `ServiceAccount `__ in your k8s cluster, please set ``serviceAccountName`` in your config file. For example:

.. code-block:: yaml

   frameworkcontrollerConfig:
     serviceAccountName: frameworkcontroller

Note: You should explicitly set ``trainingServicePlatform: frameworkcontroller`` in the NNI config YAML file if you want to start the experiment in frameworkcontroller mode.

The trial's config format for NNI frameworkcontroller mode is a simplified version of FrameworkController's official config; you can refer to the `Tensorflow example of FrameworkController `__ for a deeper understanding.

Trial configuration in frameworkcontroller mode has the following configuration keys:

* taskRoles: you can set multiple task roles in the config file, and each task role is a basic unit to process in the Kubernetes cluster.

  * name: the name of the task role, like "worker", "ps", "master".
  * taskNum: the replica number of the task role.
  * command: the user's command to be run in the container.
  * gpuNum: the number of GPU devices used in the container.
  * cpuNum: the number of CPU devices used in the container.
  * memoryMB: the memory limitation to be specified for the container.
  * image: the docker image used to create the pod and run the program.
  * frameworkAttemptCompletionPolicy: the policy for running the framework; please refer to the `user-manual `__ for the specifics. Users can use the policy to control the pods: for example, if the ps does not stop but the worker does, the completion policy helps stop the ps.

NNI also offers the possibility to include a customized frameworkcontroller template similar to the aforementioned tensorflow example. A valid configuration may look like:

.. code-block:: yaml

   experimentName: example_mnist_pytorch
   trialConcurrency: 1
   maxExecDuration: 1h
   maxTrialNum: 2
   logLevel: trace
   trainingServicePlatform: frameworkcontroller
   searchSpacePath: search_space.json
   tuner:
     builtinTunerName: TPE
     classArgs:
       optimize_mode: maximize
   assessor:
     builtinAssessorName: Medianstop
     classArgs:
       optimize_mode: maximize
   trial:
     codeDir: .
   frameworkcontrollerConfig:
     configPath: fc_template.yml
     storage: pvc
     namespace: twin-pipelines
     pvc:
       path: /mnt/data

Note that in this example a persistent volume claim is used, which must be created manually in the specified namespace beforehand. Stick to the mnist-pytorch example (:githublink: ``__) for a more detailed config (:githublink: ``__) and frameworkcontroller template (:githublink: ``__).

How to run example
------------------

After you prepare a config file, you can run your experiment with nnictl. The way to start an experiment on FrameworkController is similar to Kubeflow; please refer to the `document `__ for more information.

version check
-------------

NNI supports the version check feature since version 0.6, `refer `__.


FrameworkController reuse mode
------------------------------
NNI supports setting reuse mode for trial jobs. In reuse mode, NNI submits a long-running trial runner process to occupy the container and starts trial jobs as subprocesses of the trial runner process; this means Kubernetes does not need to schedule a new container for each trial, it just reuses the old one.
Currently, frameworkcontroller reuse mode only supports the V2 config.
Here is an example:

.. code-block:: yaml

   searchSpaceFile: search_space.json
   trialCommand: python3 mnist.py
   trialGpuNumber: 0
   trialConcurrency: 4
   maxTrialNumber: 20
   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize
   trainingService:
     reuseMode: true
     platform: frameworkcontroller
     taskRoles:
       - name:
         dockerImage: 'msranni/nni:latest'
         taskNumber: 1
         command:
         gpuNumber:
         cpuNumber:
         memorySize:
         frameworkAttemptCompletionPolicy:
           minFailedTaskCount: 1
           minSucceedTaskCount: 1
     storage:
       storageType: azureStorage
       azureAccount: {your_account}
       azureShare: {your_share}
       keyVaultName: {your_vault_name}
       keyVaultKey: {your_vault_key}

diff --git a/docs/en_US/TrainingService/HowToImplementTrainingService.rst b/docs/en_US/TrainingService/HowToImplementTrainingService.rst
new file mode 100644
index 0000000000000000000000000000000000000000..502141002f22cf4a03f5a9c421cfca7082455482
--- /dev/null
+++ b/docs/en_US/TrainingService/HowToImplementTrainingService.rst

How to Implement Training Service in NNI
========================================

Overview
--------

TrainingService is the module responsible for platform management and job scheduling in NNI.
TrainingService is designed to be easy to implement: we define an abstract class TrainingService as the parent class of all kinds of TrainingService, and users just need to inherit from the parent class and complete their own child class if they want to implement a customized TrainingService.

System architecture
-------------------


.. image:: ../../img/NNIDesign.jpg
   :target: ../../img/NNIDesign.jpg
   :alt:


The brief system architecture of NNI is shown in the picture. NNIManager is the core management module of the system, in charge of calling TrainingService to manage trial jobs and handling the communication between different modules. Dispatcher is a message processing center responsible for message dispatch. TrainingService is the module that manages trial jobs; it communicates with the NNIManager module and has a different instance for each training platform. For the time being, NNI supports the `local platform `__\ , `remote platform `__\ , `PAI platform `__\ , `kubeflow platform `__ and `FrameworkController platform `__.

In this document, we introduce the brief design of TrainingService. If users want to add a new TrainingService instance, they just need to complete a child class that implements TrainingService; they don't need to understand the code details of NNIManager, Dispatcher or other modules.

Folder structure of code
------------------------

NNI's folder structure is shown below:

.. code-block:: bash

   nni
   |- deployment
   |- docs
   |- examples
   |- src
   | |- nni_manager
   | | |- common
   | | |- config
   | | |- core
   | | |- coverage
   | | |- dist
   | | |- rest_server
   | | |- training_service
   | | | |- common
   | | | |- kubernetes
   | | | |- local
   | | | |- pai
   | | | |- remote_machine
   | | | |- test
   | |- sdk
   | |- webui
   |- test
   |- tools
   | |-nni_annotation
   | |-nni_cmd
   | |-nni_gpu_tool
   | |-nni_trial_tool

The ``nni/src/`` folder stores most of NNI's source code. The code in this folder is related to NNIManager, TrainingService, SDK, WebUI and other modules. Users can find the abstract class of TrainingService in the ``nni/src/nni_manager/common/trainingService.ts`` file, and they should put their own implemented TrainingService in the ``nni/src/nni_manager/training_service`` folder. If users have implemented their own TrainingService code, they should also supplement it with unit tests and place them in the ``nni/src/nni_manager/training_service/test`` folder.

Function annotation of TrainingService
--------------------------------------

.. code-block:: typescript

   abstract class TrainingService {
       public abstract listTrialJobs(): Promise<TrialJobDetail[]>;
       public abstract getTrialJob(trialJobId: string): Promise<TrialJobDetail>;
       public abstract addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void;
       public abstract removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void;
       public abstract submitTrialJob(form: JobApplicationForm): Promise<TrialJobDetail>;
       public abstract updateTrialJob(trialJobId: string, form: JobApplicationForm): Promise<TrialJobDetail>;
       public abstract get isMultiPhaseJobSupported(): boolean;
       public abstract cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean): Promise<void>;
       public abstract setClusterMetadata(key: string, value: string): Promise<void>;
       public abstract getClusterMetadata(key: string): Promise<string>;
       public abstract cleanUp(): Promise<void>;
       public abstract run(): Promise<void>;
   }

The parent class TrainingService has several abstract functions; users need to inherit the parent class and implement all of them. A minimal skeleton is sketched below, and each function is explained in the following sections.
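For illustration only, a minimal child class might look like the sketch below. The class name, the import path, and the stub bodies are hypothetical; the abstract class and the ``JobApplicationForm``\ , ``TrialJobDetail`` and ``TrialJobMetric`` types are assumed to come from ``nni/src/nni_manager/common/trainingService.ts``. A real implementation would replace the stubs with platform-specific logic.

.. code-block:: typescript

   // A hypothetical skeleton of a customized training service, for illustration only.
   import { EventEmitter } from 'events';
   import {
       TrainingService, JobApplicationForm, TrialJobDetail, TrialJobMetric
   } from '../../common/trainingService';

   class MyPlatformTrainingService extends TrainingService {
       private readonly metricEmitter: EventEmitter = new EventEmitter();
       private readonly trialJobs: Map<string, TrialJobDetail> = new Map();
       private stopping: boolean = false;

       public async listTrialJobs(): Promise<TrialJobDetail[]> {
           return Array.from(this.trialJobs.values());
       }

       public async getTrialJob(trialJobId: string): Promise<TrialJobDetail> {
           const job = this.trialJobs.get(trialJobId);
           if (job === undefined) { throw new Error(`Trial job ${trialJobId} not found`); }
           return job;
       }

       public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
           this.metricEmitter.on('metric', listener);  // forward metrics to NNI manager
       }

       public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
           this.metricEmitter.off('metric', listener);
       }

       public async submitTrialJob(form: JobApplicationForm): Promise<TrialJobDetail> {
           // Build a TrialJobDetail and hand it to the platform; a fleshed-out
           // sketch is shown in the submitTrialJob section below.
           throw new Error('not implemented');
       }

       public async updateTrialJob(trialJobId: string, form: JobApplicationForm): Promise<TrialJobDetail> {
           return this.getTrialJob(trialJobId);  // refresh the status from the platform here
       }

       public get isMultiPhaseJobSupported(): boolean {
           return false;
       }

       public async cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean): Promise<void> {
           // Ask the platform to kill the job here.
       }

       public async setClusterMetadata(key: string, value: string): Promise<void> {
           // Parse and store platform configuration (e.g., a machine list) here.
       }

       public async getClusterMetadata(key: string): Promise<string> {
           return '';
       }

       public async run(): Promise<void> {
           while (!this.stopping) {
               // Poll the platform for job status and new metrics here, then emit:
               // this.metricEmitter.emit('metric', metric);
               await new Promise<void>((resolve) => setTimeout(resolve, 5000));
           }
       }

       public async cleanUp(): Promise<void> {
           this.stopping = true;  // release platform resources here
       }
   }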
**setClusterMetadata(key: string, value: string)**

ClusterMetadata is the data related to platform details; for example, the ClusterMetadata defined for the remote machine platform is:

.. code-block:: typescript

   export class RemoteMachineMeta {
       public readonly ip : string;
       public readonly port : number;
       public readonly username : string;
       public readonly passwd?: string;
       public readonly sshKeyPath?: string;
       public readonly passphrase?: string;
       public gpuSummary : GPUSummary | undefined;
       /* GPU Reservation info, the key is the GPU index, the value is the job id which reserves this GPU */
       public gpuReservation : Map<number, string>;

       constructor(ip : string, port : number, username : string, passwd : string,
           sshKeyPath : string, passphrase : string) {
           this.ip = ip;
           this.port = port;
           this.username = username;
           this.passwd = passwd;
           this.sshKeyPath = sshKeyPath;
           this.passphrase = passphrase;
           this.gpuReservation = new Map();
       }
   }

The metadata includes the host address, the username, and other configuration related to the platform. Users need to define their own metadata format and set the metadata instance in this function. This function is called before the experiment is started, to set the configuration of remote machines.

**getClusterMetadata(key: string)**

This function returns the metadata value according to the key; it can be left empty if users don't need to use it.

**submitTrialJob(form: JobApplicationForm)**

submitTrialJob is a function to submit new trial jobs; users should generate a job instance of the TrialJobDetail type. TrialJobDetail is defined as follows:

.. code-block:: typescript

   interface TrialJobDetail {
       readonly id: string;
       readonly status: TrialJobStatus;
       readonly submitTime: number;
       readonly startTime?: number;
       readonly endTime?: number;
       readonly tags?: string[];
       readonly url?: string;
       readonly workingDirectory: string;
       readonly form: JobApplicationForm;
       readonly sequenceId: number;
       isEarlyStopped?: boolean;
   }

Depending on the implementation, users can put the job detail into a job queue and keep fetching jobs from the queue to prepare and start them, or they can finish the preparing and running process in this function and return the job detail after the submission work. (A fleshed-out sketch of this function is shown at the end of this section.)

**cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean)**

If this function is called, the trial started by the platform should be canceled. Different platforms have different methods to cancel a running job; this function should be implemented according to the specific platform.

**updateTrialJob(trialJobId: string, form: JobApplicationForm)**

This function is called to update the trial job's status; the status should be detected according to the platform and updated to ``RUNNING``\ , ``SUCCEEDED``\ , ``FAILED``\ , etc.

**getTrialJob(trialJobId: string)**

This function returns a trialJob detail instance according to the trialJobId.

**listTrialJobs()**

Users should put the detail information of all trial jobs into a list, and return the list.

**addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void)**

NNI holds an EventEmitter to get job metrics; when a new job metric is detected, the EventEmitter is triggered. Users should start the EventEmitter in this function.

**removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void)**

Close the EventEmitter.
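To make submitTrialJob concrete, here is a fleshed-out version of the stub from the skeleton sketched earlier. This is illustrative only: ``generateTrialJobId``\ , ``workingDirRoot``\ , ``nextSequenceId`` and ``jobQueue`` are hypothetical members of the child class, not part of NNI's API, and the queued job is assumed to be started later by the ``run()`` loop.

.. code-block:: typescript

   // Hypothetical continuation of the skeleton above, not NNI's actual code.
   public async submitTrialJob(form: JobApplicationForm): Promise<TrialJobDetail> {
       const trialJobId: string = this.generateTrialJobId();   // hypothetical helper
       const trialJobDetail: TrialJobDetail = {
           id: trialJobId,
           status: 'WAITING',                                  // updated to RUNNING once scheduled
           submitTime: Date.now(),
           workingDirectory: `${this.workingDirRoot}/${trialJobId}`,
           form: form,
           sequenceId: this.nextSequenceId++,
       };
       this.trialJobs.set(trialJobId, trialJobDetail);
       this.jobQueue.push(trialJobId);                         // picked up by the run() loop
       return trialJobDetail;
   }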
**run()**

The run() function is the main loop in TrainingService; users can set up a while loop here to execute their logic and let it finish when the experiment is stopped.

**cleanUp()**

This function is called to clean up the environment when an experiment is stopped. Users should do the platform-related cleaning operations in this function.

TrialKeeper tool
----------------

NNI offers a TrialKeeper tool to help maintain trial jobs. Users can find the source code in ``nni/tools/nni_trial_tool``. If users want to run trial jobs on a cloud platform, this tool is a good choice to help keep trials running on the platform.

The running architecture of TrialKeeper is shown as follows:


.. image:: ../../img/trialkeeper.jpg
   :target: ../../img/trialkeeper.jpg
   :alt:


When users submit a trial job to a cloud platform, they should wrap their trial command into TrialKeeper and start a TrialKeeper process on the cloud platform. Notice that TrialKeeper uses a RESTful server to communicate with TrainingService; users should start a RESTful server on the local machine to receive the metrics sent from TrialKeeper. The source code of the RESTful server can be found in ``nni/src/nni_manager/training_service/common/clusterJobRestServer.ts``.

Reference
---------

For more information about how to debug, please `refer <../Tutorial/HowToDebug.rst>`__.

For the guideline on how to contribute, please `refer <../Tutorial/Contributing.rst>`__.

diff --git a/docs/en_US/TrainingService/HybridMode.rst b/docs/en_US/TrainingService/HybridMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..14840f0a9cce093ecb1ee488498de19dfc8452ea
--- /dev/null
+++ b/docs/en_US/TrainingService/HybridMode.rst

**Run an Experiment on Hybrid Mode**
===========================================

Running NNI in hybrid mode means that NNI will run trial jobs on multiple kinds of training platforms. For example, NNI could submit trial jobs to remote machines and AML simultaneously.

Setup environment
-----------------

NNI supports `local <./LocalMode.rst>`__\ , `remote <./RemoteMachineMode.rst>`__\ , `PAI <./PaiMode.rst>`__\ , `AML <./AMLMode.rst>`__\ , `Kubeflow <./KubeflowMode.rst>`__\ , and `FrameworkController <./FrameworkControllerMode.rst>`__ for the hybrid training service. Before starting an experiment in this mode, users should set up the corresponding environment for each platform. More details about the environment setup can be found in the corresponding docs.

Run an experiment
-----------------

Use ``examples/trials/mnist-tfv1`` as an example. The NNI config YAML file's content is like:

.. code-block:: yaml

   experimentName: MNIST
   searchSpaceFile: search_space.json
   trialCommand: python3 mnist.py
   trialCodeDirectory: .
   trialConcurrency: 2
   trialGpuNumber: 0
   maxExperimentDuration: 24h
   maxTrialNumber: 100
   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize
   trainingService:
     - platform: remote
       machineList:
         - host: 127.0.0.1
           user: bob
           password: bob
     - platform: local

To use hybrid training services, users should set the training service configurations as a list in the ``trainingService`` field.
Currently, hybrid mode supports the `local`, `remote`, `pai`, `aml`, `kubeflow` and `frameworkcontroller` training services.
diff --git a/docs/en_US/TrainingService/KubeflowMode.rst b/docs/en_US/TrainingService/KubeflowMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eb8a31db43f6e07e435154c74f436a06b244b6d5
--- /dev/null
+++ b/docs/en_US/TrainingService/KubeflowMode.rst

Run an Experiment on Kubeflow
=============================

Now NNI supports running experiments on `Kubeflow `__\ , called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a Kubernetes cluster, either on-premises or `Azure Kubernetes Service(AKS) `__\ , and an Ubuntu machine on which `kubeconfig `__ is set up to connect to your Kubernetes cluster. If you are not familiar with Kubernetes, `here `__ is a good start. In kubeflow mode, your trial program will run as a Kubeflow job in the Kubernetes cluster.

Prerequisite for on-premises Kubernetes Service
-----------------------------------------------

#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes.
#. Download, set up, and deploy **Kubeflow** to your Kubernetes cluster. Follow this `guideline `__ to set up Kubeflow.
#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use ``$(HOME)/.kube/config`` as the kubeconfig file's path. You can also specify other kubeconfig files by setting the **KUBECONFIG** environment variable. Refer to this `guideline `__ to learn more about kubeconfig.
#. If your NNI trial job needs GPU resources, you should follow this `guideline `__ to configure the **Nvidia device plugin for Kubernetes**.
#. Prepare an **NFS server** and export a general purpose mount (we recommend mapping your NFS server path with the ``root_squash`` option, otherwise permission issues may arise when NNI copies files to NFS; refer to this `page `__ to learn what the root_squash option is), or **Azure File Storage**.
#. Install the **NFS client** on the machine where you install NNI and run nnictl to create experiments. Run this command to install the NFSv4 client:

.. code-block:: bash

   apt-get install nfs-common

7. Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart.rst>`__.

Prerequisite for Azure Kubernetes Service
-----------------------------------------

#. NNI supports Kubeflow based on Azure Kubernetes Service, follow the `guideline `__ to set up Azure Kubernetes Service.
#. Install `Azure CLI `__ and **kubectl**. Use ``az login`` to set your Azure account and connect the kubectl client to AKS, refer to this `guideline `__.
#. Deploy Kubeflow on Azure Kubernetes Service, following the `guideline `__.
#. Follow the `guideline `__ to create an azure file storage account. If you use Azure Kubernetes Service, NNI needs the Azure Storage Service to store code files and output files.
#. To access the Azure storage service, NNI needs the access key of the storage account, and NNI uses the `Azure Key Vault `__ Service to protect your private key. Set up Azure Key Vault Service and add a secret to Key Vault to store the access key of the Azure storage account. Follow this `guideline `__ to store the access key.

Design
------


.. image:: ../../img/kubeflow_training_design.png
   :target: ../../img/kubeflow_training_design.png
   :alt:

The Kubeflow training service instantiates a Kubernetes rest client to interact with your K8s cluster's API server; a rough sketch of such a call is shown below.
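As an illustration of that interaction (this is not NNI's actual code), creating a Kubeflow job as a Kubernetes custom resource could look like the following sketch. It assumes the ``@kubernetes/client-node`` package with its positional-argument API; the namespace and resource version are example values, and the TFJob manifest is left to the caller.

.. code-block:: typescript

   // Illustrative sketch only: submitting a Kubeflow TFJob custom resource
   // through the Kubernetes API server, using @kubernetes/client-node.
   import * as k8s from '@kubernetes/client-node';

   async function submitTfJob(tfJobManifest: object): Promise<void> {
       const kc = new k8s.KubeConfig();
       kc.loadFromDefault();                 // reads $KUBECONFIG or ~/.kube/config

       const client = kc.makeApiClient(k8s.CustomObjectsApi);

       // TFJob is a custom resource registered by tf-operator.
       await client.createNamespacedCustomObject(
           'kubeflow.org',    // group
           'v1',              // version (depends on your operator)
           'default',         // namespace (example)
           'tfjobs',          // plural name of the custom resource
           tfJobManifest      // the TFJob spec generated from the trial config
       );
   }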
For each trial, we will upload all the files in your local codeDir path (configured in nni_config.yml), together with NNI generated files like parameter.cfg, into a storage volume. Right now we support two kinds of storage volumes: `nfs `__ and `azure file storage `__\ ; you should configure the storage volume in the NNI config YAML file. After the files are prepared, the Kubeflow training service will call the K8S rest API to create Kubeflow jobs (\ `tf-operator `__ job or `pytorch-operator `__ job) in K8S, and mount your storage volume into the job's pod. Output files of the Kubeflow job, like stdout, stderr, trial.log or model files, will also be copied back to the storage volume. NNI will show the storage volume's URL for each trial in the WebUI, to allow users to browse the log files and the job's output files.

Supported operator
------------------

NNI only supports the tf-operator and pytorch-operator of Kubeflow; other operators are not tested.
Users can set the operator type in the config file.
The setting of tf-operator:

.. code-block:: yaml

   kubeflowConfig:
     operator: tf-operator

The setting of pytorch-operator:

.. code-block:: yaml

   kubeflowConfig:
     operator: pytorch-operator

Users of tf-operator can set ``ps`` and ``worker`` in the trial config; users of pytorch-operator can set ``master`` and ``worker`` in the trial config.

Supported storage type
----------------------

NNI supports NFS and Azure Storage to store the code and output files; users can set the storage type in the config file along with the corresponding config.

The settings for NFS storage are as follows:

.. code-block:: yaml

   kubeflowConfig:
     storage: nfs
     nfs:
       # Your NFS server IP, like 10.10.10.10
       server: {your_nfs_server_ip}
       # Your NFS server export path, like /var/nfs/nni
       path: {your_nfs_server_export_path}

If you use Azure storage, you should set ``kubeflowConfig`` in your config YAML file as follows:

.. code-block:: yaml

   kubeflowConfig:
     storage: azureStorage
     keyVault:
       vaultName: {your_vault_name}
       name: {your_secret_name}
     azureStorage:
       accountName: {your_storage_account_name}
       azureShare: {your_azure_share_name}

Run an experiment
-----------------

Use ``examples/trials/mnist-tfv1`` as an example. This is a TensorFlow job that uses the tf-operator of Kubeflow. The NNI config YAML file's content is like:

.. code-block:: yaml

   authorName: default
   experimentName: example_mnist
   trialConcurrency: 2
   maxExecDuration: 1h
   maxTrialNum: 20
   #choice: local, remote, pai, kubeflow
   trainingServicePlatform: kubeflow
   searchSpacePath: search_space.json
   #choice: true, false
   useAnnotation: false
   tuner:
     #choice: TPE, Random, Anneal, Evolution
     builtinTunerName: TPE
     classArgs:
       #choice: maximize, minimize
       optimize_mode: maximize
   assessor:
     builtinAssessorName: Medianstop
     classArgs:
       optimize_mode: maximize
   trial:
     codeDir: .
     worker:
       replicas: 2
       command: python3 dist_mnist.py
       gpuNum: 1
       cpuNum: 1
       memoryMB: 8196
       image: msranni/nni:latest
     ps:
       replicas: 1
       command: python3 dist_mnist.py
       gpuNum: 0
       cpuNum: 1
       memoryMB: 8196
       image: msranni/nni:latest
   kubeflowConfig:
     operator: tf-operator
     apiVersion: v1alpha2
     storage: nfs
     nfs:
       # Your NFS server IP, like 10.10.10.10
       server: {your_nfs_server_ip}
       # Your NFS server export path, like /var/nfs/nni
       path: {your_nfs_server_export_path}

Note: You should explicitly set ``trainingServicePlatform: kubeflow`` in the NNI config YAML file if you want to start the experiment in kubeflow mode.

If you want to run PyTorch jobs, you could set your config file as follows:

.. code-block:: yaml

   authorName: default
   experimentName: example_mnist_distributed_pytorch
   trialConcurrency: 1
   maxExecDuration: 1h
   maxTrialNum: 10
   #choice: local, remote, pai, kubeflow
   trainingServicePlatform: kubeflow
   searchSpacePath: search_space.json
   #choice: true, false
   useAnnotation: false
   tuner:
     #choice: TPE, Random, Anneal, Evolution
     builtinTunerName: TPE
     classArgs:
       #choice: maximize, minimize
       optimize_mode: minimize
   trial:
     codeDir: .
     master:
       replicas: 1
       command: python3 dist_mnist.py
       gpuNum: 1
       cpuNum: 1
       memoryMB: 2048
       image: msranni/nni:latest
     worker:
       replicas: 1
       command: python3 dist_mnist.py
       gpuNum: 0
       cpuNum: 1
       memoryMB: 2048
       image: msranni/nni:latest
   kubeflowConfig:
     operator: pytorch-operator
     apiVersion: v1alpha2
     nfs:
       # Your NFS server IP, like 10.10.10.10
       server: {your_nfs_server_ip}
       # Your NFS server export path, like /var/nfs/nni
       path: {your_nfs_server_export_path}

Trial configuration in kubeflow mode has the following configuration keys:

* codeDir

  * code directory, where you put training code and config files

* worker (required). This config section is used to configure the tensorflow worker role.

  * replicas

    * Required key. Should be a positive number depending on how many replicas you want to run for the tensorflow worker role.

  * command

    * Required key. Command to launch your trial job, like ``python mnist.py``

  * memoryMB

    * Required key. Should be a positive number based on your trial program's memory requirement

  * cpuNum
  * gpuNum
  * image

    * Required key. In kubeflow mode, your trial program will be scheduled by Kubernetes to run in a `Pod `__. This key is used to specify the Docker image used to create the pod where your trial program will run.
    * We have already built a docker image :githublink:`msranni/nni `. You can either use this image directly in your config file, or build your own image based on it.

  * privateRegistryAuthPath

    * Optional field; specifies the path of a ``config.json`` file that holds an authorization token for the docker registry, used to pull images from a private registry. `Refer `__.

  * apiVersion

    * Required key. The API version of your Kubeflow.

* ps (optional). This config section is used to configure the Tensorflow parameter server role.
* master (optional). This config section is used to configure the PyTorch master role.

Once you complete the NNI experiment config file and save it (for example, as exp_kubeflow.yml), run the following command

.. code-block:: bash

   nnictl create --config exp_kubeflow.yml

to start the experiment in kubeflow mode.
NNI will create a Kubeflow tfjob or pytorchjob for each trial, and the job name format is something like ``nni_exp_{experiment_id}_trial_{trial_id}``.
You can see the Kubeflow tfjobs created by NNI in your Kubernetes dashboard.

Notice: In kubeflow mode, NNIManager will start a REST server and listen on a port which is your NNI WebUI's port plus 1. For example, if your WebUI port is ``8080``\ , the REST server will listen on ``8081``\ , to receive metrics from trial jobs running in Kubernetes. So you should enable the ``8081`` TCP port in your firewall rules to allow incoming traffic.

Once a trial job is completed, you can go to the NNI WebUI's overview page (like http://localhost:8080/oview) to check the trial's information.

version check
-------------

NNI supports the version check feature since version 0.6, `refer `__.

For any problems when using NNI in kubeflow mode, please create issues on the `NNI Github repo `__.


Kubeflow reuse mode
----------------------
NNI supports setting reuse mode for trial jobs. In reuse mode, NNI submits a long-running trial runner process to occupy the container and starts trial jobs as subprocesses of the trial runner process; this means Kubernetes does not need to schedule a new container for each trial, it just reuses the old one.
Currently, kubeflow reuse mode only supports the V2 config.
Here is an example:

.. code-block:: yaml

   searchSpaceFile: search_space.json
   trialCommand: python3 mnist.py
   trialGpuNumber: 0
   trialConcurrency: 4
   maxTrialNumber: 20
   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize
   trainingService:
     reuseMode: true
     platform: kubeflow
     worker:
       command: python3 mnist.py
       code_directory: .
       dockerImage: msranni/nni
       cpuNumber: 1
       gpuNumber: 0
       memorySize: 8192
       replicas: 1
     operator: tf-operator
     storage:
       storageType: azureStorage
       azureAccount: {your_account}
       azureShare: {your_share}
       keyVaultName: {your_vault_name}
       keyVaultKey: {your_vault_key}
     apiVersion: v1

diff --git a/docs/en_US/TrainingService/LocalMode.rst b/docs/en_US/TrainingService/LocalMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..95a2f2dbf490e4d77049ddaae06084e9cfaa8672
--- /dev/null
+++ b/docs/en_US/TrainingService/LocalMode.rst

**Tutorial: Create and Run an Experiment on local with NNI API**
================================================================

In this tutorial, we will use the example in [nni/examples/trials/mnist-pytorch] to explain how to create and run an experiment on local with the NNI API.

..

   Before starts


You have an implementation of an MNIST classifier using convolutional layers; the Python code is similar to ``mnist.py``.

..

   Step 1 - Update model codes


To enable the NNI API, make the following changes:

1.1 Declare NNI API: include ``import nni`` in your trial code to use NNI APIs.

1.2 Get predefined parameters

Use the following code snippet:

.. code-block:: python

   tuner_params = nni.get_next_parameter()

to get the hyper-parameter values assigned by the tuner. ``tuner_params`` is an object, for example:

.. code-block:: json

   {"batch_size": 32, "hidden_size": 128, "lr": 0.01, "momentum": 0.2029}

..

1.3 Report NNI results: Use the API ``nni.report_intermediate_result(accuracy)`` to send ``accuracy`` to the assessor. Use the API ``nni.report_final_result(accuracy)`` to send ``accuracy`` to the tuner.

**NOTE**\ :

.. code-block:: text

   accuracy - The `accuracy` could be any python object, but if you use the NNI built-in tuner/assessor, `accuracy` should be a numerical variable (e.g. float, int).
   tuner - The tuner will generate the next parameters/architecture based on the exploration history (final results of all trials).
   assessor - The assessor will decide which trial should stop early based on the trial's performance history (intermediate results of one trial).

..

   Step 2 - Define SearchSpace


The hyper-parameters used in ``Step 1.2 - Get predefined parameters`` are defined in a ``search_space.json`` file like below:

.. code-block:: json

   {
       "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]},
       "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]},
       "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]},
       "momentum":{"_type":"uniform","_value":[0, 1]}
   }

Refer to `define search space <../Tutorial/SearchSpaceSpec.rst>`__ to learn more about search spaces.

..

   Step 3 - Define Experiment

   ..

To run an experiment in NNI, you only need:


* Provide a runnable trial
* Provide or choose a tuner
* Provide a YAML experiment configure file
* (optional) Provide or choose an assessor

**Prepare trial**\ :

..

   You can download the NNI source code; a set of examples can be found in ``nni/examples``. Run ``ls nni/examples/trials`` to see all the trial examples.


Let's use a simple trial example, e.g. mnist, provided by NNI. After you clone the NNI source, the NNI examples are under ``~/nni/examples``; run ``ls ~/nni/examples/trials`` to see all the trial examples. You can simply execute the following command to run the NNI mnist example:

.. code-block:: bash

   python ~/nni/examples/trials/mnist-pytorch/mnist.py


This command will be filled in the YAML configure file below. Please refer to `here <../TrialExample/Trials.rst>`__ for how to write your own trial.

**Prepare tuner**\ : NNI supports several popular automl algorithms, including Random Search, Tree of Parzen Estimators (TPE), Evolution algorithm, etc. Users can write their own tuner (refer to `here <../Tuner/CustomizeTuner.rst>`__\ ), but for simplicity, here we choose a tuner provided by NNI as below:

.. code-block:: yaml

   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize


*name* is used to specify a tuner in NNI, *classArgs* are the arguments passed to the tuner (the spec of builtin tuners can be found `here <../Tuner/BuiltinTuner.rst>`__\ ), and *optimize_mode* indicates whether you want to maximize or minimize your trial's result.

**Prepare configure file**\ : Since you already know which trial code you are going to run and which tuner you are going to use, it is time to prepare the YAML configure file. NNI provides a demo configure file for each trial example; ``cat ~/nni/examples/trials/mnist-pytorch/config.yml`` to see it. Its content is basically shown below:

.. code-block:: yaml

   experimentName: local training service example

   searchSpaceFile: ~/nni/examples/trials/mnist-pytorch/search_space.json
   trialCommand: python3 mnist.py
   trialCodeDirectory: ~/nni/examples/trials/mnist-pytorch

   trialGpuNumber: 0
   trialConcurrency: 1
   maxExperimentDuration: 3h
   maxTrialNumber: 10

   trainingService:
     platform: local

   tuner:
     name: TPE
     classArgs:
       optimize_mode: maximize


With all these steps done, we can run the experiment with the following command:

.. code-block:: bash

   nnictl create --config ~/nni/examples/trials/mnist-pytorch/config.yml


You can refer to `here <../Tutorial/Nnictl.rst>`__ for more usage of the *nnictl* command line tool.

View experiment results
-----------------------

The experiment is running now. Besides *nnictl*\ , NNI also provides a WebUI for you to view experiment progress, control your experiment, and use some other appealing features.

Using multiple local GPUs to speed up search
--------------------------------------------

The following steps assume that you have 4 NVIDIA GPUs installed locally and PyTorch with CUDA support. The demo enables 4 concurrent trial jobs, and each trial job uses 1 GPU.

**Prepare configure file**\ : NNI provides a demo configuration file for the setting above; ``cat ~/nni/examples/trials/mnist-pytorch/config_detailed.yml`` to see it. The trialConcurrency and trialGpuNumber values are different from the basic configure file:

.. code-block:: yaml

   ...

   trialGpuNumber: 1
   trialConcurrency: 4

   ...

   trainingService:
     platform: local
     useActiveGpu: false # set to "true" if you are using a graphical OS like Windows 10 or Ubuntu desktop


We can run the experiment with the following command:

.. code-block:: bash

   nnictl create --config ~/nni/examples/trials/mnist-pytorch/config_detailed.yml


You can use the *nnictl* command line tool or the WebUI to trace the training progress. The *nvidia-smi* command line tool can also help you monitor the GPU usage during training.

diff --git a/docs/en_US/TrainingService/Overview.rst b/docs/en_US/TrainingService/Overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a4b5e91c1681c76f190626bf66f9f8f5b400d9f
--- /dev/null
+++ b/docs/en_US/TrainingService/Overview.rst

Training Service
================

What is Training Service?
-------------------------

NNI training service is designed to allow users to focus on AutoML itself, agnostic to the underlying computing infrastructure where the trials are actually run. When migrating from one cluster to another (e.g., local machine to Kubeflow), users only need to tweak several configurations, and the experiment can be easily scaled.

Users can use the training services provided by NNI to run trial jobs on a `local machine <./LocalMode.rst>`__\ , `remote machines <./RemoteMachineMode.rst>`__\ , and on clusters like `PAI <./PaiMode.rst>`__\ , `Kubeflow <./KubeflowMode.rst>`__\ , `AdaptDL <./AdaptDLMode.rst>`__\ , `FrameworkController <./FrameworkControllerMode.rst>`__\ , `DLTS <./DLTSMode.rst>`__\ , `AML <./AMLMode.rst>`__ and `DLC <./DLCMode.rst>`__. These are called *built-in training services*.

If the computing resource you want to use is not listed above, NNI provides an interface that allows users to build their own training service easily. Please refer to `how to implement training service <./HowToImplementTrainingService.rst>`__ for details.

How to use Training Service?
----------------------------

Training service needs to be chosen and configured properly in the experiment configuration YAML file. Users can refer to the document of each training service for how to write the configuration. Also, the `reference <../Tutorial/ExperimentConfig.rst>`__ provides more details on the specification of the experiment configuration file.

Next, users should prepare the code directory, which is specified as ``codeDir`` in the config file.
Please note that in non-local mode, the code directory will be uploaded to remote or cluster before the experiment. Therefore, we limit the number of files to 2000 and total size to 300MB. If the code directory contains too many files, users can choose which files and subfolders should be excluded by adding a ``.nniignore`` file that works like a ``.gitignore`` file. For more details on how to write this file, see :githublink:`this example ` and the `git documentation `__. + +In case users intend to use large files in their experiment (like large-scaled datasets) and they are not using local mode, they can either: 1) download the data before each trial launches by putting it into trial command; or 2) use a shared storage that is accessible to worker nodes. Usually, training platforms are equipped with shared storage, and NNI allows users to easily use them. Refer to docs of each built-in training service for details. + +Built-in Training Services +-------------------------- + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - TrainingService + - Brief Introduction + * - `Local <./LocalMode.rst>`__ + - NNI supports running an experiment on local machine, called local mode. Local mode means that NNI will run the trial jobs and nniManager process in same machine, and support gpu schedule function for trial jobs. + * - `Remote <./RemoteMachineMode.rst>`__ + - NNI supports running an experiment on multiple machines through SSH channel, called remote mode. NNI assumes that you have access to those machines, and already setup the environment for running deep learning training code. NNI will submit the trial jobs in remote machine, and schedule suitable machine with enough gpu resource if specified. + * - `PAI <./PaiMode.rst>`__ + - NNI supports running an experiment on `OpenPAI `__ (aka PAI), called PAI mode. Before starting to use NNI PAI mode, you should have an account to access an `OpenPAI `__ cluster. See `here `__ if you don't have any OpenPAI account and want to deploy an OpenPAI cluster. In PAI mode, your trial program will run in PAI's container created by Docker. + * - `Kubeflow <./KubeflowMode.rst>`__ + - NNI supports running experiment on `Kubeflow `__\ , called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a Kubernetes cluster, either on-premises or `Azure Kubernetes Service(AKS) `__\ , a Ubuntu machine on which `kubeconfig `__ is setup to connect to your Kubernetes cluster. If you are not familiar with Kubernetes, `here `__ is a good start. In kubeflow mode, your trial program will run as Kubeflow job in Kubernetes cluster. + * - `AdaptDL <./AdaptDLMode.rst>`__ + - NNI supports running experiment on `AdaptDL `__\ , called AdaptDL mode. Before starting to use AdaptDL mode, you should have a Kubernetes cluster. + * - `FrameworkController <./FrameworkControllerMode.rst>`__ + - NNI supports running experiment using `FrameworkController `__\ , called frameworkcontroller mode. FrameworkController is built to orchestrate all kinds of applications on Kubernetes, you don't need to install Kubeflow for specific deep learning framework like tf-operator or pytorch-operator. Now you can use FrameworkController as the training service to run NNI experiment. + * - `DLTS <./DLTSMode.rst>`__ + - NNI supports running experiment using `DLTS `__\ , which is an open source toolkit, developed by Microsoft, that allows AI scientists to spin up an AI cluster in turn-key fashion. 
+ * - `AML <./AMLMode.rst>`__ + - NNI supports running an experiment on `AML `__ , called aml mode. + * - `DLC <./DLCMode.rst>`__ + - NNI supports running an experiment on `PAI-DLC `__ , called dlc mode. + + +What does Training Service do? +------------------------------ + + +.. raw:: html + +
+   <img alt="drawing"/>
+
+
+According to the architecture shown in `Overview <../Overview.rst>`__\ , training service (platform) is actually responsible for three things: 1) initiating a new trial; 2) collecting metrics and communicating with NNI core (NNI manager); 3) monitoring trial job status. To demonstrate in detail how training service works, we show the workflow of training service from the very beginning to the moment when the first trial succeeds.
+
+Step 1. **Validate config and prepare the training platform.** Training service first checks whether the training platform the user specifies is valid (e.g., is there anything wrong with authentication). After that, training service starts to prepare for the experiment by making the code directory (\ ``codeDir``\ ) accessible to the training platform.
+
+.. Note:: Different training services have different ways to handle ``codeDir``. For example, local training service directly runs trials in ``codeDir``. Remote training service packs ``codeDir`` into a zip and uploads it to each machine. K8S-based training services copy ``codeDir`` onto a shared storage, which is either provided by the training platform itself, or configured by users in the config file.
+
+Step 2. **Submit the first trial.** To initiate a trial, usually (in non-reuse mode), NNI copies a few more files (including parameters, a launch script, etc.) onto the training platform. After that, NNI launches the trial, e.g., through a subprocess, SSH, or a RESTful API.
+
+.. Warning:: The working directory of the trial command has exactly the same content as ``codeDir``, but can have a different path (even on a different machine). Local mode is the only training service that shares one ``codeDir`` across all trials. Other training services copy a ``codeDir`` from the shared copy prepared in step 1, and each trial has an independent working directory. We strongly advise users not to rely on this sharing behavior in local mode, as it will make your experiments difficult to scale to other training services.
+
+Step 3. **Collect metrics.** NNI then monitors the status of the trial, updates the recorded status (e.g., from ``WAITING`` to ``RUNNING``\ , ``RUNNING`` to ``SUCCEEDED``\ ), and also collects the metrics. Currently, most training services are implemented in an "active" way, i.e., the training service calls the RESTful API on the NNI manager to update the metrics. Note that this usually requires the machine that runs the NNI manager to be at least accessible to the worker nodes.
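+The metrics collected in step 3 are produced by the trial code itself through NNI's Python APIs. Below is a minimal, runnable sketch of such a trial; the hyperparameter name ``lr`` and the dummy training function are made up purely for illustration:
+
+.. code-block:: python
+
+   import nni
+
+   def train_one_epoch(lr):
+       # placeholder for real training logic; returns a dummy accuracy
+       return min(0.99, lr * 100)
+
+   params = nni.get_next_parameter() or {}  # hyperparameters chosen by the tuner
+   acc = 0.0
+   for epoch in range(10):
+       acc = train_one_epoch(params.get("lr", 0.001))
+       nni.report_intermediate_result(acc)  # collected by the training service
+   nni.report_final_result(acc)             # final metric; ends the trial
+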
+Training Service Under Reuse Mode
+---------------------------------
+
+When reuse mode is enabled, a cluster, such as a remote machine or a compute instance on AML, launches a long-running environment, and NNI submits trials to this environment iteratively, which saves the time of creating new jobs. For instance, using the OpenPAI training platform under reuse mode can avoid the overhead of pulling docker images, creating containers, and downloading data repeatedly.
+
+In reuse mode, users need to make sure each trial can run independently in the same job (e.g., avoid loading checkpoints from previous trials).
+
+.. note:: Currently, only `Local <./LocalMode.rst>`__, `Remote <./RemoteMachineMode.rst>`__, `OpenPAI <./PaiMode.rst>`__, `AML <./AMLMode.rst>`__ and `DLC <./DLCMode.rst>`__ training services support reuse mode. For the Remote and OpenPAI training platforms, you can enable reuse mode manually according to `here <../reference/experiment_config.rst>`__. The AML training service is implemented under reuse mode, so reuse is the default there and does not need to be enabled manually.
diff --git a/docs/en_US/TrainingService/PaiMode.rst b/docs/en_US/TrainingService/PaiMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..10076b33bd9ec9cba00bee9077f00b42709de3fd
--- /dev/null
+++ b/docs/en_US/TrainingService/PaiMode.rst
@@ -0,0 +1,225 @@
+.. role:: raw-html(raw)
+   :format: html
+
+
+**Run an Experiment on OpenPAI**
+====================================
+
+NNI supports running an experiment on `OpenPAI `__\ , called pai mode. Before starting to use NNI pai mode, you should have an account to access an `OpenPAI `__ cluster. See `here `__ if you don't have any OpenPAI account and want to deploy an OpenPAI cluster. In pai mode, your trial program runs in a Docker container created on OpenPAI.
+
+.. toctree::
+
+Setup environment
+-----------------
+
+**Step 1. Install NNI, following the install guide** `here <../Tutorial/QuickStart.rst>`__.
+
+**Step 2. Get a token.**
+
+Open the web portal of OpenPAI, and click the ``My profile`` button in the top-right corner.
+
+.. image:: ../../img/pai_profile.jpg
+   :scale: 80%
+
+Click the ``copy`` button on the page to copy a JWT token.
+
+.. image:: ../../img/pai_token.jpg
+   :scale: 67%
+
+**Step 3. Mount NFS storage to the local machine.**
+
+Click the ``Submit job`` button in the web portal.
+
+.. image:: ../../img/pai_job_submission_page.jpg
+   :scale: 50%
+
+Find the data management region on the job submission page.
+
+.. image:: ../../img/pai_data_management_page.jpg
+   :scale: 33%
+
+The ``Preview container paths`` field shows the NFS host and path that OpenPAI provides; you need to mount the corresponding host and path to your local machine first, then NNI can use OpenPAI's NFS storage.\ :raw-html:`<br/>`
+For example, use the following command:
+
+.. code-block:: bash
+
+   sudo mount -t nfs4 gcr-openpai-infra02:/pai/data /local/mnt
+
+Then the ``/data`` folder in the container will be mounted to the ``/local/mnt`` folder on your local machine.\ :raw-html:`<br/>`
+You can use the following configuration in your NNI config file:
+
+.. code-block:: yaml
+
+   localStorageMountPoint: /local/mnt
+
+**Step 4. Get OpenPAI's storage config name and localStorageMountPoint**
+
+The ``Team share storage`` field is the storage configuration used to specify storage values in OpenPAI. You can get the ``storageConfigName`` and ``containerStorageMountPoint`` fields from ``Team share storage``\ , for example:
+
+.. code-block:: yaml
+
+   storageConfigName: confignfs-data
+   containerStorageMountPoint: /mnt/confignfs-data
+
+Run an experiment
+-----------------
+
+Use ``examples/trials/mnist-pytorch`` as an example. The content of the NNI config YAML file looks like:
+
+.. code-block:: yaml
+
+   searchSpaceFile: search_space.json
+   trialCommand: python3 mnist.py
+   trialGpuNumber: 0
+   trialConcurrency: 1
+   maxTrialNumber: 10
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+   trainingService:
+     platform: openpai
+     host: http://123.123.123.123
+     username: ${your user name}
+     token: ${your token}
+     dockerImage: msranni/nni
+     trialCpuNumber: 1
+     trialMemorySize: 8GB
+     storageConfigName: ${your storage config name}
+     localStorageMountPoint: ${NFS mount point on local machine}
+     containerStorageMountPoint: ${NFS mount point inside Docker container}
+
+Note: You should set ``platform: openpai`` in the NNI config YAML file if you want to start the experiment in pai mode. The ``host`` field in the configuration file is the URI of OpenPAI's job submission page, like ``10.10.5.1``\ . The default protocol in NNI is HTTPS; if your OpenPAI cluster has disabled HTTPS, please use the URI in the ``http://10.10.5.1`` format.
+
+OpenPAI configurations
+^^^^^^^^^^^^^^^^^^^^^^
+
+Compared with `LocalMode `__ and `RemoteMachineMode `__\ , the ``trainingService`` configuration in pai mode has the following additional keys:
+
+
+*
+  username
+
+  Required key. User name of the OpenPAI platform.
+
+*
+  token
+
+  Required key. Authentication key of the OpenPAI platform.
+
+*
+  host
+
+  Required key. The host of the OpenPAI platform. It is the URI of OpenPAI's job submission page, like ``10.10.5.1``\ . The default protocol in NNI is HTTPS; if your OpenPAI cluster has disabled HTTPS, please use the URI in the ``http://10.10.5.1`` format.
+
+*
+  trialCpuNumber
+
+  Optional key. Should be a positive number based on your trial program's CPU requirement. If it is not set in the trial configuration, it should be set in the config specified by the ``openpaiConfig`` or ``openpaiConfigFile`` field.
+
+*
+  trialMemorySize
+
+  Optional key. Should be in a format like ``2gb``\ , based on your trial program's memory requirement. If it is not set in the trial configuration, it should be set in the config specified by the ``openpaiConfig`` or ``openpaiConfigFile`` field.
+
+*
+  dockerImage
+
+  Optional key. In pai mode, your trial program will be scheduled by OpenPAI to run in a `Docker container `__. This key is used to specify the Docker image used to create the container in which your trial will run.
+
+  We have already built a docker image :githublink:`nnimsra/nni `. You can either use this image directly in your config file, or build your own image based on it. If it is not set in the trial configuration, it should be set in the config specified by the ``openpaiConfig`` or ``openpaiConfigFile`` field.
+
+.. cannot find :githublink:`nnimsra/nni `
+
+*
+  virtualCluster
+
+  Optional key. Set the virtualCluster of OpenPAI. If omitted, the job will run on the default virtual cluster.
+
+*
+  localStorageMountPoint
+
+  Required key. Set the mount path on the machine where you run nnictl.
+
+*
+  containerStorageMountPoint
+
+  Required key. Set the mount path in the container used in OpenPAI.
+
+*
+  storageConfigName
+
+  Optional key. Set the storage name used in OpenPAI. If it is not set in the trial configuration, it should be set in the config specified by the ``openpaiConfig`` or ``openpaiConfigFile`` field.
+
+*
+  openpaiConfigFile
+
+  Optional key. Set the file path of the OpenPAI job configuration; the file is in yaml format.
+
+  If users set ``openpaiConfigFile`` in NNI's configuration file, there is no need to specify the fields ``storageConfigName``, ``virtualCluster``, ``dockerImage``, ``trialCpuNumber``, ``trialGpuNumber``, ``trialMemorySize`` in the configuration. These fields will use the values from the config file specified by ``openpaiConfigFile``.
+
+*
+  openpaiConfig
+
+  Optional key. Similar to ``openpaiConfigFile``, but instead of referencing an external file, using this field you embed the content into NNI's config YAML.
+
+  Note:
+
+
+  #.
+     The job name in OpenPAI's configuration file will be replaced by a new job name created by NNI, with the name format ``nni_exp_{this.experimentId}_trial_{trialJobId}``.
+
+  #.
+     If users set multiple taskRoles in OpenPAI's configuration file, NNI will wrap all of these taskRoles and start multiple tasks in one trial job. Users should ensure that only one taskRole reports metrics to NNI, otherwise there might be conflict errors.
+
+Once you have completed the NNI experiment config file and saved it (for example, as exp_pai.yml), run the following command
+
+.. code-block:: bash
+
+   nnictl create --config exp_pai.yml
+
+to start the experiment in pai mode. NNI will create an OpenPAI job for each trial, and the job name format is something like ``nni_exp_{experiment_id}_trial_{trial_id}``.
+You can see the jobs created by NNI in your OpenPAI cluster's web portal, like:
+
+.. image:: ../../img/nni_pai_joblist.jpg
+   :target: ../../img/nni_pai_joblist.jpg
+   :alt:
+
+
+Notice: In pai mode, NNIManager will start a REST server and listen on a port which is your NNI WebUI's port plus 1. For example, if your WebUI port is ``8080``\ , the REST server will listen on ``8081`` to receive metrics from trial jobs running on OpenPAI. So you should open TCP port ``8081`` in your firewall rule to allow incoming traffic.
+
+Once a trial job is completed, you can go to NNI WebUI's overview page (like http://localhost:8080/oview) to check the trial's information.
+
+Expand a trial's information in the trial list view, and click the logPath link like:
+
+.. image:: ../../img/nni_webui_joblist.png
+   :scale: 30%
+
+And you will be redirected to the HDFS web portal to browse the output files of that trial in HDFS:
+
+.. image:: ../../img/nni_trial_hdfs_output.jpg
+   :scale: 80%
+
+You can see there are three files in the output folder: stderr, stdout, and trial.log.
+
+Data management
+---------------
+
+Before using NNI to start your experiment, users should set the corresponding mounted data path on the nniManager machine. OpenPAI has its own storage (NFS, AzureBlob, ...), and the storage used in OpenPAI will be mounted to the container when it starts a job. Users should set the OpenPAI storage type via the ``paiStorageConfigName`` field to choose a storage in OpenPAI. Then users should mount the storage to their nniManager machine and set the ``nniManagerNFSMountPath`` field in the configuration file. NNI will generate bash files, copy the data in ``codeDir`` to the ``nniManagerNFSMountPath`` folder, and then start a trial job.
The data in ``nniManagerNFSMountPath`` will be synced to OpenPAI storage and mounted into OpenPAI's container. The data path in the container is set by ``containerNFSMountPath``\ ; NNI enters this folder first, and then runs scripts to start a trial job.
+
+Version check
+-------------
+
+NNI supports a version check feature since version 0.6. It is a policy to ensure that the version of NNIManager is consistent with the trialKeeper version, and to avoid errors caused by version incompatibility.
+Check policy:
+
+
+#. NNIManager before v0.6 could run any version of trialKeeper; trialKeeper supports backward compatibility.
+#. Since version 0.6, the NNIManager version should be kept the same as the trialKeeper version. For example, if the NNIManager version is 0.6, the trialKeeper version should be 0.6 too.
+#. Note that the version check feature only checks the first two digits of the version. For example, NNIManager v0.6.1 could use trialKeeper v0.6 or trialKeeper v0.6.2, but could not use trialKeeper v0.5.1 or trialKeeper v0.7.
+
+If you cannot run your experiment and want to know whether it is caused by the version check, you can check your WebUI, where there will be an error message about the version check.
+
+
+.. image:: ../../img/webui-img/experimentError.png
+   :scale: 80%
diff --git a/docs/en_US/TrainingService/RemoteMachineMode.rst b/docs/en_US/TrainingService/RemoteMachineMode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7e54ef8d845f0efae802a7bbacc5cd6d31c7b838
--- /dev/null
+++ b/docs/en_US/TrainingService/RemoteMachineMode.rst
@@ -0,0 +1,139 @@
+Run an Experiment on Remote Machines
+====================================
+
+NNI can run one experiment on multiple remote machines through SSH, called ``remote`` mode. It is like a lightweight training platform. In this mode, NNI can be started from your computer and dispatch trials to the remote machines in parallel.
+
+The OS of the remote machines can be ``Linux``\ , ``Windows 10``\ , or ``Windows Server 2019``.
+
+Requirements
+------------
+
+
+*
+  Make sure the default environment of the remote machines meets the requirements of your trial code. If the default environment does not meet the requirements, a setup script can be added to the ``command`` field of the NNI config.
+
+*
+  Make sure the remote machines can be accessed through SSH from the machine which runs the ``nnictl`` command. Both password and key authentication of SSH are supported. For advanced usages, please refer to the `machineList part of the configuration <../Tutorial/ExperimentConfig.rst>`__.
+
+*
+  Make sure the NNI version on each machine is consistent.
+
+*
+  Make sure the command of the trial is compatible with the remote OSes, if you want to use remote Linux and Windows together. For example, the default python 3.x executable is called ``python3`` on Linux, and ``python`` on Windows.
+
+Linux
+^^^^^
+
+
+* Follow `installation <../Tutorial/InstallationLinux.rst>`__ to install NNI on the remote machine.
+
+Windows
+^^^^^^^
+
+
+*
+  Follow `installation <../Tutorial/InstallationWin.rst>`__ to install NNI on the remote machine.
+
+*
+  Install and start ``OpenSSH Server``.
+
+
+  #.
+     Open the ``Settings`` app on Windows.
+
+  #.
+     Click ``Apps``\ , then click ``Optional features``.
+
+  #.
+     Click ``Add a feature``\ , search for and select ``OpenSSH Server``\ , and then click ``Install``.
+
+  #.
+     Once it's installed, run the commands below to start it and set it to start automatically.
+
+     .. code-block:: bat
+
+        sc config sshd start=auto
+        net start sshd
+
+*
+  Make sure the remote account is an administrator, so that it can stop running trials.
+
+*
+  Make sure there is no welcome message beyond the default, since extra messages cause the ssh2 library in NodeJs to fail. For example, if you're using a Data Science VM on Azure, you need to remove the extra echo commands in ``C:\dsvm\tools\setup\welcome.bat``.
+
+  Output like the below is OK when opening a new command window.
+
+  .. code-block:: text
+
+     Microsoft Windows [Version 10.0.17763.1192]
+     (c) 2018 Microsoft Corporation. All rights reserved.
+
+     (py37_default) C:\Users\AzureUser>
+
+Run an experiment
+-----------------
+
+E.g., there are three machines that can be logged into with a username and password.
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - IP
+     - Username
+     - Password
+   * - 10.1.1.1
+     - bob
+     - bob123
+   * - 10.1.1.2
+     - bob
+     - bob123
+   * - 10.1.1.3
+     - bob
+     - bob123
+
+
+Install and run NNI on one of those three machines or on another machine which has network access to them.
+
+Use ``examples/trials/mnist-pytorch`` as the example. Below is the content of ``examples/trials/mnist-pytorch/config_remote.yml``\ :
+
+.. code-block:: yaml
+
+   searchSpaceFile: search_space.json
+   trialCommand: python3 mnist.py
+   trialCodeDirectory: . # default value, can be omitted
+   trialGpuNumber: 0
+   trialConcurrency: 4
+   maxTrialNumber: 20
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+   trainingService:
+     platform: remote
+     machineList:
+       - host: 192.0.2.1
+         user: alice
+         ssh_key_file: ~/.ssh/id_rsa
+       - host: 192.0.2.2
+         port: 10022
+         user: bob
+         password: bob123
+         pythonPath: /usr/bin
+
+Files in ``trialCodeDirectory`` will be uploaded to the remote machines automatically. You can run the command below on Windows, Linux, or macOS to spawn trials on the remote Linux machines:
+
+.. code-block:: bash
+
+   nnictl create --config examples/trials/mnist-pytorch/config_remote.yml
+
+Configure python environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, commands and scripts are executed in the default environment of the remote machine. If there are multiple python virtual environments on your remote machine and you want to run experiments in a specific environment, use **pythonPath** to specify a python environment on your remote machine.
+
+For example, with anaconda you can specify:
+
+.. code-block:: yaml
+
+   pythonPath: /home/bob/.conda/envs/ENV-NAME/bin
diff --git a/docs/en_US/TrialExample/Cifar10Examples.rst b/docs/en_US/TrialExample/Cifar10Examples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6059b3b5d2225be5f3c49f18bbaeb380f3ed3b80
--- /dev/null
+++ b/docs/en_US/TrialExample/Cifar10Examples.rst
@@ -0,0 +1,85 @@
+CIFAR-10 examples
+=================
+
+Overview
+--------
+
+`CIFAR-10 `__ classification is a common benchmark problem in machine learning. The CIFAR-10 dataset is a collection of images. It is one of the most widely used datasets for machine learning research and contains 60,000 32x32 color images in 10 different classes. Thus, we use CIFAR-10 classification as an example to introduce NNI usage.
+
+**Goals**
+^^^^^^^^^^^^^
+
+As we all know, the choice of model optimizer directly affects the performance of the final metrics. The goal of this tutorial is to **tune a better-performing optimizer** to train a relatively small convolutional neural network (CNN) for recognizing images.
+
+In this example, we have selected the following common deep learning optimizers:
+
+.. code-block:: bash
+
+   "SGD", "Adadelta", "Adagrad", "Adam", "Adamax"
+
+
+**Experiment**
+^^^^^^^^^^^^^^^^^^^^
+
+Preparations
+^^^^^^^^^^^^
+This example requires PyTorch. The PyTorch install package should be chosen based on your python version and cuda version.
+
+Here is an example for an environment with python==3.5 and cuda==8.0; use the following commands to install `PyTorch `__\ :
+
+.. code-block:: bash
+
+   python3 -m pip install http://download.pytorch.org/whl/cu80/torch-0.4.1-cp35-cp35m-linux_x86_64.whl
+   python3 -m pip install torchvision
+
+CIFAR-10 with NNI
+^^^^^^^^^^^^^^^^^
+
+**Search Space**
+
+As stated in the goals, we aim to find the best ``optimizer`` for training CIFAR-10 classification. When using different optimizers, we also need to adjust ``learning rates`` and the ``network structure`` accordingly, so we choose these three parameters as hyperparameters and write the following search space.
+
+.. code-block:: json
+
+   {
+       "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001, 0.0001]},
+       "optimizer":{"_type":"choice", "_value":["SGD", "Adadelta", "Adagrad", "Adam", "Adamax"]},
+       "model":{"_type":"choice", "_value":["vgg", "resnet18", "googlenet", "densenet121", "mobilenet", "dpn92", "senet18"]}
+   }
+
+Implemented code directory: :githublink:`search_space.json `
+
+**Trial**
+
+The trial contains the code for CNN training with each hyperparameter set. The following points are specific to NNI (a minimal sketch is given below):
+
+
+* Use ``nni.get_next_parameter()`` to get the next training hyperparameter set.
+* Use ``nni.report_intermediate_result(acc)`` to report the intermediate result after finishing each epoch.
+* Use ``nni.report_final_result(acc)`` to report the final result before the trial ends.
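+The sketch below shows how these calls fit together in a trial. The ``build_model`` and ``train_and_evaluate`` helpers are hypothetical stand-ins for the real training logic; only the NNI calls are meant literally:
+
+.. code-block:: python
+
+   import nni
+
+   def build_model(name):
+       # stand-in for constructing vgg / resnet18 / ... as in the real example
+       return {'name': name, 'acc': 0.0}
+
+   def train_and_evaluate(model, optimizer_name, lr):
+       # stand-in for one epoch of training followed by test-set evaluation
+       model['acc'] = min(0.95, model['acc'] + lr)
+       return model['acc']
+
+   if __name__ == '__main__':
+       params = {'lr': 0.1, 'optimizer': 'SGD', 'model': 'vgg'}  # defaults
+       params.update(nni.get_next_parameter() or {})             # tuner's choice
+       model = build_model(params['model'])
+       acc = 0.0
+       for epoch in range(10):
+           acc = train_and_evaluate(model, params['optimizer'], params['lr'])
+           nni.report_intermediate_result(acc)  # accuracy after each epoch
+       nni.report_final_result(acc)             # final accuracy of this trial
+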
+Implemented code directory: :githublink:`main.py `
+
+You can also use your previous code directly; refer to `How to define a trial `__ for how to modify it.
+
+**Config**
+
+Here is the example of running this experiment on local (with multiple GPUs):
+
+code directory: :githublink:`examples/trials/cifar10_pytorch/config.yml `
+
+Here is the example of running this experiment on OpenPAI:
+
+code directory: :githublink:`examples/trials/cifar10_pytorch/config_pai.yml `
+
+The complete examples we have implemented: :githublink:`examples/trials/cifar10_pytorch/ `
+
+Launch the experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We are ready for the experiment; let's now **run the config.yml file from your command line to start the experiment**.
+
+.. code-block:: bash
+
+   nnictl create --config nni/examples/trials/cifar10_pytorch/config.yml
diff --git a/docs/en_US/TrialExample/EfficientNet.rst b/docs/en_US/TrialExample/EfficientNet.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b544f88312c8add93eda0f090d0d01263cc6e8ba
--- /dev/null
+++ b/docs/en_US/TrialExample/EfficientNet.rst
@@ -0,0 +1,29 @@
+EfficientNet
+============
+
+`EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks `__
+
+Use grid search to find the best combination of alpha, beta and gamma for EfficientNet-B1, as discussed in Section 3.3 of the paper. Search space, tuner, and configuration examples are provided here.
+
+Instructions
+------------
+
+:githublink:`Example code `
+
+
+#. Set your working directory to the example code directory.
+#. Run ``git clone https://github.com/ultmaster/EfficientNet-PyTorch`` to clone the `ultmaster modified version `__ of the original `EfficientNet-PyTorch `__. The modifications adhere to the original `Tensorflow version `__ as closely as possible (including EMA, label smoothing, etc.); also added is the part which gets parameters from the tuner and reports intermediate/final results. Clone it into ``EfficientNet-PyTorch``\ ; files like ``main.py`` and ``train_imagenet.sh`` will appear inside, as specified in the configuration files.
+#. Run ``nnictl create --config config_local.yml`` (use ``config_pai.yml`` for OpenPAI) to find the best EfficientNet-B1. Adjust the training service (PAI/local/remote) and batch size in the config files according to your environment.
+
+For training on ImageNet, read ``EfficientNet-PyTorch/train_imagenet.sh``. Download ImageNet beforehand and extract it adhering to the `PyTorch format `__\ , and then replace ``/mnt/data/imagenet`` with the location of the ImageNet storage. This file should also be a good example to follow for mounting ImageNet into the container on OpenPAI.
+
+Results
+-------
+
+The following image is a screenshot, demonstrating the relationship between acc@1 and alpha, beta, gamma.
+
+
+.. image:: ../../img/efficientnet_search_result.png
+   :target: ../../img/efficientnet_search_result.png
+   :alt:
+
diff --git a/docs/en_US/TrialExample/GbdtExample.rst b/docs/en_US/TrialExample/GbdtExample.rst
new file mode 100644
index 0000000000000000000000000000000000000000..338b446562f0237cabd277a4d1d199c66aa8de22
--- /dev/null
+++ b/docs/en_US/TrialExample/GbdtExample.rst
@@ -0,0 +1,219 @@
+GBDT in nni
+===========
+
+Gradient boosting is a machine learning technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. It builds the model in a stage-wise fashion as other boosting methods do, and it generalizes them by allowing optimization of an arbitrary differentiable loss function.
+
+Gradient boosting decision trees have many popular implementations, such as `lightgbm `__\ , `xgboost `__\ , and `catboost `__\ , etc. GBDT is a great tool for solving traditional machine learning problems. Since GBDT is a robust algorithm, it can be used in many domains. The better the hyper-parameters for GBDT, the better the performance you can achieve.
+
+NNI is a great platform for tuning hyper-parameters; you can try the various built-in search algorithms in nni and run multiple trials concurrently.
+
+1. Search Space in GBDT
+-----------------------
+
+There are many hyper-parameters in GBDT, but which parameters will affect the performance or speed? Based on some practical experience, here are some suggestions (taking lightgbm as an example):
+
+..
+
+   * For better accuracy
+   * ``learning_rate``. The range of ``learning_rate`` could be [0.001, 0.9].
+
+
+
+*
+  ``num_leaves``. ``num_leaves`` is related to ``max_depth``\ ; you don't have to tune both of them.
+
+*
+  ``bagging_freq``. ``bagging_freq`` could be [1, 2, 4, 8, 10].
+
+*
+  ``num_iterations``. May be larger if underfitting.
+
+..
+
+   * For speed up
+   * ``bagging_fraction``. The range of ``bagging_fraction`` could be [0.7, 1.0].
+
+
+
+*
+  ``feature_fraction``. The range of ``feature_fraction`` could be [0.6, 1.0].
+
+*
+  ``max_bin``.
+
+..
+
+   * To avoid overfitting
+   * ``min_data_in_leaf``. This depends on your dataset.
+
+
+
+*
+  ``min_sum_hessian_in_leaf``. This depends on your dataset.
+
+*
+  ``lambda_l1`` and ``lambda_l2``.
+
+*
+  ``min_gain_to_split``.
+
+*
+  ``num_leaves``.
+
+Reference links:
+`lightgbm `__ and `autoxgboost `__
+
+2. Task description
+-------------------
+
+Now we come back to our example "auto-gbdt", which runs with lightgbm and nni. The data includes :githublink:`train data ` and :githublink:`test data `.
+Given the features and label in the train data, we train a GBDT regression model and use it to predict.
+
+3. How to run in nni
+--------------------
+
+3.1 Install all the requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+   pip install lightgbm
+   pip install pandas
+
+3.2 Prepare your trial code
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You need to prepare basic code like the following:
+
+.. code-block:: python
+
+   ...
+
+   def get_default_parameters():
+       ...
+       return params
+
+
+   def load_data(train_path='./data/regression.train', test_path='./data/regression.test'):
+       '''
+       Load or create dataset
+       '''
+       ...
+
+       return lgb_train, lgb_eval, X_test, y_test
+
+   def run(lgb_train, lgb_eval, params, X_test, y_test):
+       # train
+       gbm = lgb.train(params,
+                       lgb_train,
+                       num_boost_round=20,
+                       valid_sets=lgb_eval,
+                       early_stopping_rounds=5)
+       # predict
+       y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
+
+       # eval
+       rmse = mean_squared_error(y_test, y_pred) ** 0.5
+       print('The rmse of prediction is:', rmse)
+
+   if __name__ == '__main__':
+       lgb_train, lgb_eval, X_test, y_test = load_data()
+
+       PARAMS = get_default_parameters()
+       # train
+       run(lgb_train, lgb_eval, PARAMS, X_test, y_test)
+
+3.3 Prepare your search space.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you would like to tune ``num_leaves``\ , ``learning_rate``\ , ``bagging_fraction`` and ``bagging_freq``\ , you can write a :githublink:`search_space.json ` as follows:
+
+.. code-block:: json
+
+   {
+       "num_leaves":{"_type":"choice","_value":[31, 28, 24, 20]},
+       "learning_rate":{"_type":"choice","_value":[0.01, 0.05, 0.1, 0.2]},
+       "bagging_fraction":{"_type":"uniform","_value":[0.7, 1.0]},
+       "bagging_freq":{"_type":"choice","_value":[1, 2, 4, 8, 10]}
+   }
+
+More supported variable types can be found `here <../Tutorial/SearchSpaceSpec.rst>`__.
+
+3.4 Add the NNI SDK into your code.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: diff
+
+   +import nni
+    ...
+
+    def get_default_parameters():
+        ...
+        return params
+
+
+    def load_data(train_path='./data/regression.train', test_path='./data/regression.test'):
+        '''
+        Load or create dataset
+        '''
+        ...
+
+        return lgb_train, lgb_eval, X_test, y_test
+
+    def run(lgb_train, lgb_eval, params, X_test, y_test):
+        # train
+        gbm = lgb.train(params,
+                        lgb_train,
+                        num_boost_round=20,
+                        valid_sets=lgb_eval,
+                        early_stopping_rounds=5)
+        # predict
+        y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
+
+        # eval
+        rmse = mean_squared_error(y_test, y_pred) ** 0.5
+        print('The rmse of prediction is:', rmse)
+
+   +    nni.report_final_result(rmse)
+
+    if __name__ == '__main__':
+        lgb_train, lgb_eval, X_test, y_test = load_data()
+
+   +    RECEIVED_PARAMS = nni.get_next_parameter()
+        PARAMS = get_default_parameters()
+   +    PARAMS.update(RECEIVED_PARAMS)
+
+        # train
+        run(lgb_train, lgb_eval, PARAMS, X_test, y_test)
+
+3.5 Write a config file and run it.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the config file, you can set some settings including:
+
+
+* Experiment setting: ``trialConcurrency``\ , ``trialGpuNumber``\ , etc.
+* Platform setting: ``trainingService``\ , etc.
+* Path setting: ``searchSpaceFile``\ , ``trialCodeDirectory``\ , etc.
+* Algorithm setting: select the ``tuner`` algorithm, ``tuner optimize_mode``\ , etc.
+
+An example config.yml is as follows:
+
+.. code-block:: yaml
+
+   experimentName: auto-gbdt example
+   searchSpaceFile: search_space.json
+   trialCommand: python3 main.py
+   trialGpuNumber: 0
+   trialConcurrency: 1
+   maxTrialNumber: 10
+   trainingService:
+     platform: local
+   tuner:
+     name: TPE #choice: TPE, Random, Anneal, Evolution, BatchTuner, etc
+     classArgs:
+       optimize_mode: minimize
+
+Run this experiment with the following command:
+
+.. code-block:: bash
+
+   nnictl create --config ./config.yml
diff --git a/docs/en_US/TrialExample/KDExample.rst b/docs/en_US/TrialExample/KDExample.rst
new file mode 100644
index 0000000000000000000000000000000000000000..29a23ae02bdfcd7412f39b674b3b5ac91bf133bb
--- /dev/null
+++ b/docs/en_US/TrialExample/KDExample.rst
@@ -0,0 +1,46 @@
+Knowledge Distillation on NNI
+=============================
+
+KnowledgeDistill
+----------------
+
+Knowledge Distillation (KD) was proposed in `Distilling the Knowledge in a Neural Network `__\ ; the compressed model is trained to mimic a pre-trained, larger model. This training setting is also referred to as "teacher-student", where the large model is the teacher and the small model is the student. KD is often used to fine-tune the pruned model.
+
+
+.. image:: ../../img/distill.png
+   :target: ../../img/distill.png
+   :alt:
+
+Usage
+^^^^^
+
+PyTorch code
+
+.. code-block:: python
+
+   for batch_idx, (data, target) in enumerate(train_loader):
+       data, target = data.to(device), target.to(device)
+       optimizer.zero_grad()
+       y_s = model_s(data)  # student logits
+       y_t = model_t(data)  # teacher logits
+       loss_cri = F.cross_entropy(y_s, target)
+
+       # kd loss: soften both distributions with temperature kd_T
+       p_s = F.log_softmax(y_s/kd_T, dim=1)
+       p_t = F.softmax(y_t/kd_T, dim=1)
+       loss_kd = F.kl_div(p_s, p_t, size_average=False) * (kd_T**2) / y_s.shape[0]
+
+       # total loss
+       loss = loss_cri + loss_kd
+       loss.backward()
+
+
+The complete code for fine-tuning the pruned model can be found :githublink:`here `
+
+.. code-block:: bash
+
+   python finetune_kd_torch.py --model [model name] --teacher-model-dir [pretrained checkpoint path] --student-model-dir [pruned checkpoint path] --mask-path [mask file path]
+
+Note: to fine-tune a pruned model, run :githublink:`basic_pruners_torch.py ` first to get the mask file, then pass the mask path as an argument to the script.
+
+
diff --git a/docs/en_US/TrialExample/MnistExamples.rst b/docs/en_US/TrialExample/MnistExamples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67365cd4b35093c1ceeb9b13e285d62e96a631e8
--- /dev/null
+++ b/docs/en_US/TrialExample/MnistExamples.rst
@@ -0,0 +1,88 @@
+.. role:: raw-html(raw)
+   :format: html
+
+
+MNIST examples
+==============
+The CNN MNIST classifier for deep learning is similar to ``hello world`` for programming languages. Thus, we use MNIST as an example to introduce different features of NNI. The examples are listed below:
+
+
+* `MNIST with NNI API (PyTorch) <#mnist-pytorch>`__
+* `MNIST with NNI API (TensorFlow v2.x) <#mnist-tfv2>`__
+* `MNIST with NNI API (TensorFlow v1.x) <#mnist-tfv1>`__
+* `MNIST with NNI annotation <#mnist-annotation>`__
+* `MNIST in keras <#mnist-keras>`__
+* `MNIST -- tuning with batch tuner <#mnist-batch>`__
+* `MNIST -- tuning with hyperband <#mnist-hyperband>`__
+* `MNIST -- tuning within a nested search space <#mnist-nested>`__
+* `distributed MNIST (tensorflow) using kubeflow <#mnist-kubeflow-tf>`__
+* `distributed MNIST (pytorch) using kubeflow <#mnist-kubeflow-pytorch>`__
+
+:raw-html:``
+**MNIST with NNI API (PyTorch)**
+
+This is a simple network which has two convolutional layers, two pooling layers, and a fully connected layer.
+We tune hyperparameters such as dropout rate, convolution size, hidden size, etc.
+It can be tuned with most NNI built-in tuners, such as TPE, SMAC, and Random.
+We also provide an example YAML file which enables the assessor.
+
+code directory: :githublink:`mnist-pytorch/ `
+
+:raw-html:``
+**MNIST with NNI API (TensorFlow v2.x)**
+
+Same network as the example above, but written in TensorFlow.
+
+code directory: :githublink:`mnist-tfv2/ `
+
+:raw-html:``
+**MNIST with NNI API (TensorFlow v1.x)**
+
+Same network as the example above, but written in the TensorFlow v1.x API.
+
+code directory: :githublink:`mnist-tfv1/ `
+
+:raw-html:``
+**MNIST with NNI annotation**
+
+This example is similar to the example above; the only difference is that this example uses NNI annotation to specify the search space and report results, while the example above uses NNI APIs to receive configuration and report results.
+
+code directory: :githublink:`mnist-annotation/ `
+
+:raw-html:``
+**MNIST -- tuning with batch tuner**
+
+This example shows how to use the batch tuner. Users simply list all the configurations they want to try in the search space file, and NNI will try all of them.
+
+code directory: :githublink:`mnist-batch-tune-keras/ `
+
+:raw-html:``
+**MNIST -- tuning with hyperband**
+
+This example shows how to use hyperband to tune the model. There is one more key, ``STEPS``\ , in the received configuration for trials to control how long they can run (e.g., number of iterations).
+
+.. cannot find :githublink:`mnist-hyperband/ `
+
+code directory: :githublink:`mnist-hyperband/ `
+
+:raw-html:``
+**MNIST -- tuning within a nested search space**
+
+This example shows that NNI also supports nested search spaces. The search space file is an example of how to define a nested search space.
+
+code directory: :githublink:`mnist-nested-search-space/ `
+
+:raw-html:``
+**distributed MNIST (tensorflow) using kubeflow**
+
+This example shows how to run distributed training on kubeflow through NNI. Users can simply provide the distributed training code and a config file which specifies the kubeflow mode, for example, the command to run ps, the command to run worker, and how many resources they consume. This example is implemented in tensorflow and thus uses the kubeflow tensorflow operator.
+
+code directory: :githublink:`mnist-distributed/ `
+
+:raw-html:``
+**distributed MNIST (pytorch) using kubeflow**
+
+Similar to the previous example; the difference is that this example is implemented in pytorch and thus uses the kubeflow pytorch operator.
+
+code directory: :githublink:`mnist-distributed-pytorch/ `
diff --git a/docs/en_US/TrialExample/OpEvoExamples.rst b/docs/en_US/TrialExample/OpEvoExamples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..870bdd2123e814cdfd1ef27445671fedf659b35a
--- /dev/null
+++ b/docs/en_US/TrialExample/OpEvoExamples.rst
@@ -0,0 +1,130 @@
+.. role:: raw-html(raw)
+   :format: html
+
+
+Tuning Tensor Operators on NNI
+==============================
+
+Overview
+--------
+
+Abundant applications raise the demand for training and running inference on deep neural networks (DNNs) efficiently on diverse hardware platforms, ranging from cloud servers to embedded devices. Moreover, computational graph-level optimization of deep neural networks, like tensor operator fusion, may introduce new tensor operators. Manually optimized tensor operators provided by hardware-specific libraries therefore have limitations in supporting new hardware platforms or new operators, so automatically optimizing tensor operators on diverse hardware platforms is essential for the large-scale deployment and application of deep learning technologies in real-world problems.
+
+Tensor operator optimization is substantially a combinatorial optimization problem. The objective function is the performance of a tensor operator on a specific hardware platform, which should be maximized with respect to the hyper-parameters of the corresponding device code, such as how to tile a matrix or whether to unroll a loop. Unlike many typical problems of this type, such as the travelling salesman problem, the objective function of tensor operator optimization is a black box and expensive to sample: one has to compile the device code with a specific configuration and run it on real hardware to get the corresponding performance metric. Therefore, a desired method for optimizing tensor operators should find the best configuration with as few samples as possible.
+
+The expensive objective function makes solving the tensor operator optimization problem with traditional combinatorial optimization methods, for example, simulated annealing and evolutionary algorithms, almost impossible. Although these algorithms inherently support combinatorial search spaces, they do not take sample-efficiency into account;
+thus thousands of samples or even more are usually needed, which is unacceptable when tuning tensor operators in production environments. On the other hand, sequential model based optimization (SMBO) methods have proven sample-efficient for optimizing black-box functions with continuous search spaces. However, when optimizing functions with combinatorial search spaces, SMBO methods are not as sample-efficient as their continuous counterparts, because there is a lack of prior assumptions about the objective functions, such as the continuity and differentiability available in the case of continuous search spaces. For example, if one could assume an objective function with a continuous search space is infinitely differentiable, a Gaussian process with a radial basis function (RBF) kernel could be used to model the objective function. In this way, a sample provides not only a single value at a point but also the local properties of the objective function in its neighborhood, or even global properties,
+which results in high sample-efficiency. In contrast, SMBO methods for combinatorial optimization suffer from poor sample-efficiency due to the lack of proper prior assumptions and of surrogate models which can leverage them.
+
+OpEvo was recently proposed to solve this challenging problem. It efficiently explores the search spaces of tensor operators by introducing a topology-aware mutation operation based on a q-random walk distribution to leverage the topological structures over the search spaces. Following this example, you can use OpEvo to tune three representative types of tensor operators selected from two popular neural networks, BERT and AlexNet. Three comparison baselines, AutoTVM, G-BFS and N-A2C, are also provided. Please refer to `OpEvo: An Evolutionary Method for Tensor Operator Optimization `__ for a detailed explanation of these algorithms.
+
+Environment Setup
+-----------------
+
+We prepared a dockerfile for setting up experiment environments. Before starting, please make sure the Docker daemon is running and the driver of your GPU accelerator is properly installed. Enter the example folder ``examples/trials/systems/opevo`` and run the command below to build and instantiate a Docker image from the dockerfile.
+
+.. code-block:: bash
+
+   # if you are using an Nvidia GPU
+   make cuda-env
+   # if you are using an AMD GPU
+   make rocm-env
+
+Run Experiments
+---------------
+
+Three representative kinds of tensor operators, **matrix multiplication**\ , **batched matrix multiplication** and **2D convolution**\ , are chosen from BERT and AlexNet, and tuned with NNI. The ``Trial`` code for all tensor operators is ``/root/compiler_auto_tune_stable.py``\ , and the ``Search Space`` files and ``config`` files for each tuning algorithm are located in ``/root/experiments/``\ , categorized by tensor operator. Here ``/root`` refers to the root of the container.
+
+For tuning the operators of matrix multiplication, please run the commands below from ``/root``\ :
+
+.. code-block:: bash
+
+   # (N, K) x (K, M) represents a matrix of shape (N, K) multiplying a matrix of shape (K, M)
+
+   # (512, 1024) x (1024, 1024)
+   # tuning with OpEvo
+   nnictl create --config experiments/mm/N512K1024M1024/config_opevo.yml
+   # tuning with G-BFS
+   nnictl create --config experiments/mm/N512K1024M1024/config_gbfs.yml
+   # tuning with N-A2C
+   nnictl create --config experiments/mm/N512K1024M1024/config_na2c.yml
+   # tuning with AutoTVM
+   OP=matmul STEP=512 N=512 M=1024 K=1024 P=NN ./run.sh
+
+   # (512, 1024) x (1024, 4096)
+   # tuning with OpEvo
+   nnictl create --config experiments/mm/N512K1024M4096/config_opevo.yml
+   # tuning with G-BFS
+   nnictl create --config experiments/mm/N512K1024M4096/config_gbfs.yml
+   # tuning with N-A2C
+   nnictl create --config experiments/mm/N512K1024M4096/config_na2c.yml
+   # tuning with AutoTVM
+   OP=matmul STEP=512 N=512 M=1024 K=4096 P=NN ./run.sh
+
+   # (512, 4096) x (4096, 1024)
+   # tuning with OpEvo
+   nnictl create --config experiments/mm/N512K4096M1024/config_opevo.yml
+   # tuning with G-BFS
+   nnictl create --config experiments/mm/N512K4096M1024/config_gbfs.yml
+   # tuning with N-A2C
+   nnictl create --config experiments/mm/N512K4096M1024/config_na2c.yml
+   # tuning with AutoTVM
+   OP=matmul STEP=512 N=512 M=4096 K=1024 P=NN ./run.sh
+
+For tuning the operators of batched matrix multiplication, please run the commands below from ``/root``\ :
+
+.. code-block:: bash
+
+   # a batched matrix with batch size 960 and matrix shape (128, 128) multiplies a batched matrix with batch size 960 and matrix shape (128, 64)
+   # tuning with OpEvo
+   nnictl create --config experiments/bmm/B960N128K128M64PNN/config_opevo.yml
+   # tuning with AutoTVM
+   OP=batch_matmul STEP=512 B=960 N=128 K=128 M=64 P=NN ./run.sh
+
+   # a batched matrix with batch size 960 and matrix shape (128, 128) is transposed first and then multiplies a batched matrix with batch size 960 and matrix shape (128, 64)
+   # tuning with OpEvo
+   nnictl create --config experiments/bmm/B960N128K128M64PTN/config_opevo.yml
+   # tuning with AutoTVM
+   OP=batch_matmul STEP=512 B=960 N=128 K=128 M=64 P=TN ./run.sh
+
+   # a batched matrix with batch size 960 and matrix shape (128, 64) is transposed first and then right-multiplies a batched matrix with batch size 960 and matrix shape (128, 64)
+   # tuning with OpEvo
+   nnictl create --config experiments/bmm/B960N128K64M128PNT/config_opevo.yml
+   # tuning with AutoTVM
+   OP=batch_matmul STEP=512 B=960 N=128 K=64 M=128 P=NT ./run.sh
+
+For tuning the operators of 2D convolution, please run the commands below from ``/root``\ :
+
+.. code-block:: bash
+
+   # an image tensor of shape (512, 3, 227, 227) convolves with a kernel tensor of shape (64, 3, 11, 11) with stride 4 and padding 0
+   # tuning with OpEvo
+   nnictl create --config experiments/conv/N512C3HW227F64K11ST4PD0/config_opevo.yml
+   # tuning with AutoTVM
+   OP=convfwd_direct STEP=512 N=512 C=3 H=227 W=227 F=64 K=11 ST=4 PD=0 ./run.sh
+
+   # an image tensor of shape (512, 64, 27, 27) convolves with a kernel tensor of shape (192, 64, 5, 5) with stride 1 and padding 2
+   # tuning with OpEvo
+   nnictl create --config experiments/conv/N512C64HW27F192K5ST1PD2/config_opevo.yml
+   # tuning with AutoTVM
+   OP=convfwd_direct STEP=512 N=512 C=64 H=27 W=27 F=192 K=5 ST=1 PD=2 ./run.sh
+
+Please note that G-BFS and N-A2C are only designed for tuning the tiling schemes of multiplications of matrices whose rows and columns are powers of 2, so they are not compatible with other types of configuration spaces and thus are not eligible to tune the operators of batched matrix multiplication and 2D convolution. Here, AutoTVM is implemented by its authors in the TVM project, so its tuning results are printed on the screen rather than reported to the NNI manager. Port 8080 of the container is bound to the host on the same port, so one can access the NNI Web UI through ``host_ip_addr:8080`` and monitor the tuning process as in the screenshot below.
+
+.. image:: ../../img/opevo.png
+
+Citing OpEvo
+------------
+
+If you find OpEvo helpful, please consider citing the paper as follows:
+
+.. code-block:: bibtex
+
+   @misc{gao2020opevo,
+         title={OpEvo: An Evolutionary Method for Tensor Operator Optimization},
+         author={Xiaotian Gao and Cui Wei and Lintao Zhang and Mao Yang},
+         year={2020},
+         eprint={2006.05664},
+         archivePrefix={arXiv},
+         primaryClass={cs.LG}
+   }
diff --git a/docs/en_US/TrialExample/Pix2pixExample.rst b/docs/en_US/TrialExample/Pix2pixExample.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4ad858f986b480d02efd455a09da2eb758ccf08f
--- /dev/null
+++ b/docs/en_US/TrialExample/Pix2pixExample.rst
@@ -0,0 +1,164 @@
+Pix2pix example
+=================
+
+Overview
+--------
+
+`Pix2pix `__ is a conditional generative adversarial network (conditional GAN) framework proposed by Isola et al. in 2016, targeting image-to-image translation problems. This framework performs well in a wide range of image generation problems. In the original paper, the authors demonstrate how to use pix2pix to solve the following image translation problems: 1) labels to street scene; 2) labels to facade; 3) BW to color; 4) aerial to map; 5) day to night; and 6) edges to photo. If you are interested, please read more on the `official project page `__. In this example, we use pix2pix to introduce how to use NNI for tuning conditional GANs.
+
+**Goals**
+^^^^^^^^^^^^^
+
+Although GANs are known to be able to generate high-resolution realistic images, they are generally fragile and difficult to optimize, and mode collapse can happen during training due to an improper optimization setting, loss formulation, model architecture, weight initialization, or even data augmentation pattern. The goal of this tutorial is to leverage NNI's hyperparameter tuning tools to automatically find a good setting for these important factors.
+
+In this example, we aim at selecting the following hyperparameters automatically:
+
+
+* ``ngf``: number of generator filters in the last conv layer
+* ``ndf``: number of discriminator filters in the first conv layer
+* ``netG``: generator architecture
+* ``netD``: discriminator architecture
+* ``norm``: normalization type
+* ``init_type``: weight initialization method
+* ``lr``: initial learning rate for adam
+* ``beta1``: momentum term of adam
+* ``lr_policy``: learning rate policy
+* ``gan_mode``: type of GAN objective
+* ``lambda_L1``: weight of L1 loss in the generator objective
+
+
+**Experiments**
+^^^^^^^^^^^^^^^^^^^^
+
+Preparations
+^^^^^^^^^^^^
+
+This example requires the GPU version of PyTorch. The PyTorch installation should be chosen based on your system, python version, and cuda version.
+
+Please refer to the detailed instructions for installing `PyTorch `__.
+
+Next, run the following shell script to clone the repository maintained by the original authors of pix2pix. This example relies on the implementations in this repository.
+
+.. code-block:: bash
+
+   ./setup.sh
+
+Pix2pix with NNI
+^^^^^^^^^^^^^^^^^
+
+**Search Space**
+
+We summarize the range of values for each hyperparameter mentioned above into a single search space json object.
+
+.. code-block:: json
+
+   {
+       "ngf": {"_type":"choice","_value":[16, 32, 64, 128, 256]},
+       "ndf": {"_type":"choice","_value":[16, 32, 64, 128, 256]},
+       "netG": {"_type":"choice","_value":["resnet_9blocks", "unet_256"]},
+       "netD": {"_type":"choice","_value":["basic", "pixel", "n_layers"]},
+       "norm": {"_type":"choice","_value":["batch", "instance", "none"]},
+       "init_type": {"_type":"choice","_value":["xavier", "normal", "kaiming", "orthogonal"]},
+       "lr":{"_type":"choice","_value":[0.0001, 0.0002, 0.0005, 0.001, 0.005, 0.01, 0.1]},
+       "beta1":{"_type":"uniform","_value":[0, 1]},
+       "lr_policy": {"_type":"choice","_value":["linear", "step", "plateau", "cosine"]},
+       "gan_mode": {"_type":"choice","_value":["vanilla", "lsgan", "wgangp"]},
+       "lambda_L1": {"_type":"choice","_value":[1, 5, 10, 100, 250, 500]}
+   }
+
+Starting from v2.0, the search space is directly included in the config. Please find the example here: :githublink:`config.yml `
+
+**Trial**
+
+To experiment on this set of hyperparameters using NNI, we have to write trial code which receives a set of parameter settings from NNI, trains a generator and a discriminator using these parameters, and then reports the final scores back to NNI. In the experiment, NNI repeatedly calls this trial code, passing in different sets of hyperparameter settings.
+It is important that the following three lines are incorporated in the trial code (a minimal sketch follows the list):
+
+* Use ``nni.get_next_parameter()`` to get the next hyperparameter set.
+* (Optional) Use ``nni.report_intermediate_result(score)`` to report the intermediate result after finishing each epoch.
+* Use ``nni.report_final_result(score)`` to report the final result before the trial ends.
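+A minimal sketch of this pattern is shown below. The ``train_pix2pix`` helper is a hypothetical stand-in for the actual training and evaluation logic in :githublink:`pix2pix.py `; only the NNI calls are meant literally:
+
+.. code-block:: python
+
+   import nni
+
+   def train_pix2pix(opts):
+       # stand-in: train the generator/discriminator with the given options
+       # and yield the test-set L1 loss after each epoch
+       for epoch in range(3):
+           yield 0.5 - 0.1 * epoch
+
+   params = nni.get_next_parameter()  # e.g. {'ngf': 16, 'netG': 'unet_256', ...}
+   l1_loss = None
+   for l1_loss in train_pix2pix(params):
+       nni.report_intermediate_result(l1_loss)
+   nni.report_final_result(l1_loss)   # final score for this set of hyperparameters
+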
+
+Implemented code directory: :githublink:`pix2pix.py `
+
+Some notes on the implementation:
+
+* The trial code for this example is adapted from the `repository maintained by the authors of Pix2pix and CycleGAN `__ . You can also use your previous code directly; please refer to `How to define a trial `__ for modifying the code.
+* By default, the code uses the dataset "facades". It also supports the datasets "night2day", "edges2handbags", "edges2shoes", and "maps".
+* For "facades", 200 epochs are enough for the model to converge to a point where the differences between models trained with different hyperparameters are salient enough for evaluation. If you are using other datasets, please consider increasing the ``n_epochs`` and ``n_epochs_decay`` parameters, either by passing them as arguments when calling ``pix2pix.py`` in the config file (discussed below) or by changing ``pix2pix.py`` directly. Also, for "facades", 200 epochs are enough for the final training, while the number may vary for other datasets.
+* In this example, we use the L1 loss on the test set as the score reported to NNI. Although L1 is by no means a comprehensive measure of image generation performance, most of the time it makes sense for evaluating pix2pix models with a similar architectural setup. In this example, for the hyperparameters we experiment on, a lower L1 score generally indicates higher generation performance.
+
+
+**Config**
+
+Here is the example config for running this experiment on local (with a single GPU):
+
+code directory: :githublink:`examples/trials/pix2pix-pytorch/config.yml `
+
+To have a full look at our implementation, check: :githublink:`examples/trials/pix2pix-pytorch/ `
+
+Launch the experiment
+^^^^^^^^^^^^^^^^^^^^^
+
+We are ready for the experiment; let's now **run the config.yml file from your command line to start the experiment**.
+
+.. code-block:: bash
+
+   nnictl create --config nni/examples/trials/pix2pix-pytorch/config.yml
+
+Collecting the Results
+^^^^^^^^^^^^^^^^^^^^^^
+
+By default, our trial code saves the final trained model for each trial in the ``checkpoints/`` directory in the trial directory of the NNI experiment. ``latest_net_G.pth`` and ``latest_net_D.pth`` correspond to the saved checkpoints of the generator and the discriminator.
+
+To make it easier to run inference and see the generated images, we also incorporate simple inference code here: :githublink:`test.py `
+
+To use the code, run the following command:
+
+.. code-block:: bash
+
+   python3 test.py -c CHECKPOINT -p PARAMETER_CFG -d DATASET_NAME -o OUTPUT_DIR
+
+``CHECKPOINT`` is the directory saving the checkpoints (e.g., the ``checkpoints/`` directory in the trial directory). ``PARAMETER_CFG`` is the ``parameter.cfg`` file generated by NNI recording the hyperparameter settings. This file can be found in the trial directory created by NNI.
+
+Results and Discussions
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Following the previous steps, we ran the example for 40 trials using the TPE tuner. We found the best-performing parameters on the "facades" dataset to be the following set.
+
+.. code-block:: json
+
+   {
+       "ngf": 16,
+       "ndf": 128,
+       "netG": "unet_256",
+       "netD": "pixel",
+       "norm": "none",
+       "init_type": "normal",
+       "lr": 0.0002,
+       "beta1": 0.6954,
+       "lr_policy": "step",
+       "gan_mode": "lsgan",
+       "lambda_L1": 500
+   }
+
+Meanwhile, we compare the results with a model trained using the following default empirical hyperparameter settings:
+
+.. code-block:: json
+
+   {
+       "ngf": 128,
+       "ndf": 128,
+       "netG": "unet_256",
+       "netD": "basic",
+       "norm": "batch",
+       "init_type": "xavier",
+       "lr": 0.0002,
+       "beta1": 0.5,
+       "lr_policy": "linear",
+       "gan_mode": "lsgan",
+       "lambda_L1": 100
+   }
+
+We can observe that for the learning rate (0.0002), the generator architecture (U-Net), and the GAN objective (LSGAN), the two results agree with each other. This is also consistent with the widely accepted practice on this dataset. Meanwhile, the hyperparameters "beta1", "lambda_L1", "ngf", and "ndf" are slightly changed in NNI's found solution to fit the target dataset. We found that the parameters searched by NNI outperform the empirical parameters on the facades dataset, both in terms of L1 loss and the visual quality of the images. While the searched hyperparameters achieve an L1 loss of 0.3317 on the test set of facades, the empirical hyperparameters only achieve an L1 loss of 0.4148. The following image shows some sample results of facades test set input-output pairs produced by the model with hyperparameters tuned with NNI.
+
+.. image:: ../../img/pix2pix_pytorch_facades.png
+   :target: ../../img/pix2pix_pytorch_facades.png
+   :alt:
diff --git a/docs/en_US/TrialExample/RocksdbExamples.rst b/docs/en_US/TrialExample/RocksdbExamples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b917194854a77be389f666340129f5e9144bfaff
--- /dev/null
+++ b/docs/en_US/TrialExample/RocksdbExamples.rst
@@ -0,0 +1,129 @@
+Tuning RocksDB on NNI
+=====================
+
+Overview
+--------
+
+`RocksDB `__ is a popular high-performance embedded key-value database used in production systems at various web-scale enterprises including Facebook, Yahoo!, and LinkedIn. It is a fork of `LevelDB `__ by Facebook, optimized to exploit many central processing unit (CPU) cores and to make efficient use of fast storage, such as solid-state drives (SSD), for input/output (I/O) bound workloads.
+
+The performance of RocksDB is highly contingent on its tuning. However, because of the complexity of its underlying technology and the large number of configurable parameters, a good configuration is sometimes hard to obtain. NNI can help to address this issue. NNI supports many kinds of tuning algorithms to search for the best configuration of RocksDB, and supports many kinds of environments like local machines, remote servers, and the cloud.
+
+This example illustrates how to use NNI to search for the best configuration of RocksDB for a ``fillrandom`` benchmark supported by the benchmark tool ``db_bench``\ , which is an official benchmark tool provided by RocksDB itself. Therefore, before running this example, please make sure NNI is installed and `db_bench `__ is in your ``PATH``. Please refer to `here <../Tutorial/QuickStart.rst>`__ for detailed information about installation and preparation of the NNI environment, and `here `__ for compiling RocksDB as well as ``db_bench``.
+
+We also provide a simple script :githublink:`db_bench_installation.sh ` that helps to compile and install ``db_bench`` as well as its dependencies on Ubuntu. Installing RocksDB on other systems can follow the same procedure.
:githublink:`code directory `

Experiment setup
----------------

There are mainly three steps to set up an experiment for tuning systems on NNI: define the search space with a ``json`` file, write benchmark code, and start the NNI experiment by passing a config file to the NNI manager.

Search Space
^^^^^^^^^^^^

For simplicity, this example tunes three parameters, ``write_buffer_size``\ , ``min_write_buffer_num``\ , and ``level0_file_num_compaction_trigger``\ , for randomly writing 16M keys with a key size of 20 Bytes and a value size of 100 Bytes, based on write operations per second (OPS). ``write_buffer_size`` sets the size of a single memtable. Once a memtable exceeds this size, it is marked immutable and a new one is created. ``min_write_buffer_num`` is the minimum number of memtables to be merged before flushing to storage. Once the number of files in level 0 reaches ``level0_file_num_compaction_trigger``\ , level 0 to level 1 compaction is triggered.

In this example, the search space is specified by a ``search_space.json`` file as shown below. A detailed explanation of search spaces can be found `here <../Tutorial/SearchSpaceSpec.rst>`__.

.. code-block:: json

   {
       "write_buffer_size": {
           "_type": "quniform",
           "_value": [2097152, 16777216, 1048576]
       },
       "min_write_buffer_number_to_merge": {
           "_type": "quniform",
           "_value": [2, 16, 1]
       },
       "level0_file_num_compaction_trigger": {
           "_type": "quniform",
           "_value": [2, 16, 1]
       }
   }

:githublink:`code directory `

Benchmark code
^^^^^^^^^^^^^^

Benchmark code should receive a configuration from the NNI manager and report the corresponding benchmark result back. The following NNI APIs are designed for this purpose; a sketch of how they fit together is shown below. In this example, write operations per second (OPS) is used as the performance metric. Please refer to `here `__ for detailed information.


* Use ``nni.get_next_parameter()`` to get the next system configuration.
* Use ``nni.report_final_result(metric)`` to report the benchmark result.

:githublink:`code directory `

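The sketch below illustrates the shape of such a benchmark trial. The exact ``db_bench`` flags and the output parsing are simplified assumptions for illustration; the real implementation lives in the code directory linked above:

.. code-block:: python

   import subprocess

   import nni

   def run_fillrandom(config):
       # Run a db_bench fillrandom benchmark with the given RocksDB options
       # and return the measured write operations per second (OPS).
       cmd = ['db_bench', '--benchmarks=fillrandom'] + [
           '--{}={}'.format(key, int(value)) for key, value in config.items()
       ]
       output = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
       # db_bench prints a line like "fillrandom : 1.28 micros/op 779289 ops/sec;"
       # (the parsing below is an assumption and may need adjusting).
       for line in output.splitlines():
           if line.strip().startswith('fillrandom') and 'ops/sec' in line:
               return float(line.split('ops/sec')[0].split()[-1])
       raise RuntimeError('could not find the fillrandom result in db_bench output')

   if __name__ == '__main__':
       params = nni.get_next_parameter()  # configuration proposed by the tuner
       nni.report_final_result(run_fillrandom(params))
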
Config file
^^^^^^^^^^^

One can start an NNI experiment with a config file. A config file for NNI is a ``yaml`` file that usually includes experiment settings (\ ``trialConcurrency``\ , ``trialGpuNumber``\ , etc.), platform settings (\ ``trainingService``\ ), path settings (\ ``searchSpaceFile``\ , ``trialCodeDirectory``\ , etc.), and tuner settings (\ ``tuner``\ , ``tuner optimize_mode``\ , etc.). Please refer to `here <../Tutorial/QuickStart.rst>`__ for more information.

Here is an example of tuning RocksDB with the SMAC algorithm:

:githublink:`code directory `

Here is an example of tuning RocksDB with the TPE algorithm:

:githublink:`code directory `

Other tuners can easily be adopted in the same way. Please refer to `here <../Tuner/BuiltinTuner.rst>`__ for more information.

Finally, we can enter the example folder and start the experiment using the following commands:

.. code-block:: bash

   # tuning RocksDB with SMAC tuner
   nnictl create --config ./config_smac.yml
   # tuning RocksDB with TPE tuner
   nnictl create --config ./config_tpe.yml

Experiment results
------------------

We ran these two examples on the same machine with the following details:


* 16 * Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
* 465 GB of rotational hard drive with ext4 file system
* 128 GB of RAM
* Kernel version: 4.15.0-58-generic
* NNI version: v1.0-37-g1bd24577
* RocksDB version: 6.4
* RocksDB DEBUG_LEVEL: 0

The detailed experiment results are shown in the figure below. The horizontal axis is the sequential order of trials. The vertical axis is the metric, write OPS in this example. Blue dots represent trials tuning RocksDB with the SMAC tuner, and orange dots stand for trials tuning RocksDB with the TPE tuner.


.. image:: ../../img/rocksdb-fillrandom-plot.png
   :target: ../../img/rocksdb-fillrandom-plot.png
   :alt: image


The following table lists the best trials and the corresponding parameters and metric obtained by the two tuners. Unsurprisingly, both of them found the same optimal configuration for the ``fillrandom`` benchmark.

.. list-table::
   :header-rows: 1
   :widths: auto

   * - Tuner
     - Best trial
     - Best OPS
     - write_buffer_size
     - min_write_buffer_number_to_merge
     - level0_file_num_compaction_trigger
   * - SMAC
     - 255
     - 779289
     - 2097152
     - 7.0
     - 7.0
   * - TPE
     - 169
     - 761456
     - 2097152
     - 7.0
     - 7.0

diff --git a/docs/en_US/TrialExample/SklearnExamples.rst b/docs/en_US/TrialExample/SklearnExamples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eed7fb2e1b23a83868fe4d3fe4a08806ebc32439
--- /dev/null
+++ b/docs/en_US/TrialExample/SklearnExamples.rst

Scikit-learn in NNI
===================

`Scikit-learn `__ is a popular machine learning tool for data mining and data analysis. It supports many kinds of machine learning models, such as LinearRegression, LogisticRegression, DecisionTree, and SVM. Making the use of scikit-learn more efficient is a valuable topic.

NNI supports many kinds of tuning algorithms to search for the best models and/or hyper-parameters for scikit-learn, and supports many kinds of environments, such as local machines, remote servers, and the cloud.

1. How to run the example
-------------------------

To start using NNI, you should install the NNI package and use the command line tool ``nnictl`` to start an experiment. For more information about installation and preparing the environment, please refer to `here <../Tutorial/QuickStart.rst>`__.

After you have installed NNI, you can enter the corresponding folder and start the experiment using the following command:

.. code-block:: bash

   nnictl create --config ./config.yml

2. Description of the example
-----------------------------

2.1 classification
^^^^^^^^^^^^^^^^^^

This example uses the digits dataset, which is made up of 1797 8x8 images, each of which is a hand-written digit. The goal is to classify these images into 10 classes.

In this example, we use SVC as the model and tune some parameters of this model, including ``"C", "kernel", "degree", "gamma" and "coef0"``. For more information on these parameters, please `refer `__.

2.2 regression
^^^^^^^^^^^^^^

This example uses the Boston Housing Dataset, which consists of the prices of houses in various places in Boston, along with information such as the crime rate (CRIM), the proportion of non-retail business acres in the town (INDUS), and the age of the housing stock (AGE), used to predict Boston house prices.

In this example, we tune different kinds of regression models, including ``"LinearRegression", "SVR", "KNeighborsRegressor", "DecisionTreeRegressor"``, and some parameters such as ``"svr_kernel", "knr_weights"``. You can get more details about these models from `here `__.

3. How to write scikit-learn code using NNI
-------------------------------------------

It is easy to use NNI in your scikit-learn code; there are only a few steps.


*
  **step 1**

  Prepare a search_space.json file to store your search space.

  For example, if you want to choose among different models, you may try:

  .. code-block:: json

     {
         "model_name":{"_type":"choice","_value":["LinearRegression", "SVR", "KNeighborsRegressor", "DecisionTreeRegressor"]}
     }

  If you want to choose among different models and parameters, you can put them together in one search_space.json file.

  .. code-block:: json

     {
         "model_name":{"_type":"choice","_value":["LinearRegression", "SVR", "KNeighborsRegressor", "DecisionTreeRegressor"]},
         "svr_kernel": {"_type":"choice","_value":["linear", "poly", "rbf"]},
         "knr_weights": {"_type":"choice","_value":["uniform", "distance"]}
     }

  You can then read these values as a dict in your Python code; see step 2.

*
  **step 2**

  At the beginning of your Python code, you should ``import nni`` to ensure the package works normally.

  First, use the ``nni.get_next_parameter()`` function to get the parameters given by NNI. Then use these parameters to update your code.
  For example, if you define your search_space.json in the following format:

  .. code-block:: json

     {
         "C": {"_type":"uniform","_value":[0.1, 1]},
         "kernel": {"_type":"choice","_value":["linear", "rbf", "poly", "sigmoid"]},
         "degree": {"_type":"choice","_value":[1, 2, 3, 4]},
         "gamma": {"_type":"uniform","_value":[0.01, 0.1]},
         "coef0": {"_type":"uniform","_value":[0.01, 0.1]}
     }

  you may get a parameter dict like this:

  .. code-block:: python

     params = {
         'C': 1.0,
         'kernel': 'linear',
         'degree': 3,
         'gamma': 0.01,
         'coef0': 0.01
     }

  Then you can use these variables in your scikit-learn code.

*
  **step 3**

  After you have finished training, you will have a score for the model, such as precision, recall, or MSE. NNI needs your score for its tuning algorithms to generate the next group of parameters, so please report the score back to NNI so that the next trial job can start.

  You just need to use ``nni.report_final_result(score)`` to communicate with NNI after your scikit-learn code finishes. Or, if you have multiple scores during the steps of training, you can also report them back to NNI using ``nni.report_intermediate_result(score)``. Note that reporting intermediate results is optional, but you must report a final result. A complete minimal sketch combining the three steps is shown below.

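The following sketch puts the three steps together for the classification example. It is a simplified illustration, not the example's actual code; the fallback defaults let it run in standalone mode:

.. code-block:: python

   import nni
   from sklearn.datasets import load_digits
   from sklearn.model_selection import train_test_split
   from sklearn.svm import SVC

   if __name__ == '__main__':
       # Step 2: default values, overridden by the tuner's choices when available.
       params = {'C': 1.0, 'kernel': 'linear', 'degree': 3, 'gamma': 0.01, 'coef0': 0.01}
       params.update(nni.get_next_parameter())

       X, y = load_digits(return_X_y=True)
       X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=99)

       model = SVC(**params)
       model.fit(X_train, y_train)

       # Step 3: report the test accuracy as the final result.
       nni.report_final_result(model.score(X_test, y_test))
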
diff --git a/docs/en_US/TrialExample/SquadEvolutionExamples.rst b/docs/en_US/TrialExample/SquadEvolutionExamples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..69cc7e4742f32efcf6e22112311b05eaa1316b3a
--- /dev/null
+++ b/docs/en_US/TrialExample/SquadEvolutionExamples.rst

Automatic Model Architecture Search for Reading Comprehension
=============================================================

This example shows how to use a genetic algorithm to find good model architectures for reading comprehension.

1. Search Space
---------------

Since attention and RNNs have been proven effective in reading comprehension, we define the search space as the following set of mutation operations:


#. IDENTITY (effectively means keep training).
#. INSERT-RNN-LAYER (inserts an LSTM; having compared the performance of GRU and LSTM in our experiment, we decided to use LSTM here).
#. REMOVE-RNN-LAYER
#. INSERT-ATTENTION-LAYER (inserts an attention layer).
#. REMOVE-ATTENTION-LAYER
#. ADD-SKIP (identity between random layers).
#. REMOVE-SKIP (removes a random skip).


.. image:: ../../../examples/trials/ga_squad/ga_squad.png
   :target: ../../../examples/trials/ga_squad/ga_squad.png
   :alt: 


New version
^^^^^^^^^^^

We also have another version with a lower time cost and better performance. We will release it soon.

2. How to run this example locally
----------------------------------

2.1 Use the download script to download data
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Execute the following command to download the needed files
using the download script:

.. code-block:: bash

   chmod +x ./download.sh
   ./download.sh

Or download manually:


#. Download ``dev-v1.1.json`` and ``train-v1.1.json`` `here `__

.. code-block:: bash

   wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json
   wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json


#. Download ``glove.840B.300d.txt`` `here `__

.. code-block:: bash

   wget http://nlp.stanford.edu/data/glove.840B.300d.zip
   unzip glove.840B.300d.zip

2.2 Update configuration
^^^^^^^^^^^^^^^^^^^^^^^^

Modify ``nni/examples/trials/ga_squad/config.yml``\ ; here is the default configuration:

.. code-block:: yaml

   experimentName: ga-squad example
   trialCommand: python3 trial.py
   trialCodeDirectory: ~/nni/examples/trials/ga_squad

   trialGpuNumber: 0
   trialConcurrency: 1
   maxTrialNumber: 10
   maxExperimentDuration: 1h

   searchSpace: {} # hard-coded in tuner
   tuner:
     className: customer_tuner.CustomerTuner
     codeDirectory: ~/nni/examples/tuners/ga_customer_tuner
     classArgs:
       optimize_mode: maximize

   trainingService:
     platform: local

In the **trial** part, if you want to use a GPU to perform the architecture search, change ``trialGpuNumber`` from ``0`` to ``1``. You need to increase ``maxTrialNumber`` and ``maxExperimentDuration`` according to how long you want to wait for the search result.

2.3 Submit this job
^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

   nnictl create --config ~/nni/examples/trials/ga_squad/config.yml

3. Technical details about the trial
------------------------------------

3.1 How does it work
^^^^^^^^^^^^^^^^^^^^

The evolution-algorithm-based architecture for question answering has two different parts, just like any other example: the trial and the tuner.

3.2 The trial
^^^^^^^^^^^^^

The trial has a lot of different files, functions, and classes. Here we will only give most of those files a brief introduction:


* ``attention.py`` contains an implementation of the attention mechanism in TensorFlow.
* ``data.py`` contains functions for data preprocessing.
* ``evaluate.py`` contains the evaluation script.
* ``graph.py`` contains the definition of the computation graph.
* ``rnn.py`` contains an implementation of GRU in TensorFlow.
* ``train_model.py`` is a wrapper for the whole question answering model.

Among those files, ``trial.py`` and ``graph_to_tf.py`` are special.

``graph_to_tf.py`` has a function named ``graph_to_network``\ ; here is its skeleton code:

.. code-block:: python

   def graph_to_network(input1,
                        input2,
                        input1_lengths,
                        input2_lengths,
                        graph,
                        dropout_rate,
                        is_training,
                        num_heads=1,
                        rnn_units=256):
       topology = graph.is_topology()
       layers = dict()
       layers_sequence_lengths = dict()
       num_units = input1.get_shape().as_list()[-1]
       layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \
           positional_encoding(input1, scale=False, zero_pad=False)
       layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32))
       layers[0] = dropout(layers[0], dropout_rate, is_training)
       layers[1] = dropout(layers[1], dropout_rate, is_training)
       layers_sequence_lengths[0] = input1_lengths
       layers_sequence_lengths[1] = input2_lengths
       for _, topo_i in enumerate(topology):
           if topo_i == '|':
               continue
           if graph.layers[topo_i].graph_type == LayerType.input.value:
               # ......
           elif graph.layers[topo_i].graph_type == LayerType.attention.value:
               # ......
           # More layers to handle

As we can see, this function is actually a compiler that converts the internal model DAG configuration ``graph`` (which will be introduced in the ``Model configuration format`` section) into a TensorFlow computation graph.

.. code-block:: python

   topology = graph.is_topology()

performs a topological sort on the internal graph representation, and the code inside the loop:

.. code-block:: python

   for _, topo_i in enumerate(topology):

performs the actual conversion that maps each layer to a part of the TensorFlow computation graph.

3.3 The tuner
^^^^^^^^^^^^^

The tuner is much simpler than the trial. They actually share the same ``graph.py``. Besides, the tuner has a ``customer_tuner.py``\ ; the most important class in it is ``CustomerTuner``\ :

.. code-block:: python

   class CustomerTuner(Tuner):
       # ......

       def generate_parameters(self, parameter_id):
           """Returns a set of trial graph config, as a serializable object.
           parameter_id : int
           """
           if len(self.population) <= 0:
               logger.debug("the len of poplution lower than zero.")
               raise Exception('The population is empty')
           pos = -1
           for i in range(len(self.population)):
               if self.population[i].result == None:
                   pos = i
                   break
           if pos != -1:
               indiv = copy.deepcopy(self.population[pos])
               self.population.pop(pos)
               temp = json.loads(graph_dumps(indiv.config))
           else:
               random.shuffle(self.population)
               if self.population[0].result > self.population[1].result:
                   self.population[0] = self.population[1]
               indiv = copy.deepcopy(self.population[0])
               self.population.pop(1)
               indiv.mutation()
               graph = indiv.config
               temp = json.loads(graph_dumps(graph))

           # ......

As we can see, the overloaded method ``generate_parameters`` implements a pretty naive mutation algorithm. These lines:

.. code-block:: python

   if self.population[0].result > self.population[1].result:
       self.population[0] = self.population[1]
   indiv = copy.deepcopy(self.population[0])

control the mutation process. The tuner always takes two random individuals from the population, keeping and mutating only the one with the better result.

3.4 Model configuration format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the model configuration, which is passed from the tuner to the trial in the architecture search procedure.

.. code-block:: json

   {
       "max_layer_num": 50,
       "layers": [
           {
               "input_size": 0,
               "type": 3,
               "output_size": 1,
               "input": [],
               "size": "x",
               "output": [4, 5],
               "is_delete": false
           },
           {
               "input_size": 0,
               "type": 3,
               "output_size": 1,
               "input": [],
               "size": "y",
               "output": [4, 5],
               "is_delete": false
           },
           {
               "input_size": 1,
               "type": 4,
               "output_size": 0,
               "input": [6],
               "size": "x",
               "output": [],
               "is_delete": false
           },
           {
               "input_size": 1,
               "type": 4,
               "output_size": 0,
               "input": [5],
               "size": "y",
               "output": [],
               "is_delete": false
           },
           {"Comment": "More layers will be here for actual graphs."}
       ]
   }

Every model configuration has a "layers" section, which is a JSON list of layer definitions. The definition of each layer is also a JSON object, where:


* ``type`` is the type of the layer. 0, 1, 2, 3, 4 correspond to attention, self-attention, RNN, input, and output layers, respectively.
* ``size`` is the length of the output. "x" and "y" correspond to document length and question length, respectively.
* ``input_size`` is the number of inputs the layer has.
* ``input`` contains the indices of the layers taken as input by this layer.
* ``output`` contains the indices of the layers that use this layer's output as their input.
* ``is_delete`` means whether the layer is still available.

diff --git a/docs/en_US/TrialExample/Trials.rst b/docs/en_US/TrialExample/Trials.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2175f6cdf268052dae022e29fc788b2066125285
--- /dev/null
+++ b/docs/en_US/TrialExample/Trials.rst

.. role:: raw-html(raw)
   :format: html


Write a Trial Run on NNI
========================

A **Trial** in NNI is an individual attempt at applying a configuration (e.g., a set of hyper-parameters) to a model.

To define an NNI trial, you need to first define the set of parameters (i.e., the search space) and then update the model. NNI provides two approaches for you to define a trial: `NNI API <#nni-api>`__ and `NNI Python annotation <#nni-annotation>`__. You can also refer to `here <#more-examples>`__ for more trial examples.

:raw-html:`` 

NNI API
-------

Step 1 - Prepare a SearchSpace parameters file.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An example is shown below:

.. code-block:: json

   {
       "dropout_rate":{"_type":"uniform","_value":[0.1,0.5]},
       "conv_size":{"_type":"choice","_value":[2,3,5,7]},
       "hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
       "learning_rate":{"_type":"uniform","_value":[0.0001, 0.1]}
   }

Refer to `SearchSpaceSpec <../Tutorial/SearchSpaceSpec.rst>`__ to learn more about search spaces. The tuner will generate configurations from this search space, that is, choose a value for each hyperparameter from its range.

Step 2 - Update model code
^^^^^^^^^^^^^^^^^^^^^^^^^^


*
  Import NNI

  Include ``import nni`` in your trial code to use NNI APIs.

*
  Get configuration from Tuner

.. code-block:: python

   RECEIVED_PARAMS = nni.get_next_parameter()

``RECEIVED_PARAMS`` is an object, for example:

``{"conv_size": 2, "hidden_size": 124, "learning_rate": 0.0307, "dropout_rate": 0.2029}``.


* Report metric data periodically (optional)

.. code-block:: python

   nni.report_intermediate_result(metrics)

``metrics`` can be any Python object. If users use the NNI built-in tuner/assessor, ``metrics`` can only have two formats: 1) a number, e.g., a float or an int, or 2) a dict object that has a key named ``default`` whose value is a number. These ``metrics`` are reported to the `assessor <../Assessor/BuiltinAssessor.rst>`__. Often, ``metrics`` is the periodically evaluated loss or accuracy. Both formats are illustrated in the snippet below.

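For instance, both of these calls report a valid intermediate metric to a built-in tuner/assessor (the extra ``loss`` key in the dict form is an illustrative assumption; only ``default`` is required):

.. code-block:: python

   import nni

   # Format 1: a plain number.
   nni.report_intermediate_result(0.93)

   # Format 2: a dict with the required "default" key; other keys are free-form.
   nni.report_intermediate_result({'default': 0.93, 'loss': 0.25})
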
* Report performance of the configuration

.. code-block:: python

   nni.report_final_result(metrics)

``metrics`` can also be any Python object. If users use the NNI built-in tuner/assessor, ``metrics`` follows the same format rule as in ``report_intermediate_result``\ ; the number indicates the model's performance, for example, the model's accuracy or loss. These ``metrics`` are reported to the `tuner <../Tuner/BuiltinTuner.rst>`__.

Step 3 - Enable NNI API
^^^^^^^^^^^^^^^^^^^^^^^

To enable NNI API mode, you need to set useAnnotation to *false* and provide the path of the SearchSpace file that was defined in step 1:

.. code-block:: yaml

   useAnnotation: false
   searchSpacePath: /path/to/your/search_space.json

You can refer to `here <../Tutorial/ExperimentConfig.rst>`__ for more information about how to set up experiment configurations.

Please refer to `here <../sdk_reference.rst>`__ for more APIs (e.g., ``nni.get_sequence_id()``\ ) provided by NNI.

:raw-html:`` 

NNI Python Annotation
---------------------

An alternative to writing a trial is to use NNI's annotation syntax for Python. NNI annotations are simple, similar to comments. You don't have to make structural changes to your existing code. With a few lines of NNI annotation, you will be able to:


* annotate the variables you want to tune
* specify the range in which you want to tune the variables
* annotate which variable you want to report as an intermediate result to the ``assessor``
* annotate which variable you want to report as the final result (e.g., model accuracy) to the ``tuner``.

Again, taking MNIST as an example, it only requires two steps to write a trial with NNI Annotation.

Step 1 - Update codes with annotations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The following is a TensorFlow code snippet for NNI Annotation, where the highlighted four lines are annotations that:


#. tune batch_size and dropout_rate
#. report test_acc every 100 steps
#. lastly report test_acc as the final result.

It's worth noting that, as these newly added codes are merely annotations, you can still run your code as usual in environments without NNI installed.

.. code-block:: diff

       with tf.Session() as sess:
           sess.run(tf.global_variables_initializer())
   +       """@nni.variable(nni.choice(50, 250, 500), name=batch_size)"""
           batch_size = 128
           for i in range(10000):
               batch = mnist.train.next_batch(batch_size)
   +           """@nni.variable(nni.choice(0.1, 0.5), name=dropout_rate)"""
               dropout_rate = 0.5
               mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
                                                       mnist_network.labels: batch[1],
                                                       mnist_network.keep_prob: dropout_rate})
               if i % 100 == 0:
                   test_acc = mnist_network.accuracy.eval(
                       feed_dict={mnist_network.images: mnist.test.images,
                                  mnist_network.labels: mnist.test.labels,
                                  mnist_network.keep_prob: 1.0})
   +               """@nni.report_intermediate_result(test_acc)"""

           test_acc = mnist_network.accuracy.eval(
               feed_dict={mnist_network.images: mnist.test.images,
                          mnist_network.labels: mnist.test.labels,
                          mnist_network.keep_prob: 1.0})
   +       """@nni.report_final_result(test_acc)"""

**NOTE**\ :


* ``@nni.variable`` affects the line that follows it, which should be an assignment statement whose left-hand side must be the same as the keyword ``name`` in the ``@nni.variable`` statement.
* ``@nni.report_intermediate_result``\ /\ ``@nni.report_final_result`` sends the data to the assessor/tuner at that line.

For more information about annotation syntax and its usage, please refer to `Annotation <../Tutorial/AnnotationSpec.rst>`__.

Step 2 - Enable NNI Annotation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the YAML config file, you need to set *useAnnotation* to true to enable NNI annotation:

.. code-block:: bash

   useAnnotation: true

Standalone mode for debugging
-----------------------------

NNI supports a standalone mode in which trial code can run without starting an NNI experiment. This makes it more convenient to find bugs in trial code. NNI annotation natively supports standalone mode, as the added NNI-related lines are comments. The NNI trial APIs behave differently in standalone mode: some APIs return dummy values, and some APIs do not really report values. Please refer to the following list for the full set of these APIs.

.. code-block:: python

   # NOTE: please assign default values to the hyperparameters in your trial code
   nni.get_next_parameter # return {}
   nni.report_final_result # have log printed on stdout, but does not report
   nni.report_intermediate_result # have log printed on stdout, but does not report
   nni.get_experiment_id # return "STANDALONE"
   nni.get_trial_id # return "STANDALONE"
   nni.get_sequence_id # return 0

You can try standalone mode with the :githublink:`mnist example `. Simply run ``python3 mnist.py`` under the code directory. The trial code should successfully run with the default hyperparameter values.

For more information on debugging, please refer to `How to Debug <../Tutorial/HowToDebug.rst>`__.

Where are my trials?
--------------------

Local Mode
^^^^^^^^^^

In NNI, every trial has a dedicated directory to which it can output its own data. In each trial, an environment variable called ``NNI_OUTPUT_DIR`` is exported. Under this directory, you can find each trial's code, data, and other logs. In addition, each trial's log (including stdout) is redirected to a file named ``trial.log`` under that directory.

If NNI Annotation is used, the trial's converted code is in another temporary directory. You can check that in a file named ``run.sh`` under the directory indicated by ``NNI_OUTPUT_DIR``. The second line (i.e., the ``cd`` command) of this file changes directory to the actual directory where the code is located. Below is an example of ``run.sh``\ :

.. code-block:: bash

   #!/bin/bash
   cd /tmp/user_name/nni/annotation/tmpzj0h72x6 #This is the actual directory
   export NNI_PLATFORM=local
   export NNI_SYS_DIR=/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$
   export NNI_TRIAL_JOB_ID=nrbb2
   export NNI_OUTPUT_DIR=/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$
   export NNI_TRIAL_SEQ_ID=1
   export MULTI_PHASE=false
   export CUDA_VISIBLE_DEVICES=
   eval python3 mnist.py 2>/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$/stderr
   echo $? `date +%s%3N` >/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$/.nni/state

Other Modes
^^^^^^^^^^^

When running trials on other platforms, such as a remote machine or PAI, the environment variable ``NNI_OUTPUT_DIR`` only refers to the output directory of the trial; the trial code and ``run.sh`` might not be there.
However, ``trial.log`` will be transmitted back to the local machine into the trial's directory, which defaults to ``~/nni-experiments/$experiment_id$/trials/$trial_id$/``.

For more information, please refer to `HowToDebug <../Tutorial/HowToDebug.rst>`__.

:raw-html:`` 

More Trial Examples
-------------------


* `Write logs to trial output directory for tensorboard <../Tutorial/Tensorboard.rst>`__
* `MNIST examples `__
* `Finding out best optimizer for Cifar10 classification `__
* `How to tune Scikit-learn on NNI `__
* `Automatic Model Architecture Search for Reading Comprehension. `__
* `Tuning GBDT on NNI `__
* `Tuning RocksDB on NNI `__

diff --git a/docs/en_US/Tuner/AnnealTuner.rst b/docs/en_US/Tuner/AnnealTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d24ecb7a20cd10a48e90446235f926ba3678bc8
--- /dev/null
+++ b/docs/en_US/Tuner/AnnealTuner.rst

Anneal Tuner
============

This simple annealing algorithm begins by sampling from the prior but tends over time to sample from points closer and closer to the best ones observed. The algorithm is a simple variation on random search that leverages smoothness in the response surface. The annealing rate is not adaptive.

Usage
-----

classArgs Requirements
^^^^^^^^^^^^^^^^^^^^^^

* **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.

Example Configuration
^^^^^^^^^^^^^^^^^^^^^

.. code-block:: yaml

   # config.yml
   tuner:
     name: Anneal
     classArgs:
       optimize_mode: maximize

diff --git a/docs/en_US/Tuner/BatchTuner.rst b/docs/en_US/Tuner/BatchTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d44b602c095e38bdbea13d42944bb7f560822492
--- /dev/null
+++ b/docs/en_US/Tuner/BatchTuner.rst

Batch Tuner
===========

Batch tuner allows users to simply provide several configurations (i.e., choices of hyper-parameters) for their trial code. After finishing all the configurations, the experiment is done. Batch tuner only supports the type ``choice`` in the `search space spec <../Tutorial/SearchSpaceSpec.rst>`__.

Suggested scenario: If the configurations you want to try have already been decided, you can list them in the SearchSpace file (using ``choice``) and run them using the batch tuner.

Usage
-----

Example Configuration
^^^^^^^^^^^^^^^^^^^^^

.. code-block:: yaml

   # config.yml
   tuner:
     name: BatchTuner

Note that the search space for BatchTuner should look like:

.. code-block:: json

   {
       "combine_params":
       {
           "_type" : "choice",
           "_value" : [{"optimizer": "Adam", "learning_rate": 0.00001},
                       {"optimizer": "Adam", "learning_rate": 0.0001},
                       {"optimizer": "Adam", "learning_rate": 0.001},
                       {"optimizer": "SGD", "learning_rate": 0.01},
                       {"optimizer": "SGD", "learning_rate": 0.005},
                       {"optimizer": "SGD", "learning_rate": 0.0002}]
       }
   }

The search space file should include the top-level key ``combine_params``. The type of the params in the search space must be ``choice``, and the ``values`` must include all the combined parameter values.

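Each trial then receives one element of that ``_value`` list as a plain dict via ``nni.get_next_parameter()``. A minimal sketch of the trial side (the fallback default is an assumption for standalone runs, where an empty dict is returned):

.. code-block:: python

   import nni

   # One of the listed combinations, e.g. {"optimizer": "SGD", "learning_rate": 0.01}.
   params = nni.get_next_parameter() or {'optimizer': 'Adam', 'learning_rate': 0.001}
   print(params['optimizer'], params['learning_rate'])
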
diff --git a/docs/en_US/Tuner/BohbAdvisor.rst b/docs/en_US/Tuner/BohbAdvisor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9f43f3ee7a838379dd27340348a542312c948eda
--- /dev/null
+++ b/docs/en_US/Tuner/BohbAdvisor.rst

BOHB Advisor
============

BOHB is a robust and efficient hyperparameter tuning algorithm described in `this reference paper `__. BO is an abbreviation for "Bayesian Optimization" and HB is an abbreviation for "Hyperband".

BOHB relies on HB (Hyperband) to determine how many configurations to evaluate with which budget, but it **replaces the random selection of configurations at the beginning of each HB iteration with a model-based search (Bayesian Optimization)**. Once the desired number of configurations for the iteration is reached, the standard successive halving procedure is carried out using these configurations. We keep track of the performance of all function evaluations g(x, b) of configurations x on all budgets b to use as a basis for our models in later iterations.

Below we divide the introduction of the BOHB process into two parts:

HB (Hyperband)
^^^^^^^^^^^^^^

We follow Hyperband's way of choosing the budgets and continue to use SuccessiveHalving. For more details, you can refer to `Hyperband in NNI `__ and the `reference paper for Hyperband `__. This procedure is summarized by the pseudocode below.


.. image:: ../../img/bohb_1.png
   :target: ../../img/bohb_1.png
   :alt: 


BO (Bayesian Optimization)
^^^^^^^^^^^^^^^^^^^^^^^^^^

The BO part of BOHB closely resembles TPE, with one major difference: we opted for a single multidimensional KDE, compared to the hierarchy of one-dimensional KDEs used in TPE, in order to better handle interaction effects in the input space.

The Tree Parzen Estimator (TPE) uses a KDE (kernel density estimator) to model the densities:


.. image:: ../../img/bohb_2.png
   :target: ../../img/bohb_2.png
   :alt: 


To fit useful KDEs, we require a minimum number of data points Nmin; this is set to d + 1 for our experiments, where d is the number of hyperparameters. To build a model as early as possible, we do not wait until Nb = \|Db\|, the number of observations for budget b, is large enough to satisfy q · Nb ≥ Nmin. Instead, after initializing with Nmin + 2 random configurations, we choose the


.. image:: ../../img/bohb_3.png
   :target: ../../img/bohb_3.png
   :alt: 


best and worst configurations, respectively, to model the two densities.

Note that we also sample a constant fraction of the configurations, named the **random fraction**, uniformly at random.

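To make the density-ratio idea concrete, here is a toy one-dimensional sketch of this TPE-style sampling (BOHB's actual KDE is multidimensional and budget-aware; ``scipy`` and the numbers below are illustrative assumptions, cf. the ``top_n_percent`` and ``num_samples`` arguments described below):

.. code-block:: python

   import numpy as np
   from scipy.stats import gaussian_kde

   # Hyperparameter values observed so far, split into "good" and "bad"
   # according to their metric.
   good = np.array([0.18, 0.21, 0.22, 0.25])
   bad = np.array([0.05, 0.40, 0.60, 0.80, 0.95])
   l, g = gaussian_kde(good), gaussian_kde(bad)

   # Sample candidates from the "good" density and keep the one that
   # maximizes the ratio l(x)/g(x).
   candidates = l.resample(64).ravel()
   best = max(candidates, key=lambda x: l(x)[0] / max(g(x)[0], 1e-32))
   print('next configuration to try:', best)
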
Workflow
--------


.. image:: ../../img/bohb_6.jpg
   :target: ../../img/bohb_6.jpg
   :alt: 


This image shows the workflow of BOHB. Here we set max_budget = 9, min_budget = 1, eta = 3, and the others to their defaults. In this case, s_max = 2, so we continuously run the {s=2, s=1, s=0, s=2, s=1, s=0, ...} cycle. In each stage of SuccessiveHalving (the orange box), we pick the top 1/eta configurations and run them again with more budget, repeating the SuccessiveHalving stage until the end of the iteration. At the same time, we collect the configurations, budgets, and final metrics of each trial and use these to build a multidimensional KDE model keyed by budget. The multidimensional KDE is used to guide the selection of configurations for the next iteration.

The sampling procedure (using the multidimensional KDE to guide selection) is summarized by the pseudocode below.


.. image:: ../../img/bohb_4.png
   :target: ../../img/bohb_4.png
   :alt: 


Usage
-----

Installation
^^^^^^^^^^^^

BOHB advisor requires the `ConfigSpace `__ package. ConfigSpace can be installed using the following command.

.. code-block:: bash

   pip install nni[BOHB]

classArgs Requirements
^^^^^^^^^^^^^^^^^^^^^^

* **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
* **min_budget** (*int, optional, default = 1*) - The smallest budget to assign to a trial job (the budget can be the number of mini-batches or epochs). Needs to be positive.
* **max_budget** (*int, optional, default = 3*) - The largest budget to assign to a trial job (the budget can be the number of mini-batches or epochs). Needs to be larger than min_budget.
* **eta** (*int, optional, default = 3*) - In each iteration, a complete run of sequential halving is executed. In it, after evaluating each configuration on the same subset size, only a fraction of 1/eta of them 'advances' to the next round. Must be greater than or equal to 2.
* **min_points_in_model** (*int, optional, default = None*): number of observations required to start building a KDE. The default 'None' means dim+1; when the number of completed trials in a budget is equal to or larger than ``max{dim+1, min_points_in_model}``, BOHB will start to build a KDE model for this budget and then use it to guide configuration selection. Needs to be positive. (dim means the number of hyperparameters in the search space.)
* **top_n_percent** (*int, optional, default = 15*): percentage (between 1 and 99) of the observations that are considered good. Good points and bad points are used for building the KDE models. For example, if you have 100 observed trials and top_n_percent is 15, then the top 15% of points will be used for building the good-points model "l(x)". The remaining 85% of points will be used for building the bad-points model "g(x)".
* **num_samples** (*int, optional, default = 64*): number of samples used to optimize EI. We sample "num_samples" points and compare the values of l(x)/g(x). We then return the point with the maximum l(x)/g(x) value as the next configuration if optimize_mode is ``maximize``; otherwise, we return the one with the minimum value.
* **random_fraction** (*float, optional, default = 0.33*): fraction of purely random configurations that are sampled from the prior without the model.
* **bandwidth_factor** (*float, optional, default = 3.0*): to encourage diversity, the points proposed to optimize EI are sampled from a 'widened' KDE where the bandwidth is multiplied by this factor. We suggest using the default value if you are not familiar with KDEs.
* **min_bandwidth** (*float, optional, default = 0.001*): to keep diversity, even when all (good) samples have the same value for one of the parameters, a minimum bandwidth (default: 1e-3) is used instead of zero. We suggest using the default value if you are not familiar with KDEs.
* **config_space** (*str, optional*): directly use a .pcs file serialized by `ConfigSpace ` in "pcs new" format. In this case, the search space file (if provided in the config) will be ignored. Note that this path needs to be an absolute path; relative paths are currently not supported.

*Please note that the float type currently only supports decimal representations. You have to use 0.333 instead of 1/3 and 0.001 instead of 1e-3.*


Config File
^^^^^^^^^^^

To use BOHB, you should add the following spec to your experiment's YAML config file:

.. code-block:: yaml

   advisor:
     builtinAdvisorName: BOHB
     classArgs:
       optimize_mode: maximize
       min_budget: 1
       max_budget: 27
       eta: 3
       min_points_in_model: 7
       top_n_percent: 15
       num_samples: 64
       random_fraction: 0.33
       bandwidth_factor: 3.0
       min_bandwidth: 0.001

The ``classArgs`` fields here are the ones documented in the classArgs requirements above (including the optional ``config_space``).

File Structure
--------------

The advisor has a lot of different files, functions, and classes. Here, we will only give most of those files a brief introduction:


* ``bohb_advisor.py`` Definition of BOHB; handles interaction with the dispatcher, including generating new trials and processing results. Also includes the implementation of the HB (Hyperband) part.
* ``config_generator.py`` Includes the implementation of the BO (Bayesian Optimization) part. The function *get_config* generates new configurations based on BO; the function *new_result* updates the model with the new result.

Experiment
----------

MNIST with BOHB
^^^^^^^^^^^^^^^

code implementation: :githublink:`examples/trials/mnist-advisor `

We chose BOHB to build a CNN on the MNIST dataset. The following are our final experimental results:


.. image:: ../../img/bohb_5.png
   :target: ../../img/bohb_5.png
   :alt: 


More experimental results can be found in the `reference paper `__. We can see that BOHB makes good use of previous results and has a balanced trade-off between exploration and exploitation.

diff --git a/docs/en_US/Tuner/BuiltinTuner.rst b/docs/en_US/Tuner/BuiltinTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a6eecd9c934029ed11b11d4d734b50d2451c3867
--- /dev/null
+++ b/docs/en_US/Tuner/BuiltinTuner.rst

HyperParameter Tuning with NNI Built-in Tuners
==============================================

To fit a machine/deep learning model to different tasks/problems, hyperparameters always need to be tuned. Automating the process of hyperparameter tuning always requires a good tuning algorithm. NNI provides state-of-the-art tuning algorithms as part of our built-in tuners and makes them easy to use. Below is a brief summary of NNI's current built-in tuners:

Note: Click the **Tuner's name** to get the Tuner's installation requirements, suggested scenario, and an example configuration. A link to a detailed description of each algorithm is located at the end of the suggested scenario for each tuner. Here is an `article <../CommunitySharings/HpoComparison.rst>`__ comparing different Tuners on several problems.

Currently, we support the following algorithms:

.. list-table::
   :header-rows: 1
   :widths: auto

   * - Tuner
     - Brief Introduction of Algorithm

   * - `TPE <./TpeTuner.rst>`__
     - The Tree-structured Parzen Estimator (TPE) is a sequential model-based optimization (SMBO) approach. SMBO methods sequentially construct models to approximate the performance of hyperparameters based on historical measurements, and then subsequently choose new hyperparameters to test based on this model. `Reference Paper `__

       TPE, as a black-box optimization method, can be used in various scenarios and shows good performance in general, especially when you have limited computational resources and can only try a small number of trials. From a large number of experiments, we found that TPE is far better than Random Search.

   * - `Random Search <./RandomTuner.rst>`__
     - *Random Search for Hyper-Parameter Optimization* shows that Random Search might be surprisingly simple and effective. We suggest using Random Search as a baseline when we have no knowledge about the prior distribution of hyper-parameters.
       `Reference Paper `__

       Random search is suggested when each trial does not take very long (e.g., each trial can be completed very quickly, or early stopped by the assessor) and you have enough computational resources. It's also useful if you want to uniformly explore the search space. Random Search can be considered a baseline search algorithm.

   * - `Anneal <./AnnealTuner.rst>`__
     - This simple annealing algorithm begins by sampling from the prior, but tends over time to sample from points closer and closer to the best ones observed. This algorithm is a simple variation on random search that leverages smoothness in the response surface. The annealing rate is not adaptive.

       Anneal is suggested when each trial does not take very long and you have enough computational resources (very similar to Random Search). It's also useful when the variables in the search space can be sampled from some prior distribution.

   * - `Naïve Evolution <./EvolutionTuner.rst>`__
     - Naïve Evolution comes from Large-Scale Evolution of Image Classifiers. It randomly initializes a population based on the search space. For each generation, it chooses better ones and does some mutation (e.g., changing a hyperparameter, adding/removing one layer) on them to get the next generation. Naïve Evolution requires many trials to work, but it's very simple and easy to extend with new features. `Reference paper `__

       Its computational resource requirements are relatively high. Specifically, it requires a large initial population to avoid falling into a local optimum. If your trials are short or leverage an assessor, this tuner is a good choice. It is also suggested when your trial code supports weight transfer; that is, a trial can inherit the converged weights from its parent(s). This can greatly speed up the training process.

   * - `SMAC <./SmacTuner.rst>`__
     - SMAC is based on Sequential Model-Based Optimization (SMBO). It adapts the most prominent previously used model class (Gaussian stochastic process models) and introduces the model class of random forests to SMBO in order to handle categorical parameters. The SMAC supported by NNI is a wrapper of the SMAC3 GitHub repo. Note that SMAC needs to be installed via the ``pip install nni[SMAC]`` command. `Reference Paper, `__ `GitHub Repo `__

       **Please note that SMAC doesn't currently support running on Windows**. For the specific reason, please refer to this `GitHub issue `__.

       Similar to TPE, SMAC is also a black-box tuner that can be tried in various scenarios and is suggested when computational resources are limited. It is optimized for discrete hyperparameters; thus, it's suggested when most of your hyperparameters are discrete.

   * - `Batch tuner <./BatchTuner.rst>`__
     - Batch tuner allows users to simply provide several configurations (i.e., choices of hyper-parameters) for their trial code. After finishing all the configurations, the experiment is done. Batch tuner only supports the type choice in the search space spec.

       If the configurations you want to try have been decided beforehand, you can list them in the search space file (using ``choice``) and run them using the batch tuner.
       `Detailed Description <./BatchTuner.rst>`__

   * - `Grid Search <./GridsearchTuner.rst>`__
     - Grid Search performs an exhaustive search through the search space.

       This is suggested when the search space is small, i.e., when it is feasible to exhaustively sweep the whole search space.
   * - `Hyperband <./HyperbandAdvisor.rst>`__
     - Hyperband tries to use limited resources to explore as many configurations as possible and returns the most promising ones as the final result. The basic idea is to generate many configurations and run them with a small budget. The least promising half of the configurations are thrown out, and the remaining ones are further trained along with a selection of new configurations. The size of these populations is sensitive to resource constraints (e.g., allotted search time). `Reference Paper `__

       This is suggested when you have limited computational resources but a relatively large search space. It performs well in scenarios where intermediate results can indicate good or bad final results to some extent, for example, when models that are more accurate early in training are also more accurate later on.

   * - `Network Morphism <./NetworkmorphismTuner.rst>`__
     - Network Morphism provides functions to automatically search for deep learning architectures. It generates child networks that inherit the knowledge from the parent network they are morphed from. This includes changes in depth, width, and skip-connections. Next, it estimates the value of a child network using historical architecture and metric pairs. Then it selects the most promising one to train. `Reference Paper `__

       This is suggested when you want to apply deep learning methods to your task but have no idea how to choose or design a network. You may modify this :githublink:`example ` to fit your own dataset and your own data augmentation method. You can also change the batch size, learning rate, or optimizer. Currently, this tuner only supports the computer vision domain.

   * - `Metis Tuner <./MetisTuner.rst>`__
     - Metis offers the following benefits when it comes to tuning parameters: while most tools only predict the optimal configuration, Metis gives you two outputs: (a) a current prediction of the optimal configuration, and (b) a suggestion for the next trial. No more guesswork. While most tools assume training datasets do not have noisy data, Metis actually tells you if you need to re-sample a particular hyper-parameter. `Reference Paper `__

       Similar to TPE and SMAC, Metis is a black-box tuner. If your system takes a long time to finish each trial, Metis is more favorable than other approaches such as random search. Furthermore, Metis provides guidance on subsequent trials. Here is an :githublink:`example ` of the use of Metis. Users only need to send the final result, such as ``accuracy``, to the tuner by calling the NNI SDK.

       Note that the only acceptable search space types are ``quniform``, ``uniform``, ``randint``, and numerical ``choice``. Only numerical values are supported, since the values will be used to evaluate the 'distance' between different points.

   * - `BOHB <./BohbAdvisor.rst>`__
     - BOHB is a follow-up work to Hyperband. It targets the weakness of Hyperband that new configurations are generated randomly without leveraging finished trials. In the name BOHB, HB means Hyperband and BO means Bayesian Optimization. BOHB leverages finished trials by building multiple TPE models; a proportion of new configurations are generated through these models. `Reference Paper `__

       Similar to Hyperband, BOHB is suggested when you have limited computational resources but a relatively large search space. It performs well in scenarios where intermediate results can indicate good or bad final results to some extent.
       In this case, it may converge to a better configuration than Hyperband due to its usage of Bayesian optimization.

   * - `GP Tuner <./GPTuner.rst>`__
     - The Gaussian Process Tuner is a sequential model-based optimization (SMBO) approach with a Gaussian Process as the surrogate. `Reference Paper `__, `Github Repo `__

       Note that the only acceptable search space types are ``randint``, ``uniform``, ``quniform``, ``loguniform``, ``qloguniform``, and numerical ``choice``. Only numerical values are supported, since the values will be used to evaluate the 'distance' between different points.

       As a strategy in a Sequential Model-Based Global Optimization (SMBO) algorithm, GP Tuner uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) to solve, and common tools can be employed to solve it. Therefore, GP Tuner is most adequate for situations where the function to be optimized is very expensive to evaluate. GP can be used when computational resources are limited. However, GP Tuner has a computational cost that grows at *O(N^3)* due to the requirement of inverting the Gram matrix, so it's not suitable when lots of trials are needed.

   * - `PBT Tuner <./PBTTuner.rst>`__
     - PBT Tuner is a simple asynchronous optimization algorithm that effectively utilizes a fixed computational budget to jointly optimize a population of models and their hyperparameters to maximize performance. `Reference Paper `__

       Population Based Training (PBT) bridges and extends parallel search methods and sequential optimization methods. It requires relatively few computational resources, periodically inheriting weights from currently well-performing models to explore better ones. With PBTTuner, users finally get a trained model, rather than a configuration that could reproduce the trained model by training it from scratch. This is because model weights are inherited periodically throughout the whole search process. PBT can also be seen as a training approach. If you don't need a specific configuration, but just expect a good model, PBTTuner is a good choice.

   * - `DNGO Tuner <./DngoTuner.rst>`__
     - DNGO uses neural networks as an alternative to GPs to model distributions over functions in Bayesian optimization.

       It is applicable to large-scale hyperparameter optimization: Bayesian optimization that rapidly finds competitive models on benchmark object recognition tasks using convolutional networks, and on image caption generation using neural language models.

Usage of Built-in Tuners
------------------------

Using a built-in tuner provided by the NNI SDK requires one to declare the **name** and **classArgs** in the ``config.yml`` file.
Click a tuner's name in the table above to see its specification.

Note: Some built-in tuners have dependencies that need to be installed using ``pip install nni[]``; for example, SMAC's dependencies can be installed using ``pip install nni[SMAC]``.

diff --git a/docs/en_US/Tuner/CustomizeAdvisor.rst b/docs/en_US/Tuner/CustomizeAdvisor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..202dbe3ef1856ab94b473a3bb27f04115220b206
--- /dev/null
+++ b/docs/en_US/Tuner/CustomizeAdvisor.rst

**How To** - Customize Your Own Advisor
=======================================

*Warning: API is subject to change in future releases.*

An advisor targets the scenario where an AutoML algorithm needs the methods of both a tuner and an assessor.
An advisor is similar to a tuner in that it receives trial parameter requests and final results, and generates trial parameters. It is also similar to an assessor in that it receives intermediate results and trials' end states, and can send trial kill commands. Note that if you use an advisor, a tuner and an assessor are not allowed to be used at the same time.

If you want to implement a customized advisor, you only need to:

**1. Define an Advisor inheriting from the MsgDispatcherBase class.** For example:

.. code-block:: python

   from nni.runtime.msg_dispatcher_base import MsgDispatcherBase

   class CustomizedAdvisor(MsgDispatcherBase):
       def __init__(self, ...):
           ...

**2. Implement the methods whose names start with ``handle_``, except ``handle_request``.**

You might find the `docs <../autotune_ref.rst#Advisor>`__ for ``MsgDispatcherBase`` helpful.

**3. Configure your customized Advisor in the experiment YAML config file.**

This is similar to tuners and assessors. NNI needs to locate your customized Advisor class and instantiate it, so you need to specify the location of the customized Advisor class and pass literal values as parameters to the ``__init__`` constructor.

.. code-block:: yaml

   advisor:
     codeDir: /home/abc/myadvisor
     classFileName: my_customized_advisor.py
     className: CustomizedAdvisor
     # Any parameter need to pass to your advisor class __init__ constructor
     # can be specified in this optional classArgs field, for example
     classArgs:
       arg1: value1

**Note that** the working directory of your advisor is ``/nni-experiments//log``, which can be retrieved with the environment variable ``NNI_LOG_DIRECTORY``.

Example
-------

Here we provide an :githublink:`example `.

diff --git a/docs/en_US/Tuner/CustomizeTuner.rst b/docs/en_US/Tuner/CustomizeTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b79563662169982198f4e124eee4b90ad10c7bec
--- /dev/null
+++ b/docs/en_US/Tuner/CustomizeTuner.rst

Customize Tuner
===============

NNI provides state-of-the-art tuning algorithms as built-in tuners. NNI also supports building a tuner by yourself for your tuning needs.

If you want to implement your own tuning algorithm, you can implement a customized tuner; there are three things to do:


#. Inherit the base Tuner class
#. Implement the ``receive_trial_result``\ , ``generate_parameters``\ , and ``update_search_space`` functions
#. Configure your customized tuner in the experiment YAML config file

Here is an example:

**1. Inherit the base Tuner class**

.. code-block:: python

   from nni.tuner import Tuner

   class CustomizedTuner(Tuner):
       def __init__(self, ...):
           ...

**2. Implement receive_trial_result, generate_parameters and update_search_space functions**

.. code-block:: python

   from nni.tuner import Tuner

   class CustomizedTuner(Tuner):
       def __init__(self, ...):
           ...

       def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
           '''
           Receive trial's final result.
           parameter_id: int
           parameters: object created by 'generate_parameters()'
           value: final metrics of the trial, including default metric
           '''
           # your code implements here.
           ...

       def generate_parameters(self, parameter_id, **kwargs):
           '''
           Returns a set of trial (hyper-)parameters, as a serializable object
           parameter_id: int
           '''
           # your code implements here.
           return your_parameters
           ...

       def update_search_space(self, search_space):
           '''
           Tuners are advised to support updating search space at run-time.
+           If a tuner can only set the search space once before generating the first hyper-parameters,
+           it should explicitly document this behaviour.
+           search_space: JSON object created by experiment owner
+           '''
+           # your code goes here.
+           ...
+
+``receive_trial_result`` receives ``parameter_id, parameters, value`` as input. The ``value`` object the tuner receives is exactly the same value that the trial sends.
+
+The ``your_parameters`` object returned from the ``generate_parameters`` function will be packaged as a JSON object by the NNI SDK. The NNI SDK will then unpack the JSON object, so the trial will receive the exact same ``your_parameters`` from the tuner.
+
+For example, if you implement ``generate_parameters`` like this:
+
+.. code-block:: python
+
+   def generate_parameters(self, parameter_id, **kwargs):
+       '''
+       Returns a set of trial (hyper-)parameters, as a serializable object
+       parameter_id: int
+       '''
+       # your code goes here.
+       return {"dropout": 0.3, "learning_rate": 0.4}
+
+It means your tuner will always generate the parameters ``{"dropout": 0.3, "learning_rate": 0.4}``. The trial will then receive ``{"dropout": 0.3, "learning_rate": 0.4}`` by calling the API ``nni.get_next_parameter()``. Once the trial ends with a result (normally some kind of metric), it can send the result to the tuner by calling the API ``nni.report_final_result()``, for example ``nni.report_final_result(0.93)``. Then your tuner's ``receive_trial_result`` function will receive the result like:
+
+.. code-block:: python
+
+   parameter_id = 82347
+   parameters = {"dropout": 0.3, "learning_rate": 0.4}
+   value = 0.93
+
+**Note that** the working directory of your tuner is ``<home>/nni-experiments/<experiment_id>/log``, which can be retrieved from the environment variable ``NNI_LOG_DIRECTORY``. Therefore, if you want to access a file (e.g., ``data.txt``) in the directory of your own tuner, you cannot use ``open('data.txt', 'r')``. Instead, you should use the following:
+
+.. code-block:: python
+
+   import os
+
+   _pwd = os.path.dirname(__file__)
+   _fd = open(os.path.join(_pwd, 'data.txt'), 'r')
+
+This is because your tuner is not executed in the directory of your tuner (i.e., ``pwd`` is not the directory of your own tuner).
+
+**3. Configure your customized tuner in the experiment YAML config file**
+
+NNI needs to locate your customized tuner class and instantiate it, so you need to specify the location of the customized tuner class and pass literal values as parameters to the __init__ constructor.
+
+.. code-block:: yaml
+
+   tuner:
+     codeDir: /home/abc/mytuner
+     classFileName: my_customized_tuner.py
+     className: CustomizedTuner
+     # Any parameter you need to pass to your tuner class's __init__ constructor
+     # can be specified in this optional classArgs field, for example
+     classArgs:
+       arg1: value1
+
+For more detailed examples, see:
+
+..
+
+   * :githublink:`evolution-tuner `
+   * :githublink:`hyperopt-tuner `
+   * :githublink:`evolution-based-customized-tuner `
+
+
+Write a more advanced automl algorithm
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The methods above are usually enough to write a general tuner. However, users may also want access to more information, for example, intermediate results and trials' states (i.e., the methods in an assessor), in order to implement a more powerful automl algorithm. Therefore, we have another concept called ``advisor`` which directly inherits from ``MsgDispatcherBase`` in :githublink:`msg_dispatcher_base.py `. Please refer to `here `__ for how to write a customized advisor.
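+
+For reference, below is a minimal, self-contained sketch of a complete customized tuner: a random-search tuner. It assumes, for illustration only, a search space containing ``choice`` parameters; the class name and ``seed`` argument are our own invention, not part of the NNI API (only the three ``Tuner`` methods shown above are).
+
+.. code-block:: python
+
+   import random
+
+   from nni.tuner import Tuner
+
+   class RandomChoiceTuner(Tuner):
+       '''Illustrative tuner: samples every ``choice`` parameter uniformly at random.'''
+
+       def __init__(self, seed=None):
+           self.search_space = {}
+           self.rng = random.Random(seed)
+
+       def update_search_space(self, search_space):
+           # Called with the search space at startup (and again on updates).
+           self.search_space = search_space
+
+       def generate_parameters(self, parameter_id, **kwargs):
+           # Sample one value per parameter; assumes every entry has _type 'choice'.
+           return {name: self.rng.choice(spec['_value'])
+                   for name, spec in self.search_space.items()}
+
+       def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+           # Pure random search ignores results; a smarter algorithm would
+           # update its internal model of the search space here.
+           pass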
diff --git a/docs/en_US/Tuner/DngoTuner.rst b/docs/en_US/Tuner/DngoTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e4ea75b644e768788b2bd0764523574abed9d91d
--- /dev/null
+++ b/docs/en_US/Tuner/DngoTuner.rst
@@ -0,0 +1,27 @@
+DNGO Tuner
+==========
+
+Usage
+-----
+
+Installation
+^^^^^^^^^^^^
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*'maximize' or 'minimize'*) - If 'maximize', the tuner will target to maximize metrics. If 'minimize', the tuner will target to minimize metrics.
+* **sample_size** (*int, default = 1000*) - Number of samples to select in each iteration. The best one will be picked from the samples as the next trial.
+* **trials_per_update** (*int, default = 20*) - Number of trials to collect before updating the model.
+* **num_epochs_per_training** (*int, default = 500*) - Number of epochs to train the DNGO model.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: DNGOTuner
+     classArgs:
+       optimize_mode: maximize
diff --git a/docs/en_US/Tuner/EvolutionTuner.rst b/docs/en_US/Tuner/EvolutionTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..195372baf11c54281d197ecac4b70a59de6c1a16
--- /dev/null
+++ b/docs/en_US/Tuner/EvolutionTuner.rst
@@ -0,0 +1,29 @@
+Naive Evolution Tuner
+=====================
+
+Naive Evolution comes from `Large-Scale Evolution of Image Classifiers `__. It randomly initializes a population based on the search space. For each generation, it chooses the better ones and does some mutation (e.g., changing a hyperparameter, adding/removing one layer, etc.) on them to get the next generation. Naive Evolution requires many trials to work, but it's very simple and easy to extend with new features.
+
+Usage
+-----
+
+classArgs Requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+*
+  **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+
+*
+  **population_size** (*int value (should > 0), optional, default = 20*) - the initial size of the population (trial num) in the evolution tuner. It's suggested that ``population_size`` be much larger than ``concurrency`` so users can get the most out of the algorithm (and at least ``concurrency``, or the tuner will fail on its first generation of parameters).
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: Evolution
+     classArgs:
+       optimize_mode: maximize
+       population_size: 100
+
diff --git a/docs/en_US/Tuner/GPTuner.rst b/docs/en_US/Tuner/GPTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c36ca12b43bd12e64a9097be209d2a5d955688d
--- /dev/null
+++ b/docs/en_US/Tuner/GPTuner.rst
@@ -0,0 +1,45 @@
+GP Tuner
+========
+
+Bayesian optimization works by constructing a posterior distribution of functions (a Gaussian Process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not.
+
+GP Tuner is designed to minimize the number of steps required to find a combination of parameters that is close to the optimal combination.
To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) to solve, and it's amenable to common tools. Therefore, Bayesian Optimization is suggested for situations where sampling the function to be optimized is very expensive.
+
+Note that the only acceptable types within the search space are ``randint``, ``uniform``, ``quniform``, ``loguniform``, ``qloguniform``, and numerical ``choice``.
+
+This optimization approach is described in Section 3 of `Algorithms for Hyper-Parameter Optimization `__.
+
+Usage
+-----
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*'maximize' or 'minimize', optional, default = 'maximize'*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+* **utility** (*'ei', 'ucb' or 'poi', optional, default = 'ei'*) - The utility function (acquisition function). 'ei', 'ucb', and 'poi' correspond to 'Expected Improvement', 'Upper Confidence Bound', and 'Probability of Improvement', respectively.
+* **kappa** (*float, optional, default = 5*) - Used by the 'ucb' utility function. The bigger ``kappa`` is, the more exploratory the tuner will be.
+* **xi** (*float, optional, default = 0*) - Used by the 'ei' and 'poi' utility functions. The bigger ``xi`` is, the more exploratory the tuner will be.
+* **nu** (*float, optional, default = 2.5*) - Used to specify the Matern kernel. The smaller ``nu`` is, the less smooth the approximated function is.
+* **alpha** (*float, optional, default = 1e-6*) - Used to specify the Gaussian Process Regressor. Larger values correspond to an increased noise level in the observations.
+* **cold_start_num** (*int, optional, default = 10*) - Number of random explorations to perform before Gaussian Process fitting. Random exploration can help by diversifying the exploration space.
+* **selection_num_warm_up** (*int, optional, default = 1e5*) - Number of random points to evaluate when getting the point which maximizes the acquisition function.
+* **selection_num_starting_points** (*int, optional, default = 250*) - Number of times to run L-BFGS-B from a random starting point after the warm-up.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: GPTuner
+     classArgs:
+       optimize_mode: maximize
+       utility: 'ei'
+       kappa: 5.0
+       xi: 0.0
+       nu: 2.5
+       alpha: 1e-6
+       cold_start_num: 10
+       selection_num_warm_up: 100000
+       selection_num_starting_points: 250
diff --git a/docs/en_US/Tuner/GridsearchTuner.rst b/docs/en_US/Tuner/GridsearchTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..26675bbc67b4036fa3055d806c89d50cf4377fd8
--- /dev/null
+++ b/docs/en_US/Tuner/GridsearchTuner.rst
@@ -0,0 +1,19 @@
+Grid Search Tuner
+=================
+
+Grid Search performs an exhaustive search through the search space.
+
+For uniformly and normally distributed parameters, the grid search tuner samples them at progressively decreasing intervals.
+
+Usage
+-----
+
+The grid search tuner takes no arguments.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   tuner:
+     name: GridSearch
diff --git a/docs/en_US/Tuner/HyperbandAdvisor.rst b/docs/en_US/Tuner/HyperbandAdvisor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..683bf480bcf3a9443231e20c7d8629e6322bede8
--- /dev/null
+++ b/docs/en_US/Tuner/HyperbandAdvisor.rst
@@ -0,0 +1,143 @@
+Hyperband Advisor
+=================
+
+`Hyperband `__ is a popular AutoML algorithm. The basic idea of Hyperband is to create several buckets, each having ``n`` randomly generated hyperparameter configurations, each configuration using ``r`` resources (e.g., number of epochs or mini-batches). After the ``n`` configurations are finished, it chooses the top ``n/eta`` configurations and runs them using increased ``r*eta`` resources. At last, it chooses the best configuration it has found so far.
+
+Implementation with full parallelism
+------------------------------------
+
+First, this is an example of how to write an AutoML algorithm based on MsgDispatcherBase, rather than Tuner and Assessor. Hyperband is implemented in this way because it integrates the functions of both Tuner and Assessor; thus, we call it an Advisor.
+
+Second, this implementation fully leverages Hyperband's internal parallelism. Specifically, the next bucket is not started strictly after the current bucket; instead, it starts when there are available resources. If you want to use full-parallelism mode, set ``exec_mode`` to ``parallelism``. Alternatively, set ``exec_mode`` to ``serial`` to follow the original algorithm, in which the next bucket starts strictly after the current bucket finishes.
+
+``parallelism`` mode may lead to multiple unfinished buckets, while under ``serial`` mode there is at most one unfinished bucket. The advantage of ``parallelism`` mode is that it makes full use of resources, which may shorten the experiment duration severalfold. The following two pictures show the results of a quick verification using `nas-bench-201 <../NAS/Benchmarks.rst>`__; the first is in ``parallelism`` mode, the second in ``serial`` mode.
+
+
+.. image:: ../../img/hyperband_parallelism.png
+   :target: ../../img/hyperband_parallelism.png
+   :alt: parallelism mode
+
+
+
+.. image:: ../../img/hyperband_serial.png
+   :target: ../../img/hyperband_serial.png
+   :alt: serial mode
+
+
+If you want to reproduce these results, refer to the example under ``examples/trials/benchmarking/`` for details.
+
+Usage
+-----
+
+Config file
+^^^^^^^^^^^
+
+To use Hyperband, you should add the following spec in your experiment's YAML config file:
+
+.. code-block:: yaml
+
+   advisor:
+     #choice: Hyperband
+     builtinAdvisorName: Hyperband
+     classArgs:
+       #R: the maximum trial budget
+       R: 100
+       #eta: proportion of discarded trials
+       eta: 3
+       #choice: maximize, minimize
+       optimize_mode: maximize
+       #choice: serial, parallelism
+       exec_mode: parallelism
+
+Note that once you use an Advisor, you are not allowed to add a Tuner or Assessor spec in the config file. If you use Hyperband, among the hyperparameters (i.e., key-value pairs) received by a trial, there will be one more key called ``TRIAL_BUDGET``. **By using this ``TRIAL_BUDGET``, the trial can control how long it runs**.
+
+For ``report_intermediate_result(metric)`` and ``report_final_result(metric)`` in your trial code, **``metric`` should be either a number or a dict which has a key ``default`` with a number as its value**. This number is the one you want to maximize or minimize, for example, accuracy or loss.
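+
+For instance, a trial might consume the budget as its epoch count. Below is a minimal sketch of such trial code; ``train_one_epoch`` and ``validate`` are placeholders for your own training and evaluation steps, not NNI APIs:
+
+.. code-block:: python
+
+   import nni
+
+   params = nni.get_next_parameter()
+   # Hyperband injects TRIAL_BUDGET alongside the sampled hyperparameters.
+   budget = int(params['TRIAL_BUDGET'])
+
+   accuracy = 0.0
+   for epoch in range(budget):
+       train_one_epoch(params)      # placeholder: your training step
+       accuracy = validate(params)  # placeholder: your evaluation step
+       nni.report_intermediate_result(accuracy)
+
+   # Report a number, or a dict with a 'default' key.
+   nni.report_final_result(accuracy)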
+
+``R`` and ``eta`` are the parameters of Hyperband that you can change. ``R`` means the maximum trial budget that can be allocated to a configuration. Here, trial budget could mean the number of epochs or mini-batches. This ``TRIAL_BUDGET`` should be used by the trial to control how long it runs. Refer to the example under ``examples/trials/mnist-advisor/`` for details.
+
+``eta`` means that ``n/eta`` of the ``n`` configurations survive and rerun with a larger budget.
+
+Here is a concrete example of ``R=81`` and ``eta=3``:
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * -
+     - s=4
+     - s=3
+     - s=2
+     - s=1
+     - s=0
+   * - i
+     - n r
+     - n r
+     - n r
+     - n r
+     - n r
+   * - 0
+     - 81 1
+     - 27 3
+     - 9 9
+     - 6 27
+     - 5 81
+   * - 1
+     - 27 3
+     - 9 9
+     - 3 27
+     - 2 81
+     -
+   * - 2
+     - 9 9
+     - 3 27
+     - 1 81
+     -
+     -
+   * - 3
+     - 3 27
+     - 1 81
+     -
+     -
+     -
+   * - 4
+     - 1 81
+     -
+     -
+     -
+     -
+
+
+``s`` means the bucket, ``n`` means the number of configurations that are generated, and the corresponding ``r`` means how large a budget these configurations run with. ``i`` means the round; for example, bucket 4 has 5 rounds and bucket 3 has 4 rounds.
+
+For information about writing trial code, please refer to the instructions under ``examples/trials/mnist-hyperband/``.
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+
+* **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+* **R** (*int, optional, default = 60*) - the maximum budget given to a trial (could be the number of mini-batches or epochs). Each trial should use TRIAL_BUDGET to control how long it runs.
+* **eta** (*int, optional, default = 3*) - ``(eta-1)/eta`` is the proportion of discarded trials.
+* **exec_mode** (*serial or parallelism, optional, default = parallelism*) - If 'parallelism', the tuner will try to use available resources to start a new bucket immediately. If 'serial', the tuner will only start a new bucket after the current bucket is done.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   advisor:
+     builtinAdvisorName: Hyperband
+     classArgs:
+       optimize_mode: maximize
+       R: 60
+       eta: 3
+
+Future improvements
+-------------------
+
+The current implementation of Hyperband can be further improved by supporting a simple early-stop algorithm, since it's possible that not all the configurations in the top ``n/eta`` perform well. Any unpromising configurations should be stopped early.
+
+In the current implementation, configurations are generated randomly, which follows the design in the `paper `__. As an improvement, configurations could be generated more wisely by leveraging advanced algorithms.
diff --git a/docs/en_US/Tuner/MetisTuner.rst b/docs/en_US/Tuner/MetisTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c99e43aa76b36e68d9175f3c1667c4baeed162d1
--- /dev/null
+++ b/docs/en_US/Tuner/MetisTuner.rst
@@ -0,0 +1,40 @@
+Metis Tuner
+===========
+
+`Metis `__ offers several benefits over other tuning algorithms. While most tools only predict the optimal configuration, Metis gives you two outputs: a prediction for the optimal configuration and a suggestion for the next trial. No more guesswork!
+
+While most tools assume training datasets do not have noisy data, Metis actually tells you if you need to resample a particular hyper-parameter.
+
+While most tools tend to be exploitation-heavy, Metis' search strategy balances exploration, exploitation, and (optional) re-sampling.
+
+Metis belongs to the class of sequential model-based optimization (SMBO) algorithms and is based on the Bayesian Optimization framework. To model the parameter-vs-performance space, Metis uses both a Gaussian Process and a GMM. Since each trial can impose a high time cost, Metis heavily trades cheap inference computation for expensive naive trials. At each iteration, Metis does two tasks:
+
+
+*
+  It finds the global optimal point in the Gaussian Process space. This point represents the optimal configuration.
+
+*
+  It identifies the next hyper-parameter candidate. This is achieved by inferring the potential information gain of exploration, exploitation, and re-sampling.
+
+Note that the only acceptable types within the search space are ``quniform``, ``uniform``, ``randint``, and numerical ``choice``.
+
+More details can be found in our `paper `__.
+
+Usage
+-----
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*'maximize' or 'minimize', optional, default = 'maximize'*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: MetisTuner
+     classArgs:
+       optimize_mode: maximize
diff --git a/docs/en_US/Tuner/NetworkmorphismTuner.rst b/docs/en_US/Tuner/NetworkmorphismTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f97e6e3ad39ee15a341d7fb32cf6dab9219b0445
--- /dev/null
+++ b/docs/en_US/Tuner/NetworkmorphismTuner.rst
@@ -0,0 +1,296 @@
+Network Morphism Tuner
+======================
+
+`Autokeras `__ is a popular AutoML tool using Network Morphism. The basic idea of Autokeras is to use Bayesian regression to estimate the metric of a neural network architecture. Each time, it generates several child networks from parent networks. Then it uses naïve Bayesian regression to estimate the metric value of each child from the history of trained (network, metric value) pairs. Next, it chooses the child with the best estimated performance and adds it to the training queue. Inspired by the work of Autokeras and referring to its `code `__, we implemented our Network Morphism method on the NNI platform.
+
+If you want to know more about network morphism trial usage, please see the :githublink:`Readme.md `.
+
+Usage
+-----
+
+Installation
+^^^^^^^^^^^^
+
+NetworkMorphism requires :githublink:`PyTorch `.
+
+classArgs Requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+* **task** (*('cv'), optional, default = 'cv'*) - The domain of the experiment. For now, this tuner only supports the computer vision (CV) domain.
+* **input_width** (*int, optional, default = 32*) - input image width
+* **input_channel** (*int, optional, default = 3*) - input image channel
+* **n_output_node** (*int, optional, default = 10*) - number of classes
+
+
+
+Config File
+^^^^^^^^^^^
+
+To use Network Morphism, you should modify the following spec in your ``config.yml`` file:
+
+.. code-block:: yaml
+
+   tuner:
+     #choice: NetworkMorphism
+     name: NetworkMorphism
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+       #for now, this tuner only supports cv domain
+       task: cv
+       #modify to fit your input image width
+       input_width: 32
+       #modify to fit your input image channel
+       input_channel: 3
+       #modify to fit your number of classes
+       n_output_node: 10
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: NetworkMorphism
+     classArgs:
+       optimize_mode: maximize
+       task: cv
+       input_width: 32
+       input_channel: 3
+       n_output_node: 10
+
+In the training procedure, the tuner generates a JSON file which represents a network graph. Users can call the ``json_to_graph()`` function to build a PyTorch or Keras model from this JSON file.
+
+.. code-block:: python
+
+   import nni
+   from nni.networkmorphism_tuner.graph import json_to_graph
+
+   def build_graph_from_json(ir_model_json):
+       """build a pytorch model from json representation
+       """
+       graph = json_to_graph(ir_model_json)
+       model = graph.produce_torch_model()
+       return model
+
+   # trial gets the next parameter from the network morphism tuner
+   RCV_CONFIG = nni.get_next_parameter()
+   # call the function to build a pytorch model or keras model
+   net = build_graph_from_json(RCV_CONFIG)
+
+   # training procedure
+   # ....
+
+   # report the final accuracy to NNI
+   nni.report_final_result(best_acc)
+
+If you want to save and load the **best model**, the following methods are recommended.
+
+.. code-block:: python
+
+   # 1. Use NNI API
+   ## You can get the best model ID from WebUI
+   ## or 'nni-experiments/experiment_id/log/model_path/best_model.txt'
+
+   ## read the json string from the model file and load it with the NNI API
+   with open("best-model.json") as json_file:
+       json_of_model = json_file.read()
+   model = build_graph_from_json(json_of_model)
+
+   # 2. Use Framework API (Related to Framework)
+   ## 2.1 Keras API
+
+   ## Save the model with the Keras API in the trial code;
+   ## it's better to save the model with its id in NNI local mode
+   model_id = nni.get_sequence_id()
+   ## serialize model to JSON
+   model_json = model.to_json()
+   with open("model-{}.json".format(model_id), "w") as json_file:
+       json_file.write(model_json)
+   ## serialize weights to HDF5
+   model.save_weights("model-{}.h5".format(model_id))
+
+   ## Load the model with the Keras API if you want to reuse the model
+   ## load json and create model
+   from keras.models import model_from_json
+   model_id = ""  # id of the model you want to reuse
+   with open('model-{}.json'.format(model_id), 'r') as json_file:
+       loaded_model_json = json_file.read()
+   loaded_model = model_from_json(loaded_model_json)
+   ## load weights into the new model
+   loaded_model.load_weights("model-{}.h5".format(model_id))
+
+   ## 2.2 PyTorch API
+
+   ## Save the model with the PyTorch API in the trial code
+   import torch
+   model_id = nni.get_sequence_id()
+   torch.save(model, "model-{}.pt".format(model_id))
+
+   ## Load the model with the PyTorch API if you want to reuse the model
+   model_id = ""  # id of the model you want to reuse
+   loaded_model = torch.load("model-{}.pt".format(model_id))
+
+File Structure
+--------------
+
+The tuner has a lot of different files, functions, and classes. Here, we give only a brief introduction to most of those files:
+
+
+*
+  ``networkmorphism_tuner.py`` is a tuner which uses network morphism techniques.
+
+*
+  ``bayesian.py`` is a Bayesian method to estimate the metric of an unseen model based on the models we have already searched.
+
+* ``graph.py`` is the meta graph data structure.
The class Graph represents the neural architecture graph of a model. + + * Graph extracts the neural architecture graph from a model. + * Each node in the graph is an intermediate tensor between layers. + * Each layer is an edge in the graph. + * Notably, multiple edges may refer to the same layer. + +* + ``graph_transformer.py`` includes some graph transformers which widen, deepen, or add skip-connections to the graph. + +* + ``layers.py`` includes all the layers we use in our model. + +* ``layer_transformer.py`` includes some layer transformers which widen, deepen, or add skip-connections to the layer. +* ``nn.py`` includes the class which generates the initial network. +* ``metric.py`` some metric classes including Accuracy and MSE. +* ``utils.py`` is the example search network architectures for the ``cifar10`` dataset, using Keras. + +The Network Representation Json Example +--------------------------------------- + +Here is an example of the intermediate representation JSON file we defined, which is passed from the tuner to the trial in the architecture search procedure. Users can call the "json_to_graph()" function in the trial code to build a PyTorch or Keras model from this JSON file. + +.. code-block:: json + + { + "input_shape": [32, 32, 3], + "weighted": false, + "operation_history": [], + "layer_id_to_input_node_ids": {"0": [0],"1": [1],"2": [2],"3": [3],"4": [4],"5": [5],"6": [6],"7": [7],"8": [8],"9": [9],"10": [10],"11": [11],"12": [12],"13": [13],"14": [14],"15": [15],"16": [16] + }, + "layer_id_to_output_node_ids": {"0": [1],"1": [2],"2": [3],"3": [4],"4": [5],"5": [6],"6": [7],"7": [8],"8": [9],"9": [10],"10": [11],"11": [12],"12": [13],"13": [14],"14": [15],"15": [16],"16": [17] + }, + "adj_list": { + "0": [[1, 0]], + "1": [[2, 1]], + "2": [[3, 2]], + "3": [[4, 3]], + "4": [[5, 4]], + "5": [[6, 5]], + "6": [[7, 6]], + "7": [[8, 7]], + "8": [[9, 8]], + "9": [[10, 9]], + "10": [[11, 10]], + "11": [[12, 11]], + "12": [[13, 12]], + "13": [[14, 13]], + "14": [[15, 14]], + "15": [[16, 15]], + "16": [[17, 16]], + "17": [] + }, + "reverse_adj_list": { + "0": [], + "1": [[0, 0]], + "2": [[1, 1]], + "3": [[2, 2]], + "4": [[3, 3]], + "5": [[4, 4]], + "6": [[5, 5]], + "7": [[6, 6]], + "8": [[7, 7]], + "9": [[8, 8]], + "10": [[9, 9]], + "11": [[10, 10]], + "12": [[11, 11]], + "13": [[12, 12]], + "14": [[13, 13]], + "15": [[14, 14]], + "16": [[15, 15]], + "17": [[16, 16]] + }, + "node_list": [ + [0, [32, 32, 3]], + [1, [32, 32, 3]], + [2, [32, 32, 64]], + [3, [32, 32, 64]], + [4, [16, 16, 64]], + [5, [16, 16, 64]], + [6, [16, 16, 64]], + [7, [16, 16, 64]], + [8, [8, 8, 64]], + [9, [8, 8, 64]], + [10, [8, 8, 64]], + [11, [8, 8, 64]], + [12, [4, 4, 64]], + [13, [64]], + [14, [64]], + [15, [64]], + [16, [64]], + [17, [10]] + ], + "layer_list": [ + [0, ["StubReLU", 0, 1]], + [1, ["StubConv2d", 1, 2, 3, 64, 3]], + [2, ["StubBatchNormalization2d", 2, 3, 64]], + [3, ["StubPooling2d", 3, 4, 2, 2, 0]], + [4, ["StubReLU", 4, 5]], + [5, ["StubConv2d", 5, 6, 64, 64, 3]], + [6, ["StubBatchNormalization2d", 6, 7, 64]], + [7, ["StubPooling2d", 7, 8, 2, 2, 0]], + [8, ["StubReLU", 8, 9]], + [9, ["StubConv2d", 9, 10, 64, 64, 3]], + [10, ["StubBatchNormalization2d", 10, 11, 64]], + [11, ["StubPooling2d", 11, 12, 2, 2, 0]], + [12, ["StubGlobalPooling2d", 12, 13]], + [13, ["StubDropout2d", 13, 14, 0.25]], + [14, ["StubDense", 14, 15, 64, 64]], + [15, ["StubReLU", 15, 16]], + [16, ["StubDense", 16, 17, 64, 10]] + ] + } + +You can consider the model to be a `directed acyclic graph `__. 
The definition of each model is a JSON object where:
+
+
+* ``input_shape`` is a list of integers which does not include the batch axis.
+* ``weighted`` means whether the weights and biases in the neural network should be included in the graph.
+* ``operation_history`` is a list saving all the network morphism operations.
+* ``layer_id_to_input_node_ids`` is a dictionary mapping from layer identifiers to their input node identifiers.
+* ``layer_id_to_output_node_ids`` is a dictionary mapping from layer identifiers to their output node identifiers.
+* ``adj_list`` is a two-dimensional list; the adjacency list of the graph. The first dimension is indexed by tensor identifiers. In each edge list, the elements are two-element tuples of (tensor identifier, layer identifier).
+* ``reverse_adj_list`` is a reversed adjacency list in the same format as ``adj_list``.
+* ``node_list`` is a list of integers. The indices of the list are the identifiers.
+*
+  ``layer_list`` is a list of stub layers. The indices of the list are the identifiers.
+
+
+  *
+    For ``StubConv (StubConv1d, StubConv2d, StubConv3d)``, the numbering follows the format: its node input id (or id list), node output id, input_channel, filters, kernel_size, stride, and padding.
+
+  *
+    For ``StubDense``, the numbering follows the format: its node input id (or id list), node output id, input_units, and units.
+
+  *
+    For ``StubBatchNormalization (StubBatchNormalization1d, StubBatchNormalization2d, StubBatchNormalization3d)``, the numbering follows the format: its node input id (or id list), node output id, and number of features.
+
+  *
+    For ``StubDropout(StubDropout1d, StubDropout2d, StubDropout3d)``, the numbering follows the format: its node input id (or id list), node output id, and dropout rate.
+
+  *
+    For ``StubPooling (StubPooling1d, StubPooling2d, StubPooling3d)``, the numbering follows the format: its node input id (or id list), node output id, kernel_size, stride, and padding.
+
+  *
+    For all other layers, the numbering follows the format: its node input id (or id list) and node output id.
+
+TODO
+----
+
+As a next step, we will change the API from a fixed network generator to a network generator with more available operators. In the future, we will use ONNX instead of JSON as the intermediate representation spec.
diff --git a/docs/en_US/Tuner/PBTTuner.rst b/docs/en_US/Tuner/PBTTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ff56b74cb420076af98711a003daa4da968edff
--- /dev/null
+++ b/docs/en_US/Tuner/PBTTuner.rst
@@ -0,0 +1,76 @@
+PBT Tuner
+=========
+
+Population Based Training (PBT) comes from `Population Based Training of Neural Networks `__. It's a simple asynchronous optimization algorithm which effectively utilizes a fixed computational budget to jointly optimize a population of models and their hyperparameters to maximize performance. Importantly, PBT discovers a schedule of hyperparameter settings rather than following the generally sub-optimal strategy of trying to find a single fixed set to use for the whole course of training.
+
+
+.. image:: ../../img/pbt.jpg
+   :target: ../../img/pbt.jpg
+   :alt:
+
+
+PBTTuner initializes a population of several trials (i.e., ``population_size``). There are four steps in the figure above, and each trial runs only one step at a time. The length of one step is controlled by the trial code, e.g., one epoch. When a trial starts, it loads a checkpoint specified by PBTTuner and continues to run one step, then saves the checkpoint to a directory specified by PBTTuner and exits.
The trials in a population run their steps synchronously; that is, only after all the trials have finished the ``i``-th step can the ``(i+1)``-th step be started. The exploitation and exploration of PBT are executed between two consecutive steps.
+
+Usage
+-----
+
+Provide checkpoint directory
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Since some trials need to load other trials' checkpoints, users should provide a directory (i.e., ``all_checkpoint_dir``) which is accessible by every trial. This is easy in local mode: users can simply use the default directory or specify any directory on the local machine. For other training services, users should follow `the document of those training services <../TrainingService/Overview.rst>`__ to provide a directory in a shared storage, such as NFS or Azure storage.
+
+Modify your trial code
+^^^^^^^^^^^^^^^^^^^^^^
+
+Before running a step, a trial needs to load a checkpoint; the checkpoint directory is specified in the hyper-parameter configuration generated by PBTTuner, i.e., ``params['load_checkpoint_dir']``. Similarly, the directory for saving a checkpoint is also included in the configuration, i.e., ``params['save_checkpoint_dir']``. Here, ``all_checkpoint_dir`` is the base folder of ``load_checkpoint_dir`` and ``save_checkpoint_dir``, whose format is ``all_checkpoint_dir/<population-id>/<step>``.
+
+.. code-block:: python
+
+   import os
+
+   import nni
+
+   params = nni.get_next_parameter()
+   # the path of the checkpoint to load
+   load_path = os.path.join(params['load_checkpoint_dir'], 'model.pth')
+   # load checkpoint from `load_path`
+   ...
+   # run one step
+   ...
+   # the path for saving a checkpoint
+   save_path = os.path.join(params['save_checkpoint_dir'], 'model.pth')
+   # save checkpoint to `save_path`
+   ...
+
+The complete example code can be found :githublink:`here `.
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*'maximize' or 'minimize'*) - If 'maximize', the tuner will target to maximize metrics. If 'minimize', the tuner will target to minimize metrics.
+* **all_checkpoint_dir** (*str, optional, default = None*) - Directory for trials to load and save checkpoints; if not specified, the directory would be ``~/nni/checkpoint/<exp-id>``. Note that if the experiment is not in local mode, users should provide a path in a shared storage which can be accessed by all the trials.
+* **population_size** (*int, optional, default = 10*) - Number of trials in a population. Each step has this number of trials. In our implementation, one step means running each trial for a specific number of training epochs set by users.
+* **factors** (*tuple, optional, default = (1.2, 0.8)*) - Factors for the perturbation of hyperparameters.
+* **fraction** (*float, optional, default = 0.2*) - Fraction for selecting the bottom and top trials.
+
+Experiment config
+^^^^^^^^^^^^^^^^^
+
+Below is an example of a PBTTuner configuration in the experiment config file. **Note that an Assessor is not allowed if PBTTuner is used.**
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: PBTTuner
+     classArgs:
+       optimize_mode: maximize
+       all_checkpoint_dir: /the/path/to/store/checkpoints
+       population_size: 10
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: PBTTuner
+     classArgs:
+       optimize_mode: maximize
diff --git a/docs/en_US/Tuner/RandomTuner.rst b/docs/en_US/Tuner/RandomTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b1022a5a22874b26a6c6473968c5d2a28e283ea
--- /dev/null
+++ b/docs/en_US/Tuner/RandomTuner.rst
@@ -0,0 +1,17 @@
+Random Tuner
+============
+
+`Random Search for Hyper-Parameter Optimization `__ shows that random search might be surprisingly effective despite its simplicity.
+We suggest using random search as a baseline when no knowledge about the prior distribution of hyper-parameters is available.
+
+Usage
+-----
+
+Example Configuration
+
+.. code-block:: yaml
+
+   tuner:
+     name: Random
+     classArgs:
+       seed: 100  # optional
diff --git a/docs/en_US/Tuner/SmacTuner.rst b/docs/en_US/Tuner/SmacTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c2a5781b808575b67cee82649425cf5b2a8d6f1
--- /dev/null
+++ b/docs/en_US/Tuner/SmacTuner.rst
@@ -0,0 +1,35 @@
+SMAC Tuner
+==========
+
+`SMAC `__ is based on Sequential Model-Based Optimization (SMBO). It adapts the most prominent previously used model class (Gaussian stochastic process models) and introduces the model class of random forests to SMBO in order to handle categorical parameters. The SMAC supported by NNI is a wrapper of `the SMAC3 GitHub repo `__.
+
+Note that SMAC on NNI only supports a subset of the types in the `search space spec <../Tutorial/SearchSpaceSpec.rst>`__: ``choice``, ``randint``, ``uniform``, ``loguniform``, and ``quniform``.
+
+Usage
+-----
+
+Installation
+^^^^^^^^^^^^
+
+SMAC has dependencies that need to be installed with the following command before first use. As a reminder, ``swig`` is required for SMAC; on Ubuntu, ``swig`` can be installed with ``apt``.
+
+.. code-block:: bash
+
+   pip install nni[SMAC]
+
+classArgs requirements
+^^^^^^^^^^^^^^^^^^^^^^
+
+* **optimize_mode** (*maximize or minimize, optional, default = maximize*) - If 'maximize', the tuner will try to maximize metrics. If 'minimize', the tuner will try to minimize metrics.
+* **config_dedup** (*True or False, optional, default = False*) - If True, the tuner will not generate a configuration that has already been generated. If False, a configuration may be generated twice, but this is rare for a relatively large search space.
+
+Example Configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   # config.yml
+   tuner:
+     name: SMAC
+     classArgs:
+       optimize_mode: maximize
diff --git a/docs/en_US/Tuner/TpeTuner.rst b/docs/en_US/Tuner/TpeTuner.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d255b0f69a5e4eb10500b2f00c70292c1a56bcdc
--- /dev/null
+++ b/docs/en_US/Tuner/TpeTuner.rst
@@ -0,0 +1,114 @@
+TPE Tuner
+=========
+
+The Tree-structured Parzen Estimator (TPE) is a sequential model-based optimization (SMBO) approach.
+SMBO methods sequentially construct models to approximate the performance of hyperparameters based on historical measurements,
+and then subsequently choose new hyperparameters to test based on this model.
+
+The TPE approach models P(x|y) and P(y), where x represents hyperparameters and y the associated evaluation metric.
+P(x|y) is modeled by transforming the generative process of hyperparameters,
+replacing the distributions of the configuration prior with non-parametric densities.
+
+This optimization approach is described in detail in `Algorithms for Hyper-Parameter Optimization `__.
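+
+Loosely speaking, at each step TPE splits the finished trials into a "good" group and a "bad" group by a quantile of their metrics, models the density of each group with a Parzen estimator, and suggests the candidate that maximizes the ratio l(x)/g(x) of the two densities. The toy sketch below illustrates this idea for a single parameter in [0, 1] being minimized; it is an illustration of the principle only, not NNI's implementation, and all names in it are our own:
+
+.. code-block:: python
+
+   import math
+   import random
+
+   def tpe_suggest(history, gamma=0.25, n_candidates=24):
+       """history: non-empty list of (x, y) pairs; returns the next x to try (minimizing y)."""
+       ordered = sorted(history, key=lambda xy: xy[1])
+       n_good = max(1, int(gamma * len(ordered)))
+       good = [x for x, _ in ordered[:n_good]]   # parameters of the best trials
+       bad = [x for x, _ in ordered[n_good:]] or good
+
+       def density(x, points, bw=0.1):
+           # Crude Parzen estimator: an average of Gaussian kernels.
+           return sum(math.exp(-0.5 * ((x - p) / bw) ** 2) for p in points) \
+               / (len(points) * bw * math.sqrt(2 * math.pi))
+
+       # Sample candidates around "good" points; keep the best l(x)/g(x) ratio.
+       candidates = [min(max(random.gauss(random.choice(good), 0.1), 0.0), 1.0)
+                     for _ in range(n_candidates)]
+       return max(candidates, key=lambda c: density(c, good) / (density(c, bad) + 1e-12))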
+
+Parallel TPE optimization
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TPE approaches were actually run asynchronously in order to make use of multiple compute nodes and to avoid wasting time waiting for trial evaluations to complete.
+However, the original algorithm design was optimized for sequential computation, so when TPE is used with high concurrency, its performance degrades.
+We have optimized this case using the Constant Liar algorithm.
+For the principles behind this optimization, please refer to our `research blog <../CommunitySharings/ParallelizingTpeSearch.rst>`__.
+
+Usage
+-----
+
+To use TPE, you should add the following spec in your experiment's YAML config file:
+
+.. code-block:: yaml
+
+   ## minimal config ##
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: minimize
+
+.. code-block:: yaml
+
+   ## advanced config ##
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+       seed: 12345
+       tpe_args:
+         constant_liar_type: 'mean'
+         n_startup_jobs: 10
+         n_ei_candidates: 20
+         linear_forgetting: 100
+         prior_weight: 0
+         gamma: 0.5
+
+classArgs
+^^^^^^^^^
+
+.. list-table::
+   :widths: 10 20 10 60
+   :header-rows: 1
+
+   * - Field
+     - Type
+     - Default
+     - Description
+
+   * - ``optimize_mode``
+     - ``'minimize' | 'maximize'``
+     - ``'minimize'``
+     - Whether to minimize or maximize trial metrics.
+
+   * - ``seed``
+     - ``int | null``
+     - ``null``
+     - The random seed.
+
+   * - ``tpe_args.constant_liar_type``
+     - ``'best' | 'worst' | 'mean' | null``
+     - ``'best'``
+     - The TPE algorithm itself does not support parallel tuning. This parameter specifies how to optimize for ``trial_concurrency > 1``. How each liar works is explained in section 6.1 of the paper.
+
+       In general, ``best`` suits a small number of trials and ``worst`` suits a large number of trials.
+
+   * - ``tpe_args.n_startup_jobs``
+     - ``int``
+     - ``20``
+     - The first N hyper-parameters are generated fully randomly for warming up.
+
+       If the search space is large, you can increase this value; if max_trial_number is small, you may want to decrease it.
+
+   * - ``tpe_args.n_ei_candidates``
+     - ``int``
+     - ``24``
+     - In each iteration, TPE samples the EI of N sets of parameters and chooses the best one (loosely speaking).
+
+   * - ``tpe_args.linear_forgetting``
+     - ``int``
+     - ``25``
+     - TPE will lower the weights of old trials. This controls how many iterations it takes for a trial to start decaying.
+
+   * - ``tpe_args.prior_weight``
+     - ``float``
+     - ``1.0``
+     - TPE treats the user-provided search space as a prior.
+       When generating new trials, it also incorporates the prior in the trial history by transforming the search space into
+       one trial configuration (i.e., each parameter of this configuration chooses the mean of its candidate range).
+       Here, prior_weight determines the weight of this trial configuration among the history trial configurations.
+
+       With prior weight 1.0, the search space is treated as one good trial.
+       For example, "normal(0, 1)" effectively equals a trial with x = 0 which has yielded a good result.
+
+   * - ``tpe_args.gamma``
+     - ``float``
+     - ``0.25``
+     - Controls how many trials are considered "good".
+
+       The number is calculated as "min(gamma * sqrt(N), linear_forgetting)". For example, with the default ``gamma = 0.25`` and ``N = 100`` finished trials, ``min(0.25 * sqrt(100), 25) = 2.5``, so only the top few trials are treated as good.
diff --git a/docs/en_US/Tutorial/AnnotationSpec.rst b/docs/en_US/Tutorial/AnnotationSpec.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ed3a2918a08ddabc28902a7cd7cd55f4c6d35966
--- /dev/null
+++ b/docs/en_US/Tutorial/AnnotationSpec.rst
@@ -0,0 +1,101 @@
+NNI Annotation
+==============
+
+Overview
+--------
+
+To improve the user experience and reduce user effort, we designed an annotation grammar. Using NNI annotation, users can adapt their code to NNI just by adding some standalone annotating strings, which do not affect the execution of the original code.
+
+Below is an example:
+
+.. code-block:: python
+
+   '''@nni.variable(nni.choice(0.1, 0.01, 0.001), name=learning_rate)'''
+   learning_rate = 0.1
+
+In this example, NNI will choose one of several values (0.1, 0.01, 0.001) to assign to the ``learning_rate`` variable. Specifically, the first line is an NNI annotation, which is a single string. The following line is an assignment statement. What NNI does here is replace the right-hand value of this assignment statement according to the information provided by the annotation line.
+
+In this way, users can either run the Python code directly or launch NNI to tune hyper-parameters in the code, without changing any code.
+
+Types of Annotation:
+--------------------
+
+In NNI, there are mainly four types of annotation:
+
+1. Annotate variables
+^^^^^^^^^^^^^^^^^^^^^
+
+   ``'''@nni.variable(sampling_algo, name)'''``
+
+``@nni.variable`` is used in NNI to annotate a variable.
+
+**Arguments**
+
+
+* **sampling_algo**\ : Sampling algorithm that specifies a search space. Users should replace it with a built-in NNI sampling function whose name consists of the ``nni.`` prefix and a search space type specified in `SearchSpaceSpec `__, such as ``choice`` or ``uniform``.
+* **name**\ : The name of the variable that the selected value will be assigned to. Note that this argument should be the same as the left-hand value of the following assignment statement.
+
+There are 10 types to express your search space, as follows:
+
+
+* ``@nni.variable(nni.choice(option1,option2,...,optionN),name=variable)``
+  Which means the variable value is one of the options, which should be given as a list. The elements of options can themselves be stochastic expressions.
+* ``@nni.variable(nni.randint(lower, upper),name=variable)``
+  Which means the variable value is a value like round(uniform(lower, upper)). For now, the type of the chosen value is float. If you want an integer value, please convert it explicitly.
+* ``@nni.variable(nni.uniform(low, high),name=variable)``
+  Which means the variable value is uniformly distributed between low and high.
+* ``@nni.variable(nni.quniform(low, high, q),name=variable)``
+  Which means the variable value is a value like clip(round(uniform(low, high) / q) * q, low, high), where the clip operation is used to constrain the generated value within the bounds.
+* ``@nni.variable(nni.loguniform(low, high),name=variable)``
+  Which means the variable value is drawn according to exp(uniform(low, high)) so that the logarithm of the returned value is uniformly distributed.
+* ``@nni.variable(nni.qloguniform(low, high, q),name=variable)``
+  Which means the variable value is a value like clip(round(loguniform(low, high) / q) * q, low, high), where the clip operation is used to constrain the generated value within the bounds.
+* ``@nni.variable(nni.normal(mu, sigma),name=variable)``
+  Which means the variable value is a real value that's normally distributed with mean mu and standard deviation sigma.
+* ``@nni.variable(nni.qnormal(mu, sigma, q),name=variable)``
+  Which means the variable value is a value like round(normal(mu, sigma) / q) * q.
+* ``@nni.variable(nni.lognormal(mu, sigma),name=variable)``
+  Which means the variable value is a value drawn according to exp(normal(mu, sigma)).
+* ``@nni.variable(nni.qlognormal(mu, sigma, q),name=variable)``
+  Which means the variable value is a value like round(exp(normal(mu, sigma)) / q) * q.
+
+Below is an example:
+
+.. code-block:: python
+
+   '''@nni.variable(nni.choice(0.1, 0.01, 0.001), name=learning_rate)'''
+   learning_rate = 0.1
+
+2. Annotate functions
+^^^^^^^^^^^^^^^^^^^^^
+
+   ``'''@nni.function_choice(*functions, name)'''``
+
+``@nni.function_choice`` is used to choose one from several functions.
+
+**Arguments**
+
+
+* **functions**\ : The candidate functions to select from. Note that each should be a complete function call with arguments, such as ``max_pool(hidden_layer, pool_size)``.
+* **name**\ : The name of the function that will be replaced in the following assignment statement.
+
+An example here is:
+
+.. code-block:: python
+
+   """@nni.function_choice(max_pool(hidden_layer, pool_size), avg_pool(hidden_layer, pool_size), name=max_pool)"""
+   h_pooling = max_pool(hidden_layer, pool_size)
+
+3. Annotate intermediate result
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   ``'''@nni.report_intermediate_result(metrics)'''``
+
+``@nni.report_intermediate_result`` is used to report intermediate results; its usage is the same as ``nni.report_intermediate_result`` in the doc `Write a trial run on NNI <../TrialExample/Trials.rst>`__.
+
+4. Annotate final result
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   ``'''@nni.report_final_result(metrics)'''``
+
+``@nni.report_final_result`` is used to report the final result of the current trial; its usage is the same as ``nni.report_final_result`` in the doc `Write a trial run on NNI <../TrialExample/Trials.rst>`__.
diff --git a/docs/en_US/Tutorial/Contributing.rst b/docs/en_US/Tutorial/Contributing.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6f2fd5021731aed1960a9f86c1c4171b7459727e
--- /dev/null
+++ b/docs/en_US/Tutorial/Contributing.rst
@@ -0,0 +1,74 @@
+Contributing to Neural Network Intelligence (NNI)
+=================================================
+
+Great!! We are always on the lookout for more contributors to our code base.
+
+Firstly, if you are unsure or afraid of anything, just ask or submit the issue or pull request anyway. You won't be yelled at for giving your best effort. The worst that can happen is that you'll be politely asked to change something. We appreciate any sort of contribution and don't want a wall of rules to get in the way of that.
+
+However, for those individuals who want a bit more guidance on the best way to contribute to the project, read on. This document covers all the points we're looking for in your contributions, raising the chances that your contributions are quickly merged or addressed.
+
+If you are looking for a quickstart, get acquainted with our `Get Started `__ guide.
+
+There are a few simple guidelines that you need to follow before providing your hacks.
+
+Raising Issues
+--------------
+
+When raising issues, please specify the following:
+
+
+* Setup details need to be filled in clearly, as specified in the issue template, for the reviewer to check.
+* A scenario where the issue occurred (with details on how to reproduce it).
+* Errors and log messages that are displayed by the software.
+* Any other details that might be useful.
+
+Submit Proposals for New Features
+---------------------------------
+
+
+*
+  There is always something more that is required to make it easier to suit your use-cases. Feel free to join the discussion on new features or raise a PR with your proposed change.
+
+*
+  Fork the repository under your own GitHub handle and clone it. Add, commit, push, and squash (if necessary) your changes, with detailed commit messages, to your fork, from which you can then proceed to making a pull request.
+
+Contributing to Source Code and Bug Fixes
+-----------------------------------------
+
+Provide PRs with appropriate tags for bug fixes or enhancements to the source code. Do follow the correct naming conventions and code styles as you work, and do try to address all code review comments along the way.
+
+If you are looking for how to develop and debug the NNI source code, you can refer to the `How to set up NNI developer environment doc <./SetupNniDeveloperEnvironment.rst>`__ file in the ``docs`` folder.
+
+Similarly for `Quick Start `__. For everything else, refer to the `NNI Home page `__.
+
+Solve Existing Issues
+---------------------
+
+Head over to `issues `__ to find issues where help is needed from contributors. You can find issues tagged with 'good-first-issue' or 'help-wanted' to contribute to.
+
+A person looking to contribute can take up an issue by claiming it in a comment or by assigning their GitHub ID to it. If there is no PR or update in progress on the issue for a week, it reopens for anyone to take up again. High-priority issues and regressions need special consideration; their response time should be a day or so.
+
+Code Styles & Naming Conventions
+--------------------------------
+
+* We follow `PEP8 `__ for Python code and naming conventions; do try to adhere to it when making a pull request or a change. One can also take the help of linters such as ``flake8`` or ``pylint``.
+* We also follow the `NumPy Docstring Style `__ for Python docstring conventions. During `documentation building `__\ , we use `sphinx.ext.napoleon `__ to generate Python API documentation from docstrings.
+* For docstrings, please refer to the `numpydoc docstring guide `__ and the `pandas docstring guide `__
+
+  * For function docstrings, **description**, **Parameters**, and **Returns**/**Yields** are mandatory.
+  * For class docstrings, **description** and **Attributes** are mandatory.
+  * For docstrings that describe a ``dict``, which is commonly used in our hyper-param format description, please refer to the `Internal Guideline on Writing Standards `__
+
+Documentation
+-------------
+
+Our documentation is built with :githublink:`sphinx `.
+
+* Before submitting a documentation change, please **build the homepage locally**: ``cd docs/en_US && make html``; then you can see all the built documentation webpages under the folder ``docs/en_US/_build/html``. It's also highly recommended to take care of **every WARNING** during the build, which is very likely the sign of a **dead link** or other annoying issues.
+
+*
+  For links, please consider using **relative paths** first.
However, if the documentation is written in reStructuredText format, and: + + + * It's an image link which needs to be formatted with embedded html grammar, please use global URL like ``https://user-images.githubusercontent.com/44491713/51381727-e3d0f780-1b4f-11e9-96ab-d26b9198ba65.png``, which can be automatically generated by dragging picture onto `Github Issue `__ Box. + * It cannot be re-formatted by sphinx, such as source code, please use its global URL. For source code that links to our github repo, please use URLs rooted at ``https://github.com/Microsoft/nni/tree/master/`` (:githublink:`mnist.py ` for example). diff --git a/docs/en_US/Tutorial/ExperimentConfig.rst b/docs/en_US/Tutorial/ExperimentConfig.rst new file mode 100644 index 0000000000000000000000000000000000000000..092ae59cee1fb92118b401995f524d370f4a8100 --- /dev/null +++ b/docs/en_US/Tutorial/ExperimentConfig.rst @@ -0,0 +1,1159 @@ +Experiment Config Reference (legacy) +==================================== + +This is the previous version (V1) of experiment configuration specification. It is still supported for now, but we recommend users to use `the new version of experiment configuration (V2) <../reference/experiment_config.rst>`_. + +A config file is needed when creating an experiment. The path of the config file is provided to ``nnictl``. +The config file is in YAML format. +This document describes the rules to write the config file, and provides some examples and templates. + + +* `Experiment Config Reference <#experiment-config-reference>`__ + + * `Template <#template>`__ + * `Configuration Spec <#configuration-spec>`__ + + * `authorName <#authorname>`__ + * `experimentName <#experimentname>`__ + * `trialConcurrency <#trialconcurrency>`__ + * `maxExecDuration <#maxexecduration>`__ + * `versionCheck <#versioncheck>`__ + * `debug <#debug>`__ + * `maxTrialNum <#maxtrialnum>`__ + * `maxTrialDuration <#maxtrialduration>`__ + * `trainingServicePlatform <#trainingserviceplatform>`__ + * `searchSpacePath <#searchspacepath>`__ + * `useAnnotation <#useannotation>`__ + * `multiThread <#multithread>`__ + * `nniManagerIp <#nnimanagerip>`__ + * `logDir <#logdir>`__ + * `logLevel <#loglevel>`__ + * `logCollection <#logcollection>`__ + * `tuner <#tuner>`__ + + * `builtinTunerName <#builtintunername>`__ + * `codeDir <#codedir>`__ + * `classFileName <#classfilename>`__ + * `className <#classname>`__ + * `classArgs <#classargs>`__ + * `gpuIndices <#gpuindices>`__ + * `includeIntermediateResults <#includeintermediateresults>`__ + + * `assessor <#assessor>`__ + + * `builtinAssessorName <#builtinassessorname>`__ + * `codeDir <#codedir-1>`__ + * `classFileName <#classfilename-1>`__ + * `className <#classname-1>`__ + * `classArgs <#classargs-1>`__ + + * `advisor <#advisor>`__ + + * `builtinAdvisorName <#builtinadvisorname>`__ + * `codeDir <#codedir-2>`__ + * `classFileName <#classfilename-2>`__ + * `className <#classname-2>`__ + * `classArgs <#classargs-2>`__ + * `gpuIndices <#gpuindices-1>`__ + + * `trial <#trial>`__ + * `localConfig <#localconfig>`__ + + * `gpuIndices <#gpuindices-2>`__ + * `maxTrialNumPerGpu <#maxtrialnumpergpu>`__ + * `useActiveGpu <#useactivegpu>`__ + + * `machineList <#machinelist>`__ + + * `ip <#ip>`__ + * `port <#port>`__ + * `username <#username>`__ + * `passwd <#passwd>`__ + * `sshKeyPath <#sshkeypath>`__ + * `passphrase <#passphrase>`__ + * `gpuIndices <#gpuindices-3>`__ + * `maxTrialNumPerGpu <#maxtrialnumpergpu-1>`__ + * `useActiveGpu <#useactivegpu-1>`__ + * `pythonPath <#pythonPath>`__ + + * `kubeflowConfig 
<#kubeflowconfig>`__ + + * `operator <#operator>`__ + * `storage <#storage>`__ + * `nfs <#nfs>`__ + * `keyVault <#keyvault>`__ + * `azureStorage <#azurestorage>`__ + * `uploadRetryCount <#uploadretrycount>`__ + + * `paiConfig <#paiconfig>`__ + + * `userName <#username>`__ + * `password <#password>`__ + * `token <#token>`__ + * `host <#host>`__ + * `reuse <#reuse>`__ + + * `Examples <#examples>`__ + + * `Local mode <#local-mode>`__ + * `Remote mode <#remote-mode>`__ + * `PAI mode <#pai-mode>`__ + * `Kubeflow mode <#kubeflow-mode>`__ + * `Kubeflow with azure storage <#kubeflow-with-azure-storage>`__ + +Template +-------- + + +* **Light weight (without Annotation and Assessor)** + +.. code-block:: yaml + + authorName: + experimentName: + trialConcurrency: + maxExecDuration: + maxTrialNum: + #choice: local, remote, pai, kubeflow + trainingServicePlatform: + searchSpacePath: + #choice: true, false, default: false + useAnnotation: + #choice: true, false, default: false + multiThread: + tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: + classArgs: + #choice: maximize, minimize + optimize_mode: + gpuIndices: + trial: + command: + codeDir: + gpuNum: + #machineList can be empty if the platform is local + machineList: + - ip: + port: + username: + passwd: + + +* **Use Assessor** + +.. code-block:: yaml + + authorName: + experimentName: + trialConcurrency: + maxExecDuration: + maxTrialNum: + #choice: local, remote, pai, kubeflow + trainingServicePlatform: + searchSpacePath: + #choice: true, false, default: false + useAnnotation: + #choice: true, false, default: false + multiThread: + tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: + classArgs: + #choice: maximize, minimize + optimize_mode: + gpuIndices: + assessor: + #choice: Medianstop + builtinAssessorName: + classArgs: + #choice: maximize, minimize + optimize_mode: + trial: + command: + codeDir: + gpuNum: + #machineList can be empty if the platform is local + machineList: + - ip: + port: + username: + passwd: + + +* **Use Annotation** + +.. code-block:: yaml + + authorName: + experimentName: + trialConcurrency: + maxExecDuration: + maxTrialNum: + #choice: local, remote, pai, kubeflow + trainingServicePlatform: + #choice: true, false, default: false + useAnnotation: + #choice: true, false, default: false + multiThread: + tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: + classArgs: + #choice: maximize, minimize + optimize_mode: + gpuIndices: + assessor: + #choice: Medianstop + builtinAssessorName: + classArgs: + #choice: maximize, minimize + optimize_mode: + trial: + command: + codeDir: + gpuNum: + #machineList can be empty if the platform is local + machineList: + - ip: + port: + username: + passwd: + +Configuration Spec +------------------ + +authorName +^^^^^^^^^^ + +Required. String. + +The name of the author who create the experiment. + +*TBD: add default value.* + +experimentName +^^^^^^^^^^^^^^ + +Required. String. + +The name of the experiment created. + +*TBD: add default value.* + +trialConcurrency +^^^^^^^^^^^^^^^^ + +Required. Integer between 1 and 99999. + +Specifies the max num of trial jobs run simultaneously. + +If trialGpuNum is bigger than the free gpu numbers, and the trial jobs running simultaneously can not reach **trialConcurrency** number, some trial jobs will be put into a queue to wait for gpu allocation. + +maxExecDuration +^^^^^^^^^^^^^^^ + +Optional. String. Default: 999d. + +**maxExecDuration** specifies the max duration time of an experiment. 
+The unit is one of {**s**\ , **m**\ , **h**\ , **d**\ }, meaning {*seconds*\ , *minutes*\ , *hours*\ , *days*\ }.
+
+Note: the maxExecDuration spec sets the duration of an experiment, not of a trial job. When the experiment reaches the maximum duration, it will not stop, but it can no longer submit new trial jobs.
+
+versionCheck
+^^^^^^^^^^^^
+
+Optional. Bool. Default: true.
+
+NNI will check the version of the nniManager process against the version of the trialKeeper on the remote, pai and kubernetes platforms. If you want to disable the version check, you can set versionCheck to false.
+
+debug
+^^^^^
+
+Optional. Bool. Default: false.
+
+Debug mode sets versionCheck to false and logLevel to 'debug'.
+
+maxTrialNum
+^^^^^^^^^^^
+
+Optional. Integer between 1 and 99999. Default: 99999.
+
+Specifies the maximum number of trial jobs created by NNI, including succeeded and failed jobs.
+
+maxTrialDuration
+^^^^^^^^^^^^^^^^
+
+Optional. String. Default: 999d.
+
+**maxTrialDuration** specifies the maximum duration of each trial job. The unit is one of {**s**\ , **m**\ , **h**\ , **d**\ }, meaning {*seconds*\ , *minutes*\ , *hours*\ , *days*\ }. If a trial job reaches the maximum duration, it will be stopped.
+
+trainingServicePlatform
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Required. String.
+
+Specifies the platform to run the experiment, including **local**\ , **remote**\ , **pai**\ , **kubeflow**\ , **frameworkcontroller**.
+
+
+*
+  **local** runs an experiment on the local Ubuntu machine.
+
+*
+  **remote** submits trial jobs to remote Ubuntu machines; the **machineList** field should be filled in to set up the SSH connection to the remote machines.
+
+*
+  **pai** submits trial jobs to `OpenPAI `__ of Microsoft. For more details of pai configuration, please refer to `Guide to PAI Mode <../TrainingService/PaiMode.rst>`__
+
+*
+  **kubeflow** submits trial jobs to `kubeflow `__\ ; NNI supports kubeflow based on normal kubernetes and `azure kubernetes `__. For details please refer to `Kubeflow Docs <../TrainingService/KubeflowMode.rst>`__
+
+*
+  **adl** submits trial jobs to `AdaptDL `__\ ; NNI supports AdaptDL on Kubernetes clusters. For details please refer to `AdaptDL Docs <../TrainingService/AdaptDLMode.rst>`__
+
+*
+  TODO: explain frameworkcontroller.
+
+searchSpacePath
+^^^^^^^^^^^^^^^
+
+Optional. Path to an existing file.
+
+Specifies the path of the search space file, which should be a valid path on the local Linux machine.
+
+The only case in which **searchSpacePath** can be omitted is ``useAnnotation=True``.
+
+useAnnotation
+^^^^^^^^^^^^^
+
+Optional. Bool. Default: false.
+
+Use annotation to analyze trial code and generate the search space.
+
+Note: if **useAnnotation** is true, the searchSpacePath field should be removed.
+
+multiThread
+^^^^^^^^^^^
+
+Optional. Bool. Default: false.
+
+Enable multi-thread mode for the dispatcher. If multiThread is enabled, the dispatcher will start a thread to process each command from NNI Manager.
+
+nniManagerIp
+^^^^^^^^^^^^
+
+Optional. String. Default: eth0 device IP.
+
+Sets the IP address of the machine on which the NNI manager process runs. If it is not set, the eth0 device IP will be used instead.
+
+Note: run ``ifconfig`` on the NNI manager's machine to check whether an eth0 device exists. If it does not, setting **nniManagerIp** explicitly is recommended.
+
+logDir
+^^^^^^
+
+Optional. Path to a directory. Default: ``/nni-experiments``.
+
+Configures the directory to store logs and data of the experiment.
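+
+As a hedged sketch, these duration- and logging-related fields might appear together at the top level of a config file like this (all values are illustrative only):
+
+.. code-block:: yaml
+
+   maxExecDuration: 24h      # the experiment stops submitting new trials after one day
+   maxTrialNum: 100
+   maxTrialDuration: 2h      # any single trial is stopped after two hours
+   versionCheck: false
+   logDir: /home/user/my-nni-experiments   # hypothetical directory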
+
+logLevel
+^^^^^^^^
+
+Optional. String. Default: ``info``.
+
+Sets the log level for the experiment. Available log levels are: ``trace``\ , ``debug``\ , ``info``\ , ``warning``\ , ``error``\ , ``fatal``.
+
+logCollection
+^^^^^^^^^^^^^
+
+Optional. ``http`` or ``none``. Default: ``none``.
+
+Sets the way to collect logs on the remote, pai, kubeflow and frameworkcontroller platforms. There are two ways to collect logs. With ``http``\ , the trial keeper posts the log content back via http requests, which may slow down log processing in the trialKeeper. With ``none``\ , the trial keeper does not post log content back and only posts job metrics. If your log content is too big, consider setting this parameter to ``none``.
+
+tuner
+^^^^^
+
+Required.
+
+Specifies the tuner algorithm in the experiment. There are two ways to set the tuner. One way is to use a tuner provided by the NNI sdk (built-in tuners), in which case you need to set **builtinTunerName** and **classArgs**. The other way is to use your own tuner file, in which case **codeDir**\ , **classFileName**\ , **className** and **classArgs** are needed. *Users must choose exactly one way.*
+
+builtinTunerName
+^^^^^^^^^^^^^^^^
+
+Required if using built-in tuners. String.
+
+Specifies the name of the built-in tuner; the NNI sdk provides different tuners introduced `here <../Tuner/BuiltinTuner.rst>`__.
+
+codeDir
+^^^^^^^
+
+Required if using customized tuners. Path relative to the location of the config file.
+
+Specifies the directory of the tuner code.
+
+classFileName
+^^^^^^^^^^^^^
+
+Required if using customized tuners. File path relative to **codeDir**.
+
+Specifies the name of the tuner file.
+
+className
+^^^^^^^^^
+
+Required if using customized tuners. String.
+
+Specifies the name of the tuner class.
+
+classArgs
+^^^^^^^^^
+
+Optional. Key-value pairs. Default: empty.
+
+Specifies the arguments of the tuner algorithm. Please refer to `this file <../Tuner/BuiltinTuner.rst>`__ for the configurable arguments of each built-in tuner.
+
+gpuIndices
+^^^^^^^^^^
+
+Optional. String. Default: empty.
+
+Specifies the GPUs that can be used by the tuner process. Single or multiple GPU indices can be specified; multiple GPU indices are separated by a comma ``,``. For example, ``1``\ , or ``0,1,3``. If the field is not set, no GPU will be visible to the tuner (by setting ``CUDA_VISIBLE_DEVICES`` to an empty string).
+
+includeIntermediateResults
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Optional. Bool. Default: false.
+
+If **includeIntermediateResults** is true, the last intermediate result of a trial that is early stopped by the assessor is sent to the tuner as the final result.
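+
+As an illustration, a built-in tuner entry combining the fields above might look like the following sketch (the GPU indices are illustrative):
+
+.. code-block:: yaml
+
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+     gpuIndices: 0,1
+     includeIntermediateResults: true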
+
+assessor
+^^^^^^^^
+
+Specifies the assessor algorithm to run in an experiment. Similar to tuners, there are two ways to set the assessor. One way is to use an assessor provided by the NNI sdk; users need to set **builtinAssessorName** and **classArgs**. The other way is to use your own assessor file; users need to set **codeDir**\ , **classFileName**\ , **className** and **classArgs**. *Users must choose exactly one way.*
+
+By default, no assessor is enabled.
+
+builtinAssessorName
+^^^^^^^^^^^^^^^^^^^
+
+Required if using built-in assessors. String.
+
+Specifies the name of the built-in assessor; the NNI sdk provides different assessors introduced `here <../Assessor/BuiltinAssessor.rst>`__.
+
+codeDir
+^^^^^^^
+
+Required if using customized assessors. Path relative to the location of the config file.
+
+Specifies the directory of the assessor code.
+
+classFileName
+^^^^^^^^^^^^^
+
+Required if using customized assessors. File path relative to **codeDir**.
+
+Specifies the name of the assessor file.
+
+className
+^^^^^^^^^
+
+Required if using customized assessors. String.
+
+Specifies the name of the assessor class.
+
+classArgs
+^^^^^^^^^
+
+Optional. Key-value pairs. Default: empty.
+
+Specifies the arguments of the assessor algorithm.
+
+advisor
+^^^^^^^
+
+Optional.
+
+Specifies the advisor algorithm in the experiment. Similar to tuners and assessors, there are two ways to specify the advisor. One way is to use an advisor provided by the NNI sdk, which requires setting **builtinAdvisorName** and **classArgs**. The other way is to use your own advisor file, which requires setting **codeDir**\ , **classFileName**\ , **className** and **classArgs**.
+
+When the advisor is enabled, the settings of tuner and assessor will be bypassed.
+
+builtinAdvisorName
+^^^^^^^^^^^^^^^^^^
+
+Specifies the name of a built-in advisor. The NNI sdk provides `BOHB <../Tuner/BohbAdvisor.rst>`__ and `Hyperband <../Tuner/HyperbandAdvisor.rst>`__.
+
+codeDir
+^^^^^^^
+
+Required if using customized advisors. Path relative to the location of the config file.
+
+Specifies the directory of the advisor code.
+
+classFileName
+^^^^^^^^^^^^^
+
+Required if using customized advisors. File path relative to **codeDir**.
+
+Specifies the name of the advisor file.
+
+className
+^^^^^^^^^
+
+Required if using customized advisors. String.
+
+Specifies the name of the advisor class.
+
+classArgs
+^^^^^^^^^
+
+Optional. Key-value pairs. Default: empty.
+
+Specifies the arguments of the advisor.
+
+gpuIndices
+^^^^^^^^^^
+
+Optional. String. Default: empty.
+
+Specifies the GPUs that can be used. Single or multiple GPU indices can be specified; multiple GPU indices are separated by a comma ``,``. For example, ``1``\ , or ``0,1,3``. If the field is not set, no GPU will be visible to the advisor (by setting ``CUDA_VISIBLE_DEVICES`` to an empty string).
+
+trial
+^^^^^
+
+Required. Key-value pairs.
+
+In local and remote mode, the following keys are required.
+
+
+*
+  **command**\ : Required string. Specifies the command to run the trial process.
+
+*
+  **codeDir**\ : Required string. Specifies the directory of your own trial file. This directory will be automatically uploaded in remote mode.
+
+*
+  **gpuNum**\ : Optional integer. Specifies the number of GPUs used to run the trial process. The default value is 0.
+
+In PAI mode, the following keys are required.
+
+
+*
+  **command**\ : Required string. Specifies the command to run the trial process.
+
+*
+  **codeDir**\ : Required string. Specifies the directory of your own trial file. Files in the directory will be uploaded in PAI mode.
+
+*
+  **gpuNum**\ : Required integer. Specifies the number of GPUs used to run the trial process.
+
+*
+  **cpuNum**\ : Required integer. Specifies the number of CPUs to be used in the pai container.
+
+*
+  **memoryMB**\ : Required integer. Sets the memory size to be used in the pai container, in megabytes.
+
+*
+  **image**\ : Required string. Sets the image to be used in pai.
+
+*
+  **authFile**\ : Optional string. Used to provide a Docker registry that needs authentication for the image pull in PAI. `Reference `__.
+
+*
+  **shmMB**\ : Optional integer. Shared memory size of the container.
+
+*
+  **portList**\ : List of key-value pairs with ``label``\ , ``beginAt``\ , ``portNumber``. See the `job tutorial of PAI `__ for details.
+
+.. cannot find `Reference `__ and `job tutorial of PAI `__
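+
+To tie the PAI keys together, here is a hedged sketch of a PAI-mode trial section (resource numbers are illustrative):
+
+.. code-block:: yaml
+
+   trial:
+     command: python3 mnist.py
+     codeDir: .
+     gpuNum: 1
+     cpuNum: 2
+     memoryMB: 8192
+     shmMB: 1024
+     image: msranni/nni:latest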
+
+In Kubeflow mode, the following keys are required.
+
+
+*
+  **codeDir**\ : The local directory where the code files are located.
+
+*
+  **ps**\ : An optional configuration for kubeflow's tensorflow-operator, which includes
+
+
+  *
+    **replicas**\ : The replica number of the **ps** role.
+
+  *
+    **command**\ : The run script in the **ps**\ 's container.
+
+  *
+    **gpuNum**\ : The number of GPUs to be used in the **ps** container.
+
+  *
+    **cpuNum**\ : The number of CPUs to be used in the **ps** container.
+
+  *
+    **memoryMB**\ : The memory size of the container.
+
+  *
+    **image**\ : The image to be used in **ps**.
+
+*
+  **worker**\ : An optional configuration for kubeflow's tensorflow-operator.
+
+
+  *
+    **replicas**\ : The replica number of the **worker** role.
+
+  *
+    **command**\ : The run script in the **worker**\ 's container.
+
+  *
+    **gpuNum**\ : The number of GPUs to be used in the **worker** container.
+
+  *
+    **cpuNum**\ : The number of CPUs to be used in the **worker** container.
+
+  *
+    **memoryMB**\ : The memory size of the container.
+
+  *
+    **image**\ : The image to be used in **worker**.
+
+localConfig
+^^^^^^^^^^^
+
+Optional in local mode. Key-value pairs.
+
+Only applicable if **trainingServicePlatform** is set to ``local``\ ; otherwise there should be no **localConfig** section in the configuration file.
+
+gpuIndices
+^^^^^^^^^^
+
+Optional. String. Default: none.
+
+Used to specify designated GPU devices for NNI. If it is set, only the specified GPU devices are used for NNI trial jobs. Single or multiple GPU indices can be specified; multiple GPU indices should be separated with a comma (\ ``,``\ ), such as ``1`` or ``0,1,3``. By default, all available GPUs will be used.
+
+maxTrialNumPerGpu
+^^^^^^^^^^^^^^^^^
+
+Optional. Integer. Default: 1.
+
+Used to specify the maximum number of concurrent trials on a GPU device.
+
+useActiveGpu
+^^^^^^^^^^^^
+
+Optional. Bool. Default: false.
+
+Used to specify whether to use a GPU on which other processes are running. By default, NNI uses a GPU only if there is no other active process on it. If **useActiveGpu** is set to true, NNI will use the GPU regardless of other processes. This field is not applicable for NNI on Windows.
+
+machineList
+^^^^^^^^^^^
+
+Required in remote mode. A list of key-value pairs with the following keys.
+
+ip
+^^
+
+Required. An IP address or hostname that is accessible from the current machine.
+
+The IP address or hostname of the remote machine.
+
+port
+^^^^
+
+Optional. Integer. Valid port. Default: 22.
+
+The ssh port to be used to connect to the machine.
+
+username
+^^^^^^^^
+
+Required if authenticating with username/password. String.
+
+The account of the remote machine.
+
+passwd
+^^^^^^
+
+Required if authenticating with username/password. String.
+
+Specifies the password of the account.
+
+sshKeyPath
+^^^^^^^^^^
+
+Required if authenticating with an ssh key. Path to the private key file.
+
+If users use an ssh key to log in to the remote machine, **sshKeyPath** should be a valid path to an ssh key file.
+
+*Note: if users set passwd and sshKeyPath simultaneously, NNI will try passwd first.*
+
+passphrase
+^^^^^^^^^^
+
+Optional. String.
+
+Used to protect the ssh key; it can be left empty if the key has no passphrase.
+
+gpuIndices
+^^^^^^^^^^
+
+Optional. String. Default: none.
+
+Used to specify designated GPU devices for NNI. If it is set, only the specified GPU devices are used for NNI trial jobs. Single or multiple GPU indices can be specified; multiple GPU indices should be separated with a comma (\ ``,``\ ), such as ``1`` or ``0,1,3``. By default, all available GPUs will be used.
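+
+Combining the keys above, a single ``machineList`` entry using key-based authentication might look like this sketch (host, key path and passphrase are placeholders):
+
+.. code-block:: yaml
+
+   machineList:
+     - ip: 10.10.10.10
+       port: 22
+       username: test
+       sshKeyPath: /home/test/.ssh/id_rsa
+       passphrase: qwert
+       gpuIndices: 0,1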
+
+maxTrialNumPerGpu
+^^^^^^^^^^^^^^^^^
+
+Optional. Integer. Default: 1.
+
+Used to specify the maximum number of concurrent trials on a GPU device.
+
+useActiveGpu
+^^^^^^^^^^^^
+
+Optional. Bool. Default: false.
+
+Used to specify whether to use a GPU on which other processes are running. By default, NNI uses a GPU only if there is no other active process on it. If **useActiveGpu** is set to true, NNI will use the GPU regardless of other processes. This field is not applicable for NNI on Windows.
+
+pythonPath
+^^^^^^^^^^
+
+Optional. String.
+
+Users can configure the python path environment on the remote machine by setting **pythonPath**.
+
+remoteConfig
+^^^^^^^^^^^^
+
+Optional field in remote mode. Users can set per-machine information in the ``machineList`` field, and set the global configuration for remote mode in this field.
+
+reuse
+^^^^^
+
+Optional. Bool. Default: ``false``. This is an experimental feature.
+
+If it is true, NNI will reuse remote jobs to run as many trials as possible, which saves the time of creating new jobs. Users need to make sure each trial can run independently in the same job; for example, avoid loading checkpoints from previous trials.
+
+kubeflowConfig
+^^^^^^^^^^^^^^
+
+operator
+^^^^^^^^
+
+Required. String. Has to be ``tf-operator`` or ``pytorch-operator``.
+
+Specifies the kubeflow operator to be used.
+
+storage
+^^^^^^^
+
+Optional. String. Default: ``nfs``.
+
+Specifies the storage type of kubeflow, including ``nfs`` and ``azureStorage``.
+
+nfs
+^^^
+
+Required if using nfs. Key-value pairs.
+
+
+*
+  **server** is the host of the nfs server.
+
+*
+  **path** is the mounted path of nfs.
+
+keyVault
+^^^^^^^^
+
+Required if using azure storage. Key-value pairs.
+
+Set **keyVault** to store the private key of your azure storage account. Refer to `the doc `__ .
+
+
+*
+  **vaultName** is the value of ``--vault-name`` used in the az command.
+
+*
+  **name** is the value of ``--name`` used in the az command.
+
+azureStorage
+^^^^^^^^^^^^
+
+Required if using azure storage. Key-value pairs.
+
+Sets the azure storage account used to store code files.
+
+
+*
+  **accountName** is the name of the azure storage account.
+
+*
+  **azureShare** is the share of the azure file storage.
+
+uploadRetryCount
+^^^^^^^^^^^^^^^^
+
+Required if using azure storage. Integer between 1 and 99999.
+
+If uploading files to azure storage fails, NNI will retry the upload; this field specifies the number of attempts to re-upload files.
+
+paiConfig
+^^^^^^^^^
+
+userName
+^^^^^^^^
+
+Required. String.
+
+The user name of your pai account.
+
+password
+^^^^^^^^
+
+Required if using password authentication. String.
+
+The password of the pai account.
+
+token
+^^^^^
+
+Required if using token authentication. String.
+
+A personal access token that can be retrieved from the PAI portal.
+
+host
+^^^^
+
+Required. String.
+
+The hostname or IP address of PAI.
+
+reuse
+^^^^^
+
+Optional. Bool. Default: ``false``. This is an experimental feature.
+
+If it is true, NNI will reuse OpenPAI jobs to run as many trials as possible, which saves the time of creating new jobs. Users need to make sure each trial can run independently in the same job; for example, avoid loading checkpoints from previous trials.
+
+sharedStorage
+^^^^^^^^^^^^^
+
+storageType
+^^^^^^^^^^^
+
+Required. String.
+
+The type of the storage; ``NFS`` and ``AzureBlob`` are supported.
+
+localMountPoint
+^^^^^^^^^^^^^^^
+
+Required. String.
+
+The absolute or relative path where the storage has been or will be mounted locally.
+If the path does not exist, it will be created automatically. It is recommended to use an absolute path, e.g. ``/tmp/nni-shared-storage``.
+
+remoteMountPoint
+^^^^^^^^^^^^^^^^
+
+Required. String.
+
+The absolute or relative path where the storage will be mounted remotely. If the path does not exist, it will be created automatically. Note that the directory must be empty if using AzureBlob. It is recommended to use a relative path, e.g. ``./nni-shared-storage``.
+
+localMounted
+^^^^^^^^^^^^
+
+Required. String.
+
+One of ``usermount``, ``nnimount`` or ``nomount``. ``usermount`` means you have already mounted this storage on localMountPoint. ``nnimount`` means nni will try to mount this storage on localMountPoint. ``nomount`` means the storage will not be mounted on the local machine; partial storages will be supported in the future.
+
+nfsServer
+^^^^^^^^^
+
+Optional. String.
+
+Required if using NFS storage. The NFS server host.
+
+exportedDirectory
+^^^^^^^^^^^^^^^^^
+
+Optional. String.
+
+Required if using NFS storage. The exported directory of the NFS server.
+
+storageAccountName
+^^^^^^^^^^^^^^^^^^
+
+Optional. String.
+
+Required if using AzureBlob storage. The azure storage account name.
+
+storageAccountKey
+^^^^^^^^^^^^^^^^^
+
+Optional. String.
+
+Required if using AzureBlob storage. The azure storage account key.
+
+containerName
+^^^^^^^^^^^^^
+
+Optional. String.
+
+Required if using AzureBlob storage. The AzureBlob container name.
+
+Examples
+--------
+
+Local mode
+^^^^^^^^^^
+
+If users want to run trial jobs on the local machine and use annotation to generate the search space, they can use the following config:
+
+.. code-block:: yaml
+
+   authorName: test
+   experimentName: test_experiment
+   trialConcurrency: 3
+   maxExecDuration: 1h
+   maxTrialNum: 10
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: local
+   #choice: true, false
+   useAnnotation: true
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     command: python3 mnist.py
+     codeDir: /nni/mnist
+     gpuNum: 0
+
+You can also add an assessor configuration:
+
+.. code-block:: yaml
+
+   authorName: test
+   experimentName: test_experiment
+   trialConcurrency: 3
+   maxExecDuration: 1h
+   maxTrialNum: 10
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: local
+   searchSpacePath: /nni/search_space.json
+   #choice: true, false
+   useAnnotation: false
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   assessor:
+     #choice: Medianstop
+     builtinAssessorName: Medianstop
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     command: python3 mnist.py
+     codeDir: /nni/mnist
+     gpuNum: 0
+
+Or you can specify your own tuner and assessor files as follows:
+
+.. code-block:: yaml
+
+   authorName: test
+   experimentName: test_experiment
+   trialConcurrency: 3
+   maxExecDuration: 1h
+   maxTrialNum: 10
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: local
+   searchSpacePath: /nni/search_space.json
+   #choice: true, false
+   useAnnotation: false
+   tuner:
+     codeDir: /nni/tuner
+     classFileName: mytuner.py
+     className: MyTuner
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   assessor:
+     codeDir: /nni/assessor
+     classFileName: myassessor.py
+     className: MyAssessor
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     command: python3 mnist.py
+     codeDir: /nni/mnist
+     gpuNum: 0
+
+Remote mode
+^^^^^^^^^^^
+
+To run trial jobs on remote machines, users can specify the remote machine information in the following format:
+
+.. code-block:: yaml
+
+   authorName: test
+   experimentName: test_experiment
+   trialConcurrency: 3
+   maxExecDuration: 1h
+   maxTrialNum: 10
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: remote
+   searchSpacePath: /nni/search_space.json
+   #choice: true, false
+   useAnnotation: false
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     command: python3 mnist.py
+     codeDir: /nni/mnist
+     gpuNum: 0
+   #machineList can be empty if the platform is local
+   machineList:
+     - ip: 10.10.10.10
+       port: 22
+       username: test
+       passwd: test
+     - ip: 10.10.10.11
+       port: 22
+       username: test
+       passwd: test
+     - ip: 10.10.10.12
+       port: 22
+       username: test
+       sshKeyPath: /nni/sshkey
+       passphrase: qwert
+       # Below is an example of specifying python environment.
+       pythonPath: ${replace_to_python_environment_path_in_your_remote_machine}
+
+PAI mode
+^^^^^^^^
+
+.. code-block:: yaml
+
+   authorName: test
+   experimentName: nni_test1
+   trialConcurrency: 1
+   maxExecDuration: 500h
+   maxTrialNum: 1
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: pai
+   searchSpacePath: search_space.json
+   #choice: true, false
+   useAnnotation: false
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution, BatchTuner
+     #SMAC (SMAC should be installed through nnictl)
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     command: python3 main.py
+     codeDir: .
+     gpuNum: 4
+     cpuNum: 2
+     memoryMB: 10000
+     #The docker image to run NNI job on pai
+     image: msranni/nni:latest
+   paiConfig:
+     #The username to login pai
+     userName: test
+     #The password to login pai
+     passWord: test
+     #The host of restful server of pai
+     host: 10.10.10.10
+
+Kubeflow mode
+^^^^^^^^^^^^^
+
+   kubeflow with nfs storage.
+
+.. code-block:: yaml
+
+   authorName: default
+   experimentName: example_mni
+   trialConcurrency: 1
+   maxExecDuration: 1h
+   maxTrialNum: 1
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: kubeflow
+   searchSpacePath: search_space.json
+   #choice: true, false
+   useAnnotation: false
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   trial:
+     codeDir: .
+     worker:
+       replicas: 1
+       command: python3 mnist.py
+       gpuNum: 0
+       cpuNum: 1
+       memoryMB: 8192
+       image: msranni/nni:latest
+   kubeflowConfig:
+     operator: tf-operator
+     nfs:
+       server: 10.10.10.10
+       path: /var/nfs/general
+
+Kubeflow with azure storage
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   authorName: default
+   experimentName: example_mni
+   trialConcurrency: 1
+   maxExecDuration: 1h
+   maxTrialNum: 1
+   #choice: local, remote, pai, kubeflow
+   trainingServicePlatform: kubeflow
+   searchSpacePath: search_space.json
+   #choice: true, false
+   useAnnotation: false
+   #nniManagerIp: 10.10.10.10
+   tuner:
+     #choice: TPE, Random, Anneal, Evolution
+     builtinTunerName: TPE
+     classArgs:
+       #choice: maximize, minimize
+       optimize_mode: maximize
+   assessor:
+     builtinAssessorName: Medianstop
+     classArgs:
+       optimize_mode: maximize
+   trial:
+     codeDir: .
+     worker:
+       replicas: 1
+       command: python3 mnist.py
+       gpuNum: 0
+       cpuNum: 1
+       memoryMB: 4096
+       image: msranni/nni:latest
+   kubeflowConfig:
+     operator: tf-operator
+     keyVault:
+       vaultName: Contoso-Vault
+       name: AzureStorageAccountKey
+     azureStorage:
+       accountName: storage
+       azureShare: share01
diff --git a/docs/en_US/Tutorial/FAQ.rst b/docs/en_US/Tutorial/FAQ.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bead88fb43fc5c5d6ddcd584298864939cefd224
--- /dev/null
+++ b/docs/en_US/Tutorial/FAQ.rst
@@ -0,0 +1,88 @@
+FAQ
+===
+
+This page collects frequently asked questions and their answers.
+
+tmp folder is full
+^^^^^^^^^^^^^^^^^^
+
+nnictl uses the tmp folder as a temporary location to copy files under codeDir when creating an experiment.
+When you encounter errors like the one below, try cleaning up the **tmp** folder first.
+
+..
+
+   OSError: [Errno 28] No space left on device
+
+
+Cannot get trials' metrics in OpenPAI mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In OpenPAI training mode, we start a rest server in NNI Manager that listens on port 51189 to receive metrics reported from trials running in the OpenPAI cluster. If you don't see any metrics in the WebUI in OpenPAI mode, check the machine where NNI manager runs to make sure port 51189 is open in the firewall rules.
+
+Segmentation Fault (core dumped) when installing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: text
+
+   make: *** [install-XXX] Segmentation fault (core dumped)
+
+
+Please try the following solutions in turn:
+
+
+* Update or reinstall your current Python's pip, e.g. ``python3 -m pip install -U pip``
+* Install NNI with the ``--no-cache-dir`` flag, e.g. ``python3 -m pip install nni --no-cache-dir``
+
+Job management error: getIPV4Address() failed because os.networkInterfaces().eth0 is undefined.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Your machine doesn't have an eth0 device; please set `nniManagerIp `__ in your config file manually.
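+
+For example, add the field at the top level of your config file (the IP below is a placeholder):
+
+.. code-block:: yaml
+
+   # IP of the machine on which the NNI manager process runs
+   nniManagerIp: 10.10.10.10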
+
+Exceed the MaxDuration but didn't stop
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When the experiment reaches its maximum duration, nniManager will not create new trials, but the existing trials will continue to run unless the user manually stops the experiment.
+
+Could not stop an experiment using ``nnictl stop``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you upgrade NNI or delete some of its config files while an experiment is running, this kind of issue may happen because of the lost config files. You can use ``ps -ef | grep node`` to find the PID of your experiment, and use ``kill -9 {pid}`` to kill it manually.
+
+Could not get ``default metric`` in webUI of virtual machines
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the network mode as bridge mode, or another mode that makes the virtual machine's host accessible from external machines, and make sure the port of the virtual machine is not blocked by a firewall.
+
+Could not open webUI link
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Being unable to open the WebUI may have the following causes:
+
+
+* ``http://127.0.0.1``\ , ``http://172.17.0.1`` and ``http://10.0.0.15`` refer to localhost. If you start your experiment on a server or remote machine, you can replace the IP with your server IP to view the WebUI, like ``http://[your_server_ip]:8080``
+* If you still can't see the WebUI after using the server IP, you can check the proxy and firewall settings of your machine, or use the browser on the machine where you started your NNI experiment.
+* Another reason may be that your experiment failed and NNI could not get the experiment information. You can check the log of NNIManager in the following directory: ``~/nni-experiments/[your_experiment_id]/log/nnimanager.log``
+
+Restful server start failed
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Probably it's a problem with your network config. Here is a checklist.
+
+
+* You might need to link ``127.0.0.1`` with ``localhost``. Add a line ``127.0.0.1 localhost`` to ``/etc/hosts``.
+* It's also possible that you have set some proxy config. Check your environment for variables like ``HTTP_PROXY`` or ``HTTPS_PROXY`` and unset them if they are set.
+
+NNI on Windows problems
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Please refer to `NNI on Windows `__
+
+More FAQ issues
+^^^^^^^^^^^^^^^
+
+`NNI Issues with FAQ labels `__
+
+Help us improve
+^^^^^^^^^^^^^^^
+
+Please search https://github.com/Microsoft/nni/issues to see whether other people have already reported the problem, and create a new issue if no existing one covers it.
diff --git a/docs/en_US/Tutorial/HowToDebug.rst b/docs/en_US/Tutorial/HowToDebug.rst
new file mode 100644
index 0000000000000000000000000000000000000000..771b5206c17602b2a306188834ff7addb903e85d
--- /dev/null
+++ b/docs/en_US/Tutorial/HowToDebug.rst
@@ -0,0 +1,111 @@
+**How to Debug in NNI**
+===========================
+
+Overview
+--------
+
+There are three parts of NNI that might produce logs: nnimanager, dispatcher and trial. Here we introduce them succinctly. For more information, please refer to `Overview <../Overview.rst>`__.
+
+
+* **NNI controller**\ : The NNI controller (nnictl) is the nni command-line tool used to manage experiments (e.g., start an experiment).
+* **nnimanager**\ : nnimanager is the core of NNI, whose log is important when the whole experiment fails (e.g., no webUI or the training service fails)
+* **Dispatcher**\ : The dispatcher calls the methods of **Tuner** and **Assessor**. Logs of the dispatcher are related to the tuner or assessor code.
+
+  * **Tuner**\ : A tuner is an AutoML algorithm, which generates a new configuration for the next try. A new trial will run with this configuration.
+  * **Assessor**\ : An assessor analyzes a trial's intermediate results (e.g., periodically evaluated accuracy on a test dataset) to tell whether this trial can be early stopped or not.
+
+* **Trial**\ : Trial code is the code you write to run your experiment; each trial is an individual attempt at applying a new configuration (e.g., a set of hyperparameter values, a specific neural architecture).
+
+Where is the log
+----------------
+
+There are three kinds of logs in NNI. When creating a new experiment, you can specify the log level as debug by adding ``--debug``. Besides, you can set a more detailed log level in your configuration file with the
+``logLevel`` keyword. Available logLevels are: ``trace``\ , ``debug``\ , ``info``\ , ``warning``\ , ``error``\ , ``fatal``.
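+
+As a sketch, the corresponding field in a V1 configuration file (``--debug`` on ``nnictl create`` has a similar effect, as described above):
+
+.. code-block:: yaml
+
+   logLevel: debug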
+
+NNI controller
+^^^^^^^^^^^^^^
+
+All possible errors that happen when launching an NNI experiment can be found here.
+
+You can use ``nnictl log stderr`` to find error information. For more options please refer to `NNICTL `__
+
+Experiment Root Directory
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every experiment has a root folder, which is shown in the top-right corner of the webUI. If the webUI fails, you can assemble it yourself by replacing ``experiment_id`` with your actual experiment ID in the path ``~/nni-experiments/experiment_id/``. The ``experiment_id`` is shown when you run ``nnictl create ...`` to create a new experiment.
+
+..
+
+   For flexibility, we also offer a ``logDir`` option in your configuration, which specifies the directory to store all experiments (defaults to ``~/nni-experiments``\ ). Please refer to `Configuration `__ for more details.
+
+
+Under that directory, there is another directory named ``log``\ , where ``nnimanager.log`` and ``dispatcher.log`` are placed.
+
+Trial Root Directory
+^^^^^^^^^^^^^^^^^^^^
+
+Usually in the webUI, you can click ``+`` on the left of every trial to expand it and see that trial's log path.
+
+Besides, there is another directory under the experiment root directory, named ``trials``\ , which stores all the trials.
+Every trial has a unique id as its directory name. In this directory, a file named ``stderr`` records the trial's errors and another named ``trial.log`` records the trial's log.
+
+Different kinds of errors
+-------------------------
+
+There are different kinds of errors. However, they can be divided into three categories based on their severity, so when NNI fails, check each part sequentially.
+
+Generally, if the webUI is started successfully, there is a ``Status`` in the ``Overview`` tab, serving as a possible indicator of what kind of error happened. Otherwise you should check manually.
+
+**NNI** Fails
+^^^^^^^^^^^^^^^^^
+
+This is the most serious error. When it happens, the whole experiment fails and no trial will be run. Usually this is related to an installation problem.
+
+When this happens, you should check ``nnictl``\ 's error output file ``stderr`` (i.e., nnictl log stderr) and then the ``nnimanager``\ 's log to find whether there is any error.
+
+**Dispatcher** Fails
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The dispatcher fails. For new users of NNI, this usually means that the tuner failed. You can check the dispatcher's log to see what happened to your dispatcher. For built-in tuners, common errors include an invalid search space (an unsupported type of search space, or an inconsistency between the initialization args in the configuration file and the actual args of the tuner's ``__init__`` function).
+
+Take the latter situation as an example. If you write a customized tuner whose ``__init__`` function has an argument called ``optimize_mode``\ , which you do not provide in your configuration file, NNI will fail to run your tuner and the experiment fails. You can see errors in the webUI like:
+
+
+.. image:: ../../img/dispatcher_error.jpg
+   :target: ../../img/dispatcher_error.jpg
+   :alt:
+
+
+Here we can see it is a dispatcher error. So we can check the dispatcher's log, which might look like:
+
+.. code-block:: bash
+
+   [2019-02-19 19:36:45] DEBUG (nni.main/MainThread) START
+   [2019-02-19 19:36:47] ERROR (nni.main/MainThread) __init__() missing 1 required positional arguments: 'optimize_mode'
+   Traceback (most recent call last):
+     File "/usr/lib/python3.7/site-packages/nni/__main__.py", line 202, in
+       main()
+     File "/usr/lib/python3.7/site-packages/nni/__main__.py", line 164, in main
+       args.tuner_args)
+     File "/usr/lib/python3.7/site-packages/nni/__main__.py", line 81, in create_customized_class_instance
+       instance = class_constructor(**class_args)
+   TypeError: __init__() missing 1 required positional arguments: 'optimize_mode'.
+
+**Trial** Fails
+^^^^^^^^^^^^^^^^^^^
+
+In this situation, NNI can still run and create new trials.
+
+It means your trial code (which is run by NNI) fails. This kind of error is strongly related to your trial code. Please check the trial's log to fix any possible errors shown there.
+
+A common example of this would be running the mnist example without installing TensorFlow. There will be an ``ImportError`` (that is, importing tensorflow in your trial code without having installed it), and thus every trial fails.
+
+
+.. image:: ../../img/trial_error.jpg
+   :target: ../../img/trial_error.jpg
+   :alt:
+
+
+As it shows, every trial has a log path, where you can find the trial's log and stderr.
+
+In addition to experiment-level debugging, NNI also provides the capability to debug a single trial without starting the entire experiment. Refer to `standalone mode <../TrialExample/Trials.rst#standalone-mode-for-debugging>`__ for more information about debugging single trial code.
diff --git a/docs/en_US/Tutorial/HowToLaunchFromPython.rst b/docs/en_US/Tutorial/HowToLaunchFromPython.rst
new file mode 100644
index 0000000000000000000000000000000000000000..876f2ea307991ea84c78c6c80ae59497a931743e
--- /dev/null
+++ b/docs/en_US/Tutorial/HowToLaunchFromPython.rst
@@ -0,0 +1,317 @@
+How to Launch an Experiment from Python
+=======================================
+
+.. toctree::
+   :hidden:
+
+   Start Usage
+   Connect Usage
+
+Overview
+--------
+
+Since ``v2.0``, NNI provides a new way to launch experiments. Previously, you needed to configure the experiment in a YAML configuration file and then use the ``nnictl`` command to launch it. Now, you can also configure and run experiments directly in a Python file. If you are familiar with Python programming, this will undoubtedly bring you more convenience.
+
+Run a New Experiment
+--------------------
+
+After successfully installing ``nni`` and preparing the `trial code <../TrialExample/Trials.rst>`__, you can start the experiment with a Python script in the following 2 steps.
+
+Step 1 - Initialize an experiment instance and configure it
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   from nni.experiment import Experiment
+   experiment = Experiment('local')
+
+Now, you have an ``Experiment`` instance, and this experiment will launch trials on your local machine due to ``training_service='local'``.
+
+See all `training services <../training_services.rst>`__ supported in NNI.
+
+.. code-block:: python
+
+   experiment.config.experiment_name = 'MNIST example'
+   experiment.config.trial_concurrency = 2
+   experiment.config.max_trial_number = 10
+   experiment.config.search_space = search_space
+   experiment.config.trial_command = 'python3 mnist.py'
+   experiment.config.trial_code_directory = Path(__file__).parent
+   experiment.config.tuner.name = 'TPE'
+   experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
+   experiment.config.training_service.use_active_gpu = True
+
+Use the form ``experiment.config.foo = 'bar'`` to configure your experiment.
+
+See all `builtin tuners <../builtin_tuner.rst>`__ supported in NNI.
+
+See the `configuration reference <../reference/experiment_config.rst>`__ for more detailed usage of these fields.
+
+
+Step 2 - Just run
+^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   experiment.run(port=8080)
+
+Now, you have successfully launched an NNI experiment, and you can type ``localhost:8080`` into your browser to observe your experiment in real time.
+
+In this way, the experiment runs in the foreground and exits automatically when it finishes.
+
+.. Note:: If you want to run an experiment in an interactive way, use ``start()`` in Step 2. If you launch the experiment from a Python script, please use ``run()``, as ``start()`` is designed for interactive scenarios.
+
+Example
+^^^^^^^
+
+Below is an example of this new launching approach. You can find this code in :githublink:`mnist-tfv2/launch.py `.
+
+.. code-block:: python
+
+   from pathlib import Path
+
+   from nni.experiment import Experiment
+
+   search_space = {
+       "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
+       "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
+       "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
+       "batch_size": { "_type": "choice", "_value": [16, 32] },
+       "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] }
+   }

+   experiment = Experiment('local')
+   experiment.config.experiment_name = 'MNIST example'
+   experiment.config.trial_concurrency = 2
+   experiment.config.max_trial_number = 10
+   experiment.config.search_space = search_space
+   experiment.config.trial_command = 'python3 mnist.py'
+   experiment.config.trial_code_directory = Path(__file__).parent
+   experiment.config.tuner.name = 'TPE'
+   experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
+   experiment.config.training_service.use_active_gpu = True
+
+   experiment.run(8080)
+
+
+Start and Manage a New Experiment
+---------------------------------
+
+NNI migrates the API of ``NNI Client`` to this new launching approach. Launch the experiment with ``start()`` instead of ``run()``, and you can then use these APIs in interactive mode.
+
+Please refer to `example usage <./python_api_start.rst>`__ and the code file :githublink:`python_api_start.ipynb `.
+
+.. Note:: ``run()`` polls the experiment status and will automatically call ``stop()`` when the experiment finishes. ``start()`` just launches a new experiment, so you need to stop the experiment manually by calling ``stop()``.
+
+
+Connect and Manage an Existing Experiment
+-----------------------------------------
+
+If you launch an experiment with ``nnictl`` and also want to use these APIs, you can use ``Experiment.connect()`` to connect to the existing experiment.
+
+Please refer to `example usage <./python_api_connect.rst>`__ and the code file :githublink:`python_api_connect.ipynb `.
+
+.. Note:: You can use ``stop()`` to stop the experiment when connecting to an existing experiment.
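+
+Putting the interactive pieces together, here is a minimal sketch (the port is illustrative, and ``get_status`` is assumed to be one of the query methods available on ``Experiment``):
+
+.. code-block:: python
+
+   from nni.experiment import Experiment
+
+   experiment = Experiment('local')
+   # ... configure experiment.config as in the example above ...
+
+   # start() returns immediately, leaving the experiment running in the background
+   experiment.start(8080)
+
+   # interact with the running experiment, e.g. query its status
+   print(experiment.get_status())
+
+   # an experiment launched with start() must be stopped manually
+   experiment.stop()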
+ +Resume/View and Manage a Stopped Experiment +------------------------------------------- + +You can use ``Experiment.resume()`` and ``Experiment.view()`` to resume and view a stopped experiment, these functions behave like ``nnictl resume`` and ``nnictl view``. + +If you want to manage the experiment, set ``wait_completion`` as ``False`` and the functions will return an ``Experiment`` instance. For more parameters, please refer to API reference. + + +API Reference +------------- + +Detailed usage could be found `here <../reference/experiment_config.rst>`__. + +* `Experiment`_ +* `Experiment Config <#Experiment-Config>`_ +* `Algorithm Config <#Algorithm-Config>`_ +* `Training Service Config <#Training-Service-Config>`_ + * `Local Config <#Local-Config>`_ + * `Remote Config <#Remote-Config>`_ + * `Openpai Config <#Openpai-Config>`_ + * `AML Config <#AML-Config>`_ +* `Shared Storage Config `_ + + +Experiment +^^^^^^^^^^ + +.. autoclass:: nni.experiment.Experiment + :members: + + +Experiment Config +^^^^^^^^^^^^^^^^^ + +.. autoattribute:: nni.experiment.config.ExperimentConfig.experiment_name + +.. autoattribute:: nni.experiment.config.ExperimentConfig.search_space_file + +.. autoattribute:: nni.experiment.config.ExperimentConfig.search_space + +.. autoattribute:: nni.experiment.config.ExperimentConfig.trial_command + +.. autoattribute:: nni.experiment.config.ExperimentConfig.trial_code_directory + +.. autoattribute:: nni.experiment.config.ExperimentConfig.trial_concurrency + +.. autoattribute:: nni.experiment.config.ExperimentConfig.trial_gpu_number + +.. autoattribute:: nni.experiment.config.ExperimentConfig.max_experiment_duration + +.. autoattribute:: nni.experiment.config.ExperimentConfig.max_trial_number + +.. autoattribute:: nni.experiment.config.ExperimentConfig.nni_manager_ip + +.. autoattribute:: nni.experiment.config.ExperimentConfig.use_annotation + +.. autoattribute:: nni.experiment.config.ExperimentConfig.debug + +.. autoattribute:: nni.experiment.config.ExperimentConfig.log_level + +.. autoattribute:: nni.experiment.config.ExperimentConfig.experiment_working_directory + +.. autoattribute:: nni.experiment.config.ExperimentConfig.tuner_gpu_indices + +.. autoattribute:: nni.experiment.config.ExperimentConfig.tuner + +.. autoattribute:: nni.experiment.config.ExperimentConfig.assessor + +.. autoattribute:: nni.experiment.config.ExperimentConfig.advisor + +.. autoattribute:: nni.experiment.config.ExperimentConfig.training_service + +.. autoattribute:: nni.experiment.config.ExperimentConfig.shared_storage + + +Algorithm Config +^^^^^^^^^^^^^^^^ + +.. autoattribute:: nni.experiment.config.AlgorithmConfig.name + +.. autoattribute:: nni.experiment.config.AlgorithmConfig.class_args + +.. autoattribute:: nni.experiment.config.CustomAlgorithmConfig.class_name + +.. autoattribute:: nni.experiment.config.CustomAlgorithmConfig.code_directory + +.. autoattribute:: nni.experiment.config.CustomAlgorithmConfig.class_args + + +Training Service Config +^^^^^^^^^^^^^^^^^^^^^^^ + +Local Config +************ + +.. autoattribute:: nni.experiment.config.LocalConfig.platform + +.. autoattribute:: nni.experiment.config.LocalConfig.use_active_gpu + +.. autoattribute:: nni.experiment.config.LocalConfig.max_trial_number_per_gpu + +.. autoattribute:: nni.experiment.config.LocalConfig.gpu_indices + +Remote Config +************* + +.. autoattribute:: nni.experiment.config.RemoteConfig.platform + +.. autoattribute:: nni.experiment.config.RemoteConfig.reuse_mode + +.. 
autoattribute:: nni.experiment.config.RemoteConfig.machine_list + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.host + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.port + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.user + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.password + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.ssh_key_file + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.ssh_passphrase + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.use_active_gpu + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.max_trial_number_per_gpu + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.gpu_indices + +.. autoattribute:: nni.experiment.config.RemoteMachineConfig.python_path + + +Openpai Config +************** + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.platform + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.host + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.username + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.token + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.trial_cpu_number + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.trial_memory_size + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.storage_config_name + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.docker_image + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.local_storage_mount_point + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.container_storage_mount_point + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.reuse_mode + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.openpai_config + +.. autoattribute:: nni.experiment.config.OpenpaiConfig.openpai_config_file + +AML Config +********** + +.. autoattribute:: nni.experiment.config.AmlConfig.platform + +.. autoattribute:: nni.experiment.config.AmlConfig.subscription_id + +.. autoattribute:: nni.experiment.config.AmlConfig.resource_group + +.. autoattribute:: nni.experiment.config.AmlConfig.workspace_name + +.. autoattribute:: nni.experiment.config.AmlConfig.compute_target + +.. autoattribute:: nni.experiment.config.AmlConfig.docker_image + +.. autoattribute:: nni.experiment.config.AmlConfig.max_trial_number_per_gpu + + +Shared Storage Config +^^^^^^^^^^^^^^^^^^^^^ + +Nfs Config +********** + +.. autoattribute:: nni.experiment.config.NfsConfig.storage_type + +.. autoattribute:: nni.experiment.config.NfsConfig.nfs_server + +.. autoattribute:: nni.experiment.config.NfsConfig.exported_directory + +Azure Blob Config +***************** + +.. autoattribute:: nni.experiment.config.AzureBlobConfig.storage_type + +.. autoattribute:: nni.experiment.config.AzureBlobConfig.storage_account_name + +.. autoattribute:: nni.experiment.config.AzureBlobConfig.storage_account_key + +.. autoattribute:: nni.experiment.config.AzureBlobConfig.container_name diff --git a/docs/en_US/Tutorial/HowToUseDocker.rst b/docs/en_US/Tutorial/HowToUseDocker.rst new file mode 100644 index 0000000000000000000000000000000000000000..83bd12380e65c81dd400cbad622694e7e42e52f9 --- /dev/null +++ b/docs/en_US/Tutorial/HowToUseDocker.rst @@ -0,0 +1,112 @@ +**How to Use Docker in NNI** +================================ + +Overview +-------- + +`Docker `__ is a tool to make it easier for users to deploy and run applications based on their own operating system by starting containers. 
+Docker is not a virtual machine; it does not create a virtual operating system, but it allows different applications to use the same OS kernel while isolating them in containers.
+
+Users can start NNI experiments using Docker. NNI also provides an official Docker image `msranni/nni `__ on Docker Hub.
+
+Using Docker in local machine
+-----------------------------
+
+Step 1: Installation of Docker
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Before you start using Docker for NNI experiments, you should install Docker on your local machine. `See here `__.
+
+Step 2: Start a Docker container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have installed the Docker package on your local machine, you can start a Docker container instance to run NNI examples. Note that because NNI starts a web UI process in the container and keeps listening on a port, you need to specify the port mapping between your host machine and the Docker container to give access to the web UI from outside the container. By visiting the host IP address and port, you are redirected to the web UI process started in the Docker container and can view the web UI content.
+
+For example, you can start a new Docker container with the following command:
+
+.. code-block:: bash
+
+   docker run -i -t -p [hostPort]:[containerPort] [image]
+
+``-i:`` Start Docker in interactive mode.
+
+``-t:`` Docker assigns the container an input terminal.
+
+``-p:`` Port mapping; map a host port to a container port.
+
+For more information about Docker commands, please `refer to this `__.
+
+Note:
+
+.. code-block:: bash
+
+   NNI only supports Ubuntu and MacOS systems in local mode for the moment; please use the correct Docker image type. If you want to use a GPU in a Docker container, please use nvidia-docker.
+
+Step 3: Run NNI in a Docker container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you start a Docker container using NNI's official image ``msranni/nni``\ , you can directly start NNI experiments with the ``nnictl`` command. Our official image has NNI's running environment and basic Python and deep learning frameworks preinstalled.
+
+If you start from your own Docker image, you may need to install the NNI package first; please refer to `NNI installation `__.
+
+If you want to run NNI's official examples, you may need to clone the NNI repo from GitHub using
+
+.. code-block:: bash
+
+   git clone https://github.com/Microsoft/nni.git
+
+then you can enter ``nni/examples/trials`` to start an experiment.
+
+After you prepare NNI's environment, you can start a new experiment with the ``nnictl`` command. `See here `__.
+
+Using Docker on a remote platform
+---------------------------------
+
+NNI supports starting experiments in `remoteTrainingService <../TrainingService/RemoteMachineMode.rst>`__\ , running trial jobs on remote machines. As Docker can start an independent Ubuntu system as an SSH server, a Docker container can be used as the remote machine in NNI's remote mode.
+
+Step 1: Setting a Docker environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You should install the Docker software on your remote machine first; please `refer to this `__.
+
+To make sure your Docker container can be connected to by NNI experiments, you should build your own Docker image with an SSH server, or use an image with an SSH configuration. If you want to use a Docker container as an SSH server, you should configure SSH password login or private key login; please `refer to this `__.
+
+Note:
+
+.. code-block:: text
+
+   NNI's official image msranni/nni does not support SSH servers for the time being; you should build your own Docker image with an SSH configuration or use other images as a remote server.
+
+Step 2: Start a Docker container on a remote machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+An SSH server needs a port; you need to expose Docker's SSH port to NNI as the connection port. For example, if you set your container's SSH port as ``A``, you should map the container's port ``A`` to another port ``B`` on your remote host machine. NNI will connect to port ``B`` as the SSH port, and the host machine will forward the connection from port ``B`` to port ``A``, so NNI can reach your Docker container.
+
+For example, you can start your Docker container using the following command:
+
+.. code-block:: bash
+
+   docker run -dit -p [hostPort]:[containerPort] [image]
+
+The ``containerPort`` is the SSH port used in your Docker container and the ``hostPort`` is the port of your host machine exposed to NNI. You can set your NNI config file to connect to ``hostPort``\ , and the connection will be forwarded to your Docker container.
+For more information about Docker commands, please `refer to this `__.
+
+Note:
+
+.. code-block:: bash
+
+   If you use your own Docker image as a remote server, please make sure that this image has a basic Python environment and an NNI SDK runtime environment. If you want to use a GPU in a Docker container, please use nvidia-docker.
+
+Step 3: Run NNI experiments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can set your config file to the remote platform and set the ``machineList`` configuration to connect to your Docker SSH server; `refer to this <../TrainingService/RemoteMachineMode.rst>`__. Note that you should set the correct ``port``\ , ``username``\ , and ``passWd`` or ``sshKeyPath`` of your host machine.
+
+``port:`` The host machine's port, mapped to Docker's SSH port.
+
+``username:`` The username of the Docker container.
+
+``passWd:`` The password of the Docker container.
+
+``sshKeyPath:`` The path of the private key of the Docker container.
+
+After configuring the config file, you can start an experiment; `refer to this `__.
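+
+As a hedged sketch, a ``machineList`` entry pointing at such a container might look like this (the IP, port, and credentials are placeholders; port ``2222`` plays the role of port ``B`` above):
+
+.. code-block:: yaml
+
+   machineList:
+     - ip: 10.10.10.10        # IP of the host machine running the container
+       port: 2222             # host port B, mapped to the container's SSH port A
+       username: root
+       passwd: container_password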
diff --git a/docs/en_US/Tutorial/HowToUseSharedStorage.rst b/docs/en_US/Tutorial/HowToUseSharedStorage.rst
new file mode 100644
index 0000000000000000000000000000000000000000..45eb37a75110e9b5eff9636a881c3b94a475e179
--- /dev/null
+++ b/docs/en_US/Tutorial/HowToUseSharedStorage.rst
@@ -0,0 +1,63 @@
+**How to Use Shared Storage**
+=============================
+
+If you want to use your own storage when using NNI, shared storage can meet that need.
+Compared with the training service's native storage, shared storage brings more convenience.
+All the information generated by the experiment will be stored under the ``/nni`` folder in your shared storage.
+All the output produced by a trial will be located under the ``/nni/{EXPERIMENT_ID}/trials/{TRIAL_ID}/nnioutput`` folder in your shared storage.
+This saves you from searching for experiment-related information in various places.
+Remember that your trial working directory is ``/nni/{EXPERIMENT_ID}/trials/{TRIAL_ID}``, so if you upload your data to this shared storage, you can open it like a local file in your trial code without downloading it.
+We will develop more practical features based on shared storage in the future. The config reference can be found `here <../reference/experiment_config.html#sharedstorageconfig>`_.
+
+.. note::
+   Shared storage is currently in the experimental stage. We suggest using AzureBlob under Ubuntu/CentOS/RHEL, and NFS under Ubuntu/CentOS/RHEL/Fedora/Debian for remote.
+   Also make sure your local machine can mount NFS or fuse AzureBlob, and that the machine used in the training service has ``sudo`` permission without a password. We only support shared storage under training services with reuse mode for now.
+
+.. note::
+   What is the difference between training service native storage and shared storage? Training service native storage is usually provided by the specific training service.
+   E.g., the local storage on the remote machine in remote mode, or the provided storage in openpai mode. These storages might not be easy to use; e.g., users have to upload datasets to all remote machines to train the model.
+   In these cases, shared storage can be automatically mounted on the machines in the training platform. Users can directly save and load data from the shared storage, and all the data/logs used or generated in one experiment can be kept in the same place.
+   After the experiment is finished, the shared storage will automatically be unmounted from the training platform.
+
+Example
+-------
+If you want to use AzureBlob, add the below to your config. For the full config file, see :githublink:`mnist-sharedstorage/config_azureblob.yml `.
+
+.. code-block:: yaml
+
+   sharedStorage:
+     storageType: AzureBlob
+     # please set localMountPoint as an absolute path outside the code directory,
+     # because nni will copy the user code to localMountPoint
+     localMountPoint: ${your/local/mount/point}
+     # remoteMountPoint is the mount point on the training service machine; it can be an absolute or relative path
+     # make sure you have `sudo` permission without password on the training service machine
+     remoteMountPoint: ${your/remote/mount/point}
+     storageAccountName: ${replace_to_your_storageAccountName}
+     storageAccountKey: ${replace_to_your_storageAccountKey}
+     containerName: ${replace_to_your_containerName}
+     # usermount means you have already mounted this storage on localMountPoint
+     # nnimount means nni will try to mount this storage on localMountPoint
+     # nomount means the storage will not be mounted on the local machine; partial storages will be supported in the future
+     localMounted: nnimount
+
+You can find ``storageAccountName``, ``storageAccountKey`` and ``containerName`` on the Azure storage account portal.
+
+.. image:: ../../img/azure_storage.png
+   :target: ../../img/azure_storage.png
+   :alt:
+
+If you want to use NFS, add the below to your config. For the full config file, see :githublink:`mnist-sharedstorage/config_nfs.yml `.
+
+.. code-block:: yaml
code-block:: yaml + + sharedStorage: + storageType: NFS + localMountPoint: ${your/local/mount/point} + remoteMountPoint: ${your/remote/mount/point} + nfsServer: ${nfs-server-ip} + exportedDirectory: ${nfs/exported/directory} + # usermount means you have already mount this storage on localMountPoint + # nnimount means nni will try to mount this storage on localMountPoint + # nomount means storage will not mount in local machine, will support partial storages in the future + localMounted: nnimount diff --git a/docs/en_US/Tutorial/InstallCustomizedAlgos.rst b/docs/en_US/Tutorial/InstallCustomizedAlgos.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce3a2d03f8d042d6b1ed64b6608f1e0d0b1c0b05 --- /dev/null +++ b/docs/en_US/Tutorial/InstallCustomizedAlgos.rst @@ -0,0 +1,222 @@ + +**How to register customized algorithms as builtin tuners, assessors and advisors** +======================================================================================= + +.. contents:: + +Overview +-------- + +NNI provides a lot of `builtin tuners <../Tuner/BuiltinTuner.rst>`_, `advisors <../Tuner/HyperbandAdvisor.rst>`__ and `assessors <../Assessor/BuiltinAssessor.rst>`__ can be used directly for Hyper Parameter Optimization, and some extra algorithms can be registered via ``nnictl algo register --meta `` after NNI is installed. You can check builtin algorithms via ``nnictl algo list`` command. + +NNI also provides the ability to build your own customized tuners, advisors and assessors. To use the customized algorithm, users can simply follow the spec in experiment config file to properly reference the algorithm, which has been illustrated in the tutorials of `customized tuners <../Tuner/CustomizeTuner.rst>`_ / `advisors <../Tuner/CustomizeAdvisor.rst>`__ / `assessors <../Assessor/CustomizeAssessor.rst>`__. + +NNI also allows users to install the customized algorithm as a builtin algorithm, in order for users to use the algorithm in the same way as NNI builtin tuners/advisors/assessors. More importantly, it becomes much easier for users to share or distribute their implemented algorithm to others. Customized tuners/advisors/assessors can be installed into NNI as builtin algorithms, once they are installed into NNI, you can use your customized algorithms the same way as builtin tuners/advisors/assessors in your experiment configuration file. For example, you built a customized tuner and installed it into NNI using a builtin name ``mytuner``, then you can use this tuner in your configuration file like below: + +.. code-block:: yaml + + tuner: + builtinTunerName: mytuner + +Register customized algorithms as builtin tuners, assessors and advisors +------------------------------------------------------------------------ + +You can follow below steps to build a customized tuner/assessor/advisor, and register it into NNI as builtin algorithm. + +1. Create a customized tuner/assessor/advisor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Reference following instructions to create: + + +* `customized tuner <../Tuner/CustomizeTuner.rst>`_ +* `customized assessor <../Assessor/CustomizeAssessor.rst>`_ +* `customized advisor <../Tuner/CustomizeAdvisor.rst>`_ + +2. (Optional) Create a validator to validate classArgs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +NNI provides a ``ClassArgsValidator`` interface for customized algorithms author to validate the classArgs parameters in experiment configuration file which are passed to customized algorithms constructors. 
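To give a feel for the shape of such a class, below is a minimal random-sampling tuner sketch. It only handles ``choice`` variables and is illustrative rather than a useful algorithm; the linked tutorials are the authoritative reference for the interface:

.. code-block:: python

   import random
   from nni.tuner import Tuner

   class DemoTuner(Tuner):
       def __init__(self, optimize_mode='maximize'):
           self.optimize_mode = optimize_mode
           self.search_space = {}

       def update_search_space(self, search_space):
           # Called with the search space defined in the experiment configuration.
           self.search_space = search_space

       def generate_parameters(self, parameter_id, **kwargs):
           # Return one hyperparameter set; here we sample 'choice' variables at random.
           return {name: random.choice(spec['_value'])
                   for name, spec in self.search_space.items()
                   if spec['_type'] == 'choice'}

       def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
           # A real tuner would use the reported metric to guide future sampling.
           pass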
2. (Optional) Create a validator to validate classArgs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

NNI provides a ``ClassArgsValidator`` interface for customized algorithm authors to validate the classArgs parameters that are passed from the experiment configuration file to the customized algorithm's constructor.

The ``ClassArgsValidator`` interface is defined as:

.. code-block:: python

    class ClassArgsValidator(object):
        def validate_class_args(self, **kwargs):
            """
            The classArgs fields in experiment configuration are packed as a dict and
            passed to validator as kwargs.
            """
            pass

For example, you can implement your validator like this:

.. code-block:: python

    from schema import Schema, Optional
    from nni import ClassArgsValidator

    class MedianstopClassArgsValidator(ClassArgsValidator):
        def validate_class_args(self, **kwargs):
            Schema({
                Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
                Optional('start_step'): self.range('start_step', int, 0, 9999),
            }).validate(kwargs)

The validator is invoked before the experiment is started, to check whether the classArgs fields are valid for your customized algorithm.

3. Install your customized algorithm into the Python environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

First, the customized algorithm needs to be prepared as a Python package (a minimal ``setup.py`` sketch is shown after this list). Then you can install the package into the Python environment via either of the following:


* Run ``python setup.py develop`` from the package directory. This installs the package in development mode, which is recommended while your algorithm is still under development.
* Run ``python setup.py bdist_wheel`` from the package directory. This builds a whl file, which is a pip installation source. Then run ``pip install <whl_file_path>`` to install it.
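A minimal ``setup.py`` for such a package could look like the sketch below; the names ``demo-tuner`` and ``demo_tuner`` are illustrative placeholders and should match your own module and the meta file described in the next section:

.. code-block:: python

   # setup.py -- minimal packaging sketch for a customized algorithm
   import setuptools

   setuptools.setup(
       name='demo-tuner',                    # hypothetical distribution name
       version='0.1',
       packages=setuptools.find_packages(),  # should pick up the demo_tuner module
       python_requires='>=3.6',
   )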
4. Prepare the meta file
^^^^^^^^^^^^^^^^^^^^^^^^

Create a YAML file with the following keys as the meta file:


* ``algoType``: type of the algorithm; one of ``tuner``, ``assessor``, ``advisor``
* ``builtinName``: builtin name used in the experiment configuration file
* ``className``: tuner class name, including its module name, for example: ``demo_tuner.DemoTuner``
* ``classArgsValidator``: class args validator class name, including its module name, for example: ``demo_tuner.MyClassArgsValidator``

The following is an example of the YAML meta file:

.. code-block:: yaml

   algoType: tuner
   builtinName: demotuner
   className: demo_tuner.DemoTuner
   classArgsValidator: demo_tuner.MyClassArgsValidator

5. Register the customized algorithm into NNI
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Run the following command to register the customized algorithm as a builtin algorithm in NNI:

.. code-block:: bash

   nnictl algo register --meta <path_to_meta_file>

``<path_to_meta_file>`` is the path to the YAML meta file you created in the section above.

See the `customized tuner example <#example-register-a-customized-tuner-as-a-builtin-tuner>`_ for a full walkthrough.

Use the installed builtin algorithms in an experiment
-----------------------------------------------------

Once your customized algorithm is installed, you can use it in the experiment configuration file the same way as other builtin tuners/assessors/advisors, for example:

.. code-block:: yaml

   tuner:
     builtinTunerName: demotuner
     classArgs:
       #choice: maximize, minimize
       optimize_mode: maximize

Manage builtin algorithms using ``nnictl algo``
-----------------------------------------------

List builtin algorithms
^^^^^^^^^^^^^^^^^^^^^^^

Run the following command to list the registered builtin algorithms:

.. code-block:: bash

   nnictl algo list
   +-----------------+------------+-----------+----------------------+------------------------------------------+
   |      Name       |    Type    |   Source  |      Class Name      |               Module Name                |
   +-----------------+------------+-----------+----------------------+------------------------------------------+
   | TPE             | tuners     | nni       | HyperoptTuner        | nni.hyperopt_tuner.hyperopt_tuner        |
   | Random          | tuners     | nni       | HyperoptTuner        | nni.hyperopt_tuner.hyperopt_tuner        |
   | Anneal          | tuners     | nni       | HyperoptTuner        | nni.hyperopt_tuner.hyperopt_tuner        |
   | Evolution       | tuners     | nni       | EvolutionTuner       | nni.evolution_tuner.evolution_tuner      |
   | BatchTuner      | tuners     | nni       | BatchTuner           | nni.batch_tuner.batch_tuner              |
   | GridSearch      | tuners     | nni       | GridSearchTuner      | nni.gridsearch_tuner.gridsearch_tuner    |
   | NetworkMorphism | tuners     | nni       | NetworkMorphismTuner | nni.networkmorphism_tuner.networkmo...   |
   | MetisTuner      | tuners     | nni       | MetisTuner           | nni.metis_tuner.metis_tuner              |
   | GPTuner         | tuners     | nni       | GPTuner              | nni.gp_tuner.gp_tuner                    |
   | PBTTuner        | tuners     | nni       | PBTTuner             | nni.pbt_tuner.pbt_tuner                  |
   | SMAC            | tuners     | nni       | SMACTuner            | nni.smac_tuner.smac_tuner                |
   | PPOTuner        | tuners     | nni       | PPOTuner             | nni.ppo_tuner.ppo_tuner                  |
   | Medianstop      | assessors  | nni       | MedianstopAssessor   | nni.medianstop_assessor.medianstop_...   |
   | Curvefitting    | assessors  | nni       | CurvefittingAssessor | nni.curvefitting_assessor.curvefitt...   |
   | Hyperband       | advisors   | nni       | Hyperband            | nni.hyperband_advisor.hyperband_adv...   |
   | BOHB            | advisors   | nni       | BOHB                 | nni.bohb_advisor.bohb_advisor            |
   +-----------------+------------+-----------+----------------------+------------------------------------------+

Unregister builtin algorithms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Run the following command to unregister an installed algorithm:

``nnictl algo unregister <builtin_name>``

For example:

``nnictl algo unregister demotuner``


Porting customized algorithms from v1.x to v2.x
-----------------------------------------------

All that needs to be modified is to delete the ``NNI Package :: tuner`` metadata in ``setup.py`` and add a meta file as described in `4. Prepare the meta file`_. Then you can follow `Register customized algorithms as builtin tuners, assessors and advisors`_ to register your customized algorithm.

Example: Register a customized tuner as a builtin tuner
-------------------------------------------------------

You can follow the steps below to register the customized tuner in ``nni/examples/tuners/customized_tuner`` as a builtin tuner.

Install the customized tuner package into the Python environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There are two options for installing the package into the Python environment:

Option 1: install from the directory
""""""""""""""""""""""""""""""""""""

From the ``nni/examples/tuners/customized_tuner`` directory, run:

``python setup.py develop``

This command installs the ``nni/examples/tuners/customized_tuner`` directory as a pip installation source in development mode.

Option 2: install from the whl file
"""""""""""""""""""""""""""""""""""

Step 1: From the ``nni/examples/tuners/customized_tuner`` directory, run:

``python setup.py bdist_wheel``

This command builds a whl file, which is a pip installation source.

Step 2: Run the command:

``pip install dist/demo_tuner-0.1-py3-none-any.whl``

Register the customized tuner as a builtin tuner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Run the following command:

``nnictl algo register --meta meta_file.yml``

Check the registered builtin algorithms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Then run the command ``nnictl algo list``\ ; you should see that demotuner is registered:

.. code-block:: bash

   +-----------------+------------+-----------+----------------------+------------------------------------------+
   |      Name       |    Type    |   Source  |      Class Name      |               Module Name                |
   +-----------------+------------+-----------+----------------------+------------------------------------------+
   | demotuner       | tuners     | User      | DemoTuner            | demo_tuner                               |
   +-----------------+------------+-----------+----------------------+------------------------------------------+

diff --git a/docs/en_US/Tutorial/InstallationLinux.rst b/docs/en_US/Tutorial/InstallationLinux.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b29e2193103f823eb15937458e5fb9f2611a6a69
--- /dev/null
+++ b/docs/en_US/Tutorial/InstallationLinux.rst
@@ -0,0 +1,202 @@
Install on Linux & Mac
======================

Installation
------------

Installation on Linux and macOS follows the same instructions, given below.

Install NNI through pip
^^^^^^^^^^^^^^^^^^^^^^^

  Prerequisite: ``python 64-bit >= 3.6``

.. code-block:: bash

   python3 -m pip install --upgrade nni
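As a quick sanity check after installing, you can print the installed version; this assumes the package exposes ``nni.__version__``, and ``nnictl --version`` is described in the nnictl reference:

.. code-block:: bash

   python3 -c 'import nni; print(nni.__version__)'
   nnictl --version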
Install NNI through source code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  If you are interested in a specific or the latest code version, you can install NNI through the source code.

  Prerequisites: ``python 64-bit >= 3.6``, ``git``

.. code-block:: bash

   git clone -b v2.6 https://github.com/Microsoft/nni.git
   cd nni
   python3 -m pip install -U -r dependencies/setup.txt
   python3 -m pip install -r dependencies/develop.txt
   python3 setup.py develop

Build wheel package from NNI source code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The previous section shows how to install NNI in `development mode `__.
If you want to perform a persistent install instead, we recommend building your own wheel package and installing from the wheel (note that ``NNI_RELEASE`` determines the version embedded in the wheel file name):

.. code-block:: bash

   git clone -b v2.6 https://github.com/Microsoft/nni.git
   cd nni
   export NNI_RELEASE=2.6
   python3 -m pip install -U -r dependencies/setup.txt
   python3 -m pip install -r dependencies/develop.txt
   python3 setup.py clean --all
   python3 setup.py build_ts
   python3 setup.py bdist_wheel -p manylinux1_x86_64
   python3 -m pip install dist/nni-2.6-py3-none-manylinux1_x86_64.whl

Use NNI in a docker image
^^^^^^^^^^^^^^^^^^^^^^^^^

  You can also install NNI in a docker image. Please follow the instructions `here <../Tutorial/HowToUseDocker.rst>`__ to build an NNI docker image. The NNI docker image can also be retrieved from Docker Hub through the command ``docker pull msranni/nni:latest``.
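If you use the prebuilt image, a typical way to try it out is to pull it and start an interactive container; the port mapping below is illustrative, so publish whichever port your experiment will use:

.. code-block:: bash

   docker pull msranni/nni:latest
   # map host port 8080 to container port 8080 for the web UI (example values)
   docker run -it -p 8080:8080 msranni/nni:latest /bin/bash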
Verify installation
-------------------

* 
  Download the examples via cloning the source code.

  .. code-block:: bash

     git clone -b v2.6 https://github.com/Microsoft/nni.git

* 
  Run the MNIST example.

  .. code-block:: bash

     nnictl create --config nni/examples/trials/mnist-pytorch/config.yml

* 
  Wait for the message ``INFO: Successfully started experiment!`` in the command line. This message indicates that your experiment has been successfully started. You can explore the experiment using the ``Web UI url``.

.. code-block:: text

   INFO: Starting restful server...
   INFO: Successfully started Restful server!
   INFO: Setting local config...
   INFO: Successfully set local config!
   INFO: Starting experiment...
   INFO: Successfully started experiment!
   -----------------------------------------------------------------------
   The experiment id is egchD4qy
   The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
   -----------------------------------------------------------------------

   You can use these commands to get more information about the experiment
   -----------------------------------------------------------------------
            commands                       description
   1. nnictl experiment show        show the information of experiments
   2. nnictl trial ls               list all of trial jobs
   3. nnictl top                    monitor the status of running experiments
   4. nnictl log stderr             show stderr log content
   5. nnictl log stdout             show stdout log content
   6. nnictl stop                   stop an experiment
   7. nnictl trial kill             kill a trial job by id
   8. nnictl --help                 get help information about nnictl
   -----------------------------------------------------------------------


* Open the ``Web UI url`` in your browser; you can view detailed information about the experiment and all the submitted trial jobs as shown below. `Here <../Tutorial/WebUI.rst>`__ are more Web UI pages.


.. image:: ../../img/webui_overview_page.png
   :target: ../../img/webui_overview_page.png
   :alt: overview



.. image:: ../../img/webui_trialdetail_page.png
   :target: ../../img/webui_trialdetail_page.png
   :alt: detail


System requirements
-------------------

Due to potential programming changes, the minimum system requirements of NNI may change over time.

Linux
^^^^^

.. list-table::
   :header-rows: 1
   :widths: auto

   * - 
     - Recommended
     - Minimum
   * - **Operating System**
     - Ubuntu 16.04 or above
     - 
   * - **CPU**
     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
     - Intel® Core™ i3 or AMD Phenom™ X3 8650
   * - **GPU**
     - NVIDIA® GeForce® GTX 660 or better
     - NVIDIA® GeForce® GTX 460
   * - **Memory**
     - 6 GB RAM
     - 4 GB RAM
   * - **Storage**
     - 30 GB available hard drive space
     - 
   * - **Internet**
     - Broadband internet connection
     - 
   * - **Resolution**
     - 1024 x 768 minimum display resolution
     - 


macOS
^^^^^

.. list-table::
   :header-rows: 1
   :widths: auto

   * - 
     - Recommended
     - Minimum
   * - **Operating System**
     - macOS 10.14.1 or above
     - 
   * - **CPU**
     - Intel® Core™ i7-4770 or better
     - Intel® Core™ i5-760 or better
   * - **GPU**
     - AMD Radeon™ R9 M395X or better
     - NVIDIA® GeForce® GT 750M or AMD Radeon™ R9 M290 or better
   * - **Memory**
     - 8 GB RAM
     - 4 GB RAM
   * - **Storage**
     - 70 GB available space, SSD
     - 70 GB available space, 7200 RPM HDD
   * - **Internet**
     - Broadband internet connection
     - 
   * - **Resolution**
     - 1024 x 768 minimum display resolution
     - 


Further reading
---------------


* `Overview <../Overview.rst>`__
* `Use command line tool nnictl `__
* `Use NNIBoard `__
* `Define search space `__
* `Config an experiment `__
* `How to run an experiment on local (with multiple GPUs)? <../TrainingService/LocalMode.rst>`__
* `How to run an experiment on multiple machines? <../TrainingService/RemoteMachineMode.rst>`__
* `How to run an experiment on OpenPAI? <../TrainingService/PaiMode.rst>`__
* `How to run an experiment on Kubernetes through Kubeflow? <../TrainingService/KubeflowMode.rst>`__
* `How to run an experiment on Kubernetes through FrameworkController? <../TrainingService/FrameworkControllerMode.rst>`__
* `How to run an experiment on Kubernetes through AdaptDL? <../TrainingService/AdaptDLMode.rst>`__
diff --git a/docs/en_US/Tutorial/InstallationWin.rst b/docs/en_US/Tutorial/InstallationWin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..383ec09c2c19dcd490b9e86c0ef41db6f84d0ee7
--- /dev/null
+++ b/docs/en_US/Tutorial/InstallationWin.rst
@@ -0,0 +1,214 @@
Install on Windows
==================

Prerequisites
-------------


* 
  Python 3.6 (or above) 64-bit. `Anaconda `__ or `Miniconda `__ is highly recommended to manage multiple Python environments on Windows.

* 
  If it's a newly installed Python environment, you need to install the `Microsoft C++ Build Tools `__ to build NNI dependencies like ``scikit-learn``.

  .. code-block:: bat

     pip install cython wheel

* 
  ``git``, for verifying the installation.

Install NNI
-----------

In most cases, you can install and upgrade NNI from the pip package. It's easy and fast.

If you are interested in a specific or the latest code version, you can install NNI through the source code.

If you want to contribute to NNI, refer to `setup development environment `__.


* 
  From the pip package

  .. code-block:: bat

     python -m pip install --upgrade nni

* 
  From the source code

  .. code-block:: bat

     git clone -b v2.6 https://github.com/Microsoft/nni.git
     cd nni
     python -m pip install -U -r dependencies/setup.txt
     python -m pip install -r dependencies/develop.txt
     python setup.py develop

Verify installation
-------------------

* 
  Clone the examples within the source code.

  .. code-block:: bat

     git clone -b v2.6 https://github.com/Microsoft/nni.git

* 
  Run the MNIST example.

  .. code-block:: bat

     nnictl create --config nni\examples\trials\mnist-pytorch\config_windows.yml

  Note: If you are familiar with other frameworks, you can choose the corresponding example under ``examples\trials``. You need to change the trial command ``python3`` to ``python`` in each example YAML (see the sketch below), since the default installation provides a ``python.exe`` executable, not ``python3.exe``.
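For v2-style configs such as the QuickStart one, this is a one-line edit; the exact key may differ in older config schemas, which nest a ``command`` field under the ``trial`` section:

.. code-block:: yaml

   # Windows: use the python.exe executable
   trialCommand: python mnist.py   # was: python3 mnist.py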
* 
  Wait for the message ``INFO: Successfully started experiment!`` in the command line. This message indicates that your experiment has been successfully started. You can explore the experiment using the ``Web UI url``.

.. code-block:: text

   INFO: Starting restful server...
   INFO: Successfully started Restful server!
   INFO: Setting local config...
   INFO: Successfully set local config!
   INFO: Starting experiment...
   INFO: Successfully started experiment!
   -----------------------------------------------------------------------
   The experiment id is egchD4qy
   The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
   -----------------------------------------------------------------------

   You can use these commands to get more information about the experiment
   -----------------------------------------------------------------------
            commands                       description
   1. nnictl experiment show        show the information of experiments
   2. nnictl trial ls               list all of trial jobs
   3. nnictl top                    monitor the status of running experiments
   4. nnictl log stderr             show stderr log content
   5. nnictl log stdout             show stdout log content
   6. nnictl stop                   stop an experiment
   7. nnictl trial kill             kill a trial job by id
   8. nnictl --help                 get help information about nnictl
   -----------------------------------------------------------------------


* Open the ``Web UI url`` in your browser; you can view detailed information about the experiment and all the submitted trial jobs as shown below. `Here <../Tutorial/WebUI.rst>`__ are more Web UI pages.


.. image:: ../../img/webui_overview_page.png
   :target: ../../img/webui_overview_page.png
   :alt: overview



.. image:: ../../img/webui_trialdetail_page.png
   :target: ../../img/webui_trialdetail_page.png
   :alt: detail


System requirements
-------------------

Below are the minimum system requirements for NNI on Windows; Windows 10 1809 is well tested and recommended. Due to potential programming changes, the minimum system requirements of NNI may change over time.

.. list-table::
   :header-rows: 1
   :widths: auto

   * - 
     - Recommended
     - Minimum
   * - **Operating System**
     - Windows 10 1809 or above
     - 
   * - **CPU**
     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
     - Intel® Core™ i3 or AMD Phenom™ X3 8650
   * - **GPU**
     - NVIDIA® GeForce® GTX 660 or better
     - NVIDIA® GeForce® GTX 460
   * - **Memory**
     - 6 GB RAM
     - 4 GB RAM
   * - **Storage**
     - 30 GB available hard drive space
     - 
   * - **Internet**
     - Broadband internet connection
     - 
   * - **Resolution**
     - 1024 x 768 minimum display resolution
     - 


FAQ
---

simplejson failed when installing NNI
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Make sure a C++ 14.0 compiler is installed.

.. 

   building 'simplejson._speedups' extension error: [WinError 3] The system cannot find the path specified


Trial failed with missing DLL in command line or PowerShell
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This error is caused by missing LIBIFCOREMD.DLL and LIBMMD.DLL and a failure to install SciPy. Using Anaconda or Miniconda with Python (64-bit) can solve it.

.. 

   ImportError: DLL load failed


Trial failed on webUI
^^^^^^^^^^^^^^^^^^^^^

Please check the trial log file stderr for more details.

If there is a stderr file, please check it. Two possible causes are:


* forgetting to change the trial command ``python3`` to ``python`` in each experiment YAML.
* forgetting to install experiment dependencies such as TensorFlow, Keras and so on.

Fail to use BOHB on Windows
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Make sure a C++ 14.0 compiler is installed when trying to run ``pip install nni[BOHB]`` to install the dependencies.

Tuners not supported on Windows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

SMAC is not supported currently; for the specific reason, refer to this `GitHub issue `__.

Use Windows as a remote worker
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Refer to `Remote Machine mode <../TrainingService/RemoteMachineMode.rst>`__.

Segmentation fault (core dumped) when installing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Refer to `FAQ `__.

Further reading
---------------


* `Overview <../Overview.rst>`__
* `Use command line tool nnictl `__
* `Use NNIBoard `__
* `Define search space `__
* `Config an experiment `__
* `How to run an experiment on local (with multiple GPUs)? <../TrainingService/LocalMode.rst>`__
* `How to run an experiment on multiple machines? <../TrainingService/RemoteMachineMode.rst>`__
* `How to run an experiment on OpenPAI? <../TrainingService/PaiMode.rst>`__
* `How to run an experiment on Kubernetes through Kubeflow? <../TrainingService/KubeflowMode.rst>`__
* `How to run an experiment on Kubernetes through FrameworkController? <../TrainingService/FrameworkControllerMode.rst>`__
diff --git a/docs/en_US/Tutorial/NNSpider.md b/docs/en_US/Tutorial/NNSpider.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1f9d11f13602f12b559906ff51503e860cd68d7
--- /dev/null
+++ b/docs/en_US/Tutorial/NNSpider.md
@@ -0,0 +1,124 @@
<!-- NNSpider.md is an HTML table gallery of NNI spider mascot images; only the
     cell captions survive extraction: No bug, Holiday, Error, Working, Sign,
     Crying, Cut, Weaving, Comfort, Sweat. The image markup itself is not
     recoverable here. -->
diff --git a/docs/en_US/Tutorial/Nnictl.rst b/docs/en_US/Tutorial/Nnictl.rst new file mode 100644 index 0000000000000000000000000000000000000000..610c84d86a2e860e1b617b06816822303d4ad6e6 --- /dev/null +++ b/docs/en_US/Tutorial/Nnictl.rst @@ -0,0 +1,1524 @@ +.. role:: raw-html(raw) + :format: html + + +nnictl +====== + +Introduction +------------ + +**nnictl** is a command line tool, which can be used to control experiments, such as start/stop/resume an experiment, start/stop NNIBoard, etc. + +Commands +-------- + +nnictl support commands: + + +* `nnictl create <#create>`__ +* `nnictl resume <#resume>`__ +* `nnictl view <#view>`__ +* `nnictl stop <#stop>`__ +* `nnictl update <#update>`__ +* `nnictl trial <#trial>`__ +* `nnictl top <#top>`__ +* `nnictl experiment <#experiment>`__ +* `nnictl platform <#platform>`__ +* `nnictl config <#config>`__ +* `nnictl log <#log>`__ +* `nnictl webui <#webui>`__ +* `nnictl algo <#algo>`__ +* `nnictl ss_gen <#ss_gen>`__ +* `nnictl --version <#version>`__ + +Manage an experiment +^^^^^^^^^^^^^^^^^^^^ + +:raw-html:`` + +nnictl create +^^^^^^^^^^^^^ + + +* + Description + + You can use this command to create a new experiment, using the configuration specified in config file. + + After this command is successfully done, the context will be set as this experiment, which means the following command you issued is associated with this experiment, unless you explicitly changes the context(not supported yet). + +* + Usage + + .. code-block:: bash + + nnictl create [OPTIONS] + +* + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - --config, -c + - True + - + - YAML configure file of the experiment + * - --port, -p + - False + - + - the port of restful server + * - --debug, -d + - False + - + - set debug mode + * - --foreground, -f + - False + - + - set foreground mode, print log content to terminal + + + +* + Examples + + .. + + create a new experiment with the default port: 8080 + + + .. code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config.yml + + .. + + create a new experiment with specified port 8088 + + + .. code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config.yml --port 8088 + + .. + + create a new experiment with specified port 8088 and debug mode + + + .. code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config.yml --port 8088 --debug + +Note: + +.. code-block:: text + + Debug mode will disable version check function in Trialkeeper. + +:raw-html:`` + +nnictl resume +^^^^^^^^^^^^^ + + +* + Description + + You can use this command to resume a stopped experiment. + +* + Usage + + .. code-block:: bash + + nnictl resume [OPTIONS] + +* + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - True + - + - The id of the experiment you want to resume + * - --port, -p + - False + - + - Rest port of the experiment you want to resume + * - --debug, -d + - False + - + - set debug mode + * - --foreground, -f + - False + - + - set foreground mode, print log content to terminal + * - --experiment_dir, -e + - False + - + - Resume experiment from external folder, specify the full path of experiment folder + + + +* + Example + + .. + + resume an experiment with specified port 8088 + + + .. 
code-block:: bash + + nnictl resume [experiment_id] --port 8088 + +:raw-html:`` + +nnictl view +^^^^^^^^^^^ + + +* + Description + + You can use this command to view a stopped experiment. + +* + Usage + + .. code-block:: bash + + nnictl view [OPTIONS] + +* + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - True + - + - The id of the experiment you want to view + * - --port, -p + - False + - + - Rest port of the experiment you want to view + * - --experiment_dir, -e + - False + - + - View experiment from external folder, specify the full path of experiment folder + + + +* + Example + + .. + + view an experiment with specified port 8088 + + + .. code-block:: bash + + nnictl view [experiment_id] --port 8088 + +:raw-html:`` + +nnictl stop +^^^^^^^^^^^ + + +* + Description + + You can use this command to stop a running experiment or multiple experiments. + +* + Usage + + .. code-block:: bash + + nnictl stop [Options] + +* + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - The id of the experiment you want to stop + * - --port, -p + - False + - + - Rest port of the experiment you want to stop + * - --all, -a + - False + - + - Stop all of experiments + + + +* + Details & Examples + + + #. + If there is no id specified, and there is an experiment running, stop the running experiment, or print error message. + + .. code-block:: bash + + nnictl stop + + #. + If there is an id specified, and the id matches the running experiment, nnictl will stop the corresponding experiment, or will print error message. + + .. code-block:: bash + + nnictl stop [experiment_id] + + #. + If there is a port specified, and an experiment is running on that port, the experiment will be stopped. + + .. code-block:: bash + + nnictl stop --port 8080 + + #. + Users could use 'nnictl stop --all' to stop all experiments. + + .. code-block:: bash + + nnictl stop --all + + #. + If the id ends with \*, nnictl will stop all experiments whose ids matchs the regular. + + #. If the id does not exist but match the prefix of an experiment id, nnictl will stop the matched experiment. + #. If the id does not exist but match multiple prefix of the experiment ids, nnictl will give id information. + +:raw-html:`` + +nnictl update +^^^^^^^^^^^^^ + + +* + **nnictl update searchspace** + + + * + Description + + You can use this command to update an experiment's search space. + + * + Usage + + .. code-block:: bash + + nnictl update searchspace [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --filename, -f + - True + - + - the file storing your new search space + + + +* + Example + + ``update experiment's new search space with file dir 'examples/trials/mnist-pytorch/search_space.json'`` + + .. code-block:: bash + + nnictl update searchspace [experiment_id] --filename examples/trials/mnist-pytorch/search_space.json + + +* + **nnictl update concurrency** + + + * + Description + + You can use this command to update an experiment's concurrency. + + * + Usage + + .. code-block:: bash + + nnictl update concurrency [OPTIONS] + + * + Options + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --value, -v + - True + - + - the number of allowed concurrent trials + + + +* + Example + + .. + + update experiment's concurrency + + + .. code-block:: bash + + nnictl update concurrency [experiment_id] --value [concurrency_number] + + +* + **nnictl update duration** + + + * + Description + + You can use this command to update an experiment's duration. + + * + Usage + + .. code-block:: bash + + nnictl update duration [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --value, -v + - True + - + - Strings like '1m' for one minute or '2h' for two hours. SUFFIX may be 's' for seconds, 'm' for minutes, 'h' for hours or 'd' for days. + + + +* + Example + + .. + + update experiment's duration + + + .. code-block:: bash + + nnictl update duration [experiment_id] --value [duration] + + +* + **nnictl update trialnum** + + + * + Description + + You can use this command to update an experiment's maxtrialnum. + + * + Usage + + .. code-block:: bash + + nnictl update trialnum [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --value, -v + - True + - + - the new number of maxtrialnum you want to set + + + +* + Example + + .. + + update experiment's trial num + + + .. code-block:: bash + + nnictl update trialnum [experiment_id] --value [trial_num] + +:raw-html:`` + +nnictl trial +^^^^^^^^^^^^ + + +* + **nnictl trial ls** + + + * + Description + + You can use this command to show trial's information. Note that if ``head`` or ``tail`` is set, only complete trials will be listed. + + * + Usage + + .. code-block:: bash + + nnictl trial ls + nnictl trial ls --head 10 + nnictl trial ls --tail 10 + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --head + - False + - + - the number of items to be listed with the highest default metric + * - --tail + - False + - + - the number of items to be listed with the lowest default metric + + + +* + **nnictl trial kill** + + + * + Description + + You can use this command to kill a trial job. + + * + Usage + + .. code-block:: bash + + nnictl trial kill [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - Experiment ID of the trial + * - --trial_id, -T + - True + - + - ID of the trial you want to kill. + + + +* + Example + + .. + + kill trail job + + + .. code-block:: bash + + nnictl trial kill [experiment_id] --trial_id [trial_id] + +:raw-html:`` + +nnictl top +^^^^^^^^^^ + + +* + Description + + Monitor all of running experiments. + +* + Usage + + .. code-block:: bash + + nnictl top + +* + Options + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --time, -t + - False + - + - The interval to update the experiment status, the unit of time is second, and the default value is 3 second. + + +:raw-html:`` + +Manage experiment information +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* + **nnictl experiment show** + + + * + Description + + Show the information of experiment. + + * + Usage + + .. code-block:: bash + + nnictl experiment show + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + + + +* + **nnictl experiment status** + + + * + Description + + Show the status of experiment. + + * + Usage + + .. code-block:: bash + + nnictl experiment status + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + + + +* + **nnictl experiment list** + + + * + Description + + Show the information of all the (running) experiments. + + * + Usage + + .. code-block:: bash + + nnictl experiment list [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - --all + - False + - + - list all of experiments + + + +* + **nnictl experiment delete** + + + * + Description + + Delete one or all experiments, it includes log, result, environment information and cache. It uses to delete useless experiment result, or save disk space. + + * + Usage + + .. code-block:: bash + + nnictl experiment delete [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment + * - --all + - False + - + - delete all of experiments + + + +* + **nnictl experiment export** + + + * + Description + + You can use this command to export reward & hyper-parameter of trial jobs to a csv file. + + * + Usage + + .. code-block:: bash + + nnictl experiment export [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment + * - --filename, -f + - True + - + - File path of the output file + * - --type + - True + - + - Type of output file, only support "csv" and "json" + * - --intermediate, -i + - False + - + - Are intermediate results included + + + +* + Examples + + .. + + export all trial data in an experiment as json format + + + .. code-block:: bash + + nnictl experiment export [experiment_id] --filename [file_path] --type json --intermediate + + +* + **nnictl experiment import** + + + * + Description + + You can use this command to import several prior or supplementary trial hyperparameters & results for NNI hyperparameter tuning. The data are fed to the tuning algorithm (e.g., tuner or advisor). + + * + Usage + + .. code-block:: bash + + nnictl experiment import [OPTIONS] + + * + Options + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - The id of the experiment you want to import data into + * - --filename, -f + - True + - + - a file with data you want to import in json format + + + +* + Details + + NNI supports users to import their own data, please express the data in the correct format. An example is shown below: + + .. code-block:: json + + [ + {"parameter": {"x": 0.5, "y": 0.9}, "value": 0.03}, + {"parameter": {"x": 0.4, "y": 0.8}, "value": 0.05}, + {"parameter": {"x": 0.3, "y": 0.7}, "value": 0.04} + ] + + Every element in the top level list is a sample. For our built-in tuners/advisors, each sample should have at least two keys: ``parameter`` and ``value``. The ``parameter`` must match this experiment's search space, that is, all the keys (or hyperparameters) in ``parameter`` must match the keys in the search space. Otherwise, tuner/advisor may have unpredictable behavior. ``Value`` should follow the same rule of the input in ``nni.report_final_result``\ , that is, either a number or a dict with a key named ``default``. For your customized tuner/advisor, the file could have any json content depending on how you implement the corresponding methods (e.g., ``import_data``\ ). + + You also can use `nnictl experiment export <#export>`__ to export a valid json file including previous experiment trial hyperparameters and results. + + Currently, following tuner and advisor support import data: + + .. code-block:: yaml + + builtinTunerName: TPE, Anneal, GridSearch, MetisTuner + builtinAdvisorName: BOHB + + *If you want to import data to BOHB advisor, user are suggested to add "TRIAL_BUDGET" in parameter as NNI do, otherwise, BOHB will use max_budget as "TRIAL_BUDGET". Here is an example:* + + .. code-block:: json + + [ + {"parameter": {"x": 0.5, "y": 0.9, "TRIAL_BUDGET": 27}, "value": 0.03} + ] + +* + Examples + + .. + + import data to a running experiment + + + .. code-block:: bash + + nnictl experiment import [experiment_id] -f experiment_data.json + + +* + **nnictl experiment save** + + + * + Description + + Save nni experiment metadata and code data. + + * + Usage + + .. code-block:: bash + + nnictl experiment save [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - True + - + - The id of the experiment you want to save + * - --path, -p + - False + - + - the folder path to store nni experiment data, default current working directory + * - --saveCodeDir, -s + - False + - + - save codeDir data of the experiment, default False + + + +* + Examples + + .. + + save an expeirment + + + .. code-block:: bash + + nnictl experiment save [experiment_id] --saveCodeDir + + +* + **nnictl experiment load** + + + * + Description + + Load an nni experiment. + + * + Usage + + .. code-block:: bash + + nnictl experiment load [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - --path, -p + - True + - + - the file path of nni package + * - --codeDir, -c + - True + - + - the path of codeDir for loaded experiment, this path will also put the code in the loaded experiment package + * - --logDir, -l + - False + - + - the path of logDir for loaded experiment + * - --searchSpacePath, -s + - True + - + - the path of search space file for loaded experiment, this path contains file name. 
Default in $codeDir/search_space.json + + + +* + Examples + + .. + + load an expeirment + + + .. code-block:: bash + + nnictl experiment load --path [path] --codeDir [codeDir] + +:raw-html:`` + +Manage platform information +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* + **nnictl platform clean** + + + * + Description + + It uses to clean up disk on a target platform. The provided YAML file includes the information of target platform, and it follows the same schema as the NNI configuration file. + + * + Note + + if the target platform is being used by other users, it may cause unexpected errors to others. + + * + Usage + + .. code-block:: bash + + nnictl platform clean [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - --config + - True + - + - the path of yaml config file used when create an experiment + + +:raw-html:`` + +nnictl config show +^^^^^^^^^^^^^^^^^^ + + +* + Description + + Display the current context information. + +* + Usage + + .. code-block:: bash + + nnictl config show + +:raw-html:`` + +Manage log +^^^^^^^^^^ + + +* + **nnictl log stdout** + + + * + Description + + Show the stdout log content. + + * + Usage + + .. code-block:: bash + + nnictl log stdout [options] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --head, -h + - False + - + - show head lines of stdout + * - --tail, -t + - False + - + - show tail lines of stdout + * - --path, -p + - False + - + - show the path of stdout file + + + +* + Example + + .. + + Show the tail of stdout log content + + + .. code-block:: bash + + nnictl log stdout [experiment_id] --tail [lines_number] + + +* + **nnictl log stderr** + + + * + Description + + Show the stderr log content. + + * + Usage + + .. code-block:: bash + + nnictl log stderr [options] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - ID of the experiment you want to set + * - --head, -h + - False + - + - show head lines of stderr + * - --tail, -t + - False + - + - show tail lines of stderr + * - --path, -p + - False + - + - show the path of stderr file + + + +* + **nnictl log trial** + + + * + Description + + Show trial log path. + + * + Usage + + .. code-block:: bash + + nnictl log trial [options] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - Experiment ID of the trial + * - --trial_id, -T + - False + - + - ID of the trial to be found the log path, required when id is not empty. + + +:raw-html:`` + +Manage webui +^^^^^^^^^^^^ + + +* + **nnictl webui url** + + + * + Description + + Show an experiment's webui url + + * + Usage + + .. code-block:: bash + + nnictl webui url [options] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - id + - False + - + - Experiment ID + + + +:raw-html:`` + +Manage builtin algorithms +^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* + **nnictl algo register** + + + * + Description + + Register customized algorithms as builtin tuner/assessor/advisor. + + * + Usage + + .. 
code-block:: bash + + nnictl algo register --meta + + ```` is the path to the meta data file in yml format, which has following keys: + + * + ``algoType``: type of algorithms, could be one of ``tuner``, ``assessor``, ``advisor`` + + * + ``builtinName``: builtin name used in experiment configuration file + + * + ``className``: tuner class name, including its module name, for example: ``demo_tuner.DemoTuner`` + + * + ``classArgsValidator``: class args validator class name, including its module name, for example: ``demo_tuner.MyClassArgsValidator`` + + * + Example + + .. + + Install a customized tuner in nni examples + + + .. code-block:: bash + + cd nni/examples/tuners/customized_tuner + python3 setup.py develop + nnictl algo register --meta meta_file.yml + + +* + **nnictl algo show** + + + * + Description + + Show the detailed information of specified registered algorithms. + + * + Usage + + .. code-block:: bash + + nnictl algo show + + * + Example + + .. code-block:: bash + + nnictl algo show SMAC + +* + **nnictl package list** + + + * + Description + + List the registered builtin algorithms. + + * + Usage + + .. code-block:: bash + + nnictl algo list + + +* + Example + + .. code-block:: bash + + nnictl algo list + + +* + **nnictl algo unregister** + + + * + Description + + Unregister a registered customized builtin algorithms. The NNI provided builtin algorithms can not be unregistered. + + * + Usage + + .. code-block:: bash + + nnictl algo unregister + + * + Example + + .. code-block:: bash + + nnictl algo unregister demotuner + +:raw-html:`` + +Generate search space +^^^^^^^^^^^^^^^^^^^^^ + + +* + **nnictl ss_gen** + + + * + Description + + Generate search space from user trial code which uses NNI NAS APIs. + + * + Usage + + .. code-block:: bash + + nnictl ss_gen [OPTIONS] + + * + Options + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - Name, shorthand + - Required + - Default + - Description + * - --trial_command + - True + - + - The command of the trial code + * - --trial_dir + - False + - ./ + - The directory of the trial code + * - --file + - False + - nni_auto_gen_search_space.json + - The file for storing generated search space + + + +* + Example + + .. + + Generate a search space + + + .. code-block:: bash + + nnictl ss_gen --trial_command="python3 mnist.py" --trial_dir=./ --file=ss.json + +:raw-html:`` + +Check NNI version +^^^^^^^^^^^^^^^^^ + + +* + **nnictl --version** + + + * + Description + + Describe the current version of NNI installed. + + * + Usage + + .. code-block:: bash + + nnictl --version diff --git a/docs/en_US/Tutorial/QuickStart.rst b/docs/en_US/Tutorial/QuickStart.rst new file mode 100644 index 0000000000000000000000000000000000000000..e93dac8bba8c0a7e7a9ab7bc58ea64df95f77dfa --- /dev/null +++ b/docs/en_US/Tutorial/QuickStart.rst @@ -0,0 +1,289 @@ +QuickStart +========== + +Installation +------------ + +Currently, NNI supports running on Linux, macOS and Windows. Ubuntu 16.04 or higher, macOS 10.14.1, and Windows 10.1809 are tested and supported. Simply run the following ``pip install`` in an environment that has ``python >= 3.6``. + +Linux and macOS +^^^^^^^^^^^^^^^ + +.. code-block:: bash + + python3 -m pip install --upgrade nni + +Windows +^^^^^^^ + +.. code-block:: bash + + python -m pip install --upgrade nni + +.. Note:: For Linux and macOS, ``--user`` can be added if you want to install NNI in your home directory, which does not require any special privileges. + +.. 
Note:: If there is an error like ``Segmentation fault``, please refer to the :doc:`FAQ `.

.. Note:: For the system requirements of NNI, please refer to :doc:`Install NNI on Linux & Mac ` or :doc:`Windows `. If you want to use docker, refer to :doc:`HowToUseDocker `.


"Hello World" example on MNIST
------------------------------

NNI is a toolkit to help users run automated machine learning experiments. It can automate the cyclic process of getting hyperparameters, running trials, testing results, and tuning hyperparameters. Here, we'll show how to use NNI to find the optimal hyperparameters on the MNIST dataset.

Here is an example script that trains a CNN on the MNIST dataset **without NNI**:

.. code-block:: python

    def main(args):
        # load data
        train_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=args['batch_size'], shuffle=True)
        test_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=1000, shuffle=True)
        # build model
        model = Net(hidden_size=args['hidden_size'])
        optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'])
        # train
        for epoch in range(10):
            train(args, model, device, train_loader, optimizer, epoch)
            test_acc = test(args, model, device, test_loader)
            print(test_acc)
        print('final accuracy:', test_acc)

    if __name__ == '__main__':
        params = {
            'batch_size': 32,
            'hidden_size': 128,
            'lr': 0.001,
            'momentum': 0.5
        }
        main(params)

The above code can only try one set of parameters at a time. If you want to tune the learning rate, you need to manually modify the hyperparameter and start the trial again and again.

NNI was built to help users tune jobs automatically; its working process is presented below:

.. code-block:: text

    input: search space, trial code, config file
    output: one optimal hyperparameter configuration

    1: For t = 0, 1, 2, ..., maxTrialNum,
    2:     hyperparameter = choose a set of parameters from the search space
    3:     final result = run_trial_and_evaluate(hyperparameter)
    4:     report final result to NNI
    5:     If the time limit is reached,
    6:         Stop the experiment
    7: return the hyperparameter value with the best final result

.. note::

    If you want to use NNI to automatically train your model and find the optimal hyper-parameters, there are two approaches:

    1. Write a config file and start the experiment from the command line.
    2. Config and launch the experiment directly from a Python file.

    In this part, we will focus on the first approach. For the second approach, please refer to `this tutorial `__\ ; a brief sketch of it is shown below.
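For reference, a compressed sketch of the second approach looks roughly like this; the field names follow the ``nni.experiment`` API as used in the linked tutorial, which remains the authoritative source:

.. code-block:: python

    from nni.experiment import Experiment

    # an illustrative two-variable search space
    search_space = {
        'lr': {'_type': 'choice', '_value': [0.0001, 0.001, 0.01, 0.1]},
        'momentum': {'_type': 'uniform', '_value': [0, 1]},
    }

    experiment = Experiment('local')                    # run trials on the local machine
    experiment.config.trial_command = 'python3 mnist.py'
    experiment.config.trial_code_directory = '.'
    experiment.config.search_space = search_space
    experiment.config.tuner.name = 'TPE'
    experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
    experiment.config.max_trial_number = 10
    experiment.config.trial_concurrency = 2
    experiment.run(8080)                                # serve the web UI on port 8080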
Step 1: Modify the ``Trial`` Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Modify your ``Trial`` file to get the hyperparameter set from NNI and report the final results to NNI.

.. code-block:: diff

    + import nni

      def main(args):
          # load data
          train_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=args['batch_size'], shuffle=True)
          test_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=1000, shuffle=True)
          # build model
          model = Net(hidden_size=args['hidden_size'])
          optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'])
          # train
          for epoch in range(10):
              train(args, model, device, train_loader, optimizer, epoch)
              test_acc = test(args, model, device, test_loader)
    -         print(test_acc)
    +         nni.report_intermediate_result(test_acc)
    -     print('final accuracy:', test_acc)
    +     nni.report_final_result(test_acc)

      if __name__ == '__main__':
    -     params = {'batch_size': 32, 'hidden_size': 128, 'lr': 0.001, 'momentum': 0.5}
    +     params = nni.get_next_parameter()
          main(params)

*Example:* :githublink:`mnist.py `


Step 2: Define the Search Space
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Define a ``Search Space`` in a YAML file, including the ``name`` and the ``distribution`` (discrete-valued or continuous-valued) of all the hyperparameters you want to search.

.. code-block:: yaml

    searchSpace:
      batch_size:
        _type: choice
        _value: [16, 32, 64, 128]
      hidden_size:
        _type: choice
        _value: [128, 256, 512, 1024]
      lr:
        _type: choice
        _value: [0.0001, 0.001, 0.01, 0.1]
      momentum:
        _type: uniform
        _value: [0, 1]

*Example:* :githublink:`config_detailed.yml `

You can also write your search space in a JSON file and specify the file path in the configuration. For a detailed tutorial on how to write the search space, please see `here `__.


Step 3: Config the Experiment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In addition to the search space defined in `step 2 `__, you need to config the experiment in the YAML file. It specifies the key information of the experiment, such as the trial files, the tuning algorithm, the max trial number, and the max duration.

.. code-block:: yaml

    experimentName: MNIST           # An optional name to distinguish the experiments
    trialCommand: python3 mnist.py  # NOTE: change "python3" to "python" if you are using Windows
    trialConcurrency: 2             # Run 2 trials concurrently
    maxTrialNumber: 10              # Generate at most 10 trials
    maxExperimentDuration: 1h       # Stop generating trials after 1 hour
    tuner:                          # Configure the tuning algorithm
      name: TPE
      classArgs:                    # Algorithm specific arguments
        optimize_mode: maximize
    trainingService:                # Configure the training platform
      platform: local

The experiment config reference can be found `here <../reference/experiment_config.rst>`__.

.. _nniignore:

.. Note:: If you are planning to use remote machines or clusters as your :doc:`training service <../TrainingService/Overview>`, then to avoid too much pressure on the network, NNI limits the number of files to 2000 and the total size to 300MB. If your codeDir contains too many files, you can choose which files and subfolders should be excluded by adding a ``.nniignore`` file that works like a ``.gitignore`` file. For more details on how to write this file, see the `git documentation `__.

*Example:* :githublink:`config_detailed.yml ` and :githublink:`.nniignore `

All the code above is already prepared and stored in :githublink:`examples/trials/mnist-pytorch/`.


Step 4: Launch the Experiment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Linux and macOS
***************

Run the **config_detailed.yml** file from your command line to start the experiment.

.. 
code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config_detailed.yml + +Windows +******* + +Change ``python3`` to ``python`` of the ``trialCommand`` field in the **config_detailed.yml** file, and run the **config_detailed.yml** file from your command line to start the experiment. + +.. code-block:: bash + + nnictl create --config nni\examples\trials\mnist-pytorch\config_detailed.yml + +.. Note:: ``nnictl`` is a command line tool that can be used to control experiments, such as start/stop/resume an experiment, start/stop NNIBoard, etc. Click :doc:`here ` for more usage of ``nnictl``. + +Wait for the message ``INFO: Successfully started experiment!`` in the command line. This message indicates that your experiment has been successfully started. And this is what we expect to get: + +.. code-block:: text + + INFO: Starting restful server... + INFO: Successfully started Restful server! + INFO: Setting local config... + INFO: Successfully set local config! + INFO: Starting experiment... + INFO: Successfully started experiment! + ----------------------------------------------------------------------- + The experiment id is egchD4qy + The Web UI urls are: [Your IP]:8080 + ----------------------------------------------------------------------- + + You can use these commands to get more information about the experiment + ----------------------------------------------------------------------- + commands description + 1. nnictl experiment show show the information of experiments + 2. nnictl trial ls list all of trial jobs + 3. nnictl top monitor the status of running experiments + 4. nnictl log stderr show stderr log content + 5. nnictl log stdout show stdout log content + 6. nnictl stop stop an experiment + 7. nnictl trial kill kill a trial job by id + 8. nnictl --help get help information about nnictl + ----------------------------------------------------------------------- + +If you prepared ``trial``\ , ``search space``\ , and ``config`` according to the above steps and successfully created an NNI job, NNI will automatically tune the optimal hyper-parameters and run different hyper-parameter sets for each trial according to the defined search space. You can see its progress through the WebUI clearly. + + +Step 5: View the Experiment +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +After starting the experiment successfully, you can find a message in the command-line interface that tells you the ``Web UI url`` like this: + +.. code-block:: text + + The Web UI urls are: [Your IP]:8080 + +Open the ``Web UI url`` (Here it's: ``[Your IP]:8080``\ ) in your browser, you can view detailed information about the experiment and all the submitted trial jobs as shown below. If you cannot open the WebUI link in your terminal, please refer to the `FAQ `__. + + +View Overview Page +****************** + +Information about this experiment will be shown in the WebUI, including the experiment profile and search space message. NNI also supports downloading this information and the parameters through the **Experiment summary** button. + + +.. image:: ../../img/webui-img/full-oview.png + :target: ../../img/webui-img/full-oview.png + :alt: overview + + +View Trials Detail Page +*********************** + +You could see the best trial metrics and hyper-parameter graph in this page. And the table content includes more columns when you click the button ``Add/Remove columns``. + + +.. 
image:: ../../img/webui-img/full-detail.png
+   :target: ../../img/webui-img/full-detail.png
+   :alt: detail
+
+
+View Experiments Management Page
+********************************
+
+On the ``All experiments`` page, you can see all the experiments on your machine.
+
+.. image:: ../../img/webui-img/managerExperimentList/expList.png
+   :target: ../../img/webui-img/managerExperimentList/expList.png
+   :alt: Experiments list
+
+For more detailed usage of the WebUI, please refer to `this doc <./WebUI.rst>`__.
+
+
+Related Topics
+--------------
+
+* `How to debug? `__
+* `How to write a trial? <../TrialExample/Trials.rst>`__
+* `How to try different Tuners? <../Tuner/BuiltinTuner.rst>`__
+* `How to try different Assessors? <../Assessor/BuiltinAssessor.rst>`__
+* `How to run an experiment on the different training platforms? <../training_services.rst>`__
+* `How to use Annotation? `__
+* `How to use the command line tool nnictl? `__
+* `How to launch Tensorboard on WebUI? `__
+
diff --git a/docs/en_US/Tutorial/SearchSpaceSpec.rst b/docs/en_US/Tutorial/SearchSpaceSpec.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc5f25efc9e0e0d2e92af5cb95d222bb28fd7f2f
--- /dev/null
+++ b/docs/en_US/Tutorial/SearchSpaceSpec.rst
@@ -0,0 +1,271 @@
+.. role:: raw-html(raw)
+   :format: html
+
+Search Space
+============
+
+Overview
+--------
+
+In NNI, the tuner samples parameters/architectures according to the search space.
+
+To define a search space, users should define the name of each variable, the type of its sampling strategy, and the strategy's parameters.
+
+* An example of a search space definition in a JSON file is as follows:
+
+.. code-block:: json
+
+   {
+       "dropout_rate": {"_type": "uniform", "_value": [0.1, 0.5]},
+       "conv_size": {"_type": "choice", "_value": [2, 3, 5, 7]},
+       "hidden_size": {"_type": "choice", "_value": [124, 512, 1024]},
+       "batch_size": {"_type": "choice", "_value": [50, 250, 500]},
+       "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]}
+   }
+
+Take the first line as an example. ``dropout_rate`` is defined as a variable whose prior distribution is a uniform distribution with a range from ``0.1`` to ``0.5``.
+
+.. note:: In the `experiment configuration (V2) schema `_, NNI supports defining the search space directly in the configuration file; detailed usage can be found `here `__. When using the Python API, users can write the search space in the Python file; refer `here `__.
+
+Note that the available sampling strategies within a search space depend on the tuner you want to use. We list the supported types for each builtin tuner below. For a customized tuner, you don't have to follow our convention and you have the flexibility to define any type you want.
+
+Types
+-----
+
+All types of sampling strategies and their parameters are listed here:
+
+
+*
+  ``{"_type": "choice", "_value": options}``
+
+
+  * The variable's value is one of the options. Here ``options`` should be a list of **numbers** or a list of **strings**. Using arbitrary objects as members of this list (like sublists, a mixture of numbers and strings, or null values) should work in most cases, but may trigger undefined behaviors.
+  * ``options`` can also be a nested sub-search-space; this sub-search-space takes effect only when the corresponding element is chosen. The variables in this sub-search-space can be seen as conditional variables. Here is a simple :githublink:`example of nested search space definition `. 
If an element in the options list is a dict, it is a sub-search-space, and for our built-in tuners you have to add a ``_name`` key in this dict, which helps you to identify which element is chosen. Accordingly, here is a :githublink:`sample ` of what users can get from NNI with a nested search space definition. See the table below for the tuners that support nested search spaces.
+
+*
+  ``{"_type": "randint", "_value": [lower, upper]}``
+
+
+  * Chooses a random integer from ``lower`` (inclusive) to ``upper`` (exclusive).
+  * Note: Different tuners may interpret ``randint`` differently. Some (e.g., TPE, GridSearch) treat integers from lower
+    to upper as unordered ones, while others respect the ordering (e.g., SMAC). If you want all the tuners to respect
+    the ordering, please use ``quniform`` with ``q=1``.
+
+*
+  ``{"_type": "uniform", "_value": [low, high]}``
+
+
+  * The variable value is uniformly sampled between low and high.
+  * When optimizing, this variable is constrained to a two-sided interval.
+
+*
+  ``{"_type": "quniform", "_value": [low, high, q]}``
+
+
+  * The variable value is determined using ``clip(round(uniform(low, high) / q) * q, low, high)``\ , where the clip operation is used to constrain the generated value within the bounds. For example, for ``_value`` specified as [0, 10, 2.5], possible values are [0, 2.5, 5.0, 7.5, 10.0]; for ``_value`` specified as [2, 10, 5], possible values are [2, 5, 10]. A runnable sketch of this rounding rule is shown after this list.
+  * Suitable for a discrete value with respect to which the objective is still somewhat "smooth", but which should be bounded both above and below. If you want to uniformly choose an integer from a range [low, high], you can write ``_value`` like this: ``[low, high, 1]``.
+
+*
+  ``{"_type": "loguniform", "_value": [low, high]}``
+
+
+  * The variable value is drawn from a range [low, high] according to a loguniform distribution like exp(uniform(log(low), log(high))), so that the logarithm of the return value is uniformly distributed.
+  * When optimizing, this variable is constrained to be positive.
+
+*
+  ``{"_type": "qloguniform", "_value": [low, high, q]}``
+
+
+  * The variable value is determined using ``clip(round(loguniform(low, high) / q) * q, low, high)``\ , where the clip operation is used to constrain the generated value within the bounds.
+  * Suitable for a discrete variable with respect to which the objective is "smooth" and gets smoother with the size of the value, but which should be bounded both above and below.
+
+*
+  ``{"_type": "normal", "_value": [mu, sigma]}``
+
+
+  * The variable value is a real value that's normally distributed with mean mu and standard deviation sigma. When optimizing, this is an unconstrained variable.
+
+*
+  ``{"_type": "qnormal", "_value": [mu, sigma, q]}``
+
+
+  * The variable value is determined using ``round(normal(mu, sigma) / q) * q``
+  * Suitable for a discrete variable that probably takes a value around mu, but is fundamentally unbounded.
+
+*
+  ``{"_type": "lognormal", "_value": [mu, sigma]}``
+
+
+  * The variable value is drawn according to ``exp(normal(mu, sigma))`` so that the logarithm of the return value is normally distributed. When optimizing, this variable is constrained to be positive.
+
+*
+  ``{"_type": "qlognormal", "_value": [mu, sigma, q]}``
+
+
+  * The variable value is determined using ``round(exp(normal(mu, sigma)) / q) * q``
+  * Suitable for a discrete variable with respect to which the objective is smooth and gets smoother with the size of the variable, which is bounded from one side. 
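+To make the ``q``-based rounding rules above concrete, here is a minimal, runnable Python sketch of the ``quniform`` formula. The helper name and the direct use of ``random.uniform`` are illustrative assumptions for this sketch, not NNI's actual implementation:
+
+.. code-block:: python
+
+   import random
+
+   def quniform(low, high, q):
+       # clip(round(uniform(low, high) / q) * q, low, high)
+       value = round(random.uniform(low, high) / q) * q
+       return min(max(value, low), high)
+
+   # _value = [0, 10, 2.5] -> values drawn from {0, 2.5, 5.0, 7.5, 10.0}
+   print(sorted({quniform(0, 10, 2.5) for _ in range(1000)}))
+   # _value = [2, 10, 5] -> values drawn from {2, 5, 10}, since 0 is clipped up to 2
+   print(sorted({quniform(2, 10, 5) for _ in range(1000)}))
+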
+
+Search Space Types Supported by Each Tuner
+------------------------------------------
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * -
+     - choice
+     - choice(nested)
+     - randint
+     - uniform
+     - quniform
+     - loguniform
+     - qloguniform
+     - normal
+     - qnormal
+     - lognormal
+     - qlognormal
+   * - TPE Tuner
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+   * - Random Search Tuner
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+   * - Anneal Tuner
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+   * - Evolution Tuner
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+   * - SMAC Tuner
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+     -
+   * - Batch Tuner
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+     -
+     -
+     -
+     -
+     -
+     -
+   * - Grid Search Tuner
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+     -
+     -
+   * - Hyperband Advisor
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+   * - Metis Tuner
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+     -
+     -
+   * - GP Tuner
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+   * - DNGO Tuner
+     - :raw-html:`✓`
+     -
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     - :raw-html:`✓`
+     -
+     -
+     -
+     -
+
+
+Known Limitations:
+
+
+*
+  GP Tuner, Metis Tuner and DNGO Tuner support only **numerical values** in the search space (``choice``-type values can be non-numerical with other tuners, e.g., string values). Both GP Tuner and Metis Tuner use a Gaussian Process Regressor (GPR). A GPR makes predictions based on a kernel function and the 'distance' between different points, and it is hard to get a true distance between non-numerical values.
+
+*
+  Note that for nested search spaces:
+
+
+  * Only the Random Search/TPE/Anneal/Evolution/Grid Search tuners support nested search spaces
diff --git a/docs/en_US/Tutorial/SetupNniDeveloperEnvironment.rst b/docs/en_US/Tutorial/SetupNniDeveloperEnvironment.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b1adc581375fc4d5c81962afb5a4ae070ebca162
--- /dev/null
+++ b/docs/en_US/Tutorial/SetupNniDeveloperEnvironment.rst
@@ -0,0 +1,68 @@
+Setup NNI development environment
+=================================
+
+The NNI development environment supports Ubuntu 16.04 (or above), and Windows 10 with 64-bit Python 3.
+
+Installation
+------------
+
+1. Clone source code
+^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+   git clone https://github.com/Microsoft/nni.git
+
+Note: if you want to contribute code back, you need to fork your own NNI repo and clone from there.
+
+2. Install from source code
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. 
code-block:: bash
+
+   python3 -m pip install -U -r dependencies/setup.txt
+   python3 -m pip install -r dependencies/develop.txt
+   python3 setup.py develop
+
+This installs NNI in `development mode `__,
+so you don't need to reinstall it after editing.
+
+3. Check if the environment is ready
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Now, you can try to start an experiment to check if your environment is ready.
+For example, run the command
+
+.. code-block:: bash
+
+   nnictl create --config examples/trials/mnist-pytorch/config.yml
+
+Then open the WebUI to check if everything is OK.
+
+4. Reload changes
+^^^^^^^^^^^^^^^^^
+
+Python
+******
+
+Nothing to do, the code is already linked to package folders.
+
+TypeScript (Linux and macOS)
+****************************
+
+* If ``ts/nni_manager`` is changed, run ``yarn watch`` under this folder. It will watch and build the code continually. ``nnictl`` needs to be restarted to reload the NNI manager.
+* If ``ts/webui`` is changed, run ``yarn dev``\ , which will run a mock API server and a webpack dev server simultaneously. Use the ``EXPERIMENT`` environment variable (e.g., ``mnist-tfv1-running``\ ) to specify the mock data being used. Built-in mock experiments are listed in ``src/webui/mock``. An example of the full command is ``EXPERIMENT=mnist-tfv1-running yarn dev``.
+
+TypeScript (Windows)
+********************
+
+Currently you must rebuild the TypeScript modules with ``python3 setup.py build_ts`` after each edit.
+
+5. Submit Pull Request
+^^^^^^^^^^^^^^^^^^^^^^
+
+All changes are merged into the master branch from your forked repo. The description of the pull request must be meaningful and useful.
+
+We will review the changes as soon as possible. Once the change passes review, we will merge it into the master branch.
+
+For more contribution guidelines and coding styles, you can refer to the `contributing document `__.
diff --git a/docs/en_US/Tutorial/Tensorboard.rst b/docs/en_US/Tutorial/Tensorboard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2fe34f7e6ff5380f91f1001657679aca2eed2e6b
--- /dev/null
+++ b/docs/en_US/Tutorial/Tensorboard.rst
@@ -0,0 +1,51 @@
+How to Use Tensorboard within WebUI
+===================================
+
+Since NNI v2.2, you can launch a TensorBoard process across one or multiple trials within the WebUI. For now, this feature supports the local training service and reuse-mode training services with shared storage, and more scenarios will be supported in later NNI versions.
+
+Preparation
+-----------
+
+Make sure TensorBoard is installed in your environment. If you have never used TensorBoard, here are getting-started tutorials for your reference: `tensorboard with tensorflow `__, `tensorboard with pytorch `__.
+
+Use WebUI to Launch Tensorboard
+-------------------------------
+
+1. Save Logs
+^^^^^^^^^^^^
+
+NNI automatically fetches the ``tensorboard`` subfolder under a trial's output folder as the TensorBoard logdir, so in the trial's source code you need to save the TensorBoard logs under ``NNI_OUTPUT_DIR/tensorboard``. This log path can be joined as:
+
+.. code-block:: python
+
+   log_dir = os.path.join(os.environ["NNI_OUTPUT_DIR"], 'tensorboard')
+
+2. Launch Tensorboard
+^^^^^^^^^^^^^^^^^^^^^
+
+As with the compare function, first select the trials you want to combine, then click the ``Tensorboard`` button.
+
+.. image:: ../../img/Tensorboard_1.png
+   :target: ../../img/Tensorboard_1.png
+   :alt:
+
+After clicking the ``OK`` button in the pop-up box, you will jump to the TensorBoard portal.
+
+.. 
image:: ../../img/Tensorboard_2.png
+   :target: ../../img/Tensorboard_2.png
+   :alt:
+
+You can see the ``SequenceID-TrialID`` on the TensorBoard portal.
+
+.. image:: ../../img/Tensorboard_3.png
+   :target: ../../img/Tensorboard_3.png
+   :alt:
+
+3. Stop All
+^^^^^^^^^^^^
+
+If you want to open a portal you have already launched, click its TensorBoard id. If you don't need TensorBoard anymore, click the ``Stop all tensorboard`` button.
+
+.. image:: ../../img/Tensorboard_4.png
+   :target: ../../img/Tensorboard_4.png
+   :alt:
diff --git a/docs/en_US/Tutorial/WebUI.rst b/docs/en_US/Tutorial/WebUI.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fd3199c3bbc1d1021f6212efee6b6b152133dfdc
--- /dev/null
+++ b/docs/en_US/Tutorial/WebUI.rst
@@ -0,0 +1,326 @@
+WebUI
+=====
+
+Experiments management
+----------------------
+
+Click the tab ``All experiments`` on the nav bar.
+
+.. image:: ../../img/webui-img/managerExperimentList/experimentListNav.png
+   :target: ../../img/webui-img/managerExperimentList/experimentListNav.png
+   :alt: ExperimentList nav
+
+
+
+* On the ``All experiments`` page, you can see all the experiments on your machine.
+
+.. image:: ../../img/webui-img/managerExperimentList/expList.png
+   :target: ../../img/webui-img/managerExperimentList/expList.png
+   :alt: Experiments list
+
+
+
+* When you want to see more details about an experiment, you can click its id, as shown below:
+
+.. image:: ../../img/webui-img/managerExperimentList/toAnotherExp.png
+   :target: ../../img/webui-img/managerExperimentList/toAnotherExp.png
+   :alt: See this experiment detail
+
+
+
+* If there are many experiments in the table, you can use the ``filter`` button.
+
+.. image:: ../../img/webui-img/managerExperimentList/expFilter.png
+   :target: ../../img/webui-img/managerExperimentList/expFilter.png
+   :alt: filter button
+
+
+
+View summary page
+-----------------
+
+Click the tab ``Overview``.
+
+
+* On the overview tab, you can see the experiment information, its status, and the performance of the ``top trials``.
+
+
+.. image:: ../../img/webui-img/full-oview.png
+   :target: ../../img/webui-img/full-oview.png
+   :alt: overview
+
+
+
+* If you want to see the experiment's search space and config, click the ``Search space`` and ``Config`` buttons on the right (shown when you hover over them).
+
+  1. Search space file:
+
+
+  .. image:: ../../img/webui-img/searchSpace.png
+     :target: ../../img/webui-img/searchSpace.png
+     :alt: searchSpace
+
+
+
+  2. Config file:
+
+
+  .. image:: ../../img/webui-img/config.png
+     :target: ../../img/webui-img/config.png
+     :alt: config
+
+
+
+* You can view and download the ``nni-manager/dispatcher log files`` here.
+
+
+.. image:: ../../img/webui-img/review-log.png
+   :target: ../../img/webui-img/review-log.png
+   :alt: logfile
+
+
+
+* If your experiment has many trials, you can change the refresh interval here.
+
+
+.. image:: ../../img/webui-img/refresh-interval.png
+   :target: ../../img/webui-img/refresh-interval.png
+   :alt: refresh
+
+
+
+
+* You can review and download the experiment results (``experiment config``, ``trial message`` and ``intermediate metrics``) when you click the ``Experiment summary`` button.
+
+
+.. image:: ../../img/webui-img/summary.png
+   :target: ../../img/webui-img/summary.png
+   :alt: summary
+
+
+
+* You can change some experiment configurations such as ``maxExecDuration``, ``maxTrialNum`` and ``trial concurrency`` here.
+
+
+.. 
image:: ../../img/webui-img/edit-experiment-param.png
+   :target: ../../img/webui-img/edit-experiment-param.png
+   :alt: editExperimentParams
+
+
+
+* You can click the icon to see a specific error message, and see the ``nni-manager/dispatcher log files`` by clicking the ``Learn about`` link.
+
+
+.. image:: ../../img/webui-img/experimentError.png
+   :target: ../../img/webui-img/experimentError.png
+   :alt: experimentError
+
+
+
+
+* You can click ``About`` to see the version and to report any issues.
+
+View job default metric
+-----------------------
+
+
+* Click the tab ``Default metric`` to see the point graph of all trials. Hover over a point to see its specific default metric and search space information.
+
+
+.. image:: ../../img/webui-img/default-metric.png
+   :target: ../../img/webui-img/default-metric.png
+   :alt: defaultMetricGraph
+
+
+
+* Turn on the switch named ``Optimization curve`` to see the experiment's optimization curve.
+
+
+.. image:: ../../img/webui-img/best-curve.png
+   :target: ../../img/webui-img/best-curve.png
+   :alt: bestCurveGraph
+
+
+View hyper-parameters
+---------------------
+
+Click the tab ``Hyper-parameter`` to see the parallel graph.
+
+
+* You can click the ``add/remove`` button to add or remove axes.
+* Drag the axes to swap them on the chart.
+* You can select a percentage to see only the top trials.
+
+
+.. image:: ../../img/webui-img/hyperPara.png
+   :target: ../../img/webui-img/hyperPara.png
+   :alt: hyperParameterGraph
+
+
+
+View Trial Duration
+-------------------
+
+Click the tab ``Trial Duration`` to see the bar graph.
+
+
+.. image:: ../../img/webui-img/trial_duration.png
+   :target: ../../img/webui-img/trial_duration.png
+   :alt: trialDurationGraph
+
+
+
+View Trial Intermediate Result Graph
+------------------------------------
+
+Click the tab ``Intermediate Result`` to see the line graph.
+
+
+.. image:: ../../img/webui-img/trials_intermeidate.png
+   :target: ../../img/webui-img/trials_intermeidate.png
+   :alt: trialIntermediateGraph
+
+
+
+A trial may produce many intermediate results in the training process. In order to see the trend of some trials more clearly, we provide a filtering function for the intermediate result graph.
+
+You may find that some trials get better or worse at a certain intermediate result. This indicates that it is an important and relevant intermediate result. To take a closer look at this point, you need to enter its corresponding X-value at #Intermediate, and then input the range of metrics at this intermediate result. In the picture below, we choose the No. 4 intermediate result and set the range of metrics to 0.8-1.
+
+
+.. image:: ../../img/webui-img/filter-intermediate.png
+   :target: ../../img/webui-img/filter-intermediate.png
+   :alt: filterIntermediateGraph
+
+
+
+View trial status
+------------------
+
+Click the tab ``Trials Detail`` to see the status of all trials. Specifically:
+
+
+* Trial detail: the trial's id, duration, start time, end time, status, accuracy, and search space file.
+
+
+.. image:: ../../img/webui-img/detail-local.png
+   :target: ../../img/webui-img/detail-local.png
+   :alt: detailLocalImage
+
+
+
+* The table supports searching for a specific trial by its id, status, Trial No. and trial parameters.
+
+1. Trial id:
+
+.. image:: ../../img/webui-img/detail/searchId.png
+   :target: ../../img/webui-img/detail/searchId.png
+   :alt: searchTrialId
+
+
+2. Trial No.:
+
+.. image:: ../../img/webui-img/detail/searchNo.png
+   :target: ../../img/webui-img/detail/searchNo.png
+   :alt: searchTrialNo.
+
+
+3. Trial status:
+
+.. 
image:: ../../img/webui-img/detail/searchStatus.png
+   :target: ../../img/webui-img/detail/searchStatus.png
+   :alt: searchStatus
+
+4. Trial parameters:
+
+(1) parameters whose type is choice:
+
+.. image:: ../../img/webui-img/detail/searchParameterChoice.png
+   :target: ../../img/webui-img/detail/searchParameterChoice.png
+   :alt: searchParameterChoice
+
+(2) parameters whose type is not choice:
+
+.. image:: ../../img/webui-img/detail/searchParameterRange.png
+   :target: ../../img/webui-img/detail/searchParameterRange.png
+   :alt: searchParameterRange
+
+
+* The ``Add column`` button selects which columns to show in the table. If you run an experiment whose final result is a dict, you can see the other keys in the table. You can choose the column ``Intermediate count`` to watch a trial's progress.
+
+
+.. image:: ../../img/webui-img/addColumn.png
+   :target: ../../img/webui-img/addColumn.png
+   :alt: addColumnGraph
+
+
+
+* If you want to compare some trials, you can select them and then click ``Compare`` to see the results.
+
+
+.. image:: ../../img/webui-img/select-trial.png
+   :target: ../../img/webui-img/select-trial.png
+   :alt: selectTrialGraph
+
+
+.. image:: ../../img/webui-img/compare.png
+   :target: ../../img/webui-img/compare.png
+   :alt: compareTrialsGraph
+
+
+* For ``Tensorboard``\ , please refer to this `doc `__.
+
+
+* You can use the ``Copy as python`` button to copy a trial's parameters.
+
+
+.. image:: ../../img/webui-img/copyParameter.png
+   :target: ../../img/webui-img/copyParameter.png
+   :alt: copyTrialParameters
+
+
+
+* You can see trial logs on the ``Log`` tab. There are three buttons, ``View trial log``, ``View trial error`` and ``View trial stdout``, in local mode. If you run on the OpenPAI or Kubeflow platform, you can see the trial stdout and NFS log instead.
+
+1. local mode:
+
+.. image:: ../../img/webui-img/detail/log-local.png
+   :target: ../../img/webui-img/detail/log-local.png
+   :alt: logOnLocal
+
+
+2. OpenPAI, Kubeflow and other modes:
+
+.. image:: ../../img/webui-img/detail-pai.png
+   :target: ../../img/webui-img/detail-pai.png
+   :alt: detailPai
+
+
+* Intermediate Result Graph: you can see the default metric in this graph by clicking the intermediate button.
+
+
+.. image:: ../../img/webui-img/intermediate.png
+   :target: ../../img/webui-img/intermediate.png
+   :alt: intermeidateGraph
+
+
+
+* Kill: you can kill a job whose status is running.
+
+
+.. image:: ../../img/webui-img/kill-running.png
+   :target: ../../img/webui-img/kill-running.png
+   :alt: killTrial
+
+
+
+* Customized trial: you can change a trial's parameters and then submit them to the experiment. If you want to rerun a failed trial, you can submit the same parameters to the experiment.
+
+.. image:: ../../img/webui-img/detail/customizedTrialButton.png
+   :target: ../../img/webui-img/detail/customizedTrialButton.png
+   :alt: customizedTrialButton
+
+
+
+.. 
image:: ../../img/webui-img/detail/customizedTrial.png + :target: ../../img/webui-img/detail/customizedTrial.png + :alt: customizedTrial diff --git a/docs/en_US/Tutorial/python_api_connect.ipynb b/docs/en_US/Tutorial/python_api_connect.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..ef73f937f7f701274dbdd6462062ceeb518b5dfa --- /dev/null +++ b/docs/en_US/Tutorial/python_api_connect.ipynb @@ -0,0 +1,195 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "white-electron", + "metadata": {}, + "source": [ + "## Connect and Manage an Exist Experiment" + ] + }, + { + "cell_type": "markdown", + "id": "recent-italic", + "metadata": {}, + "source": [ + "### 1. Connect Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "statistical-repair", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:18:28] Connect to port 8080 success, experiment id is DH8pVfXc, status is RUNNING.\n" + ] + } + ], + "source": [ + "from nni.experiment import Experiment\n", + "experiment = Experiment.connect(8080)" + ] + }, + { + "cell_type": "markdown", + "id": "defensive-scratch", + "metadata": {}, + "source": [ + "### 2. Experiment View & Control" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "independent-touch", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': 'DH8pVfXc',\n", + " 'revision': 4,\n", + " 'execDuration': 10,\n", + " 'logDir': '/home/ningshang/nni-experiments/DH8pVfXc',\n", + " 'nextSequenceId': 1,\n", + " 'params': {'authorName': 'default',\n", + " 'experimentName': 'example_sklearn-classification',\n", + " 'trialConcurrency': 1,\n", + " 'maxExecDuration': 3600,\n", + " 'maxTrialNum': 100,\n", + " 'searchSpace': '{\"C\": {\"_type\": \"uniform\", \"_value\": [0.1, 1]}, \"kernel\": {\"_type\": \"choice\", \"_value\": [\"linear\", \"rbf\", \"poly\", \"sigmoid\"]}, \"degree\": {\"_type\": \"choice\", \"_value\": [1, 2, 3, 4]}, \"gamma\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}, \"coef0\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}}',\n", + " 'trainingServicePlatform': 'local',\n", + " 'tuner': {'builtinTunerName': 'TPE',\n", + " 'classArgs': {'optimize_mode': 'maximize'},\n", + " 'checkpointDir': '/home/ningshang/nni-experiments/DH8pVfXc/checkpoint'},\n", + " 'versionCheck': True,\n", + " 'clusterMetaData': [{'key': 'trial_config',\n", + " 'value': {'command': 'python3 main.py',\n", + " 'codeDir': '/home/ningshang/nni/examples/trials/sklearn/classification/.',\n", + " 'gpuNum': 0}}]},\n", + " 'startTime': 1614946699989}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_experiment_profile()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "printable-bookmark", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:18:32] (root) Successfully update maxTrialNum.\n" + ] + } + ], + "source": [ + "experiment.update_max_trial_number(200)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "marine-serial", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': 'DH8pVfXc',\n", + " 'revision': 5,\n", + " 'execDuration': 14,\n", + " 'logDir': '/home/ningshang/nni-experiments/DH8pVfXc',\n", + " 'nextSequenceId': 1,\n", + " 'params': {'authorName': 'default',\n", + " 'experimentName': 'example_sklearn-classification',\n", + " 'trialConcurrency': 
1,\n", + " 'maxExecDuration': 3600,\n", + " 'maxTrialNum': 200,\n", + " 'searchSpace': '{\"C\": {\"_type\": \"uniform\", \"_value\": [0.1, 1]}, \"kernel\": {\"_type\": \"choice\", \"_value\": [\"linear\", \"rbf\", \"poly\", \"sigmoid\"]}, \"degree\": {\"_type\": \"choice\", \"_value\": [1, 2, 3, 4]}, \"gamma\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}, \"coef0\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}}',\n", + " 'trainingServicePlatform': 'local',\n", + " 'tuner': {'builtinTunerName': 'TPE',\n", + " 'classArgs': {'optimize_mode': 'maximize'},\n", + " 'checkpointDir': '/home/ningshang/nni-experiments/DH8pVfXc/checkpoint'},\n", + " 'versionCheck': True,\n", + " 'clusterMetaData': [{'key': 'trial_config',\n", + " 'value': {'command': 'python3 main.py',\n", + " 'codeDir': '/home/ningshang/nni/examples/trials/sklearn/classification/.',\n", + " 'gpuNum': 0}}]},\n", + " 'startTime': 1614946699989}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_experiment_profile()" + ] + }, + { + "cell_type": "markdown", + "id": "opened-lounge", + "metadata": {}, + "source": [ + "### 3. Stop Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "emotional-machinery", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:18:36] Stopping experiment, please wait...\n", + "[2021-03-05 12:18:38] Experiment stopped\n" + ] + } + ], + "source": [ + "experiment.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nni-dev", + "language": "python", + "name": "nni-dev" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/en_US/Tutorial/python_api_start.ipynb b/docs/en_US/Tutorial/python_api_start.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..95ceb6d99b1ad983d06633cd803741b5cc202369 --- /dev/null +++ b/docs/en_US/Tutorial/python_api_start.ipynb @@ -0,0 +1,214 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "technological-script", + "metadata": {}, + "source": [ + "## Start and Manage a New Experiment" + ] + }, + { + "cell_type": "markdown", + "id": "reported-somerset", + "metadata": {}, + "source": [ + "### 1. Configure Search Space" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "potential-williams", + "metadata": {}, + "outputs": [], + "source": [ + "search_space = {\n", + " \"C\": {\"_type\":\"quniform\",\"_value\":[0.1, 1, 0.1]},\n", + " \"kernel\": {\"_type\":\"choice\",\"_value\":[\"linear\", \"rbf\", \"poly\", \"sigmoid\"]},\n", + " \"degree\": {\"_type\":\"choice\",\"_value\":[1, 2, 3, 4]},\n", + " \"gamma\": {\"_type\":\"quniform\",\"_value\":[0.01, 0.1, 0.01]},\n", + " \"coef0\": {\"_type\":\"quniform\",\"_value\":[0.01, 0.1, 0.01]}\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "greek-archive", + "metadata": {}, + "source": [ + "### 2. 
Configure Experiment " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fiscal-expansion", + "metadata": {}, + "outputs": [], + "source": [ + "from nni.experiment import Experiment\n", + "experiment = Experiment('local')\n", + "experiment.config.experiment_name = 'Example'\n", + "experiment.config.trial_concurrency = 2\n", + "experiment.config.max_trial_number = 10\n", + "experiment.config.search_space = search_space\n", + "experiment.config.trial_command = 'python3 main.py'\n", + "experiment.config.trial_code_directory = './'\n", + "experiment.config.tuner.name = 'TPE'\n", + "experiment.config.tuner.class_args['optimize_mode'] = 'maximize'\n", + "experiment.config.training_service.use_active_gpu = True" + ] + }, + { + "cell_type": "markdown", + "id": "received-tattoo", + "metadata": {}, + "source": [ + "### 3. Start Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "pleasant-patent", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:12:19] Creating experiment, Experiment ID: wdt0le3v\n", + "[2021-03-05 12:12:19] Starting web server...\n", + "[2021-03-05 12:12:20] Setting up...\n", + "[2021-03-05 12:12:20] Web UI URLs: http://127.0.0.1:8080 http://10.0.1.5:8080 http://172.17.0.1:8080\n" + ] + } + ], + "source": [ + "experiment.start(8080)" + ] + }, + { + "cell_type": "markdown", + "id": "miniature-prison", + "metadata": {}, + "source": [ + "### 4. Experiment View & Control" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "animated-english", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'RUNNING'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_status()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "alpha-ottawa", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[TrialResult(parameter={'C': 0.30000000000000004, 'kernel': 'linear', 'degree': 3, 'gamma': 0.03, 'coef0': 0.07}, value=0.9888888888888889, trialJobId='VLqU9'),\n", + " TrialResult(parameter={'C': 0.5, 'kernel': 'sigmoid', 'degree': 1, 'gamma': 0.03, 'coef0': 0.07}, value=0.8888888888888888, trialJobId='DLo6r')]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.export_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "unique-rendering", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'DLo6r': [TrialMetricData(timestamp=1614946351592, trialJobId='DLo6r', parameterId='1', type='FINAL', sequence=0, data=0.8888888888888888)],\n", + " 'VLqU9': [TrialMetricData(timestamp=1614946351607, trialJobId='VLqU9', parameterId='0', type='FINAL', sequence=0, data=0.9888888888888889)]}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_job_metrics()" + ] + }, + { + "cell_type": "markdown", + "id": "welsh-difference", + "metadata": {}, + "source": [ + "### 5. 
Stop Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "technological-cleanup", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:12:40] Stopping experiment, please wait...\n", + "[2021-03-05 12:12:42] Experiment stopped\n" + ] + } + ], + "source": [ + "experiment.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nni-dev", + "language": "python", + "name": "nni-dev" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/en_US/_templates/index.html b/docs/en_US/_templates/index.html new file mode 100644 index 0000000000000000000000000000000000000000..82e2d31bc52904922ddd7b28c8efb3cfdc16a321 --- /dev/null +++ b/docs/en_US/_templates/index.html @@ -0,0 +1,541 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +
+ + NNI (Neural Network Intelligence) is a lightweight but powerful toolkit to + help users automate + Feature Engineering, + Neural Architecture Search, + Hyperparameter Tuning and + Model Compression. +
+

+ The tool manages automated machine learning (AutoML) experiments,
+ dispatches and runs
+ experiments' trial jobs generated by tuning algorithms to search for the best neural
+ architecture and/or hyper-parameters in
+ different training environments like
+ Local Machine,
+ Remote Servers,
+ OpenPAI,
+ Kubeflow,
+ FrameworkController on K8S (AKS etc.),
+ DLWorkspace (aka. DLTS),
+ AML (Azure Machine Learning),
+ AdaptDL (aka. ADL), other cloud options and even Hybrid mode.
+

+ +
+

Who should consider using NNI

+
    +
  • Those who want to try different AutoML algorithms in their training code/model.
  • +
  • Those who want to run AutoML trial jobs in different environments to speed up search.
  • +
  • Researchers and data scientists who want to easily implement and experiment with new AutoML
+ algorithms
+ , be it a hyperparameter tuning algorithm, a
+ neural architecture search algorithm, or a model compression algorithm.
+
  • +
  • ML Platform owners who want to support AutoML in their platform
  • +
+
+ +
+
+

What's NEW!

+ +
+
+ +
+ +
+

NNI capabilities in a glance

+

+ NNI provides a CommandLine Tool as well as a user-friendly WebUI to manage training experiments.
+ With the extensible API, you can customize your own AutoML algorithms and training services.
+ To make it easy for new users, NNI also provides a set of built-in state-of-the-art
+ AutoML algorithms and out-of-the-box support for popular training platforms.
+

+

+ In the following table, we summarize the current NNI capabilities;
+ we are gradually adding new capabilities, and we'd love to have your contribution.
+

+
+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + +
+ Frameworks & Libraries + + Algorithms + + Training Services +
Built-in +
    +
  • Supported Frameworks
  • +
      +
    • PyTorch
    • +
    • Keras
    • +
    • TensorFlow
    • +
    • MXNet
    • +
    • Caffe2
    • + More...
      +
    +
+
    +
  • Supported Libraries
  • +
      +
    • Scikit-learn
    • +
    • XGBoost
    • +
    • LightGBM
    • + More...
      +
    +
+ +
+ Hyperparameter Tuning + + Neural Architecture Search (Retiarii) + + Model Compression + + Feature Engineering (Beta) + + Early Stop Algorithms + + + +
References + + + + + +
+ + +
+

Installation

+
+

Install

+
+ NNI supports and is tested on Ubuntu >= 16.04, macOS >= 10.14.1, + and Windows 10 >= 1809. Simply run the following pip install + in an environment that has python 64-bit >= 3.6. +
+
Linux or macOS
+
python3 -m pip install --upgrade nni
+
Windows
+
python -m pip install --upgrade nni
+
If you want to try latest code, please install + NNI from source code. +
+
For detailed system requirements of NNI, please refer to here
+ for Linux & macOS, and here for Windows.
+
+
+

Note:

+
    +
  • If there is any privilege issue, add --user to install NNI in the user directory.
  • +
  • Currently NNI on Windows supports local, remote and pai mode. Anaconda or Miniconda is highly + recommended to install NNI on Windows.
  • +
  • If there is any error like Segmentation fault, please refer to FAQ. For FAQ on Windows, please refer + to NNI on Windows.
  • +
+
+
+

Verify installation

+
+ The following example is built on PyTorch. Make sure PyTorch is installed when running
+ it.
+
+
    +
  • +
    Download the examples by cloning the source code.
    +
    git clone -b {{ release }} https://github.com/Microsoft/nni.git
    +
  • +
  • +
    Run the MNIST example.
    +
    Linux or macOS
    +
    nnictl create --config nni/examples/trials/mnist-pytorch/config.yml
    +
    Windows
    +
    nnictl create --config nni\examples\trials\mnist-pytorch\config_windows.yml
    +
  • +
  • +
    + Wait for the message INFO: Successfully started experiment! in the command line. + This message indicates that your experiment has been successfully started. + You can explore the experiment using the Web UI url. +
    + +
    +INFO: Starting restful server...
    +INFO: Successfully started Restful server!
    +INFO: Setting local config...
    +INFO: Successfully set local config!
    +INFO: Starting experiment...
    +INFO: Successfully started experiment!
    +-----------------------------------------------------------------------
    +The experiment id is egchD4qy
    +The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
    +-----------------------------------------------------------------------
    +
    +You can use these commands to get more information about the experiment
    +-----------------------------------------------------------------------
    +  commands                       description
    +1. nnictl experiment show        show the information of experiments
    +2. nnictl trial ls               list all of trial jobs
    +3. nnictl top                    monitor the status of running experiments
    +4. nnictl log stderr             show stderr log content
    +5. nnictl log stdout             show stdout log content
    +6. nnictl stop                   stop an experiment
    +7. nnictl trial kill             kill a trial job by id
    +8. nnictl --help                 get help information about nnictl
    +-----------------------------------------------------------------------
    +
    +
  • +
  •
+ Open the Web UI url in your browser, where you can view detailed information about the experiment and
+ all the submitted trial jobs as shown below. Here are more Web UI
+ pages.
+
+
+ + +
+ + +
+

Releases and Contributing

+
NNI has a monthly release cycle (major releases). Please let us know if you encounter a bug by filing an issue.
+
+
We appreciate all contributions. If you are planning to contribute any bug fixes, please do so without further discussion.
+
+
If you plan to contribute new features, new tuners, new training services, etc., please first open an issue or reuse an existing issue, and discuss the feature with us. We will discuss it with you on the issue in a timely manner, or set up conference calls if needed.
+
+
To learn more about making a contribution to NNI, please refer to our How-to contribution page.
+
+
We appreciate all contributions and thank all the contributors!
+ +
+ +
+

Feedback

+ +
+
Join IM discussion groups:
+ + + + + + + + + + + + + +
GitterWeChat
+ Gitter + OR + NNI Wechat +
+
+
+ +
+

Test status

+

Essentials

+ + + + + + + + + + + + + + + + + +
TypeStatus
Fast test + + + +
Full linux + + + +
Full windows + + + +
+

Training services

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeStatus
Remote - linux to linux + + + +
Remote - linux to windows + + + +
Remote - windows to linux + + + +
OpenPAI + + + +
Frameworkcontroller + + + +
Kubeflow + + + +
Hybrid + + + +
AzureML + + + +
+
+ +
+

Related Projects

+

+ Targeting openness and advancing state-of-the-art technology,
+ Microsoft Research (MSR)
+ has also released a few
+ other open source projects.

+
    +
  •
+ OpenPAI : an open source platform that provides complete AI model
+ training and resource management
+ capabilities; it is easy to extend and supports on-premises,
+ cloud, and hybrid environments at various scales.
+
  • +
  •
+ FrameworkController : an open source
+ general-purpose Kubernetes Pod Controller that orchestrates
+ all kinds of applications on Kubernetes with a single controller.
+
  • +
  • + MMdnn : A comprehensive, cross-framework solution to convert, + visualize and diagnose deep neural network + models. The "MM" in MMdnn stands for model management + and "dnn" is an acronym for deep neural network. +
  • +
  • + SPTAG : Space Partition Tree And Graph (SPTAG) is an open + source library + for large scale vector approximate nearest neighbor search scenario. +
  • +
  • + nn-Meter : An accurate inference latency predictor for DNN models on diverse edge devices. +
  • +
+

We encourage researchers and students to leverage these projects to accelerate AI development and research.

+
+ + +
+

License

+

The entire codebase is under the MIT license

+
+ +{% endblock %} diff --git a/docs/en_US/_templates/layout.html b/docs/en_US/_templates/layout.html new file mode 100644 index 0000000000000000000000000000000000000000..e4200b60ab8b4bd6206c551ab1017d1481d5ed31 --- /dev/null +++ b/docs/en_US/_templates/layout.html @@ -0,0 +1,30 @@ +{% extends "!layout.html" %} +{% block sidebartitle %} + + {% if logo and theme_logo_only %} + + {% else %} + {{ project }} + {% endif %} + + {% if logo %} + {# Not strictly valid HTML, but it's the only way to display/scale it properly, without weird scripting or heaps of work #} + + {% endif %} + + + {% if theme_display_version %} + {%- set nav_version = version %} + {% if READTHEDOCS and current_version %} + {%- set nav_version = current_version %} + {% endif %} + {% if nav_version %} +
+ {{ nav_version }} +
+ {% endif %} + {% endif %} + + {% include "searchbox.html" %} + +{% endblock %} diff --git a/docs/en_US/_templates/nnSpider.html b/docs/en_US/_templates/nnSpider.html new file mode 100644 index 0000000000000000000000000000000000000000..2f436832bbcc700060ef223449dd40721eff4753 --- /dev/null +++ b/docs/en_US/_templates/nnSpider.html @@ -0,0 +1,92 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} +

nnSpider emoticons

+ + +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/comfort.html b/docs/en_US/_templates/nnSpider/comfort.html new file mode 100644 index 0000000000000000000000000000000000000000..d6c33b3dfc7107380c70c3678c7074e455b24efe --- /dev/null +++ b/docs/en_US/_templates/nnSpider/comfort.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Comfort

+
+ Comfort +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/crying.html b/docs/en_US/_templates/nnSpider/crying.html new file mode 100644 index 0000000000000000000000000000000000000000..fc4b76843fa1ffb4a6f463d3c1209963ccaecf18 --- /dev/null +++ b/docs/en_US/_templates/nnSpider/crying.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Crying

+
+ Crying +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/cut.html b/docs/en_US/_templates/nnSpider/cut.html new file mode 100644 index 0000000000000000000000000000000000000000..f662e7dffe489b108d97f687617ce49c297a6f1b --- /dev/null +++ b/docs/en_US/_templates/nnSpider/cut.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Cut

+
+ Cut +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/errorEmotion.html b/docs/en_US/_templates/nnSpider/errorEmotion.html new file mode 100644 index 0000000000000000000000000000000000000000..ad105d391c47e274d7a761d7635b3523287e78ba --- /dev/null +++ b/docs/en_US/_templates/nnSpider/errorEmotion.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Error

+
+ Error +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/holiday.html b/docs/en_US/_templates/nnSpider/holiday.html new file mode 100644 index 0000000000000000000000000000000000000000..ed0db376d4a7d748f4265a54378e4225742c6114 --- /dev/null +++ b/docs/en_US/_templates/nnSpider/holiday.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Holiday

+
+ NoBug +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/nobug.html b/docs/en_US/_templates/nnSpider/nobug.html new file mode 100644 index 0000000000000000000000000000000000000000..cbaf9ec5d03fcd5035b462e08dfee9220c7229f8 --- /dev/null +++ b/docs/en_US/_templates/nnSpider/nobug.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

NoBug

+
+ NoBug +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/sign.html b/docs/en_US/_templates/nnSpider/sign.html new file mode 100644 index 0000000000000000000000000000000000000000..949ff3f49da77b504fae0fa6448ab721f49d095c --- /dev/null +++ b/docs/en_US/_templates/nnSpider/sign.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Sign

+
+ Sign +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/sweat.html b/docs/en_US/_templates/nnSpider/sweat.html new file mode 100644 index 0000000000000000000000000000000000000000..1e9effac77905eb5e341214248916bb5a5f8f099 --- /dev/null +++ b/docs/en_US/_templates/nnSpider/sweat.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Sweat

+
+ Sweat +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/weaving.html b/docs/en_US/_templates/nnSpider/weaving.html new file mode 100644 index 0000000000000000000000000000000000000000..70fd94604ed4dfd89d5224b643fde03fc0effbcc --- /dev/null +++ b/docs/en_US/_templates/nnSpider/weaving.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Weaving

+
+ Weaving +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/_templates/nnSpider/working.html b/docs/en_US/_templates/nnSpider/working.html new file mode 100644 index 0000000000000000000000000000000000000000..bf502fab688b4eb9646694ec7786e194d5efbe3c --- /dev/null +++ b/docs/en_US/_templates/nnSpider/working.html @@ -0,0 +1,12 @@ +{% extends "!layout.html" %} + +{% set title = "Welcome To Neural Network Intelligence !!!"%} + +{% block document %} + +

Working

+
+ Working +
+ +{% endblock %} \ No newline at end of file diff --git a/docs/en_US/autotune_ref.rst b/docs/en_US/autotune_ref.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3e3261ff8ac1f6ecd011740cefaf036d49c72a8 --- /dev/null +++ b/docs/en_US/autotune_ref.rst @@ -0,0 +1,92 @@ +Python API Reference of Auto Tune +================================= + +.. contents:: + +Trial +----- + +.. autofunction:: nni.get_next_parameter +.. autofunction:: nni.get_current_parameter +.. autofunction:: nni.report_intermediate_result +.. autofunction:: nni.report_final_result +.. autofunction:: nni.get_experiment_id +.. autofunction:: nni.get_trial_id +.. autofunction:: nni.get_sequence_id + +Tuner +----- + +.. autoclass:: nni.tuner.Tuner + :members: + +.. autoclass:: nni.algorithms.hpo.tpe_tuner.TpeTuner + :members: + +.. autoclass:: nni.algorithms.hpo.random_tuner.RandomTuner + :members: + +.. autoclass:: nni.algorithms.hpo.hyperopt_tuner.HyperoptTuner + :members: + +.. autoclass:: nni.algorithms.hpo.evolution_tuner.EvolutionTuner + :members: + +.. autoclass:: nni.algorithms.hpo.smac_tuner.SMACTuner + :members: + +.. autoclass:: nni.algorithms.hpo.gridsearch_tuner.GridSearchTuner + :members: + +.. autoclass:: nni.algorithms.hpo.networkmorphism_tuner.NetworkMorphismTuner + :members: + +.. autoclass:: nni.algorithms.hpo.metis_tuner.MetisTuner + :members: + +.. autoclass:: nni.algorithms.hpo.ppo_tuner.PPOTuner + :members: + +.. autoclass:: nni.algorithms.hpo.batch_tuner.BatchTuner + :members: + +.. autoclass:: nni.algorithms.hpo.gp_tuner.GPTuner + :members: + +Assessor +-------- + +.. autoclass:: nni.assessor.Assessor + :members: + +.. autoclass:: nni.assessor.AssessResult + :members: + +.. autoclass:: nni.algorithms.hpo.curvefitting_assessor.CurvefittingAssessor + :members: + +.. autoclass:: nni.algorithms.hpo.medianstop_assessor.MedianstopAssessor + :members: + +Advisor +------- + +.. autoclass:: nni.runtime.msg_dispatcher_base.MsgDispatcherBase + :members: + +.. autoclass:: nni.algorithms.hpo.hyperband_advisor.Hyperband + :members: + +.. autoclass:: nni.algorithms.hpo.bohb_advisor.BOHB + :members: + +Utilities +--------- + +.. autofunction:: nni.utils.merge_parameter + +.. autofunction:: nni.trace + +.. autofunction:: nni.dump + +.. autofunction:: nni.load diff --git a/docs/en_US/builtin_assessor.rst b/docs/en_US/builtin_assessor.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b059f60f1b0995e45298e741541be55fcde60e2 --- /dev/null +++ b/docs/en_US/builtin_assessor.rst @@ -0,0 +1,19 @@ +Builtin-Assessors +================= + +In order to save on computing resources, NNI supports an early stopping policy and has an interface called **Assessor** to do this job. + +Assessor receives the intermediate result from a trial and decides whether the trial should be killed using a specific algorithm. Once the trial experiment meets the early stopping conditions (which means Assessor is pessimistic about the final results), the assessor will kill the trial and the status of the trial will be `EARLY_STOPPED`. + +Here is an experimental result of MNIST after using the 'Curvefitting' Assessor in 'maximize' mode. You can see that Assessor successfully **early stopped** many trials with bad hyperparameters in advance. If you use Assessor, you may get better hyperparameters using the same computing resources. + +Implemented code directory: :githublink:`config_assessor.yml ` + +.. image:: ../img/Assessor.png + +.. 
toctree:: + :maxdepth: 1 + + Overview<./Assessor/BuiltinAssessor> + Medianstop<./Assessor/MedianstopAssessor> + Curvefitting<./Assessor/CurvefittingAssessor> diff --git a/docs/en_US/builtin_tuner.rst b/docs/en_US/builtin_tuner.rst new file mode 100644 index 0000000000000000000000000000000000000000..e228aad4efa9968c1961dd8ac2dd1e228bb93156 --- /dev/null +++ b/docs/en_US/builtin_tuner.rst @@ -0,0 +1,26 @@ +Builtin-Tuners +============== + +NNI provides an easy way to adopt an approach to set up parameter tuning algorithms, we call them **Tuner**. + +Tuner receives metrics from `Trial` to evaluate the performance of a specific parameters/architecture configuration. Tuner sends the next hyper-parameter or architecture configuration to Trial. + + +.. toctree:: + :maxdepth: 1 + + Overview + TPE + Random Search + Anneal + Naive Evolution + SMAC + Metis Tuner + Batch Tuner + Grid Search + GP Tuner + Network Morphism + Hyperband + BOHB + PBT Tuner + DNGO Tuner diff --git a/docs/en_US/conf.py b/docs/en_US/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..73538ab559a413b6d3a0a1c3149b2316693d34e6 --- /dev/null +++ b/docs/en_US/conf.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys +sys.path.insert(0, os.path.abspath('../..')) + + +# -- Project information --------------------------------------------------- + +project = 'NNI' +copyright = '2021, Microsoft' +author = 'Microsoft' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = 'v2.6.1' + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.mathjax', + 'sphinxarg.ext', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'sphinx.ext.intersphinx', + 'nbsphinx', + 'sphinx.ext.extlinks', + 'IPython.sphinxext.ipython_console_highlighting', +] + +# Add mock modules +autodoc_mock_imports = ['apex', 'nni_node', 'tensorrt', 'pycuda', 'nn_meter'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = ['.rst'] + +# The master toctree document. +master_doc = 'contents' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Release_v1.0.md', '**.ipynb_checkpoints'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +html_additional_pages = { + 'index': 'index.html', + 'nnSpider': 'nnSpider.html', + 'nnSpider/nobug': 'nnSpider/nobug.html', + 'nnSpider/holiday': 'nnSpider/holiday.html', + 'nnSpider/errorEmotion': 'nnSpider/errorEmotion.html', + 'nnSpider/working': 'nnSpider/working.html', + 'nnSpider/sign': 'nnSpider/sign.html', + 'nnSpider/crying': 'nnSpider/crying.html', + 'nnSpider/cut': 'nnSpider/cut.html', + 'nnSpider/weaving': 'nnSpider/weaving.html', + 'nnSpider/comfort': 'nnSpider/comfort.html', + 'nnSpider/sweat': 'nnSpider/sweat.html' + +} + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + 'logo_only': True, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['../static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + +html_logo = '../img/nni_logo_dark.png' +html_title = 'An open source AutoML toolkit for neural architecture search, model compression and hyper-parameter tuning (%s %s)' % \ + (project, release) + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'NeuralNetworkIntelligencedoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'NeuralNetworkIntelligence.tex', 'Neural Network Intelligence Documentation', + 'Microsoft', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'neuralnetworkintelligence', 'Neural Network Intelligence Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'NeuralNetworkIntelligence', 'Neural Network Intelligence Documentation',
+     author, 'NeuralNetworkIntelligence', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be an ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# external links (for github code)
+# Reference the code via :githublink:`path/to/your/example/code.py`
+git_commit_id = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
+
+extlinks = {
+    'githublink': ('https://github.com/microsoft/nni/blob/' + git_commit_id + '/%s', 'Github link: ')
+}
+
+# -- Extension configuration -------------------------------------------------
+def setup(app):
+    app.add_css_file('css/custom.css')
diff --git a/docs/en_US/contents.rst b/docs/en_US/contents.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f9e64d9cdbf583e4a8daebef04ffe89103cb29f0
--- /dev/null
+++ b/docs/en_US/contents.rst
@@ -0,0 +1,23 @@
+###########################
+Neural Network Intelligence
+###########################
+
+
+.. toctree::
+   :caption: Table of Contents
+   :maxdepth: 2
+   :titlesonly:
+
+   Overview
+   Installation
+   QuickStart
+   Auto (Hyper-parameter) Tuning
+   Neural Architecture Search
+   Model Compression
+   Feature Engineering
+   References
+   Use Cases and Solutions
+   Research and Publications
+   FAQ
+   How to Contribute
+   Change Log
diff --git a/docs/en_US/contribution.rst b/docs/en_US/contribution.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6131b37a8626c2565996d22ce33c5d434eb63691
--- /dev/null
+++ b/docs/en_US/contribution.rst
@@ -0,0 +1,7 @@
+###############################
+Contribute to NNI
+###############################
+
+.. toctree::
+   Development Setup<./Tutorial/SetupNniDeveloperEnvironment>
+   Contribution Guide<./Tutorial/Contributing>
\ No newline at end of file
diff --git a/docs/en_US/examples.rst b/docs/en_US/examples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c0e820c0179ea17cd91907305784b4981929156f
--- /dev/null
+++ b/docs/en_US/examples.rst
@@ -0,0 +1,12 @@
+######################
+Examples
+######################
+
+.. toctree::
+   :maxdepth: 2
+
+   MNIST<./TrialExample/MnistExamples>
+   Cifar10<./TrialExample/Cifar10Examples>
+   Scikit-learn<./TrialExample/SklearnExamples>
+   GBDT<./TrialExample/GbdtExample>
+   Pix2pix<./TrialExample/Pix2pixExample>
diff --git a/docs/en_US/feature_engineering.rst b/docs/en_US/feature_engineering.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a2b2afda20486136f7f3b6634e7ff3a303bab6d4
--- /dev/null
+++ b/docs/en_US/feature_engineering.rst
@@ -0,0 +1,16 @@
+###################
+Feature Engineering
+###################
+
+We are glad to introduce the Feature Engineering toolkit on top of NNI.
+It is still in the experimental phase and may evolve based on usage feedback.
+We invite you to use it, give feedback, and even contribute.
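+
+As a quick taste of the API, here is a minimal sketch (ours, not from the tutorials below; the module path follows the current NNI layout and may differ in other releases, and the dataset is synthetic):
+
+.. code-block:: python
+
+   from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector
+   from sklearn.datasets import make_classification
+
+   # Synthetic data, just to make the sketch self-contained.
+   X, y = make_classification(n_samples=1000, n_features=50, random_state=0)
+
+   selector = FeatureGradientSelector(n_features=10)  # keep the 10 strongest features
+   selector.fit(X, y)
+   print(selector.get_selected_features())            # indices of the selected columns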
+
+For details, please refer to the following tutorials:
+
+.. toctree::
+   :maxdepth: 2
+
+   Overview
+   GradientFeatureSelector
+   GBDTSelector
diff --git a/docs/en_US/hpo_advanced.rst b/docs/en_US/hpo_advanced.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f2c4529ac5838c7dd9a2e34c00f56286e8f6d406
--- /dev/null
+++ b/docs/en_US/hpo_advanced.rst
@@ -0,0 +1,11 @@
+Advanced Features
+=================
+
+.. toctree::
+   :maxdepth: 2
+
+   Write a New Tuner
+   Write a New Assessor
+   Write a New Advisor
+   Write a New Training Service
+   Install Customized Algorithms as Builtin Tuners/Assessors/Advisors
diff --git a/docs/en_US/hpo_benchmark.rst b/docs/en_US/hpo_benchmark.rst
new file mode 100644
index 0000000000000000000000000000000000000000..413f6d457338501ceaf70192a4fb546d708bdc38
--- /dev/null
+++ b/docs/en_US/hpo_benchmark.rst
@@ -0,0 +1,182 @@
+HPO Benchmarks
+==============
+
+.. toctree::
+   :hidden:
+
+   HPO Benchmark Example Statistics
+
+We provide a benchmarking tool to compare the performance of tuners provided by NNI (and users' custom tuners) on different
+types of tasks. This tool uses the `automlbenchmark repository `_ to run different *benchmarks* on the NNI *tuners*.
+The tool is located in ``examples/trials/benchmarking/automlbenchmark``. This document provides a brief introduction to the tool, its usage, and the currently available benchmarks.
+
+Overview and Terminologies
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Ideally, an **HPO Benchmark** provides a tuner with a search space, calls the tuner repeatedly, and evaluates how the tuner probes
+the search space and approaches good solutions. In addition, inside the benchmark, an evaluator should be associated with
+each search space; it scores points in the search space and gives feedback to the tuner. For instance,
+the search space could be the space of hyperparameters for a neural network. The evaluator would then contain train data,
+test data, and a criterion. To evaluate a point in the search space, the evaluator trains the network on the train data
+and reports the score of the model on the test data as the score for the point.
+
+However, a **benchmark** provided by the automlbenchmark repository only provides part of the functionality of the evaluator.
+More concretely, it assumes that it is evaluating a **framework**. Unlike a tuner, a **framework**, given train data,
+can directly solve a **task** and predict on the test set. The **benchmark** from the automlbenchmark repository directly provides
+train and test datasets to a **framework**, evaluates the prediction on the test set, and reports this score as the final score.
+Therefore, to implement an **HPO Benchmark** using automlbenchmark, we pair up a tuner with a search space to form a **framework**,
+and handle the repeated trial-evaluate-feedback loop inside the **framework** abstraction. In other words, each **HPO Benchmark**
+contains two main components: a **benchmark** from the automlbenchmark library, and an **architecture** which defines the search
+space and the evaluator. To further clarify, we provide definitions for the terminologies used in this document.
+
+* **tuner**\ : a `tuner or advisor provided by NNI `_, or a custom tuner provided by the user.
+* **task**\ : an abstraction used by automlbenchmark. A task can be thought of as a tuple (dataset, metric). It provides train and test datasets to the frameworks. Then, based on the returned predictions on the test set, the task evaluates the metric (e.g., mse for regression, f1 for classification) and reports the score.
+* **benchmark**\ : an abstraction used by automlbenchmark. A benchmark is a set of tasks, along with other external constraints such as time limits.
+* **framework**\ : an abstraction used by automlbenchmark. Given a task, a framework solves the proposed regression or classification problem using train data and produces predictions on the test set. In our implementation, each framework is an architecture, which defines a search space. To evaluate a task given by the benchmark on a specific tuner, we let the tuner continuously tune the hyperparameters (by giving it the cross-validation score on the train data as feedback) until the time or trial limit is reached, as sketched after this list. Then, the architecture is retrained on the entire train set using the best set of hyperparameters.
+* **architecture**\ : an architecture is a specific method for solving the tasks, along with a set of hyperparameters to optimize (i.e., the search space). See ``./nni/extensions/NNI/architectures`` for examples.
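+
+The following sketch (our illustration, not code from the repository) shows the trial-evaluate-feedback loop that a **framework** runs; ``make_model``, ``trial_limit``, and the training data are hypothetical placeholders, while ``generate_parameters`` / ``receive_trial_result`` are the standard NNI ``Tuner`` methods:
+
+.. code-block:: python
+
+   from sklearn.model_selection import cross_val_score
+
+   # `tuner` is an NNI Tuner instance; `make_model(params)` is a hypothetical
+   # helper that builds, e.g., a scikit-learn model from a sampled configuration.
+   best_params, best_score = None, float('-inf')
+   for trial_id in range(trial_limit):
+       params = tuner.generate_parameters(trial_id)         # tuner proposes a point
+       score = cross_val_score(make_model(params), X_train, y_train).mean()
+       tuner.receive_trial_result(trial_id, params, score)  # feedback to the tuner
+       if score > best_score:
+           best_params, best_score = params, score
+
+   # Finally, retrain on the entire train set with the best hyperparameters.
+   final_model = make_model(best_params).fit(X_train, y_train)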
+
+Supported HPO Benchmarks
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+From the previous discussion, we can see that to define an **HPO Benchmark**, we need to specify a **benchmark** and an **architecture**.
+
+Currently, the only architectures we support are random forest and MLP. We use the
+`scikit-learn implementation `_. Typically, a number of
+hyperparameters may directly affect the performance of random forest and MLP models. We design the search
+spaces as follows.
+
+Search Space for Random Forest:
+
+.. code-block:: json
+
+   {
+       "n_estimators": {"_type":"randint", "_value": [4, 2048]},
+       "max_depth": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 0]},
+       "min_samples_leaf": {"_type":"randint", "_value": [1, 8]},
+       "min_samples_split": {"_type":"randint", "_value": [2, 16]},
+       "max_leaf_nodes": {"_type":"randint", "_value": [0, 4096]}
+   }
+
+Search Space for MLP:
+
+.. code-block:: json
+
+   {
+       "hidden_layer_sizes": {"_type":"choice", "_value": [[16], [64], [128], [256], [16, 16], [64, 64], [128, 128], [256, 256], [16, 16, 16], [64, 64, 64], [128, 128, 128], [256, 256, 256], [256, 128, 64, 16], [128, 64, 16], [64, 16], [16, 64, 128, 256], [16, 64, 128], [16, 64]]},
+       "learning_rate_init": {"_type":"choice", "_value": [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001]},
+       "alpha": {"_type":"choice", "_value": [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]},
+       "momentum": {"_type":"uniform","_value":[0, 1]},
+       "beta_1": {"_type":"uniform","_value":[0, 1]},
+       "tol": {"_type":"choice", "_value": [0.001, 0.0005, 0.0001, 0.00005, 0.00001]},
+       "max_iter": {"_type":"randint", "_value": [2, 256]}
+   }
+
+In addition, we write the search space in different ways (e.g., using "choice", "randint", or "loguniform"), as illustrated below.
+The architecture implementation and search space definition can be found in ``./nni/extensions/NNI/architectures/``.
+You may replace the search space definition in this file to experiment with different search spaces.
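+
+For example, the same hyperparameter can be written in several ways (hypothetical values for illustration; note that ``loguniform`` samples floats, so integer parameters need rounding):
+
+.. code-block:: python
+
+   # Three formulations of the same search-space entry (hypothetical values).
+   n_estimators_as_randint = {"_type": "randint", "_value": [8, 512]}
+   n_estimators_as_choice = {"_type": "choice", "_value": [8, 32, 128, 512]}
+   n_estimators_as_loguniform = {"_type": "loguniform", "_value": [8, 512]}  # round before use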
+
+For the automlbenchmarks, in addition to the built-in benchmarks provided by automlbenchmark
+(defined in ``/examples/trials/benchmarking/automlbenchmark/automlbenchmark/resources/benchmarks/``), we design several
+additional benchmarks, defined in ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks``.
+One example of the larger benchmarks is "nnismall", which consists of 8 regression tasks, 8 binary classification tasks, and
+8 multi-class classification tasks. We also provide three separate 8-task benchmarks "nnismall-regression", "nnismall-binary", and "nnismall-multiclass"
+corresponding to the three types of tasks in nnismall. These tasks are suitable for solving with random forest and MLP.
+
+The following table summarizes the benchmarks we provide. For ``nnismall``, please check ``/examples/trials/benchmarking/automlbenchmark/automlbenchmark/resources/benchmarks/``
+for a more detailed description of each task. Also, since all tasks are from the OpenML platform, you can find the descriptions
+of all datasets at `this webpage `_.
+
+.. list-table::
+   :header-rows: 1
+   :widths: 1 2 2 2
+
+   * - Benchmark name
+     - Description
+     - Task List
+     - Location
+   * - nnivalid
+     - A three-task benchmark to validate benchmark installation.
+     - ``kc2, iris, cholesterol``
+     - ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/``
+   * - nnismall-regression
+     - An eight-task benchmark consisting of **regression** tasks only.
+     - ``cholesterol, liver-disorders, kin8nm, cpu_small, titanic_2, boston, stock, space_ga``
+     - ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/``
+   * - nnismall-binary
+     - An eight-task benchmark consisting of **binary classification** tasks only.
+     - ``Australian, blood-transfusion, christine, credit-g, kc1, kr-vs-kp, phoneme, sylvine``
+     - ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/``
+   * - nnismall-multiclass
+     - An eight-task benchmark consisting of **multi-class classification** tasks only.
+     - ``car, cnae-9, dilbert, fabert, jasmine, mfeat-factors, segment, vehicle``
+     - ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/``
+   * - nnismall
+     - A 24-task benchmark that is the superset of nnismall-regression, nnismall-binary, and nnismall-multiclass.
+     - ``cholesterol, liver-disorders, kin8nm, cpu_small, titanic_2, boston, stock, space_ga, Australian, blood-transfusion, christine, credit-g, kc1, kr-vs-kp, phoneme, sylvine, car, cnae-9, dilbert, fabert, jasmine, mfeat-factors, segment, vehicle``
+     - ``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/``
+
+Setup
+^^^^^
+
+Due to some incompatibilities between automlbenchmark and Python 3.8, Python 3.7 is recommended for running the experiments contained in this folder. First, run the following shell script to clone the automlbenchmark repository. Note: it is recommended to perform the following steps in a separate virtual environment, as the setup code may install several packages.
+
+.. code-block:: bash
+
+   ./setup.sh
+
+Run predefined benchmarks on existing tuners
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+   ./runbenchmark_nni.sh [tuner-names]
+
+This script runs the benchmark 'nnivalid', which consists of a regression task, a binary classification task, and a
+multi-class classification task. After the script finishes, you can find a summary of the results in the folder ``results_[time]/reports/``.
+To run on other predefined benchmarks, change the ``benchmark`` variable in ``runbenchmark_nni.sh``. To change to another
+search space (by using another architecture), change the ``arch_type`` parameter in ``./nni/frameworks.yaml``. Note that currently
+we only support ``random_forest`` and ``mlp`` as the ``arch_type``. To experiment with other search spaces for the same
+architecture, please change the search space defined in ``./nni/extensions/NNI/architectures/run_[architecture].py``.
+
+The file ``./nni/frameworks.yaml`` is the actual configuration file for the HPO Benchmark.
+The ``limit_type`` parameter specifies
+the limits for running the benchmark on one tuner. If ``limit_type`` is set to ``ntrials``, the tuner is called
+``trial_limit`` times and then stopped. If ``limit_type`` is set to ``time``, the tuner is called continuously until
+the timeout for the benchmark is reached. The timeout for the benchmarks can be changed in each benchmark file located
+in ``./nni/benchmarks``.
+
+By default, the script runs the benchmark on all built-in tuners in NNI. If a list of tuners is provided in ``[tuner-names]``,
+it only runs the tuners in the list. Currently, the following tuner names are supported: "TPE", "Random", "Anneal",
+"Evolution", "SMAC", "GPTuner", "MetisTuner", "DNGOTuner", "Hyperband", "BOHB". It is also possible to run the benchmark
+on custom tuners. See the next sections for details.
+
+By default, the script runs the specified tuners against the specified benchmark one by one. To run the experiment for
+all tuners simultaneously in the background, set the "serialize" flag to false in ``runbenchmark_nni.sh``.
+
+Note: the SMAC tuner, the DNGO tuner, and the BOHB advisor have to be installed manually before running benchmarks on them.
+Please refer to `this page `_ for more details
+on installation.
+
+Run customized benchmarks on existing tuners
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can design your own benchmarks and evaluate the performance of NNI tuners on them. To run customized benchmarks,
+add a ``benchmark_name.yaml`` file in the folder ``./nni/benchmarks``, and change the ``benchmark`` variable in ``runbenchmark_nni.sh``.
+See ``./automlbenchmark/resources/benchmarks/`` for some examples of defining a custom benchmark.
+
+Run benchmarks on custom tuners
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You may also use the benchmark to compare a custom tuner of your own with the NNI built-in tuners. To use custom
+tuners, first make sure that the tuner inherits from ``nni.tuner.Tuner`` and correctly implements the required APIs. For
+more information on implementing a custom tuner, please refer to `here `_.
+Next, perform the following steps:
+
+#. Install the custom tuner via the command ``nnictl algo register``. Check `this document `_ for details.
+#. In ``./nni/frameworks.yaml``\ , add a new framework extending the base framework NNI. Make sure that the parameter ``tuner_type`` corresponds to the "builtinName" of the tuner installed in step 1.
+#. Run the following command:
+
+.. code-block:: bash
+
+   ./runbenchmark_nni.sh new-tuner-builtinName
+
+The benchmark will automatically find and match the tuner newly added to your NNI installation.
diff --git a/docs/en_US/hpo_benchmark_stats.rst b/docs/en_US/hpo_benchmark_stats.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1c4e01cf4094c133ebd0914a55fbbd93fcc6b824
--- /dev/null
+++ b/docs/en_US/hpo_benchmark_stats.rst
@@ -0,0 +1,205 @@
+HPO Benchmark Example Statistics
+================================
+
+A Benchmark Example
+^^^^^^^^^^^^^^^^^^^
+
+As an example, we ran the "nnismall" benchmark with the random forest search space on the following 8 tuners: "TPE",
+"Random", "Anneal", "Evolution", "SMAC", "GPTuner", "MetisTuner", "DNGOTuner". For convenience of reference, we also list
+the search space we experimented on here.
+Note that the way in which the search space is written may significantly affect
+hyperparameter optimization performance, and we plan to conduct further experiments on how well the NNI built-in tuners adapt
+to different search space formulations using this benchmarking tool.
+
+.. code-block:: json
+
+   {
+       "n_estimators": {"_type":"randint", "_value": [8, 512]},
+       "max_depth": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 0]},
+       "min_samples_leaf": {"_type":"randint", "_value": [1, 8]},
+       "min_samples_split": {"_type":"randint", "_value": [2, 16]},
+       "max_leaf_nodes": {"_type":"randint", "_value": [0, 4096]}
+   }
+
+As some of the tasks contain a considerable amount of training data, it took about 2 days to run the whole benchmark on
+one tuner. For a more detailed description of the tasks, please check
+``/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall_description.txt``. For binary and multi-class
+classification tasks, the metrics "auc" and "logloss" were used for evaluation, while for regression, "r2" and "rmse" were used.
+
+After the script finishes, the final scores of each tuner are summarized in the file ``results[time]/reports/performances.txt``.
+Since the file is large, we only show the following screenshot and summarize the other important statistics instead.
+
+.. image:: ../img/hpo_benchmark/performances.png
+   :target: ../img/hpo_benchmark/performances.png
+   :alt:
+
+When the results are parsed, the tuners are also ranked based on their final performance. The following three tables show
+the average ranking of the tuners for each metric (logloss, rmse, auc).
+
+Also, for every tuner, its performance on each type of metric is summarized (another view of the same data).
+We present these statistics in the fourth table. Note that this information can be found at ``results[time]/reports/rankings.txt``.
+
+Average rankings for metric rmse (for regression tasks). We found that Anneal performs the best among all NNI built-in tuners.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Tuner Name
+     - Average Ranking
+   * - Anneal
+     - 3.75
+   * - Random
+     - 4.00
+   * - Evolution
+     - 4.44
+   * - DNGOTuner
+     - 4.44
+   * - SMAC
+     - 4.56
+   * - TPE
+     - 4.94
+   * - GPTuner
+     - 4.94
+   * - MetisTuner
+     - 4.94
+
+Average rankings for metric auc (for classification tasks). We found that SMAC performs the best among all NNI built-in tuners.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Tuner Name
+     - Average Ranking
+   * - SMAC
+     - 3.67
+   * - GPTuner
+     - 4.00
+   * - Evolution
+     - 4.22
+   * - Anneal
+     - 4.39
+   * - MetisTuner
+     - 4.39
+   * - TPE
+     - 4.67
+   * - Random
+     - 5.33
+   * - DNGOTuner
+     - 5.33
+
+Average rankings for metric logloss (for classification tasks). We found that Random performs the best among all NNI built-in tuners.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Tuner Name
+     - Average Ranking
+   * - Random
+     - 3.36
+   * - DNGOTuner
+     - 3.50
+   * - SMAC
+     - 3.93
+   * - GPTuner
+     - 4.64
+   * - TPE
+     - 4.71
+   * - Anneal
+     - 4.93
+   * - Evolution
+     - 5.00
+   * - MetisTuner
+     - 5.93
+
+To view the same data in another way, for each tuner, we present its average ranking on each type of metric. From the table, we can see, for example, that the DNGOTuner performs better on tasks evaluated with "logloss" than on tasks evaluated with "auc". We hope this information can, to some extent, guide the choice of tuners given some knowledge of the task type.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Tuner Name
+     - rmse
+     - auc
+     - logloss
+   * - TPE
+     - 4.94
+     - 4.67
+     - 4.71
+   * - Random
+     - 4.00
+     - 5.33
+     - 3.36
+   * - Anneal
+     - 3.75
+     - 4.39
+     - 4.93
+   * - Evolution
+     - 4.44
+     - 4.22
+     - 5.00
+   * - GPTuner
+     - 4.94
+     - 4.00
+     - 4.64
+   * - MetisTuner
+     - 4.94
+     - 4.39
+     - 5.93
+   * - SMAC
+     - 4.56
+     - 3.67
+     - 3.93
+   * - DNGOTuner
+     - 4.44
+     - 5.33
+     - 3.50
+
+Besides these reports, our script also generates two graphs for each fold of each task: one graph presents the best score received by each tuner up to trial x, and the other shows the score that each tuner receives in trial x. These two graphs can give some information on how the tuners "converge" to their final solution. We found that for "nnismall", tuners on the random forest model with the search space defined in ``/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_random_forest.py`` generally converge to the final solution after 40 to 60 trials. As there are too many graphs to include in a single report (96 graphs in total), we only present 10 here.
+
+.. image:: ../img/hpo_benchmark/car_fold1_1.jpg
+   :target: ../img/hpo_benchmark/car_fold1_1.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/car_fold1_2.jpg
+   :target: ../img/hpo_benchmark/car_fold1_2.jpg
+   :alt:
+
+The previous two graphs are generated for fold 1 of the task "car". In the first graph, we observe that most tuners find a relatively good solution within 40 trials. In this experiment, among all tuners, the DNGOTuner converged fastest to the best solution (within 10 trials); its best score improved three times over the entire experiment. In the second graph, we observe that most tuners have their scores fluctuate between 0.8 and 1 throughout the experiment. However, the Anneal tuner (green line) seems more unstable (with more fluctuations), while the GPTuner shows a more stable pattern. This may be interpreted as the Anneal tuner exploring more aggressively than the GPTuner, so that its scores vary more across trials. Regardless, although this pattern can to some extent hint at a tuner's position on the explore-exploit tradeoff, it is not a comprehensive evaluation of a tuner's effectiveness.
+
+.. image:: ../img/hpo_benchmark/christine_fold0_1.jpg
+   :target: ../img/hpo_benchmark/christine_fold0_1.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/christine_fold0_2.jpg
+   :target: ../img/hpo_benchmark/christine_fold0_2.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/cnae-9_fold0_1.jpg
+   :target: ../img/hpo_benchmark/cnae-9_fold0_1.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/cnae-9_fold0_2.jpg
+   :target: ../img/hpo_benchmark/cnae-9_fold0_2.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/credit-g_fold1_1.jpg
+   :target: ../img/hpo_benchmark/credit-g_fold1_1.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/credit-g_fold1_2.jpg
+   :target: ../img/hpo_benchmark/credit-g_fold1_2.jpg
+   :alt:
+
+
+.. image:: ../img/hpo_benchmark/titanic_2_fold1_1.jpg
+   :target: ../img/hpo_benchmark/titanic_2_fold1_1.jpg
+   :alt:
+
+.. image:: ../img/hpo_benchmark/titanic_2_fold1_2.jpg
+   :target: ../img/hpo_benchmark/titanic_2_fold1_2.jpg
+   :alt:
diff --git a/docs/en_US/hyperparameter_tune.rst b/docs/en_US/hyperparameter_tune.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e0f61367799c9a48b6394f8e7a60cd2b381830b9
--- /dev/null
+++ b/docs/en_US/hyperparameter_tune.rst
@@ -0,0 +1,28 @@
+#############################
+Auto (Hyper-parameter) Tuning
+#############################
+
+Auto tuning is one of the key features provided by NNI; its main application scenario is
+hyper-parameter tuning, which is applied to the trial code. We provide many popular
+auto tuning algorithms (called Tuners) and some early stopping algorithms (called Assessors).
+NNI supports running trials on various training platforms, for example, on a local machine,
+on several servers in a distributed manner, or on platforms such as OpenPAI, Kubernetes, etc.
+
+Other key features of NNI, such as model compression and feature engineering, can also be further
+enhanced by auto tuning, as we'll describe when introducing those features.
+
+NNI is highly extensible; advanced users can customize their own Tuner, Assessor, and Training Service
+according to their needs.
+
+.. toctree::
+   :maxdepth: 2
+
+   Write Trial
+   Tuners
+   Assessors
+   Training Platform
+   Examples
+   WebUI
+   How to Debug
+   Advanced
+   HPO Benchmarks
diff --git a/docs/en_US/installation.rst b/docs/en_US/installation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5688b3b4c832cb889d91e32b11fe1af9e7e9f2d0
--- /dev/null
+++ b/docs/en_US/installation.rst
@@ -0,0 +1,12 @@
+############
+Installation
+############
+
+Currently we support installation on Linux, macOS, and Windows. You can also use Docker.
+
+.. toctree::
+   :maxdepth: 2
+
+   Linux & Mac
+   Windows
+   Use Docker
\ No newline at end of file
diff --git a/docs/en_US/model_compression.rst b/docs/en_US/model_compression.rst
new file mode 100644
index 0000000000000000000000000000000000000000..04be65b3a4660c618150b468ab3e206197a52090
--- /dev/null
+++ b/docs/en_US/model_compression.rst
@@ -0,0 +1,34 @@
+#################
+Model Compression
+#################
+
+Deep neural networks (DNNs) have achieved great success in many tasks. However, typical neural networks are both
+computationally expensive and energy intensive, and can be difficult to deploy on devices with limited computation
+resources or with strict latency requirements. Therefore, a natural thought is to perform model compression to
+reduce the model size and accelerate model training/inference without significantly losing performance. Model compression
+techniques can be divided into two categories: pruning and quantization. Pruning methods explore the redundancy
+in the model weights and try to remove/prune the redundant and uncritical weights. Quantization refers to compressing
+models by reducing the number of bits required to represent weights or activations.
+
+NNI provides an easy-to-use toolkit to help users design and use model pruning and quantization algorithms.
+It supports TensorFlow and PyTorch with a unified interface.
+To compress their models, users only need to add several lines to their code.
+Some popular model compression algorithms are built into NNI.
+Users can further use NNI's auto tuning power to find the best compressed model,
+which is detailed in Auto Model Compression.
+On the other hand, users can easily customize new compression algorithms using NNI's interface.
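+
+To give a flavor of the API, here is a minimal pruning sketch (the module path follows recent NNI releases and may differ in other versions, so treat it as illustrative; the model is a toy example):
+
+.. code-block:: python
+
+   import torch.nn as nn
+   from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner
+
+   model = nn.Sequential(nn.Linear(64, 128), nn.ReLU(), nn.Linear(128, 10))
+
+   # Prune 50% of the weights in all Linear layers, ranked by L1 norm.
+   config_list = [{'sparsity': 0.5, 'op_types': ['Linear']}]
+   pruner = L1NormPruner(model, config_list)
+   masked_model, masks = pruner.compress()  # returns the masked model and the masks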
+
+For details, please refer to the following tutorials:
+
+.. toctree::
+   :maxdepth: 2
+
+   Overview
+   Quick Start
+   Tutorial
+   Pruning
+   Pruning V2
+   Quantization
+   Utilities
+   Advanced Usage
+   API Reference
diff --git a/docs/en_US/nas.rst b/docs/en_US/nas.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c9860388a982341d89a6d2474af3ba5926dde6f
--- /dev/null
+++ b/docs/en_US/nas.rst
@@ -0,0 +1,34 @@
+#############################################
+Retiarii for Neural Architecture Search (NAS)
+#############################################
+
+Automatic neural architecture search is taking an increasingly important role in finding better models.
+Recent research has proved the feasibility of automatic NAS and has found models that beat manually tuned ones.
+Representative works include NASNet, ENAS, DARTS, Network Morphism, and Evolution, and new innovations keep emerging.
+
+However, it takes great effort to implement NAS algorithms, and it is hard to reuse the code base of existing algorithms in a new one.
+To facilitate NAS innovations (e.g., designing and implementing new NAS models, comparing different NAS models side by side),
+an easy-to-use and flexible programming interface is crucial.
+
+Thus, we designed `Retiarii `__. It is a deep learning framework that supports exploratory training on a neural network model space, rather than on a single neural network model.
+Exploratory training with Retiarii allows users to express various search spaces for *Neural Architecture Search* and *Hyper-Parameter Tuning* with high flexibility.
+
+Some frequently used terminologies in this document:
+
+* *Model search space*: the set of models from which the best model is explored/searched. Sometimes we use *search space* or *model space* for short.
+* *Exploration strategy*: the algorithm that is used to explore a model search space.
+* *Model evaluator*: the component used to train a model and evaluate its performance.
+
+Follow the instructions below to start your journey with Retiarii.
+
+.. toctree::
+   :maxdepth: 2
+
+   Overview
+   Quick Start
+   Construct Model Space
+   Multi-trial NAS
+   One-shot NAS
+   Hardware-aware NAS
+   NAS Benchmarks
+   NAS API References
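+
+To give a flavor of how a model space is expressed, here is a hedged sketch using Retiarii's mutation primitives (module paths follow recent NNI releases and may differ in other versions; the network itself is a toy example):
+
+.. code-block:: python
+
+   import torch
+   import nni.retiarii.nn.pytorch as nn
+   from nni.retiarii import model_wrapper
+
+   @model_wrapper  # marks this module as the root of a model space
+   class Net(nn.Module):
+       def __init__(self):
+           super().__init__()
+           # LayerChoice declares candidate operations; the exploration
+           # strategy decides which one each sampled model uses.
+           self.conv = nn.LayerChoice([
+               nn.Conv2d(3, 16, kernel_size=3, padding=1),
+               nn.Conv2d(3, 16, kernel_size=5, padding=2),
+           ])
+           self.pool = nn.AdaptiveAvgPool2d(1)
+           self.fc = nn.Linear(16, 10)
+
+       def forward(self, x):
+           x = self.pool(self.conv(x))
+           return self.fc(torch.flatten(x, 1))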
diff --git a/docs/en_US/reference.rst b/docs/en_US/reference.rst
new file mode 100644
index 0000000000000000000000000000000000000000..70d410ed2cb8f4a279b47dbeb83f02c58895630a
--- /dev/null
+++ b/docs/en_US/reference.rst
@@ -0,0 +1,16 @@
+References
+==================
+
+.. toctree::
+   :maxdepth: 2
+
+   nnictl Commands
+   Experiment Configuration
+   Experiment Configuration (legacy)
+   Search Space
+   NNI Annotation
+   SDK API References
+   Supported Framework Library
+   Launch from Python
+   Shared Storage
+   Tensorboard
diff --git a/docs/en_US/reference/experiment_config.rst b/docs/en_US/reference/experiment_config.rst
new file mode 100644
index 0000000000000000000000000000000000000000..052e4c4d70e2664c518815d15f4dc149efaa2f5b
--- /dev/null
+++ b/docs/en_US/reference/experiment_config.rst
@@ -0,0 +1,694 @@
+===========================
+Experiment Config Reference
+===========================
+
+A config file is needed when creating an experiment. This document describes the rules for writing a config file and provides some examples.
+
+.. Note::
+
+   1. This document lists field names in ``camelCase``. If users use these fields in the pythonic way with NNI Python APIs (e.g., ``nni.experiment``), the field names should be converted to ``snake_case``.
+
+   2. In this document, the types of fields are formatted as `Python type hint `_. Therefore JSON objects are called `dict` and arrays are called `list`.
+
+   .. _path:
+
+   3. Some fields take a path to a file or directory. Unless otherwise noted, both absolute and relative paths are supported, and ``~`` will be expanded to the home directory.
+
+      - When written in the YAML file, relative paths are relative to the directory containing that file.
+      - When assigned in Python code, relative paths are relative to the current working directory.
+      - All relative paths are converted to absolute paths when loading a YAML file into a Python class, and when saving a Python class to a YAML file.
+
+   4. Setting a field to ``None`` or ``null`` is equivalent to not setting the field.
+
+.. contents:: Contents
+   :local:
+   :depth: 3
+
+
+Examples
+========
+
+Local Mode
+^^^^^^^^^^
+
+.. code-block:: yaml
+
+   experimentName: MNIST
+   searchSpaceFile: search_space.json
+   trialCommand: python mnist.py
+   trialCodeDirectory: .
+   trialGpuNumber: 1
+   trialConcurrency: 2
+   maxExperimentDuration: 24h
+   maxTrialNumber: 100
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+   trainingService:
+     platform: local
+     useActiveGpu: True
+
+Local Mode (Inline Search Space)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   searchSpace:
+     batch_size:
+       _type: choice
+       _value: [16, 32, 64]
+     learning_rate:
+       _type: loguniform
+       _value: [0.0001, 0.1]
+   trialCommand: python mnist.py
+   trialGpuNumber: 1
+   trialConcurrency: 2
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+   trainingService:
+     platform: local
+     useActiveGpu: True
+
+Remote Mode
+^^^^^^^^^^^
+
+.. code-block:: yaml
+
+   experimentName: MNIST
+   searchSpaceFile: search_space.json
+   trialCommand: python mnist.py
+   trialCodeDirectory: .
+   trialGpuNumber: 1
+   trialConcurrency: 2
+   maxExperimentDuration: 24h
+   maxTrialNumber: 100
+   tuner:
+     name: TPE
+     classArgs:
+       optimize_mode: maximize
+   trainingService:
+     platform: remote
+     machineList:
+       - host: 11.22.33.44
+         user: alice
+         password: xxxxx
+       - host: my.domain.com
+         user: bob
+         sshKeyFile: ~/.ssh/id_rsa
+
+Reference
+=========
+
+ExperimentConfig
+^^^^^^^^^^^^^^^^
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - experimentName
+     - ``str``, optional
+     - Mnemonic name of the experiment, which will be shown in the WebUI and nnictl.
+
+   * - searchSpaceFile
+     - ``str``, optional
+     - Path_ to the JSON file containing the search space.
+       The search space format is determined by the tuner. The common format for built-in tuners is documented `here <../Tutorial/SearchSpaceSpec.rst>`__.
+       Mutually exclusive with ``searchSpace``.
+
+   * - searchSpace
+     - ``JSON``, optional
+     - Search space object.
+       The format is determined by the tuner. The common format for built-in tuners is documented `here <../Tutorial/SearchSpaceSpec.rst>`__.
+       Note that ``None`` means "no such field", so an empty search space should be written as ``{}``.
+       Mutually exclusive with ``searchSpaceFile``.
+
+   * - trialCommand
+     - ``str``
+     - Command to launch the trial.
+       The command will be executed in bash on Linux and macOS, and in PowerShell on Windows.
+       Note that you should use ``python3`` on Linux and macOS, and ``python`` on Windows.
+
+   * - trialCodeDirectory
+     - ``str``, optional
+     - Default: ``"."``. `Path`_ to the directory containing the trial source files.
+       All files in this directory will be sent to the training machine, unless listed in the ``.nniignore`` file.
+       (See :ref:`nniignore ` for details.)
+
+   * - trialConcurrency
+     - ``int``
+     - Specify how many trials should be run concurrently.
+       The real concurrency also depends on hardware resources and may be less than this value.
+
+   * - trialGpuNumber
+     - ``int`` or ``None``, optional
+     - Default: None. This field might have slightly different meanings for various training services,
+       especially when set to ``0`` or ``None``.
+       See the `training service's document <../training_services.rst>`__ for details.
+
+       In local mode, setting the field to ``0`` will prevent trials from accessing GPUs (by setting ``HIP_VISIBLE_DEVICES`` to an empty string).
+       When set to ``None``, trials will be created and scheduled as if they did not use GPUs,
+       but they can still use all GPU resources if they want.
+
+   * - maxExperimentDuration
+     - ``str``, optional
+     - Limit the duration of this experiment if specified. The duration is unlimited if not set.
+       Format: ``number + s|m|h|d``.
+       Examples: ``"10m"``, ``"0.5h"``.
+       When the time runs out, the experiment will stop creating trials but continue to serve the WebUI.
+
+   * - maxTrialNumber
+     - ``int``, optional
+     - Limit the number of trials to create if specified. The trial number is unlimited if not set.
+       When the budget runs out, the experiment will stop creating trials but continue to serve the WebUI.
+
+   * - maxTrialDuration
+     - ``str``, optional
+     - Limit the duration of each trial job if specified. The duration is unlimited if not set.
+       Format: ``number + s|m|h|d``.
+       Examples: ``"10m"``, ``"0.5h"``.
+       When the time runs out, the current trial job will stop.
+
+   * - nniManagerIp
+     - ``str``, optional
+     - Default: default connection chosen by the system. IP of the current machine, used by training machines to access the NNI manager. Not used in local mode.
+       Except for local mode, it is highly recommended to set this field manually.
+
+   * - useAnnotation
+     - ``bool``, optional
+     - Default: ``False``. Enable `annotation <../Tutorial/AnnotationSpec.rst>`__.
+       When using annotation, ``searchSpace`` and ``searchSpaceFile`` should not be specified manually.
+
+   * - debug
+     - ``bool``, optional
+     - Default: ``False``. Enable debug mode.
+       When enabled, logging will be more verbose and some internal validation will be loosened.
+
+   * - logLevel
+     - ``str``, optional
+     - Default: ``info`` or ``debug``, depending on the ``debug`` option. Set the log level of the whole system.
+       Values: ``"trace"``, ``"debug"``, ``"info"``, ``"warning"``, ``"error"``, ``"fatal"``.
+       When debug mode is enabled, the log level is set to "debug"; otherwise, it is set to "info".
+       Most modules of NNI will be affected by this value, including the NNI manager, tuner, training service, etc.
+       The exception is the trial, whose logging level is directly managed by the trial code.
+       For Python modules, "trace" acts as logging level 0 and "fatal" acts as ``logging.CRITICAL``.
+
+   * - experimentWorkingDirectory
+     - ``str``, optional
+     - Default: ``~/nni-experiments``.
+       Specify the :ref:`directory ` to place logs, checkpoints, metadata, and other run-time files.
+       NNI will create a subdirectory named by the experiment ID, so it is safe to use the same directory for multiple experiments.
+
+   * - tunerGpuIndices
+     - ``list[int]`` or ``str`` or ``int``, optional
+     - Limit the GPUs visible to the tuner, assessor, and advisor.
+       This will be the ``HIP_VISIBLE_DEVICES`` environment variable of the tuner process.
+       Because the tuner, assessor, and advisor run in the same process, this option will affect them all.
+
+   * - tuner
+     - ``AlgorithmConfig``, optional
+     - Specify the tuner.
+       The built-in tuners can be found `here <../builtin_tuner.rst>`__ and you can follow `this tutorial <../Tuner/CustomizeTuner.rst>`__ to customize a new tuner.
+
+   * - assessor
+     - ``AlgorithmConfig``, optional
+     - Specify the assessor.
+       The built-in assessors can be found `here <../builtin_assessor.rst>`__ and you can follow `this tutorial <../Assessor/CustomizeAssessor.rst>`__ to customize a new assessor.
+
+   * - advisor
+     - ``AlgorithmConfig``, optional
+     - Specify the advisor.
+       NNI provides two built-in advisors: `BOHB <../Tuner/BohbAdvisor.rst>`__ and `Hyperband <../Tuner/HyperbandAdvisor.rst>`__, and you can follow `this tutorial <../Tuner/CustomizeAdvisor.rst>`__ to customize a new advisor.
+
+   * - trainingService
+     - ``TrainingServiceConfig``
+     - Specify the `training service <../TrainingService/Overview.rst>`__.
+
+   * - sharedStorage
+     - ``SharedStorageConfig``, optional
+     - Configure the shared storage; detailed usage can be found `here <../Tutorial/HowToUseSharedStorage.rst>`__.
+
+AlgorithmConfig
+^^^^^^^^^^^^^^^
+
+``AlgorithmConfig`` describes a tuner / assessor / advisor algorithm.
+
+For customized algorithms, there are two ways to describe them:
+
+   1. `Register the algorithm <../Tutorial/InstallCustomizedAlgos.rst>`__ to use it like a built-in one. (preferred)
+
+   2. Specify the code directory and class name directly.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - name
+     - ``str`` or ``None``, optional
+     - Default: None. Name of the built-in or registered algorithm.
+       ``str`` for built-in and registered algorithms, ``None`` for other customized algorithms.
+
+   * - className
+     - ``str`` or ``None``, optional
+     - Default: None. Qualified class name of a customized algorithm that is not registered.
+       ``None`` for built-in and registered algorithms, ``str`` for other customized algorithms.
+       Example: ``"my_tuner.MyTuner"``
+
+   * - codeDirectory
+     - ``str`` or ``None``, optional
+     - Default: None. Path_ to the directory containing the customized algorithm class.
+       ``None`` for built-in and registered algorithms, ``str`` for other customized algorithms.
+
+   * - classArgs
+     - ``dict[str, Any]``, optional
+     - Keyword arguments passed to the algorithm class's constructor.
+       See the algorithm's documentation for supported values.
+
+TrainingServiceConfig
+^^^^^^^^^^^^^^^^^^^^^
+
+One of the following:
+
+- `LocalConfig`_
+- `RemoteConfig`_
+- `OpenpaiConfig`_
+- `AmlConfig`_
+- `DlcConfig`_
+- `HybridConfig`_
+
+For `Kubeflow <../TrainingService/KubeflowMode.rst>`_, `FrameworkController <../TrainingService/FrameworkControllerMode.rst>`_, and `AdaptDL <../TrainingService/AdaptDLMode.rst>`_ training platforms, it is suggested to use the `v1 config schema <../Tutorial/ExperimentConfig.rst>`_ for now.
+
+LocalConfig
+-----------
+
+Detailed usage can be found `here <../TrainingService/LocalMode.rst>`__.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - platform
+     - ``"local"``
+     -
+
+   * - useActiveGpu
+     - ``bool``, optional
+     - Default: ``False``. Specify whether NNI should submit trials to GPUs occupied by other tasks.
+       Must be set when ``trialGpuNumber`` is greater than zero.
+       The following processes can make a GPU "active":
+
+       - non-NNI HIP programs
+       - graphical desktop
+       - trials submitted by other NNI instances, if you have more than one NNI experiment running at the same time
+       - other users' HIP programs, if you are using a shared server
+
+       If you are using a graphical OS like Windows 10 or Ubuntu desktop, set this field to ``True``; otherwise, the GUI will prevent NNI from launching any trial.
+       When you create multiple NNI experiments and ``useActiveGpu`` is set to ``True``, they will submit multiple trials to the same GPU(s) simultaneously.
+
+   * - maxTrialNumberPerGpu
+     - ``int``, optional
+     - Default: ``1``. Specify how many trials can share one GPU.
+
+   * - gpuIndices
+     - ``list[int]`` or ``str`` or ``int``, optional
+     - Limit the GPUs visible to trial processes.
+       If ``trialGpuNumber`` is less than the length of this value, only a subset will be visible to each trial.
+       This will be used as the ``HIP_VISIBLE_DEVICES`` environment variable.
+
+RemoteConfig
+------------
+
+Detailed usage can be found `here <../TrainingService/RemoteMachineMode.rst>`__.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - platform
+     - ``"remote"``
+     -
+
+   * - machineList
+     - ``List[RemoteMachineConfig]``
+     - List of training machines.
+
+   * - reuseMode
+     - ``bool``, optional
+     - Default: ``True``. Enable `reuse mode <../TrainingService/Overview.rst#training-service-under-reuse-mode>`__.
+
+RemoteMachineConfig
+"""""""""""""""""""
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - host
+     - ``str``
+     - IP or hostname (domain name) of the machine.
+
+   * - port
+     - ``int``, optional
+     - Default: ``22``. SSH service port.
+
+   * - user
+     - ``str``
+     - Login user name.
+
+   * - password
+     - ``str``, optional
+     - If not specified, ``sshKeyFile`` will be used instead.
+
+   * - sshKeyFile
+     - ``str``, optional
+     - `Path`_ to the SSH identity file (private key).
+       Only used when ``password`` is not specified.
+
+   * - sshPassphrase
+     - ``str``, optional
+     - Passphrase of the SSH identity file.
+
+   * - useActiveGpu
+     - ``bool``, optional
+     - Default: ``False``. Specify whether NNI should submit trials to GPUs occupied by other tasks.
+       Must be set when ``trialGpuNumber`` is greater than zero.
+       The following processes can make a GPU "active":
+
+       - non-NNI HIP programs
+       - graphical desktop
+       - trials submitted by other NNI instances, if you have more than one NNI experiment running at the same time
+       - other users' HIP programs, if you are using a shared server
+
+       If your remote machine runs a graphical OS like Ubuntu desktop, set this field to ``True``; otherwise, the GUI will prevent NNI from launching any trial.
+       When you create multiple NNI experiments and ``useActiveGpu`` is set to ``True``, they will submit multiple trials to the same GPU(s) simultaneously.
+
+   * - maxTrialNumberPerGpu
+     - ``int``, optional
+     - Default: ``1``. Specify how many trials can share one GPU.
+
+   * - gpuIndices
+     - ``list[int]`` or ``str`` or ``int``, optional
+     - Limit the GPUs visible to trial processes.
+       If ``trialGpuNumber`` is less than the length of this value, only a subset will be visible to each trial.
+       This will be used as the ``HIP_VISIBLE_DEVICES`` environment variable.
+
+   * - pythonPath
+     - ``str``, optional
+     - Specify a Python environment.
+       This path will be inserted at the front of ``PATH``.
Here are some examples:
+
+       - (Linux) pythonPath: ``/opt/python3.7/bin``
+       - (Windows) pythonPath: ``C:/Python37``
+
+       If you are using Anaconda, there is some difference. On Windows, you also have to add ``../Scripts`` and ``../Library/bin``, separated by ``;``. Examples are as below:
+
+       - (Linux Anaconda) pythonPath: ``/home/yourname/anaconda3/envs/myenv/bin/``
+       - (Windows Anaconda) pythonPath: ``C:/Users/yourname/.conda/envs/myenv``; ``C:/Users/yourname/.conda/envs/myenv/Scripts``; ``C:/Users/yourname/.conda/envs/myenv/Library/bin``
+
+       This is useful if preparation steps vary across machines.
+
+OpenpaiConfig
+-------------
+
+Detailed usage can be found `here <../TrainingService/PaiMode.rst>`__.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - platform
+     - ``"openpai"``
+     -
+
+   * - host
+     - ``str``
+     - Hostname of the OpenPAI service.
+       This may include the ``https://`` or ``http://`` prefix.
+       HTTPS will be used by default.
+
+   * - username
+     - ``str``
+     - OpenPAI user name.
+
+   * - token
+     - ``str``
+     - OpenPAI user token.
+       This can be found in your OpenPAI user settings page.
+
+   * - trialCpuNumber
+     - ``int``
+     - Specify the number of CPUs each trial uses in the OpenPAI container.
+
+   * - trialMemorySize
+     - ``str``
+     - Specify the memory size each trial uses in the OpenPAI container.
+       Format: ``number + tb|gb|mb|kb``.
+       Examples: ``"8gb"``, ``"8192mb"``.
+
+   * - storageConfigName
+     - ``str``
+     - Specify the storage name used in OpenPAI.
+
+   * - dockerImage
+     - ``str``, optional
+     - Default: ``"msranni/nni:latest"``. Name and tag of the docker image used to run the trials.
+
+   * - localStorageMountPoint
+     - ``str``
+     - :ref:`Mount point ` of the storage service (typically NFS) on the local machine.
+
+   * - containerStorageMountPoint
+     - ``str``
+     - Mount point of the storage service (typically NFS) in the docker container.
+       This must be an absolute path.
+
+   * - reuseMode
+     - ``bool``, optional
+     - Default: ``True``. Enable `reuse mode <../TrainingService/Overview.rst#training-service-under-reuse-mode>`__.
+
+   * - openpaiConfig
+     - ``JSON``, optional
+     - Embedded OpenPAI config file.
+
+   * - openpaiConfigFile
+     - ``str``, optional
+     - `Path`_ to the OpenPAI config file.
+       An example can be found `here `__.
+
+AmlConfig
+---------
+
+Detailed usage can be found `here <../TrainingService/AMLMode.rst>`__.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - platform
+     - ``"aml"``
+     -
+
+   * - dockerImage
+     - ``str``, optional
+     - Default: ``"msranni/nni:latest"``. Name and tag of the docker image used to run the trials.
+
+   * - subscriptionId
+     - ``str``
+     - Azure subscription ID.
+
+   * - resourceGroup
+     - ``str``
+     - Azure resource group name.
+
+   * - workspaceName
+     - ``str``
+     - Azure workspace name.
+
+   * - computeTarget
+     - ``str``
+     - AML compute cluster name.
+
+DlcConfig
+---------
+
+Detailed usage can be found `here <../TrainingService/DlcMode.rst>`__.
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - platform
+     - ``"dlc"``
+     -
+
+   * - type
+     - ``str``, optional
+     - Default: ``"Worker"``. Job spec type.
+
+   * - image
+     - ``str``
+     - Name and tag of the docker image used to run the trials.
+
+   * - jobType
+     - ``str``, optional
+     - Default: ``"TFJob"``. PAI-DLC training job type, ``"TFJob"`` or ``"PyTorchJob"``.
+
+   * - podCount
+     - ``str``
+     - Pod count to run a single training job.
+
+   * - ecsSpec
+     - ``str``
+     - Training server config spec string.
+
+   * - region
+     - ``str``
+     - The region where the PAI-DLC public cluster is located.
+
+   * - nasDataSourceId
+     - ``str``
+     - The NAS data source ID configured on the PAI-DLC side.
+
+   * - accessKeyId
+     - ``str``
+     - The accessKeyId of your cloud account.
+
+   * - accessKeySecret
+     - ``str``
+     - The accessKeySecret of your cloud account.
+
+   * - localStorageMountPoint
+     - ``str``
+     - The mount point of the NAS on the PAI-DSW server; the default is ``/home/admin/workspace/``.
+
+   * - containerStorageMountPoint
+     - ``str``
+     - The mount point of the NAS on the PAI-DLC side; the default is ``/root/data/``.
+
+HybridConfig
+------------
+
+Currently only supports `LocalConfig`_, `RemoteConfig`_, `OpenpaiConfig`_ and `AmlConfig`_. Detailed usage can be found `here <../TrainingService/HybridMode.rst>`__.
+
+SharedStorageConfig
+^^^^^^^^^^^^^^^^^^^
+
+Detailed usage can be found `here <../Tutorial/HowToUseSharedStorage.rst>`__.
+
+nfsConfig
+---------
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - storageType
+     - ``"NFS"``
+     -
+
+   * - localMountPoint
+     - ``str``
+     - The path where the storage has been or will be mounted on the local machine.
+       If the path does not exist, it will be created automatically. An absolute path is recommended, e.g., ``/tmp/nni-shared-storage``.
+
+   * - remoteMountPoint
+     - ``str``
+     - The path where the storage will be mounted on the remote machine.
+       If the path does not exist, it will be created automatically. A relative path is recommended, e.g., ``./nni-shared-storage``.
+
+   * - localMounted
+     - ``str``
+     - Specify how the shared storage is mounted.
+       Values: ``"usermount"``, ``"nnimount"``, ``"nomount"``.
+       ``usermount`` means the user has already mounted this storage on localMountPoint. ``nnimount`` means NNI will try to mount this storage on localMountPoint. ``nomount`` means the storage will not be mounted on the local machine; partial storage will be supported in the future.
+
+   * - nfsServer
+     - ``str``
+     - NFS server host.
+
+   * - exportedDirectory
+     - ``str``
+     - Exported directory of the NFS server, detailed `here `_.
+
+azureBlobConfig
+---------------
+
+.. list-table::
+   :widths: 10 10 80
+   :header-rows: 1
+
+   * - Field Name
+     - Type
+     - Description
+
+   * - storageType
+     - ``"AzureBlob"``
+     -
+
+   * - localMountPoint
+     - ``str``
+     - The path where the storage has been or will be mounted on the local machine.
+       If the path does not exist, it will be created automatically. An absolute path is recommended, e.g., ``/tmp/nni-shared-storage``.
+
+   * - remoteMountPoint
+     - ``str``
+     - The path where the storage will be mounted on the remote machine.
+       If the path does not exist, it will be created automatically. A relative path is recommended, e.g., ``./nni-shared-storage``.
+       Note that the directory must be empty when using AzureBlob.
+
+   * - localMounted
+     - ``str``
+     - Specify how the shared storage is mounted.
+       Values: ``"usermount"``, ``"nnimount"``, ``"nomount"``.
+       ``usermount`` means the user has already mounted this storage on localMountPoint. ``nnimount`` means NNI will try to mount this storage on localMountPoint. ``nomount`` means the storage will not be mounted on the local machine; partial storage will be supported in the future.
+
+   * - storageAccountName
+     - ``str``
+     - Azure storage account name.
+
+   * - storageAccountKey
+     - ``str``
+     - Azure storage account key.
+
+   * - containerName
+     - ``str``
+     - AzureBlob container name.
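+
+As noted at the beginning of this document, the same configuration can also be assembled in Python via ``nni.experiment``, with the ``camelCase`` fields converted to ``snake_case``. A minimal sketch mirroring the local-mode YAML example above (API details may vary slightly between NNI versions):
+
+.. code-block:: python
+
+   from nni.experiment import Experiment
+
+   experiment = Experiment('local')                     # training service platform
+   experiment.config.experiment_name = 'MNIST'
+   experiment.config.trial_command = 'python mnist.py'
+   experiment.config.trial_code_directory = '.'
+   experiment.config.trial_gpu_number = 1
+   experiment.config.trial_concurrency = 2
+   experiment.config.max_trial_number = 100
+   experiment.config.search_space = {
+       'batch_size': {'_type': 'choice', '_value': [16, 32, 64]},
+       'learning_rate': {'_type': 'loguniform', '_value': [0.0001, 0.1]},
+   }
+   experiment.config.tuner.name = 'TPE'
+   experiment.config.tuner.class_args = {'optimize_mode': 'maximize'}
+   experiment.config.training_service.use_active_gpu = True
+
+   experiment.run(8080)  # launch the experiment and serve the WebUI on port 8080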
diff --git a/docs/en_US/sdk_reference.rst b/docs/en_US/sdk_reference.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d5a64353194c2d77f8c4fe96da699b2db2b2803 --- /dev/null +++ b/docs/en_US/sdk_reference.rst @@ -0,0 +1,12 @@ +#################### +Python API Reference +#################### + + +.. toctree:: + :maxdepth: 1 + + Auto Tune + NAS + Compression + Python API \ No newline at end of file diff --git a/docs/en_US/training_services.rst b/docs/en_US/training_services.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab8f79fbd7d79ed7582eab9463ae5ec4ae90438a --- /dev/null +++ b/docs/en_US/training_services.rst @@ -0,0 +1,15 @@ +Introduction to NNI Training Services +===================================== + +.. toctree:: + Overview <./TrainingService/Overview> + Local<./TrainingService/LocalMode> + Remote<./TrainingService/RemoteMachineMode> + OpenPAI<./TrainingService/PaiMode> + Kubeflow<./TrainingService/KubeflowMode> + AdaptDL<./TrainingService/AdaptDLMode> + FrameworkController<./TrainingService/FrameworkControllerMode> + DLTS<./TrainingService/DLTSMode> + AML<./TrainingService/AMLMode> + PAI-DLC<./TrainingService/DLCMode> + Hybrid<./TrainingService/HybridMode> diff --git a/docs/img/3_steps.jpg b/docs/img/3_steps.jpg new file mode 100644 index 0000000000000000000000000000000000000000..291ea3ba2a5fa4cfc3cf294b0d9f9adc246e9340 Binary files /dev/null and b/docs/img/3_steps.jpg differ diff --git a/docs/img/Assessor.png b/docs/img/Assessor.png new file mode 100644 index 0000000000000000000000000000000000000000..23c13f9d1903487774b98118c44437841a3ea835 Binary files /dev/null and b/docs/img/Assessor.png differ diff --git a/docs/img/EvoNasTuner.png b/docs/img/EvoNasTuner.png new file mode 100644 index 0000000000000000000000000000000000000000..36d5483c8bbf03ff9c1fc91c22e6dd55ac34d7e0 Binary files /dev/null and b/docs/img/EvoNasTuner.png differ diff --git a/docs/img/NAS_Bench_201.svg b/docs/img/NAS_Bench_201.svg new file mode 100644 index 0000000000000000000000000000000000000000..6b08625b0535c931bb905e6c1c51a8ccf9375c76 --- /dev/null +++ b/docs/img/NAS_Bench_201.svg @@ -0,0 +1 @@ +inputnode 1node 2node 3choose none/one/multiple input(s) then add them as outputthe output of the previous cellchoose one operation from MaxPool, AvgPool,, Conv1x1, Conv3x3, SkipConnect, Zeroize \ No newline at end of file diff --git a/docs/img/NAS_Darts_cell.svg b/docs/img/NAS_Darts_cell.svg new file mode 100644 index 0000000000000000000000000000000000000000..9dd61253cd3a37ba35d161c7f9340693e99adcc1 --- /dev/null +++ b/docs/img/NAS_Darts_cell.svg @@ -0,0 +1 @@ +input 0input 1node 0node 1node 2node 3outputchoose an operation from AvgPool, MaxPool, 3x3 SepConv, 5x5SepConv, 3x3DilConv, 5x5DilConv, SkipConnectresults of previous layers as inputaccept two operations from all inputs, then add the results as outputconcat all inputs in channelspass the result directly to the output node \ No newline at end of file diff --git a/docs/img/NAS_ENAS_macro.svg b/docs/img/NAS_ENAS_macro.svg new file mode 100644 index 0000000000000000000000000000000000000000..9897020e989e76d73d3beb891e8093dec99c08c3 --- /dev/null +++ b/docs/img/NAS_ENAS_macro.svg @@ -0,0 +1 @@ +Layer N-1'soutputchoose an operation from: MaxPool, AvgPool, SepConvBN3x3, SepConvBN5x5 and SkipConnectaccept none/one/multiple input(s)add all input as the output of this layer(suppose this layer is the Nth layer) outputs of all N-2 layersx (N-2)ENASMacroLayerinputsoftmaxGeneralModelchoose an operation from: MaxPool, AvgPool, 
SepConvBN3x3, SepConvBN5x5 and SkipConnectpass tensor without operationENASMacroLayer(suppose this layer is the Nth layer) outputs of all N-1 layerspass the tensor input directly to the next node \ No newline at end of file diff --git a/docs/img/NAS_ENAS_micro.svg b/docs/img/NAS_ENAS_micro.svg new file mode 100644 index 0000000000000000000000000000000000000000..afe9c79c43861eecfd5dde98625e0797e8f1b633 --- /dev/null +++ b/docs/img/NAS_ENAS_micro.svg @@ -0,0 +1 @@ +input_0input_1node 0node 2node 1node 3choose one operation from MaxPool, AvgPool, SepConvBN3x3, SepConvBN5x5, SkipConnectaccept two operations from all inputs, then add the results as outputresults of previous layers as inputoutputpass the result directly to the output(the node does not exist) Only accept input whose starting point is not served as input for any other node. It calculates inputs' average value as the output of this cell. \ No newline at end of file diff --git a/docs/img/NNIDesign.jpg b/docs/img/NNIDesign.jpg new file mode 100644 index 0000000000000000000000000000000000000000..339c5013ba63c6afd43537ced4b36e72c54c85b4 Binary files /dev/null and b/docs/img/NNIDesign.jpg differ diff --git a/docs/img/SA_latency_accuracy.png b/docs/img/SA_latency_accuracy.png new file mode 100644 index 0000000000000000000000000000000000000000..6ea4f9260716281df9665b5d461584117fe66a22 Binary files /dev/null and b/docs/img/SA_latency_accuracy.png differ diff --git a/docs/img/Tensorboard_1.png b/docs/img/Tensorboard_1.png new file mode 100644 index 0000000000000000000000000000000000000000..644074bcda88d32e26b56b488e0951d000c2ccc8 Binary files /dev/null and b/docs/img/Tensorboard_1.png differ diff --git a/docs/img/Tensorboard_2.png b/docs/img/Tensorboard_2.png new file mode 100644 index 0000000000000000000000000000000000000000..d6c2a9548db9847bf54c4b030731ba65c9e40adc Binary files /dev/null and b/docs/img/Tensorboard_2.png differ diff --git a/docs/img/Tensorboard_3.png b/docs/img/Tensorboard_3.png new file mode 100644 index 0000000000000000000000000000000000000000..7579810308c66235a2bc8c4dd8acad950a7f4681 Binary files /dev/null and b/docs/img/Tensorboard_3.png differ diff --git a/docs/img/Tensorboard_4.png b/docs/img/Tensorboard_4.png new file mode 100644 index 0000000000000000000000000000000000000000..a66e99486bd4edfd571a25c27203cd2f1796b3b2 Binary files /dev/null and b/docs/img/Tensorboard_4.png differ diff --git a/docs/img/agp_pruner.png b/docs/img/agp_pruner.png new file mode 100644 index 0000000000000000000000000000000000000000..98e991a0f90bd55967848b83f4087aa1443abf5c Binary files /dev/null and b/docs/img/agp_pruner.png differ diff --git a/docs/img/algo_NetAdapt.png b/docs/img/algo_NetAdapt.png new file mode 100644 index 0000000000000000000000000000000000000000..d098526980d128fcf628c50f79a0925b4e8a0802 Binary files /dev/null and b/docs/img/algo_NetAdapt.png differ diff --git a/docs/img/amc_pruner.jpg b/docs/img/amc_pruner.jpg new file mode 100644 index 0000000000000000000000000000000000000000..456dcbc318403bd85bc370ac007737fce1f894cb Binary files /dev/null and b/docs/img/amc_pruner.jpg differ diff --git a/docs/img/aml_cluster.png b/docs/img/aml_cluster.png new file mode 100644 index 0000000000000000000000000000000000000000..4e1575af83ffdd94bfee4beecf3f579d96e4beb3 Binary files /dev/null and b/docs/img/aml_cluster.png differ diff --git a/docs/img/aml_workspace.png b/docs/img/aml_workspace.png new file mode 100644 index 0000000000000000000000000000000000000000..7dcfd805c764042c08e2659f11dec13b957ac1d7 Binary files /dev/null and 
b/docs/img/aml_workspace.png differ diff --git a/docs/img/apoz.png b/docs/img/apoz.png new file mode 100644 index 0000000000000000000000000000000000000000..e0c452e97879b2d83ade86a9c90b0aa688436e9b Binary files /dev/null and b/docs/img/apoz.png differ diff --git a/docs/img/azure_storage.png b/docs/img/azure_storage.png new file mode 100644 index 0000000000000000000000000000000000000000..e682f9f7946308f263fe0960a6517b9c0f977d24 Binary files /dev/null and b/docs/img/azure_storage.png differ diff --git a/docs/img/bar.png b/docs/img/bar.png new file mode 100644 index 0000000000000000000000000000000000000000..51645458be3fd4b6a9e02967481755e3b1401619 Binary files /dev/null and b/docs/img/bar.png differ diff --git a/docs/img/bohb_1.png b/docs/img/bohb_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c002c9408f8af193e38c0f8e54e74a689d035380 Binary files /dev/null and b/docs/img/bohb_1.png differ diff --git a/docs/img/bohb_2.png b/docs/img/bohb_2.png new file mode 100644 index 0000000000000000000000000000000000000000..b78e570678015d75592dbfe8657aa6e849b3c397 Binary files /dev/null and b/docs/img/bohb_2.png differ diff --git a/docs/img/bohb_3.png b/docs/img/bohb_3.png new file mode 100644 index 0000000000000000000000000000000000000000..b941bc40b23e89b90267845cc1d11c3b1386208f Binary files /dev/null and b/docs/img/bohb_3.png differ diff --git a/docs/img/bohb_4.png b/docs/img/bohb_4.png new file mode 100644 index 0000000000000000000000000000000000000000..38ecb187eb07201383d1d2ff5ad4310352437f9a Binary files /dev/null and b/docs/img/bohb_4.png differ diff --git a/docs/img/bohb_5.png b/docs/img/bohb_5.png new file mode 100644 index 0000000000000000000000000000000000000000..1b5fc82e91756531d82b02a774b2cf506455a1db Binary files /dev/null and b/docs/img/bohb_5.png differ diff --git a/docs/img/bohb_6.jpg b/docs/img/bohb_6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62cbc48c27bf98f6191254a17e6be7b99f94b65b Binary files /dev/null and b/docs/img/bohb_6.jpg differ diff --git a/docs/img/channel_dependency_example.jpg b/docs/img/channel_dependency_example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fb517fe00e42e3a5a160d7a1c3fa6cdab0cca50 Binary files /dev/null and b/docs/img/channel_dependency_example.jpg differ diff --git a/docs/img/compression_flow.jpg b/docs/img/compression_flow.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18c6a0d22e6ed23cf31b51968458e7a1ec146f17 Binary files /dev/null and b/docs/img/compression_flow.jpg differ diff --git a/docs/img/compressor_framework.jpg b/docs/img/compressor_framework.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4528f3a49eb1e54281f3c7c166c249e6b1e040df Binary files /dev/null and b/docs/img/compressor_framework.jpg differ diff --git a/docs/img/contributors.png b/docs/img/contributors.png new file mode 100644 index 0000000000000000000000000000000000000000..37a84c49b9c20a2eece376becfcac46c51b8cc06 Binary files /dev/null and b/docs/img/contributors.png differ diff --git a/docs/img/cream.png b/docs/img/cream.png new file mode 100644 index 0000000000000000000000000000000000000000..99a24840a75f9f4b3c20006a537ea0792a670311 Binary files /dev/null and b/docs/img/cream.png differ diff --git a/docs/img/cream_flops100.jpg b/docs/img/cream_flops100.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a31078dd8f1ab0aa02b92986982a6e569b086fd8 Binary files /dev/null and b/docs/img/cream_flops100.jpg differ diff --git 
a/docs/img/cream_flops600.jpg b/docs/img/cream_flops600.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9f7a5a6d02e1bfcc2aac048f4eb9b80ee91d655 Binary files /dev/null and b/docs/img/cream_flops600.jpg differ diff --git a/docs/img/curvefitting_example.PNG b/docs/img/curvefitting_example.PNG new file mode 100644 index 0000000000000000000000000000000000000000..f405411eb6df77072e5bc0e0f5bae261bfcd3e70 Binary files /dev/null and b/docs/img/curvefitting_example.PNG differ diff --git a/docs/img/curvefitting_expression_xi.gif b/docs/img/curvefitting_expression_xi.gif new file mode 100644 index 0000000000000000000000000000000000000000..ff8d217c967a087be74255601fb01baf5bf7a4fb Binary files /dev/null and b/docs/img/curvefitting_expression_xi.gif differ diff --git a/docs/img/curvefitting_f_comb.gif b/docs/img/curvefitting_f_comb.gif new file mode 100644 index 0000000000000000000000000000000000000000..f2da808855ad551f60988bc360b1834410b768b6 Binary files /dev/null and b/docs/img/curvefitting_f_comb.gif differ diff --git a/docs/img/curvefitting_learning_curve.PNG b/docs/img/curvefitting_learning_curve.PNG new file mode 100644 index 0000000000000000000000000000000000000000..247e1e3968204a356f7f0e6ee499bd70cdd624a1 Binary files /dev/null and b/docs/img/curvefitting_learning_curve.PNG differ diff --git a/docs/img/darts_mode.png b/docs/img/darts_mode.png new file mode 100644 index 0000000000000000000000000000000000000000..4917c4afbc642af9e8b62017cf8a3a28147c179b Binary files /dev/null and b/docs/img/darts_mode.png differ diff --git a/docs/img/dependency-aware.jpg b/docs/img/dependency-aware.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2f9b57db3d9c2bd4af70cb5aa9b84ffdedd6148 Binary files /dev/null and b/docs/img/dependency-aware.jpg differ diff --git a/docs/img/dispatcher_error.jpg b/docs/img/dispatcher_error.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d955087d9b3f8091955fd08e048d91704ea281d5 Binary files /dev/null and b/docs/img/dispatcher_error.jpg differ diff --git a/docs/img/distill.png b/docs/img/distill.png new file mode 100644 index 0000000000000000000000000000000000000000..cea5739bc4656528238614ba3289a84195e87d5c Binary files /dev/null and b/docs/img/distill.png differ diff --git a/docs/img/dlts-step1.png b/docs/img/dlts-step1.png new file mode 100644 index 0000000000000000000000000000000000000000..47949767e5f85ffce83799ba17a38fd7aca18e70 Binary files /dev/null and b/docs/img/dlts-step1.png differ diff --git a/docs/img/dlts-step3.png b/docs/img/dlts-step3.png new file mode 100644 index 0000000000000000000000000000000000000000..cd67fe741c9f9a086ce033aa32515c374624c6d3 Binary files /dev/null and b/docs/img/dlts-step3.png differ diff --git a/docs/img/dlts-step4.png b/docs/img/dlts-step4.png new file mode 100644 index 0000000000000000000000000000000000000000..00528297af6e6d51b331994ef834115ddacbd16b Binary files /dev/null and b/docs/img/dlts-step4.png differ diff --git a/docs/img/dlts-step5.png b/docs/img/dlts-step5.png new file mode 100644 index 0000000000000000000000000000000000000000..6b29d57cba37918156b94fd5079695e860d62bf4 Binary files /dev/null and b/docs/img/dlts-step5.png differ diff --git a/docs/img/efficientnet_search_result.png b/docs/img/efficientnet_search_result.png new file mode 100644 index 0000000000000000000000000000000000000000..91c4f6dac7fad805ba441c6dbdcb79dd9bb4c4c5 Binary files /dev/null and b/docs/img/efficientnet_search_result.png differ diff --git a/docs/img/emoicons/Comfort.png 
b/docs/img/emoicons/Comfort.png new file mode 100644 index 0000000000000000000000000000000000000000..c42c69bbccb58f6e9d81139d8858467946821681 Binary files /dev/null and b/docs/img/emoicons/Comfort.png differ diff --git a/docs/img/emoicons/Crying.png b/docs/img/emoicons/Crying.png new file mode 100644 index 0000000000000000000000000000000000000000..43d10426cc2f2dac4072e391d99f3498fb58f335 Binary files /dev/null and b/docs/img/emoicons/Crying.png differ diff --git a/docs/img/emoicons/Cut.png b/docs/img/emoicons/Cut.png new file mode 100644 index 0000000000000000000000000000000000000000..83c4d37fdabaeab9d25730520d42a07426be63e8 Binary files /dev/null and b/docs/img/emoicons/Cut.png differ diff --git a/docs/img/emoicons/Error.png b/docs/img/emoicons/Error.png new file mode 100644 index 0000000000000000000000000000000000000000..5184238ece55c1c82cf08020d255a9570824c409 Binary files /dev/null and b/docs/img/emoicons/Error.png differ diff --git a/docs/img/emoicons/Holiday.png b/docs/img/emoicons/Holiday.png new file mode 100644 index 0000000000000000000000000000000000000000..f4446ed8131386d9414acb1af8f8f3ba707294c5 Binary files /dev/null and b/docs/img/emoicons/Holiday.png differ diff --git a/docs/img/emoicons/NoBug.png b/docs/img/emoicons/NoBug.png new file mode 100644 index 0000000000000000000000000000000000000000..9bf1266769f2bb870308d3917252952ca339928c Binary files /dev/null and b/docs/img/emoicons/NoBug.png differ diff --git a/docs/img/emoicons/Sign.png b/docs/img/emoicons/Sign.png new file mode 100644 index 0000000000000000000000000000000000000000..a57fb2064d3c9ebb0226816ff197ff11a5e5d457 Binary files /dev/null and b/docs/img/emoicons/Sign.png differ diff --git a/docs/img/emoicons/Sweat.png b/docs/img/emoicons/Sweat.png new file mode 100644 index 0000000000000000000000000000000000000000..642e5d6e55f9d637748dc3f1352d7d4b8748ab78 Binary files /dev/null and b/docs/img/emoicons/Sweat.png differ diff --git a/docs/img/emoicons/Weaving.png b/docs/img/emoicons/Weaving.png new file mode 100644 index 0000000000000000000000000000000000000000..3845ce4cbc27b031be24abc65860d7f2f92bce54 Binary files /dev/null and b/docs/img/emoicons/Weaving.png differ diff --git a/docs/img/emoicons/Working.png b/docs/img/emoicons/Working.png new file mode 100644 index 0000000000000000000000000000000000000000..47bc0cd49870cde1e71132c134457038ffddac05 Binary files /dev/null and b/docs/img/emoicons/Working.png differ diff --git a/docs/img/emoicons/home.svg b/docs/img/emoicons/home.svg new file mode 100644 index 0000000000000000000000000000000000000000..5360482f3260794c8aa9b037833954b9ebc61def --- /dev/null +++ b/docs/img/emoicons/home.svg @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/img/enas_search_space.png b/docs/img/enas_search_space.png new file mode 100644 index 0000000000000000000000000000000000000000..9280cc37bbbcfa853a1e222b2c876409db9f5c5d Binary files /dev/null and b/docs/img/enas_search_space.png differ diff --git a/docs/img/example_combined.png b/docs/img/example_combined.png new file mode 100644 index 0000000000000000000000000000000000000000..4757892266ddd7f343b0e304440a03bc7a785a2c Binary files /dev/null and b/docs/img/example_combined.png differ diff --git a/docs/img/example_connectchoice.png b/docs/img/example_connectchoice.png new file mode 100644 index 0000000000000000000000000000000000000000..74559b8e47ab378394106cbf4b4134343014ed2c Binary files /dev/null and b/docs/img/example_connectchoice.png differ diff --git a/docs/img/example_enas.png 
b/docs/img/example_enas.png new file mode 100644 index 0000000000000000000000000000000000000000..19c47ec89d0f1823526b4e2a23c61da11cdefe15 Binary files /dev/null and b/docs/img/example_enas.png differ diff --git a/docs/img/example_layerchoice.png b/docs/img/example_layerchoice.png new file mode 100644 index 0000000000000000000000000000000000000000..d325328e5876e5c8158d2ae8473a841832edb339 Binary files /dev/null and b/docs/img/example_layerchoice.png differ diff --git a/docs/img/example_of_curve_fitting.PNG b/docs/img/example_of_curve_fitting.PNG new file mode 100644 index 0000000000000000000000000000000000000000..f405411eb6df77072e5bc0e0f5bae261bfcd3e70 Binary files /dev/null and b/docs/img/example_of_curve_fitting.PNG differ diff --git a/docs/img/experiment_process.jpg b/docs/img/experiment_process.jpg new file mode 100644 index 0000000000000000000000000000000000000000..991f7f4978887b71dca7d3aed19257666f8fe8a9 Binary files /dev/null and b/docs/img/experiment_process.jpg differ diff --git a/docs/img/expression_xi.gif b/docs/img/expression_xi.gif new file mode 100644 index 0000000000000000000000000000000000000000..ff8d217c967a087be74255601fb01baf5bf7a4fb Binary files /dev/null and b/docs/img/expression_xi.gif differ diff --git a/docs/img/f_comb.gif b/docs/img/f_comb.gif new file mode 100644 index 0000000000000000000000000000000000000000..f2da808855ad551f60988bc360b1834410b768b6 Binary files /dev/null and b/docs/img/f_comb.gif differ diff --git a/docs/img/fbnet.png b/docs/img/fbnet.png new file mode 100644 index 0000000000000000000000000000000000000000..f5c44e76917d9d758d227807e2633aa710567d8b Binary files /dev/null and b/docs/img/fbnet.png differ diff --git a/docs/img/fpgm_fig1.png b/docs/img/fpgm_fig1.png new file mode 100644 index 0000000000000000000000000000000000000000..f9a1fe40316221bfa537a40a30372926ad55a676 Binary files /dev/null and b/docs/img/fpgm_fig1.png differ diff --git a/docs/img/highlevelarchi.png b/docs/img/highlevelarchi.png new file mode 100644 index 0000000000000000000000000000000000000000..0885fcab5ebcbb75f399d68fcdc71e11a6ab5d29 Binary files /dev/null and b/docs/img/highlevelarchi.png differ diff --git a/docs/img/hpo_benchmark/car_fold1_1.jpg b/docs/img/hpo_benchmark/car_fold1_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db30b6252a8773ad2e897f8679081a4b339281b5 Binary files /dev/null and b/docs/img/hpo_benchmark/car_fold1_1.jpg differ diff --git a/docs/img/hpo_benchmark/car_fold1_2.jpg b/docs/img/hpo_benchmark/car_fold1_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16701e3667920f6506f38a0ad50d1874975d3b80 Binary files /dev/null and b/docs/img/hpo_benchmark/car_fold1_2.jpg differ diff --git a/docs/img/hpo_benchmark/christine_fold0_1.jpg b/docs/img/hpo_benchmark/christine_fold0_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb2549c63f93c8d56e9e8aebc4ee18e46b3e65cb Binary files /dev/null and b/docs/img/hpo_benchmark/christine_fold0_1.jpg differ diff --git a/docs/img/hpo_benchmark/christine_fold0_2.jpg b/docs/img/hpo_benchmark/christine_fold0_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..104019c9a3ad3be2c8a215becfe05ac6f8254797 Binary files /dev/null and b/docs/img/hpo_benchmark/christine_fold0_2.jpg differ diff --git a/docs/img/hpo_benchmark/cnae-9_fold0_1.jpg b/docs/img/hpo_benchmark/cnae-9_fold0_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01e0b7137ebefb3317a634d463213eda2b252285 Binary files /dev/null and 
b/docs/img/hpo_benchmark/cnae-9_fold0_1.jpg differ diff --git a/docs/img/hpo_benchmark/cnae-9_fold0_2.jpg b/docs/img/hpo_benchmark/cnae-9_fold0_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db8c2e1a160ce6cd6c9766a258bb14e5aadca9c9 Binary files /dev/null and b/docs/img/hpo_benchmark/cnae-9_fold0_2.jpg differ diff --git a/docs/img/hpo_benchmark/credit-g_fold1_1.jpg b/docs/img/hpo_benchmark/credit-g_fold1_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a71ecdcfe1a8354b16d3d4fbbe03eb00144ba689 Binary files /dev/null and b/docs/img/hpo_benchmark/credit-g_fold1_1.jpg differ diff --git a/docs/img/hpo_benchmark/credit-g_fold1_2.jpg b/docs/img/hpo_benchmark/credit-g_fold1_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bb1982290c7b2e7bdaf4e2fc07090e26f73df3b Binary files /dev/null and b/docs/img/hpo_benchmark/credit-g_fold1_2.jpg differ diff --git a/docs/img/hpo_benchmark/performances.png b/docs/img/hpo_benchmark/performances.png new file mode 100644 index 0000000000000000000000000000000000000000..3a3d75653c68428bb5d75da93c9408fd8801b19b Binary files /dev/null and b/docs/img/hpo_benchmark/performances.png differ diff --git a/docs/img/hpo_benchmark/titanic_2_fold1_1.jpg b/docs/img/hpo_benchmark/titanic_2_fold1_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97124b387492a725d2526494c4bdf8fba04f892e Binary files /dev/null and b/docs/img/hpo_benchmark/titanic_2_fold1_1.jpg differ diff --git a/docs/img/hpo_benchmark/titanic_2_fold1_2.jpg b/docs/img/hpo_benchmark/titanic_2_fold1_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65f29cb11b2a73c23003ce1df687b4f1b86d3f5a Binary files /dev/null and b/docs/img/hpo_benchmark/titanic_2_fold1_2.jpg differ diff --git a/docs/img/hpo_rocksdb_fillrandom.png b/docs/img/hpo_rocksdb_fillrandom.png new file mode 100644 index 0000000000000000000000000000000000000000..707ad58c3913b3b9e472ae749c12a0d34a8365b7 Binary files /dev/null and b/docs/img/hpo_rocksdb_fillrandom.png differ diff --git a/docs/img/hpo_rocksdb_readrandom.png b/docs/img/hpo_rocksdb_readrandom.png new file mode 100644 index 0000000000000000000000000000000000000000..56eb71e37b332b8cee4c96b91d85b7d1d99f048a Binary files /dev/null and b/docs/img/hpo_rocksdb_readrandom.png differ diff --git a/docs/img/huggingface_bert_architecture.png b/docs/img/huggingface_bert_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..9187c79c2e5c3e498d419343564a6ad440c96f6a Binary files /dev/null and b/docs/img/huggingface_bert_architecture.png differ diff --git a/docs/img/hyperPara.png b/docs/img/hyperPara.png new file mode 100644 index 0000000000000000000000000000000000000000..ba7484c9a40430ab239cdff5cdd118db60d541cb Binary files /dev/null and b/docs/img/hyperPara.png differ diff --git a/docs/img/hyperband_parallelism.png b/docs/img/hyperband_parallelism.png new file mode 100644 index 0000000000000000000000000000000000000000..b167bc141af1229548041276d7fc2ab68db2c5c0 Binary files /dev/null and b/docs/img/hyperband_parallelism.png differ diff --git a/docs/img/hyperband_serial.png b/docs/img/hyperband_serial.png new file mode 100644 index 0000000000000000000000000000000000000000..c34962229f9132de5b80db93cf752926028e1b86 Binary files /dev/null and b/docs/img/hyperband_serial.png differ diff --git a/docs/img/importance_estimation_sum.png b/docs/img/importance_estimation_sum.png new file mode 100644 index 
0000000000000000000000000000000000000000..d2f25e813acf32d9c88232dbe00e74f3e15adc22 Binary files /dev/null and b/docs/img/importance_estimation_sum.png differ diff --git a/docs/img/kubeflow_training_design.png b/docs/img/kubeflow_training_design.png new file mode 100644 index 0000000000000000000000000000000000000000..b2acc7adc434339a84f6a2eb45d01b2970a191f0 Binary files /dev/null and b/docs/img/kubeflow_training_design.png differ diff --git a/docs/img/l1filter_pruner.png b/docs/img/l1filter_pruner.png new file mode 100644 index 0000000000000000000000000000000000000000..a4d6c498ed4e50ffec4c2e6d665ca6edb4c1d105 Binary files /dev/null and b/docs/img/l1filter_pruner.png differ diff --git a/docs/img/learning_curve.PNG b/docs/img/learning_curve.PNG new file mode 100644 index 0000000000000000000000000000000000000000..247e1e3968204a356f7f0e6ee499bd70cdd624a1 Binary files /dev/null and b/docs/img/learning_curve.PNG differ diff --git a/docs/img/lottery_ticket_mnist_fc.png b/docs/img/lottery_ticket_mnist_fc.png new file mode 100644 index 0000000000000000000000000000000000000000..a9051705a8630f4743aa523ba44d6c5d36cd744a Binary files /dev/null and b/docs/img/lottery_ticket_mnist_fc.png differ diff --git a/docs/img/mask_conflict.jpg b/docs/img/mask_conflict.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d28bacf5204a02614ad4a425e535d216b73e87d7 Binary files /dev/null and b/docs/img/mask_conflict.jpg differ diff --git a/docs/img/mobilev2_l1_cifar.jpg b/docs/img/mobilev2_l1_cifar.jpg new file mode 100644 index 0000000000000000000000000000000000000000..202e5740e13b96277b4d933b61135e5b33da4e36 Binary files /dev/null and b/docs/img/mobilev2_l1_cifar.jpg differ diff --git a/docs/img/movement_pruning.png b/docs/img/movement_pruning.png new file mode 100644 index 0000000000000000000000000000000000000000..96edc0674e756f6b2fcb868db00c0020660efa44 Binary files /dev/null and b/docs/img/movement_pruning.png differ diff --git a/docs/img/nas-bench-101-example.png b/docs/img/nas-bench-101-example.png new file mode 100644 index 0000000000000000000000000000000000000000..50b06d333385f1f66bd879fcd0e4dc4663b32070 Binary files /dev/null and b/docs/img/nas-bench-101-example.png differ diff --git a/docs/img/nas-bench-201-example.png b/docs/img/nas-bench-201-example.png new file mode 100644 index 0000000000000000000000000000000000000000..17ed28539228347a97eacf3369571bd7a3f2007e Binary files /dev/null and b/docs/img/nas-bench-201-example.png differ diff --git a/docs/img/nas-bench-nds-example.png b/docs/img/nas-bench-nds-example.png new file mode 100644 index 0000000000000000000000000000000000000000..8165b566f88bd1e0b84b1405c5303c51e28fe348 Binary files /dev/null and b/docs/img/nas-bench-nds-example.png differ diff --git a/docs/img/nas-bench-nlp-example1.jpeg b/docs/img/nas-bench-nlp-example1.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3f5d3fe289d396a8736bc94a8ff7cb3542be5123 Binary files /dev/null and b/docs/img/nas-bench-nlp-example1.jpeg differ diff --git a/docs/img/nas-bench-nlp-example2.jpeg b/docs/img/nas-bench-nlp-example2.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..850d15b7355067d3086c52a5bce30eb177f41a69 Binary files /dev/null and b/docs/img/nas-bench-nlp-example2.jpeg differ diff --git a/docs/img/nas_abstract_illustration.png b/docs/img/nas_abstract_illustration.png new file mode 100644 index 0000000000000000000000000000000000000000..0faf667f5b0e79e5485753110b5839040ab1211d Binary files /dev/null and 
b/docs/img/nas_abstract_illustration.png differ diff --git a/docs/img/nas_on_nni.png b/docs/img/nas_on_nni.png new file mode 100644 index 0000000000000000000000000000000000000000..7359c210d81194a0bff8a192dc73b6c23684eba4 Binary files /dev/null and b/docs/img/nas_on_nni.png differ diff --git a/docs/img/nas_weight_share.png b/docs/img/nas_weight_share.png new file mode 100644 index 0000000000000000000000000000000000000000..e66beb7829cc5f6ae2a1c99adb77be375d6407d1 Binary files /dev/null and b/docs/img/nas_weight_share.png differ diff --git a/docs/img/nasui-1.png b/docs/img/nasui-1.png new file mode 100644 index 0000000000000000000000000000000000000000..ee4d68db121abc92cb2a673d2ecd38e392345831 Binary files /dev/null and b/docs/img/nasui-1.png differ diff --git a/docs/img/nasui-2.png b/docs/img/nasui-2.png new file mode 100644 index 0000000000000000000000000000000000000000..69e0794abd65bdf96695583cfc5bab76565be539 Binary files /dev/null and b/docs/img/nasui-2.png differ diff --git a/docs/img/nni-1.png b/docs/img/nni-1.png new file mode 100644 index 0000000000000000000000000000000000000000..b31e66b2ee0350f9c5401008d7f39293fac2d2bd Binary files /dev/null and b/docs/img/nni-1.png differ diff --git a/docs/img/nni_arch_overview.png b/docs/img/nni_arch_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..621f16a0ddfde3bdef49058aff64baaf36b82e01 Binary files /dev/null and b/docs/img/nni_arch_overview.png differ diff --git a/docs/img/nni_logo.png b/docs/img/nni_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..2b6479e46b40479565556c3a00d526133b9f5239 Binary files /dev/null and b/docs/img/nni_logo.png differ diff --git a/docs/img/nni_logo_dark.png b/docs/img/nni_logo_dark.png new file mode 100644 index 0000000000000000000000000000000000000000..6432b2cb767e1e67c53ce8875f71ce67ebeb5369 Binary files /dev/null and b/docs/img/nni_logo_dark.png differ diff --git a/docs/img/nni_pai_joblist.jpg b/docs/img/nni_pai_joblist.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa7564cb2a7c99f700a01c74d9e7e01ded023d28 Binary files /dev/null and b/docs/img/nni_pai_joblist.jpg differ diff --git a/docs/img/nni_trial_hdfs_output.jpg b/docs/img/nni_trial_hdfs_output.jpg new file mode 100644 index 0000000000000000000000000000000000000000..decbf3bde276af98e279e3cdd4764228a2474c38 Binary files /dev/null and b/docs/img/nni_trial_hdfs_output.jpg differ diff --git a/docs/img/nni_webui_joblist.png b/docs/img/nni_webui_joblist.png new file mode 100644 index 0000000000000000000000000000000000000000..babc10ec5f7bf13369ef5ec29b526bf7db010813 Binary files /dev/null and b/docs/img/nni_webui_joblist.png differ diff --git a/docs/img/one-shot_training.png b/docs/img/one-shot_training.png new file mode 100644 index 0000000000000000000000000000000000000000..746d7008b134d462270f0e3b7ac9989db8c797cc Binary files /dev/null and b/docs/img/one-shot_training.png differ diff --git a/docs/img/oneshot_mode.png b/docs/img/oneshot_mode.png new file mode 100644 index 0000000000000000000000000000000000000000..06ce09331d531012957980f68ad5512bb39d08a0 Binary files /dev/null and b/docs/img/oneshot_mode.png differ diff --git a/docs/img/opevo.png b/docs/img/opevo.png new file mode 100644 index 0000000000000000000000000000000000000000..dcbc89e0f5fd48bea63dec8aa2c259dbaec654a9 Binary files /dev/null and b/docs/img/opevo.png differ diff --git a/docs/img/over1.png b/docs/img/over1.png new file mode 100644 index 
0000000000000000000000000000000000000000..7b5c6a0db3ffd3675327890b591a90522caed07a Binary files /dev/null and b/docs/img/over1.png differ diff --git a/docs/img/over2.png b/docs/img/over2.png new file mode 100644 index 0000000000000000000000000000000000000000..783ab2127f56acf9d861c9a13cafade42d758140 Binary files /dev/null and b/docs/img/over2.png differ diff --git a/docs/img/overview.svg b/docs/img/overview.svg new file mode 100644 index 0000000000000000000000000000000000000000..ae369941abc61d3c1c271c157c183f2801dbaa0d --- /dev/null +++ b/docs/img/overview.svg @@ -0,0 +1 @@ +overview栅格化 \ No newline at end of file diff --git a/docs/img/pai_data_management_page.jpg b/docs/img/pai_data_management_page.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ba33e1d35519b4d0eeb06c7d5656ab6e0ccb02a Binary files /dev/null and b/docs/img/pai_data_management_page.jpg differ diff --git a/docs/img/pai_job_submission_page.jpg b/docs/img/pai_job_submission_page.jpg new file mode 100644 index 0000000000000000000000000000000000000000..377a66f593c937b1b264d9e838ddb69b96320dd4 Binary files /dev/null and b/docs/img/pai_job_submission_page.jpg differ diff --git a/docs/img/pai_profile.jpg b/docs/img/pai_profile.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eadbbeb9fa7055285fbcade7bafb6d5f6dd2eccf Binary files /dev/null and b/docs/img/pai_profile.jpg differ diff --git a/docs/img/pai_token.jpg b/docs/img/pai_token.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83f388a28294901f0920ac41af162f8e5fd857c0 Binary files /dev/null and b/docs/img/pai_token.jpg differ diff --git a/docs/img/pai_token_button.jpg b/docs/img/pai_token_button.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86e911d0e845bb4498877ad78357795b6ec85254 Binary files /dev/null and b/docs/img/pai_token_button.jpg differ diff --git a/docs/img/parallel_tpe_search1.gif b/docs/img/parallel_tpe_search1.gif new file mode 100644 index 0000000000000000000000000000000000000000..e45131b1a466e05ac34d70c9485daedc2d43cb29 Binary files /dev/null and b/docs/img/parallel_tpe_search1.gif differ diff --git a/docs/img/parallel_tpe_search2.gif b/docs/img/parallel_tpe_search2.gif new file mode 100644 index 0000000000000000000000000000000000000000..0c2b5548ee9e6a35034dd0b30f5cd7dc774ab9a1 Binary files /dev/null and b/docs/img/parallel_tpe_search2.gif differ diff --git a/docs/img/parallel_tpe_search3.gif b/docs/img/parallel_tpe_search3.gif new file mode 100644 index 0000000000000000000000000000000000000000..e5412db052addff69feb1f26b0f8cb2a287150b4 Binary files /dev/null and b/docs/img/parallel_tpe_search3.gif differ diff --git a/docs/img/parallel_tpe_search4.PNG b/docs/img/parallel_tpe_search4.PNG new file mode 100644 index 0000000000000000000000000000000000000000..e1c12233b70df791ec292e6a46a9d887a67a44de Binary files /dev/null and b/docs/img/parallel_tpe_search4.PNG differ diff --git a/docs/img/parallel_tpe_search_branin.PNG b/docs/img/parallel_tpe_search_branin.PNG new file mode 100644 index 0000000000000000000000000000000000000000..b62ce1f09e56383ffdd27b1f6894a0810600dc8e Binary files /dev/null and b/docs/img/parallel_tpe_search_branin.PNG differ diff --git a/docs/img/parallel_tpe_search_cl.PNG b/docs/img/parallel_tpe_search_cl.PNG new file mode 100644 index 0000000000000000000000000000000000000000..40cf9134d89587d4087bcebd3b08a96dffa2ff1e Binary files /dev/null and b/docs/img/parallel_tpe_search_cl.PNG differ diff --git a/docs/img/parallel_tpe_search_ei.PNG 
b/docs/img/parallel_tpe_search_ei.PNG new file mode 100644 index 0000000000000000000000000000000000000000..745e84bfd93d45472e3aa142086f6635201cd003 Binary files /dev/null and b/docs/img/parallel_tpe_search_ei.PNG differ diff --git a/docs/img/parallel_tpe_search_ei2.PNG b/docs/img/parallel_tpe_search_ei2.PNG new file mode 100644 index 0000000000000000000000000000000000000000..554a68a78fcb5720ecf14bdeef78759fe70bacbd Binary files /dev/null and b/docs/img/parallel_tpe_search_ei2.PNG differ diff --git a/docs/img/parallel_tpe_search_kb.PNG b/docs/img/parallel_tpe_search_kb.PNG new file mode 100644 index 0000000000000000000000000000000000000000..7e671ab649df3be50c8ecc8094817c51156ef9f0 Binary files /dev/null and b/docs/img/parallel_tpe_search_kb.PNG differ diff --git a/docs/img/parallel_tpe_search_qEI.PNG b/docs/img/parallel_tpe_search_qEI.PNG new file mode 100644 index 0000000000000000000000000000000000000000..1a3e7c1c1e6a680538a7e79bf74e1c7bb5753a0a Binary files /dev/null and b/docs/img/parallel_tpe_search_qEI.PNG differ diff --git a/docs/img/parallel_tpe_search_result.PNG b/docs/img/parallel_tpe_search_result.PNG new file mode 100644 index 0000000000000000000000000000000000000000..8ebb706c3fda7e3e3f9748cb40d488dc90029131 Binary files /dev/null and b/docs/img/parallel_tpe_search_result.PNG differ diff --git a/docs/img/parallel_tpe_search_tpe.PNG b/docs/img/parallel_tpe_search_tpe.PNG new file mode 100644 index 0000000000000000000000000000000000000000..33f9ab2603b89ed6636df1af01d50e4e81672964 Binary files /dev/null and b/docs/img/parallel_tpe_search_tpe.PNG differ diff --git a/docs/img/pbt.jpg b/docs/img/pbt.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b930618f40720c3b81b0f05b2c834f40bca73c6a Binary files /dev/null and b/docs/img/pbt.jpg differ diff --git a/docs/img/pix2pix_pytorch_facades.png b/docs/img/pix2pix_pytorch_facades.png new file mode 100644 index 0000000000000000000000000000000000000000..4da8d2779f82488ab99cb1a2ad2852f3eaffb231 Binary files /dev/null and b/docs/img/pix2pix_pytorch_facades.png differ diff --git a/docs/img/ppo_cifar10.png b/docs/img/ppo_cifar10.png new file mode 100644 index 0000000000000000000000000000000000000000..b2061a07f6945b2d56e41784107a0915ffc5ca48 Binary files /dev/null and b/docs/img/ppo_cifar10.png differ diff --git a/docs/img/ppo_mnist.png b/docs/img/ppo_mnist.png new file mode 100644 index 0000000000000000000000000000000000000000..3c5a00c176d5482e2d478902cf3751081d539f02 Binary files /dev/null and b/docs/img/ppo_mnist.png differ diff --git a/docs/img/proxylessnas.png b/docs/img/proxylessnas.png new file mode 100644 index 0000000000000000000000000000000000000000..274e1dbd5b63e9142783baaf3b2ac7131047c6fb Binary files /dev/null and b/docs/img/proxylessnas.png differ diff --git a/docs/img/pruning_process.png b/docs/img/pruning_process.png new file mode 100644 index 0000000000000000000000000000000000000000..fd74e8432b4370b96049230aa15c62437e19b4b9 Binary files /dev/null and b/docs/img/pruning_process.png differ diff --git a/docs/img/release-1-title-1.png b/docs/img/release-1-title-1.png new file mode 100644 index 0000000000000000000000000000000000000000..6bcbff908768d03456b1ff77260e8aafa535f23d Binary files /dev/null and b/docs/img/release-1-title-1.png differ diff --git a/docs/img/release-1-title-2.png b/docs/img/release-1-title-2.png new file mode 100644 index 0000000000000000000000000000000000000000..cbdf2c3c2711c5f1753ceef88a7103cb17adef1a Binary files /dev/null and b/docs/img/release-1-title-2.png differ diff --git 
a/docs/img/release_icon.png b/docs/img/release_icon.png new file mode 100644 index 0000000000000000000000000000000000000000..c0479181f6a911863eee9d3d87f655f8de8c0d7b Binary files /dev/null and b/docs/img/release_icon.png differ diff --git a/docs/img/rocksdb-fillrandom-plot.png b/docs/img/rocksdb-fillrandom-plot.png new file mode 100644 index 0000000000000000000000000000000000000000..075bf7ac77e538903f81300af598a85490a14b03 Binary files /dev/null and b/docs/img/rocksdb-fillrandom-plot.png differ diff --git a/docs/img/slim_pruner.png b/docs/img/slim_pruner.png new file mode 100644 index 0000000000000000000000000000000000000000..e7fe52f67ad71d814fdd3794152e577c27a737d7 Binary files /dev/null and b/docs/img/slim_pruner.png differ diff --git a/docs/img/table_openrow.png b/docs/img/table_openrow.png new file mode 100644 index 0000000000000000000000000000000000000000..ceabea6bd4b5a3ebaa37878d918eb34856a94285 Binary files /dev/null and b/docs/img/table_openrow.png differ diff --git a/docs/img/transformer_structure.png b/docs/img/transformer_structure.png new file mode 100644 index 0000000000000000000000000000000000000000..bd3fcc78b3de8445663f8a691e2de31e3aea309f Binary files /dev/null and b/docs/img/transformer_structure.png differ diff --git a/docs/img/trial_detail.png b/docs/img/trial_detail.png new file mode 100644 index 0000000000000000000000000000000000000000..66841b950484686193af0cae6fb19e19fed9027d Binary files /dev/null and b/docs/img/trial_detail.png differ diff --git a/docs/img/trial_duration.png b/docs/img/trial_duration.png new file mode 100644 index 0000000000000000000000000000000000000000..f6069127752500f1fbc8ad6e115a1b189e9795af Binary files /dev/null and b/docs/img/trial_duration.png differ diff --git a/docs/img/trial_error.jpg b/docs/img/trial_error.jpg new file mode 100644 index 0000000000000000000000000000000000000000..311900d3cc29e51d00f794370f0fba46f8be8eef Binary files /dev/null and b/docs/img/trial_error.jpg differ diff --git a/docs/img/trialkeeper.jpg b/docs/img/trialkeeper.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffa4f9a797c21b41079e7cb4a84975288ed44461 Binary files /dev/null and b/docs/img/trialkeeper.jpg differ diff --git a/docs/img/version_check.png b/docs/img/version_check.png new file mode 100644 index 0000000000000000000000000000000000000000..3ebb516b2a0532de7074c1b91ffdc6577fc75e83 Binary files /dev/null and b/docs/img/version_check.png differ diff --git a/docs/img/webui-img/addColumn.png b/docs/img/webui-img/addColumn.png new file mode 100644 index 0000000000000000000000000000000000000000..907ed00d11cccc87f579c2820e7230cc80f933ee Binary files /dev/null and b/docs/img/webui-img/addColumn.png differ diff --git a/docs/img/webui-img/best-curve.png b/docs/img/webui-img/best-curve.png new file mode 100644 index 0000000000000000000000000000000000000000..4880e80623734060350d99b1724324f7b39fd46e Binary files /dev/null and b/docs/img/webui-img/best-curve.png differ diff --git a/docs/img/webui-img/compare.png b/docs/img/webui-img/compare.png new file mode 100644 index 0000000000000000000000000000000000000000..7df344331b99e7457b8264067b626fc866078788 Binary files /dev/null and b/docs/img/webui-img/compare.png differ diff --git a/docs/img/webui-img/config.png b/docs/img/webui-img/config.png new file mode 100644 index 0000000000000000000000000000000000000000..8cad8ff088ddbe5d05bc8fe3ed9b245ed22b30e3 Binary files /dev/null and b/docs/img/webui-img/config.png differ diff --git a/docs/img/webui-img/copyParameter.png 
b/docs/img/webui-img/copyParameter.png new file mode 100644 index 0000000000000000000000000000000000000000..2dcd8a962057c748a75d9b72fb880c0a3c3b6210 Binary files /dev/null and b/docs/img/webui-img/copyParameter.png differ diff --git a/docs/img/webui-img/default-metric.png b/docs/img/webui-img/default-metric.png new file mode 100644 index 0000000000000000000000000000000000000000..3e75951a1e2fa875e23aa1995191e5d7d6e8bbf5 Binary files /dev/null and b/docs/img/webui-img/default-metric.png differ diff --git a/docs/img/webui-img/detail-local.png b/docs/img/webui-img/detail-local.png new file mode 100644 index 0000000000000000000000000000000000000000..d41301b46736b7521ddaf80811054b85c5e5885b Binary files /dev/null and b/docs/img/webui-img/detail-local.png differ diff --git a/docs/img/webui-img/detail-pai.png b/docs/img/webui-img/detail-pai.png new file mode 100644 index 0000000000000000000000000000000000000000..0a0921d2fdd0e8bc49ad9054a4c7a037e9d54177 Binary files /dev/null and b/docs/img/webui-img/detail-pai.png differ diff --git a/docs/img/webui-img/detail/customizedTrial.png b/docs/img/webui-img/detail/customizedTrial.png new file mode 100644 index 0000000000000000000000000000000000000000..0457faef023962329bf30071f2673458f7a84d73 Binary files /dev/null and b/docs/img/webui-img/detail/customizedTrial.png differ diff --git a/docs/img/webui-img/detail/customizedTrialButton.png b/docs/img/webui-img/detail/customizedTrialButton.png new file mode 100644 index 0000000000000000000000000000000000000000..c348b83c1727f906d1eb8a9afaa033bc26359027 Binary files /dev/null and b/docs/img/webui-img/detail/customizedTrialButton.png differ diff --git a/docs/img/webui-img/detail/log-local.png b/docs/img/webui-img/detail/log-local.png new file mode 100644 index 0000000000000000000000000000000000000000..b1a57772fd5ad32082dc18a5a8b9fc654ea4267d Binary files /dev/null and b/docs/img/webui-img/detail/log-local.png differ diff --git a/docs/img/webui-img/detail/searchId.png b/docs/img/webui-img/detail/searchId.png new file mode 100644 index 0000000000000000000000000000000000000000..3eea960f72b822bd109606437fcf8e999ff02735 Binary files /dev/null and b/docs/img/webui-img/detail/searchId.png differ diff --git a/docs/img/webui-img/detail/searchNo.png b/docs/img/webui-img/detail/searchNo.png new file mode 100644 index 0000000000000000000000000000000000000000..04515f02134e074775296694d4ed6efd1277ec4e Binary files /dev/null and b/docs/img/webui-img/detail/searchNo.png differ diff --git a/docs/img/webui-img/detail/searchParameterChoice.png b/docs/img/webui-img/detail/searchParameterChoice.png new file mode 100644 index 0000000000000000000000000000000000000000..2f95a0ef296035293e5e264019ca684c33fd54b0 Binary files /dev/null and b/docs/img/webui-img/detail/searchParameterChoice.png differ diff --git a/docs/img/webui-img/detail/searchParameterRange.png b/docs/img/webui-img/detail/searchParameterRange.png new file mode 100644 index 0000000000000000000000000000000000000000..465e11c802ec81c2ea5165ea50493798346816f2 Binary files /dev/null and b/docs/img/webui-img/detail/searchParameterRange.png differ diff --git a/docs/img/webui-img/detail/searchStatus.png b/docs/img/webui-img/detail/searchStatus.png new file mode 100644 index 0000000000000000000000000000000000000000..635a326d952dcdf90d1d2d498da04ffb9ef9c936 Binary files /dev/null and b/docs/img/webui-img/detail/searchStatus.png differ diff --git a/docs/img/webui-img/edit-experiment-param.png b/docs/img/webui-img/edit-experiment-param.png new file mode 100644 index 
0000000000000000000000000000000000000000..847aac3071299785e9de41f9ba22154d4cc6f5bb Binary files /dev/null and b/docs/img/webui-img/edit-experiment-param.png differ diff --git a/docs/img/webui-img/experimentError.png b/docs/img/webui-img/experimentError.png new file mode 100644 index 0000000000000000000000000000000000000000..de00536f8adfde756ca19ebb3bf9436feecd699d Binary files /dev/null and b/docs/img/webui-img/experimentError.png differ diff --git a/docs/img/webui-img/filter-intermediate.png b/docs/img/webui-img/filter-intermediate.png new file mode 100644 index 0000000000000000000000000000000000000000..1ebbb62d3edf4e93b52c066bd405b6dcad9ef1c9 Binary files /dev/null and b/docs/img/webui-img/filter-intermediate.png differ diff --git a/docs/img/webui-img/full-detail.png b/docs/img/webui-img/full-detail.png new file mode 100644 index 0000000000000000000000000000000000000000..dc84d9114fcd63a139f88e423bd228e4e8487d54 Binary files /dev/null and b/docs/img/webui-img/full-detail.png differ diff --git a/docs/img/webui-img/full-oview.png b/docs/img/webui-img/full-oview.png new file mode 100644 index 0000000000000000000000000000000000000000..d711131474b7c3f6513f41c241e6588d9f3ebf96 Binary files /dev/null and b/docs/img/webui-img/full-oview.png differ diff --git a/docs/img/webui-img/hyperPara.png b/docs/img/webui-img/hyperPara.png new file mode 100644 index 0000000000000000000000000000000000000000..77f9340f35b3af7e41fa58c38782030859faad2c Binary files /dev/null and b/docs/img/webui-img/hyperPara.png differ diff --git a/docs/img/webui-img/intermediate.png b/docs/img/webui-img/intermediate.png new file mode 100644 index 0000000000000000000000000000000000000000..bf19500fd0ed99ca1faf4a820cd60d38aa7805d6 Binary files /dev/null and b/docs/img/webui-img/intermediate.png differ diff --git a/docs/img/webui-img/kill-running.png b/docs/img/webui-img/kill-running.png new file mode 100644 index 0000000000000000000000000000000000000000..193deb1ae748f146fbde5268064773e85c4eecc0 Binary files /dev/null and b/docs/img/webui-img/kill-running.png differ diff --git a/docs/img/webui-img/managerExperimentList/expFilter.png b/docs/img/webui-img/managerExperimentList/expFilter.png new file mode 100644 index 0000000000000000000000000000000000000000..49d6b0b05faefe065b51ed056be76afd17c18415 Binary files /dev/null and b/docs/img/webui-img/managerExperimentList/expFilter.png differ diff --git a/docs/img/webui-img/managerExperimentList/expList.png b/docs/img/webui-img/managerExperimentList/expList.png new file mode 100644 index 0000000000000000000000000000000000000000..d170426492f4aaa1b86db3b4b3f9428cc8957771 Binary files /dev/null and b/docs/img/webui-img/managerExperimentList/expList.png differ diff --git a/docs/img/webui-img/managerExperimentList/experimentListNav.png b/docs/img/webui-img/managerExperimentList/experimentListNav.png new file mode 100644 index 0000000000000000000000000000000000000000..b6f95514f22cd33e0bec82a69eca7c8dc9414875 Binary files /dev/null and b/docs/img/webui-img/managerExperimentList/experimentListNav.png differ diff --git a/docs/img/webui-img/managerExperimentList/toAnotherExp.png b/docs/img/webui-img/managerExperimentList/toAnotherExp.png new file mode 100644 index 0000000000000000000000000000000000000000..bd8965ce84c50b438eef1ecb72ffd382c397b2de Binary files /dev/null and b/docs/img/webui-img/managerExperimentList/toAnotherExp.png differ diff --git a/docs/img/webui-img/over1.png b/docs/img/webui-img/over1.png new file mode 100644 index 
0000000000000000000000000000000000000000..733c1044b19e3004dca4e5b9a03bc02cff91c9e9 Binary files /dev/null and b/docs/img/webui-img/over1.png differ diff --git a/docs/img/webui-img/over2.png b/docs/img/webui-img/over2.png new file mode 100644 index 0000000000000000000000000000000000000000..f5174c4c72d18cf26f458d7a620d2e2f193d9c53 Binary files /dev/null and b/docs/img/webui-img/over2.png differ diff --git a/docs/img/webui-img/refresh-interval.png b/docs/img/webui-img/refresh-interval.png new file mode 100644 index 0000000000000000000000000000000000000000..9420d8af84da94e21b42bdd8d8c08f9b1d7924a7 Binary files /dev/null and b/docs/img/webui-img/refresh-interval.png differ diff --git a/docs/img/webui-img/review-log.png b/docs/img/webui-img/review-log.png new file mode 100644 index 0000000000000000000000000000000000000000..d643149bbe13c8d43c9122c056e8f525feff5eb8 Binary files /dev/null and b/docs/img/webui-img/review-log.png differ diff --git a/docs/img/webui-img/search-space-button.png b/docs/img/webui-img/search-space-button.png new file mode 100644 index 0000000000000000000000000000000000000000..830c6e780cbc9adfedffd557094768c14144b3db Binary files /dev/null and b/docs/img/webui-img/search-space-button.png differ diff --git a/docs/img/webui-img/search-space.png b/docs/img/webui-img/search-space.png new file mode 100644 index 0000000000000000000000000000000000000000..b77294627e1839f9ce625c1f96b1632c9048b435 Binary files /dev/null and b/docs/img/webui-img/search-space.png differ diff --git a/docs/img/webui-img/search-trial.png b/docs/img/webui-img/search-trial.png new file mode 100644 index 0000000000000000000000000000000000000000..3da210ab1f399a1f9c05e4f618ed6a65f55d64a3 Binary files /dev/null and b/docs/img/webui-img/search-trial.png differ diff --git a/docs/img/webui-img/searchSpace.png b/docs/img/webui-img/searchSpace.png new file mode 100644 index 0000000000000000000000000000000000000000..4a1283a505d851f1df5ff0f08440303b017277c7 Binary files /dev/null and b/docs/img/webui-img/searchSpace.png differ diff --git a/docs/img/webui-img/select-trial.png b/docs/img/webui-img/select-trial.png new file mode 100644 index 0000000000000000000000000000000000000000..30e89afa4f4be460b28e72cb943b5fdc35942498 Binary files /dev/null and b/docs/img/webui-img/select-trial.png differ diff --git a/docs/img/webui-img/summary.png b/docs/img/webui-img/summary.png new file mode 100644 index 0000000000000000000000000000000000000000..58f7465025695422ea60098ac09695656b9eb54d Binary files /dev/null and b/docs/img/webui-img/summary.png differ diff --git a/docs/img/webui-img/trial_duration.png b/docs/img/webui-img/trial_duration.png new file mode 100644 index 0000000000000000000000000000000000000000..d4bd5e735979e6791af1f34e50bd56b10c610545 Binary files /dev/null and b/docs/img/webui-img/trial_duration.png differ diff --git a/docs/img/webui-img/trials_intermeidate.png b/docs/img/webui-img/trials_intermeidate.png new file mode 100644 index 0000000000000000000000000000000000000000..767a6014efc718b5b37c92b0ad1ce879e7f09f9c Binary files /dev/null and b/docs/img/webui-img/trials_intermeidate.png differ diff --git a/docs/img/webui_overview_page.png b/docs/img/webui_overview_page.png new file mode 100644 index 0000000000000000000000000000000000000000..9a9ad216e7b11b4d0de1160705f890fb2d6cd3bb Binary files /dev/null and b/docs/img/webui_overview_page.png differ diff --git a/docs/img/webui_trialdetail_page.png b/docs/img/webui_trialdetail_page.png new file mode 100644 index 
0000000000000000000000000000000000000000..e5ffa7b0b7eaa6d60ae6ab445afb995d6716d186 Binary files /dev/null and b/docs/img/webui_trialdetail_page.png differ diff --git a/docs/img/weight_sharing.png b/docs/img/weight_sharing.png new file mode 100644 index 0000000000000000000000000000000000000000..dbf087d020d1f27858ddcddbf46dece6087f289a Binary files /dev/null and b/docs/img/weight_sharing.png differ diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..67217e5cfa3727fa022e1a72c79bac557c9dd593 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,30 @@ +sphinx>=4.0 +sphinx-argparse +sphinx-rtd-theme +sphinxcontrib-websupport +pygments>=2.7.1 +hyperopt +json_tricks +numpy +scipy +coverage +peewee +nbsphinx +schema +tensorboard +scikit-learn>=0.24.1 +thop +colorama +pkginfo +websockets +filelock +prettytable +psutil +pyyaml +ipython +gym +tianshou +https://download.pytorch.org/whl/cpu/torch-1.7.1%2Bcpu-cp37-cp37m-linux_x86_64.whl +https://download.pytorch.org/whl/cpu/torchvision-0.8.2%2Bcpu-cp37-cp37m-linux_x86_64.whl +pytorch-lightning +onnx diff --git a/docs/static/css/custom.css b/docs/static/css/custom.css new file mode 100644 index 0000000000000000000000000000000000000000..c3986251b18c65e8ae7298ff8c6e7e1e5ff76f98 --- /dev/null +++ b/docs/static/css/custom.css @@ -0,0 +1,218 @@ +.wy-table-responsive table td, .wy-table-responsive table th{ + white-space:normal +} + +.gap{ + margin-top: 24px; +} + +.gap2{ + margin-top: 12px; +} + +.rowHeight{ + line-height: 24px; +} + +.title { + padding-bottom: 6px; + border-bottom: 1px solid #ccc; +} + +.second-title{ + margin-top: 24px; +} +/* command style */ +.command { + background-color: #f8f8f8; + border: 1px solid #e1e4e5; + height: 40px; + line-height: 40px; + font-size: 12px; + padding-left: 10px; +} + +.command-intro { + line-height: 48px; +} + +.code{ + background-color: #f8f8f8; + padding: 15px 20px; +} + +/* document body width */ +.wy-nav-content{ + max-width: 100% !important; + background-color: #fff; +} + +/* nni logo bgcolor */ +.wy-side-nav-search{ + background-color: black; +} + +/* document body font-family */ +.wy-body-for-nav, h1, h2, h3, h4, h5, h6, div, p, ul, li, a{ + font-family: "Lato", segoe ui, "proxima-nova","Helvetica Neue",Arial,sans-serif; +} + +.wy-nav-content ul li{ + list-style: disc; + margin-bottom: 12px; + margin-left: 24px; +} + +.list, .list tr, .list td{ + border: 1px solid #ccc; +} + +.column b{ + padding-bottom: 4px; + border-bottom: 2px solid blue; +} +.column td{ + width: 200px; + text-align: center; + line-height: 36px; +} +td.framework{ + width:300px; + -webkit-width: 220px; +} +.list .circle li{ + list-style-type: circle; +} + +.list .firstUl, .circle{ + padding-left: 20px; +} + +.list .verticalMiddle{ + vertical-align: middle !important; + text-align: center; +} + +.inline img, .inline h2{ + display: inline-block; + margin-bottom: 0px; +} + +.inline hr{ + margin-top: 0px; +} + +.inline img { + margin-top: -20px; +} + +.ui-img img{ + height: 350px; +} + +.wy-side-nav-search>div.version{ + color: #fff !important; +} + +.list{ + width: 90%; + margin: 0 auto; +} + +.chinese{ + margin-bottom: 16px; +} + +.QR{ + width: 180px; +} + +.or{ + vertical-align: middle; +} + +.wy-plain-list-disc li, .rst-content .section ul li, .rst-content .toctree-wrapper ul li, article ul li { + margin-bottom: 0px; +} + +.wy-nav-content .emotion li { + width: 300px; + height: 300px; + float: left; + margin-left: 15px; + margin-bottom: 60px; + background: #f5f5f5; + 
box-sizing: border-box; + list-style: none; +} + +.emotion li div{ + transition: 0.2s; + text-align: center; + vertical-align: middle; +} + +.emotion li div:hover{ + transform: scale(1.1); + box-shadow: 0 15px 30px rgb(0 0 0 / 10%); +} + +.center{ + line-height: 54px; + text-align: center; +} + +.emotion img{ + width: 250px; +} + +.emotion .first img { + margin: 50px 24px; +} + +.emotion .second .working{ + margin: 67px 24px; +} + +.emotion .second .sign{ + margin: 77px 24px; +} + +.emotion .second .crying{ + margin: 80px 24px; +} + +.emotion .three img{ + margin: 66px 24px; +} + +.emotion .three .weaving{ + margin: 75px 24px; +} + +.three .comfort img{ + margin: 92px 24px; +} + +.emotion .four img{ + margin: 81px 24px; +} + +.details-container{ + text-align: center; +} + +.clear{ + clear: both; +} + +.whatNew{ + margin-top: 6px; +} + +.pipeline tr, .pipeline td, .pipeline th{ + width: 248px; + line-height: 26px; + text-align: center; + border: 1px solid #ccc; +} \ No newline at end of file diff --git a/docs/static/img/Comfort.png b/docs/static/img/Comfort.png new file mode 100644 index 0000000000000000000000000000000000000000..c42c69bbccb58f6e9d81139d8858467946821681 Binary files /dev/null and b/docs/static/img/Comfort.png differ diff --git a/docs/static/img/Crying.png b/docs/static/img/Crying.png new file mode 100644 index 0000000000000000000000000000000000000000..43d10426cc2f2dac4072e391d99f3498fb58f335 Binary files /dev/null and b/docs/static/img/Crying.png differ diff --git a/docs/static/img/Cut.png b/docs/static/img/Cut.png new file mode 100644 index 0000000000000000000000000000000000000000..83c4d37fdabaeab9d25730520d42a07426be63e8 Binary files /dev/null and b/docs/static/img/Cut.png differ diff --git a/docs/static/img/Error.png b/docs/static/img/Error.png new file mode 100644 index 0000000000000000000000000000000000000000..5184238ece55c1c82cf08020d255a9570824c409 Binary files /dev/null and b/docs/static/img/Error.png differ diff --git a/docs/static/img/Holiday.png b/docs/static/img/Holiday.png new file mode 100644 index 0000000000000000000000000000000000000000..f4446ed8131386d9414acb1af8f8f3ba707294c5 Binary files /dev/null and b/docs/static/img/Holiday.png differ diff --git a/docs/static/img/NoBug.png b/docs/static/img/NoBug.png new file mode 100644 index 0000000000000000000000000000000000000000..9bf1266769f2bb870308d3917252952ca339928c Binary files /dev/null and b/docs/static/img/NoBug.png differ diff --git a/docs/static/img/Sign.png b/docs/static/img/Sign.png new file mode 100644 index 0000000000000000000000000000000000000000..a57fb2064d3c9ebb0226816ff197ff11a5e5d457 Binary files /dev/null and b/docs/static/img/Sign.png differ diff --git a/docs/static/img/Sweat.png b/docs/static/img/Sweat.png new file mode 100644 index 0000000000000000000000000000000000000000..642e5d6e55f9d637748dc3f1352d7d4b8748ab78 Binary files /dev/null and b/docs/static/img/Sweat.png differ diff --git a/docs/static/img/Weaving.png b/docs/static/img/Weaving.png new file mode 100644 index 0000000000000000000000000000000000000000..3845ce4cbc27b031be24abc65860d7f2f92bce54 Binary files /dev/null and b/docs/static/img/Weaving.png differ diff --git a/docs/static/img/Working.png b/docs/static/img/Working.png new file mode 100644 index 0000000000000000000000000000000000000000..47bc0cd49870cde1e71132c134457038ffddac05 Binary files /dev/null and b/docs/static/img/Working.png differ diff --git a/docs/static/img/contributors.png b/docs/static/img/contributors.png new file mode 100644 index 
0000000000000000000000000000000000000000..37a84c49b9c20a2eece376becfcac46c51b8cc06 Binary files /dev/null and b/docs/static/img/contributors.png differ diff --git a/docs/static/img/home.svg b/docs/static/img/home.svg new file mode 100644 index 0000000000000000000000000000000000000000..5360482f3260794c8aa9b037833954b9ebc61def --- /dev/null +++ b/docs/static/img/home.svg @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/static/img/overview.svg b/docs/static/img/overview.svg new file mode 100644 index 0000000000000000000000000000000000000000..ae369941abc61d3c1c271c157c183f2801dbaa0d --- /dev/null +++ b/docs/static/img/overview.svg @@ -0,0 +1 @@ +overview栅格化 \ No newline at end of file diff --git a/docs/static/img/release_icon.png b/docs/static/img/release_icon.png new file mode 100644 index 0000000000000000000000000000000000000000..c0479181f6a911863eee9d3d87f655f8de8c0d7b Binary files /dev/null and b/docs/static/img/release_icon.png differ diff --git a/docs/static/img/webui.gif b/docs/static/img/webui.gif new file mode 100644 index 0000000000000000000000000000000000000000..47d604272432670e45b60787fbfbbb240dccdea8 Binary files /dev/null and b/docs/static/img/webui.gif differ diff --git a/docs/tools/chineselink.py b/docs/tools/chineselink.py new file mode 100644 index 0000000000000000000000000000000000000000..c0c99e640e2fcecd564031e4c2c90732694ebdc8 --- /dev/null +++ b/docs/tools/chineselink.py @@ -0,0 +1,133 @@ +""" +This keeps the Chinese docs up to date with the English docs. It should be run regularly. +The files in the whitelist are kept unchanged, as they are translated manually. + +Under docs, run + + python tools/chineselink.py +""" + +import hashlib +import os +import shutil +import sys +from pathlib import Path + + +def walk(path): + for p in Path(path).iterdir(): + if p.is_dir(): + yield from walk(p) + continue + yield p + + +# Keeps files as discussed in +# https://github.com/microsoft/nni/issues/4298 +# Not the way recommended by Sphinx, though: https://docs.readthedocs.io/en/stable/guides/manage-translations-sphinx.html + +whitelist = [ + '_templates/index.html', # I think no one ever remembers to update this file. Might need to rethink this. 
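+ # Entries below are translated by hand. For every other doc file this script symlinks zh_CN to en_US, + # while each whitelisted file gets a '.. <checksum>' first line in its Chinese copy (the first 32 hex + # characters of the sha256 of the English source), so a stale translation is flagged when the English file changes.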
+ 'Overview.rst', + 'installation.rst', + 'Tutorial/InstallationLinux.rst', + 'Tutorial/InstallationWin.rst', + 'Tutorial/QuickStart.rst', + 'TrialExample/Trials.rst', + 'Tutorial/WebUI.rst', + 'NAS/QuickStart.rst', + 'Compression/Overview.rst', + 'Compression/QuickStart.rst', +] + +suffix_list = [ + '.html', + '.md', + '.rst', + '.ipynb', +] + +for path in whitelist: + assert (Path('zh_CN') / path).exists(), path + +content_tables = [] +for path in walk(Path('en_US')): + if path.suffix == '.rst': + is_content_table = False + for line in path.open('r').readlines(): + if is_content_table: + if not line.startswith(' ') and line.strip(): + is_content_table = False + if 'toctree::' in line: + is_content_table = True + if is_content_table: + content_tables.append(path.relative_to('en_US').as_posix()) + +print('Content tables:', content_tables) +whitelist += content_tables + +pipeline_mode = len(sys.argv) > 1 and sys.argv[1] == 'check' +failed_files = [] + + +def need_to_translate(source, target): + if not target.exists(): + failed_files.append('(missing) ' + target.as_posix()) + if pipeline_mode: + return + shutil.copyfile(source, target) + if target.suffix == '.html': + return # FIXME I don't know how to process html + target_checksum = hashlib.sha256(source.open('rb').read()).hexdigest()[:32] # first 32 hex chars of the English source's sha256 + checksum = target.open('r').readline().strip()[3:] # drop the leading '.. ' + if checksum != target_checksum: + failed_files.append('(out-of-date) ' + target.as_posix()) + if pipeline_mode: + return + contents = target.open('r').readlines() + firstline = '.. ' + target_checksum + '\n' + if contents[0].startswith('.. '): + contents = [firstline] + contents[1:] + else: + contents = [firstline, '\n'] + contents + target.open('w').writelines(contents) + + +for path in walk(Path('en_US')): + relative_path = path.relative_to('en_US') + if relative_path.as_posix().startswith('_build'): + continue + if path.suffix in suffix_list: + target_path = (Path('zh_CN') / relative_path) + if relative_path.as_posix() in whitelist: + # whitelisted files should be translated manually + need_to_translate(path, target_path) + print(f'Skipped linking for {path} as it is in the whitelist.') + else: + target_path.parent.mkdir(parents=True, exist_ok=True) + link_path = path + for _ in range(len(list(Path(relative_path).parents))): + link_path = Path('..') / link_path + if not target_path.is_symlink() or os.readlink(target_path) != link_path.as_posix(): + failed_files.append('(invalid link) ' + target_path.as_posix()) + if not pipeline_mode: + target_path.unlink(missing_ok=True) + target_path.symlink_to(link_path) + +# delete redundant files +for path in walk(Path('zh_CN')): + if path.suffix in suffix_list: + relative_path = path.relative_to('zh_CN') + if not (Path('en_US') / relative_path).exists(): + failed_files.append('(redundant) ' + path.as_posix()) + if not pipeline_mode: + print(f'Deleting {path}') + path.unlink() + + +if pipeline_mode and failed_files: + raise ValueError( + 'The following files are not up-to-date. 
Please run "python3 tools/chineselink.py" under docs folder ' + 'to refresh them and update their corresponding translation.\n' + '\n'.join([' ' + line for line in failed_files])) +if failed_files: + print('Updated files:', failed_files) diff --git a/docs/tools/md2rst.py b/docs/tools/md2rst.py new file mode 100644 index 0000000000000000000000000000000000000000..1136c7599094bd3db8ab631ea638f904017b383d --- /dev/null +++ b/docs/tools/md2rst.py @@ -0,0 +1,135 @@ +import argparse +import m2r +import os +import re +import shutil +from pathlib import Path + + +def single_line_process(line): + if line == ' .. contents::': + return '.. contents::' + # https://github.com/sphinx-doc/sphinx/issues/3921 + line = re.sub(r'(`.*? <.*?>`)_', r'\1__', line) + # inline emphasis + line = re.sub(r'\*\*\\ (.*?)\\ \*\*', r' **\1** ', line) + line = re.sub(r'\*(.*?)\\ \*', r'*\1*', line) + line = re.sub(r'\*\*(.*?) \*\*', r'**\1** ', line) + line = re.sub(r'\\\*\\\*(.*?)\*\*', r'**\1**', line) + line = re.sub(r'\\\*\\\*(.*?)\*\*\\ ', r'**\1**', line) + line = line.replace(r'\* - `\**', r'* - `**') + line = re.sub(r'\\\* \*\*(.*?)\*\* \(\\\*\s*(.*?)\s*\*\\ \)', r'* \1 (\2)', line) + line = re.sub(r'\<(.*)\.md(\>|#)', r'<\1.rst\2', line) + line = re.sub(r'`\*\*(.*?)\*\* <#(.*?)>`__', r'`\1 <#\2>`__', line) + line = re.sub(r'\*\* (classArgs|stop|FLOPS.*?|pruned.*?|large.*?|path|pythonPath|2D.*?|codeDirectory|ps|worker|Tuner|Assessor)\*\*', + r' **\1**', line) + + line = line.replace('.. code-block:::: bash', '.. code-block:: bash') + line = line.replace('raw-html-m2r', 'raw-html') + line = line.replace('[toc]', '.. toctree::') + + # image + line = re.sub(r'\:raw\-html\:`\`', r'\n.. image:: \1\n :scale: \2%', line) + + # special case (per line handling) + line = line.replace('Nb = |Db|', r'Nb = \|Db\|') + line = line.replace(' Here is just a small list of libraries ', '\nHere is just a small list of libraries ') + line = line.replace(' Find the data management region in job submission page.', 'Find the data management region in job submission page.') + line = line.replace('Tuner/InstallCustomizedTuner.md', 'Tuner/InstallCustomizedTuner') + line = line.replace('✓', ':raw-html:`✓`') + line = line.replace(' **builtinTunerName** and** classArgs**', '**builtinTunerName** and **classArgs**') + line = line.replace('`\ ``nnictl ss_gen`` <../Tutorial/Nnictl.rst>`__', '`nnictl ss_gen <../Tutorial/Nnictl.rst>`__') + line = line.replace('**Step 1. Install NNI, follow the install guide `here <../Tutorial/QuickStart.rst>`__.**', + '**Step 1. Install NNI, follow the install guide** `here <../Tutorial/QuickStart.rst>`__.') + line = line.replace('*Please refer to `here ', 'Please refer to `here ') + # line = line.replace('\* **optimize_mode** ', '* **optimize_mode** ') + if line == '~' * len(line): + line = '^' * len(line) + return line + + +def special_case_replace(full_text): + replace_pairs = {} + replace_pairs['PyTorch\n"""""""'] = '**PyTorch**' + replace_pairs['Search Space\n============'] = '.. 
role:: raw-html(raw)\n :format: html\n\nSearch Space\n============' + for file in os.listdir(Path(__file__).parent / 'patches'): + with open(Path(__file__).parent / 'patches' / file) as f: + r, s = f.read().split('%%%%%%\n') + replace_pairs[r] = s + for r, s in replace_pairs.items(): + full_text = full_text.replace(r, s) + return full_text + + +def process_table(content): + content = content.replace('------ |', '------|') + lines = [] + for line in content.split('\n'): + if line.startswith(' |'): + line = line[2:] + lines.append(line) + return '\n'.join(lines) + + +def process_github_link(line): + line = re.sub(r'`(\\ ``)?([^`]*?)(``)? \<(.*?)(blob|tree)/v1.9/(.*?)\>`__', r':githublink:`\2 <\6>`', line) + if 'githublink' in line: + line = re.sub(r'\*Example: (.*)\*', r'*Example:* \1', line) + line = line.replace('https://nni.readthedocs.io/en/latest', '') + return line + + +for root, dirs, files in os.walk('en_US'): + root = Path(root) + for file in files: + if not file.endswith('.md') or file == 'Release_v1.0.md': + continue + + with open(root / file) as f: + md_content = f.read() + + if file == 'Nnictl.md': + md_content = process_table(md_content) + + out = m2r.convert(md_content) + lines = out.split('\n') + if lines[0] == '': + lines = lines[1:] + + # remove code-block eval_rst + i = 0 + while i < len(lines): + line = lines[i] + if line.strip() == '.. code-block:: eval_rst': + space_count = line.index('.') + lines[i] = lines[i + 1] = None + if i > 0 and lines[i - 1]: + lines[i] = '' # blank line + i += 2 + while i < len(lines) and (lines[i].startswith(' ' * (space_count + 3)) or lines[i] == ''): + lines[i] = lines[i][space_count + 3:] + i += 1 + elif line.strip() == '.. code-block' or line.strip() == '.. code-block::': + lines[i] += ':: bash' + i += 1 + else: + i += 1 + + lines = [l for l in lines if l is not None] + + lines = list(map(single_line_process, lines)) + + if file != 'Release.md': + # githublink + lines = list(map(process_github_link, lines)) + + out = '\n'.join(lines) + out = special_case_replace(out) + + with open(root / (Path(file).stem + '.rst'), 'w') as f: + f.write(out) + + # back it up and remove + moved_root = Path('archive_en_US') / root.relative_to('en_US') + moved_root.mkdir(exist_ok=True) + shutil.move(root / file, moved_root / file) diff --git a/docs/tools/patches/1.txt b/docs/tools/patches/1.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbee52eded0ecc73d142890e71f4d1b4668f19e4 --- /dev/null +++ b/docs/tools/patches/1.txt @@ -0,0 +1,24 @@ + * - GP Tuner + - :raw-html:`✓` + - + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - + - + - +%%%%%% + * - GP Tuner + - :raw-html:`✓` + - + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - :raw-html:`✓` + - + - + - + - diff --git a/docs/tools/patches/10.txt b/docs/tools/patches/10.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba95164f93957847fbec32122fa231c8696850d4 --- /dev/null +++ b/docs/tools/patches/10.txt @@ -0,0 +1,3 @@ +An SSH server needs a port; you need to expose Docker's SSH port to NNI as the connection port. For example, if you set your container's SSH port as **``A``** \ , you should map the container's port ** ``A``** to your remote host machine's other port ** ``B``** \ , NNI will connect port ** ``B``** as an SSH port, and your host machine will map the connection from port ** ``B``** to port ** ``A``** then NNI could connect to your Docker container. 
+%%%%%%
+An SSH server needs a port; you need to expose Docker's SSH port to NNI as the connection port. For example, if you set your container's SSH port to ``A``, map the container's port ``A`` to some other port ``B`` on your remote host machine. NNI will connect to port ``B`` as the SSH port, the host machine will forward the connection from port ``B`` to port ``A``, and NNI can then reach your Docker container.
diff --git a/docs/tools/patches/11.txt b/docs/tools/patches/11.txt
new file mode 100644
index 0000000000000000000000000000000000000000..df6e5d9c406b151b8f99ee140996e8ff0b928845
--- /dev/null
+++ b/docs/tools/patches/11.txt
@@ -0,0 +1,3 @@
+If the id ends with *, nnictl will stop all experiments whose ids matchs the regular.
+%%%%%%
+If the id ends with \*, nnictl will stop all experiments whose ids match the regular expression.
diff --git a/docs/tools/patches/12.txt b/docs/tools/patches/12.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f1d88c078230d9eac16513cfd445b13925d301fa
--- /dev/null
+++ b/docs/tools/patches/12.txt
@@ -0,0 +1,7 @@
+..
+
+   make: *** [install-XXX] Segmentation fault (core dumped)
+%%%%%%
+.. code-block:: text
+
+   make: *** [install-XXX] Segmentation fault (core dumped)
diff --git a/docs/tools/patches/13.txt b/docs/tools/patches/13.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c17cedbca8253dbed8b4ca0f444c9c4fe2a6e600
--- /dev/null
+++ b/docs/tools/patches/13.txt
@@ -0,0 +1,3 @@
+   Click ``Submit job`` button in web portal.
+%%%%%%
+Click the ``Submit job`` button in the web portal.
diff --git a/docs/tools/patches/14.txt b/docs/tools/patches/14.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88866fa2b50b5936e95f595a50c047118a0b4192
--- /dev/null
+++ b/docs/tools/patches/14.txt
@@ -0,0 +1,5 @@
+:raw-html:`
+ +
` +%%%%%% +:raw-html:`
` \ No newline at end of file diff --git a/docs/tools/patches/15.txt b/docs/tools/patches/15.txt new file mode 100644 index 0000000000000000000000000000000000000000..030074b45020f32d1771a8938e6f551cfdb5729d --- /dev/null +++ b/docs/tools/patches/15.txt @@ -0,0 +1,8 @@ +.. list-table:: + :header-rows: 1 + +%%%%%% +.. list-table:: + :header-rows: 1 + :widths: auto + diff --git a/docs/tools/patches/16.txt b/docs/tools/patches/16.txt new file mode 100644 index 0000000000000000000000000000000000000000..f1592a713f0fcf3397e0797ece18c1248989c2a1 --- /dev/null +++ b/docs/tools/patches/16.txt @@ -0,0 +1,9 @@ +.. code-block:: bash + + 1.1 Declare NNI API + Include `import nni` in your trial code to use NNI APIs. +%%%%%% +.. + + 1.1 Declare NNI API + Include `import nni` in your trial code to use NNI APIs. diff --git a/docs/tools/patches/17.txt b/docs/tools/patches/17.txt new file mode 100644 index 0000000000000000000000000000000000000000..21b449b066a4ed925b4eb4a80bd2b69affa83cc1 --- /dev/null +++ b/docs/tools/patches/17.txt @@ -0,0 +1,7 @@ +.. code-block:: bash + + from nni.compression.pytorch.utils.counter import count_flops_params +%%%%%% +.. code-block:: python + + from nni.compression.pytorch.utils.counter import count_flops_params diff --git a/docs/tools/patches/18.txt b/docs/tools/patches/18.txt new file mode 100644 index 0000000000000000000000000000000000000000..3938dc4cb0edf65f3971676787c4ebaddd88e090 --- /dev/null +++ b/docs/tools/patches/18.txt @@ -0,0 +1,7 @@ +.. code-block:: bash + + NNI's official image msranni/nni does not support SSH servers for the time being; you should build your own Docker image with an SSH configuration or use other images as a remote server. +%%%%%% +.. code-block:: text + + NNI's official image msranni/nni does not support SSH servers for the time being; you should build your own Docker image with an SSH configuration or use other images as a remote server. diff --git a/docs/tools/patches/19.txt b/docs/tools/patches/19.txt new file mode 100644 index 0000000000000000000000000000000000000000..7fc65674b004efa846678ec90d9c28d586cbf28c --- /dev/null +++ b/docs/tools/patches/19.txt @@ -0,0 +1,56 @@ +Code Styles & Naming Conventions +-------------------------------- + + +* We follow `PEP8 `__ for Python code and naming conventions, do try to adhere to the same when making a pull request or making a change. One can also take the help of linters such as ``flake8`` or ``pylint`` +* We also follow `NumPy Docstring Style `__ for Python Docstring Conventions. During the `documentation building `__\ , we use `sphinx.ext.napoleon `__ to generate Python API documentation from Docstring. +* For docstrings, please refer to `numpydoc docstring guide `__ and `pandas docstring guide `__ + + * For function docstring, **description** , **Parameters**\ , and** Returns**\ /** Yields** are mandatory. + * For class docstring, **description**\ ,** Attributes** are mandatory. + * For docstring to describe ``dict``\ , which is commonly used in our hyper-param format description, please refer to [RiboKit : Doc Standards + + * Internal Guideline on Writing Standards](https://ribokit.github.io/docs/text/) + +Documentation +------------- + +Our documentation is built with :githublink:`sphinx `. + + +* + Before submitting the documentation change, please **build homepage locally**\ : ``cd docs/en_US && make html``\ , then you can see all the built documentation webpage under the folder ``docs/en_US/_build/html``. 
It's also highly recommended taking care of** every WARNING** during the build, which is very likely the signal of a** deadlink** and other annoying issues.
+
+*
+  For links, please consider using **relative paths** first. However, if the documentation is written in Markdown format, and:
+
+
+  * It's an image link which needs to be formatted with embedded html grammar, please use global URL like ``https://user-images.githubusercontent.com/44491713/51381727-e3d0f780-1b4f-11e9-96ab-d26b9198ba65.png``\ , which can be automatically generated by dragging picture onto `Github Issue `__ Box.
+  * It cannot be re-formatted by sphinx, such as source code, please use its global URL. For source code that links to our github repo, please use URLs rooted at ``https://github.com/Microsoft/nni/tree/v1.9/`` (\ :githublink:`mnist.py ` for example).
+%%%%%%
+Code Styles & Naming Conventions
+--------------------------------
+
+* We follow `PEP8 `__ for Python code and naming conventions; do try to adhere to them when making a pull request or a change. One can also take the help of linters such as ``flake8`` or ``pylint``.
+* We also follow `NumPy Docstring Style `__ for Python Docstring Conventions. During the `documentation building `__\ , we use `sphinx.ext.napoleon `__ to generate Python API documentation from Docstring.
+* For docstrings, please refer to `numpydoc docstring guide `__ and `pandas docstring guide `__
+
+  * For function docstrings, **description**, **Parameters**, and **Returns**/**Yields** are mandatory.
+  * For class docstrings, **description** and **Attributes** are mandatory.
+  * For docstrings that describe ``dict``, which is commonly used in our hyper-param format description, please refer to RiboKit Doc Standards
+
+    * `Internal Guideline on Writing Standards `__
+
+Documentation
+-------------
+
+Our documentation is built with :githublink:`sphinx `.
+
+* Before submitting a documentation change, please **build the homepage locally**: ``cd docs/en_US && make html``; you can then see all the built documentation webpages under the folder ``docs/en_US/_build/html``. It's also highly recommended to take care of **every WARNING** during the build, which is very likely the signal of a **dead link** or other annoying issues.
+
+*
+  For links, please consider using **relative paths** first. However, if the documentation is written in Markdown format, and:
+
+
+  * It's an image link which needs to be formatted with embedded html grammar, please use a global URL like ``https://user-images.githubusercontent.com/44491713/51381727-e3d0f780-1b4f-11e9-96ab-d26b9198ba65.png``, which can be automatically generated by dragging the picture onto the `Github Issue `__ box.
+  * It cannot be re-formatted by sphinx, such as source code; in that case, please use its global URL. For source code that links to our github repo, please use URLs rooted at ``https://github.com/Microsoft/nni/tree/v1.9/`` (:githublink:`mnist.py ` for example).
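The ``docs/tools/patches/*.txt`` files in this change are plain-text find/replace pairs consumed by ``special_case_replace`` in ``md2rst.py``: the text before the ``%%%%%%`` separator must match the m2r-converted output exactly, and the text after it is substituted verbatim. A minimal sketch of that mechanism follows, assuming it runs from the ``docs/tools`` directory; the standalone ``apply_patch`` helper and the example input path are illustrative, not part of this change.

.. code-block:: python

   from pathlib import Path

   def apply_patch(full_text, patch_file):
       # The text before '%%%%%%' is the exact converted output to search
       # for; the text after it is the hand-corrected replacement. If the
       # 'before' half does not match byte-for-byte, replace() is a silent no-op.
       before, after = Path(patch_file).read_text().split('%%%%%%\n')
       return full_text.replace(before, after)

   # e.g. apply every patch to one converted document, mirroring md2rst.py:
   rst = Path('../en_US/Tutorial/Nnictl.rst').read_text()
   for patch in sorted(Path('patches').iterdir()):
       rst = apply_patch(rst, patch)

Because matching is exact, only the "after" halves of these patch files are safe to reword; the "before" halves must stay byte-identical to what m2r emits.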
diff --git a/docs/tools/patches/2.txt b/docs/tools/patches/2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6a5c87d26272f823498e2ff34abef6aebb702804
--- /dev/null
+++ b/docs/tools/patches/2.txt
@@ -0,0 +1,45 @@
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - Ubuntu 16.04 or above
+   * - **CPU**
+     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
+     - Intel® Core™ i3 or AMD Phenom™ X3 8650
+   * - **GPU**
+     - NVIDIA® GeForce® GTX 660 or better
+     - NVIDIA® GeForce® GTX 460
+   * - **Memory**
+     - 6 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 30 GB available hare drive space
+   * - **Internet**
+     - Boardband internet connection
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+%%%%%%
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - Ubuntu 16.04 or above
+     -
+   * - **CPU**
+     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
+     - Intel® Core™ i3 or AMD Phenom™ X3 8650
+   * - **GPU**
+     - NVIDIA® GeForce® GTX 660 or better
+     - NVIDIA® GeForce® GTX 460
+   * - **Memory**
+     - 6 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 30 GB available hard drive space
+     -
+   * - **Internet**
+     - Broadband internet connection
+     -
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+     -
diff --git a/docs/tools/patches/20.txt b/docs/tools/patches/20.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dfb73a53e0b1242211a002cd706f7911ad4a58c2
--- /dev/null
+++ b/docs/tools/patches/20.txt
@@ -0,0 +1,44 @@
+..
+
+   1.1 Declare NNI API
+   Include `import nni` in your trial code to use NNI APIs.
+
+   1.2 Get predefined parameters
+   Use the following code snippet:
+
+   RECEIVED_PARAMS = nni.get_next_parameter()
+
+   to get hyper-parameters' values assigned by tuner. `RECEIVED_PARAMS` is an object, for example:
+
+   {"conv_size": 2, "hidden_size": 124, "learning_rate": 0.0307, "dropout_rate": 0.2029}
+
+   1.3 Report NNI results
+   Use the API:
+
+   `nni.report_intermediate_result(accuracy)`
+
+   to send `accuracy` to assessor.
+
+   Use the API:
+
+   `nni.report_final_result(accuracy)`
+
+   to send `accuracy` to tuner.
+%%%%%%
+* Declare NNI API: include ``import nni`` in your trial code to use NNI APIs.
+* Get predefined parameters
+
+Use the following code snippet:
+
+.. code-block:: python

+   RECEIVED_PARAMS = nni.get_next_parameter()
+
+to get hyper-parameters' values assigned by tuner. ``RECEIVED_PARAMS`` is an object, for example:
+
+.. code-block:: json
+
+   {"conv_size": 2, "hidden_size": 124, "learning_rate": 0.0307, "dropout_rate": 0.2029}
+
+* Report NNI results: Use the API ``nni.report_intermediate_result(accuracy)`` to send ``accuracy`` to the assessor.
+  Use the API ``nni.report_final_result(accuracy)`` to send ``accuracy`` to the tuner.
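These conversion patches work alongside ``chineselink.py`` from earlier in this change: once a whitelisted file has been translated, the script stamps the first 32 hex characters of the SHA-256 digest of the English source onto the first line of the ``zh_CN`` copy as an RST comment (the ``.. <checksum>`` headers visible on the ``docs/zh_CN`` files below). A minimal sketch of that freshness check, assuming it runs from the ``docs`` directory; the ``translation_is_current`` helper is illustrative, not part of this change.

.. code-block:: python

   import hashlib
   from pathlib import Path

   def translation_is_current(en_path, zh_path):
       # chineselink.py records sha256(English source)[:32] as an RST
       # comment ('.. <checksum>') on line 1 of the translated file;
       # the translation is stale once the two values no longer agree.
       digest = hashlib.sha256(Path(en_path).read_bytes()).hexdigest()[:32]
       first_line = Path(zh_path).open('r').readline().strip()
       return first_line == '.. ' + digest

   # e.g. translation_is_current('en_US/Compression/Overview.rst',
   #                             'zh_CN/Compression/Overview.rst')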
diff --git a/docs/tools/patches/3.txt b/docs/tools/patches/3.txt
new file mode 100644
index 0000000000000000000000000000000000000000..49037f5ab1a2d418a8e59a746a4b201c9f53a739
--- /dev/null
+++ b/docs/tools/patches/3.txt
@@ -0,0 +1,46 @@
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - macOS 10.14.1 or above
+   * - **CPU**
+     - Intel® Core™ i7-4770 or better
+     - Intel® Core™ i5-760 or better
+   * - **GPU**
+     - AMD Radeon™ R9 M395X or better
+     - NVIDIA® GeForce® GT 750M or AMD Radeon™ R9 M290 or better
+   * - **Memory**
+     - 8 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 70GB available space SSD
+     - 70GB available space 7200 RPM HDD
+   * - **Internet**
+     - Boardband internet connection
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+%%%%%%
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - macOS 10.14.1 or above
+     -
+   * - **CPU**
+     - Intel® Core™ i7-4770 or better
+     - Intel® Core™ i5-760 or better
+   * - **GPU**
+     - AMD Radeon™ R9 M395X or better
+     - NVIDIA® GeForce® GT 750M or AMD Radeon™ R9 M290 or better
+   * - **Memory**
+     - 8 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 70GB available space SSD
+     - 70GB available space 7200 RPM HDD
+   * - **Internet**
+     - Broadband internet connection
+     -
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+     -
diff --git a/docs/tools/patches/4.txt b/docs/tools/patches/4.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4fe7bfaae57ed538bd1428335058bb46e45ac2b
--- /dev/null
+++ b/docs/tools/patches/4.txt
@@ -0,0 +1,45 @@
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - Windows 10 1809 or above
+   * - **CPU**
+     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
+     - Intel® Core™ i3 or AMD Phenom™ X3 8650
+   * - **GPU**
+     - NVIDIA® GeForce® GTX 660 or better
+     - NVIDIA® GeForce® GTX 460
+   * - **Memory**
+     - 6 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 30 GB available hare drive space
+   * - **Internet**
+     - Boardband internet connection
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+%%%%%%
+   * -
+     - Recommended
+     - Minimum
+   * - **Operating System**
+     - Windows 10 1809 or above
+     -
+   * - **CPU**
+     - Intel® Core™ i5 or AMD Phenom™ II X3 or better
+     - Intel® Core™ i3 or AMD Phenom™ X3 8650
+   * - **GPU**
+     - NVIDIA® GeForce® GTX 660 or better
+     - NVIDIA® GeForce® GTX 460
+   * - **Memory**
+     - 6 GB RAM
+     - 4 GB RAM
+   * - **Storage**
+     - 30 GB available hard drive space
+     -
+   * - **Internet**
+     - Broadband internet connection
+     -
+   * - **Resolution**
+     - 1024 x 768 minimum display resolution
+     -
diff --git a/docs/tools/patches/5.txt b/docs/tools/patches/5.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a0ee1d4ea9ecf43ec609f3110fa4f9bcec43aa3
--- /dev/null
+++ b/docs/tools/patches/5.txt
@@ -0,0 +1,84 @@
+   * -
+     - s=4
+     - s=3
+     - s=2
+     - s=1
+     - s=0
+   * - i
+     - n r
+     - n r
+     - n r
+     - n r
+     - n r
+   * - 0
+     - 81 1
+     - 27 3
+     - 9 9
+     - 6 27
+     - 5 81
+   * - 1
+     - 27 3
+     - 9 9
+     - 3 27
+     - 2 81
+     -
+   * - 2
+     - 9 9
+     - 3 27
+     - 1 81
+     -
+     -
+   * - 3
+     - 3 27
+     - 1 81
+     -
+     -
+     -
+   * - 4
+     - 1 81
+     -
+     -
+     -
+%%%%%%
+   * -
+     - s=4
+     - s=3
+     - s=2
+     - s=1
+     - s=0
+   * - i
+     - n r
+     - n r
+     - n r
+     - n r
+     - n r
+   * - 0
+     - 81 1
+     - 27 3
+     - 9 9
+     - 6 27
+     - 5 81
+   * - 1
+     - 27 3
+     - 9 9
+     - 3 27
+     - 2 81
+     -
+   * - 2
+     - 9 9
+     - 3 27
+     - 1 81
+     -
+     -
+   * - 3
+     - 3 27
+     - 1 81
+     -
+     -
+     -
+   * - 4
+     - 1 81
+     -
+     -
+     -
+     -
diff --git a/docs/tools/patches/6.txt b/docs/tools/patches/6.txt
new file mode 100644
index
0000000000000000000000000000000000000000..abf445780cff9e70ab0686deadfe10993eb429ba --- /dev/null +++ b/docs/tools/patches/6.txt @@ -0,0 +1,3 @@ +*Please refer to `here `__ for more APIs (e.g., ``nni.get_sequence_id()``\ ) provided by NNI. +%%%%%% +*Please refer to `here `__ for more APIs (e.g., ``nni.get_sequence_id()``\ ) provided by NNI.* diff --git a/docs/tools/patches/7.txt b/docs/tools/patches/7.txt new file mode 100644 index 0000000000000000000000000000000000000000..57393d54278c2be04fad65f3263df569e5c66d0d --- /dev/null +++ b/docs/tools/patches/7.txt @@ -0,0 +1,44 @@ + #. For each filter + .. image:: http://latex.codecogs.com/gif.latex?F_{i,j} + :target: http://latex.codecogs.com/gif.latex?F_{i,j} + :alt: + , calculate the sum of its absolute kernel weights + .. image:: http://latex.codecogs.com/gif.latex?s_j=\sum_{l=1}^{n_i}\sum|K_l| + :target: http://latex.codecogs.com/gif.latex?s_j=\sum_{l=1}^{n_i}\sum|K_l| + :alt: + + #. Sort the filters by + .. image:: http://latex.codecogs.com/gif.latex?s_j + :target: http://latex.codecogs.com/gif.latex?s_j + :alt: + . + #. Prune + .. image:: http://latex.codecogs.com/gif.latex?m + :target: http://latex.codecogs.com/gif.latex?m + :alt: + filters with the smallest sum values and their corresponding feature maps. The + kernels in the next convolutional layer corresponding to the pruned feature maps are also + .. code-block:: bash + + removed. + + #. A new kernel matrix is created for both the + .. image:: http://latex.codecogs.com/gif.latex?i + :target: http://latex.codecogs.com/gif.latex?i + :alt: + th and + .. image:: http://latex.codecogs.com/gif.latex?i+1 + :target: http://latex.codecogs.com/gif.latex?i+1 + :alt: + th layers, and the remaining kernel + weights are copied to the new model. +%%%%%% + #. For each filter :math:`F_{i,j}`, calculate the sum of its absolute kernel weights :math:`s_j=\sum_{l=1}^{n_i}\sum|K_l|`. + + #. Sort the filters by :math:`s_j`. + + #. Prune :math:`m` filters with the smallest sum values and their corresponding feature maps. The + kernels in the next convolutional layer corresponding to the pruned feature maps are also removed. + + #. A new kernel matrix is created for both the :math:`i`-th and :math:`i+1`-th layers, and the remaining kernel + weights are copied to the new model. diff --git a/docs/tools/patches/8.txt b/docs/tools/patches/8.txt new file mode 100644 index 0000000000000000000000000000000000000000..f92d650f6643ee2a5598b24e01f61c18d0cccac8 --- /dev/null +++ b/docs/tools/patches/8.txt @@ -0,0 +1,25 @@ +#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes +#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use $(HOME)/.kube/config as kubeconfig file's path. You can also specify other kubeconfig files by setting the** KUBECONFIG** environment variable. Refer this `guideline `__ to learn more about kubeconfig. +#. If your NNI trial job needs GPU resource, you should follow this `guideline `__ to configure **Nvidia device plugin for Kubernetes**. +#. Prepare a **NFS server** and export a general purpose mount (we recommend to map your NFS server path in ``root_squash option``\ , otherwise permission issue may raise when NNI copies files to NFS. Refer this `page `__ to learn what root_squash option is), or** Azure File Storage**. +#. + Install **NFS client** on the machine where you install NNI and run nnictl to create experiment. 
Run this command to install NFSv4 client:
+
+   .. code-block:: bash
+
+       apt-get install nfs-common
+
+#.
+   Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart.rst>`__.
+%%%%%%
+#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes.
+#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use $(HOME)/.kube/config as the kubeconfig file's path. You can also specify other kubeconfig files by setting the **KUBECONFIG** environment variable. Refer to this `guideline `__ to learn more about kubeconfig.
+#. If your NNI trial job needs GPU resources, you should follow this `guideline `__ to configure the **Nvidia device plugin for Kubernetes**.
+#. Prepare an **NFS server** and export a general purpose mount (we recommend mapping your NFS server path with the ``root_squash`` option, otherwise permission issues may arise when NNI copies files to NFS. Refer to this `page `__ to learn what the root_squash option is), or **Azure File Storage**.
+#. Install the **NFS client** on the machine where you install NNI and run nnictl to create experiments. Run this command to install the NFSv4 client:
+
+.. code-block:: bash
+
+   apt-get install nfs-common
+
+#. Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart>`__.
diff --git a/docs/tools/patches/9.txt b/docs/tools/patches/9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17560dceabb1b25c773dc18b0db5b615d1835a45
--- /dev/null
+++ b/docs/tools/patches/9.txt
@@ -0,0 +1,27 @@
+#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes
+#. Download, set up, and deploy **Kubeflow** to your Kubernetes cluster. Follow this `guideline `__ to setup Kubeflow.
+#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use $(HOME)/.kube/config as kubeconfig file's path. You can also specify other kubeconfig files by setting the** KUBECONFIG** environment variable. Refer this `guideline `__ to learn more about kubeconfig.
+#. If your NNI trial job needs GPU resource, you should follow this `guideline `__ to configure **Nvidia device plugin for Kubernetes**.
+#. Prepare a **NFS server** and export a general purpose mount (we recommend to map your NFS server path in ``root_squash option``\ , otherwise permission issue may raise when NNI copy files to NFS. Refer this `page `__ to learn what root_squash option is), or** Azure File Storage**.
+#.
+   Install **NFS client** on the machine where you install NNI and run nnictl to create experiment. Run this command to install NFSv4 client:
+
+   .. code-block:: bash
+
+       apt-get install nfs-common
+
+#.
+   Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart.rst>`__.
+%%%%%%
+#. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this `guideline `__ to set up Kubernetes.
+#. Download, set up, and deploy **Kubeflow** to your Kubernetes cluster. Follow this `guideline `__ to set up Kubeflow.
+#. Prepare a **kubeconfig** file, which will be used by NNI to interact with your Kubernetes API server. By default, NNI manager will use $(HOME)/.kube/config as the kubeconfig file's path. You can also specify other kubeconfig files by setting the **KUBECONFIG** environment variable. Refer to this `guideline `__ to learn more about kubeconfig.
+#. If your NNI trial job needs GPU resources, you should follow this `guideline `__ to configure the **Nvidia device plugin for Kubernetes**.
+#. Prepare an **NFS server** and export a general purpose mount (we recommend mapping your NFS server path with the ``root_squash`` option, otherwise permission issues may arise when NNI copies files to NFS. Refer to this `page `__ to learn what the root_squash option is), or **Azure File Storage**.
+#. Install the **NFS client** on the machine where you install NNI and run nnictl to create experiments. Run this command to install the NFSv4 client:
+
+.. code-block:: bash
+
+   apt-get install nfs-common
+
+#. Install **NNI**\ , follow the install guide `here <../Tutorial/QuickStart>`__.
diff --git a/docs/tools/restoremd.py b/docs/tools/restoremd.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c9e32e229efd25afa45ccc3f175f3f1a095bce9
--- /dev/null
+++ b/docs/tools/restoremd.py
@@ -0,0 +1,11 @@
+import os
+import shutil
+from pathlib import Path
+
+
+for root, dirs, files in os.walk('archive_en_US'):
+    root = Path(root)
+    for file in files:
+        moved_root = Path('en_US') / root.relative_to('archive_en_US')
+        shutil.move(root / file, moved_root / file)
+        os.remove(moved_root / (Path(file).stem + '.rst'))
diff --git a/docs/zh_CN/Assessor/BuiltinAssessor.rst b/docs/zh_CN/Assessor/BuiltinAssessor.rst
new file mode 120000
index 0000000000000000000000000000000000000000..1871898d59f56bfc5d78bc682783ae9295e563c8
--- /dev/null
+++ b/docs/zh_CN/Assessor/BuiltinAssessor.rst
@@ -0,0 +1 @@
+../../en_US/Assessor/BuiltinAssessor.rst
\ No newline at end of file
diff --git a/docs/zh_CN/Assessor/CurvefittingAssessor.rst b/docs/zh_CN/Assessor/CurvefittingAssessor.rst
new file mode 120000
index 0000000000000000000000000000000000000000..c71d333422ab7705badfc2c03ea2f67c187dc0ec
--- /dev/null
+++ b/docs/zh_CN/Assessor/CurvefittingAssessor.rst
@@ -0,0 +1 @@
+../../en_US/Assessor/CurvefittingAssessor.rst
\ No newline at end of file
diff --git a/docs/zh_CN/Assessor/CustomizeAssessor.rst b/docs/zh_CN/Assessor/CustomizeAssessor.rst
new file mode 120000
index 0000000000000000000000000000000000000000..162279a8cafc7092e1cd1aa5a5a63b96e7fb7a0b
--- /dev/null
+++ b/docs/zh_CN/Assessor/CustomizeAssessor.rst
@@ -0,0 +1 @@
+../../en_US/Assessor/CustomizeAssessor.rst
\ No newline at end of file
diff --git a/docs/zh_CN/Assessor/MedianstopAssessor.rst b/docs/zh_CN/Assessor/MedianstopAssessor.rst
new file mode 120000
index 0000000000000000000000000000000000000000..8544c896e4963b560f96516c6378fe95bc9f514e
--- /dev/null
+++ b/docs/zh_CN/Assessor/MedianstopAssessor.rst
@@ -0,0 +1 @@
+../../en_US/Assessor/MedianstopAssessor.rst
\ No newline at end of file
diff --git a/docs/zh_CN/CommunitySharings/HpoComparison.rst b/docs/zh_CN/CommunitySharings/HpoComparison.rst
new file mode 120000
index 0000000000000000000000000000000000000000..960024ad47105b275302dfad17a837454d4b8489
--- /dev/null
+++ b/docs/zh_CN/CommunitySharings/HpoComparison.rst
@@ -0,0 +1 @@
+../../en_US/CommunitySharings/HpoComparison.rst
\ No newline at end of file
diff --git a/docs/zh_CN/CommunitySharings/ModelCompressionComparison.rst b/docs/zh_CN/CommunitySharings/ModelCompressionComparison.rst
new file mode 120000
index 0000000000000000000000000000000000000000..ff691c0c4492fd62055e10a2c0dab90af7597f42
--- /dev/null
+++ b/docs/zh_CN/CommunitySharings/ModelCompressionComparison.rst
@@ -0,0 +1 @@
+../../en_US/CommunitySharings/ModelCompressionComparison.rst
\ No newline at end of file
diff --git
a/docs/zh_CN/CommunitySharings/NNI_AutoFeatureEng.rst b/docs/zh_CN/CommunitySharings/NNI_AutoFeatureEng.rst new file mode 120000 index 0000000000000000000000000000000000000000..5c6eadc99b116748125c16b2b5ad6ea6c122a0ff --- /dev/null +++ b/docs/zh_CN/CommunitySharings/NNI_AutoFeatureEng.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/NNI_AutoFeatureEng.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/NNI_colab_support.rst b/docs/zh_CN/CommunitySharings/NNI_colab_support.rst new file mode 120000 index 0000000000000000000000000000000000000000..7e987a566ddd61f5af714f1ad3a6796f98dad290 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/NNI_colab_support.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/NNI_colab_support.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/NasComparison.rst b/docs/zh_CN/CommunitySharings/NasComparison.rst new file mode 120000 index 0000000000000000000000000000000000000000..630234d6475d6ccadd08cf73041835d8ecd57c47 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/NasComparison.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/NasComparison.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/ParallelizingTpeSearch.rst b/docs/zh_CN/CommunitySharings/ParallelizingTpeSearch.rst new file mode 120000 index 0000000000000000000000000000000000000000..0b7e3a44f9a6b775d21fb3d2c9c7c9bc284f9816 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/ParallelizingTpeSearch.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/ParallelizingTpeSearch.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/RecommendersSvd.rst b/docs/zh_CN/CommunitySharings/RecommendersSvd.rst new file mode 120000 index 0000000000000000000000000000000000000000..dc97c4825f62cfb4c28239ad4102a6975ae38197 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/RecommendersSvd.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/RecommendersSvd.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/SptagAutoTune.rst b/docs/zh_CN/CommunitySharings/SptagAutoTune.rst new file mode 120000 index 0000000000000000000000000000000000000000..633b69514c61a955ad2a3b6f4ece7b3dd555ba15 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/SptagAutoTune.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/SptagAutoTune.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/automodel.rst b/docs/zh_CN/CommunitySharings/automodel.rst new file mode 100644 index 0000000000000000000000000000000000000000..a717ef0993688a52128ca3cceb4878a44c4f65cc --- /dev/null +++ b/docs/zh_CN/CommunitySharings/automodel.rst @@ -0,0 +1,15 @@ +.. 21be18c35dee2702eb1c7a805dcfd939 + +###################### +自动模型调优 +###################### + +NNI 可以应用于各种模型调优任务。 一些最先进的模型搜索算法,如EfficientNet,可以很容易地在NNI上构建。 流行的模型,例如,推荐模型,可以使用 NNI 进行调优。 下面是一些用例,展示了如何在您的模型调优任务中使用 NNI,以及如何使用 NNI 构建您自己的流水线。 + +.. toctree:: + :maxdepth: 1 + + SVD 自动调优 + NNI 中的 EfficientNet <../TrialExample/EfficientNet> + 用于阅读理解的自动模型架构搜索 <../TrialExample/SquadEvolutionExamples> + TPE 的并行优化 \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/autosys.rst b/docs/zh_CN/CommunitySharings/autosys.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf3c0e89a823d3b6d68406922ad5388cfce16cc1 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/autosys.rst @@ -0,0 +1,14 @@ +.. 
e0791b39c8c362669300ce55b42e997b + +####################### +自动系统调优 +####################### + +数据库、张量算子实现等系统的性能往往需要进行调优,以适应特定的硬件配置、目标工作负载等。 手动调优系统非常复杂,并且通常需要对硬件和工作负载有详细的了解。 NNI 可以使这些任务变得更容易,并帮助系统所有者自动找到系统的最佳配置。 自动系统调优的详细设计思想可以在 `这篇论文 `__ 中找到。 以下是 NNI 可以发挥作用的一些典型案例。 + +.. toctree:: + :maxdepth: 1 + + 自动调优 SPTAG(Space Partition Tree And Graph) + 调优 RocksDB 的性能<../TrialExample/RocksdbExamples> + 自动调优张量算子<../TrialExample/OpEvoExamples> \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/community_sharings.rst b/docs/zh_CN/CommunitySharings/community_sharings.rst new file mode 120000 index 0000000000000000000000000000000000000000..076f43165b899e0d165896b280fbbde9a7b7b2fb --- /dev/null +++ b/docs/zh_CN/CommunitySharings/community_sharings.rst @@ -0,0 +1 @@ +../../en_US/CommunitySharings/community_sharings.rst \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/feature_engineering.rst b/docs/zh_CN/CommunitySharings/feature_engineering.rst new file mode 100644 index 0000000000000000000000000000000000000000..7108b5c061891e2a2f6e8f0dc7d45d57214cba38 --- /dev/null +++ b/docs/zh_CN/CommunitySharings/feature_engineering.rst @@ -0,0 +1,12 @@ +.. 6b887244cf8fbace30971173f8c6fe8a + +################### +特征工程 +################### + +以下是关于 NNI 如何助力特征工程的文章,由社区贡献者分享。 将来会添加更多用例和解决方案。 + +.. toctree:: + :maxdepth: 1 + + 来自知乎的评论:作者 Garvin Li \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/model_compression.rst b/docs/zh_CN/CommunitySharings/model_compression.rst new file mode 100644 index 0000000000000000000000000000000000000000..409ca8e841867c436756b61da844364c211a8cfd --- /dev/null +++ b/docs/zh_CN/CommunitySharings/model_compression.rst @@ -0,0 +1,12 @@ +.. 8a4c54c8127199ad4c95e20247e33fe2 + +################# +模型压缩 +################# + +以下介绍了如何将知识蒸馏应用于 NNI 模型压缩。 将来会添加更多用例和解决方案。 + +.. toctree:: + :maxdepth: 1 + + 使用 NNI 模型压缩进行知识蒸馏<../TrialExample/KDExample> \ No newline at end of file diff --git a/docs/zh_CN/CommunitySharings/perf_compare.rst b/docs/zh_CN/CommunitySharings/perf_compare.rst new file mode 100644 index 0000000000000000000000000000000000000000..72618dab8ab780ee63d38e44d785b8bc9591f39c --- /dev/null +++ b/docs/zh_CN/CommunitySharings/perf_compare.rst @@ -0,0 +1,14 @@ +.. 7d625bd21018f53834e9db08a619c494 + +################################################ +性能测量,比较和分析 +################################################ + +性能比较和分析可以帮助用户在他们的场景中选择适合的算法(例如 Tuner,NAS 算法)。 以下是一些供用户参考的测量和比较数据。 + +.. 
toctree:: + :maxdepth: 1 + + 神经网络结构搜索(NAS)的对比 + 超参调优算法的对比 + 模型压缩算法的对比 \ No newline at end of file diff --git a/docs/zh_CN/Compression/AutoCompression.rst b/docs/zh_CN/Compression/AutoCompression.rst new file mode 120000 index 0000000000000000000000000000000000000000..e229574ed204f70c34852dff233686aaa5a76462 --- /dev/null +++ b/docs/zh_CN/Compression/AutoCompression.rst @@ -0,0 +1 @@ +../../en_US/Compression/AutoCompression.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/CompressionReference.rst b/docs/zh_CN/Compression/CompressionReference.rst new file mode 120000 index 0000000000000000000000000000000000000000..b4d383af25fcf5b09e4b080fb61055337805963b --- /dev/null +++ b/docs/zh_CN/Compression/CompressionReference.rst @@ -0,0 +1 @@ +../../en_US/Compression/CompressionReference.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/CompressionUtils.rst b/docs/zh_CN/Compression/CompressionUtils.rst new file mode 120000 index 0000000000000000000000000000000000000000..0d4e1def326ce2b2424da795700fe62bba6c1cc0 --- /dev/null +++ b/docs/zh_CN/Compression/CompressionUtils.rst @@ -0,0 +1 @@ +../../en_US/Compression/CompressionUtils.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/CustomizeCompressor.rst b/docs/zh_CN/Compression/CustomizeCompressor.rst new file mode 120000 index 0000000000000000000000000000000000000000..8b851589d25973e4baaa9981c98e7886c5e4b1d0 --- /dev/null +++ b/docs/zh_CN/Compression/CustomizeCompressor.rst @@ -0,0 +1 @@ +../../en_US/Compression/CustomizeCompressor.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/DependencyAware.rst b/docs/zh_CN/Compression/DependencyAware.rst new file mode 120000 index 0000000000000000000000000000000000000000..aa9a6d48ffb3efabfd9d1560097a8f25c20f4cb2 --- /dev/null +++ b/docs/zh_CN/Compression/DependencyAware.rst @@ -0,0 +1 @@ +../../en_US/Compression/DependencyAware.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/Framework.rst b/docs/zh_CN/Compression/Framework.rst new file mode 120000 index 0000000000000000000000000000000000000000..8af0cd709ee5afd999831eb2275b892faaa6eba8 --- /dev/null +++ b/docs/zh_CN/Compression/Framework.rst @@ -0,0 +1 @@ +../../en_US/Compression/Framework.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/ModelSpeedup.rst b/docs/zh_CN/Compression/ModelSpeedup.rst new file mode 120000 index 0000000000000000000000000000000000000000..1d77b8bbc9536bd3ea9c79310d11c5f7aa373595 --- /dev/null +++ b/docs/zh_CN/Compression/ModelSpeedup.rst @@ -0,0 +1 @@ +../../en_US/Compression/ModelSpeedup.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/Overview.rst b/docs/zh_CN/Compression/Overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..0274596c50d8fd4dfe9f3de94b1dc55106ef256b --- /dev/null +++ b/docs/zh_CN/Compression/Overview.rst @@ -0,0 +1,134 @@ +.. 37577199d91c137b881450f825f38fa2 + +使用 NNI 进行模型压缩 +==================== + +.. contents:: + +目前的大型神经网络较之以往具有更多的层和节点,而如何降低其存储和计算成本是一个重要的话题,尤其是针对于那些需要实时响应的应用程序。 +模型压缩的相关方法可以用于解决这些问题。 + +NNI 的模型压缩工具包,提供了最先进的模型压缩算法和策略,帮助压缩并加速模型。NNI 模型压缩支持的主要功能有: + + +* 支持多种流行的剪枝和量化算法。 +* 通过 NNI 强大的自动调优功能,可使用最先进的策略来自动化模型的剪枝和量化过程。 +* 加速压缩的模型,使其在推理时有更低的延迟,同时文件也会变小。 +* 提供友好易用的压缩工具,帮助用户深入了解压缩过程和结果。 +* 提供简洁的接口,帮助用户实现自己的压缩算法。 + + +压缩流水线 +---------- + +.. image:: ../../img/compression_flow.jpg + :target: ../../img/compression_flow.jpg + :alt: + +NNI整体的模型压缩流水线图。对于压缩一个预训练的模型,剪枝和量化可以单独使用或结合使用。 + +.. 
note:: + NNI 压缩算法并不意味着真正使模型变小或者减少延迟,NNI 的加速工具才可以真正压缩模型并减少延迟。要获得真正压缩后的模型,用户应该进行 `模型加速 <./ModelSpeedup.rst>`__。* 注意,PyTorch 和 TensorFlow 有统一的 API 接口,当前仅支持 PyTorch 版本,未来会提供 TensorFlow 的支持。 + +支持的算法 +---------- + +包括剪枝和量化算法。 + +剪枝算法 +^^^^^^^^ + +剪枝算法通过删除冗余权重或层通道来压缩原始网络,从而降低模型复杂性并解决过拟合问题。 + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - 名称 + - 算法简介 + * - `Level Pruner `__ + - 根据权重的绝对值,来按比例修剪权重。 + * - `AGP Pruner <../Compression/Pruner.rst#agp-pruner>`__ + - 自动的逐步剪枝(To prune, or not to prune: exploring the efficacy of pruning for model compression)`参考论文 `__ + * - `Lottery Ticket Pruner <../Compression/Pruner.rst#lottery-ticket>`__ + - "The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks" 提出的剪枝过程。 它会反复修剪模型。 `参考论文 `__ + * - `FPGM Pruner <../Compression/Pruner.rst#fpgm-pruner>`__ + - Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration `参考论文 `__ + * - `L1Filter Pruner <../Compression/Pruner.rst#l1filter-pruner>`__ + - 在卷积层中具有最小 L1 权重规范的剪枝滤波器。(Pruning Filters for Efficient Convnets) `参考论文 `__ + * - `L2Filter Pruner <../Compression/Pruner.rst#l2filter-pruner>`__ + - 在卷积层中具有最小 L2 权重规范的剪枝滤波器。 + * - `ActivationAPoZRankFilterPruner <../Compression/Pruner.rst#activationapozrankfilter-pruner>`__ + - 基于指标 APoZ(平均百分比零)的剪枝滤波器,该指标测量(卷积)图层激活值中零的百分比。 `参考论文 `__ + * - `ActivationMeanRankFilterPruner <../Compression/Pruner.rst#activationmeanrankfilter-pruner>`__ + - 基于计算输出激活最小平均值指标的剪枝滤波器。 + * - `Slim Pruner <../Compression/Pruner.rst#slim-pruner>`__ + - 通过修剪 BN 层中的缩放因子来修剪卷积层中的通道。 (Learning Efficient Convolutional Networks through Network Slimming) `参考论文 `__ + * - `TaylorFO Pruner <../Compression/Pruner.rst#taylorfoweightfilter-pruner>`__ + - 基于一阶泰勒展开的权重对滤波器剪枝。 (Importance Estimation for Neural Network Pruning) `参考论文 `__ + * - `ADMM Pruner <../Compression/Pruner.rst#admm-pruner>`__ + - 基于 ADMM 优化技术的剪枝。 `参考论文 `__ + * - `NetAdapt Pruner <../Compression/Pruner.rst#netadapt-pruner>`__ + - 在满足计算资源预算的情况下,对预训练的网络迭代剪枝。 `参考论文 `__ + * - `SimulatedAnnealing Pruner <../Compression/Pruner.rst#simulatedannealing-pruner>`__ + - 通过启发式的模拟退火算法进行自动剪枝。 `参考论文 `__ + * - `AutoCompress Pruner <../Compression/Pruner.rst#autocompress-pruner>`__ + - 通过迭代调用 SimulatedAnnealing Pruner 和 ADMM Pruner 进行自动剪枝。 `参考论文 - `__ + * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ + - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `参考论文 `__ + * - `Transformer Head Pruner <../Compression/Pruner.rst#transformer-head-pruner>`__ + - 针对transformer中的注意力头的剪枝. + + +参考此 :githublink:`基准测试 <../CommunitySharings/ModelCompressionComparison.rst>` 来查看这些剪枝器在一些基准问题上的表现。 + +量化算法 +^^^^^^^^ + +量化算法通过减少表示权重或激活函数所需的精度位数来压缩原始网络,这可以减少计算和推理时间。 + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - 名称 + - 算法简介 + * - `Naive Quantizer <../Compression/Quantizer.rst#naive-quantizer>`__ + - 默认将权重量化为 8 位。 + * - `QAT Quantizer <../Compression/Quantizer.rst#qat-quantizer>`__ + - Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. `参考论文 `__ + * - `DoReFa Quantizer <../Compression/Quantizer.rst#dorefa-quantizer>`__ + - DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. `参考论文 `__ + * - `BNN Quantizer <../Compression/Quantizer.rst#bnn-quantizer>`__ + - Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1. `参考论文 `__ + * - `LSQ Quantizer <../Compression/Quantizer.rst#lsq-quantizer>`__ + - Learned step size quantization. 
`参考论文 `__ + * - `Observer Quantizer <../Compression/Quantizer.rst#observer-quantizer>`__ + - Post training quantizaiton. 使用 observer 在校准期间收集量化信息。 + + +模型加速 +-------- + +模型压缩的目的是减少推理延迟和模型大小。但现有的模型压缩算法主要通过模拟的方法来检查压缩模型性能(如精度)。例如,剪枝算法中使用掩码,而量化算法中量化值仍然是以 32 位浮点数来存储。只要给出这些算法产生的掩码和量化位,NNI 可真正的加速模型。基于掩码的模型加速详细教程可以在 `这里 <./ModelSpeedup.rst>`__ 找到。混合精度量化的详细教程可以在 `这里 <./QuantizationSpeedup.rst>`__ 找到。 + + +压缩工具 +-------- + +压缩工具包括了一些有用的工具,能帮助用户理解并分析要压缩的模型。例如,可检查每层对剪枝的敏感度。可很容易的计算模型的 FLOPs 和参数数量。`点击这里 <./CompressionUtils.rst>`__,查看压缩工具的完整列表。 + +高级用法 +-------- + +NNI 模型压缩提供了简洁的接口,用于自定义新的压缩算法。接口的设计理念是,将框架相关的实现细节包装起来,让用户能聚焦于压缩逻辑。用户可以进一步了解我们的压缩框架,并根据我们的框架定制新的压缩算法(剪枝算法或量化算法)。此外,还可利用 NNI 的自动调参功能来自动的压缩模型。参考 `这里 <./advanced.rst>`__ 了解更多细节。 + + +参考和反馈 +---------- + +* 在Github 中 `提交此功能的 Bug `__ +* 在Github 中 `提交新功能或请求改进 `__ +* 了解更多关于 NNI 中的 `特征工程 <../FeatureEngineering/Overview.rst>`__\ ; +* 了解更多关于 NNI 中的 `NAS <../NAS/Overview.rst>`__\ ; +* 了解更多关于 NNI 中的 `超参调优 <../Tuner/BuiltinTuner.rst>`__\ ; diff --git a/docs/zh_CN/Compression/Pruner.rst b/docs/zh_CN/Compression/Pruner.rst new file mode 120000 index 0000000000000000000000000000000000000000..6eff6270b1733d9d736e0a4bd4d38008f86e69d7 --- /dev/null +++ b/docs/zh_CN/Compression/Pruner.rst @@ -0,0 +1 @@ +../../en_US/Compression/Pruner.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/QuantizationSpeedup.rst b/docs/zh_CN/Compression/QuantizationSpeedup.rst new file mode 120000 index 0000000000000000000000000000000000000000..812ddb1c92c8e06bb700f64334181886e2262ebd --- /dev/null +++ b/docs/zh_CN/Compression/QuantizationSpeedup.rst @@ -0,0 +1 @@ +../../en_US/Compression/QuantizationSpeedup.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/Quantizer.rst b/docs/zh_CN/Compression/Quantizer.rst new file mode 120000 index 0000000000000000000000000000000000000000..37486854ca0070a045098b55726363b3f0bc1e27 --- /dev/null +++ b/docs/zh_CN/Compression/Quantizer.rst @@ -0,0 +1 @@ +../../en_US/Compression/Quantizer.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/QuickStart.rst b/docs/zh_CN/Compression/QuickStart.rst new file mode 100644 index 0000000000000000000000000000000000000000..e7013bfe377ce9950dfcbc1869c2110cc9deb512 --- /dev/null +++ b/docs/zh_CN/Compression/QuickStart.rst @@ -0,0 +1,132 @@ +.. a67033195635ebcd510103eab8703b6a + +快速入门 +=========== + +.. toctree:: + :hidden: + + Notebook Example + + +模型压缩通常包括三个阶段:1)预训练模型,2)压缩模型,3)微调模型。 NNI 主要关注于第二阶段,并为模型压缩提供易于使用的 API。 +遵循本指南,您将快速了解如何使用 NNI 来压缩模型。 +更深入地了解 NNI 中的模型压缩模块,请查看 `Tutorial <./Tutorial.rst>`__。 +提供了一个在 Jupyter notebook 中进行完整的模型压缩流程的 `示例 <./compression_pipeline_example.rst>`__,参考 :githublink:`代码 `。 + +模型剪枝 +------------- + +这里通过 `level pruner <../Compression/Pruner.rst#level-pruner>`__ 举例说明 NNI 中模型剪枝的用法。 + +Step1. 编写配置 +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +编写配置来指定要剪枝的层。以下配置表示剪枝所有的 ``default`` 层,稀疏度设为 0.5,其它层保持不变。 + +.. code-block:: python + + config_list = [{ + 'sparsity': 0.5, + 'op_types': ['default'], + }] + +配置说明在 `这里 <./Tutorial.rst#specify-the-configuration>`__。注意,不同的 Pruner 可能有自定义的配置字段。 +详情参考每个 Pruner 的 `具体用法 <./Pruner.rst>`__,来调整相应的配置。 + +Step2. 选择 Pruner 来压缩模型 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +首先,使用模型来初始化 Pruner,并将配置作为参数传入,然后调用 ``compress()`` 来压缩模型。 +请注意,有些算法可能会检查训练过程中的梯度,因此我们可能会定义一组 trainer, optimizer, criterion 并传递给 Pruner。 + +.. 
code-block:: python + + from nni.algorithms.compression.pytorch.pruning import LevelPruner + + pruner = LevelPruner(model, config_list) + model = pruner.compress() + +然后,使用正常的训练方法来训练模型 (如,SGD),剪枝在训练过程中是透明的。 +有些 Pruner(如 L1FilterPruner、FPGMPruner)在开始时修剪一次,下面的训练可以看作是微调。 +有些 Pruner(例如AGPPruner)会迭代的对模型剪枝,在训练过程中逐步修改掩码。 + +如果使用 Pruner 进行迭代剪枝,或者剪枝过程中需要训练或者推理,则需要将 finetune 逻辑传到 Pruner 中。 + +例如: + +.. code-block:: python + + from nni.algorithms.compression.pytorch.pruning import AGPPruner + + pruner = AGPPruner(model, config_list, optimizer, trainer, criterion, num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level') + model = pruner.compress() + + +Step3. 导出压缩结果 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +训练之后,可将模型权重导出到文件,同时将生成的掩码也导出到文件, 也支持导出 ONNX 模型。 + +.. code-block:: python + + pruner.export_model(model_path='pruned_vgg19_cifar10.pth', mask_path='mask_vgg19_cifar10.pth') + +参考 :githublink:`mnist 示例 ` 获取代码。 + +更多剪枝算法的示例在 :githublink:`basic_pruners_torch ` 和 :githublink:`auto_pruners_torch `。 + + +模型量化 +------------------ + +这里通过 `QAT Quantizer <../Compression/Quantizer.rst#qat-quantizer>`__ 举例说明在 NNI 中量化的用法。 + +Step1. 编写配置 +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + config_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': { + 'weight': 8, + 'input': 8, + }, # 这里可以仅使用 `int`,因为所有 `quan_types` 使用了一样的位长,参考下方 `ReLu6` 配置。 + 'op_types':['Conv2d', 'Linear'], + 'quant_dtype': 'int', + 'quant_scheme': 'per_channel_symmetric' + }, { + 'quant_types': ['output'], + 'quant_bits': 8, + 'quant_start_step': 7000, + 'op_types':['ReLU6'], + 'quant_dtype': 'uint', + 'quant_scheme': 'per_tensor_affine' + }] + +配置说明在 `这里 <./Tutorial.rst#quantization-specific-keys>`__。 + +Step2. 选择 Quantizer 来压缩模型 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer + + quantizer = QAT_Quantizer(model, config_list) + quantizer.compress() + + +Step3. 导出压缩结果 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +在训练和校准之后,你可以将模型权重导出到一个文件,并将生成的校准参数也导出到一个文件。 也支持导出 ONNX 模型。 + +.. code-block:: python + + calibration_config = quantizer.export_model(model_path, calibration_path, onnx_path, input_shape, device) + +参考 :githublink:`mnist example ` 获取示例代码。 + +恭喜! 您已经通过 NNI 压缩了您的第一个模型。 更深入地了解 NNI 中的模型压缩,请查看 `Tutorial <./Tutorial.rst>`__。 \ No newline at end of file diff --git a/docs/zh_CN/Compression/Tutorial.rst b/docs/zh_CN/Compression/Tutorial.rst new file mode 120000 index 0000000000000000000000000000000000000000..c5b6f33e5406fe7dd129cdf4c5255877808f1475 --- /dev/null +++ b/docs/zh_CN/Compression/Tutorial.rst @@ -0,0 +1 @@ +../../en_US/Compression/Tutorial.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/advanced.rst b/docs/zh_CN/Compression/advanced.rst new file mode 100644 index 0000000000000000000000000000000000000000..c65ebbf35f2e7e6cfafe3af1ebe272c68c30f442 --- /dev/null +++ b/docs/zh_CN/Compression/advanced.rst @@ -0,0 +1,11 @@ +.. acd3f66ad7c2d82b950568efcba1f175 + +高级用法 +============== + +.. 
toctree:: + :maxdepth: 2 + + 框架 <./Framework> + 自定义压缩算法 <./CustomizeCompressor> + 自动模型压缩 (Beta) <./AutoCompression> diff --git a/docs/zh_CN/Compression/compression_pipeline_example.ipynb b/docs/zh_CN/Compression/compression_pipeline_example.ipynb new file mode 120000 index 0000000000000000000000000000000000000000..d1fb24bf324d04ad4e5085b88654d399c9643d11 --- /dev/null +++ b/docs/zh_CN/Compression/compression_pipeline_example.ipynb @@ -0,0 +1 @@ +../../en_US/Compression/compression_pipeline_example.ipynb \ No newline at end of file diff --git a/docs/zh_CN/Compression/pruning.rst b/docs/zh_CN/Compression/pruning.rst new file mode 100644 index 0000000000000000000000000000000000000000..04d2eb7a337f8d0a4e33f7b42c08d5655adf7c30 --- /dev/null +++ b/docs/zh_CN/Compression/pruning.rst @@ -0,0 +1,27 @@ +.. 0f2050a973cfb2207984b4e58c4baf28 + +################# +剪枝 +################# + +剪枝是一种常用的神经网络模型压缩技术。 +剪枝算法探索模型权重(参数)中的冗余,并尝试去除冗余和非关键权重, +将它们的值归零,确保其不参与反向传播过程。 + +从剪枝粒度的角度来看,细粒度剪枝或非结构化剪枝是指分别对每个权重进行剪枝。 +粗粒度剪枝或结构化剪枝是修剪整组权重,例如卷积滤波器。 + +NNI 提供了多种非结构化和结构化剪枝算法。 +其使用了统一的接口来支持 TensorFlow 和 PyTorch。 +只需要添加几行代码即可压缩模型。 +对于结构化滤波器剪枝,NNI 还提供了依赖感知模式。 在依赖感知模式下, +滤波器剪枝在加速后会获得更好的速度增益。 + +详细信息,参考以下教程: + +.. toctree:: + :maxdepth: 2 + + Pruners + 依赖感知模式 + 模型加速 diff --git a/docs/zh_CN/Compression/quantization.rst b/docs/zh_CN/Compression/quantization.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef2a474dee123ad006c34c0f90e0aefb9f263cd4 --- /dev/null +++ b/docs/zh_CN/Compression/quantization.rst @@ -0,0 +1,20 @@ +.. fe32a6de0be31a992afadba5cf6ffe23 + +################# +量化 +################# + +量化是指通过减少权重表示或激活所需的比特数来压缩模型, +从而减少计算量和推理时间。 在深度神经网络的背景下,模型权重主要的数据 +格式是32位浮点数。 许多研究工作表明,在不显着降低精度的情况下,权重和激活 +可以使用8位整数表示, 更低的比特位数,例如4/2/1比特, +是否能够表示权重也是目前非常活跃的研究方向。 + +一个 Quantizer 是指一种 NNI 实现的量化算法,NNI 提供了多个 Quantizer,如下所示。你也可以 +使用 NNI 模型压缩的接口来创造你的 Quantizer。 + +.. toctree:: + :maxdepth: 2 + + Quantizers + 量化加速 diff --git a/docs/zh_CN/Compression/v2_pruning.rst b/docs/zh_CN/Compression/v2_pruning.rst new file mode 100644 index 0000000000000000000000000000000000000000..bd92ee337f8763d5c337ed8546583edb9a4ea426 --- /dev/null +++ b/docs/zh_CN/Compression/v2_pruning.rst @@ -0,0 +1,29 @@ +.. 1ec93e31648291b0c881655304116b50 + +################# +剪枝(V2版本) +################# + +剪枝(V2版本)是对旧版本的重构,提供了更强大的功能。 +与旧版本相比,迭代剪枝过程与剪枝器(pruner)分离,剪枝器只负责剪枝且生成掩码一次。 +更重要的是,V2版本统一了剪枝过程,并提供了更自由的剪枝组件组合。 +任务生成器(task generator)只关心在每一轮中应该达到的修剪效果,并使用配置列表(config list)来表示下一步如何修剪。 +剪枝器将使用任务生成器提供的模型和配置列表重置,然后在当前步骤中生成掩码。 + +有关更清晰的架构,请参考下图。 + +.. image:: ../../img/pruning_process.png + :target: ../../img/pruning_process.png + :alt: + +在V2版本中,修剪过程通常由剪枝调度器(pruning scheduler)驱动,它包含一个特定的剪枝器和一个任务生成器。 +但是用户也可以像V1版本中那样直接使用剪枝器。 + +有关详细信息,请参阅以下教程: + +.. 
toctree:: + :maxdepth: 1 + + 剪枝算法 <../en_US/Compression/v2_pruning_algo> + 剪枝调度器接口 <../en_US/Compression/v2_scheduler> + 剪枝配置 <../en_US/Compression/v2_pruning_config_list> diff --git a/docs/zh_CN/Compression/v2_pruning_algo.rst b/docs/zh_CN/Compression/v2_pruning_algo.rst new file mode 120000 index 0000000000000000000000000000000000000000..ee7341a7955b6f7648c336b70fc4d2701371582c --- /dev/null +++ b/docs/zh_CN/Compression/v2_pruning_algo.rst @@ -0,0 +1 @@ +../../en_US/Compression/v2_pruning_algo.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/v2_pruning_config_list.rst b/docs/zh_CN/Compression/v2_pruning_config_list.rst new file mode 120000 index 0000000000000000000000000000000000000000..4c2097d397139ae070eb1560a974bd9a157ab4a1 --- /dev/null +++ b/docs/zh_CN/Compression/v2_pruning_config_list.rst @@ -0,0 +1 @@ +../../en_US/Compression/v2_pruning_config_list.rst \ No newline at end of file diff --git a/docs/zh_CN/Compression/v2_scheduler.rst b/docs/zh_CN/Compression/v2_scheduler.rst new file mode 120000 index 0000000000000000000000000000000000000000..d1451b4b1dc6c9c6f1235990a478547964eece9e --- /dev/null +++ b/docs/zh_CN/Compression/v2_scheduler.rst @@ -0,0 +1 @@ +../../en_US/Compression/v2_scheduler.rst \ No newline at end of file diff --git a/docs/zh_CN/FeatureEngineering/GBDTSelector.rst b/docs/zh_CN/FeatureEngineering/GBDTSelector.rst new file mode 120000 index 0000000000000000000000000000000000000000..fba5c0ea4b77e82fce028ea74e717f1a9107bc31 --- /dev/null +++ b/docs/zh_CN/FeatureEngineering/GBDTSelector.rst @@ -0,0 +1 @@ +../../en_US/FeatureEngineering/GBDTSelector.rst \ No newline at end of file diff --git a/docs/zh_CN/FeatureEngineering/GradientFeatureSelector.rst b/docs/zh_CN/FeatureEngineering/GradientFeatureSelector.rst new file mode 120000 index 0000000000000000000000000000000000000000..140bae6bcf34fae8491d8824f41daaa44ca4f3d3 --- /dev/null +++ b/docs/zh_CN/FeatureEngineering/GradientFeatureSelector.rst @@ -0,0 +1 @@ +../../en_US/FeatureEngineering/GradientFeatureSelector.rst \ No newline at end of file diff --git a/docs/zh_CN/FeatureEngineering/Overview.rst b/docs/zh_CN/FeatureEngineering/Overview.rst new file mode 120000 index 0000000000000000000000000000000000000000..605f63ede9399548de2f5da30f017a58ac653447 --- /dev/null +++ b/docs/zh_CN/FeatureEngineering/Overview.rst @@ -0,0 +1 @@ +../../en_US/FeatureEngineering/Overview.rst \ No newline at end of file diff --git a/docs/zh_CN/Makefile b/docs/zh_CN/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..51285967a7d9722c5bdee4f6a81c154a56aa0846 --- /dev/null +++ b/docs/zh_CN/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_CN/NAS/ApiReference.rst b/docs/zh_CN/NAS/ApiReference.rst new file mode 120000 index 0000000000000000000000000000000000000000..a97d54314cd664191fbdc527f3fe04ce29babe4c --- /dev/null +++ b/docs/zh_CN/NAS/ApiReference.rst @@ -0,0 +1 @@ +../../en_US/NAS/ApiReference.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Benchmarks.rst b/docs/zh_CN/NAS/Benchmarks.rst new file mode 120000 index 0000000000000000000000000000000000000000..1bee12da1e4d63b52f45c854a0a269e0fa86b3e8 --- /dev/null +++ b/docs/zh_CN/NAS/Benchmarks.rst @@ -0,0 +1 @@ +../../en_US/NAS/Benchmarks.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/BenchmarksExample.ipynb b/docs/zh_CN/NAS/BenchmarksExample.ipynb new file mode 120000 index 0000000000000000000000000000000000000000..a4625863e82c503374a7ea9a43deecdc55e28d93 --- /dev/null +++ b/docs/zh_CN/NAS/BenchmarksExample.ipynb @@ -0,0 +1 @@ +../../en_US/NAS/BenchmarksExample.ipynb \ No newline at end of file diff --git a/docs/zh_CN/NAS/DARTS.rst b/docs/zh_CN/NAS/DARTS.rst new file mode 120000 index 0000000000000000000000000000000000000000..07f6bc375ecbd73cbe0ec5cb6e60c2af2648c3fc --- /dev/null +++ b/docs/zh_CN/NAS/DARTS.rst @@ -0,0 +1 @@ +../../en_US/NAS/DARTS.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/ENAS.rst b/docs/zh_CN/NAS/ENAS.rst new file mode 120000 index 0000000000000000000000000000000000000000..20eada04e3c2fc9466b6cc66ba0b1af848059223 --- /dev/null +++ b/docs/zh_CN/NAS/ENAS.rst @@ -0,0 +1 @@ +../../en_US/NAS/ENAS.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/ExecutionEngines.rst b/docs/zh_CN/NAS/ExecutionEngines.rst new file mode 120000 index 0000000000000000000000000000000000000000..ab3cde02d17480a9169a67b412854143b1e57b3e --- /dev/null +++ b/docs/zh_CN/NAS/ExecutionEngines.rst @@ -0,0 +1 @@ +../../en_US/NAS/ExecutionEngines.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/ExplorationStrategies.rst b/docs/zh_CN/NAS/ExplorationStrategies.rst new file mode 120000 index 0000000000000000000000000000000000000000..cea9d6814f217c1e20d166c29b7de386040d973e --- /dev/null +++ b/docs/zh_CN/NAS/ExplorationStrategies.rst @@ -0,0 +1 @@ +../../en_US/NAS/ExplorationStrategies.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/FBNet.rst b/docs/zh_CN/NAS/FBNet.rst new file mode 120000 index 0000000000000000000000000000000000000000..c262c2ef07ec1fb50eda09ed8b38bc0e2b8961e9 --- /dev/null +++ b/docs/zh_CN/NAS/FBNet.rst @@ -0,0 +1 @@ +../../en_US/NAS/FBNet.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/HardwareAwareNAS.rst b/docs/zh_CN/NAS/HardwareAwareNAS.rst new file mode 120000 index 0000000000000000000000000000000000000000..fe656ac2db2cf8ce660275943e2f0b92f4dcb382 --- /dev/null +++ b/docs/zh_CN/NAS/HardwareAwareNAS.rst @@ -0,0 +1 @@ +../../en_US/NAS/HardwareAwareNAS.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Hypermodules.rst b/docs/zh_CN/NAS/Hypermodules.rst new file mode 120000 index 0000000000000000000000000000000000000000..f1d23726352a6c910f7677d8e11576b9a8a02c81 --- /dev/null +++ b/docs/zh_CN/NAS/Hypermodules.rst @@ -0,0 +1 @@ +../../en_US/NAS/Hypermodules.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/ModelEvaluators.rst b/docs/zh_CN/NAS/ModelEvaluators.rst new file mode 120000 index 0000000000000000000000000000000000000000..e672e76c6f5345aa4415698d7aebebc47e0a5b7b --- /dev/null +++ b/docs/zh_CN/NAS/ModelEvaluators.rst @@ -0,0 +1 @@ 
+../../en_US/NAS/ModelEvaluators.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/MutationPrimitives.rst b/docs/zh_CN/NAS/MutationPrimitives.rst new file mode 120000 index 0000000000000000000000000000000000000000..3035330c15c28ec067dfd190b238850f269b9a99 --- /dev/null +++ b/docs/zh_CN/NAS/MutationPrimitives.rst @@ -0,0 +1 @@ +../../en_US/NAS/MutationPrimitives.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Mutators.rst b/docs/zh_CN/NAS/Mutators.rst new file mode 120000 index 0000000000000000000000000000000000000000..d4894dfa37ab9975fdc132f3b131c192b29ff3bd --- /dev/null +++ b/docs/zh_CN/NAS/Mutators.rst @@ -0,0 +1 @@ +../../en_US/NAS/Mutators.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/OneshotTrainer.rst b/docs/zh_CN/NAS/OneshotTrainer.rst new file mode 120000 index 0000000000000000000000000000000000000000..eac7077ef17cb6a7f6ef6d5e2dca0507f941c6c1 --- /dev/null +++ b/docs/zh_CN/NAS/OneshotTrainer.rst @@ -0,0 +1 @@ +../../en_US/NAS/OneshotTrainer.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Overview.rst b/docs/zh_CN/NAS/Overview.rst new file mode 120000 index 0000000000000000000000000000000000000000..a4c5f1b3dc59caa74b57567629a6311fe04ee6dc --- /dev/null +++ b/docs/zh_CN/NAS/Overview.rst @@ -0,0 +1 @@ +../../en_US/NAS/Overview.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Proxylessnas.rst b/docs/zh_CN/NAS/Proxylessnas.rst new file mode 120000 index 0000000000000000000000000000000000000000..ca819146d64916f7b68ae3341b2536829ab68895 --- /dev/null +++ b/docs/zh_CN/NAS/Proxylessnas.rst @@ -0,0 +1 @@ +../../en_US/NAS/Proxylessnas.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/QuickStart.rst b/docs/zh_CN/NAS/QuickStart.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ab661de4d2ef96420f0647e37450a8ff6396270 --- /dev/null +++ b/docs/zh_CN/NAS/QuickStart.rst @@ -0,0 +1,198 @@ +.. 2cbe7334076be1841320c31208c338ff + +快速入门 Retiarii +============================== + + +.. contents:: + +在快速入门教程中,我们以 multi-trial NAS 为例来展示如何构建和探索模型空间。 神经网络架构搜索任务主要有三个关键组件,即: + +* 模型搜索空间(Model search space),定义了要探索的模型集合。 +* 一个适当的策略(strategy),作为探索这个搜索空间的方法。 +* 一个模型评估器(model evaluator),报告一个给定模型的性能。 + +One-shot NAS 教程在 `这里 <./OneshotTrainer.rst>`__。 + +.. note:: 目前,PyTorch 是 Retiarii 唯一支持的框架,我们只用 **PyTorch 1.7 到 1.10** 进行了测试。 本文档假定读者具有 PyTorch 背景;对其他框架的支持在我们未来的计划中。 + +定义模型空间 +----------------------- + +模型空间是由用户定义的,用来表达用户想要探索、认为包含性能良好模型的一组模型。 在这个框架中,模型空间由两部分组成:基本模型和基本模型上可能的突变。 + +定义基本模型 +^^^^^^^^^^^^^^^^^ + +定义基本模型与定义 PyTorch(或 TensorFlow)模型几乎相同,只有两个小区别:一是对于 PyTorch 模块(例如 ``nn.Conv2d``、``nn.ReLU``),需要将代码 ``import torch.nn as nn`` 替换为 ``import nni.retiarii.nn.pytorch as nn``;二是需要在最外层的模块类上加上 ``@model_wrapper`` 装饰器。 + +下面是定义基本模型的一个简单的示例,它与定义 PyTorch 模型几乎相同。 + +.. code-block:: python + + import torch + import torch.nn.functional as F + import nni.retiarii.nn.pytorch as nn + from nni.retiarii import model_wrapper + + @model_wrapper # this decorator should be put on the outermost module + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + self.conv2 = nn.Conv2d(32, 64, 3, 1) + self.dropout1 = nn.Dropout(0.25) + self.dropout2 = nn.Dropout(0.5) + self.fc1 = nn.Linear(9216, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(self.conv2(x), 2) + x = torch.flatten(self.dropout1(x), 1) + x = self.fc2(self.dropout2(F.relu(self.fc1(x)))) + output = F.log_softmax(x, dim=1) + return output + +.. 
tip:: 记得使用 ``import nni.retiarii.nn.pytorch as nn`` 和 :meth:`nni.retiarii.model_wrapper`。许多错误都源于忘记使用它们。同时,对于 ``nn`` 的子模块(例如 ``nn.init``)请使用 ``torch.nn``,比如,``torch.nn.init`` 而不是 ``nn.init``。 + +定义模型突变 +^^^^^^^^^^^^^^^^^^^^^^ + +基本模型只是一个具体模型,而不是模型空间。 我们为用户提供 `API 和原语 <./MutationPrimitives.rst>`__,用于把基本模型变形成包含多个模型的模型空间。 + +基于上面定义的基本模型,我们可以这样定义一个模型空间: + +.. code-block:: diff + + import torch + import torch.nn.functional as F + import nni.retiarii.nn.pytorch as nn + from nni.retiarii import model_wrapper + + @model_wrapper + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + - self.conv2 = nn.Conv2d(32, 64, 3, 1) + + self.conv2 = nn.LayerChoice([ + + nn.Conv2d(32, 64, 3, 1), + + DepthwiseSeparableConv(32, 64) + + ]) + - self.dropout1 = nn.Dropout(0.25) + + self.dropout1 = nn.Dropout(nn.ValueChoice([0.25, 0.5, 0.75])) + self.dropout2 = nn.Dropout(0.5) + - self.fc1 = nn.Linear(9216, 128) + - self.fc2 = nn.Linear(128, 10) + + feature = nn.ValueChoice([64, 128, 256]) + + self.fc1 = nn.Linear(9216, feature) + + self.fc2 = nn.Linear(feature, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(self.conv2(x), 2) + x = torch.flatten(self.dropout1(x), 1) + x = self.fc2(self.dropout2(F.relu(self.fc1(x)))) + output = F.log_softmax(x, dim=1) + return output + +在这个例子中我们使用了两个突变 API, ``nn.LayerChoice`` 和 ``nn.ValueChoice``。 ``nn.LayerChoice`` 的输入参数是一个候选模块的列表(在这个例子中是两个),每个采样到的模型会选择其中的一个,然后它就可以像一般的 PyTorch 模块一样被使用。 ``nn.ValueChoice`` 输入一系列候选的值,然后对于每个采样到的模型,其中的一个值会生效。
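+ +上面的例子引用了一个名为 ``DepthwiseSeparableConv`` 的自定义模块,文中没有给出它的定义。下面是一个最简的参考实现(仅为示意,假设其结构为"逐通道卷积 + 逐点卷积",输出尺寸与候选的 ``nn.Conv2d(32, 64, 3, 1)`` 保持一致): + +.. code-block:: python + + import nni.retiarii.nn.pytorch as nn + + class DepthwiseSeparableConv(nn.Module): + # 深度可分离卷积:先做逐通道(depthwise)3x3 卷积,再做逐点(pointwise)1x1 卷积 + def __init__(self, in_ch, out_ch): + super().__init__() + self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, groups=in_ch) + self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1) + + def forward(self, x): + return self.pointwise(self.depthwise(x)) + +更多的 API 描述和用法请阅读 `这里 <./construct_space.rst>`__ 。 + +.. note:: 我们正在积极地丰富突变 API,以简化模型空间的构建。如果我们提供的 API 不能满足您表达模型空间的需求,请阅读 `这个文档 <./Mutators.rst>`__ 以获得更多定制突变的信息。 + +探索定义的模型空间 +------------------------------- + +简单来说,探索模型空间有两种方法:(1) 通过独立评估每个采样模型进行搜索;(2) 基于 One-Shot 的权重共享式搜索。 我们在本教程中演示第一种方法。 第二种方法可以参考 `这里 <./OneshotTrainer.rst>`__。 + +首先,用户需要选择合适的探索策略来探索模型空间。然后,用户需要选择或自定义模型评估器来评估每个采样模型的性能。 + +选择搜索策略 +^^^^^^^^^^^^^^^^^^^^^^^^ + +Retiarii 支持许多 `探索策略(exploration strategies) <./ExplorationStrategies.rst>`__。 + +简单地选择(即实例化)一个探索策略: + +.. code-block:: python + + import nni.retiarii.strategy as strategy + + search_strategy = strategy.Random(dedup=True) # 如果不希望对采样去重,可设置 dedup=False + +选择或编写模型评估器 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +在 NAS 过程中,探索策略反复生成新模型。 模型评估器用于训练和验证每个生成的模型。 生成的模型所获得的性能被收集起来,并送至探索策略以生成更好的模型。 + +Retiarii 提供了诸多的 `内置模型评估器 <./ModelEvaluators.rst>`__,但是作为第一步,我们还是推荐使用 ``FunctionalEvaluator``,也就是说,将您自己的训练和测试代码用一个函数包起来。这个函数的输入参数是一个模型的类,然后使用 ``nni.report_final_result`` 来汇报模型的效果。 + +这里的一个例子创建了一个简单的评估器,它在 MNIST 数据集上运行,训练 3 个 Epoch,并报告其在验证集上的准确率。 + +.. 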
code-block:: python + + # 依赖:nni、torch、torchvision(DataLoader、MNIST、transforms),完整代码见下方链接 + def evaluate_model(model_cls): + # "model_cls" 是一个类,需要初始化 + model = model_cls() + + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + transf = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = DataLoader(MNIST('data/mnist', download=True, transform=transf), batch_size=64, shuffle=True) + test_loader = DataLoader(MNIST('data/mnist', download=True, train=False, transform=transf), batch_size=64) + + device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + + for epoch in range(3): + # 训练模型,1 个 epoch + train_epoch(model, device, train_loader, optimizer, epoch) + # 测试模型,1 个 epoch + accuracy = test_epoch(model, device, test_loader) + # 汇报中间结果,可以是 float 或者 dict 类型 + nni.report_intermediate_result(accuracy) + + # 汇报最终结果 + nni.report_final_result(accuracy) + + # 创建模型评估器 + evaluator = nni.retiarii.evaluator.FunctionalEvaluator(evaluate_model) + +在这里 ``train_epoch`` 和 ``test_epoch`` 可以是任意自定义的函数,用户可以写自己的训练流程。完整的样例可以参见 :githublink:`examples/nas/multi-trial/mnist/search.py`。 + +我们建议 ``evaluate_model`` 不接受 ``model_cls`` 以外的其他参数;但如果确有需要,我们在 `高级教程 <./ModelEvaluators.rst>`__ 中展示了其他参数的用法。另外,我们会在未来支持这些参数的突变(这通常被称为 "超参调优")。 + +发起 Experiment +-------------------- + +一切准备就绪,就可以发起 Experiment 以进行模型搜索了。 样例如下: + +.. code-block:: python + + from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig + + exp = RetiariiExperiment(base_model, evaluator, [], search_strategy) + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'mnist_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 20 + exp_config.training_service.use_active_gpu = False + exp.run(exp_config, 8081) + +一个简单 MNIST 示例的完整代码在 :githublink:`这里 `。 除了本地训练平台,用户还可以在 `不同的训练平台 <../training_services.rst>`__ 上运行 Retiarii 的实验。 + +可视化 Experiment +------------------------ + +用户可以像可视化普通的超参数调优 Experiment 一样可视化他们的 Experiment。 例如,在浏览器里打开 ``localhost:8081``,8081 是在 ``exp.run`` 里设置的端口。 参考 `这里 <../Tutorial/WebUI.rst>`__ 了解更多细节。 + +我们支持使用第三方工具(例如 `Netron `__)可视化搜索过程中采样到的模型。您可以点击每个 trial 面板下的 ``Visualization``。注意,目前的可视化是基于导出成 `onnx `__ 格式的模型实现的,所以如果模型无法导出成 onnx,那么可视化就无法进行。内置的模型评估器(比如 Classification)已经自动将模型导出成了一个文件。如果您使用自定义的模型评估器,则需要自行将模型导出到 ``$NNI_OUTPUT_DIR/model.onnx``。
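+ +下面给出一个导出 onnx 的简单示意(假设在评估函数内可以拿到实例化后的 ``model``,且模型输入为 1×1×28×28 的 MNIST 图片;``torch.onnx.export`` 的输入尺寸请以实际模型为准): + +.. code-block:: python + + import os + import torch + + def export_onnx_for_visualization(model): + # NNI_OUTPUT_DIR 由训练平台注入;本地调试时退回当前目录 + out_dir = os.environ.get('NNI_OUTPUT_DIR', '.') + dummy_input = torch.randn(1, 1, 28, 28) + torch.onnx.export(model, dummy_input, os.path.join(out_dir, 'model.onnx')) + +导出最佳模型 +----------------- + +探索完成后,用户可以使用 ``export_top_models`` 导出最佳模型。 + +.. 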
code-block:: python + + for model_code in exp.export_top_models(formatter='dict'): + print(model_code) diff --git a/docs/zh_CN/NAS/SPOS.rst b/docs/zh_CN/NAS/SPOS.rst new file mode 120000 index 0000000000000000000000000000000000000000..4a70518f9bfb5a6f98994046848c5f11692667d2 --- /dev/null +++ b/docs/zh_CN/NAS/SPOS.rst @@ -0,0 +1 @@ +../../en_US/NAS/SPOS.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/Serialization.rst b/docs/zh_CN/NAS/Serialization.rst new file mode 120000 index 0000000000000000000000000000000000000000..b7d9c96a31fd2602260543fbd01c8e2bb4506a25 --- /dev/null +++ b/docs/zh_CN/NAS/Serialization.rst @@ -0,0 +1 @@ +../../en_US/NAS/Serialization.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/WriteOneshot.rst b/docs/zh_CN/NAS/WriteOneshot.rst new file mode 120000 index 0000000000000000000000000000000000000000..bb86305822549c8feac87f3d86c397df4fc979e3 --- /dev/null +++ b/docs/zh_CN/NAS/WriteOneshot.rst @@ -0,0 +1 @@ +../../en_US/NAS/WriteOneshot.rst \ No newline at end of file diff --git a/docs/zh_CN/NAS/construct_space.rst b/docs/zh_CN/NAS/construct_space.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab19c41fa371e317e5a66b3ee1ad4cc7904d23db --- /dev/null +++ b/docs/zh_CN/NAS/construct_space.rst @@ -0,0 +1,14 @@ +.. bb39a6ac0ae1f5554bc38604c77fb616 + +##################### +构建模型空间 +##################### + +NNI为用户提供了强大的API,以方便表达模型空间(或搜索空间)。 首先,用户可以使用 mutation 原语(如 ValueChoice、LayerChoice)在他们的模型中内联一个空间。 其次,NNI为用户提供了简单的接口,可以定制新的 mutators 来表达更复杂的模型空间。 在大多数情况下,mutation 原语足以表达用户的模型空间。 + +.. toctree:: + :maxdepth: 1 + + mutation 原语 + 定制 mutator + Hypermodule Lib \ No newline at end of file diff --git a/docs/zh_CN/NAS/multi_trial_nas.rst b/docs/zh_CN/NAS/multi_trial_nas.rst new file mode 100644 index 0000000000000000000000000000000000000000..f79b16ab8c3e57403dc67d5713419838655f53a5 --- /dev/null +++ b/docs/zh_CN/NAS/multi_trial_nas.rst @@ -0,0 +1,14 @@ +.. 51734c9945d4eca0f9b5633929d8fadf + +Multi-trial NAS +=============== + +在 multi-trial NAS 中,用户需要模型评估器来评估每个采样模型的性能,并且需要一个探索策略来从定义的模型空间中采样模型。 在这里,用户可以使用 NNI 提供的模型评估器或编写自己的模型评估器。 他们可以简单地选择一种探索策略。 高级用户还可以自定义新的探索策略。 关于如何运行 multi-trial NAS 实验的简单例子,请参考 `快速入门 <./QuickStart.rst>`__。 + +.. toctree:: + :maxdepth: 1 + + 模型评估器 + 探索策略 + 执行引擎 + 序列化 diff --git a/docs/zh_CN/NAS/one_shot_nas.rst b/docs/zh_CN/NAS/one_shot_nas.rst new file mode 100644 index 0000000000000000000000000000000000000000..223c843175bf883893403e1894242ed113e9a723 --- /dev/null +++ b/docs/zh_CN/NAS/one_shot_nas.rst @@ -0,0 +1,18 @@ +.. c9ab8a1f91c587ad72d66b6c43e06528 + +One-shot NAS +============ + +One-Shot NAS 算法利用了搜索空间中模型间的权重共享来训练超网络,并使用超网络来指导选择出更好的模型。 与从头训练每个模型(我们称之为 "Multi-trial NAS")算法相比,此类算法大大减少了使用的计算资源。 NNI 支持下列流行的 One-Shot NAS 算法。 + + +.. toctree:: + :maxdepth: 1 + + 运行 One-shot NAS + ENAS + DARTS + SPOS + ProxylessNAS + FBNet + 自定义 One-shot NAS \ No newline at end of file diff --git a/docs/zh_CN/Overview.rst b/docs/zh_CN/Overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..be61cc35c3b9fbf807bdf5399bd0388ed5dfceda --- /dev/null +++ b/docs/zh_CN/Overview.rst @@ -0,0 +1,125 @@ +.. 
6e45ee0ddd5d0315e5c946149d4f9c31 + +概述 +======== + +NNI (Neural Network Intelligence) 是一个工具包,可有效地帮助用户设计并调优机器学习模型的神经网络架构、复杂系统的参数(如超参)等。 NNI 的特性包括:易于使用,可扩展,灵活,高效。 + + +* **易于使用**:NNI 可通过 pip 安装。 只需要在代码中添加几行,就可以利用 NNI 来调优参数。 可使用命令行工具或 Web 界面来查看 Experiment。 +* **可扩展**:调优超参或网络结构通常需要大量的计算资源。NNI 在设计时就支持了多种不同的计算资源,如远程服务器组,训练平台(如:OpenPAI,Kubernetes)等。 根据您配置的训练平台的能力,可以并行运行数百个 Trial。 +* **灵活**:除了内置的算法,NNI 中还可以轻松集成自定义的超参调优算法,神经网络架构搜索算法,提前终止算法等等。 还可以将 NNI 连接到更多的训练平台上,如云中的虚拟机集群,Kubernetes 服务等等。 此外,NNI 还可以连接到外部环境中的特殊应用和模型上。 +* **高效**:NNI 在系统及算法级别上不断地进行优化。 例如:通过早期的反馈来加速调优过程。 + +下图显示了 NNI 的体系结构。 + + +.. raw:: html + +

+   <!-- NNI 体系结构图(alt: drawing) -->

+ + +主要概念 +------------ + + +* + *Experiment(实验)*: 表示一次任务,例如,寻找模型的最佳超参组合,或最好的神经网络架构等。 它由 Trial 和自动机器学习算法所组成。 + +* + *搜索空间*:是模型调优的范围。 例如,超参的取值范围。 + +* + *Configuration(配置)*:配置是来自搜索空间的实例,每个超参都会有特定的值。 + +* + *Trial*:是一次独立的尝试,它会使用某组配置(例如,一组超参值,或者特定的神经网络架构)。 Trial 会基于提供的配置来运行。 + +* + *Tuner(调优器)*:一种自动机器学习算法,会为下一个 Trial 生成新的配置。 新的 Trial 会使用这组配置来运行。 + +* + *Assessor(评估器)*:分析 Trial 的中间结果(例如,定期评估数据集上的精度),来确定 Trial 是否应该被提前终止。 + +* + *训练平台*:是 Trial 的执行环境。 根据 Experiment 的配置,可以是本机,远程服务器组,或其它大规模训练平台(如,OpenPAI,Kubernetes)。 + +Experiment 的运行过程为:Tuner 接收搜索空间并生成配置。 这些配置将被提交到训练平台,如本机,远程服务器组或训练集群。 执行的性能结果会被返回给 Tuner。 然后,再生成并提交新的配置。 + +每次 Experiment 执行时,用户只需要定义搜索空间,改动几行代码,就能利用 NNI 内置的 Tuner/Assessor 和训练平台来搜索最好的超参组合以及神经网络结构。 基本上分为三步: + +.. + + 步骤一:`定义搜索空间 `__ + + 步骤二:`改动模型代码 `__ + + 步骤三:`定义实验配置 `__ + + + +.. raw:: html + +

+   <!-- 使用 NNI 的三个步骤示意图(alt: drawing) -->

+ + +可查看 `快速入门 `__ 来调优你的模型或系统。 + +核心功能 +------------- + +NNI 提供了并行运行多个实例以查找最佳参数组合的能力。 此功能可用于各种领域,例如,为深度学习模型查找最佳超参数,或查找具有真实数据的数据库和其他复杂系统的最佳配置。 + +NNI 还希望提供用于机器学习和深度学习的算法工具包,尤其是神经体系结构搜索(NAS)算法,模型压缩算法和特征工程算法。 + +超参调优 +^^^^^^^^^^^^^^^^^^^^^ + +这是 NNI 最核心、基本的功能,其中提供了许多流行的 `自动调优算法 `__ (即 Tuner) 以及 `提前终止算法 `__ (即 Assessor)。 可查看 `快速入门 `__ 来调优你的模型或系统。 基本上通过以上三步,就能开始 NNI Experiment。 + +通用 NAS 框架 +^^^^^^^^^^^^^^^^^^^^^ + +此 NAS 框架可供用户轻松指定候选的神经体系结构,例如,可以为单个层指定多个候选操作(例如,可分离的 conv、扩张 conv),并指定可能的跳过连接。 NNI 将自动找到最佳候选。 另一方面,NAS 框架为其他类型的用户(如,NAS 算法研究人员)提供了简单的接口,以实现新的 NAS 算法。 NAS 详情及用法参考 `这里 `__。 + +NNI 通过 Trial SDK 支持多种 one-shot(一次性) NAS 算法,如:ENAS、DARTS。 使用这些算法时,不需启动 NNI Experiment。 在 Trial 代码中加入算法,直接运行即可。 如果要调整算法中的超参数,或运行多个实例,可以使用 Tuner 并启动 NNI Experiment。 + +除了 one-shot NAS 外,NAS 还能以 NNI 模式运行,其中每个候选的网络结构都作为独立 Trial 任务运行。 在此模式下,与超参调优类似,必须启动 NNI Experiment 并为 NAS 选择 Tuner。 + +模型压缩 +^^^^^^^^^^^^^^^^^ + +NNI 提供了一个易于使用的模型压缩框架来压缩深度神经网络,压缩后的网络通常具有更小的模型尺寸和更快的推理速度, +模型性能也不会有明显的下降。 NNI 上的模型压缩包括剪枝和量化算法。 这些算法通过 NNI Trial SDK 提供 +。 可以直接在 Trial 代码中使用,并在不启动 NNI Experiment 的情况下运行 Trial 代码。 用户还可以使用 NNI 模型压缩框架集成自定义的剪枝和量化算法。 + +模型压缩的详细说明和算法可在 `这里 `__ 找到。 + +自动特征工程 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +自动特征工程,可以为下游任务找到最有效的特征。 自动特征工程及其用法的详细说明可在 `这里 `__ 找到。 通过 NNI Trial SDK 支持,不必创建 NNI Experiment, 只需在 Trial 代码中加入内置的自动特征工程算法,然后直接运行 Trial 代码。 + +自动特征工程算法通常有一些超参。 如果要自动调整这些超参,可以利用 NNI 的超参数调优,即选择调优算法(即 Tuner)并启动 NNI Experiment。 + +了解更多信息 +-------------------- + + +* `入门 `__ +* `如何为 NNI 调整代码? `__ +* `NNI 支持哪些 Tuner? `__ +* `如何自定义 Tuner? `__ +* `NNI 支持哪些 Assessor? `__ +* `如何自定义 Assessor? `__ +* `如何在本机上运行 Experiment? `__ +* `如何在多机上运行 Experiment? `__ +* `如何在 OpenPAI 上运行 Experiment? `__ +* `示例 `__ +* `NNI 上的神经网络架构搜索 `__ +* `NNI 上的自动模型压缩 `__ +* `NNI 上的自动特征工程 `__ diff --git a/docs/zh_CN/Release.rst b/docs/zh_CN/Release.rst new file mode 120000 index 0000000000000000000000000000000000000000..a9bbbc3fedd8d675dc94d10a20f010d5f6067522 --- /dev/null +++ b/docs/zh_CN/Release.rst @@ -0,0 +1 @@ +../en_US/Release.rst \ No newline at end of file diff --git a/docs/zh_CN/Release_v1.0.md b/docs/zh_CN/Release_v1.0.md new file mode 120000 index 0000000000000000000000000000000000000000..5d9c97410ec962658dc9470ae828ceee3df312ec --- /dev/null +++ b/docs/zh_CN/Release_v1.0.md @@ -0,0 +1 @@ +../en_US/Release_v1.0.md \ No newline at end of file diff --git a/docs/zh_CN/ResearchPublications.rst b/docs/zh_CN/ResearchPublications.rst new file mode 120000 index 0000000000000000000000000000000000000000..3fffb5132143e53231d6bbbec066907abe1f8465 --- /dev/null +++ b/docs/zh_CN/ResearchPublications.rst @@ -0,0 +1 @@ +../en_US/ResearchPublications.rst \ No newline at end of file diff --git a/docs/zh_CN/SupportedFramework_Library.rst b/docs/zh_CN/SupportedFramework_Library.rst new file mode 120000 index 0000000000000000000000000000000000000000..98bc96d690060768c6d91bfcc5da55dcd048b0eb --- /dev/null +++ b/docs/zh_CN/SupportedFramework_Library.rst @@ -0,0 +1 @@ +../en_US/SupportedFramework_Library.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/AMLMode.rst b/docs/zh_CN/TrainingService/AMLMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..3a294305db9f56857ba48df1b3a7c7acad919317 --- /dev/null +++ b/docs/zh_CN/TrainingService/AMLMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/AMLMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/AdaptDLMode.rst b/docs/zh_CN/TrainingService/AdaptDLMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..d66aed29050f33135ab53ce6dac9a364ef2a6591 
--- /dev/null +++ b/docs/zh_CN/TrainingService/AdaptDLMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/AdaptDLMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/DLCMode.rst b/docs/zh_CN/TrainingService/DLCMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..842f71a3ba9bb58d2b06abc6a2593ed9748baf76 --- /dev/null +++ b/docs/zh_CN/TrainingService/DLCMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/DLCMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/DLTSMode.rst b/docs/zh_CN/TrainingService/DLTSMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..addd326bc00c801a96d3923b3cf4057801923059 --- /dev/null +++ b/docs/zh_CN/TrainingService/DLTSMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/DLTSMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/FrameworkControllerMode.rst b/docs/zh_CN/TrainingService/FrameworkControllerMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..48f8da126fc0d7354f267c1eea87beedf03da678 --- /dev/null +++ b/docs/zh_CN/TrainingService/FrameworkControllerMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/FrameworkControllerMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/HowToImplementTrainingService.rst b/docs/zh_CN/TrainingService/HowToImplementTrainingService.rst new file mode 120000 index 0000000000000000000000000000000000000000..79f597fa9125af83866568089c0281e459a159fe --- /dev/null +++ b/docs/zh_CN/TrainingService/HowToImplementTrainingService.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/HowToImplementTrainingService.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/HybridMode.rst b/docs/zh_CN/TrainingService/HybridMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..deb252df0269b71471a20c4eed032325a9708268 --- /dev/null +++ b/docs/zh_CN/TrainingService/HybridMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/HybridMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/KubeflowMode.rst b/docs/zh_CN/TrainingService/KubeflowMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..99f763c36854d5c03c25ea8335787a783d20941d --- /dev/null +++ b/docs/zh_CN/TrainingService/KubeflowMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/KubeflowMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/LocalMode.rst b/docs/zh_CN/TrainingService/LocalMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..afd79c563d2e69b517e4a3e730c379ddad5cbe22 --- /dev/null +++ b/docs/zh_CN/TrainingService/LocalMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/LocalMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/Overview.rst b/docs/zh_CN/TrainingService/Overview.rst new file mode 120000 index 0000000000000000000000000000000000000000..0ac04a6210adaebe9c9bcfb2327b29d4bae3e6d7 --- /dev/null +++ b/docs/zh_CN/TrainingService/Overview.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/Overview.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/PaiMode.rst b/docs/zh_CN/TrainingService/PaiMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..bb6fa1f1dbaa740c7484183226007ba3b34dc298 --- /dev/null +++ b/docs/zh_CN/TrainingService/PaiMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/PaiMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrainingService/RemoteMachineMode.rst 
b/docs/zh_CN/TrainingService/RemoteMachineMode.rst new file mode 120000 index 0000000000000000000000000000000000000000..61dfc745a4a246ede88945f05f8c5a1f0e012412 --- /dev/null +++ b/docs/zh_CN/TrainingService/RemoteMachineMode.rst @@ -0,0 +1 @@ +../../en_US/TrainingService/RemoteMachineMode.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/Cifar10Examples.rst b/docs/zh_CN/TrialExample/Cifar10Examples.rst new file mode 120000 index 0000000000000000000000000000000000000000..17aad4bad004f3fc262242136444766453212034 --- /dev/null +++ b/docs/zh_CN/TrialExample/Cifar10Examples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/Cifar10Examples.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/EfficientNet.rst b/docs/zh_CN/TrialExample/EfficientNet.rst new file mode 120000 index 0000000000000000000000000000000000000000..539e6982688e2d3ef8c1f82745ae901c2378a955 --- /dev/null +++ b/docs/zh_CN/TrialExample/EfficientNet.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/EfficientNet.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/GbdtExample.rst b/docs/zh_CN/TrialExample/GbdtExample.rst new file mode 120000 index 0000000000000000000000000000000000000000..e96cbcf6976d6923dbe6d18c3622ac82ea2bb2a2 --- /dev/null +++ b/docs/zh_CN/TrialExample/GbdtExample.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/GbdtExample.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/KDExample.rst b/docs/zh_CN/TrialExample/KDExample.rst new file mode 120000 index 0000000000000000000000000000000000000000..44176bb2dfb0c68483d3cf237f6a2b3b8bf358ad --- /dev/null +++ b/docs/zh_CN/TrialExample/KDExample.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/KDExample.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/MnistExamples.rst b/docs/zh_CN/TrialExample/MnistExamples.rst new file mode 120000 index 0000000000000000000000000000000000000000..d15711abfc47d19c10f215744c071d321ecb2716 --- /dev/null +++ b/docs/zh_CN/TrialExample/MnistExamples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/MnistExamples.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/OpEvoExamples.rst b/docs/zh_CN/TrialExample/OpEvoExamples.rst new file mode 120000 index 0000000000000000000000000000000000000000..8828074b038391167dfa8aec55085665410e02b7 --- /dev/null +++ b/docs/zh_CN/TrialExample/OpEvoExamples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/OpEvoExamples.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/Pix2pixExample.rst b/docs/zh_CN/TrialExample/Pix2pixExample.rst new file mode 120000 index 0000000000000000000000000000000000000000..00b1fdebf752523a7c8f97d98fca192f47e186d6 --- /dev/null +++ b/docs/zh_CN/TrialExample/Pix2pixExample.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/Pix2pixExample.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/RocksdbExamples.rst b/docs/zh_CN/TrialExample/RocksdbExamples.rst new file mode 120000 index 0000000000000000000000000000000000000000..9461b3c1ac9cff9fd37a479b3b7b330751464026 --- /dev/null +++ b/docs/zh_CN/TrialExample/RocksdbExamples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/RocksdbExamples.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/SklearnExamples.rst b/docs/zh_CN/TrialExample/SklearnExamples.rst new file mode 120000 index 0000000000000000000000000000000000000000..aceca0c1861b9a9d366cff991760e9fcabab5b55 --- /dev/null +++ b/docs/zh_CN/TrialExample/SklearnExamples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/SklearnExamples.rst \ No newline at end of file diff --git 
a/docs/zh_CN/TrialExample/SquadEvolutionExamples.rst b/docs/zh_CN/TrialExample/SquadEvolutionExamples.rst new file mode 120000 index 0000000000000000000000000000000000000000..9d2d9757709a2c737cf982b5842b7d441a75be69 --- /dev/null +++ b/docs/zh_CN/TrialExample/SquadEvolutionExamples.rst @@ -0,0 +1 @@ +../../en_US/TrialExample/SquadEvolutionExamples.rst \ No newline at end of file diff --git a/docs/zh_CN/TrialExample/Trials.rst b/docs/zh_CN/TrialExample/Trials.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b776f3d63aecfbf2e4e64dc70ac37c3d64d7076 --- /dev/null +++ b/docs/zh_CN/TrialExample/Trials.rst @@ -0,0 +1,214 @@ +.. ce86df82c781b5be2b2ab411b4309f59 + :format: html + + +实现 NNI 的 Trial(试验)代码 +================================= + +**Trial(试验)** 是将一组参数组合(例如,超参)在模型上独立的一次尝试。 + +定义 NNI 的 Trial,需要首先定义参数组(例如,搜索空间),并更新模型代码。 有两种方法来定义一个 Trial:`NNI Python API <#nni-api>`__ 和 `NNI Python annotation <#nni-annotation>`__。 参考 `这里 <#more-examples>`__ 了解更多 Trial 示例。 + +:raw-html:`` + +NNI Trial API +------------- + +第一步:准备搜索空间参数文件 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +样例如下: + +.. code-block:: json + + { + "dropout_rate":{"_type":"uniform","_value":[0.1,0.5]}, + "conv_size":{"_type":"choice","_value":[2,3,5,7]}, + "hidden_size":{"_type":"choice","_value":[124, 512, 1024]}, + "learning_rate":{"_type":"uniform","_value":[0.0001, 0.1]} + } + +参考 `SearchSpaceSpec.rst <../Tutorial/SearchSpaceSpec.rst>`__ 进一步了解搜索空间。 Tuner 会根据搜索空间来生成配置,即从每个超参的范围中选一个值。 + +第二步:更新模型代码 +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +* Import NNI + +在 Trial 代码中加上 ``import nni`` 。 + +* 从 Tuner 获得参数值 + +.. code-block:: python + + RECEIVED_PARAMS = nni.get_next_parameter() + +``RECEIVED_PARAMS`` 是一个对象,如: + +``{"conv_size": 2, "hidden_size": 124, "learning_rate": 0.0307, "dropout_rate": 0.2029}`` + + +* 定期返回指标数据(可选) + +.. code-block:: python + + nni.report_intermediate_result(metrics) + +``指标`` 可以是任意的 Python 对象。 如果使用了 NNI 内置的 Tuner/Assessor,``指标`` 只可以是两种类型:1) 数值类型,如 float、int,2) dict 对象,其中必须有键名为 ``default``、值为数值的项目。 ``指标`` 会发送给 `assessor <../Assessor/BuiltinAssessor.rst>`__。 通常,``指标`` 包含了定期评估的损失值或精度。 + + +* 返回配置的最终性能 + +.. code-block:: python + + nni.report_final_result(metrics) + +``指标`` 可以是任意的 Python 对象。 如果使用了内置的 Tuner/Assessor,``指标`` 格式和 ``report_intermediate_result`` 中一样,这个数值表示模型的性能,如精度、损失值等。 ``指标`` 会发送给 `tuner <../Tuner/BuiltinTuner.rst>`__。 + +第三步:启动 NNI Experiment (实验) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +启动 NNI 实验,提供搜索空间文件的路径,即第一步中定义的文件: + +.. code-block:: yaml + + searchSpacePath: /path/to/your/search_space.json + +参考 `这里 <../Tutorial/ExperimentConfig.rst>`__ 进一步了解如何配置 Experiment。 + +参考 `这里 <../sdk_reference.rst>`__ ,了解更多 NNI Trial API (例如:``nni.get_sequence_id()``)。
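+ +把上面几个 API 串起来,一个最简的 Trial 脚本大致如下(仅为示意;``train_one_epoch`` 是假设的训练函数,用于演示结构): + +.. code-block:: python + + import nni + + params = nni.get_next_parameter()             # 从 Tuner 获取一组超参 + for epoch in range(10): + accuracy = train_one_epoch(params)        # 假设的单轮训练函数,返回精度 + nni.report_intermediate_result(accuracy)  # 定期汇报中间结果 + nni.report_final_result(accuracy)             # 汇报最终结果 + +:raw-html:`` + +NNI Annotation +--------------------- + +另一种实现 Trial 的方法是使用 Python 注释来标记 NNI。 NNI Annotation 很简单,形式上就是注释, 不必对现有代码进行结构更改。 只需要添加一些 NNI Annotation,就能够: + + +* 标记需要调整的参数变量 +* 指定要在其中调整的变量的范围 +* 标记哪个变量需要作为中间结果返回给 ``assessor`` +* 标记哪个变量需要作为最终结果(例如:模型精度) 返回给 ``tuner`` + +同样以 MNIST 为例,只需要两步就能用 NNI Annotation 来实现 Trial 代码。 + +第一步:在代码中加入 Annotation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +下面是加入了 Annotation 的 TensorFlow 代码片段,高亮的 4 行 Annotation 用于: + + +#. 调优 batch_size 和 dropout_rate +#. 每执行 100 步返回 test_acc +#. 最后返回 test_acc 作为最终结果。 + +值得注意的是,新添加的代码都是注释,不会影响以前的执行逻辑。因此这些代码仍然能在没有安装 NNI 的环境中运行。 + +.. 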
code-block:: diff + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + + """@nni.variable(nni.choice(50, 250, 500), name=batch_size)""" + batch_size = 128 + for i in range(10000): + batch = mnist.train.next_batch(batch_size) + + """@nni.variable(nni.choice(0.1, 0.5), name=dropout_rate)""" + dropout_rate = 0.5 + mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], + mnist_network.labels: batch[1], + mnist_network.keep_prob: dropout_rate}) + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_intermediate_result(test_acc)""" + + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_final_result(test_acc)""" + +**注意**: + + +* ``@nni.variable`` 会对它的下面一行进行修改,左边被赋值变量必须与 ``@nni.variable`` 的关键字 ``name`` 相同。 +* ``@nni.report_intermediate_result``\ /\ ``@nni.report_final_result`` 会将数据发送给 assessor/tuner。 + +Annotation 的语法和用法等,参考 `Annotation <../Tutorial/AnnotationSpec.rst>`__。 + +第二步:启用 Annotation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +在 YAML 配置文件中设置 *useAnnotation* 为 true 来启用 Annotation: + +.. code-block:: yaml + + useAnnotation: true + +用于调试的独立模式 +----------------------------- + +NNI 支持独立模式,使 Trial 代码无需启动 NNI 实验即可运行。 这样能更容易地找出 Trial 代码中的 Bug。 NNI Annotation 天然支持独立模式,因为添加的 NNI 相关的行都是注释的形式。 NNI Trial API 在独立模式下的行为有所变化,某些 API 返回虚拟值,而某些 API 不报告值。 有关这些 API 的完整列表,请参阅下面的列表。 + +.. code-block:: python + + # 注意:请为 Trial 代码中的超参分配默认值 + nni.get_next_parameter # 返回 {} + nni.report_final_result # 已在 stdout 上打印日志,但不报告 + nni.report_intermediate_result # 已在 stdout 上打印日志,但不报告 + nni.get_experiment_id # 返回 "STANDALONE" + nni.get_trial_id # 返回 "STANDALONE" + nni.get_sequence_id # 返回 0 + +可使用 :githublink:`mnist 示例 ` 来尝试独立模式。 只需在代码目录下运行 ``python3 mnist.py``。 Trial 代码会使用默认超参成功运行。 + +更多调试的信息,可参考 `How to Debug <../Tutorial/HowToDebug.rst>`__。
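+ +结合上面的列表,独立模式下推荐的写法是:先给超参设定默认值,再用 ``nni.get_next_parameter()`` 的返回值覆盖。下面是一个简单示意(``train`` 为假设的训练入口,仅用于演示): + +.. code-block:: python + + import nni + + # 默认超参,保证独立模式(get_next_parameter 返回 {})下也能直接运行 + params = {'batch_size': 32, 'dropout_rate': 0.5} + # 在 NNI experiment 中,Tuner 生成的超参会覆盖这些默认值 + params.update(nni.get_next_parameter()) + train(params)  # 假设的训练入口 + +Trial 存放在什么地方? +---------------------------------------- + +本机模式 +^^^^^^^^^^ + +每个 Trial 都有单独的目录来输出自己的数据。 在每次 Trial 运行时,会导出名为 ``NNI_OUTPUT_DIR`` 的环境变量,指向该目录。 在这个目录中可以看到 Trial 的代码、数据和日志。 此外,Trial 的日志(包括 stdout)还会被重定向到此目录中的 ``trial.log`` 文件。 + +如果使用了 Annotation 方法,转换后的 Trial 代码会存放在另一个临时目录中。 可以在 ``run.sh`` 文件中的 ``NNI_OUTPUT_DIR`` 变量找到此目录。 文件中的第二行(即:``cd``)会切换到代码所在的实际路径。 ``run.sh`` 文件示例: + +.. code-block:: bash + + #!/bin/bash + cd /tmp/user_name/nni/annotation/tmpzj0h72x6 # This is the actual directory + export NNI_PLATFORM=local + export NNI_SYS_DIR=/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$ + export NNI_TRIAL_JOB_ID=nrbb2 + export NNI_OUTPUT_DIR=/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$ + export NNI_TRIAL_SEQ_ID=1 + export MULTI_PHASE=false + export CUDA_VISIBLE_DEVICES= + eval python3 mnist.py 2>/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$/stderr + echo $? 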
`date +%s%3N` >/home/user_name/nni-experiments/$experiment_id$/trials/$trial_id$/.nni/state + +其它模式 +^^^^^^^^^^^ + +当 Trial 运行在 OpenPAI 这样的远程服务器上时,``NNI_OUTPUT_DIR`` 仅会指向 Trial 的输出目录,而 ``run.sh`` 不会在此目录中。 ``trial.log`` 文件会被复制回本机的 Trial 目录中。目录的默认位置在 ``~/nni-experiments/$experiment_id$/trials/$trial_id$/``。 + +更多调试的信息,可参考 `How to Debug <../Tutorial/HowToDebug.rst>`__。 + +:raw-html:`` + +更多 Trial 的示例 +------------------- + + +* `将日志写入 TensorBoard 的 Trial 输出目录 <../Tutorial/Tensorboard.rst>`__ +* `MNIST 示例 `__ +* `为 CIFAR 10 分类找到最佳的 optimizer `__ +* `如何在 NNI 调优 SciKit-learn 的参数 `__ +* `在阅读理解上使用自动模型架构搜索。 `__ +* `如何在 NNI 上调优 GBDT `__ +* `在 NNI 上调优 RocksDB `__ diff --git a/docs/zh_CN/Tuner/AnnealTuner.rst b/docs/zh_CN/Tuner/AnnealTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..5c50e6f5b7e239f80d2261782990b405d130b8da --- /dev/null +++ b/docs/zh_CN/Tuner/AnnealTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/AnnealTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/BatchTuner.rst b/docs/zh_CN/Tuner/BatchTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..6b0817074a8138b9f81a5d7a7ca39cb545694748 --- /dev/null +++ b/docs/zh_CN/Tuner/BatchTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/BatchTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/BohbAdvisor.rst b/docs/zh_CN/Tuner/BohbAdvisor.rst new file mode 120000 index 0000000000000000000000000000000000000000..1fe9c235554b8bf15adf8166c98524f5fba71d41 --- /dev/null +++ b/docs/zh_CN/Tuner/BohbAdvisor.rst @@ -0,0 +1 @@ +../../en_US/Tuner/BohbAdvisor.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/BuiltinTuner.rst b/docs/zh_CN/Tuner/BuiltinTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..63634cce49ee17a4673913d62f906671617ef2b0 --- /dev/null +++ b/docs/zh_CN/Tuner/BuiltinTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/BuiltinTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/CustomizeAdvisor.rst b/docs/zh_CN/Tuner/CustomizeAdvisor.rst new file mode 120000 index 0000000000000000000000000000000000000000..3d08070d794cbb6bf0aff99d70c70f064f664dd8 --- /dev/null +++ b/docs/zh_CN/Tuner/CustomizeAdvisor.rst @@ -0,0 +1 @@ +../../en_US/Tuner/CustomizeAdvisor.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/CustomizeTuner.rst b/docs/zh_CN/Tuner/CustomizeTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..7769711d692011bd2ee1779272d62239200718f4 --- /dev/null +++ b/docs/zh_CN/Tuner/CustomizeTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/CustomizeTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/DngoTuner.rst b/docs/zh_CN/Tuner/DngoTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..085c9e4da9fed32b95c78b4ea1d542ffa99e7d6f --- /dev/null +++ b/docs/zh_CN/Tuner/DngoTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/DngoTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/EvolutionTuner.rst b/docs/zh_CN/Tuner/EvolutionTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..3949a42e05a4095f9a28bf5c57562f4b404b1726 --- /dev/null +++ b/docs/zh_CN/Tuner/EvolutionTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/EvolutionTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/GPTuner.rst b/docs/zh_CN/Tuner/GPTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..76106a8785f5a1b6ffd4c8f0539b25e2b7d3b1ad --- /dev/null +++ b/docs/zh_CN/Tuner/GPTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/GPTuner.rst \ No newline 
at end of file diff --git a/docs/zh_CN/Tuner/GridsearchTuner.rst b/docs/zh_CN/Tuner/GridsearchTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..1e9a0715319208dd2a5657fa46c86ce24843fb8e --- /dev/null +++ b/docs/zh_CN/Tuner/GridsearchTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/GridsearchTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/HyperbandAdvisor.rst b/docs/zh_CN/Tuner/HyperbandAdvisor.rst new file mode 120000 index 0000000000000000000000000000000000000000..813fa1c3af1cf32b954baee83b0d9abe4bb26f88 --- /dev/null +++ b/docs/zh_CN/Tuner/HyperbandAdvisor.rst @@ -0,0 +1 @@ +../../en_US/Tuner/HyperbandAdvisor.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/MetisTuner.rst b/docs/zh_CN/Tuner/MetisTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..47f52a5fced7c3934bd0eb9785feaafb8b0be861 --- /dev/null +++ b/docs/zh_CN/Tuner/MetisTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/MetisTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/NetworkmorphismTuner.rst b/docs/zh_CN/Tuner/NetworkmorphismTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..a8104447acaa65f2288cbf06f2ea438ef057e8cd --- /dev/null +++ b/docs/zh_CN/Tuner/NetworkmorphismTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/NetworkmorphismTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/PBTTuner.rst b/docs/zh_CN/Tuner/PBTTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..e5a5aa9e9127e210ac3bf5a356a213a0b45a46ea --- /dev/null +++ b/docs/zh_CN/Tuner/PBTTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/PBTTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/RandomTuner.rst b/docs/zh_CN/Tuner/RandomTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..829ca575fcfacd266c4e919405432ee8983f7b3c --- /dev/null +++ b/docs/zh_CN/Tuner/RandomTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/RandomTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/SmacTuner.rst b/docs/zh_CN/Tuner/SmacTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..0807af159d97377292c50f6bd9fa6ca6fe4c84b4 --- /dev/null +++ b/docs/zh_CN/Tuner/SmacTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/SmacTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tuner/TpeTuner.rst b/docs/zh_CN/Tuner/TpeTuner.rst new file mode 120000 index 0000000000000000000000000000000000000000..18e13ef4b40d3ae5816b3c045372c8136dd4deb4 --- /dev/null +++ b/docs/zh_CN/Tuner/TpeTuner.rst @@ -0,0 +1 @@ +../../en_US/Tuner/TpeTuner.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/AnnotationSpec.rst b/docs/zh_CN/Tutorial/AnnotationSpec.rst new file mode 120000 index 0000000000000000000000000000000000000000..e7b14d8d046c09b0326f87921623aaaacb2fe6a6 --- /dev/null +++ b/docs/zh_CN/Tutorial/AnnotationSpec.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/AnnotationSpec.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/Contributing.rst b/docs/zh_CN/Tutorial/Contributing.rst new file mode 120000 index 0000000000000000000000000000000000000000..ed6f2c21373ee36296e27dd73aedfc0adf31e81c --- /dev/null +++ b/docs/zh_CN/Tutorial/Contributing.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/Contributing.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/ExperimentConfig.rst b/docs/zh_CN/Tutorial/ExperimentConfig.rst new file mode 120000 index 0000000000000000000000000000000000000000..23e1484ca4d0c8fa444fdf6c952c82d247c4621d --- /dev/null +++ 
b/docs/zh_CN/Tutorial/ExperimentConfig.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/ExperimentConfig.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/FAQ.rst b/docs/zh_CN/Tutorial/FAQ.rst new file mode 120000 index 0000000000000000000000000000000000000000..e798f18d869f51ba5ae57ca1ae29f0130aa286fa --- /dev/null +++ b/docs/zh_CN/Tutorial/FAQ.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/FAQ.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/HowToDebug.rst b/docs/zh_CN/Tutorial/HowToDebug.rst new file mode 120000 index 0000000000000000000000000000000000000000..3e51c9967cbbf0051e7bde94c6895335ea892db6 --- /dev/null +++ b/docs/zh_CN/Tutorial/HowToDebug.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/HowToDebug.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/HowToLaunchFromPython.rst b/docs/zh_CN/Tutorial/HowToLaunchFromPython.rst new file mode 120000 index 0000000000000000000000000000000000000000..75d7b5e73f8aee293cb8d59b2c069af1549ec1cf --- /dev/null +++ b/docs/zh_CN/Tutorial/HowToLaunchFromPython.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/HowToLaunchFromPython.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/HowToUseDocker.rst b/docs/zh_CN/Tutorial/HowToUseDocker.rst new file mode 120000 index 0000000000000000000000000000000000000000..ced9133b2f44b39525730b8d8052899a0f3f543c --- /dev/null +++ b/docs/zh_CN/Tutorial/HowToUseDocker.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/HowToUseDocker.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/HowToUseSharedStorage.rst b/docs/zh_CN/Tutorial/HowToUseSharedStorage.rst new file mode 120000 index 0000000000000000000000000000000000000000..f1a96759f61cdb0f71e22f4eaa059710092e3167 --- /dev/null +++ b/docs/zh_CN/Tutorial/HowToUseSharedStorage.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/HowToUseSharedStorage.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/InstallCustomizedAlgos.rst b/docs/zh_CN/Tutorial/InstallCustomizedAlgos.rst new file mode 120000 index 0000000000000000000000000000000000000000..79326f9931c76e4c3b2681d0c6797763e3ece0d4 --- /dev/null +++ b/docs/zh_CN/Tutorial/InstallCustomizedAlgos.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/InstallCustomizedAlgos.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/InstallationLinux.rst b/docs/zh_CN/Tutorial/InstallationLinux.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4b8064e1182c25adfc75f664795d98fc6a4bcc1 --- /dev/null +++ b/docs/zh_CN/Tutorial/InstallationLinux.rst @@ -0,0 +1,202 @@ +.. 1488ec09b21ac2a6c35b41f710c9211e + +在 Linux 和 Mac 下安装 +====================== + +安装 +------------ + +在 Linux 和 macOS 上安装,遵循以下相同的说明。 + +通过 pip 命令安装 NNI +^^^^^^^^^^^^^^^^^^^^^^^ + + 先决条件:``python 64-bit >= 3.6`` + +.. code-block:: bash + + python3 -m pip install --upgrade nni + +通过源代码安装 NNI +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + 如果对某个特定版本或最新的代码感兴趣,可通过源代码安装 NNI。 + + 先决条件:``python 64-bit >=3.6``, ``git`` + +.. code-block:: bash + + git clone -b v2.6 https://github.com/Microsoft/nni.git + cd nni + python3 -m pip install --upgrade pip setuptools + python3 setup.py develop
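+ +安装完成后,可以用下面的命令快速确认 NNI 能正常导入(示意命令,输出的版本号以实际安装为准): + +.. code-block:: bash + + # 假设:nni 包提供 __version__ 属性;若安装的是 v2.6,预期输出 2.6 + python3 -c "import nni; print(nni.__version__)" + +从 NNI 源代码构建 Wheel 包 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +上一节介绍了如何在 `开发模式 `__ 下安装 NNI。 +如果要执行持久安装,建议您构建自己的 wheel 软件包并从 wheel 安装。 + +.. 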
code-block:: bash + + git clone -b v2.6 https://github.com/Microsoft/nni.git + cd nni + export NNI_RELEASE=2.6 + python3 -m pip install --upgrade pip setuptools wheel + python3 setup.py clean --all + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-2.6-py3-none-manylinux1_x86_64.whl + +在 Docker 映像中使用 NNI +^^^^^^^^^^^^^^^^^^^^^^^^^ + + 也可将 NNI 安装到 docker 映像中。 参考 `这里 <../Tutorial/HowToUseDocker.rst>`__ 来生成 NNI 的 docker 映像。 也可通过此命令从 Docker Hub 中直接拉取 NNI 的映像 ``docker pull msranni/nni:latest``。 + +验证安装 +------------------- + +* + 通过克隆源代码下载示例。 + + .. code-block:: bash + + git clone -b v2.6 https://github.com/Microsoft/nni.git + +* + 运行 MNIST 示例。 + + .. code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config.yml + +* + 在命令行中等待输出 ``INFO: Successfully started experiment!`` 。 此消息表明实验已成功启动。 通过命令行输出的 Web UI url 来访问 Experiment 的界面。 + +.. code-block:: text + + INFO: Starting restful server... + INFO: Successfully started Restful server! + INFO: Setting local config... + INFO: Successfully set local config! + INFO: Starting experiment... + INFO: Successfully started experiment! + ----------------------------------------------------------------------- + The experiment id is egchD4qy + The Web UI urls are: http://223.255.255.1:8080 http://127.0.0.1:8080 + ----------------------------------------------------------------------- + + You can use these commands to get more information about the experiment + ----------------------------------------------------------------------- + commands description + 1. nnictl experiment show show the information of experiments + 2. nnictl trial ls list all of trial jobs + 3. nnictl top monitor the status of running experiments + 4. nnictl log stderr show stderr log content + 5. nnictl log stdout show stdout log content + 6. nnictl stop stop an experiment + 7. nnictl trial kill kill a trial job by id + 8. nnictl --help get help information about nnictl + ----------------------------------------------------------------------- + + +* 在浏览器中打开 ``Web UI url``,可看到下图的实验详细信息,以及所有的尝试任务。 查看 `这里 <../Tutorial/WebUI.rst>`__ 的更多页面。 + + +.. image:: ../../img/webui_overview_page.png + :target: ../../img/webui_overview_page.png + :alt: overview + + + +.. image:: ../../img/webui_trialdetail_page.png + :target: ../../img/webui_trialdetail_page.png + :alt: detail + + +系统需求 +------------------- + +由于程序变更,NNI 的最低配置会有所更改。 + +Linux +^^^^^ + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - + - 推荐配置 + - 最低配置 + * - **操作系统** + - Ubuntu 16.04 或以上版本 + - + * - **CPU** + - Intel® Core™ i5 或 AMD Phenom™ II X3 或更高配置 + - Intel® Core™ i3 或 AMD Phenom™ X3 8650 + * - **GPU** + - NVIDIA® GeForce® GTX 660 或更高配置 + - NVIDIA® GeForce® GTX 460 + * - **内存** + - 6 GB + - 4 GB + * - **存储** + - 30 GB 可用的磁盘空间 + - + * - **网络** + - 宽带连接 + - + * - **分辨率** + - 1024 x 768 以上 + - + + +macOS +^^^^^ + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - + - 推荐配置 + - 最低配置 + * - **操作系统** + - macOS 10.14.1 或更高版本 + - + * - **CPU** + - Intel® Core™ i7-4770 或更高 + - Intel® Core™ i5-760 或更高 + * - **GPU** + - AMD Radeon™ R9 M395X 或更高 + - NVIDIA® GeForce® GT 750M 或 AMD Radeon™ R9 M290 或更高 + * - **内存** + - 8 GB + - 4 GB + * - **存储** + - 70GB 可用空间 SSD 硬盘 + - 70GB 可用空间及 7200 RPM 硬盘 + * - **网络** + - 宽带连接 + - + * - **分辨率** + - 1024 x 768 以上 + - + + +更多 +--------------- + + +* `概述 <../Overview.rst>`__ +* `如何使用命令行工具 nnictl `__ +* `如何使用 NNIBoard `__ +* `定义搜索空间 `__ +* `定义实验配置 `__ +* `如何在本机运行 Experiment (支持多 GPU 卡)? 
<../TrainingService/LocalMode.rst>`__ +* `如何在多机上运行 Experiment? <../TrainingService/RemoteMachineMode.rst>`__ +* `如何在 OpenPAI 上运行 Experiment? <../TrainingService/PaiMode.rst>`__ +* `如何通过 Kubeflow 在 Kubernetes 上运行 Experiment? <../TrainingService/KubeflowMode.rst>`__ +* `如何通过 FrameworkController 在 Kubernetes 上运行 Experiment? <../TrainingService/FrameworkControllerMode.rst>`__ +* `如何通过 AdaptDL 在 Kubernetes 上运行 Experiment? <../TrainingService/AdaptDLMode.rst>`__ diff --git a/docs/zh_CN/Tutorial/InstallationWin.rst b/docs/zh_CN/Tutorial/InstallationWin.rst new file mode 100644 index 0000000000000000000000000000000000000000..43cc5003a0595b92b9285a220228d0f10df671c1 --- /dev/null +++ b/docs/zh_CN/Tutorial/InstallationWin.rst @@ -0,0 +1,214 @@ +.. acdfab53c8209a53709a5bdca72d29b2 + +在 Windows 上安装 +================== + +先决条件 +----------- + + +* + Python 3.6(或以上)64 位。 在 Windows 上推荐使用 `Anaconda `__ 或 `Miniconda `__ 来管理多个 Python 环境。 + +* + 如果是新安装的 Python 环境,需要安装 `Microsoft C++ Build Tools `__ 来支持 NNI 的依赖项,如 ``scikit-learn``。 + + .. code-block:: bat + + pip install cython wheel + +* + 安装 git 用于验证安装。 + +安装 NNI +----------- + +大多数情况下,可以从 pip 包安装和升级 NNI。 这样既方便又快捷。 + +如果对某个特定版本或最新的代码感兴趣,可通过源代码安装 NNI。 + +如果要为 NNI 贡献代码,参考 `设置开发环境 `__。 + + +* + 从 pip 包安装 + + .. code-block:: bat + + python -m pip install --upgrade nni + +* + 从源代码安装 + + .. code-block:: bat + + git clone -b v2.6 https://github.com/Microsoft/nni.git + cd nni + python setup.py develop + +验证安装 +------------------- + +* + 克隆源代码中的示例。 + + .. code-block:: bat + + git clone -b v2.6 https://github.com/Microsoft/nni.git + +* + 运行 MNIST 示例。 + + .. code-block:: bat + + nnictl create --config nni\examples\trials\mnist-pytorch\config_windows.yml + + 注意:如果熟悉其它框架,可选择 ``examples\trials`` 目录下对应的示例。 需要将示例 YAML 文件中 Trial 命令的 ``python3`` 改为 ``python``,这是因为默认安装的 Python 可执行文件是 ``python.exe``,没有 ``python3.exe``。 + +* + 在命令行中等待输出 ``INFO: Successfully started experiment!`` 。 此消息表明实验已成功启动。 通过命令行输出的 Web UI url 来访问 Experiment 的界面。 + +.. code-block:: text + + INFO: Starting restful server... + INFO: Successfully started Restful server! + INFO: Setting local config... + INFO: Successfully set local config! + INFO: Starting experiment... + INFO: Successfully started experiment! + ----------------------------------------------------------------------- + The experiment id is egchD4qy + The Web UI urls are: http://223.255.255.1:8080 http://127.0.0.1:8080 + ----------------------------------------------------------------------- + + You can use these commands to get more information about the experiment + ----------------------------------------------------------------------- + commands description + 1. nnictl experiment show show the information of experiments + 2. nnictl trial ls list all of trial jobs + 3. nnictl top monitor the status of running experiments + 4. nnictl log stderr show stderr log content + 5. nnictl log stdout show stdout log content + 6. nnictl stop stop an experiment + 7. nnictl trial kill kill a trial job by id + 8. nnictl --help get help information about nnictl + ----------------------------------------------------------------------- + + +* 在浏览器中打开 ``Web UI url``,可看到下图的实验详细信息,以及所有的尝试任务。 查看 `这里 <../Tutorial/WebUI.rst>`__ 的更多页面。 + + +.. image:: ../../img/webui_overview_page.png + :target: ../../img/webui_overview_page.png + :alt: overview + + + +.. 
image:: ../../img/webui_trialdetail_page.png + :target: ../../img/webui_trialdetail_page.png + :alt: detail + + +系统需求 +------------------- + +以下是 NNI 在 Windows 上的最低配置,推荐使用 Windows 10 1809 版。 由于程序变更,NNI 的最低配置会有所更改。 + +.. list-table:: + :header-rows: 1 + :widths: auto + + * - + - 推荐配置 + - 最低配置 + * - **操作系统** + - Windows 10 1809 或更高版本 + - + * - **CPU** + - Intel® Core™ i5 或 AMD Phenom™ II X3 或更高配置 + - Intel® Core™ i3 或 AMD Phenom™ X3 8650 + * - **GPU** + - NVIDIA® GeForce® GTX 660 或更高配置 + - NVIDIA® GeForce® GTX 460 + * - **内存** + - 6 GB + - 4 GB + * - **存储** + - 30 GB 可用的磁盘空间 + - + * - **网络** + - 宽带连接 + - + * - **分辨率** + - 1024 x 768 以上 + - + + +常见问答 +------------ + +安装 NNI 时出现 simplejson 错误 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +确保安装了 C++ 14.0 编译器。 + +.. + + building 'simplejson._speedups' extension error: [WinError 3] The system cannot find the path specified + + +在命令行或 PowerShell 中,Trial 因为缺少 DLL 而失败 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +此错误因为缺少 LIBIFCOREMD.DLL 和 LIBMMD.DLL 文件,且 SciPy 安装失败。 使用 Anaconda 或 Miniconda 和 Python(64位)可解决。 + +.. + + ImportError: DLL load failed + + +Web 界面上的 Trial 错误 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +检查 Trial 日志文件来了解详情。 + +如果存在 stderr 文件,也需要查看其内容。 两种可能的情况是: + + +* 忘记将 Experiment 配置的 Trial 命令中的 ``python3`` 改为 ``python``。 +* 忘记安装 Experiment 的依赖,如 TensorFlow,Keras 等。 + +无法在 Windows 上使用 BOHB +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +确保安装了 C ++ 14.0 编译器然后尝试运行 ``pip install nni[BOHB]`` 来安装依赖项。 + +Windows 上不支持的 Tuner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +当前不支持 SMAC,原因可参考 `此问题 `__。 + +用 Windows 作为远程节点 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +参考 `远程模式 <../TrainingService/RemoteMachineMode.rst>`__. + +安装时出现 Segmentation Fault (core dumped) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +参考 `常见问题 `__。 + +更多 +--------------- + + +* `概述 <../Overview.rst>`__ +* `如何使用命令行工具 nnictl `__ +* `如何使用 NNIBoard `__ +* `定义搜索空间 `__ +* `定义实验配置 `__ +* `如何在本机运行 Experiment (支持多 GPU 卡)? <../TrainingService/LocalMode.rst>`__ +* `如何在多机上运行 Experiment? <../TrainingService/RemoteMachineMode.rst>`__ +* `如何在 OpenPAI 上运行 Experiment? <../TrainingService/PaiMode.rst>`__ +* `如何通过 Kubeflow 在 Kubernetes 上运行 Experiment? <../TrainingService/KubeflowMode.rst>`__ +* `如何通过 FrameworkController 在 Kubernetes 上运行 Experiment? <../TrainingService/FrameworkControllerMode.rst>`__ diff --git a/docs/zh_CN/Tutorial/NNSpider.md b/docs/zh_CN/Tutorial/NNSpider.md new file mode 120000 index 0000000000000000000000000000000000000000..e8fc12ec166852d0e0caa98be375c6c30903b82d --- /dev/null +++ b/docs/zh_CN/Tutorial/NNSpider.md @@ -0,0 +1 @@ +../../en_US/Tutorial/NNSpider.md \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/Nnictl.rst b/docs/zh_CN/Tutorial/Nnictl.rst new file mode 120000 index 0000000000000000000000000000000000000000..ef36ba19e400551efe3604b09e90a7b145fa2ee8 --- /dev/null +++ b/docs/zh_CN/Tutorial/Nnictl.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/Nnictl.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/QuickStart.rst b/docs/zh_CN/Tutorial/QuickStart.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef56b07a35ee6b88746a7dd7aeaa71860e237d2b --- /dev/null +++ b/docs/zh_CN/Tutorial/QuickStart.rst @@ -0,0 +1,287 @@ +.. df9198c942071002424be17beef23cf5 + +快速入门 +========== + +安装 +---- + +目前NNI支持了 Linux、macOS 和 Windows系统。 其中,Ubuntu 16.04 及更高版本、macOS 10.14.1 和 Windows 10.1809 均经过测试并支持。 在 ``python >= 3.6`` 环境中,只需运行 ``pip install`` 即可完成安装。 + +Linux 和 macOS +^^^^^^^^^^^^^^ + +.. 
code-block:: bash + + python3 -m pip install --upgrade nni + +Windows +^^^^^^^ + +.. code-block:: bash + + python -m pip install --upgrade nni + +.. Note:: 在 Linux 和 macOS 上,如果要将 NNI 安装到当前用户的 home 目录中,可使用 ``--user`` ;这不需要特殊权限。 + +.. Note:: 如果出现 ``Segmentation fault`` 这样的错误,参考 :doc:`常见问题 ` 。 + +.. Note:: NNI 的系统需求,参考 :doc:`Linux & Mac ` 或者 :doc:`Windows ` 的安装教程。如果想要使用 docker, 参考 :doc:`如何使用 Docker ` 。 + + +MNIST 上的 "Hello World" +------------------------ + +NNI 是一个能进行自动机器学习实验的工具包。 它可以自动完成获取超参、运行 Trial、测试结果、调优超参的循环。 在这里,将演示如何使用 NNI 帮助找到 MNIST 模型的最佳超参数。 + +这是还 **没有 NNI** 的示例代码,用 CNN 在 MNIST 数据集上训练: + +.. code-block:: python + + def main(args): + # 下载数据 + train_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=args['batch_size'], shuffle=True) + test_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=1000, shuffle=True) + # 构建模型 + model = Net(hidden_size=args['hidden_size']) + optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum']) + # 训练 + for epoch in range(10): + train(args, model, device, train_loader, optimizer, epoch) + test_acc = test(args, model, device, test_loader) + print(test_acc) + print('final accuracy:', test_acc) + + if __name__ == '__main__': + params = { + 'batch_size': 32, + 'hidden_size': 128, + 'lr': 0.001, + 'momentum': 0.5 + } + main(params) + +上面的代码一次只能尝试一组参数,如果想要调优学习率,需要手工改动超参,并一次次尝试。 + +NNI 用来帮助超参调优。它的流程如下: + +.. code-block:: text + + 输入: 搜索空间, Trial 代码, 配置文件 + 输出: 一组最优的参数配置 + + 1: For t = 0, 1, 2, ..., maxTrialNum, + 2: hyperparameter = 从搜索空间选择一组参数 + 3: final result = run_trial_and_evaluate(hyperparameter) + 4: 返回最终结果给 NNI + 5: If 时间达到上限, + 6: 停止实验 + 7: 返回最好的实验结果 + +.. note:: + + 如果需要使用 NNI 来自动训练模型,找到最佳超参,有两种实现方式: + + 1. 编写配置文件,然后使用命令行启动 experiment; + 2. 直接从 Python 文件中配置并启动 experiment。 + + 在本节中,我们将重点介绍第一种实现方式。如果希望使用第二种实现方式,请参考 `教程 `__\ 。 + + +第一步:修改 ``Trial`` 代码 +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +修改 ``Trial`` 代码来从 NNI 获取超参,并向 NNI 报告训练结果。 + +.. code-block:: diff + + + import nni + + def main(args): + # 下载数据 + train_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=args['batch_size'], shuffle=True) + test_loader = torch.utils.data.DataLoader(datasets.MNIST(...), batch_size=1000, shuffle=True) + # 构造模型 + model = Net(hidden_size=args['hidden_size']) + optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum']) + # 训练 + for epoch in range(10): + train(args, model, device, train_loader, optimizer, epoch) + test_acc = test(args, model, device, test_loader) + - print(test_acc) + + nni.report_intermediate_result(test_acc) + - print('final accuracy:', test_acc) + + nni.report_final_result(test_acc) + + if __name__ == '__main__': + - params = {'batch_size': 32, 'hidden_size': 128, 'lr': 0.001, 'momentum': 0.5} + + params = nni.get_next_parameter() + main(params) + +*示例:* :githublink:`mnist.py ` + + +第二步:定义搜索空间 +^^^^^^^^^^^^^^^^^^^ + +编写 YAML 格式的 **搜索空间** 文件,包括所有需要搜索的超参的 **名称** 和 **分布** (离散和连续值均可)。 + +.. code-block:: yaml + + searchSpace: + batch_size: + _type: choice + _value: [16, 32, 64, 128] + hidden_size: + _type: choice + _value: [128, 256, 512, 1024] + lr: + _type: choice + _value: [0.0001, 0.001, 0.01, 0.1] + momentum: + _type: uniform + _value: [0, 1] + +*示例:* :githublink:`config_detailed.yml ` + +也可以使用 JSON 文件来编写搜索空间,并在配置中指定文件路径。关于如何编写搜索空间,可以参考 `教程 `__.
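+ +作为对照,同样的搜索空间写成 JSON 文件大致如下(与上面的 YAML 内容等价,仅为示意): + +.. code-block:: json + + { + "batch_size": {"_type": "choice", "_value": [16, 32, 64, 128]}, + "hidden_size": {"_type": "choice", "_value": [128, 256, 512, 1024]}, + "lr": {"_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1]}, + "momentum": {"_type": "uniform", "_value": [0, 1]} + } + + +第三步:配置 experiment +^^^^^^^^^^^^^^^^^^^^^^ + +除了在第二步中定义的搜索空间,还需要定义 YAML 格式的 **配置** 文件,声明 experiment 的关键信息,例如 Trial 文件,调优算法,最大 Trial 运行次数和最大持续时间等。 + +.. 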
code-block:: yaml + + experimentName: MNIST # 用于区分 experiment 的名字,可选项 + trialCommand: python3 mnist.py # 注意:如果使用 Windows,请将 "python3" 修改为 "python" + trialConcurrency: 2 # 同时运行 2 个 trial + maxTrialNumber: 10 # 最多生成 10 个 trial + maxExperimentDuration: 1h # 1 小时后停止生成 trial + tuner: # 配置调优算法 + name: TPE + classArgs: # 算法特定参数 + optimize_mode: maximize + trainingService: # 配置训练平台 + platform: local + +Experiment 的配置文件可以参考 `文档 <../reference/experiment_config.rst>`__. + +.. _nniignore: + +.. Note:: 如果要使用远程服务器或集群作为 :doc:`训练平台 <../TrainingService/Overview>`,为了避免产生过大的网络压力,NNI 限制了文件的最大数量为 2000,大小为 300 MB。 如果代码目录中包含了过多的文件,可添加 ``.nniignore`` 文件来排除部分,与 ``.gitignore`` 文件用法类似。 参考 `git documentation `__ ,了解更多如何编写此文件的详细信息。 + +*示例:* :githublink:`config.yml ` 和 :githublink:`.nniignore ` + +上面的代码都已准备好,并保存在 :githublink:`examples/trials/mnist-pytorch/ `。 + + +第四步:运行 experiment +^^^^^^^^^^^^^^^^^^^^^^ + +Linux 和 macOS +************** + +从命令行使用 **config_detailed.yml** 文件启动 MNIST experiment 。 + +.. code-block:: bash + + nnictl create --config nni/examples/trials/mnist-pytorch/config_detailed.yml + +Windows +******* + +在 **config_detailed.yml** 文件的 ``trialCommand`` 项中将 ``python3`` 修改为 ``python``,然后从命令行使用 **config_detailed.yml** 文件启动 MNIST experiment 。 + +.. code-block:: bash + + nnictl create --config nni\examples\trials\mnist-pytorch\config_detailed.yml + +.. Note:: ``nnictl`` 是一个命令行工具,用来控制 NNI experiment,如启动、停止、继续 experiment,启动、停止 NNIBoard 等等。 点击 :doc:`这里 ` 查看 ``nnictl`` 的更多用法。 + +在命令行中等待输出 ``INFO: Successfully started experiment!`` 。 此消息表明实验已成功启动。 期望的输出如下: + +.. code-block:: text + + INFO: Starting restful server... + INFO: Successfully started Restful server! + INFO: Setting local config... + INFO: Successfully set local config! + INFO: Starting experiment... + INFO: Successfully started experiment! + ----------------------------------------------------------------------- + The experiment id is egchD4qy + The Web UI urls are: [Your IP]:8080 + ----------------------------------------------------------------------- + + You can use these commands to get more information about the experiment + ----------------------------------------------------------------------- + commands description + 1. nnictl experiment show show the information of experiments + 2. nnictl trial ls list all of trial jobs + 3. nnictl top monitor the status of running experiments + 4. nnictl log stderr show stderr log content + 5. nnictl log stdout show stdout log content + 6. nnictl stop stop an experiment + 7. nnictl trial kill kill a trial job by id + 8. nnictl --help get help information about nnictl + ----------------------------------------------------------------------- + +如果根据上述步骤准备好了相应的 ``Trial`` 代码、**搜索空间** 和 **配置**,并成功创建了 NNI 任务,NNI 就会自动在配置的搜索空间内尝试不同的超参组合,搜索最好的超参。 通过 Web 界面可看到 NNI 的进度。 + +第五步:查看 experiment +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +启动 experiment 后,可以在命令行界面找到如下的 **Web 界面地址** : + +.. code-block:: text + + The Web UI urls are: [Your IP]:8080 + +在浏览器中打开 **Web 界面地址** (即: ``[IP 地址]:8080`` ),就可以看到 experiment 的详细信息,以及所有的 Trial 任务。 如果无法打开终端中的 Web 界面链接,可以参考 `常见问题 `__。 + + +查看概要页面 +****************** + +Experiment 相关信息会显示在界面上,包括配置和搜索空间等。 NNI 还支持通过 **Experiment summary** 按钮下载这些信息和参数。 + +.. image:: ../../img/webui-img/full-oview.png + :target: ../../img/webui-img/full-oview.png + :alt: overview + + +查看 Trial 详情页面 +*********************** + +可以在此页面中看到最佳的 ``Trial`` 指标和超参数图。 您可以点击 ``Add/Remove columns`` 按钮向表格中添加更多列。 + +.. 
+
+第五步:查看 experiment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+启动 experiment 后,可以在命令行界面找到如下的 **Web 界面地址** :
+
+.. code-block:: text
+
+   The Web UI urls are: [Your IP]:8080
+
+在浏览器中打开 **Web 界面地址** (即: ``[IP 地址]:8080`` ),就可以看到 experiment 的详细信息,以及所有的 Trial 任务。 如果无法打开终端中的 Web 界面链接,可以参考 `常见问题 `__。
+
+
+查看概要页面
+******************
+
+Experiment 相关信息会显示在界面上,包括配置和搜索空间等。 NNI 还支持通过 **Experiment summary** 按钮下载这些信息和参数。
+
+.. image:: ../../img/webui-img/full-oview.png
+   :target: ../../img/webui-img/full-oview.png
+   :alt: overview
+
+
+查看 Trial 详情页面
+***********************
+
+可以在此页面中看到最佳的 ``Trial`` 指标和超参数图。 您可以点击 ``Add/Remove columns`` 按钮向表格中添加更多列。
+
+.. image:: ../../img/webui-img/full-detail.png
+   :target: ../../img/webui-img/full-detail.png
+   :alt: detail
+
+
+查看 experiment 管理页面
+************************
+
+``All experiments`` 页面可以查看计算机上的所有实验。
+
+.. image:: ../../img/webui-img/managerExperimentList/expList.png
+   :target: ../../img/webui-img/managerExperimentList/expList.png
+   :alt: Experiments list
+
+更多信息可参考 `此文档 <./WebUI.rst>`__。
+
+
+相关主题
+-------------
+
+* `进行 Debug `__
+* `如何实现 Trial 代码 <../TrialExample/Trials.rst>`__
+* `尝试不同的 Tuner <../Tuner/BuiltinTuner.rst>`__
+* `尝试不同的 Assessor <../Assessor/BuiltinAssessor.rst>`__
+* `在不同训练平台上运行 experiment <../training_services.rst>`__
+* `如何使用 Annotation `__
+* `如何使用命令行工具 nnictl `__
+* `在 Web 界面中启动 TensorBoard `__ diff --git a/docs/zh_CN/Tutorial/SearchSpaceSpec.rst b/docs/zh_CN/Tutorial/SearchSpaceSpec.rst new file mode 120000 index 0000000000000000000000000000000000000000..98815b6fbf3f7091452cca7f2e6ca3858648ad62 --- /dev/null +++ b/docs/zh_CN/Tutorial/SearchSpaceSpec.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/SearchSpaceSpec.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/SetupNniDeveloperEnvironment.rst b/docs/zh_CN/Tutorial/SetupNniDeveloperEnvironment.rst new file mode 120000 index 0000000000000000000000000000000000000000..a17cfa9eda46884dd0391d94f32876b75ee580dc --- /dev/null +++ b/docs/zh_CN/Tutorial/SetupNniDeveloperEnvironment.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/SetupNniDeveloperEnvironment.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/Tensorboard.rst b/docs/zh_CN/Tutorial/Tensorboard.rst new file mode 120000 index 0000000000000000000000000000000000000000..2ab33ccdc0b3121ed3230764dc4571d369556b29 --- /dev/null +++ b/docs/zh_CN/Tutorial/Tensorboard.rst @@ -0,0 +1 @@ +../../en_US/Tutorial/Tensorboard.rst \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/WebUI.rst b/docs/zh_CN/Tutorial/WebUI.rst new file mode 100644 index 0000000000000000000000000000000000000000..67ebc27325d2054bb0e5bd996829fc266b705c03 --- /dev/null +++ b/docs/zh_CN/Tutorial/WebUI.rst @@ -0,0 +1,329 @@
+.. bb68c969dbc2b3a2ec79d323cbd31401
+
+Web 界面
+==================
+
+Experiments 管理
+-----------------------
+
+点击导航栏上的 ``All experiments`` 标签。
+
+.. image:: ../../img/webui-img/managerExperimentList/experimentListNav.png
+   :target: ../../img/webui-img/managerExperimentList/experimentListNav.png
+   :alt: ExperimentList nav
+
+
+
+* 在 ``All experiments`` 页面,可以看到机器上的所有 Experiment。
+
+.. image:: ../../img/webui-img/managerExperimentList/expList.png
+   :target: ../../img/webui-img/managerExperimentList/expList.png
+   :alt: Experiments list
+
+
+
+* 查看 Experiment 更多详细信息时,可以单击 trial ID 跳转至该 Experiment 详情页,如下所示:
+
+.. image:: ../../img/webui-img/managerExperimentList/toAnotherExp.png
+   :target: ../../img/webui-img/managerExperimentList/toAnotherExp.png
+   :alt: See this experiment detail
+
+
+
+* 如果表格里有很多 Experiment,可以使用 ``filter`` 按钮。
+
+.. image:: ../../img/webui-img/managerExperimentList/expFilter.png
+   :target: ../../img/webui-img/managerExperimentList/expFilter.png
+   :alt: filter button
+
+
+
+查看概要页面
+-----------------
+
+点击 ``Overview`` 标签。
+
+
+* 在 Overview 标签上,可看到 Experiment trial 的概况、搜索空间以及 ``top trials`` 的结果。
+
+
+.. image:: ../../img/webui-img/full-oview.png
+   :target: ../../img/webui-img/full-oview.png
+   :alt: overview
+
+
+
+如果想查看 Experiment 配置和搜索空间,点击右边的 ``Search space`` 和 ``Config`` 按钮。
+
+   1. 搜索空间文件:
+
+
+   .. image:: ../../img/webui-img/searchSpace.png
+      :target: ../../img/webui-img/searchSpace.png
+      :alt: searchSpace
+
+
+
+   2. 配置文件:
+
+
+   .. image:: ../../img/webui-img/config.png
+      :target: ../../img/webui-img/config.png
+      :alt: config
+
+
+
+* 你可以在这里查看和下载 ``nni-manager/dispatcher 日志文件``。
+
+
+.. image:: ../../img/webui-img/review-log.png
+   :target: ../../img/webui-img/review-log.png
+   :alt: logfile
+
+
+
+* 如果 Experiment 包含了较多 Trial,可改变刷新间隔。
+
+
+.. image:: ../../img/webui-img/refresh-interval.png
+   :target: ../../img/webui-img/refresh-interval.png
+   :alt: refresh
+
+
+
+
+* 单击按钮 ``Experiment summary`` ,可以查看和下载 Experiment 结果(``Experiment 配置``、``trial 信息`` 和 ``中间结果``)。
+
+
+.. image:: ../../img/webui-img/summary.png
+   :target: ../../img/webui-img/summary.png
+   :alt: summary
+
+
+
+* 在这里修改 Experiment 配置(例如 ``maxExecDuration``、``maxTrialNum`` 和 ``trial concurrency``)。
+
+
+.. image:: ../../img/webui-img/edit-experiment-param.png
+   :target: ../../img/webui-img/edit-experiment-param.png
+   :alt: editExperimentParams
+
+
+
+* 通过单击 ``Learn about`` ,可以查看错误消息和 ``nni-manager/dispatcher 日志文件``。
+
+
+.. image:: ../../img/webui-img/experimentError.png
+   :target: ../../img/webui-img/experimentError.png
+   :alt: experimentError
+
+
+
+
+* ``About`` 菜单内含有版本信息以及问题反馈渠道。
+
+查看 trial 最终结果
+----------------------------------------------
+
+
+* ``Default metric`` 是所有 trial 的最终结果图。 在每一个结果上悬停鼠标可以看到 trial 信息,比如 trial id、No.、超参等。
+
+
+.. image:: ../../img/webui-img/default-metric.png
+   :target: ../../img/webui-img/default-metric.png
+   :alt: defaultMetricGraph
+
+
+
+* 打开 ``Optimization curve`` 来查看 Experiment 的优化曲线。
+
+
+.. image:: ../../img/webui-img/best-curve.png
+   :target: ../../img/webui-img/best-curve.png
+   :alt: bestCurveGraph
+
+
+查看超参
+--------------------
+
+单击 ``Hyper-parameter`` 标签查看平行坐标系图。
+
+
+* 可以点击 ``添加/删除`` 按钮来添加或删减纵坐标轴。
+* 直接在图上拖动轴线来交换轴线位置。
+* 通过调节百分比来查看 top trial。
+
+
+.. image:: ../../img/webui-img/hyperPara.png
+   :target: ../../img/webui-img/hyperPara.png
+   :alt: hyperParameterGraph
+
+
+
+查看 Trial 运行时间
+-------------------
+
+点击 ``Trial Duration`` 标签来查看柱状图。
+
+
+.. image:: ../../img/webui-img/trial_duration.png
+   :target: ../../img/webui-img/trial_duration.png
+   :alt: trialDurationGraph
+
+
+
+查看 Trial 中间结果
+------------------------------------
+
+单击 ``Intermediate Result`` 标签查看折线图。
+
+
+.. image:: ../../img/webui-img/trials_intermeidate.png
+   :target: ../../img/webui-img/trials_intermeidate.png
+   :alt: trialIntermediateGraph
+
+
+
+Trial 在训练过程中可能有大量中间结果。 为了更清楚地理解一些 Trial 的趋势,可以为中间结果图设置过滤功能。
+
+这样可以发现 Trial 在某个中间结果上会变得更好或更差,这表明它是一个重要且相关的中间结果。 如果要仔细查看这个点,可以在 #Intermediate 中输入其 X 坐标,并输入这个中间结果的指标范围。 在下图中,选择了第四个中间结果,并将指标范围设置为 0.8 - 1。
+
+
+.. image:: ../../img/webui-img/filter-intermediate.png
+   :target: ../../img/webui-img/filter-intermediate.png
+   :alt: filterIntermediateGraph
+
+
+
+查看 Trial 状态
+------------------
+
+点击 ``Trials Detail`` 标签查看所有 Trial 的状态。具体如下:
+
+
+* Trial 详情:Trial id,持续时间,开始时间,结束时间,状态,精度和 search space 文件。
+
+
+.. image:: ../../img/webui-img/detail-local.png
+   :target: ../../img/webui-img/detail-local.png
+   :alt: detailLocalImage
+
+
+
+* 支持通过 id,状态,Trial 编号以及参数来搜索。
+
+1. Trial id:
+
+.. image:: ../../img/webui-img/detail/searchId.png
+   :target: ../../img/webui-img/detail/searchId.png
+   :alt: searchTrialId
+
+
+2. Trial No.:
+
+.. image:: ../../img/webui-img/detail/searchNo.png
+   :target: ../../img/webui-img/detail/searchNo.png
+   :alt: searchTrialNo.
+
+
+3. Trial 状态:
+
+.. image:: ../../img/webui-img/detail/searchStatus.png
+   :target: ../../img/webui-img/detail/searchStatus.png
+   :alt: searchStatus
+
+4. Trial 参数:
+
+(1) 类型为 choice 的参数:
+
+.. image:: ../../img/webui-img/detail/searchParameterChoice.png
+   :target: ../../img/webui-img/detail/searchParameterChoice.png
+   :alt: searchParameterChoice
+
+(2) 类型不是 choice 的参数:
+
+.. image:: ../../img/webui-img/detail/searchParameterRange.png
+   :target: ../../img/webui-img/detail/searchParameterRange.png
+   :alt: searchParameterRange
+
+
+* ``Add column`` 按钮可选择在表格中显示的列。 如果 Experiment 的最终结果是 dict,则可以在表格中查看其它键。可选择 ``Intermediate count`` 列来查看 Trial 进度。
+
+
+.. image:: ../../img/webui-img/addColumn.png
+   :target: ../../img/webui-img/addColumn.png
+   :alt: addColumnGraph
+
+
+
+* 如果要比较某些 Trial,可选择并点击 ``Compare`` 来查看结果。
+
+
+.. image:: ../../img/webui-img/select-trial.png
+   :target: ../../img/webui-img/select-trial.png
+   :alt: selectTrialGraph
+
+
+.. image:: ../../img/webui-img/compare.png
+   :target: ../../img/webui-img/compare.png
+   :alt: compareTrialsGraph
+
+
+* ``Tensorboard`` 请参考 `此文档 `__。
+
+
+
+* 可使用 ``Copy as python`` 按钮来拷贝 Trial 的参数。
+
+
+.. image:: ../../img/webui-img/copyParameter.png
+   :target: ../../img/webui-img/copyParameter.png
+   :alt: copyTrialParameters
+
+
+
+* 您可以在 ``Log`` 选项卡上看到 Trial 日志。 在本地模式下有 ``View trial log``、``View trial error`` 和 ``View trial stdout`` 三个按钮。 如果在 OpenPAI 或 Kubeflow 平台上运行,还可以看到 hdfsLog。
+
+1. 本机模式:
+
+.. image:: ../../img/webui-img/detail/log-local.png
+   :target: ../../img/webui-img/detail/log-local.png
+   :alt: logOnLocal
+
+
+2. OpenPAI、Kubeflow 等模式:
+
+.. image:: ../../img/webui-img/detail-pai.png
+   :target: ../../img/webui-img/detail-pai.png
+   :alt: detailPai
+
+
+* 中间结果图:可在此图中通过点击 intermediate 按钮来查看默认指标。
+
+
+.. image:: ../../img/webui-img/intermediate.png
+   :target: ../../img/webui-img/intermediate.png
+   :alt: intermediateGraph
+
+
+
+* Kill: 可终止正在运行的任务。
+
+
+.. image:: ../../img/webui-img/kill-running.png
+   :target: ../../img/webui-img/kill-running.png
+   :alt: killTrial
+
+
+
+* 自定义 Trial:您可以更改此 Trial 的参数,然后将其提交给 Experiment。 如果您想重新运行失败的 Trial,您可以向 Experiment 提交相同的参数。
+
+.. image:: ../../img/webui-img/detail/customizedTrialButton.png
+   :target: ../../img/webui-img/detail/customizedTrialButton.png
+   :alt: customizedTrialButton
+
+
+
+.. image:: ../../img/webui-img/detail/customizedTrial.png
+   :target: ../../img/webui-img/detail/customizedTrial.png
+   :alt: customizedTrial diff --git a/docs/zh_CN/Tutorial/python_api_connect.ipynb b/docs/zh_CN/Tutorial/python_api_connect.ipynb new file mode 120000 index 0000000000000000000000000000000000000000..c0895b640652219863e5fd50e92938d40030024a --- /dev/null +++ b/docs/zh_CN/Tutorial/python_api_connect.ipynb @@ -0,0 +1 @@ +../../en_US/Tutorial/python_api_connect.ipynb \ No newline at end of file diff --git a/docs/zh_CN/Tutorial/python_api_start.ipynb b/docs/zh_CN/Tutorial/python_api_start.ipynb new file mode 120000 index 0000000000000000000000000000000000000000..bb06d24f99fbbbceb6642a2e96c0f83cd0ebdfd9 --- /dev/null +++ b/docs/zh_CN/Tutorial/python_api_start.ipynb @@ -0,0 +1 @@ +../../en_US/Tutorial/python_api_start.ipynb \ No newline at end of file diff --git a/docs/zh_CN/_templates/index.html b/docs/zh_CN/_templates/index.html new file mode 100644 index 0000000000000000000000000000000000000000..9a8162cad22f5caef80dd929bf9df3797353a990 --- /dev/null +++ b/docs/zh_CN/_templates/index.html @@ -0,0 +1,460 @@ +{% extends "!layout.html" %} + +{% set title = "欢迎使用 Neural Network Intelligence !!!"%} + +{% block document %} + +
+ + NNI (Neural Network Intelligence) 是一个轻量但强大的工具包,帮助用户自动的进行 + 特征工程神经网络架构搜索超参调优以及模型压缩。 +
+

+ NNI 管理自动机器学习 (AutoML) 的 Experiment, + 调度运行 + 由调优算法生成的 Trial 任务来找到最好的神经网络架构和/或超参,支持 + 各种训练环境,如 + 本机, + 远程服务器, + OpenPAI, + Kubeflow, + 基于 K8S 的 FrameworkController(如,AKS 等), + DLWorkspace (又称 DLTS), + AML (Azure Machine Learning) +以及其它云服务。 +

+ +
+

使用场景

+
    +
  • 想要在自己的代码、模型中试验不同的自动机器学习算法
  • +
  • 想要在不同的环境中加速运行自动机器学习。
  • +
  • 想要更容易实现或试验新的自动机器学习算法的研究员或数据科学家,包括:超参调优算法,神经网络搜索算法以及模型压缩算法。 +
  • +
  • 在机器学习平台中支持自动机器学习
  • +
+
+ + + +
+

NNI 功能一览

+

+ NNI 提供命令行工具以及友好的 WebUI 来管理训练的 Experiment。 + 通过可扩展的 API,可定制自动机器学习算法和训练平台。 + 为了方便新用户,NNI 内置了最新的自动机器学习算法,并为流行的训练平台提供了开箱即用的支持。 +

+

+ 下表中,包含了 NNI 的功能,同时在不断地增添新功能,也非常希望您能贡献其中。 +

+
+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + +
+ 框架和库 + + 算法 + + 训练平台 +
内置 +
    +
  • 支持的框架
  • +
      +
    • PyTorch
    • +
    • Keras
    • +
    • TensorFlow
    • +
    • MXNet
    • +
    • Caffe2
    • + 更多...
      +
    +
+
    +
  • 支持的库
  • +
      +
    • Scikit-learn
    • +
    • XGBoost
    • +
    • LightGBM
    • + 更多...
      +
    +
+ +
+ 超参调优 + + 神经网络架构搜索 + + 模型压缩 + + 特征工程(测试版) + + 提前终止算法 + + + +
参考 + + + + + +
+ + +
+

安装

+
+

安装

+

+ NNI 支持并在 Ubuntu >= 16.04, macOS >= 10.14.1, 和 Windows 10 >= 1809 通过了测试。 在 python 64-bit >= 3.6 的环境中,只需要运行 pip install 即可完成安装。 +

+
Linux 或 macOS
+
python3 -m pip install --upgrade nni
+
Windows
+
python -m pip install --upgrade nni
+

如果想要尝试最新代码,可通过源代码安装 + NNI。 +

+

Linux 和 macOS 下 NNI 系统需求参考这里,Windows 参考这里

+
+
+

注意:

+
    +
  • 如果遇到任何权限问题,可添加 --user 在用户目录中安装 NNI。
  • +
  • 目前,Windows 上的 NNI 支持本机,远程和 OpenPAI 模式。 强烈推荐使用 Anaconda 或 Miniconda 在 Windows 上安装 NNI
  • +
  • 如果遇到如 Segmentation fault 这样的任何错误,请参考 常见问题。 Windows 上的常见问题,参考在 Windows 上使用 NNI。
  • +
+
+
+

验证安装

+

+ 以下示例基于 TensorFlow 1.x 构建。 确保运行环境中使用的是 TensorFlow 1.x。 +

+
    +
  • +

    通过克隆源代码下载示例。

    +
    git clone -b {{ release }} https://github.com/Microsoft/nni.git
    +
  • +
  • +

    运行 MNIST 示例。

    +
    Linux 或 macOS
    +
    nnictl create --config nni/examples/trials/mnist-tfv1/config.yml
    +
    Windows
    +
    nnictl create --config nni\examples\trials\mnist-tfv1\config_windows.yml
    +
  • +
  • +

    + 在命令行中等待输出 INFO: Successfully started experiment! + 此消息表明 Experiment 已成功启动。 + 通过命令行输出的 Web UI url 来访问 Experiment 的界面。 +

    + +
    +INFO: Starting restful server...
    +INFO: Successfully started Restful server!
    +INFO: Setting local config...
    +INFO: Successfully set local config!
    +INFO: Starting experiment...
    +INFO: Successfully started experiment!
    +-----------------------------------------------------------------------
    +The experiment id is egchD4qy
    +The Web UI urls are: http://223.255.255.1:8080   http://127.0.0.1:8080
    +-----------------------------------------------------------------------
    +
    +You can use these commands to get more information about the experiment
    +-----------------------------------------------------------------------
    +  commands                       description
    +1. nnictl experiment show        show the information of experiments
    +2. nnictl trial ls               list all of trial jobs
    +3. nnictl top                    monitor the status of running experiments
    +4. nnictl log stderr             show stderr log content
    +5. nnictl log stdout             show stdout log content
    +6. nnictl stop                   stop an experiment
    +7. nnictl trial kill             kill a trial job by id
    +8. nnictl --help                 get help information about nnictl
    +-----------------------------------------------------------------------
    +
    +
  • +
  • + 在浏览器中打开 Web UI 地址,可看到下图的 Experiment 详细信息,以及所有的 Trial 任务。 查看这里的更多页面示例。 + +
+ + +
+ + +
+

文档

+
    +
  • 要了解 NNI,请阅读 NNI 概述
  • +
  • 要熟悉如何使用 NNI,请阅读文档
  • +
  • 要安装 NNI,请参阅安装 NNI
  • +
+
+ + +
+

贡献

+

+ 本项目欢迎任何贡献和建议。 大多数贡献都需要你同意参与者许可协议(CLA),来声明你有权,并实际上授予我们有权使用你的贡献。 + 有关详细信息,请访问 https://cla.microsoft.com。 +

+

+ 当你提交拉取请求时,CLA 机器人会自动检查你是否需要提供 CLA,并修饰这个拉取请求(例如,标签、注释)。 只需要按照机器人提供的说明进行操作即可。 CLA 只需要同意一次,就能应用到所有的代码仓库上。 +

+

+ 该项目采用了 Microsoft 开源行为准则 。 有关详细信息,请参阅行为守则常见问题解答或联系 opencode@microsoft.com 咨询问题或评论。 +

+

+ 熟悉贡献协议后,即可按照 NNI 开发人员教程,创建第一个 PR =) 了: +

+ +
+ + +
+

其它代码库和参考

+

经作者许可的一些 NNI 用法示例和相关文档。

+
    +

    外部代码库

    +
  • 在 NNI 中运行 ENAS
  • +
  • + https://github.com/microsoft/nni/blob/master/examples/feature_engineering/auto-feature-engineering/README_zh_CN.md +
  • +
  • 使用 NNI 的 矩阵分解超参调优
  • +
  • scikit-nni 使用 NNI 为 scikit-learn 开发的超参搜索。
  • +
+ + + +
+ + +
+

反馈

+ +
+
加入聊天组:
+ + + + + + + + + + + + + +
Gitter微信
+ Gitter + + NNI 微信 +
+
+
+ + +
+

相关项目

+

+ 以探索先进技术和开放为目标,Microsoft Research (MSR) 还发布了一些相关的开源项目。

+
    +
  • + OpenPAI:作为开源平台,提供了完整的 AI 模型训练和资源管理能力,能轻松扩展,并支持各种规模的私有部署、云和混合环境。 +
  • +
  • + FrameworkController:开源的通用 Kubernetes Pod 控制器,通过单个控制器来编排 Kubernetes 上所有类型的应用。 +
  • +
  • + MMdnn:一个完整、跨框架的解决方案,能够转换、可视化、诊断深度神经网络模型。 MMdnn 中的 "MM" 表示 model management(模型管理),而 "dnn" 是 deep neural network(深度神经网络)的缩写。 +
  • +
  • + SPTAG : Space Partition Tree And Graph (SPTAG) 是用于大规模向量的最近邻搜索场景的开源库。 +
  • +
+

我们鼓励研究人员和学生利用这些项目来加速 AI 开发和研究。

+
+ + +
+

许可协议

+

代码库遵循 MIT 许可协议

+
+ +{% endblock %} diff --git a/docs/zh_CN/_templates/layout.html b/docs/zh_CN/_templates/layout.html new file mode 120000 index 0000000000000000000000000000000000000000..37c708154d9ecdb68b56c2040575373d24f8fd3a --- /dev/null +++ b/docs/zh_CN/_templates/layout.html @@ -0,0 +1 @@ +../../en_US/_templates/layout.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider.html b/docs/zh_CN/_templates/nnSpider.html new file mode 120000 index 0000000000000000000000000000000000000000..b91ba7c74028da5bd33a99cb9550a349dc7495a8 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider.html @@ -0,0 +1 @@ +../../en_US/_templates/nnSpider.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/comfort.html b/docs/zh_CN/_templates/nnSpider/comfort.html new file mode 120000 index 0000000000000000000000000000000000000000..6b28adaf7b689c6793fb98fc8567ee529efc0c2b --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/comfort.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/comfort.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/crying.html b/docs/zh_CN/_templates/nnSpider/crying.html new file mode 120000 index 0000000000000000000000000000000000000000..42b250cd673edb94802d1bc34b20ab186b6dec6e --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/crying.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/crying.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/cut.html b/docs/zh_CN/_templates/nnSpider/cut.html new file mode 120000 index 0000000000000000000000000000000000000000..d7c22779843b836c2920b2ac3c7bedb8a8e69067 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/cut.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/cut.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/errorEmotion.html b/docs/zh_CN/_templates/nnSpider/errorEmotion.html new file mode 120000 index 0000000000000000000000000000000000000000..81801bed4a6ceee4bc6cf6d2da837a6c55789b9d --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/errorEmotion.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/errorEmotion.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/holiday.html b/docs/zh_CN/_templates/nnSpider/holiday.html new file mode 120000 index 0000000000000000000000000000000000000000..b41015daecdec79c107ba1e02e311afad1026282 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/holiday.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/holiday.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/nobug.html b/docs/zh_CN/_templates/nnSpider/nobug.html new file mode 120000 index 0000000000000000000000000000000000000000..7466c12bc3d0496fa867c6e9d150d47d073943b6 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/nobug.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/nobug.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/sign.html b/docs/zh_CN/_templates/nnSpider/sign.html new file mode 120000 index 0000000000000000000000000000000000000000..4dda9e7501819430b09ff32096673eb10b83ad37 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/sign.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/sign.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/sweat.html b/docs/zh_CN/_templates/nnSpider/sweat.html new file mode 120000 index 0000000000000000000000000000000000000000..418d9c75932106c3d475e4ddfaec66e257a49947 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/sweat.html @@ -0,0 +1 @@ 
+../../../en_US/_templates/nnSpider/sweat.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/weaving.html b/docs/zh_CN/_templates/nnSpider/weaving.html new file mode 120000 index 0000000000000000000000000000000000000000..b16de5691461264e7853823420fd067a5bbdc6a8 --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/weaving.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/weaving.html \ No newline at end of file diff --git a/docs/zh_CN/_templates/nnSpider/working.html b/docs/zh_CN/_templates/nnSpider/working.html new file mode 120000 index 0000000000000000000000000000000000000000..e315ac94d9c5509175937c36bdb17a16713721dd --- /dev/null +++ b/docs/zh_CN/_templates/nnSpider/working.html @@ -0,0 +1 @@ +../../../en_US/_templates/nnSpider/working.html \ No newline at end of file diff --git a/docs/zh_CN/autotune_ref.rst b/docs/zh_CN/autotune_ref.rst new file mode 120000 index 0000000000000000000000000000000000000000..45b23bc0ead154fec22b63f7b569990996bfc39f --- /dev/null +++ b/docs/zh_CN/autotune_ref.rst @@ -0,0 +1 @@ +../en_US/autotune_ref.rst \ No newline at end of file diff --git a/docs/zh_CN/builtin_assessor.rst b/docs/zh_CN/builtin_assessor.rst new file mode 100644 index 0000000000000000000000000000000000000000..e745109d090794b1a7e128e538ee72c6c47010ad --- /dev/null +++ b/docs/zh_CN/builtin_assessor.rst @@ -0,0 +1,21 @@
+.. d5351e951811dcaeeda7f270427187fd
+
+内置 Assessor
+=================
+
+为了节省计算资源,NNI 支持提前终止策略,并且通过叫做 **Assessor** 的接口来执行此操作。
+
+Assessor 从 Trial 中接收中间结果,并通过指定的算法决定此 Trial 是否应该终止。 一旦 Trial 满足了提前终止策略(这表示 Assessor 认为最终结果不会太好),Assessor 会终止此 Trial,并将其状态标记为 `EARLY_STOPPED`。
+
+这是 MNIST 在 "最大化" 模式下使用 "曲线拟合" Assessor 的实验结果。 可以看到 Assessor 成功地 **提前终止** 了许多超参组合结果不好的 Trial。 使用 Assessor,能在相同的计算资源下,得到更好的结果。
+
+实验代码: :githublink:`config_assessor.yml `
+
+.. image:: ../img/Assessor.png
+
+.. toctree::
+   :maxdepth: 1
+
+   概述<./Assessor/BuiltinAssessor>
+   Medianstop<./Assessor/MedianstopAssessor>
+   Curvefitting(曲线拟合)<./Assessor/CurvefittingAssessor> diff --git a/docs/zh_CN/builtin_tuner.rst b/docs/zh_CN/builtin_tuner.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7a3bb2981b5b87560b6daa99d71c15f59b27572 --- /dev/null +++ b/docs/zh_CN/builtin_tuner.rst @@ -0,0 +1,76 @@
+.. 1ff18ebada0efec66cd793f1a000f3fe
+
+内置 Tuner
+==========
+
+为了让机器学习和深度学习模型适应不同的任务和问题,我们需要进行超参数调优,而自动化调优依赖于优秀的调优算法。NNI 内置了先进的调优算法,并且提供了易于使用的 API。
+
+在 NNI 中,调优算法被称为“tuner”。Tuner 向 trial 发送超参数,接收运行结果从而评估这组超参的性能,然后将下一组超参发送给新的 trial。
+
+下表简要介绍了 NNI 内置的调优算法。点击 tuner 的名称可以查看其安装需求、推荐使用场景、示例配置文件等详细信息。`这篇文章 <../CommunitySharings/HpoComparison.rst>`__ 对比了各个 tuner 在不同场景下的性能。
+
+.. list-table::
+   :header-rows: 1
+   :widths: auto
+
+   * - Tuner
+     - 算法简介
+
+   * - `TPE <./TpeTuner.rst>`__
+     - Tree-structured Parzen Estimator (TPE) 是一种基于序列模型的优化方法 (sequential model-based optimization, SMBO)。SMBO 方法根据历史数据来顺序地构造模型,从而预估超参性能,并基于此模型来选择新的超参。`参考论文 `__
+
+   * - `Random Search (随机搜索) <./RandomTuner.rst>`__
+     - 随机搜索在超参优化中表现出了令人意外的性能。如果没有对超参分布的先验知识,我们推荐使用随机搜索作为基线方法。`参考论文 `__
+
+   * - `Anneal (退火) <./AnnealTuner.rst>`__
+     - 朴素退火算法首先基于先验进行采样,然后逐渐逼近实际性能较好的采样点。该算法是随机搜索的变体,利用了反应曲面的平滑性。该实现中退火率不是自适应的。
+
+   * - `Naive Evolution(朴素进化) <./EvolutionTuner.rst>`__
+     - 朴素进化算法来自于 Large-Scale Evolution of Image Classifiers。它基于搜索空间随机生成一个种群,在每一代中选择较好的结果,并对其下一代进行变异。朴素进化算法需要很多 Trial 才能取得最优效果,但它也非常简单,易于扩展。`参考论文 `__
+
+   * - `SMAC <./SmacTuner.rst>`__
+     - SMAC 是基于序列模型的优化方法 (SMBO)。它利用使用过的最突出的模型(高斯随机过程模型),并将随机森林引入到 SMBO 中,来处理分类参数。NNI 的 SMAC tuner 封装了 GitHub 上的 `SMAC3 `__。`参考论文 `__
+
+       注意:SMAC 算法需要使用 ``pip install nni[SMAC]`` 安装依赖,暂不支持 Windows 操作系统。
+
+   * - `Batch(批处理) <./BatchTuner.rst>`__
+     - 批处理允许用户直接提供若干组配置,为每种配置运行一个 trial。
+
+   * - `Grid Search(网格遍历) <./GridsearchTuner.rst>`__
+     - 网格遍历会穷举搜索空间中的所有超参组合。
+
+   * - `Hyperband <./HyperbandAdvisor.rst>`__
+     - Hyperband 试图用有限的资源探索尽可能多的超参组合。该算法的思路是,首先生成大量超参配置,将每组超参运行较短的一段时间,随后抛弃其中效果较差的一半,让较好的超参继续运行,如此重复多轮。`参考论文 `__
+
+   * - `Metis <./MetisTuner.rst>`__
+     - 大多数调参工具仅仅预测最优配置,而 Metis 的优势在于它有两个输出:(a) 最优配置的当前预测结果, 以及 (b) 下一次 trial 的建议。大多数工具假设训练集没有噪声数据,但 Metis 会知道是否需要对某个超参重新采样。`参考论文 `__
+
+   * - `BOHB <./BohbAdvisor.rst>`__
+     - BOHB 是 Hyperband 算法的后续工作。 Hyperband 在生成新的配置时,没有利用已有的 trial 结果,而本算法利用了 trial 结果。BOHB 中,HB 表示 Hyperband,BO 表示贝叶斯优化(Bayesian Optimization)。 BOHB 会建立多个 TPE 模型,从而利用已完成的 Trial 生成新的配置。`参考论文 `__
+
+   * - `GP (高斯过程) <./GPTuner.rst>`__
+     - GP Tuner 是基于序列模型的优化方法 (SMBO),使用高斯过程作为代理模型 (surrogate)。`参考论文 `__
+
+   * - `PBT <./PBTTuner.rst>`__
+     - PBT Tuner 是一种简单的异步优化算法,在固定的计算资源下,它能有效地联合优化一组模型及其超参来最优化性能。`参考论文 `__
+
+   * - `DNGO <./DngoTuner.rst>`__
+     - DNGO 是基于序列模型的优化方法 (SMBO),该算法使用神经网络(而不是高斯过程)去建模贝叶斯优化中所需要的函数分布。
+
+.. toctree::
+   :maxdepth: 1
+
+   TPE
+   Random Search(随机搜索)
+   Anneal(退火)
+   Naïve Evolution(朴素进化)
+   SMAC
+   Metis Tuner
+   Batch Tuner(批处理)
+   Grid Search(网格遍历)
+   GP Tuner
+   Network Morphism
+   Hyperband
+   BOHB
+   PBT Tuner
+   DNGO Tuner diff --git a/docs/zh_CN/conf.py b/docs/zh_CN/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc2ce5049b2bcd4a3b1652bc96899df4ba069a4 --- /dev/null +++ b/docs/zh_CN/conf.py @@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import subprocess
+import sys
+sys.path.insert(0, os.path.abspath('../..'))
+
+
+# -- Project information ---------------------------------------------------
+
+project = 'NNI'
+copyright = '2021, Microsoft'
+author = 'Microsoft'
+
+# The short X.Y version
+version = ''
+# The full version, including alpha/beta/rc tags
+release = 'v2.3'
+
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.mathjax', + 'sphinxarg.ext', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'sphinx.ext.intersphinx', + 'nbsphinx', + 'sphinx.ext.extlinks', + 'IPython.sphinxext.ipython_console_highlighting', +] + +# 添加示例模块 +autodoc_mock_imports = ['apex', 'nni_node', 'tensorrt', 'pycuda'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = ['.rst'] + +# The master toctree document. +master_doc = 'contents' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Release_v1.0.md', '**.ipynb_checkpoints'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +html_additional_pages = { + 'index': 'index.html', +} + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + 'logo_only': True, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['../static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + +html_logo = '../img/nni_logo_dark.png' +html_title = '支持神经网络结构搜索、模型压缩、超参调优的开源自动机器学习工具 (%s %s)' % \ + (project, release) + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'NeuralNetworkIntelligencedoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, 'NeuralNetworkIntelligence.tex', 'Neural Network Intelligence Documentation', + 'Microsoft', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'neuralnetworkintelligence', 'Neural Network Intelligence Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'NeuralNetworkIntelligence', 'Neural Network Intelligence Documentation', + author, 'NeuralNetworkIntelligence', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# external links (for github code) +# Reference the code via :githublink:`path/to/your/example/code.py` +git_commit_id = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip() + +extlinks = { + 'githublink': ('https://github.com/microsoft/nni/blob/' + git_commit_id + '/%s', 'Github link: ') +} + +# -- Extension configuration ------------------------------------------------- +def setup(app): + app.add_css_file('css/custom.css') diff --git a/docs/zh_CN/contents.rst b/docs/zh_CN/contents.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e0caa2ae7772d1b2d254e7e659fac4483f79488 --- /dev/null +++ b/docs/zh_CN/contents.rst @@ -0,0 +1,25 @@ +.. 823dad4710f371e923033eebd9dba255 + +########################### +Neural Network Intelligence +########################### + + +.. toctree:: + :caption: 目录 + :maxdepth: 2 + :titlesonly: + + 概述 + 安装 + 入门 + 自动(超参数)调优 + 神经网络架构搜索 + 模型压缩 + 特征工程 + 参考 + 示例与解决方案 + 研究和出版物 + 常见问题 + 如何贡献 + 更改日志 diff --git a/docs/zh_CN/contribution.rst b/docs/zh_CN/contribution.rst new file mode 100644 index 0000000000000000000000000000000000000000..71584e9fb7b4cdbdf07fbe44698ac059cfb774c0 --- /dev/null +++ b/docs/zh_CN/contribution.rst @@ -0,0 +1,9 @@ +.. 24da49b25d3d36c476a69aceb825cb94 + +############################### +贡献代码 +############################### + +.. toctree:: + 设置开发环境<./Tutorial/SetupNniDeveloperEnvironment> + 贡献指南<./Tutorial/Contributing> \ No newline at end of file diff --git a/docs/zh_CN/examples.rst b/docs/zh_CN/examples.rst new file mode 100644 index 0000000000000000000000000000000000000000..c51da740a76c06d063f03e83f98700750dca2389 --- /dev/null +++ b/docs/zh_CN/examples.rst @@ -0,0 +1,14 @@ +.. d19a00598b8eca71c825d80c0a7106f2 + +###################### +示例 +###################### + +.. 
toctree:: + :maxdepth: 2 + + MNIST<./TrialExample/MnistExamples> + Cifar10<./TrialExample/Cifar10Examples> + Scikit-learn<./TrialExample/SklearnExamples> + GBDT<./TrialExample/GbdtExample> + Pix2pix<./TrialExample/Pix2pixExample> \ No newline at end of file diff --git a/docs/zh_CN/feature_engineering.rst b/docs/zh_CN/feature_engineering.rst new file mode 100644 index 0000000000000000000000000000000000000000..53634d10c35c42dbd2935d7a281979409f41b973 --- /dev/null +++ b/docs/zh_CN/feature_engineering.rst @@ -0,0 +1,18 @@ +.. 0958703dcd6f8078a1ad1bcaef9c7199 + +################### +特征工程 +################### + +很高兴在 NNI 上引入了特征工程工具包, +其仍处于试验阶段,会根据使用反馈来演化。 +诚挚邀请您使用、反馈,或更多贡献。 + +详细信息,参考以下教程: + +.. toctree:: + :maxdepth: 2 + + 概述 + GradientFeatureSelector + GBDTSelector diff --git a/docs/zh_CN/hpo_advanced.rst b/docs/zh_CN/hpo_advanced.rst new file mode 100644 index 0000000000000000000000000000000000000000..401b43ec99129f672695e839b8bf8851b1e7fadb --- /dev/null +++ b/docs/zh_CN/hpo_advanced.rst @@ -0,0 +1,13 @@ +.. 43bb394b1e25458a948c134058ec68ac + +高级功能 +================= + +.. toctree:: + :maxdepth: 2 + + 编写新的 Tuner + 编写新的 Assessor + 编写新的 Advisor + 编写新的训练平台 + 安装自定义的 Tuners/Assessors/Advisors diff --git a/docs/zh_CN/hpo_benchmark.rst b/docs/zh_CN/hpo_benchmark.rst new file mode 120000 index 0000000000000000000000000000000000000000..a391f1f999c837f0348921f737e925aa14767326 --- /dev/null +++ b/docs/zh_CN/hpo_benchmark.rst @@ -0,0 +1 @@ +../en_US/hpo_benchmark.rst \ No newline at end of file diff --git a/docs/zh_CN/hpo_benchmark_stats.rst b/docs/zh_CN/hpo_benchmark_stats.rst new file mode 120000 index 0000000000000000000000000000000000000000..b52e6e9715e5cebb995f9bda005c8179808d7d16 --- /dev/null +++ b/docs/zh_CN/hpo_benchmark_stats.rst @@ -0,0 +1 @@ +../en_US/hpo_benchmark_stats.rst \ No newline at end of file diff --git a/docs/zh_CN/hyperparameter_tune.rst b/docs/zh_CN/hyperparameter_tune.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b518d7754435c67aa90eaf810c2831f90e37ddd --- /dev/null +++ b/docs/zh_CN/hyperparameter_tune.rst @@ -0,0 +1,22 @@ +.. 6ed30d3a87dbc4c1c4650cf56f074045 + +############## +自动超参数调优 +############## + +自动调优是 NNI 的主要功能之一。它的工作模式是反复运行 trial 代码,每次向其提供不同的超参组合,从而对 trial 的运行结果进行调优。NNI 提供了很多流行的自动调优算法(称为 Tuner)和一些提前终止算法(称为 Assessor)。NNI 支持在多种训练平台上运行 trial,包括本机、远程服务器、Azure Machine Learning、基于 Kubernetes 的集群(如 OpenPAI、Kubeflow)等等。 + +NNI 具有高扩展性,用户可以根据需求实现自己的 Tuner 算法和训练平台。 + +.. toctree:: + :maxdepth: 2 + + 实现 Trial <./TrialExample/Trials> + Tuners + Assessors + 训练平台 + 示例 + Web 界面 + 如何调试 + 高级功能 + Tuner 基准测试 diff --git a/docs/zh_CN/installation.rst b/docs/zh_CN/installation.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf0e8ccc449fa02d43c94a5cd881bf7f40fea3e3 --- /dev/null +++ b/docs/zh_CN/installation.rst @@ -0,0 +1,14 @@ +.. c62173d7147a43a13bf2cdf945b82d07 + +############ +安装 +############ + +当前支持在 Linux,macOS 和 Windows 下安装。 还可使用 Docker。 + +.. toctree:: + :maxdepth: 2 + + Linux 和 macOS + Windows + 使用 Docker \ No newline at end of file diff --git a/docs/zh_CN/model_compression.rst b/docs/zh_CN/model_compression.rst new file mode 100644 index 0000000000000000000000000000000000000000..01f4a37e1070fff87019035e3d8302097a03e27f --- /dev/null +++ b/docs/zh_CN/model_compression.rst @@ -0,0 +1,36 @@ +.. 
da97b4cdd507bd8fad43d640f3d2bfef + +################# +模型压缩 +################# + +深度神经网络(DNNs)在许多领域都取得了巨大的成功。 然而,典型的神经网络是 +计算和能源密集型的,很难将其部署在计算资源匮乏 +或具有严格延迟要求的设备上。 因此,一个自然的想法就是对模型进行压缩 +以减小模型大小并加速模型训练/推断,同时不会显着降低模型性能。 模型压缩 +技术可以分为两类:剪枝和量化。 剪枝方法探索模型权重中的冗余, +并尝试删除/修剪冗余和非关键的权重。 量化是指通过减少 +权重表示或激活所需的比特数来压缩模型。 + +NNI 提供了易于使用的工具包来帮助用户设计并使用剪枝和量化算法。 +其使用了统一的接口来支持 TensorFlow 和 PyTorch。 +对用户来说, 只需要添加几行代码即可压缩模型。 +NNI 中也内置了一些主流的模型压缩算法。 +用户可以进一步利用 NNI 的自动调优功能找到最佳的压缩模型, +该功能在自动模型压缩部分有详细介绍。 +另一方面,用户可以使用 NNI 的接口自定义新的压缩算法。 + +详细信息,参考以下教程: + +.. toctree:: + :maxdepth: 2 + + 概述 + 快速入门 + 教程 + 剪枝 + 剪枝(V2版本) + 量化 + 工具 + 高级用法 + API 参考 diff --git a/docs/zh_CN/nas.rst b/docs/zh_CN/nas.rst new file mode 100644 index 0000000000000000000000000000000000000000..42484805f1637363bfcee467ff7192d0f695e8b4 --- /dev/null +++ b/docs/zh_CN/nas.rst @@ -0,0 +1,36 @@ +.. 0b36fb7844fd9cc88c4e74ad2c6b9ece + +########################## +神经网络架构搜索 +########################## + +自动化的神经网络架构(NAS)搜索在寻找更好的模型方面发挥着越来越重要的作用。 +最近的研究工作证明了自动化 NAS 的可行性,并发现了一些超越手动调整的模型。 +代表工作有 NASNet, ENAS, DARTS, Network Morphism, 以及 Evolution 等。 此外,新的创新不断涌现。 + +但是,要实现 NAS 算法需要花费大量的精力,并且很难在新算法中重用现有算法的代码。 +为了促进 NAS 创新 (如, 设计实现新的 NAS 模型,比较不同的 NAS 模型), +易于使用且灵活的编程接口非常重要。 + +因此,NNI 设计了 `Retiarii `__, 它是一个深度学习框架,支持在神经网络模型空间,而不是单个神经网络模型上进行探索性训练。 +Retiarii 的探索性训练允许用户以高度灵活的方式表达 *神经网络架构搜索* 和 *超参数调整* 的各种搜索空间。 + +本文档中的一些常用术语: + +* *Model search space(模型搜索空间)* :它意味着一组模型,用于从中探索/搜索出最佳模型。 有时我们简称为 *search space(搜索空间)* 或 *model space(模型空间)* 。 +* *Exploration strategy(探索策略)*:用于探索模型搜索空间的算法。 +* *Model evaluator(模型评估器)*:用于训练模型并评估模型的性能。 + +按照以下说明开始您的 Retiarii 之旅。 + +.. toctree:: + :maxdepth: 2 + + 概述 + 快速入门 + 构建模型空间 + Multi-trial NAS + One-Shot NAS + 硬件相关 NAS + NAS 基准测试 + NAS API 参考 diff --git a/docs/zh_CN/reference.rst b/docs/zh_CN/reference.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0d01b229603c1b6a87b621e8d3a01793ba92143 --- /dev/null +++ b/docs/zh_CN/reference.rst @@ -0,0 +1,18 @@ +.. 5f17887878bae5d51cf177a1c995c003 + +参考 +================== + +.. toctree:: + :maxdepth: 2 + + nnictl 命令 + Experiment 配置 + Experiment 配置(遗产) + 搜索空间 + NNI Annotation + SDK API 参考 + 支持的框架和库 + 从 Python 发起实验 + 共享存储 + Tensorboard diff --git a/docs/zh_CN/reference/experiment_config.rst b/docs/zh_CN/reference/experiment_config.rst new file mode 120000 index 0000000000000000000000000000000000000000..a8931147c5b2997012502a4510223a408d8c39ab --- /dev/null +++ b/docs/zh_CN/reference/experiment_config.rst @@ -0,0 +1 @@ +../../en_US/reference/experiment_config.rst \ No newline at end of file diff --git a/docs/zh_CN/sdk_reference.rst b/docs/zh_CN/sdk_reference.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbf85cad7af28755e3e13e3c36c283aa71db6330 --- /dev/null +++ b/docs/zh_CN/sdk_reference.rst @@ -0,0 +1,14 @@ +.. 60cb924d0ec522b7709acf4f8cff3f16 + +#################### +Python API 参考 +#################### + + +.. toctree:: + :maxdepth: 1 + + 自动调优 + NAS + 模型压缩 + Python API \ No newline at end of file diff --git a/docs/zh_CN/training_services.rst b/docs/zh_CN/training_services.rst new file mode 100644 index 0000000000000000000000000000000000000000..761536b2472ec6674ba1a2b5fe9b186fdaccbc6c --- /dev/null +++ b/docs/zh_CN/training_services.rst @@ -0,0 +1,17 @@ +.. 4e054d96c7d211dc514c99d673415d8e + +NNI 支持的训练平台介绍 +===================================== + +.. 
toctree::
+   Overview <./TrainingService/Overview>
+   本机<./TrainingService/LocalMode>
+   远程<./TrainingService/RemoteMachineMode>
+   OpenPAI<./TrainingService/PaiMode>
+   Kubeflow<./TrainingService/KubeflowMode>
+   AdaptDL<./TrainingService/AdaptDLMode>
+   FrameworkController<./TrainingService/FrameworkControllerMode>
+   DLTS<./TrainingService/DLTSMode>
+   AML<./TrainingService/AMLMode>
+   PAI-DLC<./TrainingService/DLCMode>
+   混合模式 <./TrainingService/HybridMode> diff --git a/examples/assessors/README.md b/examples/assessors/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1cb8e296d8a3a8ffc7431b8c7bfebb5a1e3868fb --- /dev/null +++ b/examples/assessors/README.md @@ -0,0 +1,57 @@
+# Define your own Assessor
+
+*An Assessor receives intermediate results from a Trial and decides whether the Trial should be killed. Once a Trial meets the early-stop condition, the Assessor kills it.*
+
+So, if users want to implement a customized Assessor, they only need to:
+
+
+**1) Inherit from the base Assessor class**
+```python
+from nni.assessor import Assessor
+
+class CustomizedAssessor(Assessor):
+    def __init__(self, ...):
+        ...
+```
+
+**2) Implement the assess_trial function**
+```python
+from nni.assessor import Assessor, AssessResult
+
+class CustomizedAssessor(Assessor):
+    def __init__(self, ...):
+        ...
+
+    def assess_trial(self, trial_history):
+        """
+        Determines whether a trial should be killed. Must override.
+        trial_history: a list of intermediate result objects.
+        Returns AssessResult.Good or AssessResult.Bad.
+        """
+        # your code goes here.
+        ...
+```
+**3) Write a script to run the Assessor**
+```python
+import argparse
+
+import CustomizedAssessor
+
+def main():
+    parser = argparse.ArgumentParser(description='parse command line parameters.')
+    # parse your assessor args here.
+    ...
+    FLAGS, unparsed = parser.parse_known_args()
+
+    assessor = CustomizedAssessor(...)
+    assessor.run()
+
+main()
+```
+
+Please note that in 2), the object `trial_history` is exactly the object that the Trial sends to the Assessor via the SDK function `report_intermediate_result`.
+
+Also, users can override the `run` function in the Assessor to control the process logic.
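+
+As an illustration only, a minimal assessor following this pattern might stop a trial whose latest intermediate result falls below a fixed threshold (this `ThresholdAssessor` is a hypothetical sketch, not a built-in NNI assessor):
+```python
+from nni.assessor import Assessor, AssessResult
+
+class ThresholdAssessor(Assessor):
+    def __init__(self, threshold=0.5):
+        self.threshold = threshold
+
+    def assess_trial(self, trial_history):
+        # kill the trial once its newest intermediate result drops below the threshold
+        if trial_history and trial_history[-1] < self.threshold:
+            return AssessResult.Bad
+        return AssessResult.Good
+```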
+
+For a more detailed example, see:
+> * [Base-Assessor](https://msrasrg.visualstudio.com/NeuralNetworkIntelligenceOpenSource/_git/Default?_a=contents&path=%2Fsrc%2Fsdk%2Fpynni%2Fnni%2Fassessor.py&version=GBadd_readme) diff --git a/examples/assessors/README_zh_CN.md b/examples/assessors/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..5e0d72316a74a85846f256d1ab8ecf3129862851 --- /dev/null +++ b/examples/assessors/README_zh_CN.md @@ -0,0 +1,61 @@
+# 自定义 Assessor
+
+*Assessor 从 Trial 中接收中间结果,并决定此 Trial 是否应该终止。 一旦 Trial 满足提前终止条件,Assessor 将终止此 Trial。*
+
+因此,如果要自定义 Assessor,需要:
+
+**1) 继承于 Assessor 基类,创建 Assessor 类**
+
+```python
+from nni.assessor import Assessor
+
+class CustomizedAssessor(Assessor):
+    def __init__(self, ...):
+        ...
+```
+
+**2) 实现评估 Trial 的函数**
+
+```python
+from nni.assessor import Assessor, AssessResult
+
+class CustomizedAssessor(Assessor):
+    def __init__(self, ...):
+        ...
+
+    def assess_trial(self, trial_history):
+        """
+        决定是否应该终止 Trial。 必须重载。
+        trial_history: 中间结果列表对象。
+        返回 AssessResult.Good 或 AssessResult.Bad。
+        """
+        # 代码实现于此处。
+        ...
+```
+
+**3) 实现脚本来运行 Assessor**
+
+```python
+import argparse
+
+import CustomizedAssessor
+
+def main():
+    parser = argparse.ArgumentParser(description='parse command line parameters.')
+    # 在这里解析 Assessor 的参数。
+    ...
+    FLAGS, unparsed = parser.parse_known_args()
+
+    assessor = CustomizedAssessor(...)
+    assessor.run()
+
+main()
+```
+
+注意:2) 中的对象 `trial_history` 与 Trial 通过 SDK 函数 `report_intermediate_result` 发送给 Assessor 的对象完全一致。
+
+也可以重载 Assessor 的 `run` 函数来控制过程逻辑。
+
+更多示例,可参考:
+
+> - [Base-Assessor](https://msrasrg.visualstudio.com/NeuralNetworkIntelligenceOpenSource/_git/Default?_a=contents&path=%2Fsrc%2Fsdk%2Fpynni%2Fnni%2Fassessor.py&version=GBadd_readme) \ No newline at end of file diff --git a/examples/feature_engineering/auto-feature-engineering/README.md b/examples/feature_engineering/auto-feature-engineering/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c14ec3dfcd69854a463199e48b2c6d4f6b32360e --- /dev/null +++ b/examples/feature_engineering/auto-feature-engineering/README.md @@ -0,0 +1,8 @@
+ **Automatic Feature Engineering in NNI**
+ ===
+
+ Now we have an [example](https://github.com/SpongebBob/tabular_automl_NNI), which can automatically do feature engineering in NNI.
+
+ This code comes from our contributors. Thanks to our lovely contributors!
+
+ We welcome more and more people to join us! diff --git a/examples/feature_engineering/auto-feature-engineering/README_zh_CN.md b/examples/feature_engineering/auto-feature-engineering/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..76cce132ff66bd55e75556af06ffe89da1cfdfee --- /dev/null +++ b/examples/feature_engineering/auto-feature-engineering/README_zh_CN.md @@ -0,0 +1,7 @@
+ **NNI 中的自动特征工程**
+ ===
+
+ 此[示例](https://github.com/SpongebBob/tabular_automl_NNI)在 NNI 中实现了自动特征工程。
+
+ 代码来自于贡献者。 谢谢可爱的贡献者!
+
+ 欢迎越来越多的人加入我们! diff --git a/examples/feature_engineering/gbdt_selector/gbdt_selector_test.py b/examples/feature_engineering/gbdt_selector/gbdt_selector_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ca7aecf9538932f156da323ba40ec4c386e912c1 --- /dev/null +++ b/examples/feature_engineering/gbdt_selector/gbdt_selector_test.py @@ -0,0 +1,65 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
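+
+# This example script downloads the rcv1 binary training set from LIBSVM,
+# converts it to svmlight format, and uses GBDTSelector (backed by LightGBM
+# feature importance) to select and print the top-10 features.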
+ +import bz2 +import urllib.request +import numpy as np + +from sklearn.datasets import load_svmlight_file +from sklearn.model_selection import train_test_split + +from nni.algorithms.feature_engineering.gbdt_selector import GBDTSelector + +url_zip_train = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2' +urllib.request.urlretrieve(url_zip_train, filename='train.bz2') + +f_svm = open('train.svm', 'wt') +with bz2.open('train.bz2', 'rb') as f_zip: + data = f_zip.read() + f_svm.write(data.decode('utf-8')) +f_svm.close() + +X, y = load_svmlight_file('train.svm') +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) + +lgb_params = { + 'boosting_type': 'gbdt', + 'objective': 'regression', + 'metric': {'l2', 'l1'}, + 'num_leaves': 20, + 'learning_rate': 0.05, + 'feature_fraction': 0.9, + 'bagging_fraction': 0.8, + 'bagging_freq': 5, + 'verbose': 0} + +eval_ratio = 0.1 +early_stopping_rounds = 10 +importance_type = 'gain' +num_boost_round = 1000 +topk = 10 + +selector = GBDTSelector() +selector.fit(X_train, y_train, + lgb_params = lgb_params, + eval_ratio = eval_ratio, + early_stopping_rounds = early_stopping_rounds, + importance_type = importance_type, + num_boost_round = num_boost_round) + +print("selected features\t", selector.get_selected_features(topk=topk)) + diff --git a/examples/feature_engineering/gradient_feature_selector/.gitignore b/examples/feature_engineering/gradient_feature_selector/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..048100f85dc10da79afc298c4cccff83ec85f84e --- /dev/null +++ b/examples/feature_engineering/gradient_feature_selector/.gitignore @@ -0,0 +1,5 @@ +*.bz2 +*.svm +*.log +*memory +*time diff --git a/examples/feature_engineering/gradient_feature_selector/benchmark_test.py b/examples/feature_engineering/gradient_feature_selector/benchmark_test.py new file mode 100644 index 0000000000000000000000000000000000000000..015426e161f5f700ac63c2b55c3ffeb92fcae071 --- /dev/null +++ b/examples/feature_engineering/gradient_feature_selector/benchmark_test.py @@ -0,0 +1,148 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
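+
+# Benchmark harness: downloads LIBSVM binary datasets and measures the score,
+# running time, or memory usage of several scikit-learn pipelines: plain
+# LogisticRegression, FeatureGradientSelector + LogisticRegression, and an
+# ExtraTrees-based SelectFromModel + LogisticRegression.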
+ +import bz2 +import urllib.request +import numpy as np +import datetime + +import line_profiler +profile = line_profiler.LineProfiler() + +import os + +from sklearn.datasets import load_svmlight_file +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.linear_model import LogisticRegression + +from sklearn.ensemble import ExtraTreesClassifier +from sklearn.feature_selection import SelectFromModel + +from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector + + +class Benchmark(): + + def __init__(self, files=None, test_size=0.2): + self.files = files + self.test_size = test_size + + + def run_all_test(self, pipeline): + for file_name in self.files: + file_path = self.files[file_name] + + self.run_test(pipeline, file_name, file_path) + + + def run_test(self, pipeline, name, path): + print("download " + name) + update_name = self.download(name, path) + X, y = load_svmlight_file(update_name) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=42) + + pipeline.fit(X_train, y_train) + print("[Benchmark "+ name + " Score]: ", pipeline.score(X_test, y_test)) + + + def download(self, name, path): + old_name = name + '_train.bz2' + update_name = name + '_train.svm' + + if os.path.exists(old_name) and os.path.exists(update_name): + return update_name + + urllib.request.urlretrieve(path, filename=old_name) + + f_svm = open(update_name, 'wt') + with bz2.open(old_name, 'rb') as f_zip: + data = f_zip.read() + f_svm.write(data.decode('utf-8')) + f_svm.close() + + return update_name + +@profile +def test_memory(pipeline_name, name, path): + if pipeline_name == "LR": + pipeline = make_pipeline(LogisticRegression()) + + if pipeline_name == "FGS": + pipeline = make_pipeline(FeatureGradientSelector(), LogisticRegression()) + + if pipeline_name == "Tree": + pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression()) + + test_benchmark = Benchmark() + print("Dataset:\t", name) + print("Pipeline:\t", pipeline_name) + test_benchmark.run_test(pipeline, name, path) + print("") + + +def test_time(pipeline_name, name, path): + if pipeline_name == "LR": + pipeline = make_pipeline(LogisticRegression()) + + if pipeline_name == "FGS": + pipeline = make_pipeline(FeatureGradientSelector(), LogisticRegression()) + + if pipeline_name == "Tree": + pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression()) + + test_benchmark = Benchmark() + print("Dataset:\t", name) + print("Pipeline:\t", pipeline_name) + starttime = datetime.datetime.now() + test_benchmark.run_test(pipeline, name, path) + endtime = datetime.datetime.now() + print("Used time: ", (endtime - starttime).microseconds/1000) + print("") + + +if __name__ == "__main__": + LIBSVM_DATA = { + "rcv1" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2", + "colon-cancer" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/covtype.libsvm.binary.bz2", + "gisette" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/gisette_scale.bz2", + "news20.binary" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/news20.binary.bz2", + "real-sim" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/real-sim.bz2", + "webspam" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/webspam_wc_normalized_trigram.svm.bz2", + "avazu" : 
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.bz2" + } + + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--pipeline_name', type=str, help='display pipeline_name.') + parser.add_argument('--name', type=str, help='display name.') + parser.add_argument('--object', type=str, help='display test object: time or memory.') + + args = parser.parse_args() + pipeline_name = args.pipeline_name + name = args.name + test_object = args.object + path = LIBSVM_DATA[name] + + if test_object == 'time': + test_time(pipeline_name, name, path) + elif test_object == 'memory': + test_memory(pipeline_name, name, path) + else: + print("Not support test object.\t", test_object) + + print("Done.") diff --git a/examples/feature_engineering/gradient_feature_selector/sklearn_test.py b/examples/feature_engineering/gradient_feature_selector/sklearn_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c0bd245c32e1e6afa2e19c4b822890644a90731f --- /dev/null +++ b/examples/feature_engineering/gradient_feature_selector/sklearn_test.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +import bz2 +import urllib.request +import numpy as np + +from sklearn.datasets import load_svmlight_file +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.linear_model import LogisticRegression + +from sklearn.ensemble import ExtraTreesClassifier +from sklearn.feature_selection import SelectFromModel + +from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector + + +def test(): + url_zip_train = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2' + urllib.request.urlretrieve(url_zip_train, filename='train.bz2') + + f_svm = open('train.svm', 'wt') + with bz2.open('train.bz2', 'rb') as f_zip: + data = f_zip.read() + f_svm.write(data.decode('utf-8')) + f_svm.close() + + + X, y = load_svmlight_file('train.svm') + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) + + + pipeline = make_pipeline(FeatureGradientSelector(n_epochs=1, n_features=10), LogisticRegression()) + # pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression()) + + pipeline.fit(X_train, y_train) + + print("Pipeline Score: ", pipeline.score(X_train, y_train)) + +if __name__ == "__main__": + test() diff --git a/examples/feature_engineering/gradient_feature_selector/test_memory.py b/examples/feature_engineering/gradient_feature_selector/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..ddec2782e8ef64d552fa708b2152313fcfb832a4 --- /dev/null +++ b/examples/feature_engineering/gradient_feature_selector/test_memory.py @@ -0,0 +1,26 @@ +import os + +LIBSVM_DATA = { + "rcv1" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2", + "colon-cancer" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/covtype.libsvm.binary.bz2", + "gisette" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/gisette_scale.bz2", + "news20.binary" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/news20.binary.bz2", + "real-sim" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/real-sim.bz2", + "avazu" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.bz2", +} + +pipeline_name = "Tree" +device = "HIP_VISIBLE_DEVICES=0 " +script = "setsid python -m memory_profiler benchmark_test.py " +test_object = "memory" + +for name in LIBSVM_DATA: + log_name = "_".join([pipeline_name, name, test_object]) + command = device + script + "--pipeline_name " + pipeline_name + " --name " + name + " --object " + test_object + " >" +log_name + " 2>&1 &" + print("command is\t", command) + os.system(command) + print("log is here\t", log_name) + +print("Done.") + + diff --git a/examples/feature_engineering/gradient_feature_selector/test_time.py b/examples/feature_engineering/gradient_feature_selector/test_time.py new file mode 100644 index 0000000000000000000000000000000000000000..4081a4883bb6a53eb856520bef8219a9c6565f5d --- /dev/null +++ b/examples/feature_engineering/gradient_feature_selector/test_time.py @@ -0,0 +1,26 @@ +import os + +LIBSVM_DATA = { + "rcv1" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2", + "colon-cancer" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/covtype.libsvm.binary.bz2", + "gisette" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/gisette_scale.bz2", + "news20.binary" : 
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/news20.binary.bz2", + "real-sim" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/real-sim.bz2", + "avazu" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.bz2", +} + +pipeline_name = "LR" +device = "HIP_VISIBLE_DEVICES=0 " +script = "setsid python benchmark_test.py " +test_object = "time" + +for name in LIBSVM_DATA: + log_name = "_".join([pipeline_name, name, test_object]) + command = device + script + "--pipeline_name " + pipeline_name + " --name " + name + " --object " + test_object + " >" +log_name + " 2>&1 &" + print("command is\t", command) + os.system(command) + print("log is here\t", log_name) + +print("Done.") + + diff --git a/examples/model_compress/.gitignore b/examples/model_compress/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c2e41e6b0e1ae35dd8baca4c3063d0155e697371 --- /dev/null +++ b/examples/model_compress/.gitignore @@ -0,0 +1,6 @@ +.pth +.tar.gz +data/ +MNIST/ +cifar-10-batches-py/ +experiment_data/ \ No newline at end of file diff --git a/examples/model_compress/auto_compress/torch/auto_compress_module.py b/examples/model_compress/auto_compress/torch/auto_compress_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a70d6e470f4117e96cfc2666b349394593ffab0a --- /dev/null +++ b/examples/model_compress/auto_compress/torch/auto_compress_module.py @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from typing import Callable, Optional, Iterable + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +from torchvision import datasets, transforms + +from nni.algorithms.compression.pytorch.auto_compress import AbstractAutoCompressionModule + +torch.manual_seed(1) + +class LeNet(nn.Module): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + self.conv2 = nn.Conv2d(32, 64, 3, 1) + self.dropout1 = nn.Dropout2d(0.25) + self.dropout2 = nn.Dropout2d(0.5) + self.fc1 = nn.Linear(9216, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.conv2(x) + x = F.relu(x) + x = F.max_pool2d(x, 2) + x = self.dropout1(x) + x = torch.flatten(x, 1) + x = self.fc1(x) + x = F.relu(x) + x = self.dropout2(x) + x = self.fc2(x) + output = F.log_softmax(x, dim=1) + return output + +_use_cuda = torch.cuda.is_available() + +_train_kwargs = {'batch_size': 64} +_test_kwargs = {'batch_size': 1000} +if _use_cuda: + _cuda_kwargs = {'num_workers': 1, + 'pin_memory': True, + 'shuffle': True} + _train_kwargs.update(_cuda_kwargs) + _test_kwargs.update(_cuda_kwargs) + +_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) +]) + +_device = torch.device("cuda" if _use_cuda else "cpu") + +_train_loader = None +_test_loader = None + +def _train(model, optimizer, criterion, epoch): + global _train_loader + if _train_loader is None: + dataset = datasets.MNIST('./data', train=True, download=True, transform=_transform) + _train_loader = torch.utils.data.DataLoader(dataset, **_train_kwargs) + model.train() + for data, target in _train_loader: + data, target = data.to(_device), target.to(_device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def _test(model): + global _test_loader + if _test_loader is None: + dataset = datasets.MNIST('./data', 
train=False, transform=_transform) + _test_loader = torch.utils.data.DataLoader(dataset, **_test_kwargs) + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in _test_loader: + data, target = data.to(_device), target.to(_device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(_test_loader.dataset) + acc = 100 * correct / len(_test_loader.dataset) + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(_test_loader.dataset), acc)) + return acc + +_model = LeNet().to(_device) +_model.load_state_dict(torch.load('mnist_pretrain_lenet.pth')) + +class AutoCompressionModule(AbstractAutoCompressionModule): + @classmethod + def model(cls) -> nn.Module: + return _model + + @classmethod + def evaluator(cls) -> Callable[[nn.Module], float]: + return _test + + @classmethod + def optimizer_factory(cls) -> Optional[Callable[[Iterable], optim.Optimizer]]: + def _optimizer_factory(params: Iterable): + return torch.optim.SGD(params, lr=0.01) + return _optimizer_factory + + @classmethod + def criterion(cls) -> Optional[Callable]: + return F.nll_loss + + @classmethod + def sparsifying_trainer(cls, compress_algorithm_name: str) -> Optional[Callable[[nn.Module, optim.Optimizer, Callable, int], None]]: + return _train + + @classmethod + def post_compress_finetuning_trainer(cls, compress_algorithm_name: str) -> Optional[Callable[[nn.Module, optim.Optimizer, Callable, int], None]]: + return _train + + @classmethod + def post_compress_finetuning_epochs(cls, compress_algorithm_name: str) -> int: + return 2 diff --git a/examples/model_compress/auto_compress/torch/auto_compress_torch.py b/examples/model_compress/auto_compress/torch/auto_compress_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..5ba69ebc8e26a5742dbb31d5ca8c616e8e1320a3 --- /dev/null +++ b/examples/model_compress/auto_compress/torch/auto_compress_torch.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
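# This script only declares the search space and experiment settings; the model,
# evaluator and trainers come from AutoCompressionModule (imported below). Each
# generator.add_config(name, config_list) call registers one candidate
# compression algorithm, so the tuner samples both which algorithm to apply and
# its hyperparameters per trial. Searching over another pruner follows the same
# schema; a sketch, assuming 'l2' is registered like 'l1' in this NNI version:
#
#   generator.add_config('l2', [{
#       'sparsity': {'_type': 'uniform', '_value': [0.01, 0.99]},
#       'op_types': ['Conv2d'],
#   }])
#
# After experiment.run(8088) below starts, progress is visible in the NNI web UI
# on port 8088.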
+ +from pathlib import Path + +from nni.algorithms.compression.pytorch.auto_compress import AutoCompressionExperiment, AutoCompressionSearchSpaceGenerator + +from auto_compress_module import AutoCompressionModule + +generator = AutoCompressionSearchSpaceGenerator() +generator.add_config('level', [ + { + "sparsity": { + "_type": "uniform", + "_value": [0.01, 0.99] + }, + 'op_types': ['default'] + } +]) +generator.add_config('l1', [ + { + "sparsity": { + "_type": "uniform", + "_value": [0.01, 0.99] + }, + 'op_types': ['Conv2d'] + } +]) +generator.add_config('qat', [ + { + 'quant_types': ['weight', 'output'], + 'quant_bits': { + 'weight': 8, + 'output': 8 + }, + 'op_types': ['Conv2d', 'Linear'] + }]) +search_space = generator.dumps() + +experiment = AutoCompressionExperiment(AutoCompressionModule, 'local') +experiment.config.experiment_name = 'auto compression torch example' +experiment.config.trial_concurrency = 1 +experiment.config.max_trial_number = 10 +experiment.config.search_space = search_space +experiment.config.trial_code_directory = Path(__file__).parent +experiment.config.tuner.name = 'TPE' +experiment.config.tuner.class_args['optimize_mode'] = 'maximize' +experiment.config.training_service.use_active_gpu = True + +experiment.run(8088) diff --git a/examples/model_compress/auto_compress/torch/mnist_pretrain_lenet.pth b/examples/model_compress/auto_compress/torch/mnist_pretrain_lenet.pth new file mode 100644 index 0000000000000000000000000000000000000000..f5afbe63fcb843bd5bf7933f41a9f509a28e4504 Binary files /dev/null and b/examples/model_compress/auto_compress/torch/mnist_pretrain_lenet.pth differ diff --git a/examples/model_compress/end2end_compression.py b/examples/model_compress/end2end_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..062d6351d64789c95185727ae8b0f65701be85db --- /dev/null +++ b/examples/model_compress/end2end_compression.py @@ -0,0 +1,300 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +NNI example for combined pruning and quantization to compress a model. +In this example, we show the compression process to first prune a model, then quantize the pruned model. 
+ +""" +import argparse +import os +import time +import torch +import torch.nn.functional as F +import torch.optim as optim +from torch.optim.lr_scheduler import StepLR +from torchvision import datasets, transforms + +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.compression.pytorch import ModelSpeedup + +from nni.algorithms.compression.pytorch.pruning import L1FilterPruner +from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer + +from models.mnist.naive import NaiveModel +from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT + + +def get_model_time_cost(model, dummy_input): + model.eval() + n_times = 100 + time_list = [] + for _ in range(n_times): + torch.cuda.synchronize() + tic = time.time() + _ = model(dummy_input) + torch.cuda.synchronize() + time_list.append(time.time()-tic) + time_list = time_list[10:] + return sum(time_list) / len(time_list) + + +def train(args, model, device, train_loader, criterion, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + if args.dry_run: + break + + +def test(args, model, device, criterion, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += criterion(output, target).item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + acc = 100 * correct / len(test_loader.dataset) + + print('Test Loss: {:.6f} Accuracy: {}%\n'.format( + test_loss, acc)) + return acc + +def test_trt(engine, test_loader): + test_loss = 0 + correct = 0 + time_elasped = 0 + for data, target in test_loader: + output, time = engine.inference(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + time_elasped += time + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + print("Inference elapsed_time (whole dataset): {}s".format(time_elasped)) + +def main(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.makedirs(args.experiment_data_dir, exist_ok=True) + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ]) + + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=transform), + batch_size=64,) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=transform), + batch_size=1000) + + # Step1. 
Model Pretraining + model = NaiveModel().to(device) + criterion = torch.nn.NLLLoss() + optimizer = optim.Adadelta(model.parameters(), lr=args.pretrain_lr) + scheduler = StepLR(optimizer, step_size=1, gamma=0.7) + flops, params, _ = count_flops_params(model, (1, 1, 28, 28), verbose=False) + + if args.pretrained_model_dir is None: + args.pretrained_model_dir = os.path.join(args.experiment_data_dir, f'pretrained.pth') + + best_acc = 0 + for epoch in range(args.pretrain_epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = test(args, model, device, criterion, test_loader) + if acc > best_acc: + best_acc = acc + state_dict = model.state_dict() + + model.load_state_dict(state_dict) + torch.save(state_dict, args.pretrained_model_dir) + print(f'Model saved to {args.pretrained_model_dir}') + else: + state_dict = torch.load(args.pretrained_model_dir) + model.load_state_dict(state_dict) + best_acc = test(args, model, device, criterion, test_loader) + + dummy_input = torch.randn([1000, 1, 28, 28]).to(device) + time_cost = get_model_time_cost(model, dummy_input) + + # 125.49 M, 0.85M, 93.29, 1.1012 + print(f'Pretrained model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}, Time Cost: {time_cost}') + + # Step2. Model Pruning + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['Conv2d'] + }] + + kw_args = {} + if args.dependency_aware: + dummy_input = torch.randn([1000, 1, 28, 28]).to(device) + print('Enable the dependency_aware mode') + # note that, not all pruners support the dependency_aware mode + kw_args['dependency_aware'] = True + kw_args['dummy_input'] = dummy_input + + pruner = L1FilterPruner(model, config_list, **kw_args) + model = pruner.compress() + pruner.get_pruned_weights() + + mask_path = os.path.join(args.experiment_data_dir, 'mask.pth') + model_path = os.path.join(args.experiment_data_dir, 'pruned.pth') + pruner.export_model(model_path=model_path, mask_path=mask_path) + pruner._unwrap_model() # unwrap all modules to normal state + + # Step3. Model Speedup + m_speedup = ModelSpeedup(model, dummy_input, mask_path, device) + m_speedup.speedup_model() + print('model after speedup', model) + + flops, params, _ = count_flops_params(model, dummy_input, verbose=False) + acc = test(args, model, device, criterion, test_loader) + time_cost = get_model_time_cost(model, dummy_input) + print(f'Pruned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {acc: .2f}, Time Cost: {time_cost}') + + # Step4. Model Finetuning + optimizer = optim.Adadelta(model.parameters(), lr=args.pretrain_lr) + scheduler = StepLR(optimizer, step_size=1, gamma=0.7) + + best_acc = 0 + for epoch in range(args.finetune_epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = test(args, model, device, criterion, test_loader) + if acc > best_acc: + best_acc = acc + state_dict = model.state_dict() + + model.load_state_dict(state_dict) + save_path = os.path.join(args.experiment_data_dir, f'finetuned.pth') + torch.save(state_dict, save_path) + + flops, params, _ = count_flops_params(model, dummy_input, verbose=True) + time_cost = get_model_time_cost(model, dummy_input) + + # FLOPs 28.48 M, #Params: 0.18M, Accuracy: 89.03, Time Cost: 1.03 + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}, Time Cost: {time_cost}') + print(f'Model saved to {save_path}') + + # Step5. 
Model Quantization via QAT + config_list = [{ + 'quant_types': ['weight', 'output'], + 'quant_bits': {'weight': 8, 'output': 8}, + 'op_names': ['conv1'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['relu1'] + }, { + 'quant_types': ['weight', 'output'], + 'quant_bits': {'weight': 8, 'output': 8}, + 'op_names': ['conv2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['relu2'] + }] + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + quantizer = QAT_Quantizer(model, config_list, optimizer) + quantizer.compress() + + # Step6. Quantization Aware Training + best_acc = 0 + for epoch in range(1): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = test(args, model, device, criterion, test_loader) + if acc > best_acc: + best_acc = acc + state_dict = model.state_dict() + + calibration_path = os.path.join(args.experiment_data_dir, 'calibration.pth') + calibration_config = quantizer.export_model(model_path, calibration_path) + print("calibration_config: ", calibration_config) + + # Step7. Model Speedup + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=32) + engine.compress() + + test_trt(engine, test_loader) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch example for model compression') + + # dataset and model + # parser.add_argument('--dataset', type=str, default='mnist', + # help='dataset to use, mnist, cifar10 or imagenet') + # parser.add_argument('--data-dir', type=str, default='./data/', + # help='dataset directory') + parser.add_argument('--pretrained-model-dir', type=str, default=None, + help='path to pretrained model') + parser.add_argument('--pretrain-epochs', type=int, default=10, + help='number of epochs to pretrain the model') + parser.add_argument('--pretrain-lr', type=float, default=1.0, + help='learning rate to pretrain the model') + + parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data', + help='For saving output checkpoints') + parser.add_argument('--log-interval', type=int, default=100, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('--dry-run', action='store_true', default=False, + help='quickly check a single pass') + # parser.add_argument('--multi-gpu', action='store_true', default=False, + # help='run on multiple gpus') + # parser.add_argument('--test-only', action='store_true', default=False, + # help='run test only') + + # pruner + # parser.add_argument('--pruner', type=str, default='l1filter', + # choices=['level', 'l1filter', 'l2filter', 'slim', 'agp', + # 'fpgm', 'mean_activation', 'apoz', 'admm'], + # help='pruner to use') + parser.add_argument('--sparsity', type=float, default=0.5, + help='target overall sparsity') + parser.add_argument('--dependency-aware', action='store_true', default=False, + help='toggle dependency aware mode') + + # finetuning + parser.add_argument('--finetune-epochs', type=int, default=5, + help='epochs to fine tune') + # parser.add_argument('--kd', action='store_true', default=False, + # help='use knowledge distillation during finetuning') + # parser.add_argument('--kd_T', type=float, default=4, + # help='temperature for KD distillation') + # parser.add_argument('--finetune-lr', type=float, default=0.5, + # help='learning rate to finetune the model') + + # speedup + # parser.add_argument('--speed-up', 
action='store_true', default=False, + # help='whether to speed-up the pruned model') + + # parser.add_argument('--nni', action='store_true', default=False, + # help="whether to tune the pruners using NNi tuners") + + args = parser.parse_args() + main(args) diff --git a/examples/model_compress/models/cifar10/resnet.py b/examples/model_compress/models/cifar10/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..386ff8321c477736f94195f7a296e50ce2115e08 --- /dev/null +++ b/examples/model_compress/models/cifar10/resnet.py @@ -0,0 +1,115 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d( + in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, + stride=1, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, + stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, self.expansion * + planes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion*planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet(nn.Module): + def __init__(self, block, num_blocks, num_classes=10): + super(ResNet, self).__init__() + self.in_planes = 64 + # this layer is different from torchvision.resnet18() since this model adopted for Cifar10 + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.linear = nn.Linear(512*block.expansion, num_classes) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = 
self.layer4(out) + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def ResNet18(): + return ResNet(BasicBlock, [2, 2, 2, 2]) + + +def ResNet34(): + return ResNet(BasicBlock, [3, 4, 6, 3]) + + +def ResNet50(): + return ResNet(Bottleneck, [3, 4, 6, 3]) + + +def ResNet101(): + return ResNet(Bottleneck, [3, 4, 23, 3]) + + +def ResNet152(): + return ResNet(Bottleneck, [3, 8, 36, 3]) diff --git a/examples/model_compress/models/cifar10/vgg.py b/examples/model_compress/models/cifar10/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..f293770c72a9f9567bddcdf38bbcabd24e5da749 --- /dev/null +++ b/examples/model_compress/models/cifar10/vgg.py @@ -0,0 +1,63 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +defaultcfg = { + 11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], + 13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], + 16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512], + 19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512], +} + + +class VGG(nn.Module): + def __init__(self, depth=16): + super(VGG, self).__init__() + cfg = defaultcfg[depth] + self.cfg = cfg + self.feature = self.make_layers(cfg, True) + num_classes = 10 + self.classifier = nn.Sequential( + nn.Linear(cfg[-1], 512), + nn.BatchNorm1d(512), + nn.ReLU(inplace=True), + nn.Linear(512, num_classes) + ) + self._initialize_weights() + + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + x = nn.AvgPool2d(2)(x) + x = x.view(x.size(0), -1) + y = self.classifier(x) + return y + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(0.5) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() diff --git a/examples/model_compress/models/mnist/lenet.py b/examples/model_compress/models/mnist/lenet.py new file mode 100644 index 0000000000000000000000000000000000000000..7501ac2fc6522b3ce5021056fdca8aebe191f316 --- /dev/null +++ b/examples/model_compress/models/mnist/lenet.py @@ -0,0 +1,29 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LeNet(nn.Module): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + self.conv2 = nn.Conv2d(32, 64, 3, 1) + self.dropout1 = nn.Dropout2d(0.25) + self.dropout2 = nn.Dropout2d(0.5) + self.fc1 = nn.Linear(9216, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.conv2(x) + x = F.relu(x) + x = F.max_pool2d(x, 2) + x = self.dropout1(x) + x = torch.flatten(x, 1) + x = self.fc1(x) + x = F.relu(x) + x = self.dropout2(x) + x = self.fc2(x) + output = F.log_softmax(x, dim=1) + return output diff --git a/examples/model_compress/models/mnist/naive.py b/examples/model_compress/models/mnist/naive.py new file mode 100644 index 0000000000000000000000000000000000000000..4609862527b87c933d1bbc9148991fcd30851fb3 --- /dev/null +++ b/examples/model_compress/models/mnist/naive.py @@ -0,0 +1,27 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import reduce + +class NaiveModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 20, 5, 1) + self.conv2 = torch.nn.Conv2d(20, 50, 5, 1) + self.fc1 = torch.nn.Linear(4 * 4 * 50, 500) + self.fc2 = torch.nn.Linear(500, 10) + self.relu1 = torch.nn.ReLU6() + self.relu2 = torch.nn.ReLU6() + self.relu3 = torch.nn.ReLU6() + self.max_pool1 = torch.nn.MaxPool2d(2, 2) + self.max_pool2 = torch.nn.MaxPool2d(2, 2) + + def forward(self, x): + x = self.relu1(self.conv1(x)) + x = self.max_pool1(x) + x = self.relu2(self.conv2(x)) + x = self.max_pool2(x) + x = x.view(-1, x.size()[1:].numel()) + x = self.relu3(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) \ No newline at end of file diff --git a/examples/model_compress/models/mobilenet.py b/examples/model_compress/models/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d1cec9ad1ecc0e097bbfd0ac49088716ef4fa9 --- /dev/null +++ b/examples/model_compress/models/mobilenet.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
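# conv_dw below is the depthwise-separable block from MobileNet: a 3x3 depthwise
# conv (groups=inp) followed by a 1x1 pointwise conv. Quick parameter count,
# ignoring BatchNorm: 3*3*inp + inp*oup weights versus 3*3*inp*oup for a plain
# 3x3 conv, a reduction factor of about 1/oup + 1/9, so roughly 8-9x when oup is
# large. For inp = oup = 512 that is 266_752 vs 2_359_296 weights.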
+ +import torch.nn as nn +import math + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + +def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + +class MobileNet(nn.Module): + def __init__(self, n_class, profile='normal'): + super(MobileNet, self).__init__() + + # original + if profile == 'normal': + in_planes = 32 + cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024] + # 0.5 AMC + elif profile == '0.5flops': + in_planes = 24 + cfg = [48, (96, 2), 80, (192, 2), 200, (328, 2), 352, 368, 360, 328, 400, (736, 2), 752] + else: + raise NotImplementedError + + self.conv1 = conv_bn(3, in_planes, stride=2) + + self.features = self._make_layers(in_planes, cfg, conv_dw) + + self.classifier = nn.Sequential( + nn.Linear(cfg[-1], n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.conv1(x) + x = self.features(x) + x = x.mean([2, 3]) # global average pooling + + x = self.classifier(x) + return x + + def _make_layers(self, in_planes, cfg, layer): + layers = [] + for x in cfg: + out_planes = x if isinstance(x, int) else x[0] + stride = 1 if isinstance(x, int) else x[1] + layers.append(layer(in_planes, out_planes, stride)) + in_planes = out_planes + return nn.Sequential(*layers) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() diff --git a/examples/model_compress/models/mobilenet_v2.py b/examples/model_compress/models/mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..0751a47bc3d84bd90aa0e2ce78b5ed16d7ceb226 --- /dev/null +++ b/examples/model_compress/models/mobilenet_v2.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
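# Shape sanity check for the network defined below (illustrative usage only, not
# part of the training pipeline):
#
#   import torch
#   net = MobileNetV2(n_class=10, input_size=224)
#   out = net(torch.randn(2, 3, 224, 224))
#   assert out.shape == (2, 10)  # global-average-pooled features -> classifier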
+ +import torch.nn as nn +import math + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +def conv_1x1_bn(inp, oup): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, n_class=1000, input_size=224, width_mult=1.): + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # building first layer + assert input_size % 32 == 0 + input_channel = int(input_channel * width_mult) + self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + self.features = [conv_bn(3, input_channel, 2)] + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: + self.features.append(block(input_channel, output_channel, s, expand_ratio=t)) + else: + self.features.append(block(input_channel, output_channel, 1, expand_ratio=t)) + input_channel = output_channel + # building last several layers + self.features.append(conv_1x1_bn(input_channel, self.last_channel)) + # make it nn.Sequential + self.features = nn.Sequential(*self.features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + # it's same with .mean(3).mean(2), but + # speedup only suport the mean option + # whose output only have two dimensions + x = x.mean([2, 3]) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() diff --git a/examples/model_compress/pruning/.gitignore b/examples/model_compress/pruning/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b4bd197eaaf6283a0a6b6e7cd5b89a8490186f8b --- /dev/null +++ b/examples/model_compress/pruning/.gitignore @@ -0,0 +1,2 @@ +cifar-10-python.tar.gz +cifar-10-batches-py/ \ No newline at end of file diff --git a/examples/model_compress/pruning/amc/README.md b/examples/model_compress/pruning/amc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e51cc402887184b69607566cb59fe381b445f3b7 --- /dev/null +++ b/examples/model_compress/pruning/amc/README.md @@ -0,0 +1,28 @@ +# AMCPruner Example +This example shows us how to use AMCPruner example. + +## Step 1: train a model for pruning +Run following command to train a mobilenetv2 model: +```bash +python3 amc_train.py --model_type mobilenetv2 --n_epoch 50 +``` +Once finished, saved checkpoint file can be found at: +``` +logs/mobilenetv2_cifar10_train-run1/ckpt.best.pth +``` + +## Pruning with AMCPruner +Run following command to prune the trained model: +```bash +python3 amc_search.py --model_type mobilenetv2 --ckpt logs/mobilenetv2_cifar10_train-run1/ckpt.best.pth +``` +Once finished, pruned model and mask can be found at: +``` +logs/mobilenetv2_cifar10_r0.5_search-run2 +``` + +## Finetune pruned model +Run `amc_train.py` again with `--ckpt` and `--mask` to speedup and finetune the pruned model: +```bash +python3 amc_train.py --model_type mobilenetv2 --ckpt logs/mobilenetv2_cifar10_r0.5_search-run2/best_model.pth --mask logs/mobilenetv2_cifar10_r0.5_search-run2/best_mask.pth --n_epoch 100 +``` diff --git a/examples/model_compress/pruning/amc/README_zh_CN.md b/examples/model_compress/pruning/amc/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..8a3f6fee5ad689eb992f81cbdea3f8710439ade2 --- /dev/null +++ b/examples/model_compress/pruning/amc/README_zh_CN.md @@ -0,0 +1,28 @@ +# AMCPruner 示例 +此示例将说明如何使用 AMCPruner。 + +## 步骤一:训练模型 +运行以下命令来训练 mobilenetv2 模型: +```bash +python3 amc_train.py --model_type mobilenetv2 --n_epoch 50 +``` +训练完成之后,检查点文件被保存在这里: +``` +logs/mobilenetv2_cifar10_train-run1/ckpt.best.pth +``` + +## 使用 AMCPruner 剪枝 +运行以下命令对模型进行剪枝: +```bash +python3 amc_search.py --model_type mobilenetv2 --ckpt logs/mobilenetv2_cifar10_train-run1/ckpt.best.pth +``` +完成之后,剪枝后的模型和掩码文件被保存在: +``` +logs/mobilenetv2_cifar10_r0.5_search-run2 +``` + +## 微调剪枝后的模型 +加上 `--ckpt` 和 `--mask` 参数,再次运行 `amc_train.py` 命令去加速和微调剪枝后的模型。 +```bash +python3 amc_train.py --model_type mobilenetv2 --ckpt logs/mobilenetv2_cifar10_r0.5_search-run2/best_model.pth --mask logs/mobilenetv2_cifar10_r0.5_search-run2/best_mask.pth --n_epoch 100 +``` diff --git a/examples/model_compress/pruning/amc/amc_search.py b/examples/model_compress/pruning/amc/amc_search.py new file mode 100644 index 0000000000000000000000000000000000000000..5c861a88879a890dc56d8aeeb7cb664f1e9f1897 --- /dev/null +++ b/examples/model_compress/pruning/amc/amc_search.py @@ -0,0 +1,138 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
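# Note: validate() below reads the module-level `device` that is only assigned
# inside the __main__ block, so the function works only when this file runs as a
# script. A self-contained variant would take the device explicitly, e.g. (an
# illustrative refactor, not part of the original example):
#
#   def validate(val_loader, model, device=torch.device('cpu'), verbose=False):
#       ...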
+ +import sys +import argparse +import time + +import torch +import torch.nn as nn +from torchvision.models import resnet +from nni.algorithms.compression.pytorch.pruning import AMCPruner +from data import get_split_dataset +from utils import AverageMeter, accuracy + +sys.path.append('../../models') + +def parse_args(): + parser = argparse.ArgumentParser(description='AMC search script') + parser.add_argument('--model_type', default='mobilenet', type=str, choices=['mobilenet', 'mobilenetv2', 'resnet18', 'resnet34', 'resnet50'], + help='model to prune') + parser.add_argument('--dataset', default='cifar10', type=str, choices=['cifar10', 'imagenet'], help='dataset to use (cifar/imagenet)') + parser.add_argument('--batch_size', default=50, type=int, help='number of data batch size') + parser.add_argument('--data_root', default='./data', type=str, help='dataset path') + parser.add_argument('--flops_ratio', default=0.5, type=float, help='target flops ratio to preserve of the model') + parser.add_argument('--lbound', default=0.2, type=float, help='minimum sparsity') + parser.add_argument('--rbound', default=1., type=float, help='maximum sparsity') + parser.add_argument('--ckpt_path', default=None, type=str, help='manual path of checkpoint') + + parser.add_argument('--train_episode', default=800, type=int, help='number of training episode') + parser.add_argument('--n_gpu', default=1, type=int, help='number of gpu to use') + parser.add_argument('--n_worker', default=16, type=int, help='number of data loader worker') + parser.add_argument('--suffix', default=None, type=str, help='suffix of auto-generated log directory') + + return parser.parse_args() + + +def get_model_and_checkpoint(model, dataset, checkpoint_path, n_gpu=1): + if dataset == 'imagenet': + n_class = 1000 + elif dataset == 'cifar10': + n_class = 10 + else: + raise ValueError('unsupported dataset') + + if model == 'mobilenet': + from mobilenet import MobileNet + net = MobileNet(n_class=n_class) + elif model == 'mobilenetv2': + from mobilenet_v2 import MobileNetV2 + net = MobileNetV2(n_class=n_class) + elif model.startswith('resnet'): + net = resnet.__dict__[model](pretrained=True) + in_features = net.fc.in_features + net.fc = nn.Linear(in_features, n_class) + else: + raise NotImplementedError + if checkpoint_path: + print('loading {}...'.format(checkpoint_path)) + sd = torch.load(checkpoint_path, map_location=torch.device('cpu')) + if 'state_dict' in sd: # a checkpoint but not a state_dict + sd = sd['state_dict'] + sd = {k.replace('module.', ''): v for k, v in sd.items()} + net.load_state_dict(sd) + + if torch.cuda.is_available() and n_gpu > 0: + net = net.cuda() + if n_gpu > 1: + net = torch.nn.DataParallel(net, range(n_gpu)) + + return net + +def init_data(args): + # split the train set into train + val + # for CIFAR, split 5k for val + # for ImageNet, split 3k for val + val_size = 5000 if 'cifar' in args.dataset else 3000 + train_loader, val_loader, _ = get_split_dataset( + args.dataset, args.batch_size, + args.n_worker, val_size, + data_root=args.data_root, + shuffle=False + ) # same sampling + return train_loader, val_loader + +def validate(val_loader, model, verbose=False): + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + criterion = nn.CrossEntropyLoss().cuda() + # switch to evaluate mode + model.eval() + end = time.time() + + t1 = time.time() + with torch.no_grad(): + for i, (input, target) in enumerate(val_loader): + target = target.to(device) + input_var = 
torch.autograd.Variable(input).to(device) + target_var = torch.autograd.Variable(target).to(device) + + # compute output + output = model(input_var) + loss = criterion(output, target_var) + + # measure accuracy and record loss + prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + t2 = time.time() + if verbose: + print('* Test loss: %.3f top1: %.3f top5: %.3f time: %.3f' % + (losses.avg, top1.avg, top5.avg, t2 - t1)) + return top5.avg + + +if __name__ == "__main__": + args = parse_args() + + device = torch.device('cuda') if torch.cuda.is_available() and args.n_gpu > 0 else torch.device('cpu') + + model = get_model_and_checkpoint(args.model_type, args.dataset, checkpoint_path=args.ckpt_path, n_gpu=args.n_gpu) + _, val_loader = init_data(args) + + config_list = [{ + 'op_types': ['Conv2d', 'Linear'] + }] + pruner = AMCPruner( + model, config_list, validate, val_loader, model_type=args.model_type, dataset=args.dataset, + train_episode=args.train_episode, flops_ratio=args.flops_ratio, lbound=args.lbound, + rbound=args.rbound, suffix=args.suffix) + pruner.compress() diff --git a/examples/model_compress/pruning/amc/amc_train.py b/examples/model_compress/pruning/amc/amc_train.py new file mode 100644 index 0000000000000000000000000000000000000000..c70bfe785cb1e4037f4544bcb3f02d555aee64fc --- /dev/null +++ b/examples/model_compress/pruning/amc/amc_train.py @@ -0,0 +1,246 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import sys +import os +import time +import argparse +import shutil +import math +import numpy as np + +import torch +import torch.nn as nn +import torch.optim as optim +from tensorboardX import SummaryWriter +from torchvision.models import resnet + +from nni.algorithms.compression.pytorch.pruning.amc.lib.net_measure import measure_model +from nni.algorithms.compression.pytorch.pruning.amc.lib.utils import get_output_folder +from nni.compression.pytorch import ModelSpeedup + +from data import get_dataset +from utils import AverageMeter, accuracy, progress_bar + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from mobilenet import MobileNet +from mobilenet_v2 import MobileNetV2 + +def parse_args(): + parser = argparse.ArgumentParser(description='AMC train / fine-tune script') + parser.add_argument('--model_type', default='mobilenet', type=str, + choices=['mobilenet', 'mobilenetv2', 'resnet18', 'resnet34', 'resnet50'], + help='name of the model to train') + parser.add_argument('--dataset', default='cifar10', type=str, help='name of the dataset to train') + parser.add_argument('--lr', default=0.05, type=float, help='learning rate') + parser.add_argument('--n_gpu', default=4, type=int, help='number of GPUs to use') + parser.add_argument('--batch_size', default=256, type=int, help='batch size') + parser.add_argument('--n_worker', default=32, type=int, help='number of data loader worker') + parser.add_argument('--lr_type', default='cos', type=str, help='lr scheduler (exp/cos/step3/fixed)') + parser.add_argument('--n_epoch', default=150, type=int, help='number of epochs to train') + parser.add_argument('--wd', default=4e-5, type=float, help='weight decay') + parser.add_argument('--seed', default=None, type=int, help='random seed to set') + parser.add_argument('--data_root', default='./data', 
type=str, help='dataset path') + # resume + parser.add_argument('--ckpt_path', default=None, type=str, help='checkpoint path to fine tune') + parser.add_argument('--mask_path', default=None, type=str, help='mask path for speedup') + + # run eval + parser.add_argument('--eval', action='store_true', help='Simply run eval') + parser.add_argument('--calc_flops', action='store_true', help='Calculate flops') + + return parser.parse_args() + +def get_model(args): + print('=> Building model..') + + if args.dataset == 'imagenet': + n_class = 1000 + elif args.dataset == 'cifar10': + n_class = 10 + else: + raise NotImplementedError + + if args.model_type == 'mobilenet': + net = MobileNet(n_class=n_class) + elif args.model_type == 'mobilenetv2': + net = MobileNetV2(n_class=n_class) + elif args.model_type.startswith('resnet'): + net = resnet.__dict__[args.model_type](pretrained=True) + in_features = net.fc.in_features + net.fc = nn.Linear(in_features, n_class) + else: + raise NotImplementedError + + if args.ckpt_path is not None: + # the checkpoint can be state_dict exported by amc_search.py or saved by amc_train.py + print('=> Loading checkpoint {} ..'.format(args.ckpt_path)) + net.load_state_dict(torch.load(args.ckpt_path, torch.device('cpu'))) + if args.mask_path is not None: + SZ = 224 if args.dataset == 'imagenet' else 32 + data = torch.randn(2, 3, SZ, SZ) + ms = ModelSpeedup(net, data, args.mask_path, torch.device('cpu')) + ms.speedup_model() + + net.to(args.device) + if torch.cuda.is_available() and args.n_gpu > 1: + net = torch.nn.DataParallel(net, list(range(args.n_gpu))) + return net + +def train(epoch, train_loader, device): + print('\nEpoch: %d' % epoch) + net.train() + + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + end = time.time() + + for batch_idx, (inputs, targets) in enumerate(train_loader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + + loss.backward() + optimizer.step() + + # measure accuracy and record loss + prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5)) + losses.update(loss.item(), inputs.size(0)) + top1.update(prec1.item(), inputs.size(0)) + top5.update(prec5.item(), inputs.size(0)) + # timing + batch_time.update(time.time() - end) + end = time.time() + + progress_bar(batch_idx, len(train_loader), 'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%' + .format(losses.avg, top1.avg, top5.avg)) + writer.add_scalar('loss/train', losses.avg, epoch) + writer.add_scalar('acc/train_top1', top1.avg, epoch) + writer.add_scalar('acc/train_top5', top5.avg, epoch) + +def test(epoch, test_loader, device, save=True): + global best_acc + net.eval() + + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + end = time.time() + + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(test_loader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + # measure accuracy and record loss + prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5)) + losses.update(loss.item(), inputs.size(0)) + top1.update(prec1.item(), inputs.size(0)) + top5.update(prec5.item(), inputs.size(0)) + # timing + batch_time.update(time.time() - end) + end = time.time() + + progress_bar(batch_idx, len(test_loader), 'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%' + .format(losses.avg, top1.avg, top5.avg)) + + if save: + 
writer.add_scalar('loss/test', losses.avg, epoch) + writer.add_scalar('acc/test_top1', top1.avg, epoch) + writer.add_scalar('acc/test_top5', top5.avg, epoch) + + is_best = False + if top1.avg > best_acc: + best_acc = top1.avg + is_best = True + + print('Current best acc: {}'.format(best_acc)) + save_checkpoint({ + 'epoch': epoch, + 'model': args.model_type, + 'dataset': args.dataset, + 'state_dict': net.module.state_dict() if isinstance(net, nn.DataParallel) else net.state_dict(), + 'acc': top1.avg, + 'optimizer': optimizer.state_dict(), + }, is_best, checkpoint_dir=log_dir) + +def adjust_learning_rate(optimizer, epoch): + if args.lr_type == 'cos': # cos without warm-up + lr = 0.5 * args.lr * (1 + math.cos(math.pi * epoch / args.n_epoch)) + elif args.lr_type == 'exp': + step = 1 + decay = 0.96 + lr = args.lr * (decay ** (epoch // step)) + elif args.lr_type == 'fixed': + lr = args.lr + else: + raise NotImplementedError + print('=> lr: {}'.format(lr)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + return lr + +def save_checkpoint(state, is_best, checkpoint_dir='.'): + filename = os.path.join(checkpoint_dir, 'ckpt.pth') + print('=> Saving checkpoint to {}'.format(filename)) + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, filename.replace('.pth', '.best.pth')) + +if __name__ == '__main__': + args = parse_args() + + if torch.cuda.is_available(): + torch.backends.cudnn.benchmark = True + args.device = torch.device('cuda') if torch.cuda.is_available() and args.n_gpu > 0 else torch.device('cpu') + + best_acc = 0 # best test accuracy + start_epoch = 0 # start from epoch 0 or last checkpoint epoch + + if args.seed is not None: + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + print('=> Preparing data..') + train_loader, val_loader, n_class = get_dataset(args.dataset, args.batch_size, args.n_worker, + data_root=args.data_root) + + net = get_model(args) # for measure + + if args.calc_flops: + IMAGE_SIZE = 224 if args.dataset == 'imagenet' else 32 + n_flops, n_params = measure_model(net, IMAGE_SIZE, IMAGE_SIZE, args.device) + print('=> Model Parameter: {:.3f} M, FLOPs: {:.3f}M'.format(n_params / 1e6, n_flops / 1e6)) + exit(0) + + criterion = nn.CrossEntropyLoss() + print('Using SGD...') + print('weight decay = {}'.format(args.wd)) + optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wd) + + if args.eval: # just run eval + print('=> Start evaluation...') + test(0, val_loader, args.device, save=False) + else: # train + print('=> Start training...') + print('Training {} on {}...'.format(args.model_type, args.dataset)) + train_type = 'train' if args.ckpt_path is None else 'finetune' + log_dir = get_output_folder('./logs', '{}_{}_{}'.format(args.model_type, args.dataset, train_type)) + print('=> Saving logs to {}'.format(log_dir)) + # tf writer + writer = SummaryWriter(logdir=log_dir) + + for epoch in range(start_epoch, start_epoch + args.n_epoch): + lr = adjust_learning_rate(optimizer, epoch) + train(epoch, train_loader, args.device) + test(epoch, val_loader, args.device) + + writer.close() + print('=> Best top-1 acc: {}%'.format(best_acc)) diff --git a/examples/model_compress/pruning/amc/data.py b/examples/model_compress/pruning/amc/data.py new file mode 100644 index 0000000000000000000000000000000000000000..71935b3517913b6f733e08cd7db5cb89ba2ab478 --- /dev/null +++ b/examples/model_compress/pruning/amc/data.py @@ -0,0 +1,156 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT license. + +import torch +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torchvision +import torchvision.transforms as transforms +import torchvision.datasets as datasets +from torch.utils.data.sampler import SubsetRandomSampler +import numpy as np + +import os + + +def get_dataset(dset_name, batch_size, n_worker, data_root='../../data'): + cifar_tran_train = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ] + cifar_tran_test = [ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ] + print('=> Preparing data..') + if dset_name == 'cifar10': + transform_train = transforms.Compose(cifar_tran_train) + transform_test = transforms.Compose(cifar_tran_test) + trainset = torchvision.datasets.CIFAR10(root=data_root, train=True, download=True, transform=transform_train) + train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, + num_workers=n_worker, pin_memory=True, sampler=None) + testset = torchvision.datasets.CIFAR10(root=data_root, train=False, download=True, transform=transform_test) + val_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, + num_workers=n_worker, pin_memory=True) + n_class = 10 + elif dset_name == 'imagenet': + # get dir + traindir = os.path.join(data_root, 'train') + valdir = os.path.join(data_root, 'val') + + # preprocessing + input_size = 224 + imagenet_tran_train = [ + transforms.RandomResizedCrop(input_size, scale=(0.2, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + imagenet_tran_test = [ + transforms.Resize(int(input_size / 0.875)), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + + train_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(traindir, transforms.Compose(imagenet_tran_train)), + batch_size=batch_size, shuffle=True, + num_workers=n_worker, pin_memory=True, sampler=None) + + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose(imagenet_tran_test)), + batch_size=batch_size, shuffle=False, + num_workers=n_worker, pin_memory=True) + n_class = 1000 + + else: + raise NotImplementedError + + return train_loader, val_loader, n_class + + +def get_split_dataset(dset_name, batch_size, n_worker, val_size, data_root='../data', shuffle=True): + ''' + split the train set into train / val for rl search + ''' + if shuffle: + index_sampler = SubsetRandomSampler + else: # every time we use the same order for the split subset + class SubsetSequentialSampler(SubsetRandomSampler): + def __iter__(self): + return (self.indices[i] for i in torch.arange(len(self.indices)).int()) + index_sampler = SubsetSequentialSampler + + print('=> Preparing data: {}...'.format(dset_name)) + if dset_name == 'cifar10': + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + trainset = torchvision.datasets.CIFAR100(root=data_root, 
train=True, download=True, transform=transform_train) + valset = torchvision.datasets.CIFAR10(root=data_root, train=True, download=True, transform=transform_test) + n_train = len(trainset) + indices = list(range(n_train)) + # now shuffle the indices + #np.random.shuffle(indices) + assert val_size < n_train + train_idx, val_idx = indices[val_size:], indices[:val_size] + + train_sampler = index_sampler(train_idx) + val_sampler = index_sampler(val_idx) + + train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False, sampler=train_sampler, + num_workers=n_worker, pin_memory=True) + val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, sampler=val_sampler, + num_workers=n_worker, pin_memory=True) + n_class = 10 + elif dset_name == 'imagenet': + train_dir = os.path.join(data_root, 'train') + val_dir = os.path.join(data_root, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + input_size = 224 + train_transform = transforms.Compose([ + transforms.RandomResizedCrop(input_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ]) + test_transform = transforms.Compose([ + transforms.Resize(int(input_size/0.875)), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + normalize, + ]) + + trainset = datasets.ImageFolder(train_dir, train_transform) + valset = datasets.ImageFolder(train_dir, test_transform) + n_train = len(trainset) + indices = list(range(n_train)) + np.random.shuffle(indices) + assert val_size < n_train + train_idx, val_idx = indices[val_size:], indices[:val_size] + + train_sampler = index_sampler(train_idx) + val_sampler = index_sampler(val_idx) + + train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler, + num_workers=n_worker, pin_memory=True) + val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, sampler=val_sampler, + num_workers=n_worker, pin_memory=True) + + n_class = 1000 + else: + raise NotImplementedError + + return train_loader, val_loader, n_class diff --git a/examples/model_compress/pruning/amc/utils.py b/examples/model_compress/pruning/amc/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a717cd5fea10042f5fc3cdffd49c80990e0467ff --- /dev/null +++ b/examples/model_compress/pruning/amc/utils.py @@ -0,0 +1,138 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
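# Usage sketch for AverageMeter as the AMC scripts use it (values hypothetical):
#
#   losses = AverageMeter()
#   losses.update(0.7, n=32)   # mean loss 0.7 over a batch of 32 samples
#   losses.update(0.5, n=32)
#   print(losses.avg)          # 0.6, the sample-weighted running average
#
# Caveat: progress_bar() below reads the terminal width via `stty size` at
# import time, which fails when stdout is not attached to a TTY (e.g. piped
# logs), so importing this module in such an environment raises at os.popen.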
+ +import sys +import os +import time + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + if self.count > 0: + self.avg = self.sum / self.count + + def accumulate(self, val, n=1): + self.sum += val + self.count += n + if self.count > 0: + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1, 5)): + """Computes the precision@k for the specified values of k""" + batch_size = target.size(0) + num = output.size(1) + target_topk = [] + appendices = [] + for k in topk: + if k <= num: + target_topk.append(k) + else: + appendices.append([0.0]) + topk = target_topk + maxk = max(topk) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].contiguous().view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + appendices + + +# Custom progress bar +_, term_width = os.popen('stty size', 'r').read().split() +term_width = int(term_width) +TOTAL_BAR_LENGTH = 40. +last_time = time.time() +begin_time = last_time + + +def progress_bar(current, total, msg=None): + def format_time(seconds): + days = int(seconds / 3600 / 24) + seconds = seconds - days * 3600 * 24 + hours = int(seconds / 3600) + seconds = seconds - hours * 3600 + minutes = int(seconds / 60) + seconds = seconds - minutes * 60 + secondsf = int(seconds) + seconds = seconds - secondsf + millis = int(seconds * 1000) + + f = '' + i = 1 + if days > 0: + f += str(days) + 'D' + i += 1 + if hours > 0 and i <= 2: + f += str(hours) + 'h' + i += 1 + if minutes > 0 and i <= 2: + f += str(minutes) + 'm' + i += 1 + if secondsf > 0 and i <= 2: + f += str(secondsf) + 's' + i += 1 + if millis > 0 and i <= 2: + f += str(millis) + 'ms' + i += 1 + if f == '': + f = '0ms' + return f + + global last_time, begin_time + if current == 0: + begin_time = time.time() # Reset for new bar. + + cur_len = int(TOTAL_BAR_LENGTH*current/total) + rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1 + + sys.stdout.write(' [') + for i in range(cur_len): + sys.stdout.write('=') + sys.stdout.write('>') + for i in range(rest_len): + sys.stdout.write('.') + sys.stdout.write(']') + + cur_time = time.time() + step_time = cur_time - last_time + last_time = cur_time + tot_time = cur_time - begin_time + + L = [] + L.append(' Step: %s' % format_time(step_time)) + L.append(' | Tot: %s' % format_time(tot_time)) + if msg: + L.append(' | ' + msg) + + msg = ''.join(L) + sys.stdout.write(msg) + for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3): + sys.stdout.write(' ') + + # Go back to the center of the bar. + for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2): + sys.stdout.write('\b') + sys.stdout.write(' %d/%d ' % (current+1, total)) + + if current < total-1: + sys.stdout.write('\r') + else: + sys.stdout.write('\n') + sys.stdout.flush() diff --git a/examples/model_compress/pruning/auto_pruners_torch.py b/examples/model_compress/pruning/auto_pruners_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..e3015492764bac79c5c1930facaaa5e9fbd30f70 --- /dev/null +++ b/examples/model_compress/pruning/auto_pruners_torch.py @@ -0,0 +1,420 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
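# The one-shot pruners imported below share one calling pattern; a minimal
# sketch (same config schema as the end2end example in this repo):
#
#   config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
#   pruner = L1FilterPruner(model, config_list)
#   model = pruner.compress()                      # applies masks, wraps modules
#   pruner.export_model('pruned.pth', 'mask.pth')  # weights + masks for speedup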
+ +''' +Example for supported automatic pruning algorithms. +In this example, we present the usage of automatic pruners (NetAdapt, AutoCompressPruner). L1, L2, FPGM pruners are also executed for comparison purposes. +''' + +import argparse +import os +import sys +import json +from copy import deepcopy +import torch +from torch.optim.lr_scheduler import StepLR, MultiStepLR +from torchvision import datasets, transforms + +from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, L2FilterPruner, FPGMPruner +from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner, ADMMPruner, NetAdaptPruner, AutoCompressPruner +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[1] / 'models')) +from mnist.lenet import LeNet +from cifar10.vgg import VGG +from cifar10.resnet import ResNet18, ResNet50 + + +def get_data(dataset, data_dir, batch_size, test_batch_size): + ''' + get data + ''' + kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {} + + if dataset == 'mnist': + train_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=batch_size, shuffle=True, **kwargs) + val_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=False, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=test_batch_size, shuffle=True, **kwargs) + criterion = torch.nn.NLLLoss() + elif dataset == 'cifar10': + normalize = transforms.Normalize( + (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10(data_dir, train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=batch_size, shuffle=True, **kwargs) + + val_loader = torch.utils.data.DataLoader( + datasets.CIFAR10(data_dir, train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=batch_size, shuffle=False, **kwargs) + criterion = torch.nn.CrossEntropyLoss() + else: + raise ValueError('unsupported dataset: %s' % dataset) + return train_loader, val_loader, criterion + + +def train(args, model, device, train_loader, criterion, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + + +def test(model, device, criterion, val_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in val_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # sum up batch loss + test_loss += criterion(output, target).item() + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(val_loader.dataset) + accuracy = correct / len(val_loader.dataset) + + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( + test_loss, correct, len(val_loader.dataset), 100. * accuracy)) + + return accuracy + + +def get_trained_model_optimizer(args, device, train_loader, val_loader, criterion): + if args.model == 'LeNet': + model = LeNet().to(device) + if args.load_pretrained_model: + model.load_state_dict(torch.load(args.pretrained_model_dir)) + optimizer = torch.optim.Adadelta(model.parameters(), lr=1e-4) + else: + optimizer = torch.optim.Adadelta(model.parameters(), lr=1) + scheduler = StepLR(optimizer, step_size=1, gamma=0.7) + elif args.model == 'vgg16': + model = VGG(depth=16).to(device) + if args.load_pretrained_model: + model.load_state_dict(torch.load(args.pretrained_model_dir)) + optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=5e-4) + else: + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1) + elif args.model == 'resnet18': + model = ResNet18().to(device) + if args.load_pretrained_model: + model.load_state_dict(torch.load(args.pretrained_model_dir)) + optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=5e-4) + else: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1) + elif args.model == 'resnet50': + model = ResNet50().to(device) + if args.load_pretrained_model: + model.load_state_dict(torch.load(args.pretrained_model_dir)) + optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=5e-4) + else: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1) + else: + raise ValueError("model not recognized") + + if not args.load_pretrained_model: + best_acc = 0 + best_epoch = 0 + # keep an initial snapshot so state_dict is defined even if accuracy never improves + state_dict = deepcopy(model.state_dict()) + for epoch in range(args.pretrain_epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = test(model, device, criterion, val_loader) + if acc > best_acc: + best_acc = acc + best_epoch = epoch + # snapshot the best weights (state_dict() returns references, so copy) + state_dict = deepcopy(model.state_dict()) + model.load_state_dict(state_dict) + print('Best acc:', best_acc) + print('Best epoch:', best_epoch) + + if args.save_model: + torch.save(state_dict, os.path.join(args.experiment_data_dir, 'model_trained.pth')) + print('Trained model saved to %s' % args.experiment_data_dir) + + return model, optimizer + + +def get_dummy_input(args, device): + if args.dataset == 'mnist': + dummy_input = torch.randn([args.test_batch_size, 1, 28, 28]).to(device) + elif args.dataset == 'cifar10': + dummy_input = 
torch.randn([args.test_batch_size, 3, 32, 32]).to(device) + elif args.dataset == 'imagenet': + # keep the dummy input consistent with get_input_size below + dummy_input = torch.randn([args.test_batch_size, 3, 256, 256]).to(device) + return dummy_input + + +def get_input_size(dataset): + if dataset == 'mnist': + input_size = (1, 1, 28, 28) + elif dataset == 'cifar10': + input_size = (1, 3, 32, 32) + elif dataset == 'imagenet': + input_size = (1, 3, 256, 256) + return input_size + + +def main(args): + # prepare dataset + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + train_loader, val_loader, criterion = get_data(args.dataset, args.data_dir, args.batch_size, args.test_batch_size) + model, optimizer = get_trained_model_optimizer(args, device, train_loader, val_loader, criterion) + + def short_term_fine_tuner(model, epochs=1): + for epoch in range(epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + + def trainer(model, optimizer, criterion, epoch): + return train(args, model, device, train_loader, criterion, optimizer, epoch=epoch) + + def evaluator(model): + return test(model, device, criterion, val_loader) + + # used to save the performance of the original & pruned & finetuned models + result = {'flops': {}, 'params': {}, 'performance':{}} + + flops, params, _ = count_flops_params(model, get_input_size(args.dataset)) + result['flops']['original'] = flops + result['params']['original'] = params + + evaluation_result = evaluator(model) + print('Evaluation result (original model): %s' % evaluation_result) + result['performance']['original'] = evaluation_result + + # module types to prune; only "Conv2d" is supported for channel pruning + if args.base_algo in ['l1', 'l2', 'fpgm']: + op_types = ['Conv2d'] + elif args.base_algo == 'level': + op_types = ['default'] + + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': op_types + }] + dummy_input = get_dummy_input(args, device) + if args.pruner == 'L1FilterPruner': + pruner = L1FilterPruner(model, config_list) + elif args.pruner == 'L2FilterPruner': + pruner = L2FilterPruner(model, config_list) + elif args.pruner == 'FPGMPruner': + pruner = FPGMPruner(model, config_list) + elif args.pruner == 'NetAdaptPruner': + pruner = NetAdaptPruner(model, config_list, short_term_fine_tuner=short_term_fine_tuner, evaluator=evaluator, + base_algo=args.base_algo, experiment_data_dir=args.experiment_data_dir) + elif args.pruner == 'ADMMPruner': + # users are free to change the config here + if args.model == 'LeNet': + if args.base_algo in ['l1', 'l2', 'fpgm']: + config_list = [{ + 'sparsity': 0.8, + 'op_types': ['Conv2d'], + 'op_names': ['conv1'] + }, { + 'sparsity': 0.92, + 'op_types': ['Conv2d'], + 'op_names': ['conv2'] + }] + elif args.base_algo == 'level': + config_list = [{ + 'sparsity': 0.8, + 'op_names': ['conv1'] + }, { + 'sparsity': 0.92, + 'op_names': ['conv2'] + }, { + 'sparsity': 0.991, + 'op_names': ['fc1'] + }, { + 'sparsity': 0.93, + 'op_names': ['fc2'] + }] + else: + raise ValueError('Example only implemented for LeNet.') + pruner = ADMMPruner(model, config_list, trainer=trainer, num_iterations=2, epochs_per_iteration=2) + elif args.pruner == 'SimulatedAnnealingPruner': + pruner = SimulatedAnnealingPruner( + model, config_list, evaluator=evaluator, base_algo=args.base_algo, + cool_down_rate=args.cool_down_rate, experiment_data_dir=args.experiment_data_dir) + elif args.pruner == 'AutoCompressPruner': + pruner = AutoCompressPruner( + model, config_list, trainer=trainer, evaluator=evaluator, dummy_input=dummy_input, + num_iterations=3, optimize_mode='maximize', base_algo=args.base_algo, + cool_down_rate=args.cool_down_rate, 
admm_num_iterations=30, admm_epochs_per_iteration=5, + experiment_data_dir=args.experiment_data_dir) + else: + raise ValueError( + "Pruner not supported.") + + # Pruner.compress() returns the masked model; + # for AutoCompressPruner, compress() directly returns the pruned model + model = pruner.compress() + evaluation_result = evaluator(model) + print('Evaluation result (masked model): %s' % evaluation_result) + result['performance']['pruned'] = evaluation_result + + if args.save_model: + pruner.export_model( + os.path.join(args.experiment_data_dir, 'model_masked.pth'), os.path.join(args.experiment_data_dir, 'mask.pth')) + print('Masked model saved to %s' % args.experiment_data_dir) + + # model speedup + if args.speed_up: + if args.pruner != 'AutoCompressPruner': + if args.model == 'LeNet': + model = LeNet().to(device) + elif args.model == 'vgg16': + model = VGG(depth=16).to(device) + elif args.model == 'resnet18': + model = ResNet18().to(device) + elif args.model == 'resnet50': + model = ResNet50().to(device) + + model.load_state_dict(torch.load(os.path.join(args.experiment_data_dir, 'model_masked.pth'))) + masks_file = os.path.join(args.experiment_data_dir, 'mask.pth') + + m_speedup = ModelSpeedup(model, dummy_input, masks_file, device) + m_speedup.speedup_model() + evaluation_result = evaluator(model) + print('Evaluation result (speedup model): %s' % evaluation_result) + result['performance']['speedup'] = evaluation_result + + torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_speed_up.pth')) + print('Speedup model saved to %s' % args.experiment_data_dir) + flops, params, _ = count_flops_params(model, get_input_size(args.dataset)) + result['flops']['speedup'] = flops + result['params']['speedup'] = params + + if args.fine_tune: + if args.dataset == 'mnist': + optimizer = torch.optim.Adadelta(model.parameters(), lr=1) + scheduler = StepLR(optimizer, step_size=1, gamma=0.7) + elif args.dataset == 'cifar10' and args.model == 'vgg16': + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.fine_tune_epochs*0.5), int(args.fine_tune_epochs*0.75)], gamma=0.1) + elif args.dataset == 'cifar10' and args.model == 'resnet18': + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.fine_tune_epochs*0.5), int(args.fine_tune_epochs*0.75)], gamma=0.1) + elif args.dataset == 'cifar10' and args.model == 'resnet50': + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.fine_tune_epochs*0.5), int(args.fine_tune_epochs*0.75)], gamma=0.1) + best_acc = 0 + for epoch in range(args.fine_tune_epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = evaluator(model) + if acc > best_acc: + best_acc = acc + torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_fine_tuned.pth')) + + print('Evaluation result (fine tuned): %s' % best_acc) + print('Fine-tuned model saved to %s' % args.experiment_data_dir) + result['performance']['finetuned'] = best_acc + + with open(os.path.join(args.experiment_data_dir, 'result.json'), 'w+') as f: + json.dump(result, f) + + +if __name__ == '__main__': + def str2bool(s): + if isinstance(s, bool): + return s + if s.lower() in ('yes', 'true', 't', 'y', '1'): + return True + if s.lower() in 
('no', 'false', 'f', 'n', '0'): + return False + raise argparse.ArgumentTypeError('Boolean value expected.') + + parser = argparse.ArgumentParser(description='PyTorch example for automatic pruners') + + # dataset and model + parser.add_argument('--dataset', type=str, default='cifar10', + help='dataset to use, mnist or cifar10') + parser.add_argument('--data-dir', type=str, default='./data/', + help='dataset directory') + parser.add_argument('--model', type=str, default='vgg16', + help='model to use, LeNet, vgg16, resnet18 or resnet50') + parser.add_argument('--load-pretrained-model', type=str2bool, default=False, + help='whether to load pretrained model') + parser.add_argument('--pretrained-model-dir', type=str, default='./', + help='path to pretrained model') + parser.add_argument('--pretrain-epochs', type=int, default=100, + help='number of epochs to pretrain the model') + parser.add_argument('--batch-size', type=int, default=64, + help='input batch size for training (default: 64)') + parser.add_argument('--test-batch-size', type=int, default=64, + help='input batch size for testing (default: 64)') + parser.add_argument('--fine-tune', type=str2bool, default=True, + help='whether to fine-tune the pruned model') + parser.add_argument('--fine-tune-epochs', type=int, default=5, + help='epochs to fine tune') + parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data', + help='For saving experiment data') + + # pruner + parser.add_argument('--pruner', type=str, default='SimulatedAnnealingPruner', + help='pruner to use') + parser.add_argument('--base-algo', type=str, default='l1', + help='base pruning algorithm. level, l1, l2, or fpgm') + parser.add_argument('--sparsity', type=float, default=0.1, + help='overall target sparsity') + # param for SimulatedAnnealingPruner + parser.add_argument('--cool-down-rate', type=float, default=0.9, + help='cool down rate') + # param for NetAdaptPruner + parser.add_argument('--sparsity-per-iteration', type=float, default=0.05, + help='sparsity_per_iteration of NetAdaptPruner') + + # speed-up + parser.add_argument('--speed-up', type=str2bool, default=False, + help='Whether to speed-up the pruned model') + + # others + parser.add_argument('--log-interval', type=int, default=200, + help='how many batches to wait before logging training status') + parser.add_argument('--save-model', type=str2bool, default=True, + help='For Saving the current Model') + + args = parser.parse_args() + + if not os.path.exists(args.experiment_data_dir): + os.makedirs(args.experiment_data_dir) + + main(args) diff --git a/examples/model_compress/pruning/basic_pruners_torch.py b/examples/model_compress/pruning/basic_pruners_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..cbfd8e32025f257426ebfbe6881cfc3ac509bd4f --- /dev/null +++ b/examples/model_compress/pruning/basic_pruners_torch.py @@ -0,0 +1,387 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported basic pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a truly compressed model, model speedup is required. +You can also try auto_pruners_torch.py to see the usage of some automatic pruning algorithms.
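+ +Example usage (values are illustrative; the flags are defined in the argparse section below): + python basic_pruners_torch.py --model vgg16 --dataset cifar10 --pruner l1filter --sparsity 0.5 --speed-up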
+ +''' +import logging + +import argparse +import os +import sys +from copy import deepcopy +import torch +from torch.optim.lr_scheduler import StepLR, MultiStepLR +from torchvision import datasets, transforms + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[1] / 'models')) +from mnist.lenet import LeNet +from cifar10.vgg import VGG +from cifar10.resnet import ResNet18 + +from nni.compression.pytorch.utils.counter import count_flops_params + +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.algorithms.compression.pytorch.pruning import ( + LevelPruner, + SlimPruner, + FPGMPruner, + TaylorFOWeightFilterPruner, + L1FilterPruner, + L2FilterPruner, + AGPPruner, + ActivationMeanRankFilterPruner, + ActivationAPoZRankFilterPruner +) + +_logger = logging.getLogger('basic_pruners_example') +_logger.setLevel(logging.INFO) + +str2pruner = { + 'level': LevelPruner, + 'l1filter': L1FilterPruner, + 'l2filter': L2FilterPruner, + 'slim': SlimPruner, + 'agp': AGPPruner, + 'fpgm': FPGMPruner, + 'mean_activation': ActivationMeanRankFilterPruner, + 'apoz': ActivationAPoZRankFilterPruner, + 'taylorfo': TaylorFOWeightFilterPruner +} + +def get_dummy_input(args, device): + if args.dataset == 'mnist': + dummy_input = torch.randn([args.test_batch_size, 1, 28, 28]).to(device) + elif args.dataset in ['cifar10', 'imagenet']: + dummy_input = torch.randn([args.test_batch_size, 3, 32, 32]).to(device) + return dummy_input + + +def get_data(dataset, data_dir, batch_size, test_batch_size): + kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {} + + if dataset == 'mnist': + train_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=batch_size, shuffle=True, **kwargs) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=False, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=test_batch_size, shuffle=True, **kwargs) + criterion = torch.nn.NLLLoss() + elif dataset == 'cifar10': + normalize = transforms.Normalize( + (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10(data_dir, train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=batch_size, shuffle=True, **kwargs) + + test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10(data_dir, train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=batch_size, shuffle=False, **kwargs) + criterion = torch.nn.CrossEntropyLoss() + else: + raise ValueError('unsupported dataset: %s' % dataset) + return train_loader, test_loader, criterion + +def get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion): + if args.model == 'lenet': + model = LeNet().to(device) + if args.pretrained_model_dir is None: + optimizer = torch.optim.Adadelta(model.parameters(), lr=1) + scheduler = StepLR(optimizer, step_size=1, gamma=0.7) + elif args.model == 'vgg16': + model = VGG(depth=16).to(device) + if args.pretrained_model_dir is None: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1) + elif args.model == 'vgg19': + model = 
VGG(depth=19).to(device) + if args.pretrained_model_dir is None: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1) + elif args.model == 'resnet18': + model = ResNet18().to(device) + if args.pretrained_model_dir is None: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1) + else: + raise ValueError("model not recognized") + + if args.pretrained_model_dir is None: + print('start pre-training...') + best_acc = 0 + # keep an initial snapshot so state_dict is defined even if accuracy never improves + state_dict = deepcopy(model.state_dict()) + for epoch in range(args.pretrain_epochs): + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + acc = test(args, model, device, criterion, test_loader) + if acc > best_acc: + best_acc = acc + # snapshot the best weights (state_dict() returns references, so copy) + state_dict = deepcopy(model.state_dict()) + + model.load_state_dict(state_dict) + acc = best_acc + + torch.save(state_dict, os.path.join(args.experiment_data_dir, f'pretrain_{args.dataset}_{args.model}.pth')) + print('Trained model saved to %s' % args.experiment_data_dir) + + else: + model.load_state_dict(torch.load(args.pretrained_model_dir)) + best_acc = test(args, model, device, criterion, test_loader) + + # set up a new optimizer for pruning + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR(optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1) + + print('Pretrained model acc:', best_acc) + return model, optimizer, scheduler + +def train(args, model, device, train_loader, criterion, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + if args.dry_run: + break + +def test(args, model, device, criterion, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += criterion(output, target).item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + acc = 100 * correct / len(test_loader.dataset) + + print('Test Loss: {} Accuracy: {}%\n'.format( + test_loss, acc)) + return acc + + +def main(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.makedirs(args.experiment_data_dir, exist_ok=True) + + # prepare model and data + train_loader, test_loader, criterion = get_data(args.dataset, args.data_dir, args.batch_size, args.test_batch_size) + + model, optimizer, _ = get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion) + + dummy_input = get_dummy_input(args, device) + flops, params, _ = count_flops_params(model, dummy_input) + print(f"FLOPs: {flops}, params: {params}") + + print(f'start {args.pruner} pruning...') + + def trainer(model, optimizer, criterion, epoch): + return train(args, model, device, train_loader, criterion, optimizer, epoch=epoch) + + pruner_cls = str2pruner[args.pruner] + + kw_args = {} + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['Conv2d'] + }] + + if args.pruner == 'level': + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['default'] + }] + + else: + if args.global_sort: + print('Enable the global_sort mode') + # currently, only the Taylor pruner supports the global sort mode + kw_args['global_sort'] = True + if args.dependency_aware: + dummy_input = get_dummy_input(args, device) + print('Enable the dependency_aware mode') + # note that not all pruners support the dependency_aware mode + kw_args['dependency_aware'] = True + kw_args['dummy_input'] = dummy_input + if args.pruner not in ('l1filter', 'l2filter', 'fpgm'): + # the following settings only apply to training-aware pruners + kw_args['trainer'] = trainer + kw_args['optimizer'] = optimizer + kw_args['criterion'] = criterion + + if args.pruner in ('mean_activation', 'apoz', 'taylorfo'): + kw_args['sparsifying_training_batches'] = 1 + + if args.pruner == 'slim': + kw_args['sparsifying_training_epochs'] = 1 + + if args.pruner == 'agp': + kw_args['pruning_algorithm'] = 'l1' + kw_args['num_iterations'] = 2 + kw_args['epochs_per_iteration'] = 1 + + # Reproduce the result in the paper 'PRUNING FILTERS FOR EFFICIENT CONVNETS': + # Conv_1, Conv_8, Conv_9, Conv_10, Conv_11, Conv_12 are pruned with 50% sparsity, as in 'VGG-16-pruned-A'. + # If you want to skip some layers, use 'exclude' as shown below.
+ if args.pruner == 'slim': + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['BatchNorm2d'], + }] + elif args.model == 'resnet18': + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['Conv2d'] + }, { + 'exclude': True, + 'op_names': ['layer1.0.conv1', 'layer1.0.conv2'] + }] + else: + config_list = [{ + 'sparsity': args.sparsity, + 'op_types': ['Conv2d'], + 'op_names': ['feature.0', 'feature.24', 'feature.27', 'feature.30', 'feature.34', 'feature.37'] + }] + + pruner = pruner_cls(model, config_list, **kw_args) + + # Pruner.compress() returns the masked model + model = pruner.compress() + pruner.get_pruned_weights() + + # export the pruned model masks for model speedup + model_path = os.path.join(args.experiment_data_dir, 'pruned_{}_{}_{}.pth'.format( + args.model, args.dataset, args.pruner)) + mask_path = os.path.join(args.experiment_data_dir, 'mask_{}_{}_{}.pth'.format( + args.model, args.dataset, args.pruner)) + pruner.export_model(model_path=model_path, mask_path=mask_path) + + if args.test_only: + test(args, model, device, criterion, test_loader) + + if args.speed_up: + # unwrap all modules back to their normal state + pruner._unwrap_model() + m_speedup = ModelSpeedup(model, dummy_input, mask_path, device) + m_speedup.speedup_model() + + print('start finetuning...') + + # The optimizer used by the pruner may have been patched, so create a new optimizer for the fine-tuning stage. + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR(optimizer, milestones=[int(args.fine_tune_epochs * 0.5), int(args.fine_tune_epochs * 0.75)], gamma=0.1) + + best_top1 = 0 + save_path = os.path.join(args.experiment_data_dir, 'finetuned.pth') + for epoch in range(args.fine_tune_epochs): + print('# Epoch {} #'.format(epoch)) + train(args, model, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + top1 = test(args, model, device, criterion, test_loader) + if top1 > best_top1: + best_top1 = top1 + torch.save(model.state_dict(), save_path) + + flops, params, results = count_flops_params(model, dummy_input) + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_top1: .2f}') + + if args.nni: + nni.report_final_result(best_top1) + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='PyTorch example for model compression') + + # dataset and model + parser.add_argument('--dataset', type=str, default='cifar10', + help='dataset to use, mnist or cifar10') + parser.add_argument('--data-dir', type=str, default='./data/', + help='dataset directory') + parser.add_argument('--model', type=str, default='vgg16', + choices=['lenet', 'vgg16', 'vgg19', 'resnet18'], + help='model to use') + parser.add_argument('--pretrained-model-dir', type=str, default=None, + help='path to pretrained model') + parser.add_argument('--pretrain-epochs', type=int, default=160, + help='number of epochs to pretrain the model') + parser.add_argument('--batch-size', type=int, default=128, + help='input batch size for training') + parser.add_argument('--test-batch-size', type=int, default=200, + help='input batch size for testing') + parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data', + help='For saving output checkpoints') + parser.add_argument('--log-interval', type=int, default=100, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('--dry-run', action='store_true', default=False, + help='quickly check a 
single pass') + parser.add_argument('--multi-gpu', action='store_true', default=False, + help='run on multiple GPUs') + parser.add_argument('--test-only', action='store_true', default=False, + help='run test only') + + # pruner + parser.add_argument('--sparsity', type=float, default=0.5, + help='overall target sparsity') + parser.add_argument('--dependency-aware', action='store_true', default=False, + help='toggle dependency aware mode') + parser.add_argument('--global-sort', action='store_true', default=False, + help='toggle global sort mode') + parser.add_argument('--pruner', type=str, default='l1filter', + choices=['level', 'l1filter', 'l2filter', 'slim', 'agp', + 'fpgm', 'mean_activation', 'apoz', 'taylorfo'], + help='pruner to use') + + # speed-up + parser.add_argument('--speed-up', action='store_true', default=False, + help='Whether to speed-up the pruned model') + + # fine-tuning + parser.add_argument('--fine-tune-epochs', type=int, default=160, + help='epochs to fine tune') + + parser.add_argument('--nni', action='store_true', default=False, + help="whether to tune the pruners using NNI tuners") + + args = parser.parse_args() + + if args.nni: + params = nni.get_next_parameter() + print(params) + args.sparsity = params['sparsity'] + args.pruner = params['pruner'] + args.model = params['model'] + + main(args) \ No newline at end of file diff --git a/examples/model_compress/pruning/comparison_of_pruners/analyze.py b/examples/model_compress/pruning/comparison_of_pruners/analyze.py new file mode 100644 index 0000000000000000000000000000000000000000..c7cd13f72a3e480e1d3c142a2beef180b6d5a669 --- /dev/null +++ b/examples/model_compress/pruning/comparison_of_pruners/analyze.py @@ -0,0 +1,107 @@ +import argparse +import json +import matplotlib.pyplot as plt + + +def plot_performance_comparison(args): + # reference data: performance of the original model and the performance reported in the AutoCompress paper + references = { + 'original':{ + 'cifar10':{ + 'vgg16':{ + 'performance': 0.9298, + 'params':14987722.0, + 'flops':314018314.0 + }, + 'resnet18':{ + 'performance': 0.9433, + 'params':11173962.0, + 'flops':556651530.0 + }, + 'resnet50':{ + 'performance': 0.9488, + 'params':23520842.0, + 'flops':1304694794.0 + } + } + }, + 'AutoCompressPruner':{ + 'cifar10':{ + 'vgg16':{ + 'performance': 0.9321, + 'params':52.2, # compression ratio (times) + 'flops':8.8 + }, + 'resnet18':{ + 'performance': 0.9381, + 'params':54.2, # compression ratio (times) + 'flops':12.2 + } + } + } + } + + markers = ['v', '^', '<', '1', '2', '3', '4', '8', '*', '+', 'o'] + + with open('cifar10/comparison_result_{}.json'.format(args.model), 'r') as jsonfile: + result = json.load(jsonfile) + + pruners = result.keys() + + performances = {} + flops = {} + params = {} + sparsities = {} + for pruner in pruners: + performances[pruner] = [val['performance'] for val in result[pruner]] + flops[pruner] = [val['flops'] for val in result[pruner]] + params[pruner] = [val['params'] for val in result[pruner]] + sparsities[pruner] = [val['sparsity'] for val in result[pruner]] + + fig, axs = plt.subplots(2, 1, figsize=(8, 10)) + fig.suptitle('Channel Pruning Comparison on {}/CIFAR10'.format(args.model)) + fig.subplots_adjust(hspace=0.5) + + for idx, pruner in enumerate(pruners): + axs[0].scatter(params[pruner], performances[pruner], marker=markers[idx], label=pruner) + axs[1].scatter(flops[pruner], performances[pruner], marker=markers[idx], label=pruner) + + # references + params_original = references['original']['cifar10'][args.model]['params'] + performance_original = 
references['original']['cifar10'][args.model]['performance'] + axs[0].plot(params_original, performance_original, 'rx', label='original model') + if args.model in ['vgg16', 'resnet18']: + axs[0].plot(params_original/references['AutoCompressPruner']['cifar10'][args.model]['params'], + references['AutoCompressPruner']['cifar10'][args.model]['performance'], + 'bx', label='AutoCompress Paper') + + axs[0].set_title("Performance vs. Number of Parameters") + axs[0].set_xlabel("Number of Parameters") + axs[0].set_ylabel('Accuracy') + axs[0].legend() + + # references + flops_original = references['original']['cifar10'][args.model]['flops'] + performance_original = references['original']['cifar10'][args.model]['performance'] + axs[1].plot(flops_original, performance_original, 'rx', label='original model') + if args.model in ['vgg16', 'resnet18']: + axs[1].plot(flops_original/references['AutoCompressPruner']['cifar10'][args.model]['flops'], + references['AutoCompressPruner']['cifar10'][args.model]['performance'], + 'bx', label='AutoCompress Paper') + + axs[1].set_title("Performance vs. FLOPs") + axs[1].set_xlabel("FLOPs") + axs[1].set_ylabel('Accuracy') + axs[1].legend() + + plt.savefig('img/performance_comparison_{}.png'.format(args.model)) + plt.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Plot pruner comparison results') + parser.add_argument('--model', type=str, default='vgg16', + help='vgg16, resnet18 or resnet50') + args = parser.parse_args() + + plot_performance_comparison(args) diff --git a/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet18.json b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet18.json new file mode 100644 index 0000000000000000000000000000000000000000..0ef5a6119d904fc8df2fb787ead32f06c14c8494 --- /dev/null +++ b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet18.json @@ -0,0 +1,392 @@ +{ + "L1FilterPruner": [ + { + "sparsity": 0.1, + "params": 9642085.0, + "flops": 496882684.0, + "performance": 0.9436 + }, + { + "sparsity": 0.2, + "params": 8149126.0, + "flops": 436381222.0, + "performance": 0.9472 + }, + { + "sparsity": 0.3, + "params": 6705269.0, + "flops": 371666312.0, + "performance": 0.9391 + }, + { + "sparsity": 0.4, + "params": 5335138.0, + "flops": 307050934.0, + "performance": 0.9433 + }, + { + "sparsity": 0.5, + "params": 3998122.0, + "flops": 237900244.0, + "performance": 0.9379 + }, + { + "sparsity": 0.6, + "params": 2767325.0, + "flops": 175308326.0, + "performance": 0.9326 + }, + { + "sparsity": 0.7, + "params": 1617817.0, + "flops": 108532198.0, + "performance": 0.928 + }, + { + "sparsity": 0.8, + "params": 801338.0, + "flops": 53808728.0, + "performance": 0.9145 + }, + { + "sparsity": 0.9, + "params": 229372.0, + "flops": 15304972.0, + "performance": 0.8858 + }, + { + "sparsity": 0.95, + "params": 61337.0, + "flops": 4305146.0, + "performance": 0.8441 + }, + { + "sparsity": 0.975, + "params": 17763.0, + "flops": 1561644.0, + "performance": 0.7294 + } + ], + "L2FilterPruner": [ + { + "sparsity": 0.1, + "params": 9680242.0, + "flops": 497492746.0, + "performance": 0.9423 + }, + { + "sparsity": 0.2, + "params": 8137784.0, + "flops": 436199900.0, + "performance": 0.9471 + }, + { + "sparsity": 0.3, + "params": 6702679.0, + "flops": 369733768.0, + "performance": 0.9415 + }, + { + "sparsity": 0.4, + "params": 5330426.0, + "flops": 305512736.0, + "performance": 0.9411 + }, + { + "sparsity": 0.5, + "params": 3961076.0, 
"flops": 236467814.0, + "performance": 0.9349 + }, + { + "sparsity": 0.6, + "params": 2776512.0, + "flops": 175872204.0, + "performance": 0.9393 + }, + { + "sparsity": 0.7, + "params": 1622571.0, + "flops": 107994906.0, + "performance": 0.9295 + }, + { + "sparsity": 0.8, + "params": 797075.0, + "flops": 53534414.0, + "performance": 0.9187 + }, + { + "sparsity": 0.9, + "params": 232153.0, + "flops": 15385078.0, + "performance": 0.8838 + }, + { + "sparsity": 0.95, + "params": 58180.0, + "flops": 4510072.0, + "performance": 0.8396 + }, + { + "sparsity": 0.975, + "params": 16836.0, + "flops": 1429752.0, + "performance": 0.7482 + } + ], + "FPGMPruner": [ + { + "sparsity": 0.1, + "params": 9705680.0, + "flops": 497899454.0, + "performance": 0.9443 + }, + { + "sparsity": 0.2, + "params": 8160468.0, + "flops": 436562544.0, + "performance": 0.946 + }, + { + "sparsity": 0.3, + "params": 6710052.0, + "flops": 367960482.0, + "performance": 0.9452 + }, + { + "sparsity": 0.4, + "params": 5334205.0, + "flops": 306166432.0, + "performance": 0.9412 + }, + { + "sparsity": 0.5, + "params": 4007259.0, + "flops": 237702210.0, + "performance": 0.9385 + }, + { + "sparsity": 0.6, + "params": 2782236.0, + "flops": 175813620.0, + "performance": 0.9304 + }, + { + "sparsity": 0.7, + "params": 1634603.0, + "flops": 108904676.0, + "performance": 0.9249 + }, + { + "sparsity": 0.8, + "params": 799610.0, + "flops": 53645918.0, + "performance": 0.9203 + }, + { + "sparsity": 0.9, + "params": 233644.0, + "flops": 15408784.0, + "performance": 0.8856 + }, + { + "sparsity": 0.95, + "params": 56518.0, + "flops": 4266910.0, + "performance": 0.83 + }, + { + "sparsity": 0.975, + "params": 17610.0, + "flops": 1441836.0, + "performance": 0.7356 + } + ], + "NetAdaptPruner": [ + { + "sparsity": 0.1, + "params": 11173962.0, + "flops": 556651530.0, + "performance": 0.9474 + }, + { + "sparsity": 0.2, + "params": 10454958.0, + "flops": 545147466.0, + "performance": 0.9482 + }, + { + "sparsity": 0.3, + "params": 9299986.0, + "flops": 526681564.0, + "performance": 0.9469 + }, + { + "sparsity": 0.4, + "params": 8137618.0, + "flops": 508087276.0, + "performance": 0.9451 + }, + { + "sparsity": 0.5, + "params": 6267654.0, + "flops": 478185102.0, + "performance": 0.947 + }, + { + "sparsity": 0.6, + "params": 5277444.0, + "flops": 462341742.0, + "performance": 0.9469 + }, + { + "sparsity": 0.7, + "params": 4854190.0, + "flops": 455580628.0, + "performance": 0.9466 + }, + { + "sparsity": 0.8, + "params": 3531098.0, + "flops": 434411156.0, + "performance": 0.9472 + } + ], + "SimulatedAnnealingPruner": [ + { + "sparsity": 0.1, + "params": 10307424.0, + "flops": 537697098.0, + "performance": 0.942 + }, + { + "sparsity": 0.2, + "params": 9264598.0, + "flops": 513101368.0, + "performance": 0.9456 + }, + { + "sparsity": 0.3, + "params": 7999316.0, + "flops": 489260738.0, + "performance": 0.946 + }, + { + "sparsity": 0.4, + "params": 6996176.0, + "flops": 450768626.0, + "performance": 0.9413 + }, + { + "sparsity": 0.5, + "params": 5412616.0, + "flops": 408698434.0, + "performance": 0.9477 + }, + { + "sparsity": 0.6, + "params": 5106924.0, + "flops": 391735326.0, + "performance": 0.9483 + }, + { + "sparsity": 0.7, + "params": 3032105.0, + "flops": 269777978.0, + "performance": 0.9414 + }, + { + "sparsity": 0.8, + "params": 2423230.0, + "flops": 294783862.0, + "performance": 0.9384 + }, + { + "sparsity": 0.9, + "params": 1151046.0, + "flops": 209639226.0, + "performance": 0.939 + }, + { + "sparsity": 0.95, + "params": 394406.0, + "flops": 108776618.0, + 
"performance": 0.923 + }, + { + "sparsity": 0.975, + "params": 250649.0, + "flops": 84645050.0, + "performance": 0.917 + } + ], + "AutoCompressPruner": [ + { + "sparsity": 0.1, + "params": 10238286.0, + "flops": 536590794.0, + "performance": 0.9406 + }, + { + "sparsity": 0.2, + "params": 9272049.0, + "flops": 512333916.0, + "performance": 0.9392 + }, + { + "sparsity": 0.3, + "params": 8099915.0, + "flops": 485418056.0, + "performance": 0.9398 + }, + { + "sparsity": 0.4, + "params": 6864547.0, + "flops": 449359492.0, + "performance": 0.9406 + }, + { + "sparsity": 0.5, + "params": 6106994.0, + "flops": 430766432.0, + "performance": 0.9397 + }, + { + "sparsity": 0.6, + "params": 5338096.0, + "flops": 415085278.0, + "performance": 0.9384 + }, + { + "sparsity": 0.7, + "params": 3701330.0, + "flops": 351057878.0, + "performance": 0.938 + }, + { + "sparsity": 0.8, + "params": 2229760.0, + "flops": 269058346.0, + "performance": 0.9388 + }, + { + "sparsity": 0.9, + "params": 1108564.0, + "flops": 189355930.0, + "performance": 0.9348 + }, + { + "sparsity": 0.95, + "params": 616893.0, + "flops": 159314256.0, + "performance": 0.93 + }, + { + "sparsity": 0.975, + "params": 297368.0, + "flops": 113398292.0, + "performance": 0.9072 + } + ] +} \ No newline at end of file diff --git a/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet50.json b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet50.json new file mode 100644 index 0000000000000000000000000000000000000000..dcea274149eb537e29ea77ed5a3af2f6bfe0ea20 --- /dev/null +++ b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_resnet50.json @@ -0,0 +1,356 @@ +{ + "L1FilterPruner": [ + { + "sparsity": 0.1, + "params": 20378141.0, + "flops": 1134740738.0, + "performance": 0.9456 + }, + { + "sparsity": 0.2, + "params": 17286560.0, + "flops": 966734852.0, + "performance": 0.9433 + }, + { + "sparsity": 0.3, + "params": 14403947.0, + "flops": 807114812.0, + "performance": 0.9396 + }, + { + "sparsity": 0.4, + "params": 11558288.0, + "flops": 656314106.0, + "performance": 0.9402 + }, + { + "sparsity": 0.5, + "params": 8826728.0, + "flops": 507965924.0, + "performance": 0.9394 + }, + { + "sparsity": 0.6, + "params": 6319902.0, + "flops": 374211960.0, + "performance": 0.9372 + }, + { + "sparsity": 0.7, + "params": 4063713.0, + "flops": 246788556.0, + "performance": 0.9304 + }, + { + "sparsity": 0.8, + "params": 2120717.0, + "flops": 133614422.0, + "performance": 0.9269 + }, + { + "sparsity": 0.9, + "params": 652524.0, + "flops": 41973714.0, + "performance": 0.9081 + }, + { + "sparsity": 0.95, + "params": 195468.0, + "flops": 13732020.0, + "performance": 0.8723 + }, + { + "sparsity": 0.975, + "params": 58054.0, + "flops": 4268104.0, + "performance": 0.7941 + } + ], + "L2FilterPruner": [ + { + "sparsity": 0.1, + "params": 20378141.0, + "flops": 1134740738.0, + "performance": 0.9442 + }, + { + "sparsity": 0.2, + "params": 17275244.0, + "flops": 966400928.0, + "performance": 0.9463 + }, + { + "sparsity": 0.3, + "params": 14415409.0, + "flops": 807710914.0, + "performance": 0.9367 + }, + { + "sparsity": 0.4, + "params": 11564310.0, + "flops": 656653008.0, + "performance": 0.9391 + }, + { + "sparsity": 0.5, + "params": 8843266.0, + "flops": 508086256.0, + "performance": 0.9381 + }, + { + "sparsity": 0.6, + "params": 6316815.0, + "flops": 373882614.0, + "performance": 0.9368 + }, + { + "sparsity": 0.7, + "params": 4054272.0, + "flops": 246477678.0, + "performance": 0.935 + 
}, + { + "sparsity": 0.8, + "params": 2129321.0, + "flops": 134527520.0, + "performance": 0.9275 + }, + { + "sparsity": 0.9, + "params": 667500.0, + "flops": 42927060.0, + "performance": 0.9129 + }, + { + "sparsity": 0.95, + "params": 192464.0, + "flops": 13669430.0, + "performance": 0.8757 + }, + { + "sparsity": 0.975, + "params": 58250.0, + "flops": 4365620.0, + "performance": 0.7978 + } + ], + "FPGMPruner": [ + { + "sparsity": 0.1, + "params": 20401570.0, + "flops": 1135114552.0, + "performance": 0.9438 + }, + { + "sparsity": 0.2, + "params": 17321414.0, + "flops": 967137398.0, + "performance": 0.9427 + }, + { + "sparsity": 0.3, + "params": 14418221.0, + "flops": 807755756.0, + "performance": 0.9422 + }, + { + "sparsity": 0.4, + "params": 11565000.0, + "flops": 655412124.0, + "performance": 0.9403 + }, + { + "sparsity": 0.5, + "params": 8829840.0, + "flops": 506715294.0, + "performance": 0.9355 + }, + { + "sparsity": 0.6, + "params": 6308085.0, + "flops": 374231682.0, + "performance": 0.9359 + }, + { + "sparsity": 0.7, + "params": 4054237.0, + "flops": 246511714.0, + "performance": 0.9285 + }, + { + "sparsity": 0.8, + "params": 2134187.0, + "flops": 134456366.0, + "performance": 0.9275 + }, + { + "sparsity": 0.9, + "params": 665931.0, + "flops": 42859752.0, + "performance": 0.9083 + }, + { + "sparsity": 0.95, + "params": 191590.0, + "flops": 13641052.0, + "performance": 0.8762 + }, + { + "sparsity": 0.975, + "params": 57767.0, + "flops": 4350074.0, + "performance": 0.789 + } + ], + "NetAdaptPruner": [ + { + "sparsity": 0.1, + "params": 22348970.0, + "flops": 1275701258.0, + "performance": 0.9404 + }, + { + "sparsity": 0.2, + "params": 21177162.0, + "flops": 1256952330.0, + "performance": 0.9445 + }, + { + "sparsity": 0.3, + "params": 18407434.0, + "flops": 1212636682.0, + "performance": 0.9433 + }, + { + "sparsity": 0.4, + "params": 16061284.0, + "flops": 1175098282.0, + "performance": 0.9401 + } + ], + "SimulatedAnnealingPruner": [ + { + "sparsity": 0.1, + "params": 20551755.0, + "flops": 1230145122.0, + "performance": 0.9438 + }, + { + "sparsity": 0.2, + "params": 17766048.0, + "flops": 1159924128.0, + "performance": 0.9432 + }, + { + "sparsity": 0.3, + "params": 15105146.0, + "flops": 1094478662.0, + "performance": 0.943 + }, + { + "sparsity": 0.4, + "params": 12378092.0, + "flops": 1008801158.0, + "performance": 0.9398 + }, + { + "sparsity": 0.5, + "params": 9890487.0, + "flops": 911941770.0, + "performance": 0.9426 + }, + { + "sparsity": 0.6, + "params": 7638262.0, + "flops": 831218770.0, + "performance": 0.9412 + }, + { + "sparsity": 0.7, + "params": 5469936.0, + "flops": 691881792.0, + "performance": 0.9405 + }, + { + "sparsity": 0.8, + "params": 3668951.0, + "flops": 580850666.0, + "performance": 0.941 + }, + { + "sparsity": 0.9, + "params": 1765284.0, + "flops": 389162310.0, + "performance": 0.9294 + } + ], + "AutoCompressPruner": [ + { + "sparsity": 0.1, + "params": 20660299.0, + "flops": 1228508590.0, + "performance": 0.9337 + }, + { + "sparsity": 0.2, + "params": 17940465.0, + "flops": 1152868146.0, + "performance": 0.9326 + }, + { + "sparsity": 0.3, + "params": 15335831.0, + "flops": 1084996094.0, + "performance": 0.9348 + }, + { + "sparsity": 0.4, + "params": 12821408.0, + "flops": 991305524.0, + "performance": 0.936 + }, + { + "sparsity": 0.5, + "params": 10695425.0, + "flops": 919638860.0, + "performance": 0.9349 + }, + { + "sparsity": 0.6, + "params": 8536821.0, + "flops": 802011678.0, + "performance": 0.9339 + }, + { + "sparsity": 0.7, + "params": 7276898.0, + "flops": 
744248114.0, + "performance": 0.9337 + }, + { + "sparsity": 0.8, + "params": 5557721.0, + "flops": 643881710.0, + "performance": 0.9323 + }, + { + "sparsity": 0.9, + "params": 3925140.0, + "flops": 512545272.0, + "performance": 0.9304 + }, + { + "sparsity": 0.95, + "params": 2867004.0, + "flops": 365184762.0, + "performance": 0.9263 + }, + { + "sparsity": 0.975, + "params": 1773257.0, + "flops": 229320266.0, + "performance": 0.9175 + } + ] +} \ No newline at end of file diff --git a/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_vgg16.json b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_vgg16.json new file mode 100644 index 0000000000000000000000000000000000000000..9e476488c1804f4c2f7be5bcad894b5d576c1056 --- /dev/null +++ b/examples/model_compress/pruning/comparison_of_pruners/cifar10/comparison_result_vgg16.json @@ -0,0 +1,392 @@ +{ + "L1FilterPruner": [ + { + "sparsity": 0.1, + "params": 12187336.0, + "flops": 256252606.0, + "performance": 0.9344 + }, + { + "sparsity": 0.2, + "params": 9660216.0, + "flops": 203049930.0, + "performance": 0.9371 + }, + { + "sparsity": 0.3, + "params": 7435417.0, + "flops": 155477470.0, + "performance": 0.9341 + }, + { + "sparsity": 0.4, + "params": 5493954.0, + "flops": 114721578.0, + "performance": 0.9317 + }, + { + "sparsity": 0.5, + "params": 3820010.0, + "flops": 79155722.0, + "performance": 0.9309 + }, + { + "sparsity": 0.6, + "params": 2478632.0, + "flops": 51618494.0, + "performance": 0.9229 + }, + { + "sparsity": 0.7, + "params": 1420600.0, + "flops": 29455306.0, + "performance": 0.9031 + }, + { + "sparsity": 0.8, + "params": 658553.0, + "flops": 13290974.0, + "performance": 0.8756 + }, + { + "sparsity": 0.9, + "params": 186178.0, + "flops": 3574570.0, + "performance": 0.8145 + }, + { + "sparsity": 0.95, + "params": 58680.0, + "flops": 1050570.0, + "performance": 0.6983 + }, + { + "sparsity": 0.975, + "params": 23408.0, + "flops": 329918.0, + "performance": 0.5573 + } + ], + "L2FilterPruner": [ + { + "sparsity": 0.1, + "params": 12187336.0, + "flops": 256252606.0, + "performance": 0.9357 + }, + { + "sparsity": 0.2, + "params": 9660216.0, + "flops": 203049930.0, + "performance": 0.9355 + }, + { + "sparsity": 0.3, + "params": 7435417.0, + "flops": 155477470.0, + "performance": 0.9337 + }, + { + "sparsity": 0.4, + "params": 5493954.0, + "flops": 114721578.0, + "performance": 0.9308 + }, + { + "sparsity": 0.5, + "params": 3820010.0, + "flops": 79155722.0, + "performance": 0.9285 + }, + { + "sparsity": 0.6, + "params": 2478632.0, + "flops": 51618494.0, + "performance": 0.9208 + }, + { + "sparsity": 0.7, + "params": 1420600.0, + "flops": 29455306.0, + "performance": 0.909 + }, + { + "sparsity": 0.8, + "params": 658553.0, + "flops": 13290974.0, + "performance": 0.8698 + }, + { + "sparsity": 0.9, + "params": 186178.0, + "flops": 3574570.0, + "performance": 0.8203 + }, + { + "sparsity": 0.95, + "params": 58680.0, + "flops": 1050570.0, + "performance": 0.7063 + }, + { + "sparsity": 0.975, + "params": 23408.0, + "flops": 329918.0, + "performance": 0.5455 + } + ], + "FPGMPruner": [ + { + "sparsity": 0.1, + "params": 12187336.0, + "flops": 256252606.0, + "performance": 0.937 + }, + { + "sparsity": 0.2, + "params": 9660216.0, + "flops": 203049930.0, + "performance": 0.936 + }, + { + "sparsity": 0.3, + "params": 7435417.0, + "flops": 155477470.0, + "performance": 0.9359 + }, + { + "sparsity": 0.4, + "params": 5493954.0, + "flops": 114721578.0, + "performance": 0.9302 + }, + { + "sparsity": 0.5, + 
"params": 3820010.0, + "flops": 79155722.0, + "performance": 0.9233 + }, + { + "sparsity": 0.6, + "params": 2478632.0, + "flops": 51618494.0, + "performance": 0.922 + }, + { + "sparsity": 0.7, + "params": 1420600.0, + "flops": 29455306.0, + "performance": 0.9022 + }, + { + "sparsity": 0.8, + "params": 658553.0, + "flops": 13290974.0, + "performance": 0.8794 + }, + { + "sparsity": 0.9, + "params": 186178.0, + "flops": 3574570.0, + "performance": 0.8276 + }, + { + "sparsity": 0.95, + "params": 58680.0, + "flops": 1050570.0, + "performance": 0.6967 + }, + { + "sparsity": 0.975, + "params": 23408.0, + "flops": 329918.0, + "performance": 0.3683 + } + ], + "NetAdaptPruner": [ + { + "sparsity": 0.1, + "params": 13492098.0, + "flops": 308484330.0, + "performance": 0.9376 + }, + { + "sparsity": 0.2, + "params": 11998408.0, + "flops": 297641410.0, + "performance": 0.9374 + }, + { + "sparsity": 0.3, + "params": 10504344.0, + "flops": 281928834.0, + "performance": 0.9369 + }, + { + "sparsity": 0.4, + "params": 8263221.0, + "flops": 272964342.0, + "performance": 0.9382 + }, + { + "sparsity": 0.5, + "params": 6769885.0, + "flops": 249070966.0, + "performance": 0.9388 + }, + { + "sparsity": 0.6, + "params": 6022137.0, + "flops": 237106998.0, + "performance": 0.9383 + }, + { + "sparsity": 0.7, + "params": 4526754.0, + "flops": 222152490.0, + "performance": 0.936 + }, + { + "sparsity": 0.8, + "params": 3032759.0, + "flops": 162401210.0, + "performance": 0.9362 + } + ], + "SimulatedAnnealingPruner": [ + { + "sparsity": 0.1, + "params": 12691704.0, + "flops": 301467870.0, + "performance": 0.9366 + }, + { + "sparsity": 0.2, + "params": 10318461.0, + "flops": 275724450.0, + "performance": 0.9362 + }, + { + "sparsity": 0.3, + "params": 8217127.0, + "flops": 246321046.0, + "performance": 0.9371 + }, + { + "sparsity": 0.4, + "params": 6458368.0, + "flops": 232948294.0, + "performance": 0.9378 + }, + { + "sparsity": 0.5, + "params": 4973079.0, + "flops": 217675254.0, + "performance": 0.9362 + }, + { + "sparsity": 0.6, + "params": 3131526.0, + "flops": 151576878.0, + "performance": 0.9347 + }, + { + "sparsity": 0.7, + "params": 1891036.0, + "flops": 76575574.0, + "performance": 0.9289 + }, + { + "sparsity": 0.8, + "params": 1170751.0, + "flops": 107532322.0, + "performance": 0.9325 + }, + { + "sparsity": 0.9, + "params": 365978.0, + "flops": 46241354.0, + "performance": 0.9167 + }, + { + "sparsity": 0.95, + "params": 167089.0, + "flops": 38589922.0, + "performance": 0.7746 + }, + { + "sparsity": 0.975, + "params": 96779.0, + "flops": 26838230.0, + "performance": 0.1 + } + ], + "AutoCompressPruner": [ + { + "sparsity": 0.1, + "params": 12460277.0, + "flops": 290311730.0, + "performance": 0.9352 + }, + { + "sparsity": 0.2, + "params": 10138147.0, + "flops": 269180938.0, + "performance": 0.9324 + }, + { + "sparsity": 0.3, + "params": 8033350.0, + "flops": 241789714.0, + "performance": 0.9357 + }, + { + "sparsity": 0.4, + "params": 6105156.0, + "flops": 213573294.0, + "performance": 0.9367 + }, + { + "sparsity": 0.5, + "params": 4372604.0, + "flops": 185826362.0, + "performance": 0.9387 + }, + { + "sparsity": 0.6, + "params": 3029629.0, + "flops": 166285498.0, + "performance": 0.9334 + }, + { + "sparsity": 0.7, + "params": 1897060.0, + "flops": 134897806.0, + "performance": 0.9359 + }, + { + "sparsity": 0.8, + "params": 1145509.0, + "flops": 111766450.0, + "performance": 0.9334 + }, + { + "sparsity": 0.9, + "params": 362546.0, + "flops": 50777246.0, + "performance": 0.9261 + }, + { + "sparsity": 0.95, + "params": 
149735.0, + "flops": 39201770.0, + "performance": 0.8924 + }, + { + "sparsity": 0.975, + "params": 45378.0, + "flops": 13213974.0, + "performance": 0.8193 + } + ] +} \ No newline at end of file diff --git a/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet18.png b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet18.png new file mode 100644 index 0000000000000000000000000000000000000000..87a99e85bd9c87a6b7b5fb21bd0166bd2be72792 Binary files /dev/null and b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet18.png differ diff --git a/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet50.png b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet50.png new file mode 100644 index 0000000000000000000000000000000000000000..7214a368b02b907d578988e920b82baf87ae1b4f Binary files /dev/null and b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_resnet50.png differ diff --git a/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_vgg16.png b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_vgg16.png new file mode 100644 index 0000000000000000000000000000000000000000..93930561b38bcd7a47499d0e7cabfe65fea998cc Binary files /dev/null and b/examples/model_compress/pruning/comparison_of_pruners/img/performance_comparison_vgg16.png differ diff --git a/examples/model_compress/pruning/config.yml b/examples/model_compress/pruning/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..355529a16712ec7a6d958753d4482955e837796d --- /dev/null +++ b/examples/model_compress/pruning/config.yml @@ -0,0 +1,18 @@ +searchSpace: + sparsity: + _type: choice + _value: [0.25, 0.5, 0.75] + pruner: + _type: choice + _value: ['slim', 'l2filter', 'fpgm', 'apoz'] + model: + _type: choice + _value: ['vgg16', 'vgg19'] +trainingService: + platform: local +trialCodeDirectory: . +trialCommand: python3 basic_pruners_torch.py --nni +trialConcurrency: 1 +trialGpuNumber: 0 +tuner: + name: GridSearch diff --git a/examples/model_compress/pruning/finetune_kd_torch.py b/examples/model_compress/pruning/finetune_kd_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..f1137d0ed5aa5047391b314877738c64fa89e095 --- /dev/null +++ b/examples/model_compress/pruning/finetune_kd_torch.py @@ -0,0 +1,196 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for fine-tuning the pruned model with knowledge distillation (KD). +Run basic_pruners_torch.py first to get the masks of the pruned model. Then pass the mask as an argument for model speedup. The compressed model is further used for fine-tuning.
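+ +Example usage (paths are placeholders): + python finetune_kd_torch.py --model vgg16 --dataset cifar10 --teacher-model-dir <pretrained_teacher.pth> --student-model-dir <pruned_student.pth> --mask-path <mask.pth>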
+''' + +import argparse +import os +import sys +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +from nni.compression.pytorch import ModelSpeedup +from torch.optim.lr_scheduler import MultiStepLR +from basic_pruners_torch import get_data + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[1] / 'models')) +from mnist.lenet import LeNet +from cifar10.vgg import VGG + +class DistillKL(nn.Module): + """Distilling the Knowledge in a Neural Network""" + def __init__(self, T): + super(DistillKL, self).__init__() + self.T = T + + def forward(self, y_s, y_t): + # soften both distributions with temperature T, then compute the KL divergence; + # the T**2 factor keeps gradient magnitudes comparable across temperatures + p_s = F.log_softmax(y_s/self.T, dim=1) + p_t = F.softmax(y_t/self.T, dim=1) + loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T**2) / y_s.shape[0] + return loss + +def get_dummy_input(args, device): + if args.dataset == 'mnist': + dummy_input = torch.randn([args.test_batch_size, 1, 28, 28]).to(device) + elif args.dataset in ['cifar10', 'imagenet']: + dummy_input = torch.randn([args.test_batch_size, 3, 32, 32]).to(device) + return dummy_input + +def get_model_optimizer_scheduler(args, device, test_loader, criterion): + if args.model == 'LeNet': + model = LeNet().to(device) + elif args.model == 'vgg16': + model = VGG(depth=16).to(device) + elif args.model == 'vgg19': + model = VGG(depth=19).to(device) + else: + raise ValueError("model not recognized") + + # In this example, the teacher and student share the same architecture; using a different teacher architecture is also feasible. + if args.teacher_model_dir is None: + raise NotImplementedError('please load a pretrained teacher model first') + else: + model.load_state_dict(torch.load(args.teacher_model_dir)) + best_acc = test(args, model, device, criterion, test_loader) + + model_t = deepcopy(model) + model_s = deepcopy(model) + + if args.student_model_dir is not None: + # load the pruned student model checkpoint + model_s.load_state_dict(torch.load(args.student_model_dir)) + + dummy_input = get_dummy_input(args, device) + m_speedup = ModelSpeedup(model_s, dummy_input, args.mask_path, device) + m_speedup.speedup_model() + + module_list = nn.ModuleList([]) + module_list.append(model_s) + module_list.append(model_t) + + # set up optimizer for fine-tuning the student model + optimizer = torch.optim.SGD(model_s.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR( + optimizer, milestones=[int(args.fine_tune_epochs*0.5), int(args.fine_tune_epochs*0.75)], gamma=0.1) + + print('Pretrained teacher model acc:', best_acc) + return module_list, optimizer, scheduler + + +def train(args, models, device, train_loader, criterion, optimizer, epoch): + model_s = models[0].train() + model_t = models[-1].eval() + cri_cls = criterion + cri_kd = DistillKL(args.kd_T) + + + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output_s = model_s(data) + with torch.no_grad(): + # the teacher is frozen, so no gradients are needed for its forward pass + output_t = model_t(data) + + loss_cls = cri_cls(output_s, target) + loss_kd = cri_kd(output_s, output_t) + loss = loss_cls + loss_kd + loss.backward() + + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + if args.dry_run: + break + +def test(args, model, device, criterion, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += criterion(output, target).item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + acc = 100 * correct / len(test_loader.dataset) + + print('Test Loss: {} Accuracy: {}%\n'.format( + test_loss, acc)) + return acc + + +def main(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.makedirs(args.experiment_data_dir, exist_ok=True) + + # prepare model and data + train_loader, test_loader, criterion = get_data(args.dataset, args.data_dir, args.batch_size, args.test_batch_size) + models, optimizer, scheduler = get_model_optimizer_scheduler(args, device, test_loader, criterion) + + best_top1 = 0 + if args.test_only: + test(args, models[0], device, criterion, test_loader) + return + + print('start fine-tuning...') + for epoch in range(args.fine_tune_epochs): + print('# Epoch {} #'.format(epoch)) + train(args, models, device, train_loader, criterion, optimizer, epoch) + scheduler.step() + + # test student only + top1 = test(args, models[0], device, criterion, test_loader) + if top1 > best_top1: + best_top1 = top1 + torch.save(models[0].state_dict(), os.path.join(args.experiment_data_dir, 'model_trained.pth')) + print('Trained model saved to %s' % args.experiment_data_dir) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Example for model compression') + + # dataset and model + parser.add_argument('--dataset', type=str, default='cifar10', + help='dataset to use, mnist, cifar10 or imagenet') + parser.add_argument('--data-dir', type=str, default='./data/', + help='dataset directory') + parser.add_argument('--model', type=str, default='vgg16', + choices=['LeNet', 'vgg16', 'vgg19', 'resnet18'], + help='model to use') + parser.add_argument('--teacher-model-dir', type=str, default=None, + help='path to the pretrained teacher model checkpoint') + parser.add_argument('--mask-path', type=str, default=None, + help='path to the pruned student model mask file') + parser.add_argument('--student-model-dir', type=str, default=None, + help='path to the pruned student model checkpoint') + parser.add_argument('--batch-size', type=int, default=128, + help='input batch size for training') + parser.add_argument('--test-batch-size', type=int, default=200, + help='input batch size for testing') + parser.add_argument('--fine-tune-epochs', type=int, default=160, + help='epochs to fine tune') + parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data', + help='for saving output checkpoints') + parser.add_argument('--log-interval', type=int, default=100, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('--dry-run', action='store_true', default=False, + help='quickly check a single pass') + parser.add_argument('--test-only', action='store_true', default=False, + help='run test only') + + + # knowledge distillation + parser.add_argument('--kd_T', type=float, default=4, + help='temperature for knowledge distillation') + + + args = parser.parse_args() + main(args) diff --git a/examples/model_compress/pruning/lottery_torch_mnist_fc.py b/examples/model_compress/pruning/lottery_torch_mnist_fc.py
new file mode 100644 index 0000000000000000000000000000000000000000..215bc5f5f70bd8af0c760a23496c2f4e24eeaaf8 --- /dev/null +++ b/examples/model_compress/pruning/lottery_torch_mnist_fc.py @@ -0,0 +1,151 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for reproducing the Lottery Ticket Hypothesis. +''' + +import argparse +import copy +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.data +import torchvision.datasets as datasets +import torchvision.transforms as transforms +from nni.algorithms.compression.pytorch.pruning import LotteryTicketPruner + +class fc1(nn.Module): + + def __init__(self, num_classes=10): + super(fc1, self).__init__() + self.classifier = nn.Sequential( + nn.Linear(28 * 28, 300), + nn.ReLU(inplace=True), + nn.Linear(300, 100), + nn.ReLU(inplace=True), + nn.Linear(100, num_classes), + ) + + def forward(self, x): + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + +def train(model, train_loader, optimizer, criterion): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.train() + for batch_idx, (imgs, targets) in enumerate(train_loader): + optimizer.zero_grad() + imgs, targets = imgs.to(device), targets.to(device) + output = model(imgs) + train_loss = criterion(output, targets) + train_loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format( + 100 * batch_idx / len(train_loader), train_loss.item())) + + return train_loss.item() + +def test(model, test_loader, criterion): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss (the model outputs raw logits, so use cross_entropy rather than nll_loss) + pred = output.data.max(1, keepdim=True)[1] # get the index of the max logit + correct += pred.eq(target.data.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + accuracy = 100. * correct / len(test_loader.dataset) + return accuracy + + +if __name__ == '__main__': + """ + THE LOTTERY TICKET HYPOTHESIS: FINDING SPARSE, TRAINABLE NEURAL NETWORKS (https://arxiv.org/pdf/1803.03635.pdf) + + The Lottery Ticket Hypothesis. A randomly-initialized, dense neural network contains a subnetwork that is + initialized such that—when trained in isolation—it can match the test accuracy of the original network after + training for at most the same number of iterations. + + Identifying winning tickets. We identify a winning ticket by training a network and pruning its + smallest-magnitude weights. The remaining, unpruned connections constitute the architecture of the + winning ticket. Unique to our work, each unpruned connection's value is then reset to its initialization + from the original network before it was trained. This forms our central experiment: + 1. Randomly initialize a neural network f(x; θ0) (where θ0 ∼ Dθ). + 2. Train the network for j iterations, arriving at parameters θj. + 3. Prune p% of the parameters in θj, creating a mask m. + 4. Reset the remaining parameters to their values in θ0, creating the winning ticket f(x; m ⊙ θ0). + As described, this pruning approach is one-shot: the network is trained once, p% of weights are + pruned, and the surviving weights are reset.
However, in this paper, we focus on iterative pruning, + which repeatedly trains, prunes, and resets the network over n rounds; each round prunes p**(1/n) % of + the weights that survive the previous round. Our results show that iterative pruning finds winning tickets + that match the accuracy of the original network at smaller sizes than does one-shot pruning. + """ + parser = argparse.ArgumentParser() + parser.add_argument("--train_epochs", type=int, default=10, help="training epochs") + args = parser.parse_args() + + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + traindataset = datasets.MNIST('./data', train=True, download=True, transform=transform) + testdataset = datasets.MNIST('./data', train=False, transform=transform) + train_loader = torch.utils.data.DataLoader(traindataset, batch_size=60, shuffle=True, num_workers=0, drop_last=False) + test_loader = torch.utils.data.DataLoader(testdataset, batch_size=60, shuffle=False, num_workers=0, drop_last=True) + + model = fc1().to("cuda" if torch.cuda.is_available() else "cpu") + optimizer = torch.optim.Adam(model.parameters(), lr=1.2e-3) + criterion = nn.CrossEntropyLoss() + + # Record the randomly initialized model weights + orig_state = copy.deepcopy(model.state_dict()) + + # train the model to get unpruned metrics + for epoch in range(args.train_epochs): + train(model, train_loader, optimizer, criterion) + orig_accuracy = test(model, test_loader, criterion) + print('unpruned model accuracy: {}'.format(orig_accuracy)) + + # reset model weights and optimizer for pruning + model.load_state_dict(orig_state) + optimizer = torch.optim.Adam(model.parameters(), lr=1.2e-3) + + # Prune the model to find a winning ticket + configure_list = [{ + 'prune_iterations': 5, + 'sparsity': 0.96, + 'op_types': ['default'] + }] + pruner = LotteryTicketPruner(model, configure_list, optimizer) + pruner.compress() + + best_accuracy = 0.
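+ # Worked example of the iterative schedule described in the docstring above (assuming the target sparsity is approached geometrically over the rounds): with sparsity 0.96 and prune_iterations 5, the final density is 0.04, so each round keeps about 0.04 ** (1 / 5) ~= 0.525 of the currently surviving weights, i.e. prunes roughly 47.5% of them per round.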
+ best_state_dict = None + + for i in pruner.get_prune_iterations(): + pruner.prune_iteration_start() + loss = 0 + accuracy = 0 + for epoch in range(args.train_epochs): + loss = train(model, train_loader, optimizer, criterion) + accuracy = test(model, test_loader, criterion) + print('current epoch: {0}, loss: {1}, accuracy: {2}'.format(epoch, loss, accuracy)) + if accuracy > best_accuracy: + best_accuracy = accuracy + # state dict of weights and masks + best_state_dict = copy.deepcopy(model.state_dict()) + print('prune iteration: {0}, loss: {1}, accuracy: {2}'.format(i, loss, accuracy)) + + if best_accuracy > orig_accuracy: + # load weights and masks + pruner.bound_model.load_state_dict(best_state_dict) + # reset weights to the original untrained model and keep masks unchanged to export the winning ticket + pruner.load_model_state_dict(orig_state) + pruner.export_model('model_winning_ticket.pth', 'mask_winning_ticket.pth') + print('winning ticket has been saved: model_winning_ticket.pth, mask_winning_ticket.pth') + else: + print('no winning ticket was found in this run; you can run it again.') diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/Compressing MobileNetV2 with NNI Pruners.ipynb b/examples/model_compress/pruning/mobilenetv2_end2end/Compressing MobileNetV2 with NNI Pruners.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4be5044f3c9d4b2590b9c2f69e6f696bb0b0f97e --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/Compressing MobileNetV2 with NNI Pruners.ipynb @@ -0,0 +1,1653 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction\n", + "In this tutorial, we give an end-to-end demo of compressing [MobileNetV2](https://arxiv.org/abs/1801.04381) for fine-grained classification using [NNI Pruners](https://nni.readthedocs.io/en/stable/Compression/pruning.html). Although MobileNetV2 is already a highly optimized architecture, we show that we can further reduce its size by over 50% with minimal performance loss using iterative pruning and knowledge distillation. To simulate a real usage scenario, we use the [Stanford Dogs](http://vision.stanford.edu/aditya86/ImageNetDogs/) dataset as the target task, and show how to implement and optimize the following steps:\n", + "* Model pre-training\n", + "* Pruning\n", + "* Model Speedup\n", + "* Fine-tuning the pruned model\n", + "\n", + "Also, we will compare our approach with some baseline channel compression schemes defined by the authors of the MobileNets, and show that NNI pruners can provide superior performance while being easy to use. We release this notebook along with our code under the folder `examples/model_compress/pruning/mobilenetv2_end2end/`.\n",
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import xml\n", + "from PIL import Image\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import numpy as np\n", + "\n", + "from nni.compression.pytorch import ModelSpeedup\n", + "from nni.compression.pytorch.utils.counter import count_flops_params\n", + "\n", + "from utils import create_model, get_dataloader\n", + "\n", + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", + "\n", + "num_workers = 16\n", + "torch.set_num_threads(num_workers)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Background\n", + "### Pruning MobileNetV2\n", + "The main building block of MobileNetV2 is \"inverted residual blocks\", where a pointwise convolution first projects into a feature map with higher channels,following a depthwise convolution, and a pointwise convolution with linear activation that projects into a features map with less channels (thus called \"inverted residuals and linear bottlenecks\"). With 11 such blocks stacked together, the entire model has 3.4M parameters and takes up about 10M storage space (this number is platform-dependent).\n", + "\n", + "
+ "*(figures of the inverted residual block omitted)*\n",
+ "\n", + "Now we consider compressing MobileNetV2 by **filter pruning** (also called channel pruning). Recall that in general, a $k\times k$ convolutional kernel has a weight tensor of shape $(out\_channel, \frac{in\_channel}{groups}, k, k)$. If the input has shape $(B, in\_channel, H, W)$, the convolutional layer's output (with padding) would have shape $(B, out\_channel, H, W)$. Suppose we remove $M$ filters from this layer; then the weight would have shape $(out\_channel-M, \frac{in\_channel}{groups}, k, k)$, and the output would then have shape $(B, out\_channel - M, H, W)$. Further, we have the following observations:\n", + "* The model's number of parameters is directly reduced by $M\times \frac{in\_channel}{groups} \times k \times k$.\n", + "* We are performing structured pruning, as each filter's weight elements are adjacent. Compared to unstructured pruning (or fine-grained pruning), structured pruning generally allows us to directly remove weights and their connections from the network, resulting in greater compression and speed-up. For this reason, in this tutorial we solely focus on filter-level pruning.\n", + "* Since the number of output channels shrinks, we can also remove the weights in the next layer that correspond to the pruned channel dimensions. In NNI, the pruner prunes the weights by just setting the weight values to zero, and then the [ModelSpeedup](https://nni.readthedocs.io/en/stable/Compression/ModelSpeedup.html) tool infers the weight relations and removes pruned weights and connections, which we will also demonstrate later." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using cache found in /home/v-diwu4/.cache/torch/hub/pytorch_vision_v0.8.1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MobileNetV2(\n", + " (features): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): Conv2d(32, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(16, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=96, bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(96, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): InvertedResidual(\n", + " (conv): Sequential(\n", +
" (0): ConvBNActivation(\n", + " (0): Conv2d(24, 144, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(144, 144, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=144, bias=False)\n", + " (1): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(144, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (4): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(24, 144, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(144, 144, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=144, bias=False)\n", + " (1): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(144, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (5): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (6): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (7): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): 
ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (8): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (9): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (10): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (11): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n", + " (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (12): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): 
ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (13): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (14): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(576, 576, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=576, bias=False)\n", + " (1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(576, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (15): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(960, 960, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(960, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (16): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(960, 960, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(960, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " 
)\n", + " )\n", + " (17): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(960, 960, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)\n", + " (1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(960, 320, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (18): ConvBNActivation(\n", + " (0): Conv2d(320, 1280, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(1280, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " )\n", + " (classifier): Sequential(\n", + " (0): Dropout(p=0.2, inplace=False)\n", + " (1): Linear(in_features=1280, out_features=1000, bias=True)\n", + " )\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + ")\n", + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "| Index | Name | Type | Weight Shape | FLOPs | #Params |\n", + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "| 0 | features.0.0 | Conv2d | (32, 3, 3, 3) | 10838016 | 864 |\n", + "| 1 | features.1.conv.0.0 | Conv2d | (32, 1, 3, 3) | 3612672 | 288 |\n", + "| 2 | features.1.conv.1 | Conv2d | (16, 32, 1, 1) | 6422528 | 512 |\n", + "| 3 | features.2.conv.0.0 | Conv2d | (96, 16, 1, 1) | 19267584 | 1536 |\n", + "| 4 | features.2.conv.1.0 | Conv2d | (96, 1, 3, 3) | 2709504 | 864 |\n", + "| 5 | features.2.conv.2 | Conv2d | (24, 96, 1, 1) | 7225344 | 2304 |\n", + "| 6 | features.3.conv.0.0 | Conv2d | (144, 24, 1, 1) | 10838016 | 3456 |\n", + "| 7 | features.3.conv.1.0 | Conv2d | (144, 1, 3, 3) | 4064256 | 1296 |\n", + "| 8 | features.3.conv.2 | Conv2d | (24, 144, 1, 1) | 10838016 | 3456 |\n", + "| 9 | features.4.conv.0.0 | Conv2d | (144, 24, 1, 1) | 10838016 | 3456 |\n", + "| 10 | features.4.conv.1.0 | Conv2d | (144, 1, 3, 3) | 1016064 | 1296 |\n", + "| 11 | features.4.conv.2 | Conv2d | (32, 144, 1, 1) | 3612672 | 4608 |\n", + "| 12 | features.5.conv.0.0 | Conv2d | (192, 32, 1, 1) | 4816896 | 6144 |\n", + "| 13 | features.5.conv.1.0 | Conv2d | (192, 1, 3, 3) | 1354752 | 1728 |\n", + "| 14 | features.5.conv.2 | Conv2d | (32, 192, 1, 1) | 4816896 | 6144 |\n", + "| 15 | features.6.conv.0.0 | Conv2d | (192, 32, 1, 1) | 4816896 | 6144 |\n", + "| 16 | features.6.conv.1.0 | Conv2d | (192, 1, 3, 3) | 1354752 | 1728 |\n", + "| 17 | features.6.conv.2 | Conv2d | (32, 192, 1, 1) | 4816896 | 6144 |\n", + "| 18 | features.7.conv.0.0 | Conv2d | (192, 32, 1, 1) | 4816896 | 6144 |\n", + "| 19 | features.7.conv.1.0 | Conv2d | (192, 1, 3, 3) | 338688 | 1728 |\n", + "| 20 | features.7.conv.2 | Conv2d | (64, 192, 1, 1) | 2408448 | 12288 |\n", + "| 21 | features.8.conv.0.0 | Conv2d | (384, 64, 1, 1) | 4816896 | 24576 |\n", + "| 22 | features.8.conv.1.0 | Conv2d | (384, 1, 3, 3) | 677376 | 3456 |\n", + "| 23 | features.8.conv.2 | Conv2d | (64, 384, 1, 1) | 4816896 | 24576 |\n", + "| 24 | features.9.conv.0.0 | Conv2d | (384, 64, 1, 1) | 4816896 | 24576 |\n", + "| 25 | features.9.conv.1.0 | Conv2d | (384, 1, 3, 3) | 677376 | 3456 |\n", 
+ "| 26 | features.9.conv.2 | Conv2d | (64, 384, 1, 1) | 4816896 | 24576 |\n", + "| 27 | features.10.conv.0.0 | Conv2d | (384, 64, 1, 1) | 4816896 | 24576 |\n", + "| 28 | features.10.conv.1.0 | Conv2d | (384, 1, 3, 3) | 677376 | 3456 |\n", + "| 29 | features.10.conv.2 | Conv2d | (64, 384, 1, 1) | 4816896 | 24576 |\n", + "| 30 | features.11.conv.0.0 | Conv2d | (384, 64, 1, 1) | 4816896 | 24576 |\n", + "| 31 | features.11.conv.1.0 | Conv2d | (384, 1, 3, 3) | 677376 | 3456 |\n", + "| 32 | features.11.conv.2 | Conv2d | (96, 384, 1, 1) | 7225344 | 36864 |\n", + "| 33 | features.12.conv.0.0 | Conv2d | (576, 96, 1, 1) | 10838016 | 55296 |\n", + "| 34 | features.12.conv.1.0 | Conv2d | (576, 1, 3, 3) | 1016064 | 5184 |\n", + "| 35 | features.12.conv.2 | Conv2d | (96, 576, 1, 1) | 10838016 | 55296 |\n", + "| 36 | features.13.conv.0.0 | Conv2d | (576, 96, 1, 1) | 10838016 | 55296 |\n", + "| 37 | features.13.conv.1.0 | Conv2d | (576, 1, 3, 3) | 1016064 | 5184 |\n", + "| 38 | features.13.conv.2 | Conv2d | (96, 576, 1, 1) | 10838016 | 55296 |\n", + "| 39 | features.14.conv.0.0 | Conv2d | (576, 96, 1, 1) | 10838016 | 55296 |\n", + "| 40 | features.14.conv.1.0 | Conv2d | (576, 1, 3, 3) | 254016 | 5184 |\n", + "| 41 | features.14.conv.2 | Conv2d | (160, 576, 1, 1) | 4515840 | 92160 |\n", + "| 42 | features.15.conv.0.0 | Conv2d | (960, 160, 1, 1) | 7526400 | 153600 |\n", + "| 43 | features.15.conv.1.0 | Conv2d | (960, 1, 3, 3) | 423360 | 8640 |\n", + "| 44 | features.15.conv.2 | Conv2d | (160, 960, 1, 1) | 7526400 | 153600 |\n", + "| 45 | features.16.conv.0.0 | Conv2d | (960, 160, 1, 1) | 7526400 | 153600 |\n", + "| 46 | features.16.conv.1.0 | Conv2d | (960, 1, 3, 3) | 423360 | 8640 |\n", + "| 47 | features.16.conv.2 | Conv2d | (160, 960, 1, 1) | 7526400 | 153600 |\n", + "| 48 | features.17.conv.0.0 | Conv2d | (960, 160, 1, 1) | 7526400 | 153600 |\n", + "| 49 | features.17.conv.1.0 | Conv2d | (960, 1, 3, 3) | 423360 | 8640 |\n", + "| 50 | features.17.conv.2 | Conv2d | (320, 960, 1, 1) | 15052800 | 307200 |\n", + "| 51 | features.18.0 | Conv2d | (1280, 320, 1, 1) | 20070400 | 409600 |\n", + "| 52 | classifier.1 | Linear | (1000, 1280) | 1280000 | 1281000 |\n", + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "FLOPs total: 300774272\n", + "#Params total: 3470760\n", + "FLOPs: 300774272, params: 3470760\n" + ] + } + ], + "source": [ + "# check model architecture\n", + "model = torch.hub.load('pytorch/vision:v0.8.1', 'mobilenet_v2', pretrained=True).to(device)\n", + "print(model)\n", + "\n", + "# check model FLOPs and parameter counts with NNI utils\n", + "dummy_input = torch.rand([1, 3, 224, 224]).to(device)\n", + "flops, params, results = count_flops_params(model, dummy_input)\n", + "print(f\"FLOPs: {flops}, params: {params}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Stanford Dogs\n", + "\n", + "The [Stanford Dogs](http://vision.stanford.edu/aditya86/ImageNetDogs/) dataset contains images of 120 breeds of dogs from around the world. It is built using images and annotation from ImageNet for the task of fine-grained image classification. We choose this task to simulate a transfer learning scenario, where a model pre-trained on the ImageNet is further transferred to an often simpler downstream task.\n", + "\n", + "To download and prepare the data, please run `prepare_data.sh`, which downloads the images and annotations, and preprocesses the images for training." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "file_list.mat\n", + "train_list.mat\n", + "test_list.mat\n", + "Directory already exists. Nothing done.\n" + ] + } + ], + "source": [ + "# Run prepare_data.sh\n", + "!chmod u+x prepare_data.sh\n", + "!./prepare_data.sh" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " Then, you may run following code block, which shows several instances:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlYAAAIwCAYAAABECCVYAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9d7xlW1rXC39HmmGFnXfVrlNVJ5+mk9B0AFoRQRAFgVavIphAUExgul7Fq/fVa7iC4WICFG0woK/4okgQJAgoLdDd0N10pk+uXDvvlWYa4f1jzLV2qF11Tp0+h6oD6/f57KoV5pppzTXHbzzP7/k9IoTAHHPMMcccc8wxxxyfPOS93oE55phjjjnmmGOOXy2YE6s55phjjjnmmGOOlwlzYjXHHHPMMcccc8zxMmFOrOaYY4455phjjjleJsyJ1RxzzDHHHHPMMcfLhDmxmmOOOeaYY4455niZMCdWc8wxxxxz3DMIIX6bEOKXhRBPCSG+8V7vzxxzfLIQcx+rOeaYY4457gWEEAr4BPBbgCvAe4GvDCF89J7u2BxzfBKYR6zmmGOOOea4V/gM4KkQwjMhhBr4D8A77vE+zTHHJwV9r3dgjjnmmGOOX7M4D1w+8vwK8Jl3+sDa2lp4+OGHIADTjEsIeO/x3iMQ8SVC+z8IEV8TgJQyPp9+Voj4Tgizzwgp29eZrX+G6esvNdtz2rruBmL2z9GVtq8FCCeWO20TYfbPLbsmTn5uuuoXi2PbP22bR94Qp+7G8c/fsq/iyEsnN3b4/PDUtts8ucjdov38L/7i+7ZDCOt3WnROrOaYY4455rivIYT4OuDrAB588EF+4ed/jmAdwTpECBACtm7wzhF8QAmBNIZgFF6CFILgPISA1gYB+Lom+NASLUlwjspZhJJorZFaI0QcxIO1CCkPCVcIhBAIzkWSJlo61z6+HekK3hNaEijb9YnbkCsX/IzoHYVUCiHE7HOhPf7Z43i+EEqBPD0pFYLnNBnQdL+klPGz03VPj+v4Sm6z7viZ2bmani/vCd4f2/c7nSvv/amvC2FwzsXzf+R4jy6vlEIpRSAQfDM7Z3fa79m+n77R+HkpEdI8f9sVtJgTqznmmGOOOe4VrgIXjzy/0L52DCGE7wC+A+Ctb31LIAS8swTvUVLhgcY2KKmo6wpCoKMUSqUoBd45fDvgO9sgpQIh8MGBByloCUQcXJ1zADMS4L2f6WamZAt/SE4EEIRAnBjsj+z/9MOElgCEEO4YCPLhdGLhWzInpZyRi6OEQAhxPOJ2G7JwGqFTSh3uc7ufdwxWnbbul0m37dvzOz1WMSNpt9uVE+dd3Ckcdud1nMTtyO/tMCdWc8wxxxxz3Cu8F3hCCPEIkVB9BfD7XvBTIqbrBG0UIcCjb34rN6/fwsl+TWDjgfNc+vhHZs9nUaP45JblI0m5dT1hGgU7QmqOplKPLXubdb9cmJLaWYRLCMQ0DXgKpkRztn8hcLfE6vBzJ/blMEf6ojAnVnPMMcccc9wThBCsEOLrgR8FFPCdIYSPvMDHIpkyuiUPMYpx8/pVHvpLP/RK7/J9iee/+UuQUs6iPMH7GEWT8vSI02mpPZhpz6J87dbI20m8XBGe0yB1pCfH1nTbTJ04dvyH0a67k7GdjP69VMyJ1RxzzDHHHPcMIYQfBn74rj40TXfBcRH7r2HI9nwcJRcihJiiPMEuZnqw0yAOo0LHUowvdcde4LuZRr6O6cNO7PNhdOxwmWPETwhkm8a01s6ibVEWFaN34ejxtu/febeP73egjVy9CMyJ1RxzzDHHHK9OTMXVtxE6/1rDTHjewlqL954kSQhEfZaUEqFu47Q01Y0dEcN779FHtFdTCDgkt7fuyK3LHyXDU7Tb8d7PdG1a6yi8n77f/h0SnUi61Ml9arVwU2F7PBdiFoUSR7VttyFVxwoCjvw/09jd7nhPHv6LWmqOOeaYY4457hMEEQitSwLcYYA/AV+OGL7vv86e24ObjD/603e9/fLSB9n83v/7rj/3yWD/Xf+Og3f/57v6zNHIj+AoceAYaZmRl9us45XGLXqql4Ij5PqlrufkfrzUfZkTqznmmGOOOV6VCBBL+l7kAOirMcP3nyRW/+PutundXS1/LzElB6EVX89sB04hVTNLhJMVhq80sWq3J6WMUaiXuL1pClTewcLixeAksZIvkrQfxTwVOMccc8wxx6sKJ4XVt1O+DN7zfYw+9OMA9D71t1Jd+zh2/wbXvusbyB/+dMrLH6HZucy17/oGem/8fPpv+VL2/8e/prz0IYJr6L/5t9N/0xdRXvog+z/z3cisR7NzhdXf9vX4umDr+/4f6u1LJBuPsfYlfwEhBMVzH2Dvp74TvCM59wSrX/inENpw5du/hnNf9S2oziLV9SfZ+6l3svH7von9d/077GALu38DN9ii/9Z3sPDWLwPg4Ge/h9GH/zuqs4RaWCM5+/hdnaejxGCqRYo2CqeLtE+SqqP/v5I4Wvn3Ql5gp2Gasjy2rvjOXe/HST/UO6UOb4c5sZpjjjnmmONVgwD4EJCzwe50v6LqxlOMPvQTbPzB/xcI3Pg3/ztrX/q/02w/zwN/+J8AMaU3eM/3ceZ3/zUAhh/4b4i0w7mv+haCbbjx7/4Psoc/HYD65tOc+5pvxSxtUF76IPXNZ3jga78N1V/hxnf/H1RXP0q68QQ7P/wPOfsVfxuzcp7tH/oHDN//wyy87R13PCa7c4WzX/l38PWEa//ij9P/9C+m3nqO8cf+J+f+8D8G77n+r/7MSyJWwCxiFU29juuojp3bE4T1mPnpK4mTpqEvAUeJ1SHusqihPda7NZs/iTmxmmOOOeaY41UDwcnqrNMHz+rKR+i85u3IJAOg85q3U16+s5ND+ez7qLeeY/LL/wsAX02
we9cQSpOcew1maWO2bHruNeiFNQCSM49iDzaRSY5ePItZOQ9A942fz/D9//UFiVX+2NsQ2qD0IrKziBvvU11u99/E/c8f/4w7ruNolGcm2D5KWKYi/xdJXGak6m6q56Yk7DZte27xvjrph3AHU9NpNMm7WB14mgHrralLiRD+6Epe2PD0KLl6iSRvTqzmmGOOOeZ49SCAmlksTAfUl2/1K1/wx8gffcux18pLH5wRnCmENoePpYQX0F4JqQ6Jj6uPv6lOrCu8RB3X1IdqWkU3dS2fvt/6WwkhW7v5263mCKm6gy1DPJgTbvNH03inRKGmju7T9d/RJ6td/2x5KQnWtZs9Tq5OPo5/pxCuaaue22709v5d4UVWn87F63PMMcccc7y6EW4dBNMLb2Dy5M/jmxJfl0ye/DnSC6/H18VsGZF0jj3PHnkzww/8CMFZAJrdq/i6fNG7YVYuYA82afauATD+yE+RXXwjAHrxDPWNpwCY/PLPvuC60ovT/a/w1YTi6ffccXlnLa61V5jaEUgpY/rv5N8LkSo4jDzdJm34SiO0EbZpc+1pxO2osPy0lj6v1L7czXbuScRKmvRGsPXZe7HtOX51Qujkpm+qjRdeco455nj144VDVOnG4/Te+Pnc+Dd/Hoji9XTjcdLzr+faO/8k+aNvZelz/hAIybXv/Hp6v+4L6L/1y7AHN7n+r/4MEJCdRc78rr/64vdKJ6x+8Z9h679800y83n/TFwOw+Bu+kp0f+cfId3032cVf96L2v/va38j17/oGVGeJZOM1d1z+K//1L7WBITH7vw1P3brw3RKRO4UET67rtIjV7ZZ/Eeu91QH+uNfU7RCXP2WZE8L4d3zaBr/vbRfuuK67hXilmd6pGxUi/FptPTDHK4Pnv/lLCOGUaet9AiHEbwP+EbFtx78MIXzTPd6lOeZ4VeKtb3lL+IV3/9yx10IAmaS/plva/O5/+j9OVPJN+wGedlu8bW+Y00nXPSRWp+xM+/bxvoC3uMu/CGL10RsjXr/R43v+yFvvsBvH1yGT/BdDCLf/AHON1RxzvOIQQijgW4HfAlwB3iuE+IEQwkfv7Z7NMcerFfftHOqe4d/+/jfO7BWOejCdap56O++vE7qp6Z/U+nQt1AnvKzH1kDpKrI7orkIIhNZhfaqZesFehEc1WVISfNRITV3Wp9WAR20ljmusDtOHs31q9+/3/stfuP0JnZ0S8aId16eYE6s55njl8RnAUyGEZwCEEP8BeAcwJ1ZzzHG3EALEyRYr816Bp1XJOe8R3s8IVoBot8ChOP0Y4ZgSIA69oUIItxdjnxSrt5+9hSwd0Wode+9E65zDVjTysIJPHPYF9N7jnYdwujWElHK2rmmvwKPEStyuKfUL4S4ze3Px+hxzvPI4D1w+8vxK+9occ8zxkiCO/QkxH8rgdELjvb+Vdp584WT135FolX+RlXDxY6dEqo5uhhMRpaP76RzeudtW3gXvCc4T/PEqxCkRO2qGOhW8h+BnhO123l0v8sDuSsQ/j1jNMcd9AiHE1wFf1z6d1XuL4/9MlwVudUo+esPRJkEpTQiepqnxzt3yudu5Ct/utZN6g5OvTdcnlYpVSdoghQIhmZXGI3C2wtlYcm6Mmc16rbWzCXCc+cabprOH6YOp1wyAd/bYclIpvHftgBIIwSOFIBCOzXKP7i/ENIJJEpxzNHUz29fpDPioUWIIAaUUxhjqup6dKykFSmmsc0gp0O3jmeP1kXM9bYYr29ddu1wAEHFfEOqORoXiNk+ms/yTg8CxZ9PIQns8tqmx1iKlOHGeQvt+g/f+Psq/3Ue7cp/gZH+7k5GhEMJtrFS5ZbmT63zFcSTadKftCcGtzZc5rNq7X/CqJFbl5Q+z+6PfBkqx+oV/El+OyB972yu2PXtwk+rqx+i+/nPv6nPb//VbyB97G93XfvZL2u6Nf/+NLH/e15Kee+IlfX4KX44Yf/R/0H/zb/+k1nMSow/9BNnDn47ur35S6xm+/4cRJqX3xs9/mfbsvsNV4OKR5xfa144hhPAdwHcASCGCVgLvQ5z4SYGQGilVOwDGGdp0NqbaHmDT99I0ZXFlHZNmHOzv0hTjYx3kp3BHyNZ0ZpokyS03t6M6Bq11JENAYxtCu57pckFqkk6fM+cusLRylqyzgFQavMV7Aa6hHN5k59pTIALLyyvkeY7WmsuXL5NlKVVVI6Wmu7BMUTSUZTnrWu+dwySGNE1IU83e3j7WOqQy0TxQCRJjIHiaukQSsE0sRT96LBBwvsFajw/wwMUHMUnC1eefoypLtNYkSRLPr9EoFc99CIGFhQVWlpfY3tqanZMkSSLZ8oH19XUABoMBSZpSWotSijzL2N/doy4rEqNZWlwghMBwOEQpRd1Y0u4CyxsPQtolEMncyXiMEAIpTms/Akoe6Q93eG3N/j9MlQDecbC7zfXLz4O35FlGWVY0TUOapoQQeODcWX7xPT9/61U9x32F03ycjiJMzUGFuC0vnUZ1jl47L6VX3kvBLcTqNkTpdpO+k/e2e4lXJbEaf/SnWXj776H3hs9j9KGfoL7x5CtOrMYf/R93TazuF0wbj74SxMqsPXRXxCp4F43yjjzvf/oX39V2T67jVYD3Ak8IIR4hEqqvAH7fHT8hBMJ0UETS5LxHBDcbMF0b8ZjeTNI0pWma6UcJ3rK7s431gWAblIjE4+SAq7U+sslIzGxLQI6+PsVUuzAlDrVtmEwmM9LmnMMSSNIEoxOE0PgQwFtE8CiVIGRcp1IKIWE4HLKwsIBzjrquqeuaJEkRQqGkwbkSpTTeR6FqWRW0KhCyTsLy8hKbN2/OUgXOa4zWCCAxBimYkarpMcdzFWmLFAIXAjdu3ODcuQfo9/rUVUUIgbquZ5EwG+zsvAMMBkN8q19JkoTFxUWKMhI5rTWj0ShuMASyLGvPraNpGmQ765ZSUhTFTIRb1w3oCucsODcjVqE999O0RwgBj5+Nj0eJVfCHkcSjaZJbvHi8B+coxiOCtwjvcNaSpilFURx+R0qRJOkdL9f7ARvnL/L8N3/Jvd6Ne4KNBy4ci1SdJFbHolBS3p5YnUjD/YpErFoyFY6mBl/luG+Ila9Ltr//m7DDbQiexV//Fch84ZZmlqOP/CSTj7+L4tn3Uzz9XqorHyPYivLKR1n8rN9D93Wfc2y91fVPsPcT34FvSoQ2nP29fxuhNDs/+m3UN54EqVj5zX+E7KFPxR7cZPuH/gGhqQBY/oI/Tnbhdez99L9+UY06Qwjs/cQ/o3juA+j+Gqjbn97q+ic4+Pn/H2d+519h8uTPs/0Df5eLf/Z7IASu/cs/wfk//k4AJr/8LnZ//Nvw5ZjVL/rTZBffSPDu1O37umDrP/9NfDkmOMvS5/xBOk98Fns//a+ONR5d/ryv4eDd/4nJx99FcA2dJ97O0m/8/diDm9z8j3+N7MLrqa5+HNVfZf13/VWkufWmOv74u6hvPMX2D/19hE7Y+AN/n2bnMns/+S8JdYHMF1j97X8O3Vvhxr//RpIzj1Jd/Sjd130Ok6fec+y5rwuEyVn8zN
9Fs3ed3R//dvzkAGFSVn/bN2BWL7L9X78FoQ31zWdIz7+Olc//oy/j1ffKIoRghRBfD/wo0W7hO0MId+ytoUzKxcdei1KKJE0YDoeMdm5QTkYopWKvNCln0RdjzGxwdtaBszgsDonEoUTABTsLuQMzknDyBjwlT3B4Yz3ZOV5rHaM0IcV7j7WWoihomgaPJzOSzBgSJZmMBngcBEma9sgThdE6rtPHm2pVxSiJEIKmaVDKYIwkTTNCGOGcRUqQUpFlS1hrcc4yGo1JE4MgEHxDJ8sY1lGnIaTAeY/UMZpnjIn715JAOY3sAFmWUVYNm5ubnFldZjRKZ/tzWLZ9eO6qqiIYQ5ZlaK3JsgxjDLt7e3R6fYQQFEXB4uIiWmuGZUkIgfFkjHOOfrdHliYz8ua9jwTaOQKgtQGtI7ESAsUhqT2MIBwSq1lFFCDErY7UU2J1lGBhPYPRgOHgAO8cWsTXV1ZWWF5e5ubNmzjnKKoacUr65X7D9ecv0fJtfu87ox3Dd3/tZ1JJBx4MAh0Evixw1ZhqtMezn/gg1y49BWHCaLDLYOs6K/0uZ5eXqUYjdrZvkGaGtQdWqWzJ5UvPcvX5Z1nIcmxZ89Ajn8ri4gJZljGZFBRFwdbWNgCj8YjtrS1GoyG9Xg9jluktPcgTb3gTr3nTZ7By8XGy1Q3SxRWQkhAcwteoYBHCAVkcQ0RsnOybBmstSZ4fM+8MU7H5TIN+xECzXe4WZ3TgtKTgUafzX5EU4BG8UKRqivsp5Xc7vCLE6qV49hTP/iKqt8KZ3/PXgRhlufbOP3VqM8vqykdnKbZpxGrlt/yJW9YZXMP2938za+/4S6TnXoOvJgiTMvyF7wcBD3ztt9LsXObm9/x/OP91/xzZWeTs7/1bCJ3Q7F5l+wf/Hue+6h+y/Llf9aIadTY3n6HZucoDX/ttuPE+1975J+n9ut9y6vEmZx+jufkMANXlj2DWHqK6/mQkkQ98yuExeMe5P/QtFE+/l4P/9f8l+4q/zeiDP37q9vXCOuu/868i0w5ucsCNf/sXyB//TJY/96uPNR4tnn0fdu8aG38oNifd+k9/k/Lyh9EL69i9a/S/7C+y+kV/mq3/8k1MPvGz9N7webfsf/e1n83wfT80S1UGZ9n78X/G+v/2f6E6i4w/9j/Z/5//hrUv/rPtcVjOfdU/BGDy1HuOPd9/17+brXf3R/8JK1/4pzAr56mu/TI7P/btbHzl/wOAG+6w8Qf+3qstWgVACOGHgR9+sctrY+gtnZm5Jyf5Ev1un72dTcpihHM13jeEsoQQ0MqhhUVqRRCaQEALifQBgUJIgbQB78FLiVQ+6pOcRKJBENOOSJRMUNrgg4tVxsIjfMBZj0CCyEh66yT9PlIndJuawfZN7OY16qpBoujmCySdPtY5tq48w+RgB510WDt3AbW0jBQwtR1zzkMQpEkGQWBtIEslSmo6nZyYsnMgohBVGY02mrqpIYCSmoBEGUNp3eFAEwIBifOCgMS7GNGbntOZDCkIJILMaKpywnhkWFpaYnt7+1i0R4r4mdi9pCbp5nQ7Xeq6Js0zHAGhJYsLPby3WOdI0ix+YDzB1hWjgwOSJEGpSIRs65QdyaTC+4CUBiE1IbQDZAjHCPGsB9xhvIqpgFvKgDxBrLwPEBQCgZKROPvgEaJgMtynroo4+AqBtZ6DgwEXL16gqir293cZDgcEf/8MZiEErLe3vk6YCZtDDNvhhQXZ4JoarEcqxXiyz9Urz1COdri5+RTbW08y2r7GcPsmqu7w+Js/A9VodjevMRqXJCZFe8OlS5cY7ZRkepmd3QEri6vkTZ/iQGClok5zainZuXyJyc4eqZNw0NDzGaOrE+ygJj2bMOjc5NLCZcp8mTOLa2AdWWJiujcInIjXkURC8DECCQQJrmWOHhBGE5yjKibx+lQqpvGVwltL00Zp9dSB/QhEq6U7clKPEbVTdVq3fhG3J2F3IXifrSuu6JD4nRI9A8DVQIxwoSRIFascg8AFRwgWGSyyJdn+qL5xqjGdbvPI8+BP2DJ8EnjZidVL9exJ1h9m7yffyd5Pfxf5Y29Dpp2X1MzyKJrdq6jeCum56Fgr0w4A5ZWP0n/LlwJgVi+iF9dpdq+iF8+w8+PfSn3zGZASu3vt1PXerlFnefnDdF//OQip0P1Vsoc+9fbnSSr00jma7ctU1z/Bwtt+B9XlDxOCJ7vwhtlyndf8+nh+Nh7HHmzecfu6v8b+//zXsdGoELjRDn68f8r+v5/i2fdz/V/9aQBCXdLsXkMvrKOXzpKcffTINm++yHN9hXr7eW5+T+tS7D2qtzJ7v/va33hs+ZPPAXxdUF39OFvff8jDg20Oz8WnfParklS9FAgkQmp8iKmdEATSpJx54CJCaoSUmDSlKCcM97aoBjvUtSO4GKkR7WAbJC0hkCACKs3ReYc0M4yHe4wPBjhn4/YE5J0cKRUhCJQQgCP4eENXSiNQrK2dIev2cSoh6S6RikA5GiCEpNdfotvrs7S2gc66EALdbpfx7g0O9m5ifYPRjtQIPB6jDFoLQgBrXUz5uYKiKEmzDkopAj7+tTe+qS7M+0BVVgQfsM4jlcaFKGT3zoGUrXg/puaqpj4kJbMojkcIidE6RgO05uDggPX1dXq9HnVdz0TqPjiEAJMmGKMxRtHr9dja3qaqazyBvNMhzzOGozGEQFXXWB8oJxNEm5KUaUrT1EgOByWlFFmWEZBkWQfvo5YsXgsBPx0AjlZDnRKVkiIg5fQY5ewYva/ieZOHdMy0Hj/BhxjlVAqpYopzc3OTLMtQWlEWxV1Vhv1KIJwWbQkBH/yMWAsCnpjeDM6hkKiWUPf6fZYWEzQlk73rPPPhTaqDfR7ZeIhffO9H0FqQpIIkS6kawc3Nffb3S3Z2RiRJijKLVM5w7eYuVnr6bpnOWo9Op8vC0jK+aBhv7pGajHJQkHvNuCjYvnSDpYf26QwGZMMD8vGQJO+T6RQpRPy9t8cn2oILoGXzAh8iKYbICXzw2CO+UO2JiNHrqe6xvb6m5wiOFDi8CLwQ0bibdZ2GY/s0XdeJ9R2PZrWPZSRXQRyes9igue2HGGIU7xhZasnUKx31eiUiVi/Js8esnOfcV/8jimd+gf2f+W6yB29PSu6Em9/zf+Em+6QbT9B/65fd1WcH7/0vqM4S577mn0AIXPr7v/O2y57WqLN4+oXNxo4ivfgGimd+AaEU2cNvYue/fgsheJY/72tmy4hpc04hCUeafJ62/dGHfgI3GXDuq/4hQmmufPvX3Nrsk3gBLr7999B/0xcde90e3DzcXrvNF2osehRm7UHO/cF/cOp74mQD0xPP444FZNqdRdZuXcf9r/N4OXFLtZ2UCG2QpkNnYZ1Of4VeU9JbuMlw9wa1l4wOtvGuRh0xoQ+th02NZ2XxDKsbF8nSDuV4zLXLTzI82JkJ3jc2NhAILj9/Ce89Wscoim08QQiU9FT1mMvPP8OZiw9D7qlsyd7uFlVdcebMORZWzqCzLtoYCIH1jfP4pmR78wr9rsbWQ1KV0OmmCK9wzs8E3LYVe
VdVNUsxxuiMbCMRxAgcAqUM4Nu0oJv9mSSb3UinejElFSAJBKTSBGq8Cy35gP7CIpPJhLrdh/39fRYXF2caqxhNOizdFiJqtaqqIgRPINA0Devr6yilKMty9pmqqmNKNzHHCgMmRUGWpiilSNN0Fr3Spr0tHxlDbjcQHK3yjIQi4MO0eopIvoREqnics/X5GN2ZFUe0hDUQCevW1jbr62txYe+P3XteTVAoEpGAVCQohA14K/FWUTqBDylJtsrq+iO4fI0rNwe87xd/iaWVPg8//AAb51ZYyhcRqULlfUQ6Ju31SIVgf/eANG0oi5JmOyB1IF1ZIjc5e0KzNx5zYekMbndMhwRMYG844uBgn2x/l3Swz3Ixwdkaby1CtNYIwkdS4D3T9PNJnWMIAdlaExyNMB3tr/dCFXb3G6ZViy8MOSNfQUh8AM8hoSaIVjsJcIqI/1cglfhKEKvTPHs+84U+ZIc7qLxP7w2fh0y7DN/3Q7Nmlmb5gWPNLI9CJPmxJppnf+/fnD0OrsGNdqmuf+JYKjC78AbGH/lp8oc+jWb3KnawhVm5gK8m6P4qQkhGH/rxGCqA2zbqzB76NITSbWRsleziGxl+4EfovvHzcZMDquc/SPd1v+m2x5xdfAPbP/QtdN/4m1GdRVwxxE32MWsP3fFc3W77vhqjOosIpSmf/yBusHnqOcofeTP7P/PddF//ucgkxw63EfLuLwWZ5IR2vWblPH4yoLr6MdLzryM4S7N7lWT9zsdybH1tlHL88XfRfe1nE0Kg2XqW5MyjL+rzvfNPxJ/MrI788D3BkdduGawONQcuCOToJpPh4J7ekYSIuqmpoNwYg2ssSkmU1iRGU5aT2KDVedLuIhcefS03rz7H/vYNXFXOxM4EiW08Vjj2DvboLK3T6S6hjeNgbw9bxSaz5XjCwe7eHfbKYoFrV54D4PpzH79licHe9h2Pa29n90Wfg/39fZ78xC+/6OU/GQyHB8ee13XNwcHBqctOXzbpHg9dPIcPgbqq6fZ7aGMoy5KiKJiaWFZVBTaK5Y0x6NbFOqYE1YyIFUVBVVvWkwStNa61W5CE43YK7UBxMmIFh5GcWdpDKKQQCJ+APPLTkAERbEuoDiOB04zRlFwmqaGuR/dVxdXdQAaB9Aq8QApNsDXSGxLTpQogVJ+1s49hbML7f/7neO/7f5HGJezsV5jNAbrTIemWnM1X6CyscSHvY20TSfEC1EEwmZTUuwWZhlRKqqKgrCuE0TgEMigme0PKkaVuAkUxIriopdIqILzFNhVIiQ+OID3SQRDHLULE0cIFHyO4U+H3UfJ1NKr5aiBWR/f3lujVyWWJE4WYAozVjT543DQF3Ib8oq+ZaCtkD6PUM/J2JA34SuC+Ea83W8+x+dPfFauhpGblC/8kvhqf2szyKLIHP5XBz38v177rG24RrwtlWHvHX2L3x/85wVYInXL2K/4W/Tf/dnZ+9Nu49s4/BVKx9sV/DqEN/U//Yrb+y99h9OGfJH/0LbOoSrL+MC+mUWf+mrdTXvolrr3zT6L76yTnX3vHY07OfQpuskd2Mab+kjMP48Z7L/hj6H3aF566/e7rP5fN//Q3uPbOP0Wy8QR6JTaWVPnCscajy5/3NTQ7l7nxb/9CPE9JxtqX/IXTWx/cAd1f9wXs/Ni3zsTr67/jL7P7E/8cX43Be/pv/bK7IlYAq1/6F9j9sW/j4Gf/A3hH53Wf86KJ1Z/7G/9gFuHodDoIKSkmE5LEkNGglaQsS5IkAWBr/wCZZBij6XV7BOD7frnhuXf+6bva51cCIURx+bRqT2sFTiKDw5cHjJohngDOUhUFvZWzGLPEQ4+9kTNnLjDYu8nB/j4+eLTS0cfKFjgETVlQFUN8U2Orya/Z/mqfLJ7/5i+haRo63S7dXpcsy0iThKJNH2Z5dybGFz56RDVNQ2jTo4kx1FU1S9PESsVWd+Y9Xk59h24tf7+tnQKAnPptgXfTtIjDNpbGRgG0FJLFTqDb6YBgpqESrfAshMCkmFDVYmZz8WqEtw47KRHeo5MMQqDTycm6KZOygxKSTtJB1JK90c+ydTCim3QZT8b0l/oImbI/GHMw3GdtfYWzZ89x9epVGutZXlnl+ad/GVeW9NMEKTy+KbFVgQyOlcVF3KSil+dsXz2gaaCygeFwH+9KjHLo0BBsiVMGoQwhuKigCpEUeB/Teop4ZUwnW0fJyJScH60YPdrW5n7HLd54L7C8E/pIR57D1jZMo3w+psl9u5ySx6/faVr7lTw3L3sTZiHE24G/HkL4re3zvwwQQvg7R5aZN2Ge42XF89/8JfyVf/FfYlolSeh0uzR1zebWFt1Oh34SNRd1XcdogHPsjkaknQ7GJLPB7fufClx655/h+vNP39M7Ut7th0de/+lIpciyLlpnlIMdivE+wdfYpgBnkTLqppbPPYZKlyAYRHCEMMFaS11VSKUI3jMZ7FI3DmEMmdEIW/KRX3rfnFi9RDz/zV/CG974OpqmYWllhSRN6Ha7lJOCSVmS5V2c9xwMR6RacbC/T1VV9Pv9WFXZVgGOx2OklNR1TRCScw8+RtrpEkSMWiHUscjFqYPmTPw7JVdxKBb4WDU93GY4GFBVNd7Hgeb82TMYrXn66adxjW39wdJZWlFKgfOWEGBv8wZ1Vd0Xo/Rb3vKW8HM/93OnvjdNgf2hf/U+hIB//9Vvxo1HhNqSJinBe5wMoBU7eztsb20y2t1leHOL97/7PfzMD/0km9evo5Rnda3PGz/1CVZWewyGu1y4uMEjjzzE5uYmw+EAKQTPPvVh8jTh7NoKF85tIJXg6uXLXL96g0ykNAcTznZW2XruOlJlPHdjB7eU8fivfxuv+Q2/gU95y2exeuYh8nwZLRO88KAsKoEk6eMaN5tgKaVm18A0ij0tfNBax+tCqaixav3WIF43U9+5EAJC6yhePwVTEn1Uh3i8kjBixhpuo727G9Jy222eghDAhgSlYyTKE2hs0wr1Q0wyeUG0alFoCVraI58/bjvxld/5PgD+w9e+5bbi9VsMU012T5ow371nzxxzvAxomoZup4t3jrosaZqGXqcze//oTMU5R7fTieF61yDldLZ+r/b+BIKnnOwSECRGUXvJ7s4NJAXQIIID66lal/EF16ClaPUFGiG6GBMI6tAZvJ91sNbjnKWfKarBi0/LzXE6GmvRxlBMJigpMUqzPZowKQqcizd+qQ7TMsvLy612LdpNIGJJvNIa6RxCabxvUK5EaY1TOY3QSNQxYgWH1YHH4dtUX2u/4D2D/V32blydpZAgphGH+wdxcLbuSIrJzWw8QoiVmRH3Bae6BbO0WEseDk0m24hdcDhX4JuKuhhQ1SW1ayi8Jet38aJhXI7wMuDw+HpMEiounD3L6voCVGPK/YpqvEexZ9jLNOP9fQ5291Ba0pGBRAVW1hborXSpioLgLbmAZjCg3J/w3PaQqgpcvHCGznjMblOxv3uTcrRDOdqjWVjF6A42eJRRpImhricYfTjQTws2pFJ452b6PWBGmqIWkaiFbN+fWoZgTNRe1TXSWoTR
p6bCbiE20yzG0dRZS7yn3QNOw90Qq7syIG0zgQB1HSeWWgSqpoiRO6FxgPcOISQhiENJxGnbPEGaboepxvXFpg9fdmL1Ujx7frVj8z//rVuq65Z/01ffIj6/X7HzY99OdfV47cHCW76M3qeebiVxryAUVLZES83+aAgEhK1Z6uWkWlGWFWmq8K3RY1CSRCu8bbBNFcPsqPtiCJFSkeV9tE5wXoBwdJYWcY1AhArbNDglkCTk3WWE7mJdACpEK+Kc+lRN04nCa0JokDJBSEmaL93LQ/xVganQfnl5mX6/z2RSMBwOQYgYHU0MWsXZc7/fRynFcDgkTVOSJKEsy9njuq6j6NaBQiNIUEEDEieOeFXd4eYuOIwwCALe1hSjAdbamaXD9PNlWc4G7Wk6aTqoTj21Tmsfcj8ihDAT/8e/SLiee+Zpbj7/Cfp5xtm1NWpb43CIROG8wIUCpGU0PuC5555hMtjlTD/lwvoCWaYwNIRJQVeCHQzYtlGsXo3GKCmhKck7OVobJmXJcP+AYjjBTyzNsKIYVjQeKq8ZNw06Uax2+iTaM9rbZDLYwa4+gE+jxkohI0HUOUKq+B22xPFWH7MjEEe/d0BKRAiHUa4jxOykpcLt8IJWC9yO3N8d7urzUZuPVK3MCkdTFzSTAVYIkqyDSbqAprGexnmkmUZg5bHJxdFr+4UqG8VUw3aviBXA3Xr2/GrHmd/1V+/1LnxSWP3CWz3C7kcYKcnTFBegt9DHaE013CUx0Q/Ge4fwgNAUVUXe6+KsJUnMrEdcaG0H7jmEYGFhPcYfpMRaS9pZw4glfDOK9zXTJ0m6aKPxKsGFQHAV3sZ2MNMUweyGHMA2DqSi8QIhzKmbfv7vfhnmiDau+7rPYfGzfs9LOoxL/+/v5sE//73Y4Q57P/HPWf+d/+epy9mDm2x+7//NA1/7bS9pO3dCffMZ3Ghn1p2hvPRBkIbswus+6XWHEOh0OiwtLRFCoCzbYg5johu+s9EzSh4O/nCY/nDOYa2dGa6CxKgEoTK80DgnQAScOIy23nGGP6v680jh0cKjZTimy5peD1VVzcjTzK6hdfM/OmDGgeh+CeWejulxGWPY3t5mMBgwHo/5u3/vOyj3N3ntY4/yWZ/5Fh597BFkKqlcgSsttilp6oLNm9e4fPl5hK04d3GVtaUOZTkkEQoXoo5x3DSUozFGJzSTmnFV0TEBVwS2rmxTuQrpIdSKcuQYjxwHQ0/loQqebPcAXxb0V5ZwTcHmjctc3N/G2TJ2kxCtTYSUaJUw7a85/U6mhEAnyemTv6MRlSMkLISAtxapNUJrgrXRmPdV2rdaQLTzEQG8hbqApqBxDuE8Rmh00h5rcEwrh1+q5uywEOQeE6s55rgX6KQpRkQSUhYljVLk2qAQNNaiTKyMqpvYEscHQbCeJM8pJsN4IxISdx949oQARekxSRYb4yqPQlAMhjTFAWneJ+/2EGmHIAVBGkQISBHNLuu6mt2Ip/9X5YQgDVJJXPC31VkIndzW8uKlQvdXb0uqXmnUm88ca3tVXvoQwuR3Raxu10apaRo2NjZmbWCmZCVv3djtZEwnz5gMY4Qjz/OZ2LZpnbTh0BbC+0CQkgqFC21VYPD4cHzWfRKHA8Zh2asQlkwEOonm4Ej0Ylrhd9SJfboPU5I1TQe+IJG7TzCNzA6HQ37wB3+QS5cCZVly6cd+gq4OPPv0swB0+11WN1bx0qOCw2iJtw27m1tUowlLiymLizkh1JTVmG5vmdz02NvZpdfrE1AgDfu7E3Z2Dnj43Aof/6XnSbs3WD+zSppkDHYHTAaO8Rh2RmA9VMKjt8bkpiRrSlQDxWgP10wQviFMzXgDrRfdIZk9SpDCXVazTb/H6GEmZ62U7m+afAcI0ALwDhEsrppQjQ9ixMo5QtMQfCDvQtZbQkiNrWOvUYgdFpIkeUUrAuEeESuhk5vPf/OXnL0X257jVyf6K2fQWkcBcAB8YFKMyHoZQqvo7aM0QQbKuiTNO5gkGjUak5LnnvF4PGt5cs8hBFInBBS1rbG+IhGBqtrBNwdI7eiZDbzwOKaDrUBKg9KCpirRUrU99trKmWBJkwSUwDlL8M0L7sZRXPn2r6H3xs+nePo9BGdZ/x3fiFm9iJscsP0Dfw832iU9/1qK597Pua/6h6jO4uyzRyNS9dbz7PzIPyQ4G32ufsdfjt+N9+z8yD9+wXZKAINf+AFGH/gRkAqzepH1d/wlfF2y+xP/jGb7eYJzLH327yN/9C3s/8y/m7W96r7uNzH8wI8ghGT80Z9i5Qv+GGb1Irs/+q3YwRYAy5//R8kuvJ79d/077P4N7P4N1MI661/2F2/Zj06nEyNNbUVSv9+nrGIvQDfTUAnKsmRleXkWtZrqgaa2C1OSJaREZRkiSUFoZPSMR/hogXhIoMSpY8PRAVMER3CONDn04DoalTq6H9P3jr8fCdirgVhFv7CKoih49tlnGY3WCMEzGReUzqFFgkdTN4GyqFGZxgdLYhJWllZYXV7h/MYGa7bHwmIHWl+kSVHgx47hsKTbW8ZaSVM79nZrdnctyu0zHjX0U0tSJiwsCMpCsLlTUVnByEkkBhcCVRkwCopiQs9rXFOyt7PFaHBA0qnIDQgZyXT0sXIckuQXQQSmTZZhlu6bYlrtiXOHacNXIaamInhHEA3VeMDNa5fZ391CG8Pi0jpSGEqh0TpFJYaqqtr+o22RkpTHtFUvhBeygDgN94RY+abaOPq8f+6x8E/+2J/Ee0+SJLFtRZogpCDPcrSUaKXITAI+oI1BGRN9PoREpxnWxrYF3TzDNg06MdEHqamwrkGIQHkwIlMG0bL/adNXKUU7IMcZYVWWjA/2GRcFLngmRcF4MmY42Sb6ZzqkCjz88EXyLGUymeBCIO92yTo51juE8uT9Zbb2C7pLZzGdPmfPbTA+2EUL6Pb7OKG5cuk5FjKDrQusdXiZUNmGxcUOgsBwf5d+t0PwDmdr6gqyrI+Ukr39HWrvWFxabg3mBO9+97tZWlqEAKPxBGUSHn/8NYzGQzpaMBgUFEVFWU8YTw64vLVLOSkpSovKlnjoiTcSREK6sITqL7OwvIqblCwtL1ILQZ7HFiNVFf2TqrJCaz2rUFFSYa3D5BlegBISaT1NXVPZhjRJ6XRydnd3WVtfJwgfz18b6l5eXmI0PCBN42AzreJTOsW5gNaKprE0TY0yJjpJt7NuS0BqhZtUGK0pxrZt46EIWJTSND72oUuSrI1MSYqioqpq0jRD6ejWfa8hpSbrLuBchbcgUdi6pNNbIOQ5nd4qSvYJGIJwCFqXZcAFRwI4QARJknVxiNg7z9YYHQWdRp9OWoKtufZd3zB7ftTGRHYWOPfV/4jh+/4rg/d8H6tf9Kc5+F//nuyhT2Xx7V9O8cwvMvrgj93x2EYf+BH6b/kyem/4PIJrYh+0yf6LbqcEMHj393L+j70ToQ2+jM2OD37ue8ge+jTWvvjP4ssR1//Nnyd76E0s/cbff6ztVbDVrDclwNYP/D36b3sH2YU3YAebscXVH/1nADTblzj7+//ubQl
eJ8sJzlMWBaPhCKU1QhuC1hRNg0wSvACEmPUDNMbM+hZOq72gJQcOGmEweGSwBCHwxFY10ULhiC1CPJpjz0OILu1RaSPxSAIKrTVFURxGpITAE8mgageaqTg3in6nfSRl+/yOX+mLghDiOWBIvDRtCOGtQogV4HuAh4HngC8PIdzJTO2YTmb6ePrX7/c5t7GB2RKx8bZ1pCZjZWUN76CuHXVt8U1J3dQs9hc4u36G1732dRzc3GP/yQ8wLAuSRJN0O+g0ZTIes7dv6fZKgpcEDJX1SKPYOqjRAQob2KkGZPTRSZedwXWcSqjRpCJFhyiots4yGjnUpMB6x7NPPs3GxTfQWXyQtOtg6rkkJbZ2TN3zZz5M8UK5pWHx9PWTxGoalZwVPHh/qgj7btwBjpH3uyBot99EOPQYPArBrccIrWl1/P4nwwOuX3merRtX6HR7JDqh31/C1iWDg11UktHY2FBdGx1bfgUHPtzBXujEjojWxkHKF02u7v0I0mI6e5NKYdIEG+IMSkmJkgqJiJWdId4wnPcoHSsbnI/2/VJIrHWEdvbo2j/vPd42aGMYTQo6eQcpxKzsXklBTSBpn0tAJwZZ1xRlTWUrLDUmEfhgaRpLKjQSQ1W1JM5a6qYmExlpllA5j0lyjImDnqsKhFBk3QWqYkRRNyws9ljo9VHe4qRGaoE2mqopGA8HLC8tztykpQClExbzDuNxRVNbtvd2uPjgw+R5jpASZy2f/qlv4srVq3T7PW7e3OSRRx6hkyq06iClJ+12qCvL7t4eO/vbVEWBdzAcTlhKl7hw/gw/87/ezWf+xt+M6GQ89/RTXHzgPNY6uv0+49GYLM/BR52IUBKTmkiyqpLFxUUCnrIc0+11cU1NahK8DSz1ewgBdVmwvNinnIxI8pxet+23lqYt0dXUtYvfswNjohCxqKo4XEhJlneoqwotdTsRC1gXfYC0VgxHQ0RwZHlCIFZhCSkoJwXWg0agjaFpKsrSYp0Hoe6bnmiBgPNN25YlRQiD1QahUlw9pnRAcAhpYq8/DlM7VVOTyFh+7UOsINFKodKMwkU9z9TV+TTcKRXYec3bgdjuaPKJnwVii6j13/lXAMgffQsy693x2NLzr+XgZ/8jbrhD5zVvj+aycFftlMz6w2z/4N8jf83b6TzxWXE/nns/xVPvZvCe/xzPh21wbRTqTiif/wDNzqXZ81BPZoa6+eOfeVtSBdH8c3FxcfY4kyoSJmNw3uG8xyQJUkZN09Th3lo7c3SfDmpJklCXNhL+xoKQBCEBjwyHGiuYfte37k+gHXJEbH8iAqgkJcuy2SQFoAkB2aaIpoalwOx+Oa0qO7R2eMHT+GLxeSGEoy6y3wj89xDCNwkhvrF9/pfutILa1ly+cRlhA8LFxkBegFCxB+ZiL8GIGoLj9f2Mh1bP8qY3v4k0kShn2b2+SaI1KgSKgxqzuMDiao+zrznH3vg6Ozu7uLokkSnrustwcICpYXxjTNpdpNSwWTbsVg6lUox3dAj0G8H4+h42gSGB2lUEAQ01GZKlbsLFCxs01rH//IDaVyzYEXsP3mTp/BbygR6BCfhl+qHb6oLkzKMKWv4xbZJ+UvdzSrRq+r0e0xcdTSceee22/k5Hn4cwY0gnHeEPIdu/WxHCrdkAL8LsdQmxNyYt8Wkb/k2NbwPgfYpEEooJN5/+BINnP85qCs3eLmXqCQs5Ll1kUhc0w9h1IssMgejnp41EGhO3JtrUuTAtt2g4SazEdMfaxjkvBvcNsfLOYdKExjagJFJI8NGPwySxhUW8WGQ70wpopWPvJNfQ3n/i7KrtseQJND6GwxOpsN6h8hQnQtSYBDCpoZqMMTo6FDe1pWkqalthvYsDU5Jg6xF7BxV1VdDrZ3SNZDTcJu31WDt7AakEW9ubbO/tcnbjHMZ0KGrHytoauwdD8l5GVbXC1iRDisBgdxtFJAV5p0ddl5hEIelQTgqqSUU375OmOU3TkHc68ebrAolIuXjxQSSSNMsZTsak2kS7AetZXV6h3+2wsbZEIhqu37yM6vbwHozOKMqaXneVi2c1H/7Ik2xd2yFL+/zkj/wA1kN5sMX1Z59h9YGHWFnukZiEpizJlMTXJa5p6OQ5WSdnMp6gjULKDKkEAkcvMxgRKF1NLTwIT7eTUBQFAovRBqUMRdv6QynFaDii1+8jpWF3d3d2g0/TLlJrymoH730kYFWDkRpfxfSJlpLGO7RWBBFQCnpZhpIxuqakwdkmitXTLiZJ8bZCSUBqVCIZjQsguy/ad4QQ8HgECpN2CB50ClI4ynGC0oraVigERqZtT73YVsWomA4UUhGEwrfHo6TEeSirGq3UYY+xu8DtWizdDbqv/1ySc59C8fR72fzev87Kb/16zNLGXbVTOvO7/xrV5Y8weerdHPzs9/DA135rm1b8PzGrF44tW11/Aff2EDj3B/8BQie3vHVq66VTMB6PowhdxUmgaxoa29Dp9dBKtdVMkcikaSRqR92g6zraYuR5HoXsRkcNYJtKnLYouh2xmgnNpwOfkDFqFQJCt2T6SHRnGqGaluM759qy/cOVTknfK5w2egfwue3jfw38NC9ArKy17O7skkiNbgdELyOxUsqzsbFBkt4EH3jooQdR44rRaECtHDvbW6AEy4sLpGmKc47BcIBIFQ9ceIBQFVy7fJliPCIUJfVwzMFgiEk1w9GEBo3s90HEsFvtLQ6PIk5gmmaMtZHcBtFSjHZyE5ynmBSUtWV3f0LtIdvaZn9vn+H+AelwRNZfbK1iAJEcm/ucNIU9illVW1sRfNKF/eg6XhJLnqYST33rtPWd9lo49XVx8vEx0tdG6USYjesEi9CeyXiH5579OFevPMP5M4v44DHmPCGUCNGh2+tjQ4IxOcZIksQQ+2Z6pJruy7ETfFveNIsPvsh59/1DrLzHOT8zOgshYFqH7GlVihQSrSVSxOaytq4waUJdW5RU5GmKbytspBII58nyDFuCdD6aKYaAQGCbhixJoPWUCSFQ1RWVrbHB47AEEZu7egdYRWoWwIKoK5RzJEoznni61rPY7bN65hwHB3scTApWzi4TpILEsHruHEFEzYWW8Qv2LYE0RjEZD8k7iywsLHJwsIfRBpc4rHMkiUFpNWvmmipNp9cjhIAyhoODIa4lmYlJIfNoJcFZEi0ROAb7QyaDA/rKkCU5RiqUDxSjIc1oDFXJ2eU+xjtcFaiamp/8sf/GF/3OL+ehxx4jzzOUNtQuoBNDWVdMigJjFL4o6RlN3utyc3cbFyxGGTpJHs3cqhFaJEgR2G8NEtM0jTd44izdOcfBwUGMdnnP3t7eLFVS13VsCVJV5O1Mv5xMoljVGNI0oaqjYLjb7VJWZbyZeNcaAjoIDikNoMmyDtKkVHWJUYIsy6hqy3gyieaJIblrB/pXAkIIrJdoJeOEQCmEDAQXMCYl0YraWWxdEoQlTaOxo2hD5LWX6DbSG4IHZ3EykGRZLPHPcjx3p7G6HdLzr2fy8Xex+Fm/m+LZ981Sc7dDs38DvbTBwlu/DDfYotl8DrO0ccfPHEUIHj
XXFzc/Pw+XjvH7RrpwLrVIidXJtt275zRRqD0gq775AkiIFx6PijP/ljdod7/tbf/Bs8fvYERY09JIqqRkaPFIKQLKMfceQCXAlB1SyRKUzuN4OUFZKS3HxOiBiJPtB1jm6IPH76lGE4sHUeI0tmqyfcHg7EMNCNGXPjBbztPHVKjF1HaTNsdHlZMW4G2FmMLhEJkjGAh+mwnFwghRylJGRCAbNSsjSSRsHV1SVXV2vOLtcszhYEmVEHR3/AhZHNeKBcz5g3Bre55aysKFLgzcsO0xTMz1fods7b+wPnTz9Gk7My++g57AaS1JRCYQoDSmBTltmceqK5sDrp1uIk+BcPrkwAp44En0hJgSgRsiZFiRAKhKGsWoraYSMI75HxHeT1/QNRCNnZK1I2OdR1TVmUNLqkVgU6CoRL6ChYNXN2U87tr7O+M4VViBGZcvhiECGTpIVBJIuIlqHbE2NAMEHtQt787NhRzZYMw0DdNKjCoKuSsR9zV6IosvtFwtgPGFWgigXRgY47Dm9vWa3OqOqG3nlEyDTW4XiDGz11NaMpC5RKWDSH/Y6BRJSaumlZLs/Zvr0muCPrWcHxeMx5dN2ON29egTSMPiG3d8zqC4ZacPnRZ1TtmsdS8fJP/pC7+wPNbM6rN69BdPTHLYfdnpmKlNJTyIiyAf+242aw1EJSpgEOd4DFDwOFPbIwiaoUuLuXyO0d0o1UeIZoqVTEJIeLEdfZHGkiyHRoIfnwo2f88Z/8a3746TPs+ARJRIuADA7pPPPC4IInREuhQ2asDI5ZWSNFBNdx/3KPHyyIgFGKvjsyHDvKomJ0jrvNG2azOSkKMJJhhDH0JJGIY4JKklyes4do6W1ACsX+bs/B589ThEB/POQHWIpIaahkwf7tLZGIKgy9hbvNgb4bWDcV0o7oRtLMZrljJRVjBJEMSpT4cGS72xC9INbxQdD4Z63JEv5q+ve9EOKPyMTo/y7ZEQW/gegOKZiE6Tl4PKVAYdTkpq/wU2c3poh3gVTVxFmND54Rj9GauqwpioL9do8N/uHBAmCdzTrElJBE1CF3QjQ91aKEJLH9ga7rwU8ieRH5/sdPWBYFP/x4xlpUFEoylJLwWUX54UfUf+MN/h/9Iz4T/5JP5TWvdgM/7UZuC0ETJetmBkXFNkTerBa4i0vmuiLOzkl398i3X9M6R+g6wtsbOFvji3eFCvDQ0SRYjnvPsyeXk6BYIFGk5B+4RlnEnDUU3nuOhwNSSpaLBZ1W3wJqkoAHAXDW2439wDD2TDa8XIj4gK6qdyLXmKZukJykDae4DJWFzS5RKomzI6ZqMDJNwmf5gLk40RHkex0iIfPfQXCKu8mZngBG8q6bJHhwtCkBkHLXSeaumJACNVnJ8xhQ4tO7kWBmdk0vlWeNU44gv7Zg9y9rSSk5Pz9/cPT5SU/7ve99j4uLC7788ks2QuCs48StklIyn88Zx/EBQ3N/f48xhtVqxTAMD9eJVppxOgTHFNnudjz/5hucG/jB9z/De4+1llIaos/xWUVRUpiKosiaocOwRwjBatYik0QSMUY90NiHsacqa0Ax2vx3quqa9WrN9dues7Mzyt2BcrFAlBVBSsp5QxwTq/WC7W5LUUmGUXA4dqweXVBXNWlvH+psbTReS6K3WSPoIiIGNFBpSVlpZPBUhc4FNXB2fs5yvWJxtqaaz+iDwyPY7ffY/kjVtHz2gx/QJI/d3HM1n3P/5g274RuaxRyzWqLaimg0FLmpUc9nlFrip4NN1TaAIPjswpTiTyelna6798fkYQpHHoaRGCVlVWCUIJCds5KpG9nUaGMIbUN040N38/Q6JzjsbZFzUz/55BOKoqAsS9q2pWka5vM5RVFwOBzo+x5jzIPc6Fet70hhlej9iBIJJwNWBnRVIG3CjRnUprQmTTmBLkwQMSkRUhKBWdNgigJdGPpxoDAlWhu8D7n1mWJmxRgDQSKrmrHfZzFoYhJy5nGk7Tp8sGhV0HdHtve3zNsShOS4uUPGWY6HkR1CGdwwUpgSow2ffPopz19dc339hqJqaBcVx27H+dVjrHUkYL5YsV5fstne8frFc559+GlGRbQzrHNcnq+wxy1iJlAqorXAjxlomGzPenlO9D3j7obj/Ruczd0YBcQ00g0D0Y24cUBrQXCWsigodE8sCg69I3jPOAwUZcHumON2nj66YL2YQfTstxsOuw2HzT06eaIHUxmCSJR1ZmKZoqAfHeLYY3wkWMu4P+Q4CWNwPpPvtdYM40hhNDEEMkHaUFYlRZVPjk2pKSQM+x3OZgH6fLUkuIHN25c4rfBjwXGzo6xKfAoMPoCCbuwRsuQ4DLy5fo5ziapu2W7uEfOK83mFkIYQE6aoKJqGsmkpqhYhJDYOVE3LaBPWB8K/haZkykX768B/wW8guuPhdQEtxUPHg5SFngBS5+JKSSgCxOCyfTtlfaLSNaqSHI89m80Gqw111VCErFU80YSlKKmrYjotRnSfR2lXl7McdBwFShQs25YYMoV6cf4YiaIl8fmzC2ZFwnU7tLNoEthEai8p/vZ/SPGDT3C/+AXPvrwn/ew183GPsoGj87wicbhc0D95gpcFSRdwWRC1QaQRe3uD8TAcdhQxG1hOImvvPeM4UpYF87rgcDxSaJVF1kpBzOwnIRL6JOROCSMzfytWBd2xQ5SG9Xr90L04nYgfRm8JUsih5GM/gMiMr0xpz10QP31tRj3kUWokTSiF7MYTubIhkXM8vYhUWjyMPdRJSUx2pYmYpoIqj/qEzGO+0475sOFMhY+cqrLc/YLSmNy5ek+zlTVWIPTp/QA9GSHedwG+/893nbTfACL0N7iUUnzve98jhMDv//7vM44jl5eXPHny5CHiBngIXz4dJsqyfHgNMIRGOwABAABJREFU7z339/c8e/aMp0+f8urVq+nAEac8yXfIBcidlBByQbXf7SmNYqUWhGBJIguhy7LCR8FgJ6eoiNNwdjJVqDQVVgPjqCmmsG87Zi7ZCVrZdQOz+QxpLVQFVCWpLChFxRAG6rJg8FA1Gms129EhDRxHy6HP8ggSWW9JzsklZdebjBmtUJmSeV0hnCXYkaP1zM4rooCiqZmfrUhasu86dJUjpYIfuXj8mFIlqhioZWLYbBhSYjmBqTspCFKwWq84jANjSjwqCnRVgAJd6HyP+AjSo6bx/Z+1TtiSk8OzrjUOSDEQY87mlCLjZ8IEAC+MoW0macQ4MHTv516+0zbO53N+URSklDg/P0dOETqn7lXbthRF8TD5OuFcfp31HSmsgEmsLmNEJbAxcOw75k1FGHM7OhI4HPcP9uCyLKnqmsVyhZCSwWbt1PrsjFKX0411ekhlOvXpjYkpkiTMFgtcSgipEPlASkISYqIqS9r5jMN+y939WxZ1yWreElLEaINRgqOziNIgVMH17ZbO3eETVGXJy9dvqA89i/U5PiYO+y1SCp4+fQIIbt7esVhf0FYFP/3jb4ghTPEwJToM2MM9etVSF9WE2Y+0lYbQ4fpAHHXWjUy05HEcM5AuRazrkSJhxzyfPu73+Ybse1L
I7qT72xsePXmMGx0iRZ5dXbFsDYWI2P5Afzwwn8+w3YEYHc66bOkFpDQgJKVuKMuC5EdIgabUHCMIpR7eR6EUShm6bsTaSF03DIPN41yRQ0OHFBEhkEKkUHP2dxv67sB6OWNWC4LS3N1v2N7dY8oGryT7wTJ4iWnXdOOeFy9eYaRAjpbFwrKeFVRFIklPSInReuqiQpkCZQwhRew44HxgDPDFy7fYi0TVzH+tS1YIMQP+L8D/IqW0+zfEwf/W0R3v56Gt1mdIAVqJyXYsQAS00ZBETm9XgJREZZAyE7RDyDZ/bcAXBcMwEJLFR8V6UU25WrlY01pBDBPFXT50J7TOGp0Yp69JYupmCKIJpCiRMeCBW7dH1xJRKNI45i6as8SqZPfp59gPHnP48R13X77AvnjD3f2Gr9/e8cW2o3n6CKMMawwJRV9rkp8TxMeEZsHx9gZR1xRScdzsqa6ah4dsjJGh61nNGqRIGKPwKWV7tmKKZYkIORWRSSBSRjoYpVjO58SUBe6nOBPvM59KK8XYD4z9gB0GiIlCG3Ig6wQYVeoBOJpHjJ6yrOBUwDyI6Sdhs8j3ZSJQakVlIiJoTkOQLGCfROQ6h8fG6TAYYiIJMXWfxCQoJ7sQU9ZHRR+JKguRT5/fdCWCEA9sIyEyvyubdsAniMFnndbD62fNFiIT6bNm67tTWnVdx9u3b6fCJG+WwzDw/PlzvPe8ffuW4+NjjsVyjrquHxzjJ6PACZtxdnbG2dkZL168yIWyC8A0yhUKKXNqByS6Y8ebN9dsthtWy3kWQZeClCQpZXp4jAO77Q5rLWdnK+qmwo0dbhzph5F8/JV4PyJEJETPMHSURUHTllhnEQKMLrDa4IyhWK0QMVCPAuIW6QOmFoRoKRtogiCIQG+hHwKK3LU0MeBTyNRQAVqBjlAkQWMM7ZQb6qXHychssWBxtuLqgyfMz9b0wbEfe9qqRGvFcr1iPB4gnHPWNrj+yN3+gC0r0nxOKApkO6e9egRlgZeCkAT7oacQidlynnWPLuvXjMqHNfHnyJZOusaH/b6qSDYfqmLQOQswSZTI5paqbFjOF7S1J6TIOBaU5l3wNvDgwI0xIpHEFCnLd3iO06FJT67OU/7gSa/966zvSGEl8FEhvYcAhdNEmeewiRzfYD3EJCnrJmf6jJbBOurZAucDRZlHc0KrLBJ1+fRZ1Q0h5qy8yRnN2PeE4DFFydhZnEskISfLbKKsa4QSjNZRlonl+gw3QS2rqkaUNYNzCAEXV2te3+15dXPP+vwR3evXFKVhtljhYg5aPXQ9xW4P0XF+cYWSip///BeUZUNTlXz5i5/ihyNSKY7be8r1imZWUZdlnj4ksM4jFZjC5H8Xkeg9qIQfR+qmptQJ7wbsOFIVJceuY+h6UsgP3ratOBxGjJL0xwN27NludwSb59zzuuRsVqIJjMeOqtCURlPqBSnYCciq8ulYSaTSWJets0KICd0gmZmacbQ439HOFrgAUhnads7hcGS3O9LUDceuw6VAWzcslkuSjzhv6Y4Htt2Boi6IzpKcY9vdst0ds8MtGK4PR2yUSNNy3N8x2oBzgeAdy0Khk6UxBd4NhDIjJUxRMrpAf3dP0Q+4KCjLCqVKXr6+5eubI/PP1tiq+dVXrBCGXFT9H1JK/9fpt/+dojvSe3loH3/6WWqb+oFHo6REqDh1WAUkl0/Rk+XehzgVBwEbPMImEopHT55NuIXMJ3I+YHTeJH2ICJEfLkq/wy0obZBJoGQuWITIcMwwjaySVmg0Ds3F7Io0dPTDnqELHDd7hIkYLZn3BulntBXEK8veQ4fk7u5IICGPBqtGVKVROhKNolk2jFVDWq+onzzCaI0vatKhZxgGmqZhGLJlWitJURYslZp0UAmfAsXJLeRPTrrccUjTyO8U+aKEAPkuNujk4HPOMQ7DhEo5UBWZmp11mpOXKiWkzN1DZz3OWcqiZJLgniRPuQiV7xU5eS7I+WJGlDNiUozDgHMepXV+vei+zVNLCesiwTlUYYghx+sMvcufZ2GwdiQWU8B0mvhAKR8OEzm6KITJvJEghCxaF9P/wgSWzUL8fI/nHLyEUN8dfRXkzfarr74CeBAjX19f8w//4T9kNpvx9u1b9Mc7vPcURfEw+j4ZnE7FsFKKxWJBSontdpuLLeuoyiwhUUI/aM3G0eKD5V//4b/mbD2jqQqWq8/p+xwN5GycYlkM1nqUyuL4EELWcMZICBYhNQJNIjOwwkQAL4qMDhIicn5+SYyOaDTBGOqzc2qtKfoIQeIPeypfcrQdbVuwqubEomEmKkqzpRkTUhgcASMiQQmESjRlgdBQBEWrDNiACJHVYoGUsDxb8+jZUy6fPEbVBS4lmuWCejFnvO/Z73YUUlC0c0YBnU+kaoZeJNRxJJAwyxXrJ0+4HTzD/T3KlNzc3dMs5ngSZamoa41uCoTMvMmoxUMX/U9bJ8dqCGFC7AiaqiRGAykfcLTQ1FXLfLbIn2NwOO847MGP9uHPn1ZdZ4mE3me5wHq9BnhAOJVlyXK5fHCUng5fp27or1rficJKKMXl7/0u9988Z9zcMI5HfOoQSuBCoCoboo90LpDwCKnQxuC9Z384gqnQZZlHASGipaEfLdaOJCkoS0NEYMeBpm5IAkxRMB43+YbBMDqPLiu881jnMaZgHHqGwWZmVlUj/UjvIiEFTNUgCKAVH3z2CWP/S25vX3F+eYUuNVIazudLjoNlsAEXAqtZQ1UVbDcHlssVxhRcrM/46R/94STI76diJhdPg7Us5y2dDWz3R5arGTEkDl3HYb/n/Pwc60YQEWfHPFOOnq7bYxYSUmQcBoSQVEXBZrfl0dUls8GzPRzQSnHc56ytYC1tZdApEEZHZQy6MAQ3ZTRJTZQuu8qMZnAWF32G3SlNP1qCLlB1hULhAszmS/R0QYokcYPH6BIlE8MwgtDMz9ZoBAGDlDC6Mb8uiecv37BenpGsp7NHUAZd1twcHU61iKJkdxgYgfP1mma1YPf6JcSRQkqG/T1l25IwDKPj0I8EoGpnuAiz9SXBw/3tnj/52XP++n/03+GP2hnxV4jXJ5ff/x74o5TS//a9//Qbi+6QQlKXxYOGRikFKhFTBkZm3lIkOI+LaTJ6eE55cT6Q31Wj0VqiJhCkmmCXUimkAFNo6tJgjJ5GTnmMZabiQ06bkioUkpTHDAK0yIJc53NBN/rM6MmMH4F1njHv8kQMpmyZNx2l2QGK3o3Y3Vt293eU3/+Y3/vxD/nepx+zXq54+eaeL75+zu39ntEJUIa5LtHGPKBTXrx4kSOfypq6UXR9JpyHYcCqTN4/ueOEEKAhTo5hN3W8BWCm6JnTaXQYBkbnUTIXbTI1EAJaaoqyROpspQrJk2QkuCF3xWIkuEBZmixKnl4/d9c8RE9EEIRAhYFKBmwKRNWga/CFYvQBP9GnjdG5iI65Gz0vawQRU+a7JUSPjIl+SDSVQbUFr0QeDV7MZw8/k540ZKN1OJE42hEXBVEoRHJZqC4FCkmYnLMxBoRUJCFRUk1drF/vWf6XsVJKk7nmHb
tws9lwOBy4uLhgGAYqn8GaJ/6UEIKyzMXx8Xh8mHoMw8CrV6+4vr7GWktdljiXWYjBJ8qypDQFfrRIGen7npubG+7v7zl2HT4KfIy4mPAJlNTMZgu6bp9D5EW+roIbstEm5fGX1jnqLD9qsk4rRw2dNn+BBXTbIE0Wm0tpCeaIqh1FbDDagsk5tC4mjC5py5ph12ECLJD0SU5ORShCZF63FNFQ6BKjJW17xnw5w4WRqq0pmhpRaI7jgG4aVBJY7+j7nmEcMZOO+fbulv3mQEgSp0qsyIHmu8HR2cBms+c4DMyXKwKCK5GzTBeLhhQKCpkgCAQBqb6d13jSlFprH0Z3D8iTyVwkkuYkMUwxdw0LXVJXNYv5Ah8szg9olYPHx3F80FKevtcDnmVy/af3nrVVVT10rM7OziaTjHn4ml+1fmVh9ZdhLU8kxHLFk7NLDps3HP7gv8S/3UwnhkRdtVSmpqxnHPojhR3pDjtaCVVVUKj8DgtlJhBlxCfLcjEjuAEXBmzMD5Auwtg7knX4KHFSU5iaerbgaB31Yg1SEIasrZILj2bKKTSGw/6AKiUohRstZ4sV3kakFIgUcIcj69kVsqkmB1s+sTz/5ReEiznz2hDdQPQWlObFN1+RvEVKgfOWZZu7Jdc3R3yQHP2eu/s7iqqkk3k+3u33qAj7/UjZaAQJFcFEsD4SkuDQjcSQQCqO1uGk5umHT1ifnzOMlldv3uJjYBCSH//g+5yt54gYCDZgjMQIgSAXkUIkhBIYWaGKAqk1hSxQWuK6PeNgkbqkni8JQjJsNigEg/fsx5GmbRBKE5Jmd+jYbbZUlUYZxePzR1RFydvra96+vaHQmt12R2ct1kWGkPBJ0sc8Nimbmk8++S1++XrLz37+Jc5aRFkxvnrLp+ctUliEhm44spxnqv1h6+ncgaPzBKFJ7oZGSKr2OXqx4m6AT/763+bp5z/mD1+nXyleB/4O8D8E/kAI8c+n3/tf8RuM7hACSq0eKOGQSCIxjANDPyCQaKnwQtCPPVIIjBKTe0ygZR4vaZkFz9Lk7LST3iSfjgVVoRDEDEWcxNpGK/SkhD6NCEmQokcJmQPPs8s5O+liwHs3ibY9Rhnm84zieHBhGY00ibYpWFSaMljsbqTbbdnvt6zXKz77/HO8i1S7gYvLS1AlX379ikIXNLMWOY3hTtiEvuu5uFihlKLruofNMuuvygfH2Ptk8fexBmoaa3fDMEUBOZhMNE1dIWJ2YEXvkFpQlxlPYUrN6EZ8CHR9R1U1zOctMYpsoJDqnUbqpHMi669OG4S1liHuWZzNSEKRnKdIkarI3aLMRJq0UROAkklLomTubD1+nPPUjsc+60tCVvQYLR9+zoyn8cxrg9QFTVVwc78jpIiX76JvoiR/tkIxm88ZRksSkpiffHy3VFY8bHonp2NVVUD+rE8dqtM66axOI8FTx3IcR/7JP/knfPXVVzx//py+75nVKwRQVYYUB9q2ZbVYsr27YxiO9H3P/f09+/0e7zxC1RmEnE5aLACBnkKEfXB5fB0CQoR8OCgMIY70/QGtygk86ug6j/eWrj/kkb3RVLM5yhe4JIABWR8gegqxoCw8PgWcgyAbhs4zDhYNaBeoUiSlzOtLQFNKztqWZbmgKBtmyzlXjy4IyXOzecvyYk05a0hKIo0miimiydqMPtIaHyK73ZHN/RFvYb/Jo0Kha+5vb9hyoCzndDbw+s0b+vMB7wN13VDParbeknyFkYlQSKpCk0oI0Wc0yqRzOn2u7+v98i/QOjtsU9IQDcSCmAq88BTKUGiDlNk4UtcVddXiXP4cHgwsE8/qdJ+cRoGn66RpmodnSFVVDwe631hhxV+CtVwgIOScbzmf8eSjj/hq8xLb2yw+lVmjY4SgkhKvs+un0pKqNOiioKxrojKZh5YidV2SokdLSX/ckYLHFAUiCdyQCdayXqBNQVM36LqmUAUhCUpT4g5b2rqg0on95pbeRsoyCyAvLi8QusD6yOvXd0hT8ubVa6IfWTQtq6Zi/ugCbQoeT4L4u80GjUclS99tGHqHVoJx7GhnNV3XsdttmM1m9H3H4CyzswsOxyPtxRUXlxfMmiyUb2bndPsDFkHXjez3Wx5fnOO6DXVhqKoWZQy7+x3NbEEYBi4//IT16pIkPIfjgZ+c/5BPPvmM9bMnGAJ+d4MKYwZrqgz0NIUGky3sUkqKpkGbEh8iRZXt/6pwCAydDTl0WmpkEnT9iPOes4sLRjvmyBIhePv2hsePn7Ld3VGoxPMvv+L8/DyPK2Pk5vaO7f0GpGYMka9fvAFT4HygrGsWbeKnP/8pL17vcYPDx4j3PdH2vAkHzlvDcrXMY19p2FnBzX5gd8xag8VSsVzUfPT4ije3O2K54NMf/ZBPfvzXCf5kXf/zV0rpP4c/c6f5jUR3CCEw+mTXj4Ckaqts+zf6HZQyOkojMabAaE1M2dWYqdOgVJpE1wZj6ocfryzfJRGQpg6LmDQ6ctpMhSDFSacoMlBQZ4lIRjOkSIyZAB9CxFlLdI5jd8SYnrIwxBCxzqG0oqhKzs9WrOYNV8sZm80BeXHG48sLtFKEmHLhfTyyPx6zLs5ZohqplXlwdx2PR548eZJb+ZMGYjab5cDj6UH8wK57D49wWqf4HlIiBU+pFUc74N2I0QqjBE1piM6hCwlKE1JE4alMQVFo2rrMWIt+oNvvWCzPCVIQosfZd5qNE3ZBKPlQWJ2+v3CW89WM/ehBCeS00ZRVxW5/IMREP2YdTghZZO4DaKWRSqJ1Zu8c9iMxnNAMELydgIoQnEdJQWUUi8Uc1pKrsxW7/QGbJNa6HLgcUz4cKp21VlJm7aGbQqf/DSL8X+U6bYbGGLquy06zqnoQOr8fY3MaFb6fLKC1fgBKv379muMxc/NOXKuqLGjbOaWpWC7XXF1e4oaBrtvTHXuu315zOBxQSuGCIPjMi8r61pEU8/0mpGTsBtw4YvSUxWiyNmocPE0jUUrjvGMYxulQlOi6PuM5yuzm1HWNSQKlA27vuDnsAYnzidFHXNIU5QxdC8p2Ri0F9jDmeBsRc3i7EayWM5aLOVfLCxbLc+brFbN5w/3ujsq3PHr6hGbeEklIpRjGESGzJlGUBUoqSJLbtxvevLmlP/Ycd3sulkv6g+X182uMkqig8BFevnoFHqqi5sZcc351TkweJZbM6hItDBTmobA8jWffF4n/VwurCV2SBCRJBkYoRNRoCaWpqMoR62PW75IQonwI6D51waqqYhxHtM3PiMvLSyAftrTWD9Dx07V06mj9xsTrf1nWcmOy2t/5SVSmJarIlHVhJBKBERpEga4L9iLlDkuKU5CjoWpndP0xZw2lhHWOKCCi6YaRwo9Y6xltJExQvOVqhp61YAylVLjR4YeelAJt02DHHu8d4zCymF2gVSbAmqpic7/lcOixw4Hh2FPXBc0sOxdiP2YYqUooI1g+WxNCwvnID77/KUoXHLuB0S4Q8hneew77A5vtBiENy4vHnJ2dI7XOcRyjxXcevTb89Bc/Y/noA5pZy2Z7j9c1B2uRSTDXO
luqJ2JyUVY8vbjg0+/9AGNq7u7fcnV1RnCRrhuJvqPUid53GCUpRMjhvdPFlWzORzSmyEiKwaKURpvslhBFjYgWe9yjpUYkkd0nWlHKCmJk7AcWlWHY3XO+mlHXBWOYsz/sqMrE9fV11rWMI9Y7bFTs9z2yrClnS97ebfjgk4/43d/9bd5cv6HaHRi3W44isRsjPiba1QwjLOvVBSFZhiC5P3S82fZYSrSe06iRRgTKUvGz61suPvwJn/34r7F++gwbwGiyk+c7cDjPrkAmNysTWsBTGIVW7aSLSZRGP5zSM5fFZMHzVKhmobUmxCn3TuWCQqtJpCyzQDfGOFWKE8cIsmievCFIIXLc0PT/m6rADsMk+I7ElF2ix76j73pilWjrirpZErxjs8naun/1B3/I829egpBcXJ3Tx4wSuLu949D12BC52245HA+gCh4/fcru0NMNHXr6WcdxZLlcUpYlWmdg5KmIMsZgrf1W6/59U8FJuBpCyF0qO05j/wElM4ldERHJk6JDEdFa4rxFJAtBI4LOfDyteXR+lrvDQpIiWTDbvwt6PX0/MYnUT50zKSWKwM3rr1GzM6TQfPrREyqVu35PL5ZEqXn55oabu/v8/UUWV6ekEBT4kHDjiBSJutRIO31+MRB9Ftor8Q4mut3vpwI2oGSklppCFVj7zkHn3ZgdVi6DaKOPxBC/QxQrvlWwnoqpUxfWe/+gEUuJh86sc+7hZ/w3u5cxxgdxshTpAS59lN0ktvY45xmtJUbH/d09+/0hc+RcwPsEKiN9lNIURUmKghgsx+6ISFm3pXTCuZHjoUeIrDlNKUz6Lsds1qKnQOIYI8KHjKKREqULVCkwzZzZ2SX2KHH+QBQOUyyoz55QV5aLIZK2lrfja5QPGK2RzjNb1Zw9OmfRLKmaltX5Gc1sBipimoqL5orF2QpVGEbrOBwOjD5QNzNKU1PWDUM/Mg4j4+jZ3O958/INw6HjXt8Quo67l3es5i1vuhfsjx29s6TLx7jDwPX4kuQ9qpSs5g0SOXWD6gz+nYLPTxqn98d13+4S5UBxoiClkE0qMUymkyIjJ3CMPlF6GI0nRP2glTqNAk9aKXObD5jn5+ffcgyexoHvd89+o4XV++svylqOgPl8wb7viSnzRYK3BG+RCOzQ09Y1InmMyg/2oiw47PfUVUU7W7BcnbHtR5p2TooOu98TQuJwPHJ7e0c/DDRVkfVWMYvQF4slRVGhTYU2BRjgAVOckQoxBJJNBDyqrBBTjMX9Xb4Z3rx8TbfvmM9rjJLUdUmIDhU0yY8YbShNgVISrzR1pXIkCXksMm/LKXG7ZL2o+eDpRRakVi1KC1CSw7EjDD2HveXm7o66bSmahs47Pvr0M+5nMyoR2N+8xhCww4Czno8+/hQfE5//6Efsh4Fu6Dg7WxJ8zxB6ZqUkRMdw2FDKQGUUOma7alFVvHOqJ4rJYQbZURMminfSFVGEXNyU+RTgQkAZTaNKnLUsZzOef/Fz2qaiiZGykCzmMw77Pfv9nqqq3gmHrUOaCkxiCAk7OJIpuDsc+ef/+o8ppKLbbTibVzy6bCjaJUXZMHQd1y9ecHu7oWwKbvcD973HyzJ3WKodjx9f4QaP01d8/lt/kyeffo96lrs4Cj+5HQW/Zrf3L3SdTtfv3+zeOwqTC/vcW5IZAEr6Fvwxj50AxKSZiWSt/Ts7vVQ50Ffp7D7LmWr5dcviRIuevrcALRVKCmwICCkJ3hG8wjvL8dgx9D3EnKMWiYSQ8CFztlKMHI8d/dHRNAuKZkeMgvlyxWVZoYxEa8VutyNh8sMsRoRMrNYrIopj12OtxRjDcrnMRYOUKCWoq+ohmSGlrIsZp/y4EwrhFIZ8MgOM44i3FtsfaZsmj8vaJme62Z7j/oARElMYCq0YxsBgO/qY+VjBKaqyRCaIFXihcN2Qw+Rj4HA4PBR6Wc+V39uTUxmA6Lh9+5oqSObLNYKAFtA2BbPFmtvdgaY2XF2smLUFUih2uwP7fUddt3Rj4HDscvdQTugGqTBTYfH9zz/n+fPnudi1ueusfJiYWDELpGMuJrQUhARJ5usqpSz4N1U+sasHAf5f/TppYk4uzlPxdOpMKaUe6PWnIuxklz85vJqmQUr5kDF4AkmXWjxs7NvdlnG44eb6Lfvtlm7oaetyOgTmQ4WgQiuBjRE3OEKIFEWJd5HjcU/XdbS1wRSGEAYOhx129CwWNd47hsFxOOxpmhllWeZ7vMjw5SjHTB73GVorY0GSBUU1YxwPxOQQqkZUK2I5Q2lPPSZGv6Gct3THQBCBolScPT5j9eicZbum0XN0XaAKjdSC83mNMCB0ztiNImA3G6QyOZtvjGh34NXL12y3B27v9tzd3rO529HvDnRJoJwljRHPyCEe2XcHlCnY39zjbYRKMww9zaLh8mI9HfyyZjR3xMW3OswnBt3pMzyNBaVUpGghZUdzijn0XKSEUpKqrBAqUARBiJoIhFg8CNCBh0ibfD3k711V5uFaAh6wCydpwYPG6zc4CjxdzH9h1vLm0afEGJjN5xibeR6bYUSnjFmQIaCmDcOTCD6HM2pTcnZxRd1U3G02rC4fMZvPGfsD265nP+w5HAdWF4+4KipciLx8+Q2PL+dUWqBToDUFIgiMLkAlks4fdCFLRiTbt/dYm7AY3my3XF5d0DvHz376BVIoCpFYXK4yqMxolos1ptBICaZQlFWFlBqlC0pdY10GOiJBi6zL0pn0xzCOhBBZNDWRkf5wQOmKs2bOWTsnfVDlCB8Su8Mh60r8yNV6RlsWLCtNcha13dANlqppefTkKUVV0WpDWZQM3T39oWPs9rR1i7Mx50SZ5kED0LYtqqzwfpweVPrhNCGVYRgtYoK02ijwUXB2fvHggJE6V/vOWmKIHHcH9seRs4srPFu0DKjgmVcloxsfLtZhGIgx0QeIWmSuV5JIkYhS8PbuHhy4oeMH3/sIISJ1AYSOw+01h902W8kR1LMVcqbohp7lvKFoBNsh8MmP/jaf/9bfJOoCFzyV9yjySEjKipOj669+JXTxLk1dCEEpMofnIRsu95PIBVTutHkf8jiQSR8RJ3jlRDTOOKREFJNVP5GDpzm9HrRNjfPjg94hW5Nz53Ima1IkOwabBp8cSudAWxECtj/irGOz32HqElMWCGmoihpRGFLVsnr6ERqYVQVVXXK2mvPhB0+ZtxVJFbyWEZxlsJG6XrGetYgQJp1JXnHSTGYGVC4GlDjR6S11kZlpQ9fjfKAtS/TkDFYiR/Z0caCYFUgRaeuCtikJzhPGjsrISQCf39siaPCCwIhze1yy+NQwmy0Yg8ePHhUjSihmzZxhyG5cVJz0VmkSpsf8PFAGqw0yweHuNU8vVhgpOA4D264nVnNEUXG+mrOaVZSF4psXLyEOrM9ann34Cf2+4/Wr16xWa0JK/KOf5femNoamrvj42SMqE/mTn/2UMQlEUhPSyCNk7jQiZB6mSEmpJ11NisQoSVWRMTQkjP7uFFanbsJ+vwegabIu9VREw7sRkn3Pqn8qrlNKnJ2dMZvNubl5y+3t
7UNHQpGLrcOh4+b2Bu8iZpqAGK04W6+oSkVdN5lbFUpCTNihY+gHAmHKwsuiemst87ZEK80wWoaho6lzZMph33E8DIQQcyB0WdL3HVorpDTsuj3O9SStKYWhahrEas39cY8pamQhKXSBky1JlTSLGfZgCfXA1bMn3L4V2P6OqlG06wXlvGa2WjI3SzQlSIlPHhETCkWYnoXCGOw4Uteabn9k63bIcc/N27fcbw68eXPHN18/5+76nnnRsGhnVFLTdQND16EF1MpgfeTl18+R9Vsunz3FRkvV13z0yTP6vqcoJaZQoAzI/Dw66axObs7cUXwvEPxhjH969uVfKUoEEmM0yBKdIlFIYoJhFA8H1LZtCCEHKufDZJYPzOczTryx0+FsPp9/q2D/jXes/qKt5fMPvp8O+z33m685O29Yna15bTQm5Ta0UZLgLDF6hEx4n4Vo2mgOxz3d0LG+esowDMxnLYfthhCze855z1lVMYyW0Qu+9/0fo0VHv7shxhE7HFFaMg7ZNXMYB3Rh8D0Mh4FhtDx++owxafow8Ivnb0iDQ9jIk0ePqZUikFOxLy6vcrxGZZjNW9Zn5/gIylQURYkmi9n7YcgREzEQbYdWhhA8bVWRQpgYJyJbZV1HGiVl3ZJUYDHLs9+r1QxnLd7nX3a0aJHbyhcXF9zdvGVzf836bImuDLO6RUvBYRyIzvHo4oKqqHh7f4uWoPyIEZqmrkFKUgr0Q5dHR0ISkVRlTYiR+WKJnwKVrbMZ3hoDNzdvgchi1uLsgB1Gjocj2/sN7XzBcRio2zZHZARLqcG5vGlXdc1sPsd5x27XU1aGmLL7ableYMqKumqoihrvshuROHJ2vmQ4Djn2ZdaC1ERTMr94zGYYWT9+hBSJpl3xw09+m+bxZxRna4bhliplZ5yWkqZqaedrwi9efWt09Fe1pJTUVckDM0nKvFGTkFI9tMlPX3s6TZ3+6YPPo/CQrfVRRAqZIxxGa4mJbHEWEiE1RDWNQQVVIRFCo2RmvKQYcNZyCB4lCoTQCJnovePJ5Zx2vkQGx2FzR9cdiaMj2cBxnwnnSEHR1CxWS3qXKMqGsTsyn7fMFzOuzlcoKdBKYmPgyZPH2CB58XbLbr9nnGKXVFF9+8SYTjTyd2J0IZhOoYF5W1EZlUnrRLRSFDprw8IYMlJCVyglKbRCpEA/7tFKMpstHizWIQSSVBRNnY0wzqKSRimJNAXzhSZs99jtHlNWU1exfBhjxJBIIrx3XeW/tzEFCEHwPS+++SXzpubDDz8kJPinv/8HHEdPXZV8+OwJH3/0IR99/ttgvuLVq1d88+UvkUIgCRgZuVwuaeqRGBNPrtY4bznu75jVhlIDE6zXJ0mSkoRBKpEF6+mkc82k9hAhiojQACH/nH/1t8TDej/r7SRMf5+Ivd/vqUPGIJzPVhQuHxueXX6IJ9IdRrQsKU2N3Y/0twdSyIiLexMJU49AxKxxstFyeXY5dcY0P/nx7/E7v/13acpHHMeMGikUmGBJdmS0Q+4U70fKVNPqM3yv2Vw7tJqjUs3d23vutnd048Dlo0eEcmTrLK6UqHrG5nDMSnhdYssaaVruIviZxn9yyXGvmJ8v2dwd6Hcjs7Jhphv8IiCj4XC3odSPeSYeY8qC9fKcplhQVmtSWXKMlhi7rJ1zoLx8KDCEd9hxpDEV25u3GGP45ssbnr94gZASNyaGfuDQbQh2RyFKdKNZXEb29yPESEyC5HNIen88krym3yikOOe473n7tkeWV6RiTW83nK8LUgg521BqQhJ4n7KjedL9pZQQURHT997TYCmkFlmLHSXD4DGqQMaQi2qVqGpDWelJApDHughDjBIpQzbsFNlBXNaaNMkiYnJIDEVRTROARE69/dXr13EF/oVby0mwu7/l5pc/Z/OVxZQid/qmgEWMyhqpQpNSQIocreG9hyhJSrPbbmnnC+5evUAnz/Vuz+A867M1IVi8PVKqmnKyQicpGFLEu579PtHf7Dn0EaELXDzgdjsWsuNqXrFe1DC/oFw+5hd//CfsXr5isdQsFnP6w57l8ozF5SOCTChDPrHoCp80sjCoskJXNYwW7wJFkd11uiixwxE3HInR0XVblJZUOosove8RKXDcHxEsmJkL7L3DTVX0aEesHbHWceh6lDEsFzPs/p5GDJhZSaFH/LBnXlcMxwM6RC5X55SF4f7ujhgSV6sV1998wfnFJe3iDBtHRnckYklJE2JJVTVYMrsqCJHDat2Iycddjseey8tzrB0Z9xvsMCKngjGkmIF6bqAoW46HIyF5tE6UOmGJdN5h2op+O9CUGmFKglDUyjBfL/EBfvCjz6namigK9odAEiB14nr7S8rHn7BajqQkGMmQ0g9/8lt89pPfyfE1UqF1gSkKnO1IQeMRGKPxUpI8VMFlF1z6buwikgyrzM68LCyGdzqd08adf4lvndZD8PT9yHa7y/FFgBc5Od6SKMoaoTUieGKIdMOIkCVKiklrllvjwziSYpwYcJLdsWOz7VBGMV80vL3r+PjxGu9GrB2QJAplmJUNb19fc7/d8tmnn7GuG0YSh0PH0HVsugNKwKeffcLF+Yrkc3xIUVQcuwGpchD6MHhcyJ1UMRWQpxVTIiYecsYebPWVIUaLCyPtrKGpC5LUSCL77Z7CSMZhQApBVTSUpaIwAm87YqMzkX4SMp9OykEphIIyFfjDnrpqWK0uaFZXHPd7Wp84Hg6MdkBU+TPLtO5cRCGzFkhMgMisnZMUJgdLCzxf/OLnVGVNUZacr1fE+wNvbu558eae/+Kf/xFX52c8e3zB2eqMRxdnNHXNdrvBu5Hk+xMLlNmiZLPpefX6OWfrNb/zWz+hMDW3mz3PX9/w4vqGZjZnOWsyIFbA0B1BpgwCTTmGRykJIj6YF74rK8bccXifUXXShD6sKZjdOUdBidLZFHDYbuj6nrppcLe33G02DH6kEAojNAKfuYfaUDc1yQf6Y5eZYikhFRSlAZHoh57RdRlO692ET/DZPSvzZp+mCC7qirqsKSvF6AZ2hz3Be2bzGXVTPTg4E2LSswliyvFIyQu8zJ1lpQsWzRlqVmJDRq2YogQhGIcRhKCsa+QaZu2MaoJo67JAVyV13YCShDE7YLOBIxFEQgqww4hSGikkb16/5osvvsA5R2XOcT4f+rtjxzAMSAE+enxQaFNRlwLvBvwYSX02Ai3nJWMIiJBZknVZQIw5p/bYU9RzUAFilgOczCin+05I8Z4kJV/fMU2/8d6QLKXIOPQ4F6m0oTAFSieSys7yfN1k13N+7WLqYlkQPOghsxtT5VEjJ0exeNCSIn5DhRV/KdZyQd8f8fbA3ZsvKbVHDPnhrbQmkbDJ5xOE93kUSNYCCVLWYM0zzKsfBxTw5OkTTJnFZ13fE4RhdCPnhWYcOra7nsOuI6A4RMFu7wgURCHYHbb8zqdPkHVBvZxT1C1RGbq7O8oInzz7gJff/JJXQ8f+cKSaLTg7W+Oi57C7pzQFwQ90hyysLquGOJuRnMO5DCY9dtsMNBVZq9R3OTiT5PHWZX2KzfN1rTU
iJoLdgJI4l4GExhSU0dBqzXq1pB8t4zAihOTs/JKiLrPQuKkIwdENR9pFiySy222RGp7N1+zvb1DaYIFSgbOO7nAEEfHRUdUF3fGI1GrSD5DHGiJrdLquy59TSmhTIWdLkD1GG5SuiFEQnGPe1lSFYTN0qKnti1B4H1FGIYTKzk2t0GXNEBLz9TmmblgsV5RlRW9BVjWf/tYPkfWC28MR2c9YL89YzJcc9nuOhy2L5Yrzqyvq2QznQxbiJrCjRZkCbx1IxZAGqqoAKei6brogf6175y985eDemEGOKre1U8oEeTWRv513DxoEKRW5G5ImDXOmjzvr8T6y94ljP3J9c4fSJUjFzEjaumYIkdEp6rLID0BdTJtXNpA459Gjww2eWdOwORxg3/Ev/uDnrBd/jVolfLQ4n3VQvXUoXXB/v+UPuz9CJnj2yQc8fXxFIQXrxRyhSmazOcvlCqLLQmAyo253OOJDRGlDo0tiDPjwrqh66P6knKLwLZFrEmhtcKPleOiyhorEbnvP6xcv+PSTj5EiURYFbVsybxuUjDgrqEtDTAoh5MN4SEzXeSZYF1SzJZdXH3B2+Yxt5xDaUrct6/WSN29eI2QiJp+ve58f+qbKr+fsSFnWaG3eacTKHPMx9AP//J/9PqvLNc+ePeNiWdK2j/nym5eZv3TY8PM/ueZv/3t/jWVb4tzAetnSzB4hhKL4+Ru8D6jSoCtP1/V88/qe4D2VksxWCxaLit2hZLfbgRuZNTVXjy/hbIZWgrvbDcNgAY0xBVqbSYf15+Cx/5JXCOEh5+80PqqqiqurK47HI5vNBkO+RkbrSAGkD7y5ueNue49zgfvNjuPhyO7YYVRJW7UoIRljB8lgtEIkmTWCJEabmUhV3VI1BdZ3bHa3VE1+xvbDSIiWEB37/YZCK6pSE9oKUj6ESgGFKSaC/5GCgsIUFLrIHcNJe3caaQpdooUgyhKRFEYb6rKinRlWcs3tdsPurmMcbIaSmpq6qkjSEE0BIWJU5q+FE3U/Bvqh57C5x0geRN0pRupZCxGczwfMm9s73ly/5fr6midXnzEM46SFHVBGM18sEMFSz2rmyxWrxlAazX6zpUiBy7pivppz7Du6YaSqZyzbGSJFjocD+/2WsmlIhcP6iExiahMKEiHrBiUZVk9+liHJcQEiawqzdDEbNIzRxIn7hpBTgLp4iCh6v8t/0tRJeUAIcg6tECBOz1aFc5kpl4v3f7tN4ddxBf6FW8vhFDhrwR2J/QGVEjEZtJQZsEeiOx6IQwZhKpkZFTElCg2kzDKxdsT1HeP+wKMnT9ntD3z4+Bm7QwdEvB/Z945DH3BWMjvLcTP7fofrPdY7Pnz6jMWixo57Iob72z27/p7QR66fv6A/HCjrgna9oFmuma3OsOMIRNq6JYTsJioUGCGolaS/v0NJmXPevKAtS5wdGKzj7vaW+bzNI4Qg8S5/mOvV2bfQ+lrmzdRi6X1PGAKH0KNNps6Xs5rOW5SuEEFR1iVllcWVm/tb5vOavjsiBVnPEjy3L76iKEva+YJ6sUAokcc53qMLRVkUpHDKzwpY2+cYDilJQjJ6SyTRdQOHY4eSmvm8pZpXuGGgXTQcu4G+2+Uw5tFBkhMWAFyISF0wX64oJyE20rA/DlRlxQcffMj5o0fcbPYMXnD5weesHn2ClyVjEtSN5nf/xt/C+0RZVlTLgbn31GWBSIngBVoapFEc+x4tM3/M9gPFbDaxcCKFNsQ0Oai+A9ZyJQWzpoEJgZBz6QIJKMticjRJqlRNovN3gnfvPMQMR5y1M7QucNbhQk4reLqecXd/YLM9sLvveXl9TzQVo29Q0jE4x+PliqoskEJMhbyjKT2NqbnZHHMhoyAhOPYO0wS0UZSlQYkae+hwIeJGz1d/8oe8vb7m7/39/4hPP3zGuN9idEE1W1PXmS6f4bY5ZiDGhA+RSE5dOAm/3x/RvrPQR07xFCe3HykhRUFdFQTriEHmpADgbL3OeYky0dQlRkeUDMzalliWpBgZnefYHR8KVmMMOUlIIHRB2ay4fPYZdXvGffeSwQYeX1wSXU/fz+iROCdy2LiQjKNlfzwyX8xQWuN95n5JXZBkpChzh6jwmkPnuL55zf3Nc6pCI03FvNBonZg1DU3ZsN9ec9xds9tsqJoZqmyJumJwLc4Hnl/n4PMX1285HnvWyyXlWU1VN4zbO9bzmnlVo8oSN/a8vX7FfF7zwbPHICPDGHj18i0uKPa7Q+ZqfYd8ge87O0/Px8ViwdnZGVpr7u7uJjxEIpEISrA97Hn1h7eMPueqFjbT9dt2zqxuEDHRHY+AZNa0CMEDh221XHO+XnC+XvPBsyd88NFjqsaA8tRNxuPEaNEaxmHg5u0rgnfMZ03WXXlH09Q0bYOSc7T0yCTR2uRIFqFyFmeMecyeBGVRIVmidRaspyCoTcGiKSjKRMQDinAReb5/yeHY0SwyzBSZkFWFiHG6712OGEuRoRu4397T3d2zalqKVnK833DsO87OLxDLxPZ4wHnH/WaDVFnQvtneU9ct8/mK5XLJ+fkZIgWGwwZDoJrV6EJSpUh3GJi3iqury+zSLyqaqqasaqIdGQ57VNVACiQi3keGIRCTxxhFVRdonbEvuYAKJBFgotUjpvxDIpzsKCIjIqRMQM7AjDHCxJP7035lvVZ+tpyE8/HUKePbX5uxC+o32rH6S1lVVdDbEV0UhCELACURKcGPI0ZA3/XgA1oqEoLgPEVhUAK8tex2O7QpOR6OzBZL1uszqtmSsp5RLROz+RI3drxufsnYO6rzgvV6TR8s67LE20TT1nzy6QccdrfoMOPmzTW7+x3bzRHtI5UyXK3W9NFTVA0Yk8dWzmVrs9K09QI37KhNhUyCcXdEIYkywxO1UPlnCInCaNZnK4IPDP0I0+lQkvDjZIH2nqZpKYpmEhQ3CJEQ0qO0YvSO5APL+ZKLZsZw7EjOElOOUhj7nrrQ+O5IawxuHCA4bq7fMBzuuHr8Q+4OPSA57LbYrkepHHDpXdYqCARKCKTSJCHRVYPShhhGxtFSVjPaWa7wQ0oUhQahiVN0UGJOECU3d285HEaqoiCgsW5gtjhDasP6/BxnR/re8vjJiqKZPdCR148+QNVrLp98hk2GGAJ+6GjLAlW37A9HUnJ5A5QlwzDSVFWOObGeSO5yppTDu5eLBW7iHmktWSwWeGdJjHw3BCU5I+59GnEiaz5SYmJPZc3Vg3uPCYZocr7ZbDFDTPTsHFCa9QXxSSIh8T5w7D2bfc9mf+D3/5XPgu/jQPFUYwqT883UO2G41IGzVYv3A8euR1cF++Oeq/M1wg9oYRgHS0iOcThC8kSywP365XN+/PnHtLOG/u4eN94DyzymkALhcmelbRsKU2CmbkNIgRCmsRwnErNDqTxWyKJ9/5Bzl9Ik5pcSU9cIBH3fczz0LBZzkoC6aZBaMlqL854QI6UpsONAUtB1e/rDEVCYeoE2WfUuTcN89QQbDCoJrj54zOff+xjhB/x4oDtsSC7ipSKkDNatZga/zwVvVdUIaYgCmMZFlUoIIs
JEfCmwMRF8pA8jJgisO+KVxh12HI3hRuQxnU8gt3uMqdj2kXv/PWIS/LM//CqPVIRESsXr+y1FIbm4WHO+WrGYBb786gWtFHzw4aP8OR8P3F/fkJC445F///d+wvXbHf/y/is6Lxit/6u5Df6UJaV8GNW+77y01j5kueVuY+Sw35N8z2BHIglTFNRNTVFVkARN26KU5rDbc3QWqSRG5a5toTTtrOHJo0sePTrjbL3i8vKMi6szikZTNArre0KyzOY1MY3stpYYLYWWBDfSH7f0fQdxTlNXGKXpukB37DCVQTe5sAoT2kLpfL8WugAUddUiVME4OAqpKEyJTBZre8b9wFm7wq4t4+4lwQd0qREyu3hTjARrJ9dzdsO5vme/3SFGDypgY///pe7PYnXb0jQ96Bnt7P5udbs7+5yIE+dERGZGZlZl2VWiCiE3EndIFhKSDRJYAmEuQFzAFdzYkuULJJobIyMjEOYCSsaAsBAIyWBRTmO7GperKiMzmoyI0+9udX83m9FyMea/9o6sdOSpcpXzeEpHe+111l57r/+fc4xvfN/7Pi+hd4TRc9wWhMT2sOf67pYpeDYX53zw7Q9ZVmdsNhs2Z+fltUuJnAI3L79if/cGlSZi9iSlmHxEHAOVNqimQ0jBwRWIcRpH3NCzzCVay4eBTMCHImcQAmIsOskTq+/UAWZOvCzFzam4AojkHBmnAe8zStel4FYalOCU7veu0/qXr7eMrDw7msm5rJ2n5IlT1+prbg3fmMJKSskUE95HVC7J9FVlCDEgM0VYGRN6fohIiRQjUlYgEjGVnLi2W/De+x8Ue63WiAjKVsiQkbrBCsPlo2c02oD37O6uEeM9TerLbJqez3/0H/Ho2XO2t3fcvr6h1Q3vXTxBjBOPr65YX15w3e/ZRwdVDULipomurgoYDkVbd2hhkFlSVbZ0RZYt2sxxO4Mvo0oraduOEGKZkcdMcCM5DKWDJQRTHgth3Ejk7JJbrGt88CRn0dlz3PbcHN9w9fgxlzPLab+/JXhPip5F05Fc5Pr1q0LlDp5pv+XiaoPPvtDRyeAjlSmxHGq22iM0IXiUMaV7IBXOBWQq3CdT1czYLMgKTyqQQlsTxp7FcslnX+54c3/gbHOOqZaMw8A43tPMHZXN5oyqqqmahhhnp48xDMPA8uyMenXG2ZMPOfSFQC6lYLFYzIDDTFYGqQ3BHdBElFHkWAS7bgook2YAnYZUQrSFECV7biiagRKSm+cA3z/dK+XE/nB864KBX9JQSVnYbjFGSqLKW1t5SoXhBnNIsIzEOEdBCFHYbikiVGa1MKyXFd9iw+qTVyAET59csb2fuL654ReffMbVo8c0XUtOAylmrK1YdDVtU1PVdYk/QaOrBUTBfj9gTSG6Gy159OQxZxfn3Nzes90fOb+4oO/33N+94YWQnJ+tMboqEM0MXdfS1BX9EIkJhmHChQkBeO9YLNqitYgOlWbWV85M01gAp6Y4AoXQKBTI8noqWTR1RptCghfgoyO4iXs/oWQZ/SQEOXqcG6ibBe2iJRAQWqNtx3Fw9H5LEBltoNaKYddTdSvOrh4zvXyJFgWimgrHkOA9n3/6GY+fPGNzcYnIiiATlZHURpKSJ+hE12qqVA4OKSViStTWIkTRoJxGX8ELTGOprUSK4uzNvkyT9mMEwgN/TCvF52+2ZCF4/viMvj9w8eiK9x6doWWJ/OgqCyny+PEFKY5sNgvG8ZZuGWipMd+Iw0a5Tp1E5xzAw+Ejxsjr16+5ubnhfNbHHYcBkS1ZwHq9eXierNL0lILcOccUPbaqISZkgq5u2Tx5zNOnVzx6fMlq3dJ1DU1jCNnTjwcab9ntJ5Zdx9n5Gd73QCwJCKJoJHP09Pt7xsMWqyuuHj2b6wNBTmVcn2IqhoGQ0VogUVhd4b1A6QajKpIoonCRCkpjPIy8+fwrnr/3Ac+uHnO4OzDkXNIYUpm8uGl60KIF59ge9kzjyG6/p0PRx9KVVVpzvjonlrQrpNRMkyeSOT+7KCDp5TmLxZpuuaRp2tnY4bm9uuCLT3/O8f4NaSrynartEPsdw7GES2slieNEt1ggqwojMkokvBsYbnwxbV1coZQojRJVMEA5JKQu/6aUZ1dzyoUxl98BiIp3MAiZ0q0SeU4dyOR3Ovp/9D46/bGT6UeIXJh/OWNMhZjD6MvfN/8bvsb1jSmslK2o25b+esTHPVI15KxwMdBUBjd6lNUgJKaqmNxEVhKXMqZqabQk+x4RGxAGY0t0h5wceEctNTJNhOTRUpYwYTJH5wnZsLx4hJnjWp6uVvSHAx997yNk6NldH3h88T51bvjoux+xczs6m4n7O6ROqOghwxhGRBIsVobHq0coFFM/EYbE2foCsPTDxKI7YwwD+8MOK2vqbkGSCTRUlWa9XuOGI+M0oJREGUuMobBmYiCMkaQESkuyqaiNoVsvmSbH/f0NKXfsD1u8c4XeLSS3N68I/lhGeQJ837OsFV23KptsHhimA1kkhC2jyJRLUG/VANniskFjqasFVdUQsyDrApgMoQh1RZaMbmR/3OFFAGsIzlF3DYGI0oJmsaZTZ8RXmnwYaaoGN43E3GEXC45TAmUeugjZOYbXv0CRGc0lujlHBmhay/2xp6oKwBE/YZXETb5oTZRCa0PdGRAR4TMhOuq6ohCJY2njx8ixL8G+JZj2T/86ud1OQMOTQ+3EWjltsCmnOUyZQkam/OreoWVLIZCqnGCV1rMIV82FczkBylkoKoRgsWjZ3vcFU7BY8PLlK0xlef7eBRcX58SYmCaPkpLdYeDm5jWXmwULaxinEv5sjJ1HiKWrEFPmfj/wN/7Dv8U/8Y/9Jb79wQf88P6Om1cv2L33jO7pM6w2+BgwWlNXlpwORB/IqcA8ixW7BOI2bSFtx3EqxHDKUXcYBharZeHPpYjVCqMt1kiSF1gz87jGicrqudPkqLrFAwcsIWmajhwj0tpyH5sGbRuytAhhGKaJcHdH8BOv5Q1NVWErQ726YHk4EuOBLDwhC4QyD+P8pmlIKZZRLYLKWKSqkUBtI7W0hKIpLsaPGDkdkwt3RzBNCaU0y4Xl8eUFh97TrBbw6dvNIiEIoRxYXRKMIbIf3/D6fs97Ty5oFbx4s+PqbEVlwGhBVXVkETC15P5wT9Vq/tzv/Doaw7++qP+UnoS/+zptoifGUN/3nOJIThDIIkSWrNcrpmnO0gwF1OmniX7mjIXZ1bxaLdHGoFPmbLVic7bh6vEFz549pqo1QqbCFZQw+ZH7XcIFBylxdXmJ1IK6rnj06JLd3Q39YY+SiuQcIkT2uz31dyxWl4lKDokpTZyfXzD1E7fbHcrUnJmWHDMpJASaHCVSKDrbIkXGGEltFT/9/Zf8+3/ld/ntH/w2H7z/IU+vHvE3f/QjhmFgc36GkJLRTZi6hHDv9gcOhwNGKhpb0amK/m7HMA6sNhtW5+fc7bdsFksGH1gu1yw3K549e86jx49Yd2sQgrrtWJ9doJRiGgsUO5P4SmTG/R1CKy4eP8UNcL/do2Tm6vKci80ZSMnxuCfXlvG45
+7mNYvLS4TocCETncfUNS5kJueY3IHz8yVdVzP5TF1bYs74cV/WOyVRWLJQyBxo6gVKljVcIsq+lPyvHmPPi/3pnmFmus06BzIl9WK+8/i6LatvTGFlteXq4gm7L39GwBYCe+nIMY6OnHKBeOb8QJE+bTxaKYxWhdEzAxHHccDYAg1TSmKriv1+i9aS/fYemRPj1KOUZHFxxWKx4OLiosyUp4lN03C8fYVKkkXTsewWPDl7iqkNYSrE99V6hbaGjED6iAiR2hiWVUdNS5gCtegwrcFg0cYiUiIcJ4bdFjce+eTmM1aXl1w8fVQ6W36iHx2H/Z6YyqizqgqkL6REpljtlQAlDHEaCc4j5lmxFdDvtyghQGm00gXomTJuSuQsccFha0ttDcehRFpYJZmGnroys1A6I6QuQNVYHugcPM4lhK6QdkHVdEQhiTHg4lj0bcHjBleKorome4HjSAoTtRFoEsP+niQVF2drnK6wdc2uP2IOBzabDSSB95F+f6Bbrgi+iCkP97ewrDH1Cik1+8MBrTU3NzcPG5f3jhDSA9D0gT5uiuLiRNPt+0JVHofhAQgIp/PIn35pVRYO9XCPn8YeJ6fag2tGSE7BywlILjKlMrY5FWVCCFQS80m/fB8ZM1PKaAW20r+09AiZOLuwrM/fI8SnhJjpxwmZBcEX+GXhmmn6fiAEx/7Y09kObQrIMqXZCi0y3o28evGaqe/58Y9+xOXZml///sd88OHH/OxnP+Pm1QvO1it0VVAfRidqaxC5dKGtFiBKcZJzZLHoMEYTY0AGjwuhEJsluJwgBlIoRclq0WGUxEuorKSxegacZlKIBa7ZNKAUWhlEhhwS2lTUi+I4Trol0PLq9YG2law2S5qFmTlgipwFLomSs6kMy25RUABkRp9IWXK2OaOyFVLp2f2V8DFwGCLLWrNqK5qm6Li8L+9GTG+L6YIVmHMghUJrxaoxPH98wfXdHjkVI0+B/fnCnhOKEDNCQhKZgxe47ch2+IrWKs4XCxaLJXboUSJirOb+3lNXFRHD4SDY32/p2mZuR38zrhgju93uIfft9EycoI7FWVb4R7vdlhChsjVaFnmJ1QKRBKvViifPnrFYLtDWsNvvCceB2miUzCgy1sxsJBGLiSoEUo4kCUJr1m2HMrqMbseRGCJGzVIPH0guQEjkEDkejrz46gX9oUcrjW3KhOPFq1f0k+fsssVqi1ZFQ2wbi0iC5CNWWZQSpOCIMqAFfPXpJ1RZ4vuJs8vHGGO4vbkpjEFKx6btWna7Pf3hQI6JLFUhnrctrp+I00TvPYN3ICS73QHnA6vliidPnvH06TMuLi8xsoCtm7aj60pupzZFl7q5uOSw3xZncAhkbQlKEaOjykWfVxzOkhoDKTINR1S/x04No24ZxoCQCecTIiR8yCjVYqsVMWdubg60Xc1mvS7vRc74kIkxoaVCqz9a+M+AWN52NP/Y652OFYBU88FavP1/bxtdX39f+EYUVjllXD9gdUXXbhinHu+O9ONIU9kSpREFIpe4BiHeBrHGEHDTiBIGpRXRO0RwVFWHcwMxFYtqVTcYJfDTQH/YYmQm++khm6upaxTghyNxmrAhsHt9zapaYJqadbfE1prJDfjoULoIZxFgjYZYrPEL06KCIo+aZb0sJOwYSVOkd1tySmghGG52XO9uodIsVwt2t7ek5FmvV1RVcakdDkfO67Py/soSglzCWQMpRQ7HgUqCFolhKB0X5xyTj8RcnEzalodIakucBbVN06JlwiWPVhWXF1cc7m8JOeEPe7KtEXUNKc6jQFnAb0ZT2xohJS4kFApTNWQ/YSWgMyYrjDTFCdIfHgKy27oiewHBYYUgKfDjgDKWyU1UlWU4ljgUbXTJbzQaIXURGTctYwyo7Ol3tzSLDSlnnC8ujvV6XQrImDBGPWQ8ee9nmGnJVTsej3jvS6jmFMmubMohhDloc+TB3/uneomHYvFUIIUQmKbpIa7lFLdweuCLvT9yivtQ6t2svNP3iPOv+aFoO4mA4+y62+0OGCNIGWLKhJQLeHR2F5aMPkvwkYurC1JqGd3A5BVVbbB1hVaW1XLNMAwMw0hlDcN9j64qfv7zXyC04dd/4zd4+njHi5cvODs/48l73yqC/LqibSq6tiELw+iLQUJphfcFL9J2Lf1hRM8jlxQ81lrauprHX5IUA1pJtJJYW8bzdWUYJ4+adS1V3aCVIQmDtDVaadRwLGNXWRFFzeQM13vP/pi5vt/S3jvWqyWD87gQWbUt3/3Oc5o6Mh1vqZcrnJ+IORKSx/m3GXU+pKKvEgI/j/T62rJZ1GzWLT44vJ8hrjHhvHuAikLp1tSmRipJ27as1itShOGmL2L3kGhUGZcHUnFHhoAi4oVkwuBTxMVMVQne3Gx59O0LapMZ/YTAMIyOu/2Bz794ifABEReE4P+4m/RP5YoxPlD139XLnEaDAJtwIvEnuq5l0XVYqdFCsGhWPHn6lO9973ucX16SlWAKnl988gnXn3+BSBGyQ4jSLXVupG6qYtqLiSyKdd9HCLF06l32TKODLKirljQFnD+SfCK6yHQc+emPf0rvRUEAZT9n78E4TkhZ1larLbWtUdqgTYPrfdFOGYmSlM58GHh8ccH5YkF2E9ubN9R1w7JpeQWMw8gQHAkwlSXmojdsqwZS4my1Zr3aEHIm16YUSXWFloLR+5LpeXnJt779bZ49eUq3WBC9R2hF0yyo2xqpDNpWxRQiwTvHOA2MzkFVIRYNJmhaqzFtU55PY9BG4bUkTgPRj4gUCN4Xd31VMQ6BTIT5MDj0iRg997cTwWuUSDRVGf+lWBI/pKjmJfvUUTo5huc80F85vnvrxCy/vltI/f1f34zCKidu3nzFuN8WqrOumaaeYfIFKVDZEuQ6c31OULgTUTelRHATWtVYq2fbZTnFTJMv9bIQaAFxGghTj1IwHHY0TYsVmfvXXzFaRddUqJzpb3fUCaxuONs84fz8iik7tv2OmDyoU7dA48dAlTRX60tW9ZrKtqy6c3KKhakTPcE7psOO4/6AC5Ef/eFPWb//lKfvP2U6Hqlm99d4d485WxcWx+aM6ANvbq45PzsjylQKMyNRQpBTpB9GmrqhrkrUgjWGEMpsPAmIojxcqhFUMAMPHd5P2Kqlqi3OTUWDYiyRzOjKxqNtBQKO/ZFpGqmamnZZoYQnp56YBXoa5jFTZDgOQCYH4J12/Wq1gjQhbEWYPEJpstYFulg1MzNHIWTZbHzwuOCpjCHEzO7+lni3JciKJkjaq+KoNN0SlfMcXD2U6IGUsbZ9uLf0jIEoC3HLcrl8KE6EMA9RF6d/6zeippqvUiiluVgqRU/JNOOdDeUtrfgU5/EQ8juvECGUDtYpf+td9k+KghQzMZaiVAhBf/Tzdz4tO7MLNCeEBKM1IcxW55yBEhlRiO/FJt20LbaqOfZDYU0JiRHgh4H+eODufsdPfvYJF9Yjc+awL9Ei2lRUtjhLL84XyN1A3EcilhATTd0wTSN+coiZuizmaBaspaksSWastBAjRmmQiqbtSkfLlY0q5zIq
EKKM/7NQZFkRs8FUIEWEbLi+cdwNE8cgiHnOKjx69v1tQXSMjht5TyLy5377e6w2Bll3CJWJccK5xP22Z7e9Lwu8UJiqbJw5RFyMHAeD1JcYU9x/RosZMZRRWsyusgUiZ3zwSKURSlE1CwYXWWw2PNYtzRdgk+A3rt7nbrtnP/gCBK0qcnIcXCTmWNAtPrK9veG+zogPr0jZ8+jqnOOhdCwePVrRdd/BCM3VxRl1/c0ZBcIf34XIOeOce7jvT1BH53qikYiqZrPc8PF3vsN3P/6Yp8+eEXJiP/QlQF4EmlqVIN+25vx8TdvVhBiKJs8opFZIJYrWV5WIocOxp6sbBIWob5TB6gp0KMJ0V+Juvvz0c1YXT2nbBqkElanpjwO1rWm6JU1dum1GFxiiDyUwWKIQOc9Ot+KQS2Ei+ZH9vePy4pzaKO4PDiUEQilqU4NSVG2DMobUBSpl8OPIerXm7PICs+jm17J08o99zzgOdIsFjx8/5smTpyzaDokkyXkUbWSRDEiJ0mU0KKWgP+y5vbtmv99TdQsW33qP2A/o6AsgWBfsjxcZaQxBZCpd9tskFePoUEoxTB5Ixcw2ToTrAzF5Jqcwo+Lmpme1CEUGkzIpSYKWSDp09ceN6v7eqqR/ULOKb0RhBfDyq0/o6ort7o5WRmxV40MhRE/OY2zFYXvHxaZ0JoZheAjhrOyMZYixON50hZKWOCe2xxQ4HvdoIYhuZDweMG2FyBGSR8aRq3XLiy8+JWjBqm1w+wEVYbNcU2tL8gFdC4yVNFWLi47j4UAIma5aYIVhUZ+zrNcYU+FTxGiJzx4hE0lGYvQM/ZGffvYZctmiH51zdBN5t+fs7AyRIi5Hxu0eT+Z42BcLu9Rsr2+QJDIJa83sCoQsDdPctUlITGWpkqI/9KjKYtoGWzccdve4wz3aVqQISnUsV2cEd8uh70txVFmSkqTjUPINpaYfJvp+oOk6qm6JC4nD9hVCGqqmRdU11jbUdYtKhcfjUolDWa/XaJHwh8NsG9bUumEYHZNPNMsWodUDvyX6UHQtAqw2ICUpR7wLaG0IwXP98kuWomL9rON+uy3Or1MemxBM3hFC//C5EALWWrrFAq3VQ2CrEKKMSUQJaD0F9wqhvhEwxKKhig9dpFN36VQYnX6GBx3VrME6FVXvCnpPX3P6vic9wTRND8L4tz90GXe8K/RMqbCyhC7dsRjLvy1nSCEilWSaMkLWKDkV40ltODvf4NzI6zfF7n129YzD7g5CpN/vuDOGel0hbcv1/ZHzfc/FxlBVLR988IymEfzkZ5/RO8UYTj+3RKkF0xSYJl+er36LthVZrwGPrUq4sIoJIxVRzE7a6GegrqOqyhimNgtcHtFGMQToh0TTWLo2cTwG3uwdx9l5mbMiCk0MAU2kNfC9Dy746OOPcM5xOI7c3d6g08B/7s/+BlZ4/PSCbVUxzPeyrSzKaHKGWgiELff50U2c54qz1ZL+OJGyYgqRRMRYiTWW9bKjPx6IQiHqBdOY+OzVPcaW+JAYG1KGVSNQuVjzfRSMPjIEzWalESKz7FoWXUt2B54/fYSpS2dEZcHz957hYuT2dsfu/shnL17w5cvXjNP0D/uW//u+3o19Oo3+3y2u2sZitMAo6NqKi82Ki/USLUqharWgaQyXF2estKBtCiC27RZ0iwYXAk3XokzJ1xNSlY6gD+yOt/T9SG3r0h0ciqs5uICWmkpXKKFQ0kBOBOd5/foNF5cXBelwLGtV07QYrdHSlPib0XGYJlrdYE3huOXkEWT2+y3BjXz7g+cMh56utgzDHiEa6qpmSgFrLNJotDFcXFyihUCGjOt7ztdnnD15zIUpUNXSEdMMx74gWhYLLs7PWS6WD2uoEMw4g7frRTGbJLSxNIsFi9Waqm5wVc9q1SAmRzoeCPsdMWZiToQYkZVGCkFlDV1T43JFSJkYwbuAVAKja7yLeJfxPuOdIFhFinBgR91UJYEiSiRFzsJcSP/SvTGjan7F3QPwzs/1Dwa1840orIQUjIeelcxoIndDz6KpsUphbc3UH2hryzQ6ttueqi4U5iwyPkxomYm6BLGKmLAxoLMsFlafMEIRUyrtUT/gp4Egy6knhoA77tDOUCOZtgfudwMKw8Xqgs1yCSgqozi6vhQ0JhPJoBTT3rFEsTm7oKobpC4CeyE1XoBQGYLDu8jxfs/uZscOwcVHH9DsBzqpuHjyDKuKk0n2PS76wvNYrHBu4tD3ZBIu7AsywGuqrsNn0CHR9wd0ZdBW47PHVBI3OvrDnjpUxF5ipKZu1txub4gkHj1+D+dKtyDEyLLrGNxEICNthVSGkOA4jqw25yitsFYjdYVtOrLUeJ8ZXSQlhxAGsmScJpLskcYwxkglMlJC3dTFgTN5VKVoKdTvh3tASWytEbIiak1E4ZIkOZCdQtkFS9thhWLMghgn1os1SRiCL9gJPwkqo+naDu+KLstIg3MThJEUJUobTFXhUkJQtGoxlMJKaVMyp74h5HUo3SbvPVrrXyqoTi6Wd/PRThlqp+4d8EvF1ulrijtSvTMmjO+MVd5akk9fU0askRgKbDQ/5HSVkWMpUBPb/cjioqUyNfubW1x/4Pb6FWEaqIzCNg1N19APA7ruuHryAc1qybTbI6RiP0YemcJem6aR1XKJNbq4AZ3Dx/zQOclC4LwnZ4/0jqppqeoKETJNu8LaGqMrtALCRIwwTK7ka6ZUSMtaIEymCGZU4avFzOQ1wite3R05ThCkpDGenCKjHyAllm3Ff/7P/xY/+O5z+r7HufL6Pj9/Qn8ckRl+6we/iVKWyf8c0uoBVIssC350EYUGIbjb7nj/6QVaW5pGkoVGTOVQNkxHYkpobVltLpiS4mY/chjmUNop4Fwg5RZyJvgJKcEqsFpBCgzjgJCGx1eX5BhQaUK3Gh8n/uAnPyX7wKvXd1Rdg6zKQXW5WPLr31+Tgkerbw4gFMp2WBJW58MCpZNazdFYddsipeDsO9+h7w9U2tDVNevVmsVySaR0sZECUxliimy6DqyhrRuUKVo8IcpUpGtbpNEoq4r5x7uSMjElpimVojtJhtFz6EeyC7TGoGwpcGxT0wmJyoH97Y7loubmGoTRVFU5YGpr0ZUBBVMo63e1kCSjSSKQhCflkYBj1x+ou45+f4QUGY89x1y6TyCoqprlesXm4pxHV4+otCE6jx8nztZrFhfnZGvmCUdAK40PnugDTdOw6DqMLvBQvMBNoQS3q+IUF6Ic0AKlkLV1TbtcUC0W7PY7BjLL5YqqMmzHnv64w1hJzAllBBiNzFApA7Iheg3C4H2mkRYtLEYVyUskMY0BrR2LRVfcg8YUxmKWGGUwOpNSj9aWWLCXSPE2MeDdYvt05Zz/GIKCmoctMzvv3dHinzBUfPf6RhRWIFDWsr2/xagSBjoOI6tlwRDkVNqgbdOWitcXB4aQpTASSpGR1E1LiJnh2JNyIAtZUsGFw/nIMB6QyTMeDix0SceOfiJ6xxgCOgtkpsy4syX5VLLujiNqu8V2FakRRJXQStNULXEIXC6vWC/OULYhSo1
QGqUskUD0DhkSeQqMo+PT/R0X33qfasqsleXy7AJjarTRpRBMknDY0w97hvFIJGAqxeRGpCg09hg1tuuom47gErWtiv08J3KBgBS7aiqk7CQy28FRmxXTOHDx6II8k6H3xz3NzCWJkyON4yxSnogxs14tZmJ0IBLJwqOrIthcdh31LCA89gPDMNE2HVWzKIuQlMgwst++KQDOBEpnNAYfPMH7wn+TAlXNESpjT1IKZRukaop4XlqEbZFVh2lW2FhIx1I5dCVRUjANA7KYdugPB6y1SAH9UE6EpmmZpgLIdBFizkghC+slpiJg1ZachwdzxJ/mdXL9AQ8F1bshoCfS+B/tVC2XS/q+5/b2lpQSXVfEpm/txG91Ve+C8uAUCfN3R+acCrfxOLLf9wzDRGVrhCjuWqEEUkSOxxH15JyUSzTK7d0dzhWqs7EVqmpo6go1jJxdvcfFk29B1bJoRrq6QsaJKQosM8ZDK54/e8rLN0egRFyNk3voOhaBfJlG1k3NerVCxIrl+oyMIBhNmEakd8Rc9HbTNJWQcaVQuiAPlK6YQuGDOTeC6bi5PzJFwegiUid+6zc/YrnoeHVzR06Jj7/9Acuu4rMXr8uJX2uqqqIyhvWq8L8Wm5Zf+953cVOPC19xvzsW2KYoi/Vi0SGknPElke32yJOrCxadYZwiKQ9IlQvROwYO/cDm7IrXtwf+4A+/JItSTNaNRslCixcSFl2NUqbAIX0ZhypVlciSww2LrsWKQr9uNFhbzwaFwJQnjtf3uPHI48szvvX8PZZd8wCh/SZcUgj0fI/lXHT1y8WCJ48f860Pvs3Z2Rl/Z7EgpcRqdYbMGqUlVVMjmw6nFEfv0UrSVDVKgHCehTaIalE6U0oV/pPS2KY4lGM4kiUoE0EMpNwzuZZ+1PhYcxi23B8cYxQIUdynqTak1iJig5iO+LtX7Ldb/NmCcTqyuLqkMmv0oqbaLJCtIchIqhKNzIRpy5YDzdoidGa3vUUox1F4Vk+u2N3vSVNExInBWlRVs14uadqWi8sLnjx9ymK1nEGfZcS/Wq+p2ubBLWfngqMSRUKhtUZbOztoKFzMHMnza5JSEeQLIRC5MCebtmZzccnZbs/BRfpxwLkJ7RPRauSyRi8q4jTig6ORS7LP4AVyucJULQLB1B9ZWMvu5kClBSF6lAA/jexEoF1WZZ+OESslxlhy9uR0gxCORXfGcEylgy5KxJHQbw0+eV4HtVFlfZ33n2IW0uSkZrTD22LsIfMzQc5fTyvyDSms4OzxYz7925+yUIk6Ze6GgYPIXKxXCMF8Ok1YW7D1AGTJ5AJaB+p2gY+JbrGi73uCT0xuJAuJ0RUpeIb9Dpk87rhnMpKma8jRcdjdUClDU3f4lBmPPRfLDR+89y0WyzWvtzv6FJhCYPIOUVkqpYtYe6HZdOd0zQppSiROECUcVcSIjpE0Oob9ns9evuLpn/sBKkFzfcSuOkaficFjLGhrEFjIkuyLhV5rSLFH5ImUc0FFSFFOnSKjl10pUFJCZ0GYAi4VJk9d10TvyDnRNDXrzZrGVazWK6bJsz/uUHOe1e56jxsntFRMYSIZU0i50aOFpO060BUJhbQNpu7IQpOSK4DQqmK13JQYDFsVq7g7cHf9huoEV8vFjRVC2RirqsEYPZ+EFD6G4uBUEPyICInoMrrqCguIjJAWVEtlFsQAPvSzQ81glCY4/6AVend8NoXM/jjnF2aHsRWVLiyWNMe/BMLsrvxmXO+6+k7snpMw/TQSPP18p+v169eklLi6unoopk4dqnfHgqeRybtt8lPH6t3R4UmXBaB0IRUX+GvGB4dAIZAYDd2yQypBFqXbOEWIQrPYXBJzRpkKqTWbds3F4+dgOoTUrNcd1hr67Q0328Jvq4xhGg60dUXb1OR0T0yloHbO0TSFn+Vcz1nXcnZ2xtn5OavGUGvFcRwZtGDnHQhVYrBmAbm1dv7YIKr2Iew1C7i8bGmXG1R9wadf3CDwfOv9Cz761hOEVPgQeHP9hpvrVywX3yLJhrvDDUpHNqomCjgMBxpjSLcDjy4anj9/xIvbgWHypGmau4MJRMksTYCPmVevb/gzP/h1jC5uQzMbBpq2xY+RY98T2NFPkqtn3+Gw7yF7gp+K4WLmlLVtzWqxxCiFEhJtNAFJnnMBY3AllYASTWVMhdaa5XIJWXJ/f49zI0rBcb9j7Ivp4etcQoj/LfBfAl7nnH9z/ty/APy3gTfzl/2Pc87/j/n//Y+A/xYl3fa/n3P+f/3Jf0dGq9Kzqqzm6uqC7378Xb7/a7/GerVBCslPosU5T9c1ZFGi0dq6YrFYoJUu+kVSGWcnZodhRGgQwZNnt21FU4xKqlC+ZQKVDYKIoLhPUywxayfkQ4yRsT8SprKZN02Ld459htev32C6FmkMQwgoramrmqqqZydjKr1gkanrihSL63G7HUgiABF37BmnCaUNq82GRbVkGENh/i2XPLq64upxYce1XQdSILVC51IwLpfLgmE4abznA9dpXdBavxP58haS+e7H73bMoWTtnZ2dsd/vOfY97dSRxyP5mDGVQI0ttQLT1GStWVxc0K2XeDLWls6gFYrGdOSUGPojatGAKAHaSpl5VJgISjOKzGpVzZE8bma8RWIKSGWLiz3qAhgOIFBlnEpxj5ILhDfnkpUYfOE2vl0O3ypM32paTzyrP/n6xhRWpm4IQN8faZXAaM12u8UIaK1iHEeMEoTgsLbMV7XWGB8ZJ0c1edpFxTgVMGUMET8FJueQYk5C9yNWZGTyDLt7Vo1ldCOEiNE17uiQQbBenlE3a2LWLNfneG0Iu3uoWhKeIAPBRdx25Nn6CaaqyKJQyfXcWhW5PHDJO/aHLb/46jP2ZDZSEe63PFldoDYrspDgAs7FAkc1phQfMXD0I0YX0KUbp8LJqWw5DQlIMcBURhNSQPIeXaidJIq4z1QVYg6rTDJiq4oQCjVXEebIpYzRmiEcUUqglGacJmzdlLFJU5cWc4aYihBz9EcyRcy5XC6pqhpyOUH4kHDDgd2bL6nNLMqfRobRIaVksWzLxo7geDyWYkaKh+5HjoUdEkJACU043DMdD1CtyGMimSWL1TneRaZY9GWyiACobEVdK5xzWGtpmoZxHBHKUlcNShv8nEGnZEZYy7A7sD/0NG0Hufq7WsZ/Wtepa3XKRHt3IfulbDx+Wcz7bjfqVFC9++egMGBO3ahTtuBpgT25D6215UAnZXl+UqCqNVqrojNJuXSLJWidsZUixIn9cOTl9R1jgDHAy5cv+OCDDxBGk6WmWqyQukLpmqYp78er12/Y391yvmo4XzdUspCVyyHJlTy2aWK/L9leUmu6pkFjubo6p1uuWa/WfOeDJ8T9Hdf3uThrewtTRhn5QG2XsuBXpLYI3dDWFqU0IXpiCmgFqhKcrRueXT3h0UVN3++5vT8QkLz33nvIHJnGkeQCNpfoqnA8IqwlZUGYIpOLGJvYXJzz5HHP65tbjn1fnvE0L9uivO7TOPGy33N9c8ejy9U8hmUmSBuS0bjgOA6O3THzautpKs352Z
JFa7FKYA5lbNo0DULkUkAJUQLP67q4hJXEu1S0MbqGDMoqYgrk0CMSbBqNWK7wMeK8LzzLrzsDgf8d8C8D//s/8vn/Rc75f/ruJ4QQvwH8M8APgGfAvyWE+F7O+VdWcVopfu377/Ho0ROWiyWLxYonT56wXK6RokRUEQEK5LFp5tQAVfIPjSnvt5hdZDlBCGkuHvOMwwBjKkxV3IAKjcgZiS4oG6nQUjBNN+y2W9zlOUKA1ZqYIkPfE1RxpzZtV9Ijzi6wgK5rbm/vGUk8/7imbVuaqkGKYlSISKIPTP0RSAgNzk8gixlqHCb6fkTEjK1abNvh0khbWa4ePeLpe+/x6PGjOV1AoYwuGtqckVrRtC3amoeO1btrS0mi0A8ygNNh5N315t116ORMPgVin5+fc3+/xV1fo22F1RvoBXVnaYxknAaCVNSbNWrR4BVUOiGiIyWojQEfWa7bMqnKkBEY2xB9ccx6UxWXciqSlRwCKInzDhMjddNgTEV/LI7MlCVJlLSGE05HCEourTgNlBU5azJl/H76fDm4zj836WsPA78ZhVXOyCywdcXh/iUoQb1YcRxKoGZ1eUbOCSU14zTOadMarS113dJPRw79QFYa5pGgiBmSQArJfneP1Ao/9iiROVt09Ps9/X5LiBGCYNw7rLC8d/kez599AM2a3e09L16+wdQWlQvTRimNUJH7uxsu2jOqusHLiBSlHdsYXVrArqSc9+PAq90Nv//iEz7+M38O/frIB+tzotFUyiK85zD2DG5CWE32gsH13O7uMGcLvMx0iwuigxh6stIIBd5NeJEwKZN8IOpCnNZW07ZLdNVBThx390zTgJCCyQ8Y2RD6iRhGcvRAZoqJcRoxXUtV17gQWaoVWtvC9ZEGpQ0xU5LUZUXICqkttq7mjV8QY2K73TK6A2nas2jA9QPHfiibr5EoBTlPZDLH44AUlrppSDnhQsC70p5VWqJUIWdrKbFZghK47BmnPXcvt2yunpJVebBNpaiqisPxiLKlKNvv9w9dHatyCetNpZM4jiOyrZBKsF6vuL69R+vyOHwTCqvTonaykBc3YH4YB74rUP/jCq53R3inj9UMxU0zRkNKiRIFQOi9L8XpXGSt12t2+yN391vMzP4yslCRpSyiX0TR+EkzZ3Rlz3DouX39Fff310QPxGL2kKK4D7MWJJlBS6SC45j4w08+Y3IeoieEzNna8PSiRZkaawIpRESKNNYw6LJJVFpSyUi72XD+4a/TNh22rlgsWpKK3B8HprueHMqCKIxCzIR2KUFaQzaGyja07SkqqpD+M/Ctb32LZ5c9Mc3Q0NyA0ByPPU+uzpnGkdevX5cQ3boqgfBClDiRXA4GEcmLu4FHj9Z8+70rttcvub95XYTNtmaKieRSecZSwrnE5y9u6bolQo40XY0YNGlMHJNn5+b7QilaPRJj5uWrG2IM1FXF5J4gpWSYQAiNSIGYIzJBjmNxc/G2OyPpHwpNKSWLxQKIKFWyKpldiUaImaH1J185578ihPj217zN/yngL+ecJ+AXQog/BP4C8O/9qj/UtDW//ds/4NGjJ2UUlEp6Rc4lccD7QJq7UZWtSYQ5WUHMB3KDNeXAqeboF05ZlKIQtksgvMHaCq0KsoIIyBIHFaPAjTCNPa9fveBbz5+hZYGSCkrmXMqgdVX4UUhs3cDoeP3mmqAU73/8HVarDcYUdBAZYgjEmBiOPdk5mqYqkU4hkGUk7CeU0hhTYwTYZIoMpRqZpOXps8KdWq3XpXBQEm3N3PkpWtbibJSzqTf/kkzgXTfxaT05HdBOX/vuiOyPFmBt27JaLdlu79FKsDCGw3Rg8hklJEFrkjYELedQ80CbJ67ON4zDyHjYY7WlqiuOh76MyoWkqjt8KuDOFDQ5O6YxU9ey6Hvn9y/GiNYGZM00uPJ+S4MgUQjtnnEaSmNBl+KpHCwFYj7MI9JDMTnf1e8Un/8Z6lillDje36KsBWs59gdaPbLqOm6v3xDcEiEUKSts1XI4HohZMLnE06fvkaRkdzzidsfSlVCB1mh8DpACMTr6fio245RQSLRUjPsjIUaOx4nzxYbVesXz599hvX6E3qxZr8+5efmSEDxhHNj5PfVmxbQbqLJm3a1YLVdQaUIs+qcwTtTGIIHheGB3f8vv/ehHPP3ud4mD47LboJQhNw0igc6KSlmyThzHniFO7I933Ozv+LO/8V1e39wAivX5FePxmsk5YpiQlSEMmUaU8aA1bXEf5sg0DfN4K+DGgakv4lcqTaUDwQfc1KNkorE1k5/mjaVQbNtFg4uRhEQoQ8igtClp7yHRLCwKVXQrwRGCI8XMYXeYR9YeouN+d4efhjJyQRSukCwb/2G7I8TMomuZhhHn3cMDn6XETSNCKGKCKWRyVDgxIFeZxdVTnmzOOE6RfT8W4GMyuFA6XzlmtNIlnoY8i4UzSmZsVVyilVXsDzvOzs9Kqz5EhFC/1AX607zeLaBOzkX45YR259wD6wp+WXf1bqF1IrVDGS+nBIJSUDHnJ67Xa6QqGqTgHclqtBKsV8vS4ZMFa2AknCCV5d8pEVohZeBwv8VPOz75w5/hkiSGEgdxeXlJac2Uf4/zgbu7e7Y7x5gUh8HhQsBKgU+CYfSMU2C5WHFzs6OtK6yWSB9YLTuMralNGRm3XUfVdLPtOxfziFSEGOmH4UGPFXJ66MyBQNsaYaoH3tnpdT4/P8d5z263e3Aexxhp2iXLrsMoxTQHNF9dXJDSvJDP75mS5TAXY9FsKK3Y7o48XW348KOP8VnyxcvX7I4DMquifRlHlJBUVc2bN9c8fXyBrTS4VMbtUuGjL//mLDBSs+i68n5TQsNjjIh9hpzY3t1zkAKjJbXVM1LkZEoAJSVWSYQ0D/dbCIHdbvfgvNQ6z6OpomFK/8kfi/+eEOK/Afx14H+Yc74D3gP+/Xe+5ov5c7/yauqGp0+fY4xFUNYLrWu0MgSZEQTwBYVzfn5ZxsLDUFxoM0C0cBBLLGhIqcT/SAM6IlOZHCy6tgQnK4vwgSwFWpbxakJCDrih58VxT/itHxS48lxs2DnKRRuL8w4XSwbsMHjutkcev/+cJ0/fIyPI8zQgp0ScdYDH/ZZWlgJj32+ZpomqMhwORzRlPV52C7INrJZn6NZRL9Z8+8Nvs1qvsdZgrCWJ+aAoBWpeK5TRDxKAdwuld2UFf/TQ9stQ4rddrXdjhXLODyPBu/s7sptoK4URjzneX/Pm/ppAoNtUGC2ZkmdyA/Z4x9XZGWerihvfc3vzipwu8CkV8KcszRIXMkrZeTQHh0OgajLGVoQYCwT7MFDZovVdrhcIG3mz7REpYGswUtKPEz4OZKkJwRFC5MXLz1kuN2gjMEYBcl5X5fw6lEKdr2ls+kYUVjklPv3J72MaA2ikVLj+SNu2LNuO+/sdl5eXjFOYb5gaHwJSWm7utmwuLvDCMPiJhOSw7zELQwwTOQaiHyEGBBnnPLUqM90wDuQE627Jsm2RzNRjVeMnj0xwtlzz+tVXTNPAcTiwu79ns1jzaHNJo6tikbUVyAK4zDFw6
PdINMcXb/jrf/i3iesF0imen52hupagFSoInB8JWRKDoLMNUmauX77i5fWXrC9WvPrsq6J5GLdszhZI2SC1wlYQky+Q0+SIAo7jQCUbaltRWFITbhhw/ZHsA0oIFAoRPXUlaKrCA5rGIyQKdTrkwvhhIomMrhps0zCFREgChEZVltEVS7uSkv2hbEAiQ9c03N3e4YcDRgmauojPY3BkRHF4jiN3d3cE79HaEINHajmzXUoXxs8dq5RLcdcsOqTdUG0eMeiGKUp8NnRtQ70QTN7jfCCEsqn5aeC0aqZUmEoEgZGgraTCIIRFWcluv2W329G2C3IsLeCv7/34h3edgmbfjuHSQ6F1Ap2eWG6n0+IJn3Aqrt5FL+S5UxNieYZOHCdyRAgYxiMpRYzWtF1NjJ66siiVqCgZhL7fklKiqSvqpmG32yGrBVFkSBPrtmV/85Jxd0B0S7wvNmjvMhmNrTuUslhb45xnGLcMSYKuiDGRRAlX91GRsFR1x/3tHZ998nOmKVJXSwiCqrIokTFW0y3XVHVLiKmM9qRmGEfu7+7fKU5LNZlC4V3FnEBohCyj4qqqHl73k3bt1ME5FagpJyqrqaslzrmHjUbKqoyO5vdHSB6KmHF04CM5e97kzOrskveeDUXr8eoaFyQZBaEUcsYYdkfLcZgIopCud4cDh2HCp6JjORV3RhtyLHiSlARKV9RVCep+8viibJQxklKcOxOl81Y0KQFQSAElCy09jJ2Lpi6T0vHh9UCIr62x+o+5/hXgX6SUtf8i8D8D/pt/L99ACPHPAf8cwGq5QGBJUSJQWFthTU2KAiVF6eqHopdZLddIo2maisoa6qZCaYE2EinK3iOVoGlKcG9Wjiwy2tQ0TenUCqlZNQuyLGuDkCClYdKB4CbGYYAU6No1Y38sQc62pIQIpdGmJiSolUXbmrpdcH55RbdYMQwT0g506zUiz52hmIjOQ22IKZBykWroOZ7suD9iRIkc07ZG1R1WVWyePeXR40fYqiLlVNbAGB9kFlIphJIPNPJ3zSvvajff7Yyffj19DDw8H29RDG8PdNZajDVopfBCIK1lVV+QiVzv7hico5aSrDXSlLHp3fUrct9zcX5JP0786Cc/4r1n7/PoyXPaxuJ8xJqK2pfDb4gRgeBwHOmWDUpVpSNoLJ7M/njAB4ESFSEp2k6RifhwYHB7vvzq57y5fsnx2HN3/z7Oef7P/5ff5cmT9/jNH/wOH374UXEcpiKnUVIiRMQH/0uHkV91fSMKq5QTx+0WdZSYnCFmpCr6m9MIZJqm8sJ5P48xSuXYH4/Yumaz2dDliOtHYpy43+2IIWCNQqJIfkLlRPADkXLDSi0hSbzL6GXDanFO23SF5Dw6RMpk50v+3ehIPrPerLk8u8QqjZGK4BxBgqk0urL4wROIDLt7fvyTH/Fl6Pno/e/y3HesFiuyErMbLYJQJAApGNxIUrDd79jvd+hK0pkFRlqM7qhShcuBnART8ggRUYBUdiZSe/xUKM3O+ZI1JQQ5JSpbTuZCKaZpnGF4p/FRRkqFd55hLCaA6TBQ1TW21hzv99TdkinC9e6e1fqcbrkkZsH2/o5h2LNZr6m05cVXX2GNYXNxBkRy9GilibK4uQ67XcExpIS2tpwadUYaQYgBkEzBI9FYbef8N+hdKiaEakW3uqBTFVpKNBmdEzlK6kYRUmKYRlIaSEmwXJ6X7sfRY5RgsWw5jkeEkBz7Qm5u65rd/sh+v6euu+L6+NOvq4oAdhwf2tunzkrbtjRN87AIvtu6t3Z2wbwjRH1XoC5k4nx9Rk6CN29uWXRNYcGkkq8m55NtU1doqdicn/PlVy/ZnF2w3e3ozi4QspDR7+/veX19g1BHksisOgubFj9NbNZrtiHNqIjAMBw5vzhHKf0wholpDoJOASMtq8WS508ecb5sIB45HAOXmyI0v7+7ZncYuXqvprENIZS4mLoy8xgZlNT4mEAWPeY0jQT/FucRvJ/F2wJTtyhbo0yNmbUhJ5CslBLmE3nOmePxWLR6VeH9IGDRtaQ5OuvU4Ew5zVq2ORmBE4hVkhLsXcJawYffep+2Kq7B19c7pFScr1ZsVkt8DEzjyMs31zx7/hEBgxPgJaQc8CFxPB7p6gpmqrwxCu/TXCSV8Z2WRU+UtSHlErklchF1ZSUxqnQwBW+RGqdRz0ljl1K5/0rIt/olQ8Pf65VzfnX6WAjxvwb+7/NvvwTef+dLn8+f++O+x78K/KsAl+fn2TtYLhfEEAlBMI0RKGO4GOODKExIaFoDGKw1aCWwViFVJsUIRKTMNG3pNummgD+FMEhVOn3ipDPSxbxwWjNTSvhpwGpZCh8l5864QklddFFCYmxFRhCzQOqarluTkThXWHAcey5T6QorKZikwCpFJjOMJVGjslUx2oRy+NPagrJo22DaDiM0i/XqrX4ql+lMIaNIlC4moZzLz13g/+/yvt4WWe+6j096zXezSk+F1h8dHZ7WH631PMrb4ZOlshZTt6zOL+Gww2eYnKNrm0J8z4HhsOPFNIBQODdwv7/j/MljYg6EFKm0xFaWECPOB5wfiAdHu+wwVQMSggs0dUsIkV1/zzhMDGOgW19grGD091zffMVf/xv/P378k9/n/n7L7tk/i/eBf/ev/n959vQ569UVH7z/HYJPD3vkOJY6JIQS7fZ1rm9EYZVz5vrVG842KxIOgkNURXA4DMNDtluwgcoWUGRKAa8ybbtifzjgyejKIqXApURwgWnsqW0Zy6WQkAQUmeD8zHCCHDMXqysImq5aU5saNw3gPTlEhmFkGCd2/cjm/Ir1YoPKEhEyWXnQkuQ8jgmly0gwjxO761f8O29+xj/6O3+eR71i894VMWWs0mWMLyQFYSPw0ZOk4G6/49XNNcraQrZOgmW3wPUDaXS4GFBNRUwJ50asKGHFRmiMLV2GFCemccJoWyjLQjCMAy54BIKcS0SAnAOdY/AUXdoCLTTWGNIUyIky41aW+9stLsPl5eMSDWQt/TAipeTq4hw3TXz58mWJLJAQwkB0EykGxpnmq5XG2hqJwFhL07VYa2cemURWmgDUQuLHxBQjIcOExWuLaNboxTm6KpDSMUREDBgElW0Isby3jTHUjWJyETcNxUXiE1knxkliTcO+P5BzyXXzkyM4T91V9NNAjM03YhyoVBGZCuDly5dcXJyXMWzTcHV1xY9//OOH0NkT2+kUywM8bJan7yWlZHN2Rtsu2G4PXFxc8ouff0ZVCZ4+vSpOOVW6YG3TYLTmsNuhpODLLz9nu93RdEuWyyVZGraHERcFnZY8fvqY5aICJpqu49GTJwyvrjlSCv0319ecXZwjpUKKErWzXi+5erxCqUyk6ChU9pyvLpgmSfIDzkWWiwU/+PVf4zg6DhPsplRE6EphbIWQitF5jM5MU8L7gDaGuq7Z9gdSyoQUGfpjWeCrmqpqkMqyXJ+VzWx+nU4iXN7RkKxWq9LJ8b6IyUXBtGgpUdaQ4lvmQ4ilIAlkVCiGitIBsxxDZHs4sj5reHxxxocfPGd7/xPevLlhdbYpLi2p
yVi2uwO8vOboBA4IRBaNRSuNsZZF17JeLLnf3jM5RzVr4IQoBfJqUSGUAaV59eaW++0WY5vSnZESMXcWWjWPe1Iu8VFCkInURrNat+S8KiRzCjfo7/cSQjzNOb+Yf/tfBn5v/vjfBP4PQoj/OUW8/l3gr/5J329yjt22Z9Ft5qzAkRRLdqWUsmgpJSgExuby82lVTAkSlMrk7EgpMNfRkEt+Y0oF/lnef40woLViGI9zLiwImUnJE8KEFpnzszVtY/FuKq7MruNwPCKkJgEuRBICn2DRLpF1w7F3vHp9w+pyg+0ajDZYoxmHAT9OCEquZ0zlP2PKYXwaJ0QWCGlYby6wpsG2S9abc5qLFbYpa0H0vgghpDghmeBUBAlQYk7v+COC9HddwO/KCU6fPxVXIYRfYuG9+/UAPgZ2hz1Nbbk4P0fIXKKppCSkULSPPqCModWG5Cdubm5pV2uqSrM7bPF+4s3dG9wUGMaRtu3KJMMJdrueYdxxcXnGYrlAqoRSZVutG41zgWGcGP2e4XbPctUhZeT16y/44e//LX74w9+j70fi+UCMicOrFyhVOpt1UxGif8igPB6OxBRo6oavK7/9RhRWAsEvPv+UGJ/x6HJJkplxmmib5gGQaK3FTY4YHFLN9mgy4ziSnWM/9jRdi5G6iJOdRxtLCBE/DohU0snDFBmHiRQyAsnTq6c8unhKqxYsuzPGYUJqwXjcE1zkMIy82m3pLs5ZtGtU1uQAKUeyDMScIRhSlGQdic4z7Pf83o/+Dh989yM2suW7Z+/hjEYJBb4IaIVQZCWIOeJzxPmJ2+0dh7Hn8eVTzs+v6JpV6fCEETceOLpIaxYkLWcn0UC1agkRcgpImUkx0FYVcn6oQwwIPUMhU4HopVBO74KyGZcR/4AQmt3+iLSGxlhEgiwSuqqojMXKTBgPHLZ3aGOpdelaSWC17JAztiD6kf1+SwoRJeQc8WDmDkLGWEPMCRcTOutCmFYKLyQ+ZXIyCFkhbUO7uGC9vMTLAg01SCSZ/XFPTplluyzjnRAJfsIazTTOES9CsGgto3OM08Srl6+xdYtPgcWyJvnyfoUQSmeiaYujMn49geI/zGscHZP3PH50xnvPfxuJYH8cOb+4YHIOZTUpJxarjqE/sr3fE2OiqhoWXYsWvpy6tQWVMZVktViXDq+SRANPH29Yr1eE4Lm7vi1j4Ln1bbQkJ0XX1PhpIlSl8HQ+cH+3p2ksH3/nfaqqwofA8bBj9Ac0cHZxxcvbI3Cgqi3tYomQpsQNmUKivjhf0S7W/P7PXuBjYOh3/KP/yG+hWsOyMSSn8cIxKQGLBYtVxd2Xrxi9QypNdANeCdy+RsqGXZaMNtGPI9rW2Loj5yPTOJKDgxCgqqiXZ5huxWa9pmssCYHzEWM0ShtCPI3z3hL9YwyMfqSqKmIOGK1RSoIoYEohi27NGjt3fBqGNAKBEDw5S2QE7xWvdpFFpXl0vuH9ZxdkkYhZwKzLgqIbUjLRVQrhIyol1kZycb6itpb1esVquSQkjzHmnbFOIOfE6BxSJMDjjgPj7ohsZ1SHtRhpqCXFfStnRlrMCCUeAoyDj/P3lWj19dMIhBD/R+AfBy6FEF8A/zzwjwsh/iylF/wJ8N8ByDn/UAjxrwO/DwTgv/snOQIBgg989tmnHI47pBCsNysWqyuUTEgFfb8n5dKtnPw9CYWt6vJ+SQkygMhIHYu2SgjmSNPijkwRIUEIhUqRwvwrKSAlzkWADAgZaGqLVoLoHUYp6so+OOQKgFLhYyaLwkcKsSQbZFXMPuPoaEPBoShRxoApeCTlQACFGyWFIqdADBEpFFJqmsWSpl6ipGV5do7tyqEqpoSPER9DiZ5RpZg+RdFAiZg6Ff7vml/edRe/CxA+hV2/G4t1OrC9W1x57+mHgcNwLIHqlUVbi9FrxmEsmiyZ0FZjsiT0I1laKmNQFFhzInFzf8dnX33GoR/56ouXrJYbfufP/A6r5QojO7Se97iccH6AGGlby+SPWFGhjWJztkIawZu7NwyjY7/f85Of/ojrNzdY0yDaim0uchGlKq4unxDCxLG/Z7lYcHt3zU9+8lO893znww95/4P3Uf9Z6lghoGobXrx6SQhHzi+XSCEYp6m05MmM01jaoSkRk8f7jFIJKTK6rokZ9rt9ERdmSNkz9I6uqhiOA1ZJkrBst0dyEkg0m/U5i3ZDpRuW7ZqYwLlAdI7heODufstumFhePULXDRJFDsVyKbWEWFqiYXTIJhOVwruBVzdv+PTVV/yF5x/y4aP3iI1F+0yS82KVBEIkspSEFEnAME389Oc/Z31+DsogZVP0F0KATMTsGSePCU2BaSKZhglRe7S1RRcUPCIHgvOkBKau0caUbDKlOG53BWExp7eP4zDnGbZkBC4EqqZDVZYsBEmA0hrbNPhpZPtmS8jQdAvubrZUdU1dVdzd3lIZM+s9EjKVAkfZahatx5kvFTB1TVYKqQ0pZaYQwdRkVSGkpTIVplmijGH0mfb8CfX6imN/YBo9+92rMqqxCrtoQRYbuhDMOYcOo2tCilirCNHT1BprO7QyHI8jXVsx7O/Z7w6slucoJXAxQHDkXP8S2+mPvV2FqIG/AlSUZ+jfyDn/80KID4G/DFwAfwP4r+ecnRCiotjP/xHgBvinc86f/Kq/o64t62VLYy1WGj7/5Au+vLsj//zTUvxLzW53oDLwj/zZH/DRhx9xe32NBNpuwc32yGefv6AfJqq24uJqw8V6Uyj1SrFaLlivlvT9gNEae2XR+tXD2L2qDMv1Gjs62qZjOBvpxwOgEWjGaaCqLHVtsdkQJsGbmy+p8sij84syVhOlff/06bNSOJBoFgvqds3u6Bj8kYTkyy+/YrmoefT4CZWVxNExHnfEPOD6npeffcX9bqBZnTEMI0oX04OY7ezeTQjTsFytmaZAN3fFwqwxit4/dGuUranaBQjBNI3ELOYCShWtoCi4gpAjRaZSBN/GzMBEKPiBVOJ9UkrkOdBa5lmjl4vux/tS6PT9RInEyWyjpzIty9WaJ08ecZw8h34iCk1IBREjBbS1pVmsQFmykHRGYkwRor95/Zrt/T11XTEMPcC86WWEnAO4pcb7SGU1z58+oa7NgybvZHg4HnukFA/W8nziuc26Ge9LlFEf30Yr/UlXzvm/+sd8+n/zK77+XwL+pa/1zecrpcinn/2Mz7+QdIua73//Y5493yBEQii4vvuEofo+OWc++/LHPH7yIQtVIUTJWTUGlJxHZbN7UEiF1pLalm4moowDsyh61uWqLeiFOQszhMwwWKJ37Hdbdtstla3KYdX5tzFbUpdoNaXxYWQ3BXRl6RYdkw9sX77CzKkiZu6cls6/ZD9NcyYeiFTgCEYbhFBUdUPTdqxWZ8QAqqpAlKxVhCDOo2mj1ZxrWPI+T3iPlFO5kXmLWck5P4zz4G3qwwm/8u4I8N2x4bt6rHEcGcYCWZZa4Xzk0A+smprVckOYHNN4JIeElBlrJDJmjLasl2vG2XX36tVLDuPI5AKfffoFTx8/5Vvfeo+2tai8YrnaYCq
BVAIfRqQOjG4iZo1UJS0lxrJ3Ggv7/T2/98Pf5z/4D/4GP//5F2hl0Loq0M8kCF5AthyHLT//xQ/R2vDzn/+M3/3d3y0Ts7/4F1msNJvN5mvdo9+MwirDernkkIt9eBw9F5uGRVPhUiwuFSPxFOI5WaJELroJAntXmDt10+BmAB9+oraa8XjEGM3Q90TnmYrFjUW1YFGvWOglVa7x3jO4qVj6vWd/f4PLArNa0XQrWtMRDz1SFPBmZSqM0kX8nT1+CKAlbhiJx5E/8+Fv8GsffEjdWnRVhIYmBqYwkZUBpUg5chwODOOBn3/2GVXbsTk7p65rzqqWSlt8yOzGHd5FhrhnU18itUVXK0Y1kLInBIHVluDLJmHrk32+kOfDNLDarFlszpBKcdzel9Z4zMQMOUSilNTLJRHJcbcnJ7h6+hRTWd5cvyZ5h86ZZrWhHyZ03eGQ3N4eWXQbJIEcp9LezWbmhpSg3ik4skoILcmztsHtI9JWOJnpmg7bXeBFDbrCWEPTLbARVNXipx43TBwPR9abs1KIjhM2gpvm+XfOIEs0RYxlQVjVK6IrhYQWsKwV501Lv7/j8x//HWg6VquWxaLiMDjqpjCsvobxYwL+yZzzQRSv7+8KIf6fwP+Awuv5y0KI/xUFfPivzL/e5Zw/FkL8M8D/BPinf9VfUFWWbz17TNM07O4HDjvH9c2el9fXeJcKIThBTI4hSv6Jf+y/QL1+RPKeFzd3fPlmx88/u0ZKzfGLVzzZH3m8WbDqml8Sti+XHd6H2Tl2Tc6Zpm2YfCCkgTgvesZWrCjkeiEFWle0XctisSDESH15gUoDfv8GoeTDaRbA2BIIKyrDfpzYTXv6KeP8FlsbLs7PuLrcMB57DlvHmzc3VMIjjq+5+/JLFkLyt/7gJ1x98G2WZ5fIqkYZRdZq1oQZTN3gnWO76+k2VVncQ6Cua3b9scD/lMFWFdbWxJTJIYKQDxvIqYg6ncqTAinLZpJmPWLKiRQzShf3XIqZmOJssS86JinNAw2/bVsEZdxQtDuZyQcu1isuLy+52x0Z3XWRKogiMnfjgBsHVpszYs4PB8tpeiuuH4aBYRiw1jzofU7OS2MKs80598ArE3NHQusCSoyRWcBdxMw5lZ/jBFE8vQZlY33LQfsmXJnMbn9HjB4fOra7DXf3a2wlqBvF/fYF7myEnLm5+xJTdWhTiO11bTEGsjFAInhXvqO11Kp0XzUKWxXeVaI8Z1VlkGYe6+UAIiBk6eRoXSQUbdsUcfvsxA3xrQZPKl04Wlax3CxxyfPpLz7B1IZvf/Thg7OX2QjhXckFrKoKcioZuFFS1w1aVayWK+q6Zb05YxgDWSigFDfGGEwu96w2JWg+z5rkmGcheswziPSXNVanAuqk8TwejwzDQN/3DxpEM4/aT7rPnN+GX/d9z+vXr/niq694+eVXrLsFH3/rW3z3O9+hsTV11RCmoUxQssAKhRYakQXLxRIRCn/w1etX5JsbdFVwGULDj3/yB/THA999elZGeyoQUwl/b2pDyq7gUXLDCUhd2YJgKdOuRIrgpsxuOGBtRsRCVJ9GmMbI8XjP7/67P+SnP/0pt3d3vHn9hrZtefx0w2/+1ndZrixf5/pGFFaZjJscm82atGjpjwdurreE1YK6KcDHmDLZJrKKkCJZyYIQALIQBO8J7yzmLniSdyUrK/h5NizoqhYhDKu6Y1EXy7LLheYccmR/OND3PU3VUbUNddsVp11OaASNrbDaoOXsjNC6hCAbg0+RjGCxXHF5eVEo5FKiBDNnBlIQSFUoN1M/MBy2/OEXP+dm3HJ1eU6d4arqaOq2JKN7jzSa491UYlhM4UnVXce0r0sLO0Sur1/TNRVxCjilqNsFwUfqpuLzzz/HhYF2tcZWFlfV3N/fYZRkTCWYWtsymos5c3u3ZbU+R0rJF198Qd0tGMeRy7MzpsnhkkSpjKlrLlaPaIwgTTvcMRBThlhGG5lCWZeiUK1TzvTDWBxZqsDZjLFE7/HbG5IwVM2SsIc3r14RhSWblnZ9hq0bNpsiymy7BVXdMk3T7HLyDMMAFOFkSBEhJbvDgWEYqKsC+tO6RlmNiYnf/vN/iSQkx9ExukAlFCYVhk1Ov/p0nssuc5h/a+b/MvBPAv+1+fP/GvAvUAqrf2r+GODfAP5lIYTIv2K3UlJirebm5pZXr+5KLqSusaYhRfdWoCosP/n0JT/51/5PKClJIaGV5PxsxfZ+h1aK73//O3z88fvUlX4Ajp42fiGKvTjP7sAMOF9ek64taIM3b264u73j6eNL6rYCSgyQHyeOuXz9QWQWiwXtpub+zQ3DUITP4iFjrgB0N+cXaLPg5n5gfxjRCtq6QyL45OefEqMnK4ttJCIG+u09jbU8e+89stFvv9dMxi5xFWAkVNYy+QJ9HIexZCyqoouZJocYBh6bCqkLl01ryfHYP7wmVVXNgvVhLlQySum54Cqoindt6qfX8CTsLsWpQuu3Ds4CxF0gMqQZbZHCwGH0dIslF+cbDoee212PZHYuSri5eUO3WpeRZkrk9Jaa/zYzUlJAuvHhdck5M/RFuK+Nou1mE0h+a4lPs3HAGPUAn/XeIxVk1OwKnKM/tC5E8m8A2+10aQ1dW9aV5UIxjjd8+mLC09MtLffuDSmHYpQP99y++UP89Iq6KYWPVIpHl5c8enTFomu4u7lh2AUeXV2RtWdwe2xuMblFGsvN/ZZ6alEyI0lURqFERsUDVh3xYSLrhO5a3OjQTYsYR2QOSARdVVNbS5IjVZWBI/fbl9zcfsK3P/qIui5FzjB4+t6TpWUKYxnlY0kygQWRAviAFJlaSazMyOyxFlJ2JFkmGYE4j/8FSmcykTzH7GhhIGuS8OVZSyfqepFADP3INI4MfSmqdvc79rsd07Sdu9R1yQC0VSk8U0ZrQ0oFcL3f7fn8y8/42c8+4fPPPqdtGmyz5Pm3PyYl8FqTrUKGCSEmlPCgGpK2oDSbZsMwaWxo+fzTT1huFmSR+HIcuPnyc0SYeP/Rb7FQJRNwGDOTGzFmSVMXDhtRYiuF0BDySCszU6vQEkRWSNEQg8Rlg6WM9NGK/XDkr/213+MPfv/3eP36ddHtKk3TCPb3ie1d4Gzz9TIzvxGFlUDMdnFHU5nCvUkl9y5EwTBFpITJhSI+FKLA2GamjjKGnBKH/b60uVWJsHDOodsGLSVd2xKnwnBatQsWtqWtOrKU7KfjQ2Wec6brOmxd3AZVXeCGMhVnRtu0mDmRPouMVjVKg7CaNE203QKJxKjiLssxQvCElIhGoeoKN44QPMNw4MWLz/nks1/w/OMPcW7iolphosRNHq1Aaou0Fb3zqLbiMIyouiow0LomTAeUUCyahjjTeYXQ7HY9q1WHc47LyzO2+y1oRWUsTVsyGMdhRFpL1xZR3nZ7R4ie5XrF+eUFt7e3rBZLvnrxkkePHzFMjrvdkWqx4dtPnnG7O6JONPpU4GkinzLuxIMrLOfiwEwxY20NupwEXcpYUeIllFFIBMftHU4ppGlwOWCsYOsGFsbQNE1xOIkiwi/W/4
ZhGOi6Digz/mE+Xa1WS2LKJEERvKeI1JZkivNzGnqMqrBdSxMi+8MRUsXXgcAJIRRl3Pcx8L8Efgbc55xPVrR3mTzvAZ8D5JyDEGJLGRde/5Hv+WApf//95whVTpq3+3v2k0MhuNyckXPEVhJjJevlgq5qiVnx2ZevOAyl83Kxtiw++oCzzZqLs5aqooB4Z3v9WzZWfPh5hWAGFmW++OJLhFB873u/xudffMmXX77kZ1/e8Bu//iGPzlqMFiUIPia0UuWwIDMIwaE/8PLlS5wPLFfropFUhq7qEBH6aYck09Sl4NG1Zb1a8t7TZ3z15Re8vNmhPFwaRd1ZNk8eMVQdn754ze32nsVKoKkRAhK5wP+iJ0eFNg3aWPq+PM/LRRl77bb3RFOj506CDxM5Req6+mOs42IuOEqBW/Abb92VMcaH3MHTn62qiouLS372s5+xWCyo6/phXJLmtSxmShcjZfbHkYtVxbJrCd5ze31N3a3IKeJdhv7Icb9juVqDUOSkH9Abp7/P+4BzJUC8/H0SqYqmJgQPoqAetFZIzDuQWB46G29r+/zO8/oWUDvfl19bY/WfxiWV4OrxEiOLS3x3uGEU9+zcDc1CYyQIkREIVA5Et+P69RahSk6j0grnD0xuR2Mqbt9cEyfHfnuDXZ4zxQJNzlIhbMXd/a4gNlLEKsG6qWkqiyZztqnAdIQcOIwjLuaCZUiZFAJSSCptaKsGWo9Jjpwnzs5qqu45j9+7xOhEjI6YDD5EUs4IqzG5JriEEwFl5wIfUTRvOTEdD+yVRtZV4culsp2nGEpWbk6EMBFnx7WSJS8y+kTKnnE8cuz7t+tCCOzu9xwPB/bbHXd3d9xd33E4HDiObxAC6ropRXqGrl1QNy3WVIzjVPhbh56X1294+for7u/uuL+75+z8E54/f87l+abkyyiJkRarS0SPT56QArUxWLtg1QjOFpd8Mv6c7fU9kXIP123L559+zvPnP8c25Z6UuWju9tuAkTXaqIJJiMXtmeOIVI66kjg38uUXL3j18hajW7LO6JRJKbPd7/jhH/wen33acf36zXxgykgZyTHwhz/9nN/9d/4an3/2+mvdo9+IwiqTmdxEU9mHxaOypjA3ZmF3AbjJ0mbPJYBTIFBSEpx/mO9WVYWfM/aMKeGZKUWmGIk+0dmOumrQ2jA4R78bQRTxat0tSsZe1aBti7ENRleIDEpQIiEqg7CmIPSjR6oKqSAJgakkhFiYSKkwN1Iooch+GhFqRWUqpuHIod/x+s0L/taPf1jo7XcDzy4fc3X1hMpW9H7icOzRVuFTIspSqKykKu4hqehWa7bXB5ybaKqWyY1YJTkOB3zIs1NC0NQ1RldE7xljwq42LNbnxLzlOBzpj4l2seD80RNiSkhj2B+PnJ2d89lnX3C2PiPHxM3tHe3qjOXmjPvdDlO1dE1NmCLC1mRfE1JAqFxow7kUW8aUroE0iigkWRaNVa0bsmwJSdI7yWJzwfpywWglVbvAhUjIhViskqI/9qXbkBIINetEjg+gTO892phSOMYIUiFV6dJYXQS5d/f3hbWiNaZqOB4O6AIm5myzwmw1Rv/Jp5JZZPtnhRAb4P8K/Np/4ufgHUv5r/3a9/N/9Ld/j2GcuLm9L/lXqnBplAFrMmeblt/+wfchBD7//AWtdATh6LolH77/hBQiF+ctlRHc3b5mub6gnxEOxmic80zez4RojZ8RHVWz4vLyEffbPZ9+/hWHIZBkxevbO1YvGs7XH1IZSW0tPpXTtjYSg+Tl558yHPqC2YgJ70aOd7dszi5YfvgRxlrG0HO2WZTnMxTQqRKZGCY+eP897vY9NzevePzBgsvHT9hPJS9sOo588oc/Y3NxyXe+8yFSJrTU1HWDlAYxrx0Cw/Xr10Qfy8EmRqa+p2pLhJQyBqsMybs57kPOtvL5fUiZ4IsBIuZitkhzntypi6OUouvaoumcxcHBO54/e1a6SxlCzAVOOU4cDgPjOLJcdFRGoBcNr6571qsNl4/W3N7tizlDQ/KJaZi4vb3l4tGTIidQlmm23g/DwDRNlM2ySBMKa6dwmbQ2BU6qSigzgFGlCPHzzyWVIrniFJaqwDSVEqQsyCEy+VB0q4eSxRn/ARBC/0FdQoCuJTkmpn4EnamVJvjMeHQw65IykFxG1QqRI0M/IlRB3Bz2n/DZLz6lsVVJrpgcn3/6BdVqgzQ1x2HEJfBZ4GMs+4pziBhY1IZFU1MZxYVtefTeJVKWw17TVFhbEgp8LnrclDLGaBaLjjyBz55l27LUNd0ccO29Q8hZM+QCWgtMVoRcCkQpJRJBVdeoXMLlvXtJfTyyODvjoqpRwpSCUipSkkyuZArGOSLMaIuUhuAToztyd3/N9ZvrAm7WumTG3m/Zbe+5u73l9ctX3NxcE2MiV3PcT8rs9wcOhyNkQdctuLi4RCmD1sWgEkTk6mKFUXDsR/p+yxdffMKq+x6L1pKpULGgPpKfCLJIVrrZzbpYb/jg2x/xwx//PmM4IKTBh8ywHfjFp19xcfUHLLqG1WpVMkOFZJomhkHRiPLzwazfkpJhmhBS81u/+QM+/UtvmMa/yuEQcJ4HLAcxMRyPjIdjMfC84xQex5FPP/2Uu7s7VqvV17pHvxGFlRDFzmttVcR2usIYiSCRUeRcojgEhd9R+CuiZD1lsKZsnnVVsdvtyverVHFqSMGybaiMYXSRqMrNehwn9qPHVjV13ZaxlDYYqbDaYqsGWzUlJ0hA8gHRWERliRKiBIwlpISSVcnr0wolE3HyCCVJsQj03DAUkWhIDOPA5Cbujzv++u/9TbxSPL94wq8/+Tab1RlqtSKqDHcOIWQBX6aihapnDdZY1IzYpi5icOGIIlMvO46HO4TMdAtbwnKR3G+PZZMLPbaqud/tadsli/UZVoMLxT0idIX3JTV8uVoyDANNXSOAVy9fUXULzs4vQBu2+yPPNpfEqYdULO40i1LMpAN+jh6y1uLDBCkTQmQInqgisoLGLrDtirpacAyas/c/4jhGKp0Lh8gF6lrRZEnIsN/vqeqmjF1DoqqqB/eNUgrnHdqYwjUSEqktoz8WWN3Y01YFLKuVxHtXCvSqIB+MLhReyJSwh6935ZzvhRD/NvAXgY0QQs9dq3eZPCdezxdCCA2sKSL2/9grxsTxOBJCZNWVLEYhmIn0mc3ZiqurC25ubwkh4YBm0fHo2XOePHmKkZmXL7/izZvXXF5dcPn4PaytqRv/ENfw5s0bfvyTnxEjPH36hP3hWFr3Vc049AzHI/245fZuh/eRxgrOVh1VVTaOmAVZiiJUne/1V59/WVxLSiOEZzgcOG7vOVstkEahKstZt0RLQaUL4wnRzsC/HcejYLlsWTWPaBYaqyXx9Y5K79DC8P7TZ+i64nA80HSXWGXLWM/W5JmoH4MvB7V2iQ8eHx21sVitkUaSdXGCaalxMc+SgpKRmVKiqk6MsBN2oXTxSkE68460xk3FuWmsKfBhQsEuuIkkFElo3tzuub7b4nwkhcA0BS7WHSQwuqZF8PjZFa9e7Lnb9iVkPkv2w
8DLly/ZXF7x7HlNmrU8pxDpU5F3YnAVJ5oj5fTQhZvvz9Jhy0VPdrIxpvS2s3waZ4YQCN7R9wP95DlMgc+/eolzocBOvyFXJrEf7xFBkXIq94BSWK0heXyfS/CuyKikSowRzNEyJXj6oTuZEgZJ3/f0SRD3fTlga4uLifv9QIjlcBj8hJFgz1ZQl2J3vz8gr68ZhhJ/FFJxrNtK4QYexqwnzZqQxUl6d39NyBOmLpmlBaJcIL7aCMIsqbDGYkxNFA6RI+vVgt2bG6bjkbEfMXXH5fQE0zRM3qKUwFhJyo7jccvxuCfEAsbVSqOUJeXMbn/Pq1df8vrVS7x35TCZC8pkd3/L3c01b65fcnd3R2UrXARlShalbgWXyxV1XfPtb31nfm4yz997n6urR6zXm1LYihKe7Gf21ma5JAw9h/vM/rYny4wWGmkrEnZ2MGsW7YLv/tr3+X//lX+L2ze3+OgZx0gGvJd8+vnPePb0iqb5Dm1bxvfD0GOMQMhU3JUIlCpRTClGgk9cXV3y8Ucf8jf/wz/g7vYFk4soCsY9nfhkCuq2sDNjqc9IObI/3BPiVByIX+P6ZhRWQFcrlEgkFwm+BIVmMZ8OZzeDVhbQCBHnVt8s+BQlo4uUMZXGOcd+GJBopihZrDpuDvdkD4vacJwiyWaWTUM9owQKY0eWjkjVgFZg1JyxNJ8GjWWSgiwEcXbMlWlIyZESIZJkJutMmHoqJMmlkmBvNdn3ZVHY7/nh7/+IjOGjyw/43nsfc3F5OQfaSpL35KSIzpOVZzrukSHSYUhvjtRnK3IjcO5I054x9a+ZfBljNosrxqFHSkVOgnE6oHQmEemaFSkGfH9gQFJ1K6Iqr1cVA4f7G0DQ35cCZfCBq2fP+fHPfkFVL9hcPCEbxeAHLh89JkwepQJGQgoTOnlidEwp45IgJTGHS5c4B6EUqikFXMwWa5bURvLFmy3t5bfoPajasJvhrk2zIObEq5stXduxXm+YJsf9zS1t1xEEiMrSLFqEkrgx4voDRnezoBesqYsbUVUMoytW5JQZpgmryr0VitcHZSoSmfgnFFZCiCvAz0VVA/wXKYL0fxv4r1Ccgf8s8H+b/8i/Of/+35v////nV+mrgPJAR+aDhqTtDG1ds1g8oalrbFV4bn2fydGz327pj/u5k5FYtjWLZfvg+Dkceozx8wip8I7atuXP/fZvlucLWLRlzBSGA0+vzll1DSlLvptKx2g/jtSVJAZPuzxjOAwcjseSgpAjelVzdn7OyxcvSTnjxolXL16wXHaYqqKtO7SyRBQhQZxObrO3I7OUEspYBA5jK+I0FE1d19Gcr9HLluPhwKsvv8Qg4DsfIyivk1GKzbLjONxSdR2RIkbPStFuVizON4SUSCHgvUPFYmRRc1fTO1dE9ySUKlmVpZMlZ/abpO8H6rqm62oOx4H77WEWKxucm0cWlUUbxf7Yc7fdkoHGKoRVbFYLHj26RJIfDk2b9RXr9UtevHyJrgxBJlyYOB52/PD3/g6r1ZL16uxhbHd6rU5FnhAC59yswSpQ09OJ+6SNOhVhJ8t8KbxKFqVz7kEU750r750rLrX3nz15+Lm+KVcikaVHW0VOMLkRecikAN5lFkpRfjpJI1tGnzgejhzHEWkUddtgmor+0PP6/kBbSbLLNLXBTYlxPLA5X+JGx7QfGSdP02iCjzhyQTD4ib6yGCcIohSydW3wxxGpMsVyV+7tkwFACFBaQIjc3V8z+SMXlxdENyJJBEoGpzAZBTCWuBujDVoLop/Qs1F8GHpevHjBYZzYvHnB9tizWX+bpqk4P19iK8nYH9jeXbPb3XE8HmfivoAs2B7uuLu/pj/smNyIICJFpm0qjod73DSizYDSBxADq805T54+eSD3n59d8OTJU957/j6/+Pmn/PxnPyfmDU13ydWjluE4Mk2eqq4YxjAfghMuR7QyOJ+K6cVapC0iSRcDREf0A7a1/Lm/8Dv85Gc12+09t/dH3BTwKbHbv+GLr37B5aMzmqYi+EjwCqMVgoQWVYm+SZkYEtZUSCRvXr1he3/DxfmKvp/YbQeyUeQsubg6Y5gGztcdF+cb7rdb9vv9g2RCqUTbacZp97Xu0T+xsPpPw1qOgLatSSGSZETkXPLHVAnCPGkDtNRoqSAHYhhx40RKoWgQ5oVDGYsREoLAjYHaVoyT5+bmnvPlBTEksi003BKFMHc9UhE1KmXIosShxFTE3FoVRH+UAp8Sysw5dLk85qTCpkreF5dJCIjJlyy9lDBNDQKG/ggCXn/1gpuvXvPxRx/x8Qcf8ezJM5IUKGtwKZJ9JlHCbbe7O+5u77i6uORic45RFXf7A0iLcJ7/P3t/GnNbmp7nYdc7rHlP33TGOjV3V3W3SDYHi0zkyKSUQIMd0YEnJYEhBwKMwHGQRDEiO3+cAA4g5UcSAQ5sKLERyQiiKLaSCE6kwLGlMBpIWxQpimSzm9XVNZzxm/a4pnfMj3d9XxUpmd1ys7uOkHpRB3W+ca+z99prPe/z3Pd1q5C4JYfDIc3msxIbocpyzq9eUOQK5SJFntMdDrSHHWVRAJIQQSomW3bEj4aqrohofAycnZ6yXq8RUlCUOc28Zj/2iKxiNlvhLYx+w367JZoB5R3eJAt7XhSpE1RYzGgSvTl6ggj0Y8d+v8Z7T7+umZ2+xurslCADF+fPKMo5VV1jbdKP+BBpu55Z0xBC5M6dO4k91Xep4J4iFFbLFc57DruRuq7Ybza3xxFDIM9y9m1LPum1yiy196UgaUtIAk95Mz/5z1/3gT8z6awk8OdjjP+hEOJXgT8nhPg3gF/gE5v5vwP8eyKFzF4Df/jbPYC1lu5wIMskdVNQFguOj2ZUVYkxhu16j5ASMzqij+Ras5g3FFXF6ckycahiTIGuvYGYRM83RZXWmrIsmdc1h/3+FjUgBOS5ZrE4JdM51jrsaBhHy34wZLlkPi9xziLkFLtjDCI4tNIcHx/zN3/253BekCtNvz+QZxrjHIfDAUzk6cUG5wOnJ8e0+xYm3VzT1BwdHQEJ/pkXFaY/4IKhtxavBDEkwOysbiiytPv2MRCjJzrPclby7KMDPiYHVxK5Z0SdHFFmHHE2IpFoBMF6sizcFhZCpOc0FSTpuRoHe1uoWOuJcUSKLonAY6TMc3SWg0gbsNF6Dt2WEKHINMdNQz6N5OezhtVyQdvuGYOlKOdkyvLW26/y7PlTuj4ShMcFx/n5C67Xa1599Aqrrxzd4h5u1o0u7GYlbhKfFKi/ic4/DMOtiy0VV+I3FGtxonWHEFLmoZIoGSlkdlt8vywrKxXSRYwbybRm3jSMa4vpoVjm6SbrBa73qEqQCcWibvAiUuQFZV1jBsvQW+Z1hq4SFub+vWOG0bFcLliHHcwlvg4sF3OGscdOxU1wjiF4qmLBfD5LY2EtQXhCsBjb47whVzk305ayLlDA7vI5/XDAmJbnzx7TNEe88ebvIFMlh/UeJQVFrYijoG17ChWYH9UQLcPQ0cwqTHugbfd89PQpTy6ecb3bMa+es1rNefTqfVbHDca0bHdXbHdr
Li5esN1uU0C1T6NAHwwEz2gGvBvRGvbbyNDtaJqSphLslGEcHWbtKPM0IdrvD5w/Kfnw/YambnAu8OzZC67OP+bv/uLP8crDR1RFkyDSKsXO3b37gNPjO8ybOat6gRAaH8AZgVcjmZb0psVGGLZXPL/4GJU7vvDuaxwdf5XttuXics1+15HJnourJ3zwwQxrDPPZijsnZ1iXAKq2VAhREKMgBlBojPNoqXjl4UP6zjOfL/n48XOeTriRt958RIiB1VHO2dkRFxcXHPYHpJS0bYuQgrPTM54+fcrTx9/+/PxOOlbfc2s5MQVhKpXGekkwmi4GUpDspsZhfE+uNBAYxx5jBkLw026hwrobcnJyDKaTo+bi/Bxj/MQmUWilkSLtSJNrKExidEkAvI+oDLSUCTzpfdodTdRyRUBpDS5V+cp5orVgDMJadIgIl0BwVkSsHac2pWS/2fHs46f8xA/9GGfHJ6yOT4nTHc0Fny6eSkAmyURKVXfWc3pyl8xFbD+wHdacHD2k7Uf63Zqmrri6eJH0K8awPD7CuIHeeaq6JjiHMckqu1ws0BKiTF21sq4p85zD/kBVV0gEqihTaK2AzXpNUzUs53OMHekGy6I+xvnES8JZINJvPG4ciR6CjAgR6bs2CcyrEqkVwo1s19c4H2jqiqgF9974EuXihM721HXFrC4ZJ2Ce1prtdstqtaLvBxCSegqflQrqqkZplXLmnGO0lrwo0o5jvUEpydh3SCWp8hxnLVVd0XYdQqupS5Eo9JHE8JGi+bZC3RjjLwE//Pf5/PvA7/z7fH4A/pnf+rf+xjVfzPiJH/9hDoc2jd18wI4GrSTBRQiScXSIKJDA/Xv36MeBLM/xfuQwpHQBP4W/SpkhFRRF6mAVRXFr8bbOI6S6vXFLlRFVxr7vCc6lMbiExay8xQHEKBEkHVWcNI/ee66urnDBo7KK2A0oEvm/njUQBd5FIoreOC7XBwQpRHi/3+OfX3B8vGexmqP9gTfvfYG8qNjsnnNxcTkVC5p8nrFYJEgmIo31oreUZUZTZETrWVRzvvnBY6wxHM3njGPPtn3KK6+/w3yeTBB912KMI0Y/6VwGui7ijLvVUt2M/z4dKZRs6YZca86OV+gpjFaIyGjMbQFT1zXvvvEKR0dHfPObv87xqk48uXYPRJZHC6TIccZxdrbitUf3+MZ7F7jYg4AsU5RlzjhtIIBb3tBNIXTz8c3XmKJtQviER+Wcu/1+rfVt1qGUDiHkbdC0UoqyrAghUJaf/E4hEjD1ZVlCgJYwjANVmVMWJfvdDq3gwd0VyiadmRCCMisgTx09lKJezNgPHd55FvM5SqQO4/FyhRkHlBAsZzO0znFVxav3jhlHiwBGUyAkFKXGe0PXdtR1w/HxEVVV0g8dh/2Wrt8xjMktWOicvMgRnSXLcnKdEAh1XVDkkb490O1btNBkZUkKnrcEIcl1hhCGw/4A2lNXmrJMGq7d1SVdv2ezuUQWacNY60vKMufy4h6zRUkIA0J6hrHl4uIcY0wyXRiL0pH5okIrRQiOTEFd5WgV+dblFi1H9sYRnMeNkhAGDvoF3gfG0eLzgm5zzUWMlEVFGEa24wEzGtr1JfP5EbtDS5aXdP3IZn3J42rB229+EXX3EUpqsiJPTYQwoLVkGDswhrKuGO2W9z/8VRZHDZ25ZLs50A8JE+JsS98Jzi/nzJoFJ8enGGtxxpFnNcZ4vI846ylUlpyceaSpLMvZnK7bU5cZb73xCpu6JMtyfuInvorzjvX2Yy4uP8AFR7NI77+iduR5ztFRiYsNv/JL3/4c/baF1ffDWh5iZL3ZU5c5mZZolXYPWmmETDZQIQRqYsUoraYsuxTeaszIoevTz+gkZM51RfSOtj1MoLSSXJdpRBYFSmaATBgHmUi0QmoiCaQWgGEYCdbhtEKXRToBg6Qu8iT4RCCcw3U9MoRUYBmLNxZlPYGA1wJvDIFIiJLL80u+8PYXeXjvAXVZInSOT8FNacQYAzY6YgZaZxzalrpukDFhGy7WL9j7HSfqIfV8QbtfM4wjozVEKdlcXlDOaiBydHyWunh1CqiUMRJMSyYhCok3qRuhZRqj5EoTfGCMDpllbDbbBJOcLYgBHj95wetf/ArN6hQzWmK/paxyZFGjFwFf5ARn6fuOvu/RE9DQWst2s2PoDhQ6I8trZLPi/ts/QGzush/GVOwIaKo5rhtuNQlFUUzuK03XJ3PCTUZbsvt63IRbEErirGNWS/IsdSEJqYsgbynCITlQnSWrK2QMjMNImeXI+IlA+TNfMTCOHXmWdnZKapSKBA/EpDVUskApiVTphtrMG/pxwLrIbL6CKBgGw3azp233hJhMIFVV3d6UldK4GBMvKYKPkc3+QNjHlH026VNEBIEBmaI/nPc4a4iknwnBM8aRzXqTnIBOsNk/QWvFK6+/wcNX3+D45A5DkHzt/ccUdcMwjmQquXiPjo/xIVBWiUHTdUOC1MbI0Pd89OGHzI9PqOtUnORFQVYWaUwXUtTP0VGDtwPBOu6enPLN9z7k+vyS+yenODOyW6/5ub/+N/ixn/ivcHznDrO6msTdnxQqMUbwie0k5aS2EwLvPMM4JpL61AUqcoWSEedGqkJT5orFrEKqE6w1aClZzGvGdsesqej71J0zJt3cem+ZVyvmynP0QPPKg7t861tXQHImn56ecfHigu7Q4qydzveQkgZugKe3VPTUYbsR4944/pIWbPwNYbmfELNTpz9O+psUB+OQ6hMhfIJEpOfoZVkCwHtyqfjyO19mPl/yi7/0C+y7PeV8NgGQ0/v/aHXE6YMzfv3997AE7t99wHzoCTLdd16EFxx2e2b1DFCMY5f0bNJSSoXregqVMV/MubwcGYYeVTQTKd/SdT0xQp5nFIVG6ggiwYnb7YhzJWVZsphLApHRepTOyPOc0Y8Tw3FJVdQIVVA1Da3fYpxBR8jzghiZIJ0ePUUORTzOG5QC40ZcDIQu4lzBRx+t0RnEOOJDcv9VVclisWTse7r9FmSgyFegNdak/MlcR/a7Da88OOPkeMnl5Tl2rjg7PeWbH7xIhqxJe+R7g1KCxWxGnmcIa8iyHCcVw26L8AEzGszQ0Q+Guimp62PG8cD5xXOO56c4BxJFUdQUeZmkQAhyHSnLSFE4jLlivWnZ7g7k+YxZs8QOIwjFOO4RMrDerJELzXKxRIqkK850jtYiYU4CZFJTZA4lFd1+h4+RxWqJUgLnDecXH9IPPb09ZzDJBXo4dJRlfnsfsn5LPau/o3P0O9JYfS+s5b95XVzsaeqcslBkOglbnTS3beyiKJBE7DhgTIpm0EVO08xoQtIttW2LcY7BGIQfuHN6xNXleupE5AmBH27YNJrgweJRhbrtVgkELgSid0gfcZMdvPcGqQpyPQUnk2BqylnCOOKMoRQSFUnVs4/ELMUIpHm5xaPQKuPBK48oqgq0RkqVIJaZwgPBeXwMiExy2B24Wq955823koVUCLxIrouDGVBRo7Ocsd+hswydZ9RZkUKsiSyWK/K6IUgYBpvGqEXBYX1JVZXMFgu
6w46+6xJk01qGYUDPSuQkSp43M4KznF/tyBdn+KARUlHUEdPvGbeavNCQl6AgOkUWA1lRpIJnGG7F8cvlMSIoOq8x+ogulMjdnsGkbMPUAVEsFkt2u93tjc45Rz8OnJycJmaYkozG4ojUZQEh4p1jXi/oh4Fh6FJcA4G6zG+tsyHG5OxRiT49jIaiSJwf0w8UWYK/xZfgJuJDZLPvfgPM04QpjmL6OIZET5aB29yuLNOUN3lh3pMXiqOThtkiRUAwWelvuhafBKlODatEi0SJhAoJMWInrpGfmDU3tnwhBMJbgqoolCKOe+qqoZ4d0R32tN0OkQlefftdRH3KevAIEbh3dgyT3T/pxQoOXcfgDMZ32IOgzjJkrrHe0h72rC8vqedLRExFshYSfCQTaXx3fdhy96xms33B1fqKLM95881XaWYVlki5WPA7Hj3i+Owuy1WNVgKpFFqmwuRG7C2VJBKQKgm8/URvT7EuOhU4VZFE6s5gxYRiEAI1BTf7CFJnGOe53rUA6Ly8LWyKIpHZCyTeGkLoEe6IZVEQ+nOEEWS65vi4QVKw3ex4/xu/xv0Hr1A1c0bnkSpPXdbgbjtqMU6wy6m4uhkF6okC/unokhhTCLNW6tYEBBGpNSkzL0AUqTMpPtFqvQwreChkjipz/GBwhWWxWJCVKUtx2HY474ghaZaqrODe2T22XULTrBYrTHBs9jtAUjczTk7P2Fxdo4sCJQR1UeMGy9XFJWcnd1hWDV2+w5me6FyK+pKS4+NTlosFxo606yu67oAQkaLUzOY1ucwwNmE2EKBUgbGeYUgdz9deeZs3X32LXBUEkTOfHTF2I0EOKK1pdEkuC4xv04QGy37sCMFRVTknp0dcbK8RMnKyKjg6XuLDSJZBXswZTcf5+QYzWnabke16S9eljbjWDiUF+12LmRW0e0VZZDy484C6qQjW8fTJY4QXnJ0ep66yDxAG2jYgIjhjUnZhAhVSlyUDIzJEmrKgaGq60RDsyMXFM86fn3P/7BEPfvQhsshwo6ep6oTOcB3GjohoybVASY+1HZKRTDtE7LFGYF2HM5FnzyPHR/cxQwQrOVod431MiQkqQ6lpPO4VRV6xmEvu3e25e/cOH3z4LT7+6AL/xj+WNptNZN/tOBzWSBWRMm0eQ0wTGa0LrB1Yrebf0Tn6HRVW3wtr+aeZPdnxQ/I8pYNrqRLSX3iU4FYHYMcBExPIDEggvXY/tbILqrJg3szpu5ah69IcuR+xY8TZwGpRgVAgNVJnKe9JTKn0dqL9CzW1vCXOB7x16DwjaxpUXkwhkAoXLVpEhLQ40xHMSBgNIi9QUVAWFWNMkA1jh+RaNCNSZRwdn6DrGpHnyEksihK46AlCILQAn270X/vlX+bOtEtvmgQZzLIS0zrs6AhR0BzfwV16lN4hoyTYZA8dxwGvFGfNnLqekZcBN45kdUnX9ez2O2aVoW5qiiqRqI11OKGYVQuqsqA/jPT9gCw01eIIXVd4N7JfX7JazSmzHOcs5+dXxOgoyuREzPIC7x1KCJT3lEqhXUEMguvNAVk3rI5OCN5w2A8sTpZEleOFB6HpXaQ3lqYoKJRKYdqzhn13oK4bYujo0Sg00QbyTNNUmtEMFIUkhhxCjfYfISnZ2ZoydEi9QIY1wirGakVpB9rrS8pMpRayT06wWwvuZ7iCDwwTGuGT7tJN8ZTdAirVRDkviuL2e6xNwMgboGQIpJF3ZLJ/f5L1dQOCvI3xETdOsohSkjxPv/db3/oWF5fXrFarW8uxVgqJTLT76DBDR13XhBDp25aj1ZJ+qFisVmRFPu0ePffPjgkxdR2lyhjGEWNH7DimUVZUia1G6ho/efqcrutvx1s3WjFiKoQW8xqpoa5Knj/7kBfnlyiV0TRzjk9hvd6wWKx48wtfYDZf4aIgikiMDoRHKk3XDfSdYRgtWxtwPhUkehqPVZlEREGMOV2fOujEhHfQWjI6gw8HQvAUukArfatnS891GpVqrW/z2JpCE/IKpWZcDyl83lw9YTAVd15ZMARJ1eRcnr9gt7/P62VFlml2hwN5EYlZPo3C0+t8AzK9ee1vulNZljFMjL6bohwgxkCMITkRb5yROmUjeu+R6c40wVFfnsJKS4GwgjIv2F3v2G4O5FVGVswZ7cC+3WOsgQBX11dUeZoKbK83PHl+TjGvqRcz2rFPYNjZjNlsztAP+MHz4vFTjhZLohOEYaTf7TjUNVOFxH67wQaL9Z6+7lkslzRNw/Oraw7tHmt7pIwsFg3jIfEVg8+QWYH1lt2u5bDvOTtueO3VN7lzdheCgpgxa5YYPTJyoNYNmS6xwrDrHH3f471jtAPWGqSEiGMcO7IipygNeWESlT5EpC4YhgN5HlitjmnbgbLUPHx4j812g3MdMQREdFgD0aXGwIcffERdVtw5O6XK52zXLa+++06CZ7c96/WaKHbEAMaBi47FfEHX9zRVhc5L1ufXFFWBqCNlkSGUZr3dYIfIu29/ieVqznBwFFlJXR6hVIaqagbVYUNLqUuGQ0s7XpPlikInJMXYO4wbKfKKvt/zK7/yS3z1B2qWzQlt2xF8TpknZmJUAik0mayQQZErOD054+GD+1xcPuFy/TFh0oYqOZBpQ4yerkth2mWRZEN9bynLGu89l5e/pZn7k3P0H+SE/u20ln+a2VOePoplrlEyIRSCcwlpQESGT1wwEZHGFT7pIkYzJI2FNGyu11M7U6KlwDhPuz/QtyMCQTGJ1P2k00AKophiIJg26qT2sBRpPiuUQhUFMkugo+gCPoaJNTJgD3tCN1BERV3XaczoAxGd6OIyoLVIVbhK0erNfJaEriToqABMsGkkKcDFQN8e+OD996iynAf37jFrZuRVSfSRoqiSvqY3NGXB/PiEYejwlxepkFGC/SGF5J6/eM7V1TWvvv46xycnqFzhXKBsZshg8GPLbhzYt12i1wc4PjnBGs92fcEw9NTLI5rTO9NFfoaSkegM58+eU1cVy+WCqr5Le9hz2G1xw5CeP+dwk81YCsFwOLDdt5ioOD25gx/2ODMQ5SkfPP8Aa2peffQqVneECGVZYpxk8BpRLlnEEVFKBgJxVKxCh9CRweUYnTE6RUCxLGfkwXHlDmAfkMeOXFqiOuZgW7LsDB1HKr8jmKQd884iiwI3jGl8+BJ0rG5iRT59Q7vRytzcsG+KrqR7MLc37rQZST/zaXfYb874Sg7CTz1mTN0X5/w0Yo+Mo8E5T1nWPHrU3P4OSO8VYx1BRNy45fr8I0aXdEZPH3/Ew5MjiiomGzUSgSfX6b0slcY7R+8MzltmdUWZ6WnsFvA2ATz95BJ88+0voLXmcDjgnGO5XFLXNZFAVWqOVjPcOHB5tWG92bFeb7n/8FFCeyrN6b37LI9PqZom7WazjCzTIBzGBKzxrIdUXDmfxn773S51XI0BmQCFs9ns1pEnUWRC08xK8kKgNcTo8KOnqqoUHG8M3juE0LfW+5u8PhkMPkLnClCRV46O+Il/5Ef42V9+hgwebw3X1xeoTHF0ekrVVBRFwVtHr6W4qhhSYs
QUkBvjhDKdNFafkNbDb3j9bwrrNBZXzGbN7bkQgkOoRLYXgsRE8i9XxyrTGtc7hrEn5pJD3xHySLUqQUZU/glqwriR9fU1xnv6sePF5QH3YsOD146ReUZZVXRdx5OnT1MHarXi6vnzlP0nC3Kl2K03KATD2NOaljGkbnlAou+lW+j+sGd/2BGDI8skxDRmNXbAWENVLxEqZxgdWV6SZzndoefpx094cOcdZnONEImh1+g5MkYKUaX0jfiJRi74TxoOeZEzDv1kJIEXLz6kbUu6vkNrmM0TI+vunTucnd1ltz3Q7fdIAg/unbBep/MxVg47eiKBzbCnLguCiQR7QXQpOeBqs8WFQGsSC1FXFUplzOcLrLEcHZ1gXrygt55u3zJvZpycnpA3Be3YI3TSYxbHM157/RXyQtEfRvKiQsacjIbl6owoHPvuBXnuOTs+Y3y2xfTjhAlRFDpDaMXRckZc1PR7yTg5Yvf7NkXeTQkEMQqUylGxwlmbtGEuYszIOPYEb7B2JATL46ffJETPcnnEZrNHZ5pZoxBSUBaWLKsoihuzyLcvrr4TV+D33FoeY2S/3VEWGWEqsBLmIC3nXBrhCUUmM6xLWpFMT7EpoSXTGd5ZWuMmDpG8ZVDlWeKcBBcQIkWWhBDwQqYOBRE9PV4goqREyykRXIB3SXfgcOioiCGJ58XoKNCUOicvCqJIUTp5WRKsJyAQKkOJQJQemWdkhUaLSHQOaz1eQ14lLo+xSVx4+eIFpcq499r9pH+awlONNRhrQQiqumYxXyC0ZCQQiyKF3DY5pttTZpJZvcIHGNs9F6bn9M6d5A6MkSwv0iij26PFJApuKtrNNZ2J5EXF8dkd6tUxZAVSKPb7/e1FOol5I9475rMZWivm8zlm7InGYI2jP7S32g7Tt1S5QAaBip5cKYTM2Y6e6t6CZrhLlTeMume33VPXR5gQWTQFSlr2F9f08i0KrRFyQ61gFBW+3+L1GUrnzGJHfrgkFiDjgSJ/E2EDPnq83HBCybb0eDNn3O1pipYgK2xWoEJARj+xZF4Ooe7N6ObTAuXf2J2IEyU7heiCSDdGcQPTFemiK9L3CBFvu1433KJPF15xGgc56zDGJYyCmEJ6o0h6x+m9E0LSqnEzXgyese/4O1/7dWbHZwzdgX2mKGdLhNKgczKpp4iUVAigJFp6VCZRQiJimfhjUpHrBL2wzjMax737D3hxecF+v8cYw2KxmLpvDi0Cs1Lz0bfeZ7/vKKqGsNlzvd5Qz2Ysj054/c23OL13L4XURiirkq7rsM6QZwUnp8fcu3c/xbk4g3d24g8FrDUcDFxtduz3CWnRdS396JBCsd14lqs5q6PU6c0n48WnO4Fpc+hvO0bDMHByZ0WmFdt2QIZAzOd8+Xf+JB/s/jpfe/8jgi4pijppW5S+zSvcb9dpM5AVzBdLiqJI7zN5SOOvqXi7OYe8cyitbyN3br6W5+qW0i6lSCNPneQKw5B0jlpJZrPZS9HFvVlKKr7y7hfZX3c8fvKcYRwosgolJIEJJTENNyKRcbQ0izlHd8+w4jHbvgMkh0PC0njr2W63yRhlBJnWbLc9o+8op66sFDAOA8NoUKUkU4LBRq4uL1lfr3HGMGsadKEw/QGtMpywlKWnLFJB1LY7RtsiUcznS7SwKQ4mCpTUiS8WBVmWY71GkN6n1idcQwqzNwQ/UlUFr776iH13IChBwBG68+TgMw5rIURHXRXsttdsrtc8uP+I5WLObrPhlVfe5ehoxm63Z7fZ8uzZBdZ4mqpAoLi+blnHAw/u32XoHTYfkJlm33dcbXZEmPSZ6dRYHAvmyyNCiFxdXHPndIWxltAH8iKjnDWMxmHGgf1+h5SS09MT2l2PGSwiRqwKqExQ6Ir69D5f/YEfoSgCl+vntG2HdREtJdW8wpiR1eKER/cfMbQd5+cvWC6OkWL5KXOaREkFXgEekBOGKJJlkpOTFY+FQErB6ekxbbtnMJJxTOklMpNombE6O+H8xTlFUXB2dgZ88G3P0e+kY/U9t5YLITk9OSF4S3AWbx1BCKzzieuBoCxLYgDrDFmmqcrk0HDOIFUqmMa+ox8HdJaR63wiIYQpAywJcPUk2Ewhq+nCJ5QkCBJvahLyZlqjtEZn6YImhMBYl1hTIaKlJsugFEkoqcqC0VucSJ0nkWlCsAghp0DOAaFEAsEJgTM2ReVk2dQ5C5hx4Pmz53SHPfeOj9BRUOcFagqKdd7TjQM60wxm5NC3nDQZgYiuaiolkN7QNFUixSuFzvV0y4X19SXHJ3domobrww4l0wjJ7A9keZEE4FJQVwUyL/FCs+9GtodrTs7uEINnPmtud6/tYY8rykmrIbFjT3vYY7vu1gZ+E7/RDYeEwpAlUZe4qe19dvKAYb6nZqA2aw5uwdHZPcbOIM2W0l5Quyvc0OKbkte6DxjGkWa+5FvlD6FiJARD0V8z6x8zi2su1Os8GGvC+BcJxX2usneYH34eNV7g9I8Tq4ZQZzzs9lxlFZ0riMOG6AZCXKSC/CVYn3Z73XQZPr1Huekwaa3SmC/GdCEREMKn9TQpbuamg/FpEfON/T59X8JVD+OINYl5oyZWUoziU9b9CXxoDDY4vHN88eEZlb3m//0zfxNVNrzy8AEXTx7jREZZ1oQAnbXEwdwyl2IIyIwEdiRQ5QWZUmidUeSBoe9YbzbMlys2+5HdbkdRFBwfHwMpCUFpyTi07NaRF0+fkuclbdcnfVRMXYXX3niDrCjZ7g9prAXTTjXifCK+x9gT4zYVnD7engNZnqGKjLmGUs9xq9SJijGhF0IUE3Ay3fiMtfR2uH29sjzp9hJV+5PXr6oqjo5PWFSK0+6QnhOZsxc9Z6++xi998JTdrmXeLCbmlSWSRNJaBpQoGB2T7iUFjwef9GLGmN8w8pMigTDjdB7FCX6qs2R8iCEyDKljpSe222I+Sxo0pVBTQfyyLOc97X7PIss4EYrWSO7pY4Z15MPHzxBSk305x0efIKKNoxd7rq8vKWaRH3r3DZyztB20uytKHzk5OmJoDzxbe843Pe0hsKghC6BVZNGUbHeKQ8zJooIoUVoSnhr0deQsO+HK9EhZEWKB8JH2+hqzNRS+pO9a3HjFMJ5zstDsQ8FhP5DVFb70bMIVwteoLqPOckq3IOZFMin4gHAFziu6g6FUkrpecv10zeHxnpkvuHP/Pu/LNZkWeBUJ1hN9ZHvdY9rActZw/fgFhRTU8wVdfwlNoA07rHbMTmbYPqBlQbfvE+3ceDbbgaLMyIdAMW9YMuOgR7ySnG+27IY9J0dLnj0+J4+So2bOSTXnor+iFjUXT65SwSMzpMjJ86SnmjUNXdczhAOWPfvxKUf5EY1sEC5SqYbf+9U/QHHQfMv+KtfxCcbtkNojxQpRBu6sFM4/JnpP7FqqcJ/j8gHajjAcUBkEsUdVPVkM+NjRCMGjkxUfek2ZH9OUTdKRrjV1cYfWbzm9d0yeJ9SRLAJOBIq5Yxg63vvw4js6R78TV+D33FoeifRth5QREXwaB4aAiIFZUaC0TuJQkVxKKXW9J
8aAkiBiiuW4cbhY43GmRwtJjGKiEqefVdNFJjC5mbxFBImQgbxIWWJKqeRWyrI0wovT7j6rMYj0pMUIPiBVpMg0JgIqI9ep0CFEZEhdgOg8Ogq8tUQfcC5d5KxOdHljDP3Y8+zZE9r9gbvHJ5wsVtR5gc4y9mPPwYzYEFKhqdQERZXYvqPMcnzVkEdPsCNFXhCEICrN7tByaHuEiCzqnCIv0VlBWdXpjZRXNMuMfjAMNqCyDC0TENILTd0smCX6Kot5M4V2JtdRcXREPxp2uz2nJ0eUZYl3NhF8nZ0u8IrRW3xesR48X/jyuyxPX8WJChTsx4zazXlg/ibOfMB2+U8jo8NaS5CSw2iZNQVf7v4uz8WKpX/BcXME5z9P+egNxOyEOGzJh4GoNHjPyfaXmekT2tAihudkw56Ze48L/YO8vf+P+frin+Rh/+vcGy848MNk5QIxJD7ZTRv5ZVhSTtlfk/6FkM5fIW+0hpNOMPjbzlVit0z4ED4Z4aQ6QdyO04VQE2M+EH3Sa920iHMtyWXS5uQT3R4mEohKnRhjkv6u60aU8syKiJs3KDzry3NWR3NEXXL/1fvcPT2mNRJDwhCUpcRah9Y5XsQp306hlUg4kxjJZcaT9z/kw/feR+c1DkdWVvjoscHRjT1HIhKN4vLFNZvLp8yamqcfX9JuO6QoELokZgV7F6gOHblSKTxWa9p1m7ozeeLlWWchpM3Z9uBw3tI0FdaOGDNO533qhRs3ApEYkoZDCsFiViaNRwAXBPt+QiaM6TkUPifLNNnk4Dw6PmJ1fIwWnigEuvRY71lvd8R8xqtvvM3Xv/EenXPkOuPi8XN+5Cu/gy+8/QYIh5CBrrN8/NETLq/2vPLoEfFXzhO3RwkWizld15EXGVpKMqUQOmnzpO8ppKfIViidwrwXRZWud1mGB2JMxZoLDmvHW9zDy7CMsVxcnJMtTzi7e0xdtnT9gd4GFsuG1dEpXZnjrCMvNfuh5f7RferpOWm3h6mwj2gyVK6QumC2KrnnI92+Z3GqwUfc6FktZrRtS1lo7tw9YbSOzW7H0eqI40oTpScIR1FqzDhFI4lUlJdVhZIJmRPaVHyLoBj6nv1unzagQqSWxRRbZqVMDnjvCcERnU1B9ePI2A+gBO1ux8cfP+V6vaHIChCKL3/pXdpDx2G35+rymuFgyKfg8v2uo2WgKkqWyxXX1zuUVZjRs9+2RK8ZB0+hc8bR07eJ96Z1RlZWGBs4/+AJo4j0BIyAoQ8YOVLoHl0K3Gjp1luEhFgGgouIKGn3LUJofvRHf5yv/tCPcufOA/q24/nzc/a7PWQSneXJiFKl9771iuWi4fTBffZ+y+7jDVeXl2gpWImSh6/co213fPzxM5zRLJqK7aZNKSgxo+9c4oplEcWYJCwCUIrX33yDq/UlH370QZpKhQBKcXl9zcH2vP7m6xyfLPnWtzy7/ZboA/fO7jEOluvrDfDt6esvxTZEAErLxIRSqe1aSpEYNzFl/QUXUsEyaYujS1BAYiBXAilSMUQhExzQGKxzUy5gEmdKLRN/amK7IKDQRbpZCInzniyEBMOTkihTqKkI6ablo0+ZWllOjAGZC6KQGAGCgJKTjTl4hIoUQuOjZxwtyoU0e3cOIRVOgCpzejsyjgNPPn7M5mrNo4evcP/sDvOsvB3/+RAY+hYfI60buPPqPVRZUxQldui4fnFJ2/fcPVqymC9wzpIVJToveHG9o1ks6Q57unHk6vqaO3fuEmLg8mrNMPSc3bnD2d0zfAwpTNMYyHKEkvQTQ+f66oKj4yOcTTfxsizTm0EpylmNGYbJ5dXgCQir0HlB13e4GLn/6E38+ZbF8V0sEhsjHklQgmYwqMNTKn2OEDW4A4V02OaUQ1bQmgvuSHhm77GvHjELFxTFCOS44Jgrh13cxYUlfjdwcrig45yx/Ecp2m/xwP48xi95fnzCuxdP+aCB19r/FN+v6OwOF/dkAsxouEl6/6zXDX/o0zorqSRh6koJUkCwC55xGJIWynqyrLjFkdyM6246TVrLqXv1yagzdSgEqihQKl0wVsslSnLLbrrRdulpnJR0XSWzpiEiyYVlc/4B3dDz7rvv8Nf/xn9KoV5hPl8wPzrGE9G5pq7nty4751Inx/lEPM+nrLLToyPGfkAQ+ejDj3j8+Aknr7yONZZMSC6vrmjKkrHrOTQNuc/Ybp9xff2EH/vhH+F6s8H6gJx0jM57njx5ytPzC/Ki5PTkmOVsTt/1HC2XHDd1Go8PBmsdV9c7trs2cb6kQsuGzXXLpjPYkN4fxEiIyRWVaz25jKeYJO/JUoDK9Pynjl9negSRTCUUyI/+yFcpyrfx3iByKDJJbA8cnZxyr/NcXV3z5huv862PPqY3I0Xe8Dd//pf4lW9+QF7mlFXBrJkRo6CZ3+FyM2JdxHnP3/3WJVXVTqL5LumxvKPrR/Ii49WHZ9w9PsLjwfbpmhgjcTTE1uOtmzLYIl23x4VUzLwsS0wQ1H174KRaUtYF+8s1RVVx5/SUejbj6yISosMrTzNfUjRVcoejiDZix2S9L7IZ/Tjy9MWGumnQQrLUBXVZ0dR1CnEHdJZhypy79+6y2x0Y+o6T5QJvtmz7aw7jFr2cIwLILAUDh+ix3nHoOy43G7bbHVJa9q6n3e0Z2oGu61BSkhcFXmo6MxLGAS0ktdZIFVFIqjyjqQpMVdN3e9pDx2gDWT0DJPvekNkkgFeypCoWZNKhpabdHnDGoYSEGMmLgPcCVWhUqClUil8aQsfQg3MaFxwocBS4mLE5v6Q3FqegC5aYS2Z1Q6YTV2+93pGFwLAbKUuJcoJMljRljTWBPCt589U3efXhIw77ns16x/rymr7rmR/P0IXGjAc24whBoaREnkMXHY+vr3j/4oKsqtmOLcN2z2jF1IUvMRaUnjFfnBLJETKnbS1RSoqZRmXp2iGUpHMji5Mj3vjC2zw5fwaTm/fo9AQvI5snW9rNjkwIClnQZDO8cWxe7CnLilk+A759EPNLUVhFoB07Mpm6QUoICP7WBp14MhEvEvY/Rp3wAzGJK0GThSR011IQokcKTZSRqqwSSTskC7Wa9EqQ0AqEmMyCSqGKpEEQEfyksXLOT+MPifQeGTwChcw0UQaETIWa1JogAJ2QDn7sURFs34P3Cbw5idNdCFghGV0gGMfm2QXdds/vePcrLOZL8iwHFDLL0ZnBdzvG/sBoR0yjyRYN2SSGPew6iiyn6w4cugPVckGWz0ApimbG8mhFLgV1keOlIMsU+/0W0/ccnZzw7OMnXLy4YrPZU1QF81nN/GiFyEuyoiIg2O5bpIgM7YY8r1AqZ7fZkRdJtKt0hpY5ZkhauCiTqy0YixYSVWT0+5bT+ZLD1SVWaaLOKZb3WeYt1eZXiWWDGiS60Ti3oikGLg8js4Xh+PGvcn3yFrOj1zi6/hZy86s8X/0Ew8HShopSR67Lgtf8N5HtB3xj9rt4ff1XCMcNDBal54ixh3zJXo6cdn8DHe8xNDvy1ascecHOlczrGWL7MkjXUzFjzICU6paSnmt169S7sdX7
ENgfOrTO2G4Tf6Xf9ZRlwWw2Q4o0PsrzRIBOmsK0bnhgyVL/G0XyQcRbkXw22fWBW7H0LapBKaJgCjNWnN29w8lqifKBYj5D6JzODJR1Pt0Qk65E6/S+zmU96X7SpmSxmLPebCkyePsLb7M7HNjZyH675/mTZ2iddvPNrKHKc7SCO2d3aJqCpy+uKGczjoKi7cdJI5k6xQSBHTtMazhXFyghub5Y82Je0cxmmHFEa01Rz/iBV19F65xvfP09Pv7wKcNgEido2tgJ0o020xItHVqTgr3nDbnOqDNFWSbwoFYpz865wA0vKwTP/Ttn6TlXGXmlUjc999hdh7OGqix4443X2e33PHn2nM4Fnl5uebHpQWqUygjRpTw0pYje0dlXicCzpy9uX1+IEzg0oV9i9Dx58piT4yWLOifPpxSCsmIcBrwxKODNN19PmlNZEwDrwvflvP9OVqLjey43G/KTjMV8wXy1xBLwwvPRkw8ZlmmTsDieM8jI7tDSH3r6fUumMryLZFlBFIL1vmez70BueWUx55Wze7RdBzadZ/u+xVhH1w/sdntGYxiHgfPzF9xbzFneWVAtS2STId0I0uOjxQaDcQPGj7hoIHqE82yvr4nGUZY5ziQ+WVGV9FZivWP0HhUBZxFptgLRUmYZ86ZhfXlO1w1JPF/UHA4d+ycvOAB5ltF1Pc5ECArrAy7kdH36XUJAWUmCV2w/2uFchKgZxpHrdUffW5TKEEJRVCX7ITBcHbh83iJExAooFgUyaMY2MmKIPkGxz1ZzZC0Yh57eQDA7yqKkqmpef+0tzo5OuHx+wdAb2m6g3x8Sm82M7Ic9Q2/pDoa2Gzm0LRebKx69+YiPLy95sj5wUiyIeUPpj3j2+IqqzjgcDtw5e8Tv/b2/n1cevMM4esoyYxwNIRpErhB6JKIRAbZXV7T7PSrT/MBXf5C/+usV/dBTVCXLoyP22w0Yx+bFFXlecmd591YesZjPqesKeP/bnqMvR2EVU75VmWUpFNEHlIgURX6rC4FE9A5CIEg6DIgoLRAenLVpNHijRwrJoVeUNSJKtMpQQt3qVW4FvmrKCFRJlB5jIuNGwE8EcHUDCoQJ1JdsyJlMO0Gd5WmMJARCSfABE8I0504MnExpPAIfHcE7otTYoccfDpx//IR3vvIVjs/uILI8RWQEgfeRfkzp8sZanu+uOXnndeanJwz7A9vDGjMcmM0bmkXNbrPm0Pas5nnqNsTI8fExpu/ou47dfs+z/YG7Z6ecrpb4ceDkbIl16bnTOez6PZvLa+7ce4XjkxT7c3q0ZKhyujbpOaSG47NTdvsDh24ghA4lNc4lXQnSJXemEAlwpyW7rqXI88QFiRlZNkMEcOM1o+twdk89XsL6l2mKyGb2FaTasXj+mMZ1XC5+D0U/sOx/gW+tvkCW3+Gke4+oV7jmASfWs9x0mOoH6I++QNn9LCG2FCrj3M7J8w3LHVhV4MM1plcUdU+0I4GCrKzZbA+EqG9hop/lUkrx4MH9xNuasAtKQlkUeO9vwalX1xsQSWCsVIYU6TxWKjGKrHUQUtioC8mddkOavxEye+dvxcoA4ziSZ0l/dwNkBW7ZVzcfO+dwcUDGgdPViuhGyrri9OwEEQIqK8jKilxnRGfJmgqdVbdsurIoiDZ15AQgpeL66hJPYLWYs5jPyYuCYX/g8mpNJPLotVdZLhesVkvu37/LyWKFziTz5TwZXLTGjo7tZo9xlnZI8OCUWy7x1kEIZEoifKA97NlurqdOXjpf9+tLttsdWmXM5xVHqwWzSlKXGfP5nCzPyHRGgpH7W9eyVFMsjEjP4434GCGRMeWXSaXQWc583kyatkjwDm8tQ9+xXV/T7ZOR596DB2w2a54/ewbBQnDgJYm174GQOvrecufkiOUwQ0rJ77zz+i1ZP3HgfBKui4jzBuMC7TDS9p6r82uG4ca+n7JKkYKPLnYsFjPGcaAuy5fiPXGzBBIlazwD63VL11kGZ3FC8PDoGC9SJ5cI28OeziWm1XAwmM6Sa42Wqahs+45tOxCExvSOUGqyWOGGjvPLS/K6YvCW0Rt6M3K1a1ktFxyf3SXPNNthx+LsiGpRY5REyIjUkGUJ2Bow6CzSLEuiK4j9QA80ZYXMFVVRJgI5YL3DT4WwDY794UCwhhgdSkS8t4gYKIuK6+stm92e9b5NAdki0LegZw1FniPCSN/2jIMheEHXxwlvEsn7OGEfOqRKsVXOS+yo6VpDXkqkluSixAbwTjJfpOu9dY6ZntENPf3Ys1jUKKlYzApynRMLhQiRbhxxY2DwFhEtY2d48nHapJjR8/Tpc548eUqe59y9v6CoMtpDz3bTst4ceH5xjQmWPowM0SBlxsXlnrzKUetNuv86cB7e/sIXefdLX6bdB9ruQN1UiXMYHOwD0nqMTvfl9dU158+eYq1J8E/vscbQtQcO7R68A6cxxlLqmmW9omlmyRlMpKmr7+gcfSkKK6EL5H/9f44FLPG2iOnglnwsIM1I00/wybgmTv8le8JtARTSjeV9nSWnkRC3dGEhxZQhli7wiWl1E6EjkF6CT59PF/20TY0h2ZnlPnXQbsotNXW3bhhAIWqizxK1OqbZpXCCaD7Ry8Qh/RPcaFBvvEnmcsSFQtzAH31yXlmzJMY5ffMQNwvkoYEnArzHukTC1k6ngm6eCj/lBbJXKDPtlq0lqoA60ti5w4dAbRJJnubmOYtJhyMlYSYggLhMX7kpQhECESV0Iv1JKubEMXITM8lNLKjhk9fjxm2Z2RJ5yImoiSkmgCNU+CpK/SBq/HGGwzEyjPS6Qwuo7Ck65tirARMVs/AVvMoYfYeQ93B6hsVTMlDaU6KSDE8v+X+I34e82kL4IVq9oPI7ImsCfwTX1ShGsuuebjsnonBEiA1r83LcQIQQ9H2bRhDGkOd5shqLiNYSrRUhKCAhEUI44H1AoCjylGBQ5AVVVaKkIss0IabXJvGrUkcKElW8qiqUSgELTdOQZ+oTi/fkAvw0auHGVWhCpMwE1jsur68YxpF3vvQuIXjW/UhZNhRSURYFeZUCoNNmKaSMMutuCecJjTB1kQUYMyaBa28YRsPx3TPyOgWkBylYnRxzMp/jQkSIDKFg9I5IpJnPqGJkpTRDP4KWXK3X9F3P/bt3k9MW0FpMxUm4dUsKIXl4/z5CpmLrxjl84wLxANHjw3S9iOlqYG3CVMTp35PnOS7YSeoQsK6jLucUdY7MkuheSsHYd4gY6PcbRHAcH62SuUAI7t4540vvvkOmCuq6ZrZYIqTi2fPn9ONIUzeUmeQf/32/h5/9S4+xznNyuqJtWxbLBp1ldIeWOq9YNiWj6bnc7pF5yclRJHvtjK5tCTGyWa/pegsoguvYPt+Q5YoPPkzF18uyvI8IGoqiYnt5zTgkZ5xqSo7GiA2TflAKIjlm6PE+0HYW7yHLc1rrOYwd/eAxBlarJbNKErzmw4+usAS6UbIeO7a9QWSRsimIQVJGTZk3mBg4vXOPt979Esd37rBuW4TaIWUkLxRCOnzoMH7ER4OQnrIosGWF1hG0YD6
f3wajW58ycrWQOANxiFhrsGZAiYQBii5lg15dXvGNX3+P88sWlWuqOqfvelZHlpPjE8YBdltL35s0PQgZWmf4WLDZGXbbHmsdRa7YHSxKZ6SoKcFgDT4GehNQWcrrzYzGBEleNEQKCI77d4559dF9xv5AKSWXL57iRktTz9F5NW3+DLtNyy/94q9wdbkl0yXjOHJ+fjkVK4Ln546qysh0en6dCejokJnm6tlzhmARMmJM5Pr5geBHskxyaHfcuXuft95+kxcXT2j3DsgYbcVoO6K1uAh5GTEkBHt3teHq8XOct+z3W0b9kCIreOX0LtuipAHE1NS4d+8hZ6f3yfMUst33HXVTfEfn6EtRWJnz9w/f+jP/ytc/48M45dvQ4T8/hv+/OYbXPsPHnlbETi6zEOPEjBrRMlmGdTcQfKCpG7LcMQzJYq+kICtutD8CnWnKosR5hzMe58JNPXxrQlgua8qyRKlUNGW5AJEwFTfd4qIqMIP9e4T9Ukoy4Rm31/SbdSIzqwylNGrwDG2KKnLe0203t67FSVmPcyScgEgaMibjR8TTm45h9PTG8eobr6JinPAP4hYcyl2FVBrnR6q6ZjSGy6s1xjhCCMwXC66v1+y7Pd3YIYHzqytOjo9YzGru3zlBC8XVdpu6OzYlHyyXS4QUbDbXCAHDCCEKvE8jnKapGceBpqm5ySodhoH19TW5UDRNw8OH99msrwnB01QN1hqUrlguNMEMqJin13gYyLSmLCqKvKcoBcN4YBz23Lt7wsMH9zisLzg5Pqasal6cX1CpJa+++TYxeFazkqNlhg/p37zf7Pnow495n484O7uDlJLd9pu88/arPHj4gF9670OuO8+dWcWbr9/j9YcnNFWGM4b9rudqfc183lDXDZv1ga/9+ocJFvqSLO8iV5cd2gWGncd5kLkAGXj/g3NErpEqQwC7g6HMKuYnSy7kNcNoiTpPcWNGkFcZZZlwIuMw8rztcD4QM4GXEZErdN3gZWA/GJSG3fMLHp9fcbJa8c4XfpjF0R327UiWlcllVhj6dkuhoakzfBgY7EjEMhqDsYkNZ6bXqywKRkjpIiqd/3iPm6QjbdcigifXir49MHYdceIDBgJjn/TEXgXcuMH2oLRmuxvZ75KBIc8LJDmIEhvBeo+UBdfrnma2xAXBdrtH6wI/ZdYeugEfIt5DKbN0vbAOExxCRl48uWB/vSbXkuPFjLFPUxoqjSCNG4fe4R1orfjwgycTMDcjxkCW5cntLw7sDwdWc42I0B8OCO+QMZCFkrKo2LSWqqoRQ481Iy4K7t8/5h/5iR9itix5dv4hSjXkuuJibWnbA22743jZsKpzqjJHBuiuN+zOL9nvdxgzIF6ZfAOj4858xWKKySqLGq2LZCBwnig8++2O9dV3tsF4KQor4Osxxh/7LA9ACPG3Pj+Gz4/hZVneB9brLcbY246okJK6rrFmRMTIrGm4c/eU8/PzNC5EUVZF+hkhsSGgvMe41F2SIqYLWUzwUaU1SkGWSbw3wNRx9ZZwo82BCfPgcc7iXQruRaSLpbUGQWB98SLBP6Oj7XpmZUGRa7p2x+XVBVlegJRUZXkryB/GkcdPL7j/4AHWWvI8QynJYjZDKUE/HNjtD4zWUzQlTZbfCui11rSHnm8Mz9hutwBT3I5lsJ+AMrPzNUx/lyIxoYbBs9+NEDV50WGdoyga7GgZg2G0A8P1DiEFXWc57PdAulk4b4GQopBERKkNeZ7z4MF99vuOQzcinKMbhjR6lbBcLgjOsTpasDya44KlPRyo6hlZXtDZgQyNkBJVFAztSFOvOLQDFy+2RASvv/kqxyfHXJxfQlbz4LUTeq+QQnPVWrYfvcC4hJhY3FuycnvefOMN8kzz+OOnLOURi+MjurFDK5lGXWZktqw4u3+EU5DPZ9xZnEFeUpRJbtHEjB/8Ssm8yj+Dd8Hff0UiwQtCkChdIFWJKDSUOYfeU2UFoPDBM1rJvMrJdMlydYTsBoyPrBYNp8d3ePLRU8a2p8xyRPDsBOy9JYSI0JJZPSMvNdt2x+5gSbE0EaUAuSOQkeUNWVbTDT12sATj8CalcwQ7UmSSTEX6ocW3BuMcWQZt37HZbrHWUs1KNDl9u8MZQ6YUPWBCxLiANyNj8HS7HevrNd45ciXJtUAoUrB4N7Dfe7xL8hCiIiA5tA45BHTmGFJgAW50FFpjo8BMUGupc8qqZrQDfuhBRHItiTqm9AapcDHg+55ZXTKfz2l3W8gU+9ARvMWNhr67xE96xDDlUCqdYJtaZ7cduoQ28Rhn6EePki1N2XByvGK3Sx34Rqci+bq3HC+WLO83PHn8FESkqHMWqxnf/ODXef7snNdf+yJda1Aq58mTJ0lPJkGPhkcPH9CUJcFaTpdHSOf5u++/z36xR2vFxfYZd+/fQ8hkoLu+umSxWJFlBRfnV1RVxXu//jWu1t/Zfv9lKaw+X5+vz9enVoywvW4TEDZGrHPYCHmR8jSPlg3L4wX1rGTWN0hZUJXzSY83IrIChGC3P1CWSVeIHzHGcHx8zDAME/cq+3v4VknTdcO/khSzGXVVYbqBbuhJ2VkZ2+2Wfd+yev0eRd2kA99eUJYancH1bsuDV15nNi+p6xnRywRZbFsuLy9RStN3e7brS+7eu0+e5Xzta1/n5HjJ6698GREjfdeyXXfcnz+81YTdhA+3bcfBplEHpHiPGAMBeSsfCAiKPMdHSQzgrEcJxfnljheXG7755CmQvhZCRCARMUUFZVmGVpokKUu5YUJEskwRbRpnKhXpRsfw4ROklDjrkT5Qz2tEVmLtiC4qdNZT1hmD8/T9iNAFT55c4Gzi2d2Ejg9dj3KB1eqEiMLYQD+M/J1f/gZFUTIMI8+fn9O2Pa0JSBFZzAru3Dlls51jrOc/+msv8GPHRx+3NGWGsyNFluGCIQbD0BveeHiPy0PLr33wEb/8zfe49+AeZVUx7ke63YGj4wVFmaGk5NnTFxyGl8cVKKVE5xm+7ZFSUVQVqiyJdUG/3zL6m7zXiM4bhIAnT58ThMBF2B1aytLQ1A11U9BUOW6waTymctA5ZZExW9QsjuZkuUqU7rHHheTubpqMB/fv8EM/9CPcu/cQR+C62xAdFCKjtxFGTzQOb0e6fUvX9XSHFhkNUaVs2vliznK5RNcNrYnkSqWQZimJyyO0zhlHQ7vb026u2VxecNhtOT9/gRl6Xn/1LoPzScahcoYxmTa8F4QokDIjRIt1ARMsgz2kNAMZMMEzeE8YR5T0BAJdf2C0KdqlqjV1XSAlmFgkuU0IKawRGLsBLTQqSobOUJcFZVPR9S3ISFVVKaNwf2CwkwuzJKWICMHusKcfHCKPFEUycuzaFm/2FHnGoe0JIo31JSkM3kxif6Wga3t+5md+ZoIgS9abDZv1gSKv+eCDjzk+PkZaS/v8Ba89eoVX7j/gm9/4Bq+/+irvvPMFigly7ZzjG7/2dT748AOKecV8tWS73XLnzh1CiFxfX3N8fMy+29D1++/oHP28sPp8fb5ewmWNIXhPphTOOvDJ4TX2HWVWs5jPEA
RePH/BixfXtHuDc89wzhDxZEXSBcxmM7quo25q5lWBINJ1HYvFAu89w9DS9zcRJ+mCSYxI5G3OHD5wfXFJ3/U4a1BSMk5i60xFVkdLFtlDHn/0AVIEyrLCDAN919H3HV3X0jSp6Ou6jq7reOedd1gsl2z2G4qy4unT51y3A33bUT+4C8HT1CXvfvGL2G9+jIApG1Hd0uO1VigvESQ9pFI6RbN4m0JhAeED3ngiGhcg+oRZseNAFJEQFEqqqXgSadwhJUKk+KcY/MQNmyjqU1yTdQk0qmXSb3Y2pTNIIVEChmHgSC4ZhoGnT5/x+oMHbHeOn//Fn2W97xldxLskRr4BEGutmc8yftdPfIn7949QF9cE2dAPGbbruXf3FKU0bz68S1lW/NLX3qMqCx7cPUGISP407fILZ1gsG770hdd49OCU4Bz7MUW9XF2dE6zj7OQuh49e8Au/+HWcD3zzm5foTPLg7inROX7x7/wyX/7KuyAcl+s9/fjyFFZKSapasTuYNNJWBSYY9puWwzjixxFtU/C7CXC53d2O1GNMoeXROi6eP2U2myOk5GD27M2AjRKVS1SWwLGmA5ziqMnJRMViOee1Nx+xOl6yWC559OojlBT0vSFah3AB21mkEyl2Zz+y2e847FvW6x3n5xc0JagdVFXOvQcPOD4+pnMSvKMuSrJC4sYRKxWujIisYLSO7XbH9eU1Xbujb1vwjqpIQeXR+xQPI/UUjyZSRE4WcSESfSDLC4RWZAicHxjjSJBgo8O5ZITwziMFFIWmzHXSdhHZtXsgUld56iiHgOkHog9Yl8ZldVHTVHOE1GyGA0LlzFczhM5wztC2Pd24Iy8LyrJkeXxMaRxOGKzrQecoqRhtx6zKkcYwRovrd5CBFYHD0DEaR1WXBCQffvyUpkm5ort9R/CCuppjXTKNCKWJUnK5WXPnzh3KpubZ+Qve+sJbvP7Wm/ztosB5h0+yYqTWjNbQdnuePpsSFoae3eGKPMtpZiXQfttz9GUprP70Z30AfH4MN+vzY3gJVpZlPLh/NsWKJFNDlJ5Max7cu0tdlQx9x9OPnvPh+88xxvHqa4+YL5bEaFEijcyKskAKOcWZJMdfQjkkQbyzKXfzpluVRoEBpSRmNOy221uhe3AOIbiNpcm1YnWyIlOSvfMcup48V9RVydhbvvD2lzi7/5DF0QlK5/Rh4PjOGUfxlGEcUWOP8yO+tRyvlmiZYU986lS1B8ZhINOS0+Njep9CdW/y9lLIcPLHCQFNnfLuyiwxgcQk6q+qLOWhSfBK0CwqJBElK/q+p5920giJGSxSCZQmFadaI0ScomhUigsCyqpIZgBrmNU1q+WKy8tLrLWsVktOT45SQbnfMKty6rpmcCMxwJfefZuIQsgsFY9NyeXlFTEEFosFJ2crVOZo+wGVZVgf6IaBqsgQOtnxt4cdzWLO8ekpT58+4eHDO2mMqgdqJfmxd14hekNWHNgceohw/WLETNDgoTdcr79JIPL6/WPefP1NvDXcu3dENSt5/PQFiypnVTc4P/I7f+oH+At/9n/3Wb8lbpf3jn7YYPxIqTKyPIVgt90eJyXGWhRJyrfrOnQ0lHmBObRoLVk2qbsanEXiMXZEZqCbjN26py4L6rpCEYi2I0TB6ekR77x5n//y7/pxfvdP/W6KpuDQtmTZEYfDnvVuByGihWTsBvzo8IPn6sWaXdfiYtLo2RCwISVBPXrtEV/+8pcoi4JNNxBdIJOKQmqEdYzGM/hIbxzr7Z4XLy44f/qUsW+BgDeBbr9lux1RGkbn2ew7tFasVquUquE81geM9yl/0ybjl8oDMlcUhZ64kICFPNfUVU5ZaER0hGBThq8OycyhAOGTdjOboaLEjQbTj5jBsPEHvAgEBDZEqiynqGqk04ibOCsCUQmWi2OUlhzGlqv1FSZGijxndqxASlSlU4xO14ES7ExL5w2z2Yo813hnWcyPqKqCrusoipLZbIEZAlVV07YDOMfgLdoqPnj8MUWm2R32/PLXv87J6YpQxjSCzzOciFSzhqap0TpdB07lEet1em/nmSIn+47O0ZeisJoCmT8/hs+P4aU5hs96SSkQIjFsUkaf5Xg54+7ZGUppgvd0hw5nHHleUGQ5280V9x+8yWxxhPQCJRNQNITA0PcMPu1KF4sZdVMjEfRdyjHr2h5rLB/uI/+jv3x1G8J7E5sTY7jFCoC9/XxZGqT8eMrWu4Ozx5MQXsNO4D4OWHuZnIUx3EajpJFeAoGmkaPBO48PNYJA+UvXdO2cQ5vhwj0ikyPXTgy6ydF7axUeUoC7UknHlRc5FotE4oS7zTpUNsXKSJFC2J1KLC+BwGc+/VtTjAOKKf4qBqSQtzFAelTEIWUq6k4irxUhVAlrMGh4DsRm6kRNvHvRE4lTriMkIFZEiA4/MaKE2KFUCiHv+5F+GFJEn9BEIjpLr4uzEfl3nk4C/QX5i2sgcuVy7uSBdisIMcPYjOempy4y7p2u6Fzk4+fnyZUb4dUHM9557YRcaR49fIsyg+dX1/TznOzVB7ggGQbBdtvCS5KfCUkjJDNN0VSIEDExBV1XZYaMglJL0On8mNUZJTXjMBJMQMZIVgZ8cORas2pKvEgOQBEdJ0fHRETiikWBRDJrat566zV+8qf+Mb705Xcpy5L20NJ3Ix9vv04/jJxfXNLUDYuqYd/twFs6N3JxdYl1jmY2Q3lJJgvMaFjOGt5598vcvXcHgkH6DhkCMmpypVFFRhYGjPB4M3Dx4gXPnzzl+nxHoVJMWV40FKpEiQTZddaAFNy9e8a9Bw9Yb7e0Q8/Ras4wJm1XP5h0XipJWSVuY/Ax4Sk0KCnIS02W68T6EpKqLnnrpERKRd+2+NEitKQqazKp8Jlm1ApnXAqMzhRVkZFlMNrEVyQmVqQQgr5L+ARnYxLMVxKtc2IUKYtRwMH09P3IYpUzGshyzXAIWKvpRE8UBcPYcXS8YLvvOD/fcXwcEapkuzkwDIb1umNRZyzqnOZojomO0Y6oJuOD54/pQkucJy2YyiXrzTX9tzqOjo/wwVLXJcfHK8bRcu/eXUByenoH/vKTb3uOvhSF1efr8/X5+o0rQsoJUwIfLadnx9w7PkFEsKPF+oCNEEXg/r0FTVHx/MVzMp2s5kHAaAwheA6HA8ZYxt7TNDmLZY0UAWscZhix44iSmt/7Rs3/5yOTNBohRW3cjN1u6OtCyhSZA4DA+4D3EKNA6QIhM6TKiIJ0AZeCLM8SpJP0u26E5Te6LjFBN5VSKK0IbooFkRIbUnhqJCakh0rCeRHURDxPxyGkJNeKpi6x3ifXUZ5hrZsu6OmIQwgIxO3xpeIz/XuUkgn2IhIJRMSbxAVJpiR+GtvFCSOSZXoqvFLRp7SajjUkyOwk8nfuJmMxxQyFmMLLEwNynMaQqXTMtMYai9IJmjoYlzLvRKLUA+n5nzAwuc6oi5yyzDkBfverOT/xVklRNzx58pRdK1FKMz85wV6tOZnXxCiYNQ3vvnEPrRIAtqoSX2w5eqSq+aY75/nj51hj2e474svDB6UoSs7uP2J7f
Y0dDPVsxkpnfPz0CblULI4XvFAJXvuVL7yCu96y3+85yhVt2xJthyRS5IL7pwu0Unz04YdUs4IWz+gmEKwQOBs4unvET/8z/xRvv/VF1usN7brHjI6PPn7OKPdkeU7Xb+m6LcN8gbEGMw58vH7Os+05mECZZ8TB47tIFJpXHr3NV3/kx1gt52RZ4HguyGUghoQAoaxYyQ4ZQMWR/fUF/b6lEIKTxZzRGoqmpqrmnIqM6+2OvMkRfY+Jls1+zW6/w5iOsipZzgtCzBlNlrhNgwXrUtdOSopMIguNtQYXBmTI0bkmz3N0UUI7kmcQbCC7zd5N6J6AJ0qPyCHTGboQGNlibXIhSyVQqqAsCpQqOX9xRbsfybKIIEc4B9GjVQ5Ck6C2GmskIuRIp8lUTSYKmlzjykO6YASJQ6DLGlUciErSGYMsFHVRcRhH6mWJVp7NkGLXpMjYrjfsdwf0XIAkdeOjZTaruXP/HmVdcXFxweXFmq4dcTZx07bbLWXx7eNs4PPC6vP1+Xppl3eOotDMF0tOT04S5LAfCBFsCJjRTGT2jHEcOD4+5sMPHvPs8gqtG3wIk45qwDsHHupK8+N1w/OnF1R5yYN7d+jkASk1f+gdzR/6oqAberJKo7VK+ARIdG+XRmEhpMIh3kblaIZxINMZq1VKl3/85DFkCqU1hdSoqfi5Xq+JQD2fsdttmVdVgv19Kj7H9wMPHq5471sX/KW//HOMLnJ9dcHVxTV3753x4NEr5HlDWc5QmSIvSh48uM9XvvQ2wo988OSC7a5lHC1t2zGOlrIpkVkGUXF9vebe/VO6doeWGuc8LkSk1IwuWeAzIVhWFXVTUc9rZssFg7MchgEpNP2u4/zikqh0+rlxxNrEwgpTzl7SlLWMxhB1nZ5L55EukOuMXDpW85rV6pSyLJktGpaLGh9Hcp1xuTnwC7/yPpveMps1FFmOGUa8ncj3UvLw7hFffvuEt167h5SR7XZLDClnbj6fI5TmcDgQx4FllXO2ekiYxo5locjzjGEY2O125HlOnufIwdG27a2pYRiHWyjsy7CklAxmJJCI/4mHBPfu3KFuGrKq5AVAjJjR0puBwVuKWU1W5vR9whUQI1fnl3hr2V5vyXNFUJpFk0Cru92BR3fv8Qd+z09y/+SI6+dPOBxaQPDxR4958uwpy7NjqDWVbNi3HVvTY43j2eOnfOvXP+Lpsy2ZUJRlT1CKvM557Y0H/NR/9ad4/e03WR92xEMCWdfzBmMFh0NLVc4p64oYI03TJD1idYmNsN5t8SHyYt8y6w80qwVCp82Q94bNds2h3TMMI8ZEpAKtE1eqLDOszSnKHqUl3geqqkp6RaXY7/dIqcizHK0VRIEdBmx/YBglRVEwXzRsNlsObZeC3H0ap2d5RlOXoAIx5CgZiHHEEynLjMVijlbp+e+7kfVmz3w+S+8X5xA2OY+rqqCuc9q2J8vSuNIHh3ASnU9wVSmQEUSIaCVYNg3docWPhtlsToyBu6dLVvMZY3tgt9ux3XRUZUlTL1Fk5FmTIupkpMhneB8YOs/VxXna5njFxYs1QkjeGz9EK0W7N9/ROfqZF1ZCiN8P/ClAAf/7GOOf+B49zr8L/BPAeYzxd0yfOwb+z8DrwAfAPxtjXIvkM/9TwB8kcUr/hRjj3/4uH/8R8GeBu6SGxJ+OMf6p7/MxlMDPAAXptf/3Y4z/uhDiDeDPASfAzwP/fIzRCCGK6Zh/FLgC/rkY4wffzTF86lgU8LeAJzHGf+KzOIaXeb333jcPf/AP/VOfs90+P4aX4fHhpWC7pTVaw+Pnz6nynDrL6fsBawx3z+6gpMIbg5YKqSRXl1fstmusMZwcH5FpTYiCsqioioxcZ1TNjCYvadsDq6ak7Tv2+wNVXvLWK/d5eHbM+7/6K8QISiouLi75tV/7egoolxUqFFRqwcFZdm0Cbz5/vuXJ0zW9jRgCT843vP3OG/y+n/79vPWF16lqza+//x42DGS5ZjZf0jRHZPmM6+2OYhgZiPSHlqHv0yYpRgZjWC0WGGvJJKiqYDAGH3ziXYk0tY04puhcUjxuoOt3WOsoy5LjoyVZltEeDmns5z3RR2Z1leKYdIZzLlH5nSWvMmKIlHVO1ZTsDlui8ww2pTEIlTEYBzJlNGb11KGNTCN0iZQR50fAUtUKHxXNXONdoCgbqrrg+voSY/qUkagCzif9WFNUVGXKGnW2Q2YaHQXBOKTKmFcNY9sjPJiuJ0w4mmAjWlXE0NO1DmcGikyjVcFhN056Us3dO6/w9OlTri/3XF2tKYocpSTWJF3qYTMwm8+o6+9sJP6ZFlbTzfV/C/zXgMfAfyaE+Isxxl/9Hjzc/wH4N0k36Zv1rwL/cYzxTwgh/tXp4z8O/AHgC9OfHwf+ren/381ywP84xvi3hRBz4OeFEP8R8C98H49hBH5PjPEghMiAvyaE+EvAHwP+1zHGPyeE+LeBPzo93h8F1jHGt4UQfxj4k8A/910ew836HwBfAxbTx3/yMziGl3l9znb7/Bheisf/B1nfjw2kD4H1oeNiaKmVpCkVq6Zhv9/Tdx1IiXg3pWuEEFB5DkIQhcA4hwuBtu8JzrKYzZBCkecF4zAQnUf4wNFiwWp5hB1GfuY/+StY45BSJ1v/es3V1RUPH76Kjxu8Fag857Dp2bUtbTfw7Nkl15sDPihG78hs5MHDR/zQV3+Q45M53/rwPWzomS8bRmPoLi7INgcWixM+fnKOkjmHwdL3qVA7PTtDRzh/8oz9bs/gDMvTY1SRcbnZ4IJneTRLrDnvp0zA4jY9QUqo8pyKfGK6Ac7jjCE4zzD0ZFlGXdfJgSxCilYTkr7vMe5Akee34vW6ySlKxYsXFwlLkknGcUDIHO8ctktd1XG0xAjD2BMJCYxqOxbLBc18yXw+Z7vdUlUlTVNhTEUk4L0jywVlmTOfLzhanaFkzovn5winkF4gg8D0I50NZDqj0iVVVWKtS8YdmaUEFZGRZyUxBMbBMbQjJ8crrHPEmIxCZ6f3uL7acnmx5nh1xHK55MmTJzgTmDcz2sOB4CJKfGcl02fdsfqdwHsxxvcBhBB/Dvhp4Le9sIox/owQ4vXf9OmfBn5y+vufAf4qqaj5aeDPxqTQ/VkhxEoIcT/G+Oy7ePxnwLPp73shxNeAh9/nY4jAYfowm/5E4PcA/61PHcP/jFTU/PT0d4B/H/g3hRAi/mb89j/gEkK8AvzjwP8C+GPTxfX7egyfr8/X5+t7sr7nG8gYI7NVjWl7lIuUZYHOMoZ+YHM9uViHIYFtrU8ZkirD+4h3jqquKTONt5bdoSWGyHw+Y3F0zLPz5/SD4eRkQVXP2Gz2fPDhr3F9tZ2QH5GqTGOycYzsx8fkZcnoHINzeCFwLqSR4aRRjN7x5a98mZ/6yd/NvC457DaoGLE2cPXigq7rsd4jZMZ8seH99z+imS243A08e/qMWVFxeXFOqTWWSOtGoogMzrJYzJivlqy3W5qmwDl962K9Yb4JAc756fMpxL3KCoQXmDxLDDXr0XmWotKGnqgtkjLFshHY7Abu3tW4
YNgd1qljlmUcnyzQOgWwuzAyXzbEWLPZ7xJVvlRImdyGWktmswqlApGUT2tdj/cj223PaDrqpuTk5IjLy0v6fgCRuGEhWPTkAPVjYiOoqFBRgo1ApNQlmUi5vVppdtd7Zos5KJk0i0ITnCD4SdcpkqA+xsBuv7t1UAfnmDcNZycnGGPQWpNrnfI3PxVi/1utz7qwegh8/KmPH/Pdd2X+QdbdTxUqz0m7rP+843rIVBh9t2sq8H4Y+Lnv9zFMXcKfB94mdQu/CWxijO43Pc5vOIYYoxNCbEmjuu92LPG/Af4nwHz6+OQzOIbP1+fr8/XbvL4fG8gbvddsPqNEUwiJNYYqL5k1Nc5ZtjFlO86bGaJXDMNA3/YE73BS4jJN3w2UhcITObp7Rt+2rNuR3b5n051zfRgZ+pGLqz0SjfeWIq8IWUbvPdeHARtgDCP7ruPQ9cgsJy8KnBsgOLQSzBdzvvqDX+F4OefqxVO67oCLFuNGrDO02wO7wwHvoVv07K/X2G5E5A0yCj766COuLy4pdMZh7JBFjrGG9WGP04IoBcb7NEKTkjzXCJkMJzEKqqphNCPb7R6lFGWVk+lkFCnLcioyBEWRYKR1XVNVKdOv73tihNm8RGqJjw6iAhnphpb5fP6pEHKBsQPNbM5pcYpzjvV6PRVUM7JMU1UVWabY7rZ4bxmGZGAxxqAzSVHMJ9xBYrUZ4+n7nuAl3h8YekM0CQSslaLKS0II5DolA4QpezThKByH/Z5iVlKWGTF4Ru+RMhJxVHUOMeBc5PHjD9Bac+/+KcMw4PxIWecgE+NPqHIyM/xDorF6WVaMMQohvuddECHEDPgPgP9hjHF3Exvy/TqGGKMHviqEWAH/V+Dd7+Xj/eYlhLjRuf28EOInv5+P/Q/ZehmQE58fQ1qf9TF81o//X2j9dm4ghRD/IvAvAig9wVTrhpPZEtcPXD57QV2UzJcL9vs9CMG4fMR7P/gvAUxmgnjrDpVS3AaLvwA+KvIkXr5nJ/wG7HJNCDfh2ymj0mUZ3TRiHMTE9pcywWt9Gn9ZIdFCUPuAmoTgP3N8zF/9Fcc43ptC7acw85CCu32RyPvSKcJROk7rI+FexJ0msvgBIEac90BytA4yOU2zEOhFcov+5rWLNzFAAScFV0pxHZOrNIRw6/y1UuCsYxAi5XbGdHwxRJCRjRTTGDHpjJxz7G+ir2IC+B6EIM8z8qwAIsZahhjppuNK5o4EImU61BSxFTBKspeSD9WUrvCKwjqHsw4h1YQlmbIUp9918/PpOZ1QKlJM+q6A846dmDReE0JGIOim16CvHpAfPmR/2JDOLU1ZVqCTMN6GkdGl11EIkTIpv4P1WRdWT4BHn/r4lelz36/14mZ3JIS4D5x/L49r0jX9B8D/Mcb4Fz6LY7hZMcaNEOKvAP8lYCWE0FPH6NOPc3MMj4UQGliSBOTfzfpdwB8SQvxBoCRprP7U9/kYXvr1MrC8Pj+Gl+MYPuvH/y+yfrs3kNNz8KcBilJGFSK4QH9osf1A8B4fEhjS73fI9/4q4s3fjY0J4wFTMSOAmHhiIrEyUzE06W1SELhIrjAhieITN2RZVuhMpyQEIdKoTaTAYkRE6iIFeTuHvLkRAyF4trsNZhxJKAGmr4OxdkKOiBR+nDgPqei5KWwiSJl4aqmuCFMKgLjFf9wgQKZSA/gElXKTzXnzR0mFuPlOkX6vQCBCCnWOPiJR5FmWulTOJ1eemNhx00sppLjFcCS8CLdRWN6nzlGWZcSQtFUxBkDgpgL0lukWb4qkhG+JMeLxxIxJ/J4c0sGn9AOtUtmSOpdxQsAkcDAwRV6l3y8ndIx3nijTzwtuCmyJ3n6L8ulfQwjo+x6ZZwQJvR0QQqLLnK7rqJomjQTznLQv+K3XZ11Y/WfAFyZH2BPgD/OJzub7sf4i8EeAPzH9///+qc//y5Pm68eB7XejbQKYdET/DvC1GOP/6jM6hjPATkVVRTIN/EngrwD/NMmV95uP4Y8Af3P6+n/y3WqbYoz/GvCvTcfzk8C/EmP8bwsh/i/fr2P4fH2+Pl/fu/W93kAKBLnU4Dz7dgt+ilEyhoNocTEQv/6XUN/4y2iZk8ssFSNKUhY5VV2SacUwdFPxkcCYUgr2NqLyCmctSmvMJL6OAb74xXfJspwXL14QI9RVRRUOGGe52myo5jNUXnBxvQYSDqDMSnabLTIKhBlZLGqc9WR5Yr31Y4LAFlXJdr9Pou0ijbWsSdE7eVHgg+fQd0itUfmkafKOYRxBCIpMU09jqzQCjCwWS2azhhi55dnleX7LLJNC0HUdbduitb6NnAo+UDc1JycnFEXBOI5s2w1ZnpyCMUSUVlhjMdagVeJfGWvJ8zxlc1pLXdcslyuEkGy3O/puYBwtYbTkRUmRpzBm5w1aS7JcYsw4jSEHyrKiKCraw8g4OPou5QQu6xQkn9AQacwYgeA9xhisc2RaU9U1OpfYaIgxIIhoqShyjXcGrRTD5KR0VQlRI3RBa2x6rrKc1WqFyAq8kPT9iDLutzo1b9dnWlhNmpl/Gfh/kXAL/26M8Ve+F48lhPg/kWb8p0KIx8C/Tipm/rwQ4o8CHwL/7PTt/0+SS+U9klPlv/PbcAi/C/jngb8rhPjF6XP/0+/zMdwH/syks5LAn48x/odCiF8F/pwQ4t8AfoFUADL9/98TQrwHXJMK3+/V+uMvwTG8FEt8jiD5HEHyDymC5PuxgRSA6wek0ogA0QdkrlE6wwWPLnLqTIEUlGUNXia3INAsFrz+6iPGoWe73aBIo6PoPcYaqhyaRUXXJcF3VAEhFNvtjq/9yi8SI2idk+cFmyvLo/mcbujpti19b5F5xqHr8DGS5yWRjG03oIVCacGz9ZqAmDpOiqIsU7xTHGhHg1SCzAeMM2Qmjd2klNjgyMuCIMAGjzUjbTsyjhGdw6zOWDQ1ACGkLtV+3yKE4vT0FO8945g6ad5D2x2QKkF6jbfITGGDQ2iZ3IRasd5tCLcpBWC7ka7riDGmOCyX3HcRiQ8CpTK0TvBQiIzjwHp9TZYVBJ/E+n1nAIWWFYIMokyZmQIwAe8g0wXbbcvhsOXkWCNQaKXRWuA9bPbJfxUilFmOzPMkVt/vQWkIERsipZDkRU0ua7x3jEMq7JyxaKUgSGLIEFKw2w6E4MlUTj9aRpP+Pd0QMSaQFTmj9WRRfWfvg883/5+vz9fLs6ab6zf4FIIE+G9+LxAkQojfTXKJ/tlPFVb/S+D6U+6toxjjH59Gt/99UlHz48CfijF+V0aTqXNx/9MOMuCfJDnIvl/HIIDm0wgSEgrkjwF/4VP4j78TY/y3hBD/EvCDMcb/rkj4j/9GjPG3Bf8hhPhjwI8Bi6mw+vPf72P4bpcQ4h8F/r/A3yXl2kLaQP4c8OeBV5k2kDHG6+n5/zeB38+0gYwx/q3f6jFmdR5/4gdfYV43mIlhVVc1SIGPkf3QoTKNI9n9JRrvPME7cqU5WsyxQ48IASkgVwolYLfbsfcjy5Nj+r5nHFPIc6ZznAsYY/E+kOc5SmqC86yCQmYZXkLnHIP3DNG
j8wIfIYTIOIxUZYX1IyG69LUQCT7iY0I/CJlGZPPlPDkOzUBtBd6kDpbKM4qm5nq7SewouP39EJExUPtAlmnyIiebnHpKSuq6JssyhnHEmtRZ6seOrNSUZcl2u6VpGoYhgWC11klP5lzKKtUaQfp4HAx5kZHpjL7vqeoqAYKHASklZVlC9CjpEUIy9CNVNSfTJW3b0+4HvBfU1Wwa8wl05igrnRILouP09JiPPn6Cd467d+/jvSB6yThYjPEo7YkxTsJ7kbJApw5WVSWoqjEmIReKEuc81hpynVHkGd5btBSYYWAYBo5XC4ZhYDQD5cmK/dADkaaZ3Y5S79y5Q4yR1WrFX/i//Y2fj98Gf/JZjwI/X5+vz9dvXJ8jSD5HkPxDiyCJMf41blU4f8/6vX+f74/Af+8f5DGUEJzM5ikMvO+Z1TVSiilapkc5T5XlCCm4NgOD8Sm/Mi9oDwc+3Dwj05oiz9PNN1eEEDhYyRAK4i7Sdp4QBFmW44JiGEe8S92qzd6glMc6xz7LKJVkGEZGY0EJlM5SIQdkOuX+megROkPJHKEUeIdzltEYxtFiXaQoFARB9BCdIKhIzCNRB7JSIeX/j70/j7Yly+/6wM9vDxFxzrnTey9fDpVDZVWpipIQIKNZCJCQmdVIYtkgwAyCtdQD0Aa72wibtezuZXuhpgGDTeNW2wzqJaZuI1AjYQYZzBIYSxhkjZRUKlWpsnJ80733DBGxh1//sXecc+7LzMqUlPVeZuX5rvXWvfe8cyJ2xImI/d2/3/f3/SVmrYUM3awjpUzOmZs3b/LiS69w986K49YQQuK4neHbls16w+beJY33xFB6Chpj0GzQCMt7KzRBGjNGLfNugRhhGAaMWqxx9Ouetu2w6pEcyWPVVCWDyY5xHSEbrPdoAEXojhaMY+Du+SXdvHSqEad0R5bNpqfP93DW0rYdTWMRiYRhxBhhDCOnJ0cM48Ct27c4PT0ipoTxgtGA+FlJY4YVXdfQ5zVt29AeO5DM2ekNXnjhZS4uLjleKI0tUaakAazFN8UCYjE/Jt6DjSqrkBHTkQIcd0d472maoq9q/Yxri1NCCCya+Zu6Rg/E6oAD3l44WJAcLEjgYEHyulBVXnq5HOowFE2OqjJfLFiv1iyXSzabHmsdwxgJ2SJjwDklpESqjbbDMGBCxIdYvJhmCzop/SLFJ1ztKRlUcd0cSZGu7WiPFOdKKx11lpQSYwyMojjjaJra3DgElus1IRQys1gssMYx9MNWaD1rWhrrODs7o+97XnjhFebzou0hrmmaQhDHzYZxo1hj6axFYiINI30fyccDM2dpHruOM4YwBjTDer1h3PQYMfTLDd65krqLiTCMoIVQtm1L61sCofafbHCm9sHMmRiKK3vTFPNPV41EBaFtWmbdjP+Ss0AAAQAASURBVKbxOOcIITKGAd+0+KblrGqelOIw77zFONmmE3POaE40XYcRSwgjYYy0bcdmM5BTrDq3XMmO4z1PPYMIvPLKi4xj8cEq6UJ45JEbLObH9Js11sDyYsMyR85OZ7UN0oBzBmOExeKILBHrO06unXLrlbuwWTOTFmvAiEdzJIyJu3dv473n8uLNrV0OxOqAAw54TRwsSB4M5GBB8rOCGMN6DFhjiApDLDYDw8US7zzt4oicMkEz2RgiFIPJDNlZuvkMYDu5qxbCBcrF5UXRDjUNs664eMcY6PuBGANN09I0HmMsYxjJ1QMqxEAmkTHcvnsXax2zWUMm4VuP9x7RjI4DJkVaU6vzjHC57pl7z6Jp6C+XdF1pTOxMMbYs+qihjNf7WtUI864jDBfcu3u39Pybn5LHyKLpmHezUo3nOlKMDLph1s2YzWb0qzWYjOvctifkVMHnvWexWBRbhGpHMZvNWK9XpaegN9XzytP3wqZf0XUdMYEYRUnE6k/VdR2LxYKLiwuGYSgkUYSu62iaQh6XyyWX50tSElJShiECI03TMI6Ro6PSmMMYh6pBRBnWA8ZA13QcLTrQWMTus46bN04JMbOYeURnaMo423F8PCNEx8nJgk2/Lv5a3gK5iOdtx2zecPPRUyAzhhHrFOchhJHl8h7Xrl1j01++qWv0QKwOOODthYMFycGC5GBB8mkQUuJyHGmblnXfY0OpFlPN+FysB/p+Q0qlnU22jpATQ8iEEGileEAhQoqBEEY2mw0pJcI4Iigxj4yhx7niYO68YJxDJIPJhBRIuYi3u65jQ8bPO27cuM6nPvUi5+eXzFqhcaZosqwl9QPaB4wqvmlwpuh3bAzce/FFmtZz0riqAyqVjiEGcsp0zpcIUsqc1l6BOWcaY5CUmTctM98wZqUVS2NKfVLjDdK05KbDGVOaqFuHaea4RRGgQyGHU2Vg3/cMw8AwDMznc9q2ZT7vmM1arIWYEtYIi0XHyUkhPsvlkpSKG3tMhnUtFgCYzWaI1PSiKRYLbdtydHRU+hGuRmJIXFwsSSnhXUsICc3CrFsQqiVF3/eEEJi5dW0sDY3xNNYyauJo5hmHJcvLNf36AlF45Maco+MzjBGWKzg6nuEbIeVA01g2m4S1MJ83PP3MU5yctrz00gssXz4HIuv1mmEY6Z1FTCa8SYPQN9dR8IADDnhQ2FqQiEhDqYL8rge4/6l6C15dvfW7pODLeDAWJA9iDDdrpIo9C5IfZ2dB8lpjmMb2llmQqOpTqvos5fv+H1T1dzzIMbyTYIzl9JFHOXnkJtcef4JH3vMkRzduMD+7TrKWdUysQqZXUOuqx1KJwIzjyOXlJWMIhBC4XF6y3myKZUFXIiA3rp/SNo7LiyUxDIgmINE2DiNKCgNDvy7O6ppJ/ZqwXrG8e0Eeek7nDU8+esbMWVojmBRI/RqXE60VbE7ldwMzazidNWgYyH1PYwQdRzxK6zyt88zbjpPFEcfzI+Zth0VojKW1jmsnp8zbDlJGgMZ5QixaqpwzQ9+zWa/JlTRu1hvCMLBarej7nqbqzFK1KrDWMpuVRsylHY7gvQdJIAljFeeg6zynZ0ecnh1zcrpATGYYN4xhQ0wjIlLNOg2np6c89thj3Lx5k9PT0624/Pz8nNu37zCOsUajHM61eN9hTUPXzgkhk7OgagghE0Jm7IdSDRoT/WrF0azj2affw9GsZX15l+XFLTRtSGGFpp4QBlIeWS7Pef755xjDhieffIxHbl4HFGMhE7l37zaadeskXwoDig1H0zSklGjb7k1do4eI1QEHvI1wsCA5WJDs4WBB8hoIKfKpl1/CSBGsT+kmAM1FiyPeE2LVOPUD3ayj62bMmhYxxbgzDOPWedyIIY2Bftlz4+ljThcL8hDomoZxGDHAbOaJFBuGpLBoO3yNTSxOz3jlldvce+mVbfRHU+Lk5ARjDKvVCiFjRGjnM2azOc5ZlssV867DUEw129qTLsZIjomuKY2UUyrO7F3TFH1WVmIIdF2HWsdlWBFyKtV/fSWKbcMmjKQQcNbRNi3OWVQTGtmm+9brNSmVPoLDMJTzUZs3F73ahrNrM7quIYSBGEeaxhNC4MUXn2exmJNSKK1iNJVG0LlUEUptV3
N0dMTZ2Rmr1ao441Oc2i8uLggbZd4d4azfVh5aa2majs26B8A5Byo0rmW9WpFjYDZzdPPibN94U9zWc8AaZXY8Y3m5Yhh6jB+ISVEt5qPWCtYK16/fAIQUS1ubfhN4+eVb3Lt3Qb+JWNOhahE8jZ+DCifHN4CX3vAaPdgtHHDAAQcc8I5B01i9+fgcW+0EoKSnRIRZN2Oz2dC0LY/evMknPvYz2Kh4L9u0nYiwvLwkxFitEwpxabuOfnmJUAiKd64YblaCFMaRoTblXa1WNL7hZDZnHEdWq9VW7D2fz0sVorVbXZQYQ04JzcV93FrLMAzklDDVpHToB1JOzLoZCgw11aiqW/+oruu27u593xfheVtSohe52JQ76/DOobFEocgKudhEnJ6cIiKs+kuSxu0YV6vVtkWMc46TkxOstTXFl7h+Y858PtvaG0xEb7Va03Xt1mTUe89ytSGz02xN2z0+Pubs7IyXXnqJzWYDwJ075yzaE2btHGMMm82GcRw5OzvbEubpeK0terO0DjhnmC883cyR80BIa46Pi1/Viy/d4/SkGMYmtfjulJwDy9UFi8WMbuax1tA0He95z9PEkPnJn/wYRizWdDz//Mts1sXM1boi8Pfec+PGDZ555hm+429+78Fu4YADDjjggM8eWGM4Ozom5cQ4FFft1roSZYkRUmZ9cUk/n3G6mHHSzFitV1hjsBlUM51rsAgWw6zttqkfhwHNSMykHMFmnHWYDI1x2MbgrEWbUiHYOI8VgzWF5F0ul2iqLWIUUkxEMRhTyEHSIgh3QD+OxckcpWs7cJY0RJZ90XsZ74g5FfH6OKBZiTlxcnxC23WIMYxhRIeBkBKrccD7BjGAZlQUdQZRIJvi65UCxhSyiCiLxYKu65jP57VnXyFjIsLR0RHHx8dYa1mt7zAMoUazHMa4Sjgixjica7ai9JjA+pJinDyxyvktqdhJcwWl3ZBkT+Pauu0SabO2tBw6u3bKZr2hLwbzpKQ0bYtIKTiQMdO0hs7PWG96jBGsLZWdiGU+P8a4GZs+4X2DMbbot1QIYc3zn3qe09NrnJycsFr2DEOmaxd07Zzj42Oc81t/r+Oja9x85Ik3dY0eiNUBBxxwwAHvGKgqEkeIiTxssNbSNp4YSgrp2tGMV165zfmtWxgVNlEJw8jRtWtYa4uWqAq5AWbdrEZfVhyfnDAMPTFG5os5jW8YxpGYwfsWbycndGhnMzbrdYk85czR0RHNfF4aEpeOdIy5CM4lK6v1mr4f6GYzZjNDEkMWQ1YlKIQQubhcY23xz/JeSSEyhkLAcs5oGMnAyekJSZTNUGwljPccHR2RYqptaRKztmPWdbRNg0FIIWKdw1nHbH4D7822Um8SiE9mn13XMZuVqM8wDJydXi9jUC3EUpUYE6cnDSIG71qOj08qmWrpFvMtsQK2pGo+L4RlSj+mlIiD0voS4ZvE4eMY6LqGa9eucefOHdZrS0qZwQrz5hjvLUgkph6IGOeYtW2JMNEAwhA2zGYnYBpiLGnTrvO7HpFZSUnJCWbdgnFQumZW7Br6Hmst8/l8+/PRRx/jxo1H3tQ1ekgFHnDAAQcc8I6BiFwCH3nY47gPj/D28hE7jOeN8XMd03tV9eane8MhYnXAAQcccMA7CR95I43Lg4aI/Iu305gO43ljfCbHdLBbOOCAAw444IADDniLcCBWBxxwwAEHHHDAAW8RDsTqgAMOOOCAdxK+7WEP4DXwdhvTYTxvjM/YmA7i9QMOOOCAAw444IC3CIeI1QEHHHDAAQcccMBbhAOxOuCAAw444IADDniLcCBWBxxwwAEHvCMgIr9ORD4iIh8VkW95SGP4uIj8sIj8oIj8i/radRH5ByLyk/Xntc/wGP6CiLwsIj+y99prjqE2Lf+z9Zz9kIj80gc0nv9ERD5Vz9MPishv2Pu/P1rH8xER+bWfgfE8LSL/SER+TER+VET+3fr6AzlHB2J1wAEHHHDA2x61UfafA3498HnAbxORz3tIw/lqVf2CPR+kbwG+V1U/CHxv/fszib8E/Lr7Xnu9Mfx64IP13zcDf/4BjQfgT9fz9AWq+j0A9Tv7RuAX1s/8P+p3+1YiAv++qn4e8GXA76/7fSDn6ECsDjjggAMOeCfgS4CPqurHVHUE/hrwdQ95TBO+DvjL9fe/DHz9Z3JnqvpPgDtvcgxfB3y7Fvxz4ExE3lzTu5/feF4PXwf8NVUdVPWngY9Svtu3cjwvqOq/rL9fAj8OPMkDOkcHYnXAAQcccMA7AU8Cn9z7+7n62oOGAn9fRP4XEfnm+tpjqvpC/f1F4LGHMK7XG8PDPG9/oKbW/sJeevSBjkdEngX+DeB/5gGdowOxOuCAAw444IA3j69U1V9KSR/9fhH5Ffv/qcXD6KH6GL0dxkBJp30A+ALgBeBPPugBiMgR8N8Bf0hVL/b/7zN5jg7E6oADDjjggHcCPgU8vff3U/W1BwpV/VT9+TLwnZQ01ktT6qj+fPlBj+vTjOGhnDdVfUlVk6pm4P/FLt33QMYjIp5Cqr5DVf9mffmBnKMDsTrggAMOOOCdgB8APigi7xORhiKA/q4HOQARWYjI8fQ78GuAH6nj+N31bb8b+NsPclwVrzeG7wJ+V618+zLgfC8d9hnDfRqlb6Ccp2k83ygirYi8jyIY//63eN8C/LfAj6vqn9r7rwdyjtzP9YMHHHDAAQcc8KCgqlFE/gDw9wAL/AVV/dEHPIzHgO8s8zYO+Cuq+t+LyA8Af0NEfh/wCeC3fCYHISJ/Ffgq4BEReQ74j4E//jpj+B7gN1BE4mvgmx7QeL5KRL6Akm77OPC/BVDVHxWRvwH8GKV67/eranqLh/TLgN8J/LCI/GB97T/kAZ2jQ0ubAw444IADDjjggLcIh1TgAQcccMABBxxwwFuEA7E64IADDjjggAMOeItwIFYHHHDAAQcccMABbxEO4vUDDjjggAMOAIxvX9Q4Pgxzz4cOcc1LOQyPP+xxfDbgIF4/4IADDjjgAEBE9L1/5O887GE8FHziW78WVZWHPY7PBhxSgQcccMABBxxwwAFvEQ7E6oADDjjggAMOOOAtwoFYHXDAAQcccMABB7xFOBCrAw444IADDjjggLcIB2J1wAEHHHDAAa+B3C+5/Jff/bCH8Ya4933fwfn/XPoM3/ruP83qX3/fQx7RuxsHYnXAAQcccMABr4E8rLj8V29/YnXA2wsHH6sDDjjggAMOeA3c/cd/iXjvRZ7/i3+Q2bP/BgCbj/0LQDj9it/K4nN/Bbf+zp9k/qGvYP6hLwfglf/fn2Dx4V/O/INf9qrtLX/4H7L+if+JPKxIy9ssPu+rOfvK3w7Axfd/J8sf/gcAHP3iX8vJF3/dp339/J/9dZY/8r3Y+Rn25BGaxz7nVfsbXvwod/+H/wYdN5jZCTd+4x/GHV1/a0/SAa/COzpiJSK/TkQ+IiIfFZFvedjjOeCAAw444OHirZwXrn3V78GdPc57vum/pHnPL2B86WM88U3/JY9943/K3X/0F4nLOxz94l/D8ke+FygRruFT/5rZB774d
bc5vvAT3PyG/5Anvum/Yv2R72N44ScZXvwoyx/+hzz+O/8Uj//OP8nyf/17jC/91Kd9ffXj/4QnvunP8ui//Z8wvvCTr9qPpsjdf/Bfc/Pr/yhP/J4/w9Ev/tXc+yff/vM5HQe8SbxjI1YiYoE/B/xq4DngB0Tku1T1xx7uyA444IADDngY+EzOC8NzP8bi834FYix2cY3umc9nfOEnmX/wS7nz9/88aX3O+iP/lMWHvgIx9nW30z37BdjZCQDzD30Fw3M/BgLzD305punq619O/8kfBfS1X9f6ui+vzz7nS161n3DnOcZbn+Clv/7Hygs5Yw/RqgeCdyyxAr4E+KiqfgxARP4a8HXAgVgdcMABB7w78VDmhcXn/ypWP/qPWP34P+HGb/hDn/7Ncp+5+WfQ69w/8gxP/M4/+ZnbwQGviXcysXoS+OTe388BX3r/m0Tkm4FvBnDOfOG166cIBlRJKaIiiBFEFDGCZiVrRimtfowx5UYQQVVRzYgqgqAIIgZUEFU0J3JK5FQ+K8YgSLlxsqLlE3V/humOUsCIYK3FGItOY1NFROp9qOScEQRjTB0XgGCMYK3DWYuzFhFDRkkpk3IipkhKiawZg2DEYASk3uCqSs4JVUBqiyOVcp7qmVBVstZjEINI3Y4xiDEYETRnVBVEsMbinEOBnFPZf051f6CaySntfVHQNC1NN8O7BoCsiRQj4zAQYuTexQWbdX9ouXDAAQe8Hn7W8wLwhda+tipGmhl53ADQPfULufzBv8vi87+G3C/pP/kjXPuq3wvA0S/6Gl749n8Pu7hG88gzn3aA/cd/kLS5RFzD+if/OTd+/b8LItz+nv+Cky/7t0Bh/ZP/E4987b+Pqr7B6/825MTmp76fo1/y66/sx19/kry+YPjUj9M++bloioQ7n6K5+d7XHdv8sWcVlP1Od4qCljlgH9N89npt8e7nj69C2exrfm7a5HYT0y+2ReOA3v1UeV/dDntz2bSNOj0SErdU9eYbjOYtxTuZWL0hjG9fBLYNNWPMvPLy3Qc+jtliwdf+yl9KihFEaNuWo6MjfDNDVdlsNsxmM46Ojum6GTEEzi/OyTnStp5543BGiCGQVWmaBt+2GGsRY7CNo53NuHHtOsfzYxRhvRm4d37BK3duc2d5m6gDmERnPC0tp82C4+MjmsYT4shms2IdVqhVMBaCwGDILmEbRwqJFDOqQkgJEUPjW3zTsZgfMes6rLGV9HkW8yPaWUfQyHp5yd27t9msloV4xoDmRAojYRxAM4nEE+/9HJ589kOcnt0gjiPL5Tn37tzh+ec+yfnqgr/63/3dB/7dHXDAAZ99UNVvA74NwDmrZ6cLAG7fubzyPjs7oX3y83j+v/0/MHv/F9E8+j5e+It/EBCufdU3YY+ulfctruFvPP2agvX70TzxIV75W/856fIWi8/7atonPgjA0ed/DS9++79Xfv/Fv5bmsQ982tcXH/7lvPAX/yB2fkbz+IdetR+xnptf/0e58w//n+RhBTlz/EW/6dMSq2/6hi8npYS1ZYE/jpH1auTevbs0TUNKmWHoWa1XXF4subxcsV71xBQZhpFUAwmdh7mH1lsa77BGEM0IGZIQYyBpJibDGISUIOYRa6FpDKrCGMEINDZjrIDz5F/7f0WzMn7nt2CMgFpSVlT0ypiNAe8KWX7xMn/iDb+UtxjvZGL1KeDpvb+fqq9toXF87O3QUPMT3/q1vOepZwghMI4jxhi6tqPxlpwyzghN43EWRDPWCN4KQ0pY8TTO0VhLdp6cEs45cs5gBLEWNTDEkVcu73LerwFDvxm5uLjkcrXEuYZ25lETkKTkmIlkAhkxmeSE3FpyI5jGY8Sjg5KJJJdJJtA2Hp8dRMVmGMZIIjD3c5yREoUygvGexre4tkFrpE+swbctw9CTYqQxhpACKSeMZprGkYxls15y+/bLbPo1Q7/m3t27rJdLLi/vgmRU88P+Kg844IC3N95wXngtyKcJr9z8Tf/nK39f++rf+6r35NAT7z7P4vN+5RsO0B3f4Ppv/mOvev3kS76Bky/5hjf9+ulX/FZOv+K3vur1R37jH97+3jz2fh7/Hd/6hmOasDg6QkRIKZWsxHpD27ScnR2xXq+5vLzE2hbNAStHzLuW8+aSzabHCiz7kawlm6NiCFGJKeCtwdXkjxXFdxaTFB0zjRpGVYyxpJzRBM4Z1GZCSEQRrBFczgglI6K5bN9Zg5KIWVFVjDFYK+SspFyyQDC+6eN/q/BOJlY/AHxQRN5HuXG+EfjtD3dIr4/HnniU9XrNMAyICNYYOutpGk/XtiXSYy3WelJMzDrPvXt3SDkSU8Qag3MW5z2gGJSoiawlbjrGwHhxiVqDwZBDRoGT68d41xBMz5BXpBgQL4QUucxLXPaoUTamJ5gB31isCqIgKYNVsmR6FO892SkZwXceb1rEGpIohozmCEFwriGTETEo4NoGVkLMic3YEzRhBKy3WNeAZtQIQx556c6LcFcQTYQQSSmAB+sd8o6uYT3ggAMeAH7W88JEqV4vpfVG2Hz8B7n9d/8MJ1/09Zh28XPaxtsFbdeRUsJ5v5WihHEgxoixSts5UgRn5gy941LWhNAw9msaa3BG6GNmGJUUlFnjaJxlzBlpPBaIZFTBWUfrlaAZzRATGLGIlvmt87bIWTCgYJkW1oWEaVbUKMaC6I4MFghZDTn+3L7Tny/escRKVaOI/AHg7wEW+Auq+qMPeVivi6OTGb4zDL0vrNoYZqbl+OiY+WJR0nvOIWIZw8hqdcLJyYKLi3NiiuUz3oMIOSca52glk51hkyNZE+uwZgwBg6HzddtNQ9ecMuDRIdIPcZvbHiWwjGuGHEoKUAIJT2tbPAbJApXApRzZaESdwQLzdo43HVYbnGnwtiWnhFLCskpNfRuDdY6mbXDeY5wla645cFP9PgxKZpMDGtZY72i8lFSnF5KJBI3kn+OD74ADDnh34GHMC7Nnv4Cn/vd/8cprm4/9L9z9H//Sldfc6WM8+pv/GPyif/MzOZyfF5JmjKsVjapYLKghhMR83uG9YbPegDM1xWeI48g4axnWPYJirSGliLElsxFjovUO7UOJPDlLzmXBbjE0riFrJOciNUEVYxTrBGcNfQZNijf7+itTNb15q+XKqlUHXX7PMX/aSORnEu9YYgWgqt8DfM9navv3vu87ED/j9Et/M7e++08z+8AXs/jwV3L77/5Zjr/4699QpLiPjY9gLW27oHUN3ngWvqNrO46PTui6OaowpoB3jlYzxynU3LQgxpIEomaygLVCzCNRA0PsCSaCUaxaDILaxEaW2MZimwRZkOgIBrKFZA1D6lmHFWMeEMASaWMk2pFWOsQpI4k+DIQcyDljrKVtPM46vGnRcU0aVgzeINYxZ4HEhnG9JBvIxtAPa9b9JUE2mAVgDCElyIpTKSSunWFypo8DY9oQrcMgBA0MsiGSUDmkAg844IBPj5/7vPDWTcKz938hs/d/4Vu2vQeFrpthrSWEAEAS0Gxp24YUAiFlZk3DMAwMw4oQe8QkMAlpPV4VYkbF
YUQgR2LM5FyyLiKCSUrbWFrXgCiZDJIRyVCLt6iFU94b4qBoKot8mMhVpkSldFtiFcSS1WBINLa85+cahfz54h1NrB4Wbvz6/+PP+jMxj5CgtZ629XjrccZhbKHhIUZCiIRxQ06BNG5wAmcnR1jnyQhjimzCyJgiaiAEpU+BTRqLeK9rUYRh7FnpiKaB9ajcEHDGkCSQrTBopA9L+mHDeliSNGC90BhIKTHGkZkPOONJJrMa1oyh5Kkb60tUDSANxHWPiYG5mTFrTshkNuOanDKbvmcYA5jMelghJmO9lNSgJsYUiUoRKmoi5MAQevqxJ10mVJQhjDhvUKOHiNUBBxzwGYAA8sZVbO8C+BqF8s4yjiOapRKZEg1qmgbNZYHrnGOz2dA0DV3b0jZrzu9dsNkMxJhK9EmELLksyrVU3QcixggxjCxaX7VXghhDjlSpjCvSkqlIHSWpYJUS7coKKqRY/o5JMQY0Z5xzeFuiXDln4MEvyN+VxGr5I9/Lxfd/JwDNo+/j7Jf/O9z6nj9D3lxg5yfc+A1/CHfy6Ot+/sW/8i1c++rfR/vEB9n89L/k3vd9B6SIO3ucG7/hD2Ga2as+E8cBg8G3HbOupbUNkgo77/seGAkhEjcXkCPWWuZdhzGGLEJEkZDRVPJ4rvF0rSX20BrFNhbfeMIwcreP9P2aQUdSCLBZM+/mZAWMkmJi068Im4FxGIgMWErVxRBHRA1DjrS+JYyBIQwkEoKQcyYFh5oelwMxrZE0lBVO22Bsg8mJ1d1zLu+dk8ZAIpEk08wbjk+Py01kLZFEiGVlRFL6oWfdrxjCQD/2RM2MacQ3jmbWbC0wDjjggAPeKuw/V4xv+cS3fu1DHM3Dw8nZNZwAmjHWYA2oNUQjGGewxpNCKnona8k5472n6zrmszlnx5E7R3e5vFgyDIFxDMQh0vc9wzAiGaxzRFcsGlrfYq0plYJGQHKJXqkwxETjbNHfSiSjjElpK0cyRiq5MohC5y1ZlaiZmMBaj5FcbIkOxOozj/GVT3D+z/46j/87fwI7PyVtLrn93X+Ko8//Go5+0dew/KG/z51/+G0lF/4GSOtzzv/ZX+ex3/qfYZqO83/+/+XiB/4WZ7/st716v31i3jY4abA4rHjGOBZG7g3ONaRUCFXbdTRtixhXWDwlcjNm8B5MA77tEGs5Wlwn5ETSTBp7+s1dQrCEjeJRGqOEMBLU4psGZ0FjwGQ46U44MgtW8Zx1WhKDoirkFImpZ0gBEuSYyFmxztSw7rrk39WjKYIGJPW4tCGLgwE2m0vW63MkJ8RbcJYxB/o4YrMnamY99AxhRFPGG8uYAkMKJR2YAyEGsiiaMnnMpENV4AEHHPAZxI3TGaodGGh/3X+EEWH87v+UnBKbfiDFhJga2tLql6SKFTPFvarOtHggajHtI4khqJT0FAnvHacnx7zvvc/w/ve/n5wzMUass7SLBY8//lgpZjKmpubqM9c5qKJy7z0ppa09gnMOW9NkAjRtW4apCWMoPogyVcoV78MpVZZS8WDUnIunYUqIZpw1GCs0jUWSIVWvRwlCjBHnHDFG5os5TRMxXjg5OyHFzPn5Jf1q5M7du2AMMUZiSkQxOFN0W16kRqOUkDIxFa/IqEochJSVkUwCjFraGllsWkFzydqgBlLG2xJ7HFUZU2bhLDHGh3AVvQuJVf8zP8T8w1+JnZ8CYGfHDM9/hJvf8B8BsPiFv4q7//gvvaltDc9/hHD7k7z4HbUcN0Wa93z4Nd+r0aLO4swMb2bEMW+tF5xrsNZydHSMZ1GMQl1DFkNISk6hGmgabLvAeEfbznCmKSL0EFguV5zHOwwZxlRMPGeuoTENKTfICMaAM+A003XHXO9uIBFeWTrSEIjjZBYqhcwk8DictLimXLUhBHIeSCZjZF4t4ixqhD71IA1kiBpAEtiMNI5klCEN5NESLxPaWNIYyMOIMxbTGCwWpw6rtviUJsVWtaLmdLBbOOCAAx4ItPox5Zy5WK7QVKqckeKxBGUSVxSEYjSdtZKeKXVWiEzxXdZS1WbKJ2eto3GGYRh45ZVXOD4+pus6ZvM53ckRmWIarSK4xiPSlMq8SrSgps8qUXLO4b3HoFhToknO+no0glSZB5QUHhSLHO99FY1rIVOFHRJiQICUIpBxzpJyQERJOW2tDYwxHFWLhk0YSEYxTdFVnXjD4kQwXcNqtWK1XLHpN6SQCDGhZEYUK8qYEjEB4sgKhoSqwRilccKoSo6mVioqzmdyKhalYcilip1CrgKGkJW4b0D9gPGuI1ZvLZTu2S/g5m/6D97wnU0K+NziRUAM4iyucRgjdPOWtvUYY8uNaSxJIVR7ghyGSrY81jWIbUAcY0wM44ZNv+Ly8h6X6zus4zkbH9lkYeFnGOcxOZFiIiVBELwXjn1L1zjUCzaADgNjUHIGtIR9nbQcd0e4xhNJpBSIIcJY8ue5LYL8xjaQDUmFaBJqM7FVxqasLrIbyVZISfFksgghBzZxA2JoXUdSSzYZrUJGVaXxM6wIMQ6EMXGQWB1wwAFvNYTScKJUKhdClQAUUs4MU/rLFK0qWvyUpsDVZPOdbenoQe2QIZM/jNad1HJs33jmswWPPPIoJ8cnzGcLjo5PsM5hmwZrLIJFcMxmLc5Bjoa2bUkpl3kCJcVYm4JM7ueK2JK2mzUdMU6VdoWkOee2KbxUTZ5jjFtyZgRiCJBzrbArUbeZ78g5M5qMtw4jgUF7SmWeYq0DMfiU8cYTNNK1DSdHR+RRmTeO5eWM29aw8Y4QM8vlBSmVanbrDHNvyal2C1EhBhA15KRYVWYWsi0kNqugo9JYQSxkr+gIUUs3EAfklBiSeRP2758ZvOuIVffML+aV7/zPOPnir8fOTkibS9onP8zqx/8JR5//q1j92D+mferz3tS22vd8mDt//78m3H0ef+095LEnLW/jrz/5qvc2IjiUMPTEZqBpZ5j2qLZ38Yh0ONcQ46aEQlNks96wXq1YXpxjjODaDtM0iCsX5zD0rDdL+mHJen0JEhjMhnbRkH2DyeUmzqkh6Yg4WyJIAhsJaNowDoHz1YohZGIMkAVNQtcVUjXrOrLJJFVSGImhxyCIlJJZK4amaUsIXISkCdtaZO4weVYiTy6iRExIJB0ZU7n5KN2ASJaa7hxY9WtCGDDW4qwvDzIS4xgeWoXHAQcc8NmNKY1XolCCEUprs+mRI0LKCVCsKe27jBSdjwJaK95ESssx8iScruajqkhtm7NYLJjN5sxmcxbzBd18jnGOpu1YHC9o2gYjnrZt8c4S04acEyIWa802BTg9y4uOiPp7aXGmaNHchkAcAypmG5nax+RVZYxBKVGoJLJtrWbE1FRd8Z1SLQ7ns1lLCAkwWONIKXO0WNC1Hd1qtSVs4zjgneH4aF5SjDmzfOU25FS9GQUxJWrlLGQpkbSIIQyF/OVUzrWzO5KUkpKl2Pk4C+pgjBHJFNNQFcZxMv158HjXEavm5ns5/fLfykt/5VtADM1jH+D6v/m/49b3/BdcfP/f3IrX3wzs/JQbv/EPceu7/gSaigj
77Jf/ztckVt+jX4VuFF2DeaW0opkuaGNMXd0oqlPVRQPMyPmMnB6vYc3qOguVfCVyurFdDRljEWPRwJapm37qZ6joMqOrTCKStKyqNNc+gS6jtlRbCDW8PDgIRRcQcyq5dbSEs6NgosFahwmmRKZUdw2aVMFCzmX1oJprKtLjjC/lsRpJOSNjOaaUI7lNaFNuaDFSHl5NZr7+V6A/8fP67g844IADPh0m4rHtQbcPEWKNAFkpkujSpk6ufD5nLT1ZK+HJORfvPudYLObM53Nu3LjBfD7HNr54LqniWl+8/mpnjqOjI/phyTiONTKlWGerRssQ67aLlgu8tyiZlANOiuG0w2BNSxjHLYmCQsCmSsgpgpVT3uqwSvuaRE67iFbpYZsQccSYcM6iamr/WlsiULnouYq4PGEby8zMixM6SkiR9abn4iKgQMyKNabMR6qICkahMSBeiFm3JNVah6lTS8pCpBhZW2NQq0gs/WUNimbFGFeONzz4lOC7jlhBaZh59Iu+5sprj/+2//xV7zv7yt+x/X2/TcDjv/2Pb3+fvfeXMPvdf/oN95myVidyrfnlEsa11tSbyVaSo+RUViLOmtp/z2JsaQyd68WdayPjcgsDlH5LZLNtGC1CWX3I1MzZ1GeFrc2WFciI6CQYqIadJbStKJpLI+fpATI13ZyaOTsMzrhyXNUlfop8F6M2tiuVrmlpfDEULY2aI0MciDmRKcenUJdeuzH09jF0/gUY8x1v/OUecMABB/xsUCNNE/HQ2oRY6ixe+8qXx2J9X0Yx9XOy539VFsuy7WU/vVZIjHDz5k1u3ryJc66k5rzDt03912KcpWmaQqB0JziHVBfjirVCSnE7PmNL0/u6Nge0Nr7fHh7W2ivHWEw8015qkW1acVrwO+cII6Sw01RBWSxPwvzyXofJut1vIVUlDSnGYL0Q+oBvHY88+gjtbMb5vRPu3bvHarViDCNqDF3jyTFiyFgRnKvFAGPaWiyUkZYvJKlFKBWMgUy2pdCLDMY5GNlGDR803pXE6mHgxv/6HSgZsQY15cL0tkSrbt58jKPFMf0wcrnqsdZwuuj48Aee5dFHrqNkXrz1Cs89/xJ3L5bcu1hyvlwhYvCuwbkpbNxgTQn/JlXUVOdzI5gkWBRcxHZK9hnnItmsGGRNT89a7hbSFhRHQ+tnCIYxJFBh7maYMTP2PQvfcnN2yo2jG8xnx1xuLrm1vM29uKQngGRW6zX3lmtyjFzrTvjcp34BH3rqw5wuHkfEcb6+4BN3P8mnzp/jVv8yQ9owjj3WeLybI35AQ+Cnze+qZ/FgNHPAAQe8tXjNp4pS0mCVIalqEajXD8QMSMZbe4VYTVucBOzTZ0vasKTijo+POT4+Lm1jjGC9Yzaf07Qt3WzGvG3RbPDeMQZLygZT04BQqrxzzrStx1qPtbJLAepEEnPJFuRMjhnvdi1qSlStdNOY7AimNCJqtqRqqvpzBjabzXb8U5SswJRolabS3w/LGBTfOKyxhJSIIWKdYdEsikVCJXalk4hhuVqRgHVULA5HWcwbUYwovrEQDTFVg1BTMhwhK40zxKwMaojiCDGSkyJE3MMpCAQOxOqB4d7FiqyKmHLTOe/xviWlyHL1EqovMQ4Dvm1o2oZbAsvVwI0b17i8vODFl17m7vkFWQzdbI5v5njnmdxnQyyVHCkNteegLc0spYjGhzHQOIeOAlEQaRlNRmlQe0rjgHbOkM8ZczENjRg6O6NNHoktZmiRAD63MBro5qQkbNYrVqs1/TrSGM+864ipZ9Q16jON9ZydnLHoFiUKR6JRj7Rz7rYNtxvBDMXYTdwCIxajgjGQTH1AaV0yvkMhIr8O+DOUNhv/jar+8Tf4yAEHHPAAUOL2up2wd2qrsgBuGsc4hm1uoFTZFSsAI0UYbimVf2py3WbRnColaq/V3mC12XDv4h7GCcf+tHTiaBvapuFoPkOMsOlXzOYzNuM5akaMVRrbFlKTIqoJ7yxOBGM9bTMjpVAX7iXzgRabHGMs1gitdSSkpusK0fNSevAZUyNgmFoxWCJXzjnUGFIAYyzOThEvQ85lHKX6MZE1EUJJNzrn8a4pKVOXidZhbSE8ISSsUYxkhMxi3pFT4rLvy/lKiZQz3hjEZAyKs4aUlZRzqZYExApJlT4Uc9CQhZjKeMQIVgQzsZv04HVWB2L1gDDs5bi99wxpZBgj3jfkIReRYYy0mhiGHoC7987xH/fbEttudkTUzKQC6PsNUFYlzjlShhCh67ryDlW8d0i92VMtmyULmmMxcIvFO8sYg20fYT6/hjcXhLyhTQ1eDCkJcWNJscGLw9d+havBk2Ikjj1DXBEIzGYNR+2MEIW133CM0JqGxrUkkzjv76EIHXOMcXTOcty1nKQ56xgINYUpKSNiEQeShJynNgbvPIiIBf4c8KuB54AfEJHvUtUfe7gjO+CAA2BnEjrZIzBZKojQdV1pRnyfVkeBmDLeWUQzhkqipPS8U4SkkKvOwkp5NnezWbXP8RwdHTGbd+W5rIk4xmJrsJkiQInSnMMgKGJLy7KmcYjCYrbA+45h2BR7GyDWvntGSjTNuVJFaKc+rZqqLLcesyrO2EJQQigL3KrFyrXC0VlP1LjTaTlHSrF4U8UI6LYH4Hy2QFXo+54QSru0tmmKSWihnIQw4K1hqO3ZGuuJKaMGsiTG4kJKZy2hD1V6UgmjQhgTMReNVnF4V1w1nhaRImnRg93CZz1SjSxpSsSspZIuKzHGK125h2HAObfNYQ9DsVqw1kIIqJR7vu/7IvCuK4u+74lJyVrKaUMIeO/ZbJSsicVigbVltTGOA43vdtUgqozjiA8tcS342XUWM4PrDZmEjspwERhWgcbBbObJGOIAorEQNTzGKppKpEzMjCZf5yiucbaEnu4tL0kCIRuOfMKqIafETFqO/BwxQ+lNOGwIY480FqRoxozRKrh8R+JLgI+q6scAROSvAV8HHIjVAQe8jbDTWe1Q0m4tOfeklF/1/pQTVoq+R2uucKqSnvrVtW3H8clxIVJdx+npKW3X4htP03isNWRNQCaGsabBPM67Khvxtdots+nXeOcxSCn+ialaHrD1zprNZkXWkTOphKXwjcc7y2azRhT2xfVQio+cd4ASUwCKVkrIiFGMLQRPdUprmq1/VoyREHratsU5zzjGWtCUsK6YeTaNL6StnrvLixXjGDHrHosDUQKGqDDEAZcgWsFjiXHAbCNkQgqyrVpEpmlCil64fDHIQ6wiPxCrB4SSWjbYxpWKBefQnOn7fnthT/2YpqiWqZWD0w1inMM1flt5l6tV/ziOJSeeyrprqvLw3mOtQ7IhpxKyHoeItZ5hGLbETUTYbDboONC2LXov41uDscoQofELcjJcXvQ427NeuRJqNVLC31qrBbGoJmBTNQYj1gbGPOJnG5puyfFJz/lR4Hg2x1tBTASJuAw6jIy5ZwybYrmgLWRK1AqDnXxh3nl4Evjk3t/PAV/6kMZywAEH3IedF1RBrs81KM/TcRwB2RpqTnYCJSqlZKn2C1CiMiLbqsJJs3Tjxg
x/jNyHHek/IREesODJ0Hsc4ps/IwdCb4QNZo7eKlgGZSTCsHxaxhWDV32jjXBfo0JLWVU9pjEXng83leclrlE1yoZZb0oDyjKM5VrpZqRX3tihekWn742mlG0+AwoUmajIAnZWuLj9lQiNAZgtL1AzKfysiGPnpbk9nW5pd9PCjpNl4d7Z8WnJScuL29JeeZYQxsl4EubLAGBGHoNzgxBwKlekaKY7MdzFy+M4eL0HVmSr9gHav13he1kpdg82fobd9OKbPEZUURS7ZuaqkuAN45YsrkkgnkVYfK/AM/xcSY5i3o1nmWKpHdfmZio7G6dBRvnDyRcsbHqs1ECsF7YjLxX0OlAn3fM88zyRnxvfH1crb9vYloNz0rFRg2o/nTagDtmaYD/WZkujkY8FFNo9FKUVCb6qWuA3FCrtITWQ3hau/1sx5fqcAKIC8Lx7s9L7uOOUVu4x7t7aDvpEeTUJLS4XBTQntlwYTilmVmXiIlGeH8uJ9Ic6ZzHQ7z84o5IUuVW8haUViPc+dkRBut0yPlxICV+MZhoO97drsd19fXvPbaa2y2W0JnQc92GCkpPyijOedXw0vnhHETUITNdoci4DzjOK7lIsQmpD/rKGkEzUZybJmLgzXoEbESglPbfKXyCb08wD5eGVaGEDESe0rpgYDiWtuH9eAYxoE3n7xGP0LoXA34gmm6fAGGqv5NPgWfq+N/9CmPV+B/9U/3Iifuw6c836m1X3U9/D/9SfjUjfXVA/58vPrB2jzjUw5YweaHeZ1F5hmUHtNjKrWs5PChx/ktiYztx4ElB+YJnu/3/K2/82vcHTNFB4IfiG7D7fMZ/tFH/PH/4cSbb13RdT3L8oJliey2F7z++mMeX3yLUn6Gq8snvP76a4wXI04y48aRSmfyC7ka4nrHsNtYx1TlgOTSAhkrZ+ZUW+4VW7/tHlSC2XkH3jnBvR1g56jVefBlQU0TZT1PKs4JuCc9rAccF3Gr159WD7nWASjVSqiUZM4RmBOElrR+vlKg5GICwWq8FOcDoR/ox3FFM209C8WB89UyWkH9l7wOyMPEQeHsQD777ALTNNF1jouLHWCdeOO4IefCfr/nMO8ZNj1ff+dtPv74Iy6uLhk2W6QmrDGe9kDxrjZDBCR4jscDXoXD/b2hk5WwnpYFEeP6WXeuSRb0/caaEFQIXok5m+xKYJ1/bY69ynFqdjhwppKO8bJC1TZzLpBirsT5Twp9ggXsznmcFJDMvMxM82FdM6U4ctYa3IcHWoRxWZimI9M0kVM0P1PrHDGT8ZINxeoCOS24YJ24UsuRpRTTqqrBqdaAyrRMW2PP5+PW8dUKrBRcVlgSx5s77paJfYqEvsNvt1w+2pl452JmmcUlpmUiYVHwkhYWXciqLClBVnoXGMfRJnEuFCnEmEkz5CK1Pn2Cmc/JwloDjIYUtbJgrgtkU0mRzhn3pes64mKSDuNmtAg9Z3JWNuPWSoOlEIYRxCE+EHyovBC3Bk2WgZ2ymUZEbO+jHQit68KtC9JKE1oFBfGuesrJJ/ag81GAcRirVYOlHKXoGtDl2s7essQYI9M08Wh7zbAZbPMRzziMP83Z8YUbryJGcDrY272TVx7/T/Kc/wSvvD6udfKUYtY07fv2NZfCcjxy3O85TIlIj+sE6cbK13E1MBd8f0HJkZxn9nOCSZnnhQ9fHDkeEuNmwzRnnn90C+roM0TMvHkcL7i7P7AskaKeJU4MQ4dywcXmgkePn9LtNkRf2GgiOMEXh3aC1lK7DzXYcbWjLidDecjmz5YjITjEj6SYeODNJs2S5qxclO3vViL/WUDVDrT1WumJBPyqMnt7ztPP9EEzghF1fc27LQlZu5/qmjEOWH0OJ+Dsc7VgLJdCyUqppGYnZqslrsN7kzoxsMs0ywzwtGTK+08r83/5Rrtf511yrzwAU9z3iHR41/P8+QtLykNPTBaw3O/vef/Dj7jYXbLdPWKaIjEd6PthXU/jOLIsi3WiB4846Abb01tX51SOtgeUQgEzMF+yyYSgdN2G7Kn12oF0v9A6RZv0w3nHattvz4PGNm8bz0/UELFm1m7E91OH6vl/XedpoWiunXultNK4dSeez/mcjPzed32lMMB0PNaAc6mUEpDgVxP5aVkYvIn9NraDiJCdI2MioBlWjmJSQ78UqUYD1XHgMx5frcAKJSCMYmroLiYGAlfjNdsLyyqGcWQOwjQfmKd7hnLgvhw4zottTq6gAbou4AcPKeNcxgfoN4E+DOS9MOdITvUGnwVWUA83AepkjTEyu5kYI8uy2IKrB2eM0dqhnXXKSTER0fNyzDiOlFJY4sTF9XWVS7AWeN95Ou+q6nV3KjucCRp+WvdS85Tz1EVSnckbrCrUVnxgbRrSRqU8u+IVst5sNjx69Igf6mmRtvcSqodW+9ucC8+ePWN5+YLx8YZ0Zerdn4befGnH2QH9EFWSs4NY6Vo7Pzx4zCsR14OM9dWx3rMaJItrgon1xw0daYF4SizR1JSn48x8PJKWhZSFLA6VDJJQsQ4fbSUq6eiGDfNh5m5/ZM53SBq4fbGnDx1pPlJyxucO72AzJF5//YrtOBAXS0WdcwzdSOi3hNCh+ZKu6xiGjtCZ36aLVrZxXhn6QEomq9D4UOKElBq11RAgV8sIS8xI9uaXVrmGdhFOweTaJVVK5Z6cMv/zg6QlCq3z6RN7ACdu1ikgyw/ut1RyS9F2qLh177B7bCVSqoipHcxnXLxiSFWKVgbMCKHrGTcbwmaDD8EUZFvbQ0WfnQNXpCLtPxqP/lKMV0rfRlY/feITf85+dn+/J2dlu9ny9HWjaGw3V7Ux4YDLDgjs9wsffPAc7zqePr3m448/rrIMWy4vL7m6ujZ6RIoUTWuQkxczUHdivp9NRTxn05BDM0NnzUtd6MnZugqdBFDTkDpOU92qdZ3Hp+7B095yMkOu3n+xrOs9hK5+7nJWKjyft9RkI7Pb7Zhn485a+TLivb1nqPIlqubwUAVyY864WiXpumBIq5oae+g6+mHg8ZPHTM9v0C6sSUQBohMKjkiztTHOpwkjt7VT9cg+B9TqKxZYCSEMoAnvPNthy/V2x9M33uDi6gnqAzEr+3RP0sTLqfByv2eOB9MKWqoeFeZBNoyOQTfgIXvotgNDt2E6VkJsUSQrTvMKVZ42TNuA52kmFxg2O7Io03QkZzNbvr/bsxkvMKCmdvd4oenvtC6LaZnwIeD7QL8ZjEQYOvOj0kLwnuAESqLvevO9CtaW62pNX5x7sHk6xAyVi5VEUDWTTT2ZXhYsW8jy6RMpoyyiLBmcM05XkfIgqGobWRSlLxHVieATY9eRS+SDDz5gmRe8uOrW/tUZJZupsX1vwW4jsK9ZpzjjF4g0gwu7l/VAtlG5G68cICJSW5Prge8EbcQF6gHe0k/UzGLTwvG4Z56P5JxI0RMjZB2IOOZF8VoYXEFcNB/JqpHkyIjb0AXHfHzBdHif/eR59nxi1/e8+fgRH754RiFz0Qe++fo1/+p3fh9DcKSkKB4fFOdnHl8/MtSg6w1dzjNoshKY5a6kmHB
S6IMdiuKtEypnIwxPKZLigVB1qbwfmI57qBY4DwJbdZRKSDa+SFmRHWszb0Qz4za1AOxk8XEKoNoekNWQCAn+dEg5E/zNRdcyIEJteqnhj9q/89raXjmQ6ipKkSgaUcw2JUaIKMVjJcBhoN+Oxp9Re/+1RmhrvP1TMFcELV/qwKrREM6Rw/NQ8pRw2jzQkpmORzrv8e4Rzvla/rI1erW94snT11kq9eHiYuDp08d8+OEH7PcH+n7DNCUudqZ5+OLmJZeXO8a+Z54njvOR3XaLAnOOtYkgsszWQCW1IUEXJYeAOk9JzrrRUyJlXfdzEYfU5hUnAScdiHGyCgLiKWQKxTwjcyTnQqnNRTZnzcf2JC/iazmwVEunzDBu6MYNHZZwHBeTD3HayuEWNBVNIIFpiXVOe1wIyBF6F8iiOD+aU4JbUBGbuyi9c+uWFgUT7EaqUHZdSyuvilPzz+cwvjCBlYiMwN8ABux9/RVV/fdE5GeAXwaeAr8K/GlVXURkwPza/gjwDPiTqvqPf/yLwDD0IB3by2vGq8cMmw1h6E3B1gnLcuD25iUf3z7no/sX3B/v8c7Ir84FfFWS7YeOUBw+wma3JdXsPU0wT7nWw9tnY92kT4rL9jxD34P33N7eonUzf/nyJeAY+g2bzQUuCOOmh1Ksg8d706VxHo/gQmCeZy6eXKC5IM7T1ZLeMs/0fVi7RFp5o+SythZ/Gs9Gi5JVcWpFAifNHc5G0YLUD6jt45z/PRZYZYQlZrSY4Wiz5zhvQ28dZloylEwXAsELXW8ctLlmUCV/Ycjrn8lYD+JWmnvlHrWywmnIWpFtWecaHNES2FNpsWWvp6etr4WJCBrwlVES03xgOVS+UhFScpTSk7LZpizWMU0sjnlKFGZC8RQHRXryimp6vO/Y7S45HpXb/T23Lz8izZnXHpuZLTpzOQi/+HPv8PbbrxE1cb9MeByK435/ZBwPXOyuidOE80pwZouENsTHIRgRV1BaV1fjz6TaYZpLpsSIlFLld6RdqQclT8EOzUbCrTdoJf6u/JW6/syL7UTaXXlMZ0hUM0ZeD4L2X7vvrnoJIvW1zx5bP0uoHpBFT+uqldtFrJs5xkzG0XUDw2bLuNnhQqgL92E2X0U4bM8Sh3fld2lO+emOz+Rc+JRxxnxbr6uqrPzSkjPTNPHixQ25ZDabkb7v2O02DP3A8XiszUaZlCLPnn1chaB7ayRIiZubW/TikqvLK/re/PiGvseJsiyRrjMx0GVZyGoUbUrBOyGVTJpncJ4wbuh9b+bG2uaa8Yv6vsOHJtxbanlXcd5ERm2OZExw1jxFFbNiwjlyyus8b8lCSukM6TKE6ng84vuB3W5nlJUYORwONpsqYr1ys5x1rZ7Q+Mo7LKW+L9PE6vqefhzwXaD4hWW2IBOBxaul/ipkzUhRQ2tVV/szlYec5s9yfGECK2AG/oSq3otpBP1NEfmvgD8P/Ieq+ssi8p8CfxbzWvuzwAtV/XkR+VPAXwD+5I99BVVTfH78Orsnb7B79JTgHffHO27uX3A33XJ7uOHu7sjL+zuWEhE8KVqmjgh9GNgMPRe7LfPxyM3+Bu8DQxgIDHjXc/RVyPCsRNag1HMhNsQOQu8c/TCyLDN3t7doKYTQ8+GHHwKOOV7x+PE17C7oNrVOXpTgpQoHGipVKjer71wt1Zmp5uFwYLfb0TruVusFedgx8rBcVL3DtEkpaCXqtkl8XpZYP+b6tWC8krko85L56KOPKMn+tr32gxZfxPylxFmXmyYcytAZAmeL+fMhIn4u45X94DzwbfexcUFe5V/9qLLfqz+XCpuftK+MY+MQ0EiOkVwWluXINB/RLKCBUjw5e0qxbDeTSUXJGigYhymro6SCD+CDrGXiUvlW4jtCv+HxtePw+sLd/fv4kni8HdmOIxeD4/HFFcLAkkcOE0g5cnc/U4ry7MUd+8PCbvR4Z96VDuvqK6p0weE9Zyjvw/KHbcCnLtQgkFMyFMhgnPXw8N5bQsNpbaSUrMwZbc62Em0pBcrDRKUpVp8LLmrLRPR0L9pzN35jKy2ec7jgZIfjpHUNnj6TnG6lvf96oIXQc3FxZURqF8hrycuuw6m8WNd2BS6Nq/K5luB/+udCHQ/2pNNPzzhID+24VJVnHz+j2QaF4Om6gSqlT855pWnc3t5aqcyfTJRf6HP2l9dstxucg8vLHZvthnHcsCypNkcUNpstS5yIcWaaIuPYkVHiNOP7nr52W+dS8M6v+mWlFJZloe+3dh/FscRM34cHc78R0lW1EtmbX2VVPK+BVQuqvHerIbTzwYyec6kiqLqWz4e+R2uj1XljR27Fuhr451xL2JUzKM7hQiDOC37oGXYb4nGGUGryoejQmQyEC4TiSfcHe68iFNEVwS/6+ZwZX5jAqnZO3dd/dvU/Bf4E8O/Un/8l4N/HFtC/Wb8H+CvAfywioj8uRBXhydfe5rW3vw1hxPuOoguFQiyJm/uXPL99xjwtHA73LE5xnTe7G4HLqwukFDoRXFSWw0zOifv7PbMsXIyXdP2IcwtOjLOhWSus+elq5eeow3a7YzsaQfvly5dQNXXGbccbb75uE7UuBoewTOYAPs8zFxcXLMcZnwUp0PkOCYIXx/39PV3XrVIKTam7XveHWfDZ9w+z6lIXSCJgSujnAorrfbT4iIKSVLk/LNzcThzuj9zd3BCcMJ8FAOt1cA6HoQaaMzkuJtFAqAjDqX39qzbO28AbdNA26B9LRv+UX52XAo3/Vk6BsSo5Y4WBPLMsC9PxiFYekmJ8JS1CLoGUIGYLorJCUk8qJvzpcgIpeJ/xLhI6M4F1CKkIBY/zA8EpX3/zDS53F3z8/I73339O8B2Pr67Y9FuCG0mLY3GwHPfc3k10ocf7TEoH7u8iXfDmB9h7xqFj6D2qGe9Mryp4UHXE2o3VNvpcTEPNOVnRUCeO0Fv2fM5JRG0dI6z8rOBtDaxotBYTb/S6NoLY61XErI52mJWKJn7yHp4irpMC+umerZl+DebM0sgORPsMUte6mtGyCtvLS3YXl2vp2BDrFcA0NFpAKmG90rNwTc/scyoGfibnAqd78KPW06s81HZPliXx4YcfE2Pk8uoSC8T8Gkg0eZu21zdhzRAC3jlub285Hg8VRTIEdbvd0HU94Kr0zcB8PNL5nus3r/HBcTgeKEskxsjxcGDsNwiQciZU6681iEeI1W/POb9SudtnOA/YmySOcsa/rRzbVWJFHF3XrY1X1ennFQV4O19iPKm4rw0dUOUrslVGcrYSHra+cg0Mfd+ZfmEpuD5UtMvmvR97puNMqALZYehIMVdV+jPN9ZXK8NmOL0xgBSBGKPlV4OeB/wT4LeClqjZFvuaxBmf+a6qaROQGg4U/fuU5V/+14bVvsOA5JiUf95Ait/NLbg+3HJYD9/sDSywUnVGZSTmhxWrVFxfXdKHn/uVLoioROB73ZuhaFHWeKMJyPFqQUHU3StXrOM96ThZqI4SWAAEAAElEQVQBGV8KV1fXjJsdvkK/zjn6vieEwGazYazBlrXoGul8KZGh60GVoe
/NUFM8QSHOCykskEHqxn84HCz4WsypvC3285bw8+DvIQpSa/TOBAXbZ7ByC5yxdGgVqKyFaVnYHxc++ugZH374IWlZKPVwAx5k5FZuKcYFy9Yx6ZynrwGhNuTsqzKETwbj9eO/ajXxY4Orc0TkrPV6RVAl01y1hFoaKxlKIkUlJ+jChhIFDZ5cEjEqlELKypLFAisswML15Dzb/SqgJZvdUs6EfkBcT2rChgQ8I4Ujb752zTtvv8azt55wv8SKViaOh/fJ+oxu7FhiwvuBcdyhUsgkcIN1ZXUeOkdxjlwWhnBqCbdEVlcUqDWFqFbxxFoK7IDQBUNr6iHY1oYVyIrxkLwpxxufw3R6VM2eg9DUocUSo0ak5bQWoZYjG9n2bONvsXNbn+cNJue2OGBJTM5mbaLOrShDOXu+vu9tD9ntTJS3vmep7gla96hWJjY7q4cdiudz7/MYP+1zYfXDPBt6hnQ0l4lXUUOT8dCq0eeZjgv77kDWQj+YjtNut+Pi4oLDwSQIpmlimiaePHmClkKOiXk+0nWnff94nGogZqXk4/EGyYlcCruLC5sztXyW1bSm+n7AZYjH/ScsrpTqW1uU3ldeVbF10B57TmRv5bnWnd77U5Jw7i8YQmjLi5KNZ9gqIm1OTysv62S/Yx23Wl/TzoCu80SBkhJd33E8VA5nyXRjT55mK1/XcULsbU3m0tCsltzXbtlPSf4/i/GFCqzUWJT/kog8Av4L4Pf/BJ5z9V/bvPEt/e3f/HW6732fkhXvBN8bqfX+ODPNkRQL9zFSkuPabSx6Hj0Oz+EwkWNhHDeAsrscTQQ02c98PzCnTJlgOS6Q6q12suag5wvY+0AXegSDfENvm3VR8KHn6vEjrp48Zhw2jGFg7AfIxhkJoVu9nkrtTnJe8K6gOFKJFDHkqvMBp0Kaq6BoMfsE5xxJTejNh2DqvGICbyK1M1CaxYYiEupBkcFDcFJNbXRlCprSupIKvHh55P6Qef+9H/I7v/3rHKdDPcK1dj+ZerwTT1bj8uCsrd91PU0s8Txb/8oMfVjac5WoaWi5VH+3hjqeCLiNI9V2GEWRotXSxFGSHagpLib+6n11qtcaVGE8KDeiJAqJRIeKkBcA80Q7pkJMjoLHtGpM/R+B6+vH4OF4f28NHFhTxhQTXV+YpkzSgg8O1zkKgZv9AfYzw7jljUtIcUJzImtiPy+E0hO6rXWzjR2qiayKBI/renzf0fWCl4hiAVYXPKXENYCIxfTEcymVx1GQUrXaNDGnQud7xGWyFkOrSyHlbBo71V5EqoSK3RvruivrOvKrMGGmWPBUTBPIxEjrHiCNT9WUr0ttbXe4agaPnoxrS1NYF+vaLDUobAKhilt/ZsKeGN/SQ9gooRuQprids63bVsqXtj9ZN2DjWBlPrwamn/H0Px8/7XMheKdgzUFSy1Fu3bCrbIUDjymHO6laS8UqHcty5HDoub3tGIaeR4+uEIVpnpmdoTuXl5ccjsdags6kZWK33fJ8f2f6gV3HcUn4OVGmTC6RZTmQc2S7HdhuRlKaefHiJZePrrm+eo07dcjxiNfK8xs6mFylTJR6RngCQk4ZCY64RDtjSmu4cKRsZP2iEJfZxGHVyv7eKSktDygGhl5luq43zmxRnFc6UWKcSLMjxtblWGxfUCFrQknm3JELca5JfhfouhHnB46HO6b5iHRKSFaK3G5GyhS5OcyGoykU3+Gc4hDmw9E6dItVS5JKRV/L54JWwRcssGpDVV+KyK8Afwx4JCKhZifnHmvNf+1dEQnANUZW/JFDUDRNLHG2urA4ujCy2exIC+zTEUlKOSS8CleXO6T33EtkOkykJbILAxfDlu3lBVePHjMdjsz7o/nblYSmxDQtVYyvElq10IyYT7A+oEqMiWmaYYnsD0eGvme73fL661seP3mNi8srHj9+ysXFJQ4FbyKADcU5r9k71zREClJF/ooW+tBbpp4LxWW0dpdpzUCMryGmSyWyljfaJh68VCG6qpFT6+BGbq73rLJ1zvlVL17e8f33n3N7c8MH77/L7e0NcyWwW7Zf1gV/WrjKMPZmk6DW2m/Iwe+CzHxJxwPkcM2YpQbglfvAw4zbzoQTh6d1NKWKYIoKTqyhQRFDZZtxajEfvAwkHBlPylUpvxiilfGo68jiSFnJWSjFlLqdU+ZlQYI1VVCUmBLiPHMq3B9eUrKAU3YXI0U2qBvZXV5ZgFGU6fAScY5+uKAbekLf04Ue5zv6LlDPNSN0+wTJ9JlKVtQDzrFk65xyYgdmLomUCyVlOucBsbKdWmlPUPp+QNSjYhyTrFXPyYl12p2VOlztmgrB1pJrJXTUrGNqwOKlKaZ7aAHXKrjo63Od7l0pSjrrSlz5WBUtA7PwoPHVqL5oTcHa6vCmzl1svxBP5f3Iuo68E5NZEFmV2m0LcNZfVbTOL/tUX4Tx0zoXwPboHDO976q3Yt1LaTwzK5GDoq6iXN6ZnEfOzPPEfn9gu92zTFfknMhaOBwOhK5ns9kCYvY3cWZZJryAd7bfKso0LdzffYjPhWk+ENPE48eXPHn6NQ77PaHvubq6ohR48fyGlCPTceJi3IBXEplYjYxFrIy9LAtSMiF05JiQcEJBT36XFVWqKHnjsuZcagOIrqXA85Le8XhcRaUFjPOboORE33XWrJQL4Mm1e8WaoPJaXgToO3MuSSmZBnFFzFwXKJ0np0w3Dvi+Y+X1iiN4QZMJ4ao0Er5dy9YhXWfOf6d5988yvjCBlYi8DsS6eDbAv44RD38F+LewDpA/w0P/tT8D/K36+7/+4+voNgZ/qhEH31GcoCWzGXqutlu4X1A/0DmHr4ECRQlR2dFzMezoXMcm9HiEvESbyM4g9v3RlGSDD0RNK2n3VV0gEUMdGuTfDT3juOXq8oqvf/0d3v76O1w9esKTp08ZNhumJTJ0oR4SSonRhNYaIbDyNnLOSA2Wmou5qtngTNOEqh18Ifi1vODXx7tToLW27Jv6svMrt5Xge+NisBY9sL2nTmoV5liYl8Rv/9Zv8uzD9/j4o4/IaaFkC5RCcJVca3YOa7CJlTo6F0xDp2rKuHZYfQXHq+T0T1NRf/A41DaveiwUEWJOdN2FoSZxYYlqYoDF5AFAV1jdnici0uG8Z45CWQSPIxVlSpklCSkLsQjL0hS9Bc2J4AquWGbvanCWszJH4xv1XWCz8YhLxBKrcGJACsxLhH5ks+3ZbLf4LpgBbjBBxq4LhODwXgidg3K0ICBGKCYeqtnsP1KJeAdOFO87RBNxmcErPjhyrhwQMekS5ztTOV+5VOlEClfztVw9NM/4UxYAiWkMlYKEcipvt3tSyoPyUr1hoC2obcGzXefz0ozdb1cPt5Oekq01rZ2HNWF75TBx0hH8uCYubbjacbiGDjUIP+ftnYLIz68I/1mdCzkZilcqscx48kZ1CNIDUCQTVaFyZkUEqUHKPEdKuWW3u+C99z5g6AOxmKdrKcrNzW0VdDZLpa4LHI9HS66XyDxHujCACM7D/eHekKrdBTd39xyPM9vNFlXh5ual8Wx9IS4T2nccD
wtLNSRuAXkIgWYZ1poYsjZNqhNxvc01vybmZ1qDJeHPStLtUrY9qHGtVrJ8XDBumF0z7wMlW/BYStO+OvGxmnD1kmMtgXtK15PizFxlH4p3FKcMm57GdxyCJy8zy+FA74TizTXB6jU1zde2T3723eRfmMAKeAv4S7We7oC/rKp/TUT+PvDLIvIfAH8XM7+lfv3PReQ3gefAn/pdX0GEYdxg0vq+chIS+7tbSnaUZcHFxAaHqJCWSIqZcduz211z0Y+owDFHnr94zv0PfsDhbo8odNuR7mpHrpO7sY4a0nC+0bb/ci6kfGQUoRsGrq6uePtrX+fi4or7+wPHycoaUpTHj64scClKcGZ83IIi7z3LsuCroNu5v9caIAGbzcZ4TXLS51kzjvqY9pyuaoa4Rhk5G6UU+lV110bb0Jv55TRncoZnH33ID9/9HtPhHs3JQq+W3RdTH7FSjUlJOJeZl4VMYhiDiYdW8uxXqhR4NtbDrl7w80DqVR+39QCtVWhVmBalGzYoHSkLqQZF1pZsUbORSJvhcpXi8APOD/hsavipFDIBlQ51npQcWS1zN2ikmKxGmnC50HcbCILrR7wLSFRefPwCzYneO0Qyxd8TXGGKBwodc86MIeD9gPM9Up0DVB19FwidxwdH8I4QhN5tKDmivWOZLAsPTshpJniPakJFrcSJINIxzxPeQUyRJS5VK6ojqxh3a10/JvfhvXkeShfW4LbZgpiReNX2ccEQg3r9Vn5iDUwaytwCJrPPsfd1zqXSs/t8znuxBOS0fkq1rbHA6LQa7WDMpuYdRoRxTbBWQrMaWVocJkRnb2NFtFaHhlJFgT+/pOanfi4oVSOu1GA156rNV5PHxu90vqIpPOB8tqaeUuDjj58zH4+E4Bg2G6NlxEJME7uLrfGuNjZnl3km1Occ+wFRpR8GKIVxc8FxOvLD956RUuTq+prdxSOePXvOMlmHoO+UOE3cq5W36TwhjHjnH/DB8hIJoe0TsqJO7V6vnFkn6/582nNOlZbzrshzFfdYpUtijLanF0PqwPafrus5HiM515Svdiu2dTTPM2DlbnFC3w/sNjvyZuH5hx/S946tD0xYgCYq5HlG48IYvAW3oqbWrkpUIdEqRV/xUqCq/rfAH/qUn/828K98ys8n4N/+p30dP2zZbnZsNlumaWaZ78hJOewjAdj1PTl445RQwFkW6VJBJVGcsKTIkcSSIpfbHQD7uBCPR1z1tYtlPm1kIiuhtG1YOWdUwIfAOG545513iLnw27/9O4ybLeNmyze+9W0a50pcAHHWLaIZqbpULQgKIdhBInLKSpaFUpSxN8udofoQljYRaylxJcbWYKstIBMdPFNbr2VCE6E8bcaNMNLIg0UhpsI8J8a+Yz4eWaofVEnR2nTrQgYqoTjhqqSDd1ZyignC0NcF+Kqm+1dnrJtD5Tf87iVRMwa3xAGW5CF0CNbtFqMaylSqTYQktFQSdrGyXskm6mkq6oGiXTWVVVQsoMJ3tDZuI64u1pXklNBZA0fXbxDXmZVKiWx2V7z86CXxkLi+3NJdeaZDRv0AYWTcjWzEnoNSzKQW8JIZ+4z3Ni8t2BGCEyQIMWX60cRKg+uMZ1astOCkaqAVJfiOHGdSiqhmUx7XyilxQlHjIebUWsRlRXOFvKJYTVjXDqqK9iH0/UgsJ0FJVcVLRXLLSRj34eHmVqqA0qRMXkUmW5PJp1nigK46VpX/5SC4yl2piMsnR+VmrkgF62u8ilp9XuOzOhcaOt/aDbLkFWEsxQSKfQlniJ49sqylVNACtzd3pHlmt9uyLBnEcb8/EIIj5WhlMu84HvbWXSqFovDo+gnv/fA9thcXON8xT/O6l242V9zvZ957/yO8GAF9mY5EPTB2PfPxgO8CcY4IwbrBz7qGu94sZJa8oN4jxRIGOCFHWpFPLScZnno9EU5JeYxxJbWfX7tTkh5IsdgayqZJaEBGIWm0cnzVUGzeiXYeVb/LYtzA+XiAlOl9z3R/y/HmJXG/BzXvSykZSjGulyohZlypbhRigVU6Q2I/6/GFCaw+q+H9QNdvuXr0Gt00s781QqJ3e+7vEo4BCYqrBsXzspiCLoWlbszeFUYPS3Ik7Eb24ohq4LG4TCaZa7lWSLIKF7ZuBu89BfP/88Hzve9/H+c8m80OF2euHz8il8R777/H1fXOiOkhGEytZnUwdME8DHMm9D0SbDGVlIhxwnvjUS1zogsXpASh6+mGbm0vn+eZvrfgRbLVqqkol3FF3MqzMN0bEyU0loogenJjMkE2jJMTrG4+bjaUUhg3I/cvmwEz4E1DR8TgZsh03jNWC56iQClM+4N1fHzVyOsYCqgIWbEg4syG6PQYGyctqkpErgdsUUFlYFmUfmNWMYc0My9QkpVZQ/C4YIFUVqEUx7K0rhpFnBG4F618HpQ5whIjWrlapn+T6fquShwU/JrVuhq4dAxj4PHTwP3Ll7gwggvVA60jHR0qgYPs8Zee4E0SNHghBBAxsqt33taZQske0YzmaOUOcWZ9I0JBqwwBUDIlRdBkiU9aUG12MyYmmitHqeR0lnDkUxbvnJU26nxMNXgS1zoEW7JiPBBVXdGDFjjlXFbuG7ASjc+7zVpJDlrQc0KdSz1MSsmoNIpBAfGVWK0WFBTTAgpdj6tIdq5mu62UaDmR1PVt5HznqCKTZwrxrbvySzoUWHLB13lgyE1c71POJrzpgpWfT2uu0Txs1bWkeVkiogeGcaToLfAuV1c7dtsBVNm/vK1WL0IWGLvAi5cvceJ5+fyWbruh7wa8WKAu4piXwvziHmoC+uTRNXmJxDonl1gQL9X/0bPEZBpTPpAakqRW7chVJDqlePJrzWYzYxJuTV4nVZK6I2ZFpEO8wzuY4kyHkJPifU8smYK3tZyOFCmoU+uOz7WbMMKyLKSSSFXwVB22flQQLyzLZOu0FGKabU8J1vUam4m4Kn1RJEMfBnRe8MnuSBFZ/Wv9Wjf6PUubn/7Iif39DYoyx8R0d8849KQU2Yw9XfAclwNOIHQDzncoE1oSh2liSUryinQQNj37/YHihGEz0LmeF7cHU63FUCqHmVlqg4/rhlXUOvtQuL255erxIy4uLhmGkX4YeXHzgmcvX/JH/ui/zLgZOU4T8zTThcDjR48Ze3P8VrVDL/R9dUo3VfacMinOUEngaMI5+5wFpQ9WYluWxVpq+96OZIWYrOOqGzo7qHiVx2OZXaXD1gurq59ZLqb23EjWMeX1cY2gjlrYUHKuwaWwGToG37IXzM1crbNjJdh/RYYiJBWC1OYE5y3YbDjJ2bVoCt2KkY9VlVyiaUvlwt0h0Yngi3Wnl2LcvDlNJsxaxcdiyaRkbOdSOUR2Q+F4TLyYEtOScL7Hd6MZltcAPQwBVW9dpnhyjhATJVv2q9JD1+N7ZXBKGK7MSkOUeV+4ef+OjWzoRPG7mdj3DGNHKQvijWe9ZI8roBR8BQqKCwYYFMFrwekCCilHUpwpJRovSS0jjsuMrygBaiVFu57gXCCpEvVg6C02V7UeGiWDiCcZw3a1emlCndSS
jAim+2UQkHXbUvmOWIcWNAFO6j1pytFUhM6Cs5gLHjU4RBQoZhiNPX6VSmgWRM66d5UO6Tq6wZDsmE/K1CtyVsnzp5KQTa9zhwWrwOk6776MQ4sSU0G9rMLHbpWSMZ4QCqlkSpJT0H1u9SOsCJdmZc4LICwV5Xn5/BkXF5v12lv1YEBV2JeF5TjRDVt214/5xre+iYjw7vff5XDcr+tfS2HsB1QTSypW1s9pTW52F1uGcUNczJbGFM0Dx+NkjRLS+JTQZt65G4gF+VKdBGwe94MF5aUmas4PFC3WSJUs6Uk5oTUxuT9Mdp6ANYNUhDXGVAWsF6s8O7uezlcu2GLPMQ4Dh2WGnHC1aUq8mDZXg+CL0sdMjyNPEWLGFyhiiO1JnPt8Jn+24ysXWOV8wEnP3e0z7g8H4mHmEAJdF7i62JHjwv3t0bqiyj0Z4TDNxHhAKfSbHf5qwyxKdgoODmkiiTKMHTklShIcHu+htidYizYntdvWVZQVxu0OLdZl8fLlDaHvef2Nr/GdX/wOV1dXfP/77/LmG2+CFr7+1tsMw0guiSUZ+oUPNrHVEAvnu+q0bodOzomYEi5nun40vkyMDL2VBs/r5J1zxmtyUrtClK4R3dumLL5W/x5SZVtnYK52D4fDgWWZERH2+71B7GdaKE1DxTmpZiXFyoC5kFJmKnnVmHHijI/wFRznyEG7Hg94VvpQfV1VSc6vpYT3P7hjt7nElcjQ98SlkKuK+iKFw5KtsylmQtiY19dUxfqqereIY9w85vLapA1cFeYrnPTMjJRaWA43zMsEaSaExGa8ousC2ZvRqpMOEUyC4S7z+uM3ydMteVpIZV6FEpdlAR/ZhAGKkGNEpXpYiiGkqRnSlmzyDCkRqgnsSQw04wW0fl9KwtG0oSqvhRogGXZFrlmzVmcDV5RmvSHOsyxm0oyrqO7ZfShqhsd2b9qhlR/cH7Bmj1IsMAY7M0woUc0LDktcUvXqFGcleK1BHcnQr9Wn0F5h/c481Kq8hCjUBhS8mIp+fnjorNwqb63xJtdR0auf4Hz+Io6SM0kVnFK8KfiPVCuf6svaVc9LQ3JO2n+fWi5V68hzGogp4oLjftqvwpg+BEbfMS5W5r4fAl//fb+Pq0fXOBU++vAj5tkEoC8uLsicOJaCsN8f0JIMYSxHhqFjnuIa5F9fX3M8HllqxSXniXEcV5HScw5dKeVEQl/pINZV3hqjxAklVbSU1j04E0KlalRUcxxHLi8vmedIiomUj/RhrImLNY3letaIyHr+WPkxn/ihtdqjToi1gWnse+O7qTLEgqSCNP0qtdPHNR1EjMhefi+w+ukPATbjQEyZVAqdd7hhqLuNwZTzdDRuCcJSInQdDB0xGXNyKRlfEnOOJMmMuwGV2i5K7SKK1Ay1WOvyGVq1ErepJPDefJVUhMNxYhw3bMYNF5cXfPjRR9zd73n89CmpKN965xt0w2gGlhSO88I4GjE1FaXzzjYB5xCfYeVg1YlfX9uHgK9+VaUUNpsNXddV/khCsqPzPT6Y4agR2h2d942SvwZRes5rrd+UnGtQtXB/v1/Vh6d5rovYrV1VbVPqgmO33bDpDQ4ORZnmuBa44Ixr9BUYInISglQTtvSrCvZprPPpnGCqoM7ue/COTgae3x65fXnD5eXOjLjrhrSfhag9x+NM343IYiWhzeVm5cK1LjjvAuKrUKZrgU1rLDAyfAgeLzuiL8Rlb3PNgyl629+JC6gTeudxJZAQ3v6Zt7h98TFpEUKHdfyJQb2ipRKLhVSS8aoApBCjcQPRjEOJcSEu9noPu54SFFsDwZQXa5Ak+NDRSEaas3UJ+qoJJc46ldQCnZgqSuXbvbFya2sUaERyQwxOLeXtAG6Blaqae0FtzrB1ZAiXb0hUfVzwvnIMtSJaFsxBqV2C4ex5LZgKXaDvOrrQ1VJsXNe71ozeq//EHCpg5RnvMHcf/VzNbD+LIQLBOVIpzDkj2RojtLpu52L0B+uMVnwNOOZ5XlGhc9kbqGXCmrAWFKKtF9TEZ2NcOBxvuZwKo4N+O3D1nQ3vf/8HfP/4W/SbkTD0+K5b9cmGviPHxUpoJePr/j4OvVE0Qsft7e0a8PV9z/39PTFaAtv4TC3Qb/6xLakOIayls2VJ5KKM44CIEJeMqrAsc6Wk2Kc8FyvOFRE3DSxHKR5Ksn0/y2ov08jy56LZwBrYKc0NAYIP6NgjKTNXWF4U/FLNqCsv0ZLIen2dq8lg1Wv77CuBX63AyjZPbyW1fuDqwpCiw+FAXGamabKMN1gAsR23uHHAx4TbCCUns/TASieqGbcZ8JuBMi8se5ugrnhKqvwL78zRW09if6omI0DLcoHD8cDl5SM2mw2vvfYatzc39FNkmmYrPTiPhMDhOOFdoGTrspjmKoCII3QDWrGfvh9tgnpHKJUDpVplC+xgGsdxJSQOw2CmoWtLuFY9qxogri3dDygin3J97auIMM+ztRSj1oarzQbkpBje/gveyjKdA9f3bEJgdGbZsFp3fE6+T5/XOPFsmqVN84OTTzzm1b8zTq3QBeGNy5EwJA7HwAe3d3gfcN4xTzO4gRC2jNsdwSmPHw88erSl8+dNDK6Styv6oUpW005acqbg0CxkbDOU4Mldj5QZainNd0bKVnHkgtnN9ILP5jv2cv8MPwrdsIGygMNQEzxSraFyMheDHJMFmkE4Tibh0QWHZtPwyc3vrPaolpItYMrJtHZU6aslk3hPysUQuKLrHlwQxPlVoLaVUZrcicgnA/0mzimcAqo2msL5OSncLPpcy3do2mRakSvAkGOllu0qYlC5blV6rCZpp4StKbyv3EkRBgmEWrrNOTMvtq80z7iVuycWwJpYWMPMvtxDEPrQWWBeFc5jFX2VShnJuZjZd1XWz6WVZqnmyifXjLWmuj6//d+39v9sSXw+LDgJ+FjQ+4n3f+u7vH+4Z7vdcnF9xfb6kgvvkRTZ7S5phtnTPFnn4DAY/zdFhqGv+4TS9x0ffPDBWRe6ve45SmV7emEYhrVbtQU2rjZPtOR3GAZKnpkm4yWGEDjMR5wq8zwZQqsF560zcJkzQocWuxYpRnJSUl7WppDmqNEI7gQ7U4+TdQhqUZMywnThxMsqWC1akNZUVYNVsd4Du2fOTOTDSUryMx9fqcBKi7K/PTAviWG45GL3GOkEuo67ly9YpskCoMHhxXQz+u0W3R84FsVtezbdhoyasnpWUonkKTEfZub7BNlXPaeqKaKgODM7PduInXNI8ITOMy8LF7tLOu+4vr7icLjnxctbnr4e6EvPNB0Zx4Gbm1vGYUvRI3Geudht+ejjZ4TOc33t8F1vZQlVuq4zYrlggY4qcTGiug+BUlg1eVZTWgzNcr5qLBTwnSDiK1BiG4ajKa5Xeg4t0LLfh3ByQn/54mPm4z1xPtghW2vuKZteTBCHd0IvhbzMFD+wGbeknFGf6bvNyg35vDuUPttxKhlDlcFwJ/xu7Uw6gwvbYS9O8FL1XAQeX3m6TeDlsafbbljiQgiBJ2++bt56XaALjj5EtttMcAdCCWvpuh2uuVh
ZqqiRZaOa310udvi74gmuYwZC31NKj5NM13nj1dWwRXwAMYVy50AlmRhgydVCJ6CrYrqQ41JBfal8qmy6UqW1ay9AoCRDOEvOJhqoBZEq59HKHMHK49aKbea5po1jh2UpakKjpXL7XCDmSBdM6blZAaGcDi7na4kO09Gp5NlmbtzI39o4WFBlRApOHgZbUA8DaWT3U9eW91LNky2QWnsGC7SuQVfJwM4JoetXVNkVtTOoEtx9P1K0kZUVuT2Vm01s1JDPklco7ks7VI1fGLpuTd7aR1atWmc+ENT4st7XU7yuiz4YBSSWU2OSSKNOt/lVQR6E2/nI4IXRgddi3MAEP/zN38A/umJzcYmUwjTP9Euk32zMF7ZVBIqyGUfzGKzlL0O4O8ZNRynCMs+GSAn4rrP7elYCtMTZrQgmQIyLrSsxQ3LBc3+3Z7czdfb1M+RC75yR0/uOw/GIAPOccVI5frpUsdpSbdTOuMWumZrXIL4eICa4G8iLJTHiPJKz0Qssojur+CirElDlCIoqXgDNZ1WOn9q0+bHjKxVYQVUtdt74SM6zvbqg24yIg9uXL7m7uWEMgusdN/sXDBpN+yrC7XLPduwNru88QW0C5xiZ7hc09fRuyyT1INBs0btzNQs4bZ7ee8Q7Yj0M5uPRVKCXmfvDgT50aM7c39/xjafmKzXNC/f7I9/97g944/U3mObId7/7Lo+fXBH6rn4uW/jinImcelc3yQI5Mh3vDakqgWEYVqNZ7z1UErRz3pAtNVXqENyZOnSpodUJtdKV0G5bSd8F5nkCCsf9HV4KOU4187esqoihYSKwGQKPLzvbBFxg6DcMwD7fsT/u143gy77Bn49ztGhFE9StQYCVgCpN5oyHZn9rW367WptB6bvC1ZhYLsC5sXp9ecQVnI/GX1OtpF0Bsa4lmhRAMZ6giKOIIzvIvuAz6+NwHmt6U6RkOhcQzDapifR5MfucWj3DYV53opb4eFVEM1KMGOtQE1UsCSpRvdQ+VIcR/L1z5llYjPvnnZqiv4LDslwXWLtWXeitlO096kJF4qx9W+tXi258Vcb1RtCtZb9TOaU5IEjtiqwiolpVrWv5UsSjXkkloa4iAa7eR3WWgTtQClkzUkt0dlnrulMj8pbiLSCUDnXJ1mXxaMk4KSAzucw4HUnF1w5Fg6yL1g4273DFiL5SVdxbU4rx1ZRm5ZO1VP2hL/EQ64Ce5nkNgmp0C4BWEd0s0HXB0MFK7ra9s4pvlkTKmRit3N2M750401LLheO0INse0cRAQUqEytfqisB0ZP/sGYNeMw4dAMtxhs4C8qLJypZVjzBX380QeoZhw7NntwzDwLY2YsVlZllMjqF1ADZeUwhNvqWipmpoV+Nded9VwX6rkHhnlZg0LyBl5RU6b7SS3fbKqj755Acbp5kU46nsV4zLWKIFeZ0L1im4xNUezYdgJe1iSHXwHeyE+PIOnKAZs7OqCYwTjEOoIJQGfjV68+cyvnKBlXaBIMJh3uNuHcM2MA49T5884fpyx4e959nhI4bNQPaZm/mG159suOyvuP1w4nC4tXr14UhJiSAwhhH1sCxS/dZOJSupuk9a5fZzzquWlORsh4tYC3cIgdu7O5YYubg0Qu0wDFzsLvjogw8Z+5H/5r/+VYZ+ZBwCv/Pbv23ESF/oOoNV53lh6Du8CH600uC5DcGyLKb4Xi0HGqHQOUc544KZzpRN3JSK8QyCHVK5MqxaxtCCrGqlhvfC1eWG733vd0Dg/v5uJdLSMvyKPvReuBh7ttuR6XhcheU2G3Nrz7F1ULm1lPpVG7kqfqMPD7hXKg4PENFXBSMFxbvEblPRRy04l6Cq/1snq+K01nrrAdNeosZvlTtScN52r4aSFG0WICeeQ0FIsfIcsC4fxTZyqc0bWmp5uuI9UgN0U1cHKJUI7kHDaT2RERxd3cEs8D8RcyUYgtXC/64bIHgrhal1erVuynpByNXX75z30S5pUUir/5lbD6TmtQb22UuxMmkLrEoNXNSS98qh8hUF7ixoreR/C3zcqk9nf5PXDi97qyeEwV4PzG8qoWqq962MQ+3kUjXCL/U17IlaJWtNj9aAvCHEwXu8c4YefKnR4ofI4On+V10rm/QUEnNKeG2GzFXaJFtZ0PuAd+05TIm/0SyKFvIc2Wx2LB6YZgtqa/nZVSFoiRkfEzothFIgJfChSvMElnkBUS4vd4Rx5ObmhlKbfe7u7mtgt6ekSPCO6XCwystoHLBGUgdLbLvO/FqXZabrekPtciamwjBuaMbKMUY0W7LS5maNQFmWtCaC7SzJ2SRYqOdJjPFUlq7NIu1cMiubwjwtlg5W1FBCIGwDpSRygn472s/r+UQrf1fOoJYzhxBO29jnkRV8pQIrBaZlhgqxL/OBw90N4zAQusDbX/8a3/r2N5m/d+TZzQtmTUya+PjmY1zosQU4M3Yb2wD9UL3LHLfTPcvReCA5SSW4eusGrKrP51CsqpKjTURaeSwl9scj3dCjpXB7d8s3nzzlB+/+kHfeeYcXz59xf/cSubjkb//t/w+H45HLiwtubsZazrMD7WK3ZRx6xr6zw0BkFTYcx9G8l3q3klkbsdYJpPo+VO29NbQt52yfDct6u+ArbmW6IU4aSmIQ7VtvPuVrb77G3z4eiUusB3NFVWqQNPYdmx56p8RlYVkWttstfd8zjiMXmw2vPX3Kd7c7EOj7HyV0+OUdDzb834Xv8lAS4+HPxQlBDDmSGiGZblHz7irtbq4B3Bq3iRHajRfo1o3KezP/bSK2qRSceHCC0xp0JCuphTpfvNrrWnhu3CXBoQbHIBS801o6N1kEp9atJ+roQo9iZUDnLLCykp8FY6kiWYSwerFRVchRd7KfwtC3JtyJGqLQDJdbgLWWYmsreikPFdRDoM76U9k0pgQ17TBuI8SlaoZ1HSqerM54Y6VUex5DzqSitvXIrglIXgOk9p7OBUhboKwaWeLMOHbrgVkqAZuzudEOolZCecgVO1nkPBQx/VJjVus9hcZFOgXLXe+hKEtO1mlWtNraNO2wVoJlTRQamd2S3ZmshYtxiyAEETQrXptVVyVbZ8XHjBxnshN48oh0OOBKIYeeFGsHtcBhf8C5QBeazYsDddaVrpn9/miNRymSNdEN3SqrY3wws1BrTUsinpQKy7IHbF5Mc1x5VtZAk+l8IOdSS6eG3mVAnJHj10aXimKFWhE5P2ca4nvq2j2R2lVPnbgAXixZSjkxDIPdG4z/5u2Nrgl+cRW1asWNzzEX+EoFVmAHuJRsrf3eoYsji+Cc8vzFM3aXW3o30jFQXEAlcTzMxDLRjz0+KNu+J3Qjowx4gTlGPpJAcsqxyvaXulGfGzFDnbDTZCW4dZKZ6GFbgERhXhact+xhM16wTAvvvfcu+8M9qplhM7LEiPfCd7/7j3n77XdwzjPPE4+uL9ltN3TewxDo3ClwS1UkEFjr7u3gFeesfVULOYNzVgYIocqB1o2ikAzNcp5AFUWtn9NjWfFm7PiDf/AX+X//9Su+39R1YUUrQvAMnWfsoCxHkh8JITCOI0
+fPrWFOU008cmcEzEun/WE+VzHqxIKyI/eKc6DqgeBVft3QyNgjZgs285rGcBVngJyhmNoex9WelSq2TfmNeiDQ6PS8m6DQRwiAe+zCRSmCCoWMKEICS8Fp5DE+FmNv2GNjwXItbTWPpPxFItmnGP1C/SuFhSz/U1wlRPYHBOolb3mIiZNTwpKMnK7VF2edgAIp86p9atriYTN4RDsdXIyJXjjUtUgxwdDi2hdmorzXeVNOZZYambtKUVMaFRMrd44PQ8TsOYP2IK8FlzlWpbUnEAXvDeUout6vO9AwhkRvjyYT5+cZzV4PkPrzibXP8l0/ed6NESlHf5Nj4yKWOUanAfvSdESjJTNmNk7MS5tVbAPTii5rIGAdfQ6jvNEkA6RQKcgVSy2oKYbiKNLipaZbjNwePGSRCH4p6Bu9UstueCrBI73gSY8m/NCP45sxoF5OnK4vyOnwhLjKt2Qc2YYBi4vLolLWs+ncdgwLxO7yw2H49EU1pfFyoarr18N9sVcFWIyKZbt7oLb21uSGv+wNW5YV7MQa2XEuFY2GrrVqhBxWRAx6okX8yCMMVoTWM6QMvP94RT8NvqJ0DqzTrzfOl1P+vKf/fiKBVbQO0Acve8I3qEiLMuRVBaWPPPi5jmHKqi222zofOFYIvPhnsM00fcOycqT8RFPLh/TO8/L+zsuL6/REjne79FUbNOuCJJp95xKCC0LoHaQaBVYKzlBlUHQUlBXSHHh2b357b18+YKrqx05R6bDHThHOk6I67i4uEJF0Jy5u33J1eUl23FgCBfM82IlQ1cPPjEibG1MpJSE9yefQZyQi/FuFFhKQpwCxssxAO4VVKRl21SA2CnfeOdt/rU/8T/md37jH3JzvDdExDmDoJ1j2weEhWmekM7TdR1393dM05HNZsRlu265M6f4r8D+/mCctzIbYvWwFNhEU/WfsEwj6s4Oi9PfirRp0Z799HziZIXz18cJeBy5lv1ao8YJOXGUgrX69yNLFb80HbQa/FQyeRFXOX1Ypy2gmjBMiYrChspZjOSScT6YOGOVcvLeVYV1XctgTpw1YdRNvpXATJi3EmeLddmhBUdZk59zy5jWou6dh2LrOcVcO8asnFKq0GH73gjtDs2n+5QxtDY3I1qpumS+M/FGZ850zgla0qqD1YoaLZgyFfYaAK3EciujOgfjOOCMUFbnjq/onDwImmzdn97f+ZyCh8GVBYhf5lLgKbg875Bs5a5ik7A2PUC7J1ZutXJtqd2VofPGHwWO82xIGFXDKXhyUvK84OeElwCaTsutFONQlcL88iVoYo4TeZm5fPImQ3/NEiPkzNj11vDRhDtxDH3PxeU1fd8TYzKvwnEgxSOHw5HtdoeihH5kjhZUzcuB7XZLWiK7y0uKJuKq2r7Swy0ww7w0obDUpDyEgCQLdASxCkkLlmIkzjPJsiO6vrdyOzyYj22PC6GzM7AlOFrLrFmJ00xXNfUqfFv3ntOeVV5JCtHf41h9JsM5x5tvPmVZUi1RCCkvqGaWnHCzWdkUb3pV0/6WEhwlmJL5nGZCd8EclYObWdLCMGyYlsW88Y4LaUqIWnZr5QETWvTOr5H8argKaMpk8gMpBucc83SkMPHs448Am6THwy3BZw73dzgR+q4jakBd4Nn1Na4LOIXD/o4PPviAr7/1NWKMuLhQcqbfbWwBOMtOTY8FVLN1CKWqdVWw2n/OxJzxwVNSQiQg4umCW1XXW1egQD0MM50IBUc3dLzx1jd46+1vcvfsY9IyV6Shp/cwumwtu+LYzwtdTmy3oyltazJhyGbnoZ/MtL/co8L7GMqxlq1qwCFi4pQtqHoV3WpdnnJ2zQrW2SPSdPOp91vWwx5sk6aVBM8PYwS/9oRawhBTpqNm67T3kHHOOggTjigdUpSeBSQAJrJpDROVg9R8C1twL4LztWuvC2hJa5CXszWGlAJFs+nl1GGlgpOcib0f43LZOVm5WuIouiDV5y+XhKZMSa2jSCo3xLLsnJbKObLyPrms90Ol2tvUEpvLycypq6acGVq3rqm+BmGmwD5PkWFn/C8nHqeKukKOJm+43ldqULYiVGrOChIRF/FBCUNH6LaEMFqwthZ3FW0y71r1gmowZtFrO35aWbAiZRmsceHLve5U255s9k7ee6ScgtE5tiD3XMjVyurUkrfpphWURCnNwN4Sd++9cYdQCBAWpS8ewSFSaP2kRep8yoqPhfjyJcxHDoc9TIUYZyYH236ko2OJCfGei4uLlSur6ri9u2dJhX7csrvYMN/fcTweiRE2uw1zVFQKY9+TSiJXK6rb21twptFmpTqz87GuXFDn7CyopdBWyjMxXmX0nZXSg0edOX74EjjOEwBd39HJQI5xLUG25zjX1aLU8r73FKz8air1h1NJW9u9axQJWdG/1qW+5gu/x7H66Q7vPP/9P/Y/4Ic/eI/33nufaVoY5ILQdbhqvDzHhXLYc3v7nOc3L0guod7cyQvWHn1xccHCwou7FwziUM30Q0eMN8QUP1GWOW+nPpcNOC83tKFO0LhwKAUVx4vnZp58e3MDFKaDcPP8OVePrjhGUwB23cDd7S1PXnudm7t70MzNzQ3H48RuM5Cno1neOI8LPaEbkJpt4ZyReVXsvZcTr6Shay3YS03j67wstU5aC7KkZreKweZPHj/mO9/5Dj/8x7/BzXM7pLogjL2n7xzbcQdyAaFD1QyYS0os88KTp49owohfpCEiI/A3gAFbQ39FVf89EfkZ4JeBp8CvAn9aVRcRGYD/G/BHgGfAn1TVf/xjX0RZS0BV8OhkiwQ4aYHC6dq8Otc++ZRVRJRTB+fK2jov/z04bM/ByaqJU+yvvDiK2IHgKkfPVV6Tub44+tqBG+Nk5Tpf31/1wAuudsRVJEArA1XqZzArjvomtCJPYuUxQ13tUGodTyeR1JrVlsx68YyJQc7Ga2qdkzlnU4POidq2VIMJIyE3v0ATUmzihtK4s/bWVrSt/dfKt/UQqEiZFocTbyRn55nyRKrdg4gFjGtzQEsosO7PkqwRoPkZak1kxCn90DGOW3zoca47tdU3BKY1GlZ0wBwU9FMSlhpMN8mFisR8meFiEdur2pxJKZ2042qA3SgbDalabYG0EcGllnozMaYHSLO9hqC+dVsnRNrfPhwFiJW6oUmQw4wumeP8AfN0ZHzjNQ5ZUXFsgp0NbZ9eloWYEiknvHc8evSY0HnIhXmJiFjF5LDfU/JIjlYWDKEjxszhOJFLYhwH0+1yQnC+cu48jbMXYzJ+mS8VUbYmhzgbl3aeZ8LQsSyRebKgqgkNA3i6Kq4q67miFclt96MFRueWO7FKnbSV/PAmcpY02hPldgvzqw/+6Y+vVGAlIrzxxtukJMQkTNMC4i0zDoGsxaD//oab6UApMM9H1BfTfvHCdDgyDiP7JdEF5abc8fLmlhfPbzgeZsvmzw63c8uDc9izfd9IfevmVjKNCCs+MB33lJwoJdF7z3S4By2UJTLHjLrAxW7HMIzEeeFwOPCNd95mM461NbgQc2LsdvhuoOs3hG5jKIgzsUN7s5Y1t
IPWdFECVO6V9xDTXA/mh0lA+7cDUGeAFzBXv8D7u7v1oAzeM/aBR9db3rge6TvH/nDgflmIy0KKM0GEi4sdr73xBh99ZIhdqITIL8iYgT+hqvci0gF/U0T+K+DPA/+hqv6yiPynwJ8F/i/16wtV/XkR+VPAXwD+5I99hRbvnPNiiolstn//KATvn7aD6xOk9095WntMJUxbEazyfqymZCR4411Z6dCCNhHoe5PSKDGRkxK6hm5VPpM29lINFmu3lavq8aqlImDGi0yVF+i8M+FMMX0n74xDVTThVIg54cXeT6nBlSE5hiTnUm1ryhnKVwpO3fo4yORsbeiqhZi12ts8rErk3AIQ+7NSI9Uau1QOYiPvZrMLCR1IRymemEE0m9chVXurlf+cI0U7MGJOaErEOONcouusHb0LPcEPeNd9gkvVgun2szUokBrIvnKfG9oJD8uiX+YhSm1aqAKvxbiH7XAHKl8wrfcFqDQP8MHmjJXBrZOzaazFmBj6niXZfe9ibDopNM3QFkCrKLkmKl7BJUVihHjPvMx04wiPrsi13AxwPB7XBH3cDuYnWJRpWvDRM0clFUdwghfHMPQIME8zMSYzjQaceLIWE8FWCL5Dga6zpqEYI8ejeQHmXAjem4cfFnDN82zvSYRYMrlkckqrpM961nFKAtv1bE0Y4qxRwDjBhoppKRyrIKndrFfuXf2/EzOIh5pTfY65wFcqsCql8I+/9y739wd8GBg3HfOyGPcjRXIp3O7veO+j7/PBsx+QypG+7xg3A2HomWLkfjqgi2OZZuJQOGrhow9vefniQE7e1GbdJ1GEV4Oqtuk1RGh9jFZfplqL9t4zHw8IkEqi5ETfOXKMeAn47YbL62uGvufu5pbLy0suLi752ltvMY4mrtmPHd0w4Lue0G8o6iniWOJMrpl0WhYkV7uOml2YF5rDVXPSvh+qvolfD0LW/KABK0JSazmf5oVnz59zc3t7xhERvCss04F84fB9R0kL03FhWUwErx963nzzTZ6+/hrfe/dd/FitR9Ys8vMdajfvvv6zq/8p8CeAf6f+/C8B/z4WWP2b9XuAvwL8xyIi+uNqm/pJjlXTEDt7H6++r09HqlaO1Clo/nHo1rlC+MPfl1p6rFIcxTR8OvFoFgqVS9SaByvpOziPkw6VrXWPiqGQJZvQp7wiktmoReJOPmCIJ4SuolumzeS8Ay0VXBNiUsS1rkcL8LU05MoOFUOaErlERLRKHNRrXIQiQk4FdbYOvOuImteN2spGVrJoYp8NWSvFeFoFV5G1E6LmSiOHWwejqwKL3RDM4LkEEsVMseuhLM7I7E7t/U3HA1TVaUt8fEWqOoZhxPsBM00+UQoMvXs4Hx7KCjwEo1bBSH3lsT9yon4JhloDTrtm3nvmbEbX7dpYV1ykHeNNNVwEQ4XQiqgofdet6JCVrY1AHrqh8qjMRJxs6+nB1VVLUD0W7Dmt5fy44Ck8/8EPuQgBN26Zu9nsmTDfVUOfzFqmZGW/PzAMI/f7CfDEmLhP99y8WMA5+mFjmobRusW1ft6cakd4hZJzUiOxewFdSLF282WT+1CNVnqvyG8/jsRlIRfzRITTvAoh1I7K6hvYOMcpk2sQtUzHtXRq+mB5XaPtGjWEqp0/Wsqqjdfumf+9wOqzGaUUvvfdH9B1Q217FpY8s+TInGbuj3uePX/Bs/0H9IOw2z2i73uywj4vEDp6vyEeFRcD6j3zXFhmRYplij44FIMsz9tLW6nDNv2GTp3aTNtXBeNZ1N+lamJsPKNiaFUx09wweC42Wy6vrusm77m6uuLp0ye88cYb7HZbKAnfB4ZxQzcMZsdRChoX7vd7hn5gGOzabPse1WqSLOZaPvSDoQboeoC0DGFFrupE1/q1YN6FJRsP5PmzZxz2Vh93zv5yXiY+/vDA5p2v8ejRNZPeV55CxHvHN77xTY7HA7e3L9ErPZWBviBD7AT7VeDngf8E+C3gpWptjYF3ga/X778OfB9AVZOI3GDlwo9fec4/B/w5gNcejZ9AHs6hpHPi59nfPwjkX3nuB18/Ldj6UXHeOj9RO9gR67qj6osJBLGgxCXFeTsMROx528FespBKpPMgoXoH1oOmtWlrs6rgRK52zuGDQ1xvCIJmlHwSGRVp0IIFPo2H5gI5LRWNcNAcBMqp5JZzNgPnpl9VhTNTKoRg3LYQuqoELavWVMylKku3rlnTo3PerDtAViPlVIm4vvIZh8EEECUIrhOUAeit7JYTToOJLNZrGLySnWXvcZmhZLwThsHThWD6Q6HHu76iAqf71poUSr3H50Hzp99vO7Gs0eBHo6JfpiFichcpWkkqc/JVbaML1lyQq8xCQxS9d2clYuMFtm5Qqc4CUEt8JSMxshFZn//UlHJ6Pac1fNO65sT2zACMRWA2E/EUI2kxfanQBYJ33Lw84kNnsidSqouAOWoEF5iOd5RkpebzTsjmm6kC3hkCtcwLQ98TfG/egTmRszLPC8EHjofJaDQVhRXNhGA2UEvlJqqA+JOGFsCcEl0ILHHBO09Mcb0WcZkN6W1aal1nmlxyauABWg8WSEXJ2zYoun7r+Pwq2F+pwAqBsmSWHE2zRhde3r/koxcfcDffkj1MMVM0cNFfsOl6lhi5O+6ZuomLqwu65MkTqAj7JbIst4z9jtl7tFtqnb47ywhbicBVsadKahdWaLMdcE1U0UxWa7EkpzNBNiq3pMcPPbvrx1xcPqIkcMPA1fU1T5484Y033uTx48emWRUjfegZhi2IkYXndETmwnw4GOqAYxhGy7AtubfMHk5Zh9pMlqbAtgpC1sVTJ7qZd1QydBFyitw8v8HhCf1A8MJ2u2V0CeKeFy9uuHz0iL5KQyxLZNwMXF0/5nu/9ev0zfQX94Xa5NUUX/8lEXkE/BfA7/8JPOd/BvxnAD/3zpW+Wu5TLCg+b7tfVZOpfKmmiSOrOADQkJNTRt7+3oLi9hxnOlmVO7I+J1amqFLMFdkUHN4CmVoeM0sLquyB8e0KArlKKxRTbC5LruV1O5xwzjJ+USvDVcTHe2+CtqEzzpmcCP1eHKVE4wsCVNsoMclzS05qnFbEyqhFi/kdZsPXUOsKtA7BhwhWLjabqarZTlq5h/oejHfTyM2uXSoxHSRt178Uspoxs6llq3W8ukbWTYZUE8jJ5nup121ZIinPFBKxIrpj33FxecHVZc8wNFcE+7tz8PHkFWelW5OScBUV8WsA0AJZbZmRnmVJ/NOXlv95Gwoc41IDpRYkO5ouH618Sp3zzlBLMPsWxPa7XAzVzFWl/7wUm+v82OIYsgVPxdWGH5EHYq1VSnnlEjmx8rtPSpfMnaDkhbQo+5TMnLnzeKfEDF0HIShdJyAZIbPdXNJ1HdvdjmmaSGnieLinlFxFQ0d8dSRYYoRivpglJygWIC7LwjwfmeeJ7MIa8DUCetd7xosdy7LQS2+VkCrSKwjiDcVTB6k+vxbroC8po7nYmiazTAYoFGdIVEjKUhR1VhHyekL6HGatdc55pL6qthrrZzy+UoGVqvLx7UfMS2aOE0s6cFyO
Rkwvib7bUI6wHBPH7sg999wud8jGs7ve4cXz4uaGjoHtxY44Hy1gCAMiC0bwK7XL6jTh3FkJ60QgrvXkNukq6e6kO3OW0XxKyafB/DFGrq57rq6uuL664vHjx7z22ms8fvSI4GtG60/QNBlyimjJ3N7ecLHbkVOEqg58ns12XQfalL9XIYXanVV5JHW4GlkpuhKXsxZ2V0/4F/7wH+XXfrVwd/PMtIXEBEZThOO84A5HUI8XM8t5+2tvEZzj+fMXa9bP2Ub1RRqq+lJEfgX4Y8AjEQkVtXoH+EF92A+AbwDvikgArjES+48da/doRT7bvXkVddBSA3ZnemK2KbdtGQvqqbygM65D63Q6Ly/WP+C0GzkakV1d64AyOQPj3zQulW1jToyfLjWJLHVjE8F4RcWsP4beNODEGQl7XhImGWACgFaG84aQNnZ7Ma6VIJTajiriqzJ5KxkWSkwE6S1br7GROkVrFyqVW1RyRqpwbtHmTgCaqilz1cyykoQz0nsNrHxDp4pZVpXaIWmXNBFCR15S1dtxRlBXofeh2uIoJdqh7ViQLCyxImY5cnc44PuOOSY76FQZx57txcjFdmAcOvoh4LyYdx2lmr1jmdHZHPLe28/KqcPPlpJHtZWrzhNBbAatT+POVvqXb7SA0tc9tQl8IiAqD/dvPckxnJOvHyTHpboYrN8DLeAved3fW9mqcWrX5bqWXs915+xMSXGh3O/hJsCjK1Q8DkeOGTkILgxEItutiX8uy8K4GRnH8WGgXflNYHpTMUZD1VKqiUauyGhA8PhgkiZT9dOdl4nNZiTG2RTnvXXjtmsxjmPt/PMr/2tZlooQK+LrdSusPKqUEqJNt83RhcDhMOE02/7iHWKwL9Fbifxzg6R+l/GVCqwAcIXDfM/d/Q0xz+QcGTpPj4djppuVKwZSKUQim4stw9VAzIlSMmPoGfzAbuiZNbPbXHFzMzPP0ZzpAc6CoXN0wYCXKneAgmsddjZpSi4497C884DYDqtSrQum6Nv1MxeXl2w2G3a7Hd/61rd44803GDcbhs6sSgQrAcRpIVey4H5/Q15m+7czw16DYE/vtwVwK9m+xnt13z1jV8m6EQBVgkEJXeBie8Ev/IHv8Fu/8Q95+fIZA2ILpUZLiuP+MNFLoOhCEM/XXn+DmxcvudvvWVKz9LB25i/CEJHXgViDqg3wr2OE9F8B/i2sM/DPAP9l/ZO/Wv/9t+rv//qP5VfBGkie/7cGS3WjbX6B7R79yKc6KyW2x53Ky59WNvyUJE8aciGnIAYwgc0m7mmBVfCOjFByheUr+dy4RRBCb2U2VZC+zh7bPB+WO2UtBy450QX/IChsB0Wq88N7h+DBFZZkCupLTHQ+oNlEQK2XO1NyIceIy6WWvIxbYnPZlLFXZfjagWl8EYfHfAKLYnY0Yh3HSjusApqEVMwrUfEcZ5N1kU1HwJGykmNeS5/eOw77IzGZqvT+kHHBELDN9ortsGUYPJuNYzMKXWd/Y7zDk/guYnIZ7f605K593+ZDKWVNjD4xJz4lxf9iHl8/mSFipWLVsvI4TY3fdqg1ODrj+bSEuXUQnhPaP63cLlpwxcQvDUdpyu586sW1KpcFfCc1DIGSyfs7dseR7joTcyEXj/iOtCREHde7C0opJ0FOEe7v75nnmWEYEDG/wO12a52EMZJSYpoXFGW33TDNE8t0BEweJ3S+yioUrh9dczzsK23E5l9RZQjjauFzzitua7WUQk5m6ZaSBWkt+SjZrmOOEcVcDdK8GGpY978iLccz+RCTtziV/NYK61kW8HulwM9gOHH83Ld+lmmOHJcjz158yLvffxfNESVZRuw9vRcWhDBukW3H3d0t98c73vra13EbYfQDQxcY+yucjOz3L5Fq9lrWnrgTb8VuelUrqW27Vnd2VZDztCl69+mdOk1ctP07xshw4Xn99TdQVcZx5Dvf+Q5vv/02l5eXbDcbM7BN0bz+cmY+HK1+LzAd9ux2OxzFMnQwTaL+tDkYGdcRpFo2nJ3rjZxbt5DV1iarcJwXbg+ZZx/d8MPvf5cP3n+Xm5vnSI6M48jVbsN28KTBsZ9nlqT0YojEk9ef8Ojqml/7tX/Azc0t3TDWqyAPFsznPN4C/lLlWTngL6vqXxORvw/8soj8B8DfBf5iffxfBP5zEflN4Dnwp363F2hZ7IOgR4WSqZ1ytUxXIfFXs9FPlm9OLI5zPpXwEAm0AFpq9a9JM7TnOx3QVuqw99TKFaGWj+15Ks5RWPGwYeyJs3LY3zGjDH3H0G/NENqZJ5jN/bKCZjnbZgxqAostsaiHmamdO1Z/MRHEBZZlxiOoOOvGqpY8KqC1GzDHRHMkMDKyXSdxXZ3fpSqrW8LhxdXg3ng0RU13CKo1iTcrmpihRCVFa0DJOTMttobmZGKi3jnmJZOyoQPeF25v703wUTq220f0vacLns1moO86ht4xDEoIia6jBqvy4L62pO48KWqIwSqMqhVZwEqz53PinKh+mhYPA94v3VDonCOmAtnOAAt3HybH592UIYR1LnrvVyL2yZfVhGtXmRrv6AR6r0hFCItgc/RHXFut76F1lmqTaDgeiB9/TBegu9oRAbcNuBDMAso5pmnCOcd2uyUnM4delmV9v+MQuLi4WFEr+3yZnDL7nJkOB0qOOPFEX+hKIMaFEDzzNHN9fcWLF8+tfBoCThzjaO4Z5y4f3rsHlZu+diSm2i2Yo5Huo0LMlvhDNYEWh6P678LaLWyoct2LXrlm6+/l4b72WY+vVGCFCJt+axMy9Lx8/pygAQkOxeH6qsF0zATg0fUjDnFhf1Aeb6/Z9CObbc+mG7k73NIPA3d3e+7u9ijWFi2Anm32rgk7numdiDSulZyE+5xlTNbpJGsQ1YZWLzcTPjRz5qvLa4oqT5++xh/4A3+At99+m+vra3bbHWaRZqgWWltr54k+2ObvRBj6npwSwVfFWxFKNc9sB26Ht823lumoCa1CVcpza9YVF2XOmcOc+eEPPuQf/v1f593f/kf88L3v03eOFIQxOPKy0O0ueHz9Opc5M8eCHo4sM3z7299CUV48f262HFB5LY1f8/kPVf1vgT/0KT//beBf+ZSfT8C//c/wOq8EPe07Z/OqBTcNL5SH6FNL3qRyQNa5d/bcJxTqjE8jVUwUrfPXnsgO3WbT1DJxRcUsPZyAx/5NcNZdR7OWEKRYc0LOCRccfR8InRjXKlSLkGoD5dyJO5ZSJoRTWaUdXqavY6VJ1ROXLJdSO/iMYC5q5e8gIB7iPAOV11H5VmVtnRMLsKRJolTlbGnaXN5QtMYds0+MVsSjqCNlYZpmYrTSYIyJ4no6Lyw5w2KdVce5UJyn5ILqQghbdhcXON8x9J6hcwSvDJ2nC4muh9BZGd0qVVqvLGt58zycPuf4nN/vFpimsxLgq3Nu5Vut44uT1fykR1sf3plJt7lePLiSAPhq7WXculqehxpERELorPw2z2uw1HiM1hxRCO5kpr7ysOThK33iStfzomhFh6NS7vbczTPDW6+Tr68Q6dCLEXGwzBPLspgtWFw
Q59ZyeRkGrq4umaYJEevMdc4xjCN915MlU3I05K7Uc6iAlsyyzMhmYCoZxBIExLpTQ+hWLbkWSIlASjWILErXBXLKVoKv6Htwnul4ZDoc8dJEeS3BCqG3PaQmAk3dXYBOHFlNmsLQvYfX8POuEH61AiuU+2Xi2cuXvPfe+3z4g/dwfuLy6oLgR0LlIunGJoW4wJyOfO2N19k+uSRpZrfbsB22+I8g4ljSHt+NhKFjWo6ktOClW3V3mrix95UMLJ6S1bJfTbi62TkFp5Vcen7IUXGDVrcXXTPT7faCb//cf4/f94vf4c233mS3G9hsNnYw1g6ilCJDcMTpnq4zcc64LGw2O4QA6qqdjaOU6kauEJyJGE7zREeHJMFVt3tfJ3hBqrihEQdjVqYpsz8mFM/u+pq3vv3zbC4uON6+5Hs3N7iuw+XC3YuXvHi+8OabX+Nnv/517m5eIHLFt7/9Dj9490MU2Iw9XdVAytV65Ks1ZA2eSg1cDEipB6o7oRUt2D0F7euPa9zuMcTJhDlPTRXViqbW/6ShrmLFvaaqDw83qzUgc+BMU9PKgfWroa+VaF4/x5IW5nmyn4j5YZaS2WwGex7VavPS41xYbV186MzweS0tuAflrSbYmKpnYS658gkrR00hx0SmULwpnqc0rd2ArpUtnDOO05rttgaNk4ZWzNZp1ZopCo3wbijuNEdiCswJchNS7TYoHVkyosphzqQl4YYrQjfinDD0gRCMJNwHIyL3AbwrBC90Q6ELQvCCb1tEi3j5lBoID0tRq4xCvXe5lBUVbb9bD/pXkdJPoJ9frqEoiVK74kwqo3HRbGnUNXK+GtTuSwtabf5FtKRTElivYQjempWi0iexcjXF1k1tASzoepldaUFzQ2gqdqX1JwVcLEhaSO9+jNxFeAq3NxN0hbjd0A8Dx5ytAtONOB9QJ9ZlTmE7jKQ5cZyOSPBMc6LvOvOXVYd4jxZvVY95oe+2+GHk8vKamCMqQj90lFIIQ0/fdbVr8KRLBYbsCSAFfDGz9RwLToV5mknLjOa0yku0Pcx1gdREeJOdR7kGoSrSKIMrYuXUOL9ZINVqSlfv7ucBtn7FAivhe+9+n7/3D/4Bty9ecr3b8cYbl1xsNnjf4VzA+0CnyhIXXh7v2V5tuXz6mPvjkePdkfGiY15mpmVhSsKwueCdbz4iFbjb33F3/5LleEQQggTjMK0bcasFW3tq8yZbNzF9WNI5jTOezVmp582vfY2f+dmf4+LiknGzYbPdkXMmLjNd8GhRhmHgeLhDvKPvOsvIxMoyfWcBoPcmxEhqZq9W8w5Dj/qAUyNyajbrtVJ7/7KyHpoqYnBzTBwPE4fDxP3+wO3tDR9//BH39yanELzj4npnB1ssHI9H/v7f+3toSbzx5lMg8Prrb/KdfyHw/ofvcTweEanqvl8cgdCf+lCEhJWfUiVqN1I4jWugJ8kDaGUFqR1Np5GxEl1Qv5YTW4B16lu2g9a6CR1gti9wjpz5B/NSqJB7JXiTDdnpnScmaHZEZjaeyMsRzbEmLfZec4mkbFppy2Jmzfg6xxxVH6rxyNoaMAsRA1ATucws0Yxej8cjTjyazfrFCbjgSaWwxJnOC07VgrCiCE2x3ZDmXBTfFVK2Oe7E1SYOM09WcaSUidVWZhgG4085mGIhJkdRh+t29JsecITQGxcLIcYjTjKbEfx4QRGPJ7PpBS+F4CFIRaVcFWEMQu86Om8IlnfW6WWVaKEUWe+NarvbrPfOvO6kmsI3JKWgmurX85knZ0T+hgJ+NRKa81LfOs3FZAJSjrWx6FTig+YlaZIbZRUVtWtmulKBZbESWo/AfH4tz0qvPzJ2PSHMDc5SpfIXq5BszqTDgW6zpUjhxjvcboNsN4TtljvZMwwj/TgQ+p7D7T1H7tmOozWh5EKRzGGJBO/Z7TZIqVY8TgluNK/ZEIjJSO7jONjva1nUe2++hmfl0M1ms55tJZfqT6jkZMKh5EJaIqyWU7UsahAsrpgnaVJYUjJXErsYpFKMcwVQqGeRzXOt1yvrj7msP+XxhQusKm/l7wA/UNV/Q36CNiGqyu7igm//zDdJb71BDwxdoe+tg0jEOkNcab5fStLC/XTgw4+e8+LDZ3zs3+fi4pLjosza8/SNrzGMl2wvrhAPH338Ph++/wOOhwPz8WhCoktcW0GtDVsrFvCwDvwjvz/Tk2mH3MXlJa+/+Ra+63n0+CmPHj9FEaZpYjMOliV4R8lmGDtsRprJbV9TXh9qu2sxKNuHilphruk5L6C9dWLUUVRtQ67sGxVTv1aFZU6kJRNj5sWLGz744EM+/OF3+eAH3+X+/ta0iDxcPX3CMt3x8cd77vf3zMeFeTnwxtfe5N33PuLR9WPefufrPH7ymF//9V8n+I5Y4j/DbPrnfzTESTAOxDkC8aAxglfKOOcB0I8gJZ8OzBMS1F5vDZbOnv9BufD8DbbXqTwkFYdTAxi92kyJZQGNhIrcUiKqVn5Oy1yJstD3fcVC/crPsDAxU9R8/HJRUrbuo1xV2HOVJVmWiEi1pimVqF68fY2zieMW28hr1XEtL4h4pjjRCSjGjemCIy3ZLDuSaVTNMSHOU9SxIGTtKQmSOsbtiO92phovntUKpHrLdSXUAErABVOMV2XoBS9K8EJwGDIlgvdK8NB1Dl/RKhOz/Kc5Mh6WAX8c9+ST3YH8rn/zWYyf5rmAPpzjJ4kK+2VDR83Jwn62LHFNSr1vyKmrXL1SH7OsBPJCIWS1hbXWrc4V8eu6kxU8XpHfmkOtf7OuZFWcZlxckJhw+yMemEU5DD3l+hJ/HdGuIxcTMB2GVEVBI07Nvy8XC+A32y3b7Ybb2xtyiXRdIPhAnE1IupTMFBdEhC53K1K8LAv7ZaELrHzF9ruG5BmF1poDtBTiZI1TDiGlFpDW9S6OfgjkKZqMC9YIlc+Etxti1fpdmvpdVl3TCtNu49Vt7zMZX7jACvjfAP8AuKr//gv8hGxCVJXf+e53meZ73nh0zdUw8OR6x2Yz0hzhc1b2x5l8f882BCZNvP/Dj/j4w+cMzuMC3N7dkmWD9Bf4Ycujp28Sk4KHb3zr57m4uOb999/j5bPnhG7muD8SvGcYe+5ub5iXowUm5ZNq7Oech5WEesaLaR0dF5dXzEvkydPXePTkKXNMeCdcXWyBinxhfBPfWVbuu0pMh8otSdZCHkyw0IkdZrkkjlOyINPFyt/QymkxiwMXPM53CCfLnr7vzassjExT5uXLl7z4+F2O04H7+1t2Hbz2+mu89Y2v85u/8fcZNxu2w8gyzITNU/7ov/yvcnHxiF//R/+IefmAx48e84f/8B/l7/zwCYfDHu+/iNP1pzdenQ/1pw9+/2lE9XX+8DBj01IoZ3PJhmWcp1KQrPf7Vc4W8InXWn+OJSVFhVSsTNk6TXOKpHmP6MLYOWvN1tNniXEBNVXmeTrSo1VmwRtK4FxNfIyz1Qi3qqDF1a6mDCRQYUkJofrp5UTpPF1FbLIY9ysXU42WiuxK5YrMGVIyIdw5ZbIzf7c0x8p16dHQowi+G/DdQImRTd
/hvDc9uCr22VBqEJwzeoEThyMTQi2nl0wQxxCUIIL32H9O8U5XrS/nyroPrGgj8iDpOt3XV/6tp+/PS1enPzsVjrWcUK21SeGLgVj91M4F5NR9/aB0yjl3p3rVqaxBVJNTaMjNQxmG03UGanDmTguyBlCuoa9ngSxgwV4LCqTywM4QLkOCzZjbOZBS8Np0Dy2Qn6eOEjzj9RXzMrPMM2maWfYHtrsd8+HI8XBgs9uiRVnmmVIK+/0eJQMjy7JYYOSMM+yx0uZ5J2SpHNxUO/tawgOG6GlRSkpQjAKjKdOFgJTMUss4glQVdjMsX5ZonK4CkmsSmVqQaTzOgja7eKOjnP0Hp8D0Kx9Yicg7wP8U+D8Af15sF/+J2YSoKr/xm79B5xUXj1y/83UeX19xdXVFXBIxmqx+8j27MMDxiJ8netnTu5HLiw2vv35JLMqz28SiA851+NDz+LWnbHYjw9jxjW98m2+9fMn97S03L17y/Plznn30Aff7OzaXl3SpZ1km8hTPDs9SFW8rL+SMcFqqy337t0G0jje+9hbb3Y7D8cjTR9cM48iyRPrOCLElJ1Sz0Vulqkj3PV6E43HPdJzw3rqxconkxcopsUoy2Eq3qdt1HcMw4LuubioBsDKP0VlMeHGJhWUxgbyr6yu+9e1vsb95xkfv/g5LnOmHHnVC6Dr2KfLicMRL4Jd+6Q+yubjg42cv2F1ecd1fE4+R29t7Yox0fb8q934lxllZuH01vaJT0GMPO3WAfVrQo5xaw6U+bwvmbXO0MhI0baw2Vz4FneIhktp+L9jBlGsQIa6SH2pZajneEYh0o92/UtRK5ChIwIsJgi7LAgJxmnDe43zAiUmGxJwInaPUsoE9TxOOtaw3xmik7GJZscMEZ2NMbMeemB2JSpJXcxfoXDiprhPIrgfpiVnBb8nY58oidF1vZZBhJHQ9Png600lkrFxA1WJSCjmfrR9QKSCKl4J3pqYOQspK76FzhSCK83ZQeteCK/MpxZUq01LhjPIp9/qVe3MeYH9akH7iqbXIi1UyZi3h1CYBfvS2+lMfP+1zQer+tsoTwNl6smuWS6Hverz3TNNcJTpOXdptjYZgBO0YU53D3hIazXgaJ+sUsBU9KYUDK1/VKmLWOegqOmmoKivCVsQ60ZOq+Rxi1MhERkIHXtBqvVRyMfX4oizTYlItnZ0jMUW64xHxHh8C/RAIvmN/v4fKIXTec3F1Sdd37HY7ckwcDgf7DKWwGUeGbjA/wcMRHzz3d/fEZIFV5z1kZc7WMVhShlKISzTOrg8UjSt/OKWE7zo0m4WTT1bvW9d+Bf84Td0q81MTLq2I1j/pJPsJjy9UYAX8R8D/Dris/37KT9AmpHv6dUaB3//Nb/MHfuFnefON1+jEMx0TU1LmDFGVw3TkcH8gxcT9zUvyPLHd9kjneHlcOM7KYfJ0Q0dOE5SJziU6ES7GHX0X2I1b7q+uuHp0zde+8RZx+UWeP3/GD374Lh9//CF+nojykmWayCkixTRtjAjr674pVUVasAKA1OzG8eTpGzx9/Q2Cg84pqLVuD/1ALKkqRQsQENevxHPBROaWOZLSzLLsSXnmcNhT4mSHAw4XOrq+ZxhGhmGk63tc6HHerIDUuM1QiezBO0r1kDoeDniX2Qye3e4R3/jm7+P9d9/lB9//B3x895x39m8SUkGPE4+evM7l0zcZLh9ze3/H3/wbf53XX3+dn//9v8D1o2tKhvJcmSd7j1+VcX4Yrj87Qx7OSxenzFhWC401INLTv+WVr/U3nHhLrWPVVS7HJ0uAn1Z+pImStudeoTJlmQ5onulDM22u7dfVAyyb50XtqqrZrjYrDkWCEKOVs1O299FKUymdPMcaipNSZon2GeI8IeoI3jNFYUq286biUOfpfEAo+NBV1Exw/WjWOcX84MZxJKXEtrOWehWH8z2h6+h76EO00p2oqc2jzEshLjVwqd3AaqZveDExXSday5DK0DuCKJ2rdjROLbASNeI0BXzTBKNd3NP9cUaUbtIY7fA53cNTUHVe/lVtUhpnpahXyoAP/+ZzG/8RP8VzAeA4TUANqGqi2xaPiMM7szlalmZXJmtAWkqi68JJWkHtviXNpGIm2dusDCmZl2a97gWpquo2mla/Cus6ppa2vFYpXzGBZRTTxqpzoTIk8Wpm0pRCmhd0U+rJ0VAuYdxumOKR7dARnGeeDtzfvqTrtzhv7hhdcAQHuUT80OMGTzzO9L4jz5EYI3FeVt7rfJhYaref9x6dzGpGstCHjjhFchUAjXlGgLjM9ONoidNs1lPioCSlk4HBeVJamDWaGn0tBQrCqNbV2pTnHLVXRSDUC2qkls9nfGECKxH5N4APVfVXReRf+0k9r57ZhIyvf0N/6Zd+P3/w536BRxcXzPPMs5vnHOeZqIpWzsOz4x33xz0lJdQXfvZb38INwnsff8BhzhznhVxGJGeePXuG9z0Fz+MngjjP5eUFF5cXjNuRq8sLDocDRXtef/1rPH36Ou9/8B53dzdIjtzd3fL844/Y390yHfeQllWcDmpW7oNpq+RqBuuE7e4CMrz3/R/y+35hx8W4ZRgHSjFtnr4LaMnWqRVC9ZZa2N/doFrIKXE87Pno4w/Ybi65unwM2UiWw7hdlXD73jbmUgpOLXvRZN2M6hzijPCbi7LEzDxn9vcTN7c3NK2lZYmUAiEMHI8zfuh582d/gafvfJNdJ0z3L9ltN3z44YccDnvu7kf++q/8Cr/4+3+Rr7/9DVPhDv4L5RX42QxZDzgLbpqdS/v+lJ2ttiZaA5wqM9DKHEbqNA6VE7EOuLO/l6oR08RrS3lYZmyoWPt+Deja4U7tWFXFuQ7NypQiy3wghEJw0LT6S7ESXRUnMTPXORJ8AKV2ykLOkaKlbuABIawCmqrCEo2ntb83tMmHgVI6pnwApxTfMfQjod8Q00wKnr7vcdm8CVEla7bd3AekCwSBMZi2lnihHwbbrKsQaKicJ++gD5k+ZLyvOm+VeRicEL0Qk6FrznlUsukQ1TvrKkG6c0rfCcEFrFsyIxLxrh6iWEk060TTuiu1TLSybeq/W3BkJIAqz0KuqMh6UqOY0nUQKhXtlNc76cklGtJ9ttw+r5X3WZwLIvLw49VIU9VC2EZAl9rIEGMkZ+PmhuBNjRwqx28hZROndc4I2JqsQ7XiX+dvYr1f/uwtPAh0YQ2kzt4g8ArS1ZBrgeyEhBDGgX67pevDathtZX+T9VniQnamDRe6gDkfQE6Jl89v2G0HNtsNusQ1MJuOE6UY+rXdbFfR0BQT22HAO7cqrMPJ5UOLVVCauv0yz3RVVy3nXDtyi3XS1yRhWcwdoRRTrNfKp0TVusTdScEeWL1D15Sxlls/j/GFCayAPw78z0TkfwKMWC39/8xP0CbEOcfFhWlQHY4HYs5MKVPbD1jizDQVbuPMTdpDWZAlo88jm6uOfutJEkgvFlQ84jzOBQ7TwsWSwHmDNCt8ut1suNxuePLoEbn0xBR57ekTvvXNbzDNR+7393z80Ue8fPGc4ODF82fcvXjGBx+8z3Q8o
mpdR+Nui6LMVSV3HEeg8Pf+m/8fu92ON54+4mq7YZn71Ywzq9XdkxbydOBwf8/93Q3TYY93Qt917Pc31u7aj8R5IsWZ7XbD/f094+6C7TC+ouitJ0G5bAeSuGKlklyISVjmTM4wHRd++N73ubl7xkcffWiaPgTubg88u7nhF3/pD3F/e8Ov/X9/ha8/Ghn6nu9/7/s8fvSY/X7Pb/3Wb/Fbv/nbXF8/4cUf+nPVL/GrFVh9km8HLciF1il36mCyx9ru4s50eJqhqTaOT72WVr4w/bVGeNdysss5PedDB4FW9tB68si6lZkaeKnZtJRkX8U0dFw93L2zdmpVNbmOUhj6YHpS2ZCdUuzQN5S1mB5PLtYdpEJWJWVljpEYC85D0A7ve7rNFf0w0oWOrhtxzpPixCbHB04COSVKSZUPUxg3AyE4vASCF8be4yQTfEMVDElyUug7Z116Ujtqa2clCp0T+s4sm1qAmtT07Fq0YkGrghc6b9dEzkq8FgZLJedWT7piqJzDNHw01+690hphGoJYX0ZOpHOtAbmoW7W7tLa5pVZ7AlR0RV5enYOf0/ipnwttGAJVeVK0Q9msWFRP3KIY0zqPYoxrotHsXFrQbgblBc2F4BxSyifI1FpLfuuLrr9YjTpqVUAf/PLT+I+Albp9IHUeGXrECz44XHDgqIkEuM6bebSIdX/nXIOYBS0F74X9/Z6SM8NuXIOfYRiQouzv7um6ji50liB4T1wWtAqEWkNJJuWIU0MBiybIJxrCCXm269Ys1cz2zBKZkiM+eFJUwpmERcbK7q0zcMUXazmwAsS/VwpU1X8X+HcBambyv1XV/4WI/D/4SdmEAMfDkd+5v8ONPdp7um5EXGZ/f8OyKHH2vJyPvDg8I/z/2fvzuNuytK4T/D5rrT2c4R3ufG/cGDMzcoacGBWVQZBJUm0twSpFpduyxKmq7AKrta1qpVu6VLBUKPkIipYW2mIJhVQhUyJIMSSZCUlmkpkxZsSd3/lMe1hD/7HWPue8dwhyuHFvEHf/4nPjPWefPaw9rfWs5/k9v6cIqNaxs3sDf9mhBgU+jDg6agnKc268ydaJk+TlEOs8k9mcYjBiY3tMORyQiWBUHE6CZFgntC2MBoa6KRmOhpw8cYJqMaOaTWkvPsT06JDp5IjJZAokUiUebSKJtmkaJtMpvq259MJTeB/Yuf4iDz/8MF/+Fb+Hje2TeAt4j7cN8/mUajalqhaRu5JE4Spj8L7BtQ3etcymR5G8W9cMxxvLjsJaizYmunDbFlHxkfHOxVqHKuBDJHY2jaNtA8YoNjZGnKi3aO2cxx59jO3hgF97f8Pk8AaD0QkOrl3l137lF5gc7vHE4+/i2vUbVFXF1saYqq44d+4sh4fTZWZNpxr9IOFWw8qzLoFwu/U74qj4NJfrStakVCPn3TG+jdZrqeU37QdWocZOvXu9fI4swxhp0O48XM7h24q2XmDEoyWgJYVXAggxTO1CQOHIdFfnMBag7URqo40Qy4BEj4yjrqtoJBAV/p0qKEZjBsMRWT5AmwxUhjFZfHeSN7UclBCiPIFZL0LtLcZotJFEJiYORDiGORQ6qmUHHUUMugoDSjyKWLGgqyeYLgWEWDdQ4xNfxqHXDJzOFdIN3EaTijD7ZahfPHQFk4OPMRLvoifROZ8U6WPI3yWPQAhJg44kSBwcy9IpqR6VdAYggo2CSTG0T/JlBX/MUFh/Fu4H7sW4IMmrq7Usn3PdCTenU++y30KIcgvryutZmiRA3E9mopEBAawnF43yt2nC0uEYlt6pzosMyScdSPd23ZO14n6l67J8Fy3CIkCbmThRyAw6M+RlvjRmJHmsx8MBg6IgOE9T1Szmc1q7IDjIchOzyAP4pkUyQ1PV0RAaWJq6XvYFsa6noiwLbKq9uWpTNP6r+YJhUcZMwEzjUxgzpLqALniUMRRFQQg+yliEQFCCj4U2aet21e8JBLW6Zh1pPXS8KonXT/ceqzviW7hbZUK859q1HRpj2bdT/MiwMdgkz2BRHdJWgYO9GiGn3MqZuwkqwGg8xNWB2SwSwcvBNjrPKAYlW9snOP/Qw2xtn2b7xKmYwRSapWR/VAUKeLHkmZDnGc6B1lH00DaajUFGOyqxTUN76iRVVbGoFsu0cOUj0S8SYyMptmobBgf7LBYLYIF3M/b2rtI6h1EabxucbfHO0tYL2rqKnJNU2sM5g9LxJavrCkIsLZDnOW3TYF3AukgIzhYV5XBEVsQK6NEFHssn1HVMPz+aHLK3f8De7hFN46jrBc7HUE5wjoP9Q7wDkYwP/vpHGSyuUbgZp05cYHT2dTz/8V/Be8/R0YTpbMbW1janzpzjkYcf4yfMKdqmiaToBwVrnJh1I0dkRVZf91yth+eC97TJgFofFH0IKzHM9eVr2UzrSRKrpoRl9lP323pa+vq8sCtBMz3cx9kaRZoJtxDQywEiclniTDeSawNKfBrcLRIsPoC3Loa+ULTOonWs/yfKYDLDcLDFxtYmg+Ew1t4UEK9TmzyomKGoxZMn9fZoo8c4WCaCMZISMuIgp1Q0BnPtKQwYFfASQ2r4GG6R0HmV4tQ4ZiwmLlIQlAYbUqgthTe7a6lU9AyiFN5HWYVocKbqBmi8D3gnuNZjGxf5Orara9itGb0Fei0zMLbNE3OmXExMSWrizq9pVxHw2qO0IHqtOLNbZQsus+NemZ7iuzYuQHwH8jxffu5I/AJL4yEq/CcTNNyU0e1jlELrQO39spiwBKKgb0jFztQa72fNo7Ie+gvJeL/5t47o3rUspDDwEiJ40QSTkY1G5GVJlmeYTKNNpwOYqn9ojTZZlEhJ73XdLKKX2BjaKo5fdV3jHAz1KL7HNpa76Woreh/wOCTLmE6nsTyVjokh0QCNRqbSirqpwQdcW0cxUImhQkmzDGU0LkQivHUuRlFSWFUrRd22y4sSqQyJWN8ZnGkC1l0T1XusjiOE8B7gPenzXSsTEoAr1w+pQ0sVHIOtjNlp4agNzCuDDgErNVJ5xJnoIhWL0sJ8EphPYLw54LWveyuPPvEk440NwIHopLMYKIuMQVGSaR2LXXRCoF4hKqZXxwFFUQxKKgHvFZkIjVJkTU0hOaNMaG1NnJ/ksdZfXRMzuGAYcrY2SmbzOd45NoY5N158jsmNXUxy0SqJsXXnm1hKJDi8tYgEjPJ4GzMMZ7MpRVEy0Dmta/BNFCH0ztGqGj8YICoKgJqspW2qWAm99RxNF0wmc2aLiv2DAyaTI3Z2dtjf38W2Ld46Jof7eFtTtzE+v/Pic5woHAwMFy6cY1FPuHF9h3FRcrCzz9bJs5w8e5bRcMBgUOKObMwueUX27y8PYgd7/IRjRxY/r2cLrhOTl0W7l9uuQohdkOn4PsMtn2+XXXa7Is+RDMsy9BRnmpbZ0ZS2jrXGJFgy03nN8m7DZdtd6FL6Y8kL7xoEhxKfSilplKjIp0rdlVJCkMBwULJ9cpuNzU3ERH2gIETVBZ9CkCqGJ3MDRQZR9TrJQQShSJ4KUSlMIwGTprySeFNI7KRjSG5lAElXUS7E43QD
TUwYiWEjn66fk+73qNPjVYhaWOjlQOutpfUe1wbqyhKsJCPR4FNigEn0A610bIes7pdPA3hsk0uekDgpU85hbYu1bRRltS22sfH6J/4XQFu1kcqwDC92GY53eFDvIV6ucSEaGyvJG6VUEt2NBm9X37UroxFzhWLZLx0ir9EHEDGoYLC2RVzMAsxFJeJ5Mo48iFbL5wlk+R7F8iyBblqwfs0dKSxILP4dy4spYrHSuL3Fs1cEZGQYFnl6rtMj4WPI32QZBGKYPM8RhMViTkAYjbfJTE5TVRSZZzadYbIs8nNrj20abOUpyhynAibPMFlGkcRC57NpDE9rh9HRw+VCm0o2+eW1NkrRNvXaBC3y05QIwcbiy1gfxyqikYoP0YubMiR9CJGkLuBUvDUmOqAj3zFZqbdzFN4LvCINq5cPsSu0LtA2lqrap6wtG6e20GIItGyfPoG2GU2osQ5wmsNpy9FhjW8yDg72eO97f55nnn+Gxx9/DecvPESWFRSDaAQNqgFuXDIeDpFMp8hAwLt2KbroQ1KeVYrcaBaLhsxolBRYPJWL5FUnqwGry1LpXK15npTTR1FtvcgyjBKyTMW6TrWLLzHdoBGienCIWiGRG+DR2qRyJ6sO2iZ3bds06CzHe09d1dBYRMVU+PjAxkzDoiiZL+plinKWaU6fPoUgzCczvK2ZHtUoHctFOOcpR1ucOL3J6XPnuXz9KhsbY6Z7h1y+fBm5dp2TBwec3Nrm3PmzWCzVYoExD47yOrDUdYlhNgihC8El4ykyQVjZUYlwuzSC0u8+LFmcXlYZgN3vcZ/x783hBYgDdpR6iGrcSjSgE2cn6jM5a5nNF1SLGtdaNBLrfEn0VEXXfezlY+aPx3uJ2T6x6RgRfHDL0GVrXRzkEVyIkxLrA6IzBsMRm1sn2N4aUBQrUrfzDvLoSVLKJ2+UJ1chyqwoYvkcFTtsIyBqFYgJIaSaZdCR7aOXN6V6qzQYppBeSNdwZbTGFgeIskUiycvlY/hDUiDTB4KD4DS1bWmbhqZucK0DF8VF8yzJmyi1DHsIkt5XSeTcZLT6sAw3dQkI8QCJfeItJtVpdMu/ntbG7GBxkXLgqoD1dqmoH5aG+asZUUOtC32G5G0iREO9063yPi4vMoOoEEslqRhKldAR1S3Ke4woDJKMqq5CQhzwg/dLw1t5WBYQZzXtiTxFkiG/3tL0VwJehMwLBqE2wpEOuDJjOByQl0X0qGkVw4Ep5K9TvVClwJgU0vQ5zlnKYhCTJUhloKwFco4ODqmbGqM1rdY0bRW5iLVB5xnzxTzWyfWesixpm5bax/HQtc2yMLPWOirUuyhrIRI1GbXW2K6gtQhN3eCtxXuHVhKNtdYuw6LSGcASs9xXhu3qGnbhwfuFB8ywisqsSlJ5lxCQacu02sPnjnJ7gB4p8qFBHNRTy+yoZr5T4ebRy0Nb0ywmPD/Z4dILT7O5dZaz585z6sxZRhubnD13js2NMdubm4yGJYOiIDcavKepo1qvD1HEra3bROoNtHUVXccQSyjYJmWirA+k0bLP8zyVUvDLeHbHARA1JzOx1locjH2M90saVEQiVyokXoESirxc6pm0NpbK0MrEwppZ0j5xNToryFOxTUSwDhaLBhCKokCJ4vDwkKeeeoqqWrAxHrM13mI8HuNsTbWY0aTYeSuGt7zjc3EEDvefxdiW6dEBRgmnTp9i++QJjg6PaD9RIY99PoPhkNFwdF+fnXuKZTiuM3hWHAuRztg57mWKv6XhV62JEaaZ95pvKhrdayHG1T66WXsUi139Hrsrler/qa74sbNUdTSo2jYN5N7hrAOJZGutc3yIWTw+OKx30WiyGpsy0pqmwWQxC6huGhBFY8F6j12WtBHKomA43mT75GmGoyG5EUSiJ8ioOHOXNFNXImgVMOlfZlR6n0Ks0RZ8Kh6drlVIHjgCSqs4O/YuhvGEWFIjdLfHp1BM1Kfyy2sHSFdAXZa9vUITi9p6rA00bUtTWbyLRPGlDpIaxNpuqYRTN1npMjolhUUg8tGi541kvMkqrCKxukNIWQAuWLTPkmZdrOnmrSezLa3NUbNowOeqIDSBxrd4PD55UV6h4cC7grD0SskxflDHNI9q6o5Og8q2ccD3ARod3ysTAtoFtA9kEsnaam2gV50nSuKAXzsXuVep1FF0liahTMLKi0ncSN1y/SXdmxgmn4swGeaU4yGmyMmKHF3koLpISfS6NYn7lYVYhzUm7wWGwyFt09LYqIjeOkeWxRDhcFiyv7+PSE7Ao3VOU9XkBVhnKQaDKFCakh7aNlbJKIoC7wLORlmG4CxtY/EqEui1jpnemda4pkFnGd552rYhSvhoJMRMQWdtzK5cg/Nx4tapsEOcNK0LhfahwHsEnRkUGmUd3jtyZSjynBbL0e4MW1uyzTwO/vPA9MYCNwcjnbhhtJi192QCgmV35yoHB3sMRiMW8ymnTp/BtS3OblBnhtFggFl2TrEGGSbWM+teWHwM0zVNvfQmKaVxrqUbErsXvHNXd2mtneVvbUvmHT5Y2rrFuZBmtylby8fZamZM7CBsIMtiaMF5n6qKC5ubwxQjj+KJLpYoPcYTiUZdrAi/IkJrNjY2OHfuPNevX2VyNOHG1RvU1ZS2XkBoo5EoOQfzlqPKsb9/A4WPAqqTQ5xtKVOWSvTCxSKe4+HouBDNqxwBIXhwNoX3SLPUFKJZ5zsBx3hTMSTYGVMr4jms+Fjrob11zhSsvFXrhNxO7mFp4OFom4bF7AjbRhK11lkcGEKbjqNiKIouVBXJ1C54auupK8uiAogDmm1j2rr1kciqtcH56GEDIS9ytk9sc+LESYpygMkUuhPPVSvvGioaVloiMVyJj3UW04ClJHl1giDexe/JABKRWKo6eZaUFmIts9W9WWU0OVKWAJ23ME6O4nDqbPTwOetwVSz51DbxvYze5CggOdAlkkUCsMlyROXpPsRrE7/o1ceUwKCIwo1aZBn66CZiSsLSyCVEsdQQPM61KNeilCWIx4pGKY1axOdpUIwwZJhmQWObqHnn3Eob61WKLgsyy1KpFrs63yzLUsJAVxaIaMgSMznz3ICzGO/JAB0cKnRew7gPWYbiV1H6Jji8xCxSFdL+uueI4+HApQ9aVqV2IFALLBRMFEg5xBQFRVli8jxm3opEja3OUAwhiqHahrrWy4m6TQXStejItUo6iLZpsTZO8Dc2xvFd85HrW83n6DxpyKUas12/1NXtNNHlG/VJiNIVXkUDSekoX1En/qxzbpnpqhAyk+GaOnIQkeSVSl74m+7fMmEwea49UdQ3jq333nf1wBlWYqKzU4vCe83mqGBUDrCtJxzCjetThkajtWF6/RA/teSSR+K0CHiFIo8hjaBpqgXaZPjgqffrZIXHF8E2NVubGwTnGJgodqZUJLa2yV7SKQznfaBNhWSjxyo+Os7HWksx5LOqW7We7tuVJPE+ksa1VlF/J3SFnwPe28TRirNYrTKyLHmfEIw2uBDrQ1nboiRmvShRhOCIxXD9UkgvCiQO0cbQtI6ssZSDASplh1y4cIH
5bM7k4JD5zFDNoxCdbRtQhqNFw/t+7cOc2h5EsnXTYLRisDFie3MDledsb2yxMR7z8XKASGAymdzz5+V+Ig7U8XPnpRJZ1XxbN37Wi3evG0mrwXi1Tdz3cT7VusG1nnm4XN8nT1iq39c0dSS2VhWSnuMlFyIdr/N0dJOBSFrS+KBZ1Jb5wtM0kfCutV5yqLxWBB3lTPJCU+Q5w2HJxsaA8XjMYFDE7D0dNaOQFG73HlScvWodtYG0XmlGLbP3QkjSBbHUtaBSeJPltUimbeRPdWThcJx71NFkQgDnBIIGr3E2KkpXdUNdt7SNRVlD8DGMZ0weJ3jGpGxfhdKR1yNKE1RKWacjK0NI7ZOkVxbDOdF7pVWXRZjutVKpnFDMDKQjU4fkCxENYol9gSL4yBoDjzElEiJzR6FpvSK4evUgvUqxKqTs0wAPEJ//roiytVHBHFmF5DPvkUWLIZAR+32zDP9FLmBIyzupqmXgWaBJcjWGmM2pk9cxpEnRurd5uawztjxUOrAoBEZjhnqAUQaTZyCQ5Tn5oIwlpdxKCkIUDAYl0QMddeWsjeHhIJHraEMUAA3eU9c1o9GQra1NyrJkOpmy8FBVVawzmMYQ61d9CURjNZhYwkryLHp5dfRaQyDPDD7E+r3Nokpi2TZJrEgqSRXlKoL3S49s9150SA5bUtR9aZiGZIzdDzxwhpXRxMFBJJJEbSAsWlwINMEyGGhoGmZVQzUTtBpF0bQQOUuxQ41lZuq6YpBrNjZOoJIxVFcL9q5dJTQVh7slRVFw+tRJLpzejiURlCEQKMsBJtNLvot4RxYUlmhItXhQA5TKULqlqiPx29r44hvlUGqlF1LXkcMEgdZZPFEhmlSLKqDJyxEiiqpaUNVzCgpMGb1COjMoH8N+bePITOKcuKjzowRcNYsz5CxHBYNgyYxhUBQMhkPywQZ1azk4OuTSlUtMD/ehbQjeIRJny1019kDg2rVrjMvzTPduUOYjHjp7ES2QZZrhIA4+5cgQnOfw8CjW6npQEKJXQ+uOQxVnirEjXBlV6x6szng6pvkV4GYicqeWvM6n6r53WU8duuPEzl5hW8dsPqFta5RIVBtP+kqkCYVP4rIxLLVWL48Sh2exqNg/bKlq0EZirUqJZY60UpjhKHJEQtS3Go8GjEdDBoMcoxUmhc+VdB5gQLEkYHdqzFG6JyTxxe4c/IqU3kkSOEcIaynivvPQxTBa5Dce1/Hqwn4BjbfgLLSNZ7GoqaYV1rrkvdJoPSTL9dKoUqKid88YJN2LVeaZIhD5KsH7bmglpJl6p08m0SW3CtMmdnQnlxiQtK+Q4lEpXiIhXqekx6VUCosmj6TSefRO+0gOlhTEEl7dhlWXqel9SFlzq3fCBzDaoJxH5Qoxgm0bbGMpgQxBi15yqEh/Y2hv5V/p6mZ3+krRI7Uynm36m4duYhIRIOoSyorEHvcRPUG6zKE06IEhZBnBFOi8iJlzzoOzaEmTeO/AxZC0KI0yCttaXBu9kl4so+GYRjV4B9VsDs6zMRovSzrlg5J6sYhJET4g1pGVBXUqbJ4lmo3JTPQea6GNgyc2tKkGrdDagLce11Z43ybWeZxoGVFR0kdCzBxUilYTecoiWIEsrPpGJ6w9o/Ge+TUKzb3GA2VYCbA1GpMNS6wSptWcZlJR2Qp0YE6FGWR4F5hPKoQyGSdRil987JRCiH99sEyOJlRVw+b2CUxe4BaWxdEhO9cuLVNcz587x/ShC2xtbjMeb3Ly5EnyrEQyHWczSqIyr496UF4LsZ6Aoqkci0XN7u4OVVUhSJyJGMh0DAGiobZtKqi8YDjepBwoMhXTsYMnqj/7SH4vygGZz7CtpWk8+ThPA4DCi0NELdWGB2WJAC7VYXONTQR6T91U5EVDXgzQ+ZDt7RFZ9gjDQc54NOZXf/UD7O9exzYtzXyOby3BtdFYMxmusdB6Xv+a17ExHtM2NZPDA6ZHhyCeR09fRGnD5NpRLMgpr+7OfR1xFpaUzInlM0TF0FJYdiixE4repuMerW5Q7Wa/MRyol9vLWsmMzqt5rA/qZsmQKtM75rMZbdPig4veH+WS2z72eD5NOkKaL8b1hNY5lCloKse0atg5mNG4jGI8Js99VPvP80i6zTPK4Yg8zxCi7EFZZClkLUk7qmMcrYxAifHJOINWmkCIRF26DjckGYJViEVIXoDU6SOxdloQ8MEti7yGQHIdxsoKUQYFQogioItZxWLeUFctzkYxz8yUgEJrg1YGnSclbmUQFNoYlMmRNNE69mwnrx9p5r60rui4P6SZ+8qb0XGruizGuJtoEAaR1aiuwjIWFYTofVF6dfxU6FyMj4KWwWN8SFlyr150RuzSwE1cQojJNnXdYJIXPxDfS61iVqCiS3a4tX/qQoDdq5VsWnSXJtiFCpPB7HyghbXQYFzHEw0vTbxvoqKchwuxn9AqepO1iREH7wzeRlkFpVQU6tR6aWQ771ABrGujXqGzaKOoqoqsrnCti0WbrWW0MY5yJsB8PqOuGxZ1Rd00GKXQTU0wiqws4zU0q0xXY7I4oU4GKwSqumZQDsATw+SuBR31D7XW0VPlXcwEbltIAqtZUSQqSdR1pL31arO8pPeXF/hAGVYInN0+Tb4xZH8xo2pbKlPhlSUYMDrHeaE+arB1cp9L1DTxbqVwvEQIEBxNXbFz/RpBYup2mWuss9jWUg5KnF1wuLvDyZOneOSRRxGtEGXIx2OKsowq0iHglUG0olnMOTo4YjadMDk8ZHq0x8H+DoeHRzFcrTWjwSD+G40Yj8cxHEictdd1jagMkQyTx1BDXTeJv9UQCCh0fLjRtI0nGIUqVMoSFByCF0XdNORZdiwMGTPVVjwCLYHMBIqhYTw6yXA45NSp8zz62Ov50Ec/xK9/4L00VU3bTpO72+BsoJ43HOwdUQTHlStXuHr1MvPphMcffYQTJ7d59tlnmU5m+POvjenBD5hIaJTpgKCSnhWeTGV4Hw1yj0WrmOIdkiCoqCQJICuOVWc0aW1AYlHWLuOoC42tk9lj1FvHIq4ogrUsDic0TYUQa+y1bYWIJSgPOovHF0VQLsZCAogLBKVpgqZq4Oq1A/Jik41Tj1GOBug8oPFkRlPkGaNBQWY0uRIyHUnpokhkc5tCc91IlIarLqNKIklX1GrEih6lOLTFcjspFJpe42iiGRQ2RszQoHMkDm10/rDgBU28YG0A5xSh9UwPF8znjrZuUAEyZShyg5g8FZDWaJ1HraA8Jnx0XrFYSD3WHowGXtJGImYQoqM3O3oqYamRxWpmLmHd2ElUBYllcWLkVsf7l0j6yy2TCKjDYiWWQFmO8UohLhZZD9ql0iG+e1helej61I4n2ol/dl7OrPuuQiRp24AOHiMxI08l71RninWSAMeOAUuPbqruRCfLF1IsS0ms5deEmHGYRUrU0sAKIcTEDAEbAq0IIc/IBgPEZClzNVa4CN7hWk8rHoyg8gyfOFAYzdCUVIsq1ejUZFkBIdBUAddGQyg4S1FkjDbHlKMS17Qo59m/do1FVTMqB3EbaxHvUcFHQr
KhYii4jGEBElMxiRZylGEL4szNmWQ5aTJ3d+ILMNCYEozlH+GqBMRoKckYgEVVWzun/KuusYQiAiICZEUsVjTAABhCIpycR4PvvpGxzUhpdOT3HJ00waEHD84Jg6KWamoTaGRGS5WPDxO/f4+N0HvO/aNaL1QF4Ie++5c3LGK4szvIRGGj56/wG3HsBq03NnseJk8GwiGNOg5AQvBrJhq8S5RAiRbjjj9v0lSkn29vbQCTZ9oB8GZm3FwaSlVhWr1TqDkUu2FlNe6NIOHDSRHdQvl9B3I/nL5fGxAlJ2kIOwmAguEMjdLxFSBsRFEEJjZDEZrnK22rQNMdY4G1kte+yOcj1kjTKlZJbZEMWSxFRMJjMo32eMjsm0pZlMISa8s5xu1nSDZRgew8D5NI6YEsbkdppIiWUXaI0gukBSir5PrNa2JAU92syzErkyhASNztICp6dL2mlLEmALttAoRa0Nbd1kG5oYISoIEoUClehdBkJPmxndps92Kt4ym7UIGRnshqoxzPYaVqsOFwIxgalqYgIfJP3gmYWENgJl9NYo3QePD3nBn02zbtRqs8GFiK4li2XHauMIQSCSpVE1+/tzhtjTbQbqpqEyuRIXgmM9WBQGkTQiyXPGWFmEAZwdiCngQga3N3WDVlVOjpLMbTzVQJKkOOBizDpDxjOZVAgEwdmtrtMw9PghUasWPUoyEOg7Rwo5gQveg0goJaiU4/rRFKUMKQq8j/QxUNWgk8A6IEn6jaVuFG1TMZ1U6NogkqcX5ZaIhXEWyaB1IanbFt8PKC2ROlfgEAkpA1rL7T2QgKgkoqk53WwQwXP/ZMXJxnJ0OOdw3hL6ASXBWo+RNSCotARexej40zjyd6pA5HZQVVcZdAwFB5dISaEKVERrjURgtKIyFUoKlMzM8KRrZFVviRahGFoLkRMymYpg5/mbI2NWVJTkJE6rLKmRvMtkK0TukAiZqxs8jLHaBcc/6rldZmDG8kgq/erMPYC9n/GLWP7gB7Z/H33lb+X+P/7TLH7g72zB648b9dPv3LYb62ffw+k//6uPDMT2v/hXcPwd38Qr//NvByE5+JJftfWvfb1j8u4v4/j/8+dY/Jt/yLWv/QNc+QW/l+N/8o2cfe/fghiYvPvLPuHA6ugrfzPH3/HNvPKXvx5ioH7Te7jyn3/963qt1pmhTGGZZrxjXrOsdSR/nlUUpFGx3np8e/aJCKxSLG26lNVs7RCwFtYlcm+qmrauadqG6XyW++rW4r3P1QvriDtu31JKkGnL0NgGUTuAs90+eixYrLHVKpVi7CkKcnk/ojldrThdnDFYi1B627vX6lzgraqqzEoisj9pmLeG2G+w6wXz2YzpbMZmvabrO4729pB7UxICfzZw//6CH799iy469ucT9mYzVBJY57Drjg++dJv/8BMvcXT1Cm++cZPjxRk/fOcOm34gKYluGpKS+bZ3hQRgNLGcr0nJ4mPMQODjB6fZjFoZZvM9bjz9FM9eP2Jvb5+Tk2Pu373Lyb17edJAZHZK2NGGCgGbzi0bXotlednyQQmBKpPLWPU6Z+CckxpCCHTdgHMu20cUzargcwk/lkxQMOITsjGvVCBEwtqBYXAoJTM422ikMIgQcX1ur637LjvH+ydDhkNrRWcHrPcoKfnYyZJWdBzOW0QUSBTeKqyVTJo6+73NGobgqBAMMSDw1JOadtLgQ090IQsipnO2kvNZhd36wGrTYYwEqYjeMJ00hOipW0ldV9guBxZ932fRzapiGDq0NijlqGct1mWw93Q6RUqZg7JiaZOKwvTImhJKk5KiHwbW3YAwFWdnHZWoqVWLbGtS8mAk7bRhr5ps7+XNesMw9DifIwYhIwKLIFGZugh45uttGAYigsF5fAShK/rBs1x15bgkfWfpOksMUKnCDlUJqWQJ6IGk8b4v12o2Ql71m9x+JNLUFSkpJtM2tytdoG5qIBMAnO9oW4F3gUQO/p3z9IPNfov1hKqqefBgSaMU73nzM0wnNf1mwZ1Fz+lig4+SPkqsz7OT8KFg8AJBRLwf0CEbNPfWMsR8jyZyFV/pClM1pL5nMt8jni5YDp71nQdshhnXruwRk8uMp5SQY636CYEdjnNuTKVSHiL4yGw2YzptCcGX6lC+1oIPVEZByt9ZW9XMZzOklNw722yTqK0GWlmDRvZf1llTBB8x2qBl9tfMVj+JFAKTpqWzPTHkzoiSbHtHwQdC8SG9EDTtMBMvz52j+8MuWF6qR1f+d7Wj1PSQ537P/7b9W+9f34LAd8fVn/+7Hn7s/b9n+3vz7Lt58+/7h+d/P/e5NM99LpCrTVd//u9+1eOYvutLt8D0R43m2c/m6d/4TRceu/HL/7vXPM7Lf88+5yuhtD7VZJ9rX/P7HtrH69HzCiHHH5ndJktAO8pxjN/L+fUgZCZH9e7xycYTEVgJkU1cRQEmptLoTELkDCB4NmvHpluTCmOpaRratmU+n28DrVF5PevyeELKsvTbDGH82S27lt56FpjMxyOVwpOQJRMKCE47y/FigYsxl5ZNldk5PoDiYlVGCIiBWdOgUqJbrSAEjp66RmUMJ12fW1ZGE1uDGGCzWPLCg2M++OILfPZzb6KZTVBtg5KacLbidNnzoVu3OR483f1TUjScrJd0waDrGqUEdVWVTCvnWdP5DBs8q/UaISTWuozTkHJreGujY763zzvf9W4+9z3vplaRWmne8c538PKtl/mX//yf45cRRTmX5+Qa1M7NfsH64aHv9yJ+YDuZwFaVeBcsXVUZEwHZF05rjfURHyKDXe+8V87mlNJIIYgpZs2klPFVh4f7aCPZbNb0fU/f9wzDQF3XSKHYbHqcD6z7AtqtDM1kulVxfyOHAp7aqzjTCh8C/eaUqAXNdMqDO/eRIdFWDSE49g4r+t5D2lDXCjF4FA6ZCqaQSF1R9HYkigxQTynLjqy6nm5jEVLT2x6ERAqTPQJlom00SsKDeytIMJlM2N/fz8rrIrMuJ+0Eax0QmLQTRMpVQmMqnAt06w1J5XtxPq/oraVpp5ydLXEBbIgM3nHr7inzek5TtfiQq5RVo4nJQTTnC18KpJhy5QsQKSBEREmoqjmmqjDGnOM0pUJXNRoJQrLe9CQtMVXDZtPRuYBHILVESYMxCqUiUiSs7RCmQgB1AfMqldl5y7N1YaTlVtLBlSt4H+hXPbrK+B6lFEY2DDbj0bJYqGMzOKwNKGFw0ROcZeMsq85S1y3TKmJSx6xONEeKp/ZmOAwv3TnmOK6Jocn1YzsgUyD6SPI2q1AkTyUTSkS867ZWT972iBSI3pFSVSr6CZ/gwWKNj46D+TxXop1HFMzJa+RMn5aRxp8C7h5lCbRUVJXZdjtM6QBImfXtiAGpcgKipcTZgaHv0ErS2aF8trStRlgXSCmWuVIw2I4Y8vU20QZjNG1bE1NkNpvgBpsZxZRuSWG2x5B9KGOg2PDsYqi48PtuwLULrM/YOllM1j8z/mOMGBJSZJP63Zb3RUmOnKQpJYkiUdcV/bp/7D6fmMDK6FxSFWNgQiLHj5ARPxAiRHGudgt
klldTM51MaKcTAJx1WNtjnd3ig8bSasafhO1FLIW4WNUqVY8oxBbD5SPcW2xYdB1IgSrlZZFGU2exBc/miV8Rg2BwkcFFNr1FmYZ2mv2/3GCpqppmb8Z0MsWv19xerPnej3yY+4sl1/evUk/20Ht79D5xfNbx8dWG1La86cpbSIPH9pbZwSFvPrxCbSp8CSoHO7BZr7ly/SrXn7rBK7dvs+k6qspkjIbKE4P3Ea0raqOZNhXPP/cMzz3zFA9uv0xtDIdHR4hGM/2hPVZ9jyG37GIogpxknZUkznWLgAtBbP5uxYX/L1S1doKs7Dp/PvnkDDIDrYWQxMI4E4iCM5FbDFUSEIpsQEwJQsQ5WC6XaJO/n7ZtUUqzXm1wRf2694FV12UQdzulqiu0Ulusxhs5Qoj0fcGaCcn+bMpktoezjuvXbtBogd9siEnjXY8QhsUq4k99limpIrWucV3H4Wc9g/cbRBFTFFLlc4Vgtd5wcrogpQz0RQiMylgqqSTe5XavHfoiZZCrUeP9I7ISaM7qY1ZrV1LjhgDJ5+9KRJq2YmMtzgdW603+kFIxDBFZVQQc696x7LMq+2yqUSoxFZFKGMLQ48laQ86GjJcRBVepcvZvTNbLgnydjdYi8/mcIYCNbN8/JgFC0dmEDYIkNTZ0CAKTScJUubLpwwBkg1Y72EKG6NHacP36VWbzhtVyjXWBylR4H+jshnrSIKWi6wasdeha0zQVkDCVQmnwKWJdZLCR9XIDMlI1NZNJw2RikMKSYiClwKzStEbgEszffI3OHtL3iq4/w2jHjaeu4XzP4ayhlREvEgdJIiNU1YQXSyvpcFoRAiwWiVaDVhEjBD7m9ocLgeOzBQiJkhKJB/FJ61j9pI6tvEGMaJVJCbmgFrcq/jEGvHdokxPwFDxuGDBasXZDrq6TK03RBQgOQcKuuxx4ek/wqbQFR8JUrkQF79mkiFaSuqmy1YxI3L93j8Mrh9jgM8aRomafQBdtrMvMwN3HHiXHMOK6xr/162gFPmnj7Hu/lfWPfs+Fx6bv/FL2v/hXvEFH9OgRY0SZETOVf3IXZadly05Rhkjd1K8KO3wiAisSiJSzwy3YnLgVCBUFQjVKKexaFKSYKdFd1xUWiKFtW9rJhLnO1SznfNbiKbINQsqtoKgsZs6jTcbWAgGxFQ1bdZ7N4AmJLW1ZiGLDc6lvLlUuGyMNi03P7fvHzK5VyHmTxex89mvTdU09nWCQrLqeH33lFT50+xZzpbh6cETQFXc3A7fPlrxy9x4vnC0QtWK5WrI+PmO/brl24wrNrKZtWrQxrNcdYr2hd5YrV6+yf7CPi4F79+8xWMV0MtnS3EdD19YI5rOWu6+8xP3DOUYk+m7DarVCGkUzaTlxnitXjnC+xw5DWVCh0oooiiFuGuHto6cjD5euOL84GQMq7zIeQYgd0VZP0zY426NLy3Vc3LJPXGZFpcICEmG0myjXT4w4FwnRQ0GGZVCvzt9rzIyddT8QBExmMyZNQ6U0KfgL1bc3avQu8sEXz0hvywKUd087DlNNv17y9ueeZlpBezghkbh9/wGbrkMITdcHNt2am0/ts1gNXD+8wnrjkCTOFkvaumbdDxilsc7TuQFdVWjVsF6v0RqaRiJlxLmB4PJCs1kPkASz0op3zhVNIY33AyCYTlp8GLKKufNEH0jRI2WinU2wURefQctsNmO1WqO1oRss1gfWmwGta0KUrPuew70Je9OWSaVpjMCHvGhSFjkpwJiMadFalXKGRBcW2FbWIQaWy44gNb31uFA8JUO+PoL3OO/RxtA2NXsztjicnIgVk/iY2Y4hRGK0bLo1+/sT+n5NTInZfMJ6vWGi2qIwr7O34HJNjIFhsFg3EILLvoXkO8b2A/PJFNNIetfjXMd6sWH2/2fvz2Ity9L8Puy3xj2c6Q5xY8g5K2vqqu6uLnaR3SSbFEWLkCiAEgzINAzIsAwDepEfDPjBguFHG+CTAdqADND2gyUblg2QAGVZoCyJk0ibFNnN7ia7q7oqqyozIyMjbkTc4Ux7WKMf1j7nRmZ3dRdBdVcS4AYClXXjTnHO3mt96/v+/99/MUMogVSG6A2VMtSmvLZKCeYLRZwvCTFydrLi+iZhpKSpQFpNFhq3GxFCF72kEJwtK+p2ThYKWyn2+4aX24ExCow2TA0slBKonFBZ/K4byB/kdXi2xTTRCN6hq4LoONwTzo2E4MmpOEBlseGUEOqcS3D11Mn1Kd+thdNBWlPWdZ8KsuQwyMiTNvPAEIshErwnBM/j/WMeP35MFgJjDU0zo64b6qalblraWUNV2btuyLRW8koh9du7+gklxNSxonSH/zm7Vn/sv/+5K6J+1+tgajxIgCcpIwhIcEjuEFJgq38OCiuBIIWi1Tm0TNMkNj9s4ghxtMamlFBSHWfPQt2xe4LzbJ1ntyu6n2rKhWrb2RF66Jw7arRIJbaB6ftlyn+rkIlCM2bB9XZHDiCzpm3skV+SSESKDsGHstHoibkzjiMbk7l2PaPvqDMo5+mcYx88q9mMSlZY53nebfn25SeIIXFytmKjMn/vBz/gZogMURC8pxeZZy+e0bnCZlKrU9p6xugiQkUaWRhMyQfiYo5pDDFHqsrw5S9/kevrK6w1ZKlLEboLvPXaA84fPOT8/F5pfasihr16/hKfEovFgtN2ya/drqmairN5TW1NGQHmzLyuiUkQUyKmXBRpGVIKJOKky5q6enlCDU4VchmZFld/SmF6bwqwExGRqgScunHEqBZimorsfCTtkzNioi2LyfqcM+RpDp5F0c+klMi+hNmiDMM4EnzAKkmry8hI5kxTV5DNb4/H+AlcOSdc9OjpNVuenrDuHFJINl1HcJlBq6JTUvXU1ZVoWUrOoRtpa4syEh8iWhkaqxFK4TpXilShsLZBKMX2dg8pUxlb7OK+BC3P5nPc6HEuYG1N08wYx4GUA1Zp/FiKtqapy6bmATIxjZjaEsmELNBJEEYHAeqqxXWB6CM5wW7v6V1ibiz3WonC4rynMoFFXaONJuZEjom+G8g5HzVA4jDySeUwpgyQIn50JIqLOOWyGMaQGYOgd5EUA5W1bPqBVmVmRlBNCQ9axMJFC4ngi4ap0g0QqLQiZYH3CT9k1tExDhGpBKMfmJ8s2e/2BWBaKZAVqoLgirMu5UwJNy5ROk1bE3xGS83Y76mVoNKWcfCkAUwNSoZyiKFDSMnJsmHfS3qXCaFBkjDWUxlNrU7QqmdwA5uu4/p2T06R0UeaWrFoHSYqdouGxQyymzO3mqtNwgXNkCJSJSqTyX7EZHGHHfgJX1II6spitcDHkRDKmDzGyMvLK4zOpDRCTsXEIDRSxWO3rWj78iSJKDDgSmuqqiqamaHo56KIWA2BErp8kCpIqchTV2wYeqRQWK2OzumUEzJK4ujYe8/t7Q1CCGpbU1VFH3x+flaCy9VdRyqldAxiPlLac4kSOuxrv5u7+l9c/2xX2zYTrikhI4gsp/0MAhReHGVyIIQkK9BN9btqij8XhVUWgCoWYUFpTx1EYp/S7rwyVjoIU8XU6S
i2yPypU4GgkJaHfgAycgpdbduW1XI18VD8tCGEI8bepcJvkUoTXGTfDfTjdFoW+jNiw6JTCSEcRbkpZ5CZHCEkiQuZagj43hG9QzhPLaAWERdGNv2eXd9x8eARZ+fnfPfJc9Z9R+88b731Nj/15XcZ37jg61/5Ch9+csmzqzXPXzzn4xQwleVkdQLzOcsp9+3i/IKmapEoSAKRJBdnF9xuC2zVOcdqteTRo4e8+8Uvc3Z+Drmc3p58/ITNfkcUmTEG5qcr2uWCannC8/UttdEIWVyWs1lLiQmKpdCc3oOQy0gkHzPGDkHY+U6DJib3XlEFQ84YbYpVmqK9ksB+HIkhHvVcB6F8COFTuoTyludPYR8OH1dKFU3b9D7lXETJShS2kqCMAF68eMHJalUozT/hK02taJ2LqM3YmuGq49HFObbSxNixHXv6kHh2tWY1m3PSzkkIbFVz1e3JWtM4R/Yd91ZzmnpWeD4poqfXcQgj3W7EDZ66sggxncTHQNu2jJM2DTh2pEJ01LXBe4dzgbpqyJliEJCKYegQMqKUICZBRnF720HO9M5RNTN2ux4pNddd5LaPLJcrrEzIMAIJazRCBLSaMutyIKZACA4ojruiqSnHSiHFsRvbjWXUeLhPhCxRPsVkWxUekdTs+wErBZrEsq2x2uDdyD52WGs4PV2gtQHAj4Fuv8UYia1avI8YLRjHfdE31hW2XvD+9x5zs+mwRnG2WnJxfspZuyS2PdrckbljFNysB4bOM/aOIXsQMKsaYi6k9nFMDM7RzCzWgDQKrQqywlpFvF7T73v6ELGd5XLTI9SCs6yIQqF0zfmDFrBcVhYhcgG5ek23u0bEJVLMmM8Vug3cbjdIrxjGRMwJRFnbZPpnYK//t3lNv0RKpeBFOJSx0zpeXOApRgS5SJDFtBlKjmBa59wRMiooXXPnHHoqsLz3ZO/JuRRizrmj9rP8fEkWUySROBikzCSPSJMTvax3VRJAwoiAjD39esTPKqw1xdGOOK6Rv31MmMoB8hVz0L+4fn8uW1XkHMlRInSGVNz+UmaE8+SUigyJqXOpzdFt/KOuz0VhddBBAUWwLn57UXUY20G54T9VcOVc2FRST/lhkCddgBCKJNLx5/jRsR4dTLqoqq6omop2PkPn4qCr2prkEz5mhjGWmIoYjuGfh0ICKHqq6Xc7xBGEGNBaEvvA42dX3LM1tdGMncMHh/AJYmZ0jqth5Nl2x9nFfRamYRgc3/34EucGfvqrX+aNizNUGHl4doLUc3Z7x/OXa7z33NyOrFZLtmp7dDbWdc2XvvhlLi4uePzRY15cvmC73lI1lvOzM37w0ccTpHHFbDZj1tQYKenHESE1y7NzPvz4MYMb6YeB/X6PMpqPnnxC8AFCYHivaBbGmGkqi7Fmct4kYvT4KAkTYPBQ/KT8apv77r1M5KMOwWhNjEWErpREItBTQXTI73tVH/fZvK5DKOphcTq8H+ozES2HovxuBHwnVLy+vv5cdKxEgagcF1bXD4jgaZQiBV8CWrVm70b6LFnJop3JCDrvudyN7Mcy4npw0qCqml3vub25xWqN0QJhNJtNR5gKTaEghBElMlrbY/zMMAxcXFwgCMToJnOBL267KIs4ve+PWWnkSFVrQnDEpCb2VKSZzehu19hxwE+Ykuc7x9XVpoildUYlj7WKqq6ZzxZUVpJTibuyVsOsxbtETIJxKEVYcW5JrNUlqkQWWngpDoorMPjyuySZCAn6YSB6x7I2tIsaN/SE3JORNIuKk5MVSgn6oaOuKyqj8V6gdBFKW5ux2hDCSJpwHuv1ttDbRY33ATdGXjy95Gy1QFUCPW+pjGXwPQRYzhakODCahAuZupmxHzrGMSB1hbEVGckwJEKKNNqQXCBvtgg0KjvmjcFQsesHLm82hGAIJwopRpLM7B18/PRDhjcdlVH0AwhjOD87xTYV1x++YLvboauWk8UpeowoOZC8J0WF8BxBiT/xa3oshZTkHI7FEJQx8cEinybcQZ66SNGFO03tlAiQczpqNauqIoRwB5pNqcQNTeuFneJznPeTA1UiJmCrELJELcmMVKC1xGqJIKKlKp0zMiE4Bp9KZFYuLKuioPj0unj471c7VkU7llmdn/0LmPV/y5eyNaYyBF+03JlCydBC0FqF9EUykcLk6JRiYpb981BYiZI0n2IqvKSUP2UJfxWT8NluRM556nrcfa/DxycpOvqQQi0nFdBhlcgwdj3DZJmOq9KpOlmuEEJyux1wL/f4ULRV2pSCLoRwPPVordh3haN0DNWkwONklHzn8VOubl7wr3/zG9y/eIBUmjEIbobIh99/zK++/wOudj1eaJz3XG82rG/X/MI3v8E3f/qrnC9bZk3N6Dw3feTJs2eEVNrTVhfRd9d1DPuOcRj4o7/wi7z+2kNOTla8fPEMQcSNe0LomS1KWK1z5Wsgc3P1gpcvntO7QO8CVzdrvvvt7zBras7OTrm8vOT65RWQmbcNWZfRX3KJD5+9YN7UzNuG5axGC4GxCjEmtIacSuRQjKWLdyiuDu9PzgXOdzQRKI0SkijSUU9V1XWZeQsxFcqfLqRevRd+p0L88OdwHymljl0t8crvYYw5WvPj56CwMkpy/2TOXimM0ZjkuTeTtHKA4Mgik6Wm2w8IYF7XVFIQlOLFes1659nGjhyhbReElwMqB8iCmTJUpuF6v0c3C1w/4txQdEwpobSgH8o9Xdc19+7dgwkZIIWk6wp2wbspX88PbLdbrC2bXGUt3o8obYvXKibmiwW7IaBsRZKCkAVD5wghcrqcs2gMrRFILCn54sYbR3IoIzpEAYMqpambMrZRerqnEhijjuOUg9u0UhJb1/TjgLGe9d5zc7vHoYlIdN0itWSzG5ibUpDbqibnxH7fE5PDuQ6pViRZnMjBw37vceOA0YXNFXMkpkhKgcWiZo7h0YML/NijSEQ/sr6JPL3cEGJguViyWCwwNlMZsJUkS9gNe2IKSKPo+57eedp5S/CRzg0oo1gsakSK1JVFy8h2H/EhMW9rLk7nhREmBSIFyJIxZW4Ghwqlm/y9j15g7B6XDffmmvmyBW3ZbBzjkFgt5ygl6EbJfuynLv7vlGr3k7lijGilCkYmpdKRnKzxOckJlfCqbonjcy9eOUiFEEjTWn7QCx4OEmnaOw6a3WO+prUYys9Cl263EgVSLFUmi4CgAGStkszawig0WvPi5TU+u6PZ6SCHCOEuU/NwHUw5hxc9Z2iamr/4H/yv+M3f/Ce8/xvf4x/f+7cAePAb/yfO753wb//b/wN+6Y9+C2thzIm8GzkTlkTGPjxBJHBdT4yBuml48fQTVqslTdMSg6LziXpxhjK2mJLcwHb3ks3mJW3TsFieMLhMNT9jP3i+993f4C/8r/83PHvylOSLwWuadmOUYFZpHj1Yce+k5WRh+amvvMf9i4bGapLzEIpUxIeAVBKpVNHbTvtBJuOdZwyhGMYqhTAGVbUkYVhve3a3jhe3I9/78Jq/8Xd+mdvdAKJMpuarM9Qv/XvEnOn/7v+et+6fcN4YBIXvdxCnx
xgZvafvOvrNln4cIEekgEpKrDIkYwgJfIr4lKjb5vfU4X4+CiuYZprTAyyY5vqfHvm9WlTduZI4Fk4HDhIAUhy7HAf2yfTFx1FU0W4pco6lz5cyich2e4sHQjLkXLg+MaWjiPDVros1FmfCUVx4sGQqEkYZBi15ngLff/6M1y/u0c6WvNju+bVnL3n/xQuuRk/dtKzmLd3mlvXtFavlgm98/as8unfK6XKGthVRGj56/32+9/gDYpb4EArBWWuMNlycn/Pg/n2WqyUpB7r9LV/84lukNHB5+ZgnHz9jN3S89tbbfPL0Kbe3t3z88ROuLp/hM+x7z3e+/wE3my0zBeL0hO31Dftuj0yZSmdmKpC14lqWLLQ+ZIZtz+2up60Up4uWk8UMAK0kyirA4CZx8mHBglfsxfDp91JPrXVZ5tmVtYVVM722r/JfPhuE+er7cvi7Q2J9jvFY2L0qFD3cN4eTaYbf86H5g7iMUjw8mfOxKYXV+bxiedGwaAwxKnySdFFi2DLXGpMjIiVQAWFBoanblo+fXdOPmVlTczLTLGY1UWVerJ/R1KYcCmKmEpLUaiojcGOHUmriVN3FSuWYcK6IoWOAcchYA92wx5p6CsONiGwRQkEWdN0OW9fE7HhxveX8YsHoh4Iw2e6Z2YrWVKwahRaFDN7UFqM1yXv6wTPiisjdBWwVkTIym1cYa+h241FMP45l7COzIouMj4HR+2I4EYq6qlgsDaGP3O42VNYy7HbcWzS4mBEZXO+ZC03OBa45n69QUgOZEBIhSNyYcT4RfXE9ZikY+p6q0ty7t6AxitWJYbcd6YcCnhRW0FSFszSmkbgLNHWx51urEFrjRcAqS1XVaOvpesfg+iJ0j4rBBewokDmipcYYxWJmMSnTx5HXzmfcdg7VJnRUZDRWaVTTlg6+0mSzZDsGtn3Hk0/WtG0DQjJf1Axjx9DdYGwFomj8LPo4tv88XIdkjpQSYnputZYYraeOVUIZA3HqWOeAEJ8OHz8YlQ5rxKt/cs7kWPSmB33TYR2RUqJlRkozYYBKcWCNoqoV88WivHdKkyc3bzHJBFS9ZDVTKHsQPWfIk270d1pv8t26eND0jpM2tOsHDnWXUpJ79845Oz9hf3vLdRh57PYMu5GvzO/x5sU9MhJcR+x3xWhkdImVMhU+CbY+cbt3+P01VVWzrA2r1jJbrEgiMvY9/eAYAmyur/FRMK6v+NN/9Oe4enYfUsRWlucvr9nu9jx6cMGjh/c4P51z/3yBkQmypxGRFHYgwDaGGAJRRIRMIAu9HphkJREqoGnopu64SAGZMlJYFiKDTdzmAZ0HjIpUpmgvjZbkYQ0pIHPG9Btun+65986bzGYNCEFMmRATMWmk1WijmdU1Xd+z264J44iKER1zYaYJyaAkmUw1dTB/t13ic1NYFd1S6UpIqUoS1vQ0f0pP9RnI2sHdVgqqdLTKZ1WE7QfAWiYhEciJWXI8gokSX8AkXBdSUs0WrNd7fPRII5jPW/qxw/sAOWONQVAYJkIokg/kEIgplswvqTHa4iXF3RgV21DhZMW26/nBZs3VGOiyYDafTzP3yGa3JoSBn/9Df4K3336H1sDqdEGzXPLk5TV//x/8GiAZ+x5BJmZY1XMevf4I0xqubi/p/8k18/mM1WzGe2+9Qy0FSmSUUXzy7DnLk3PauuHyds3jy2t2Y4d3Lb/4i3+GD//ud/F0yFqxW68xIqG8o1aJma1Y2hqE5OlkMFhVAp9K9T+ExCfXG17uemqpqbRhMW9YzlqaVqOGoXS6yISUShRdLkLB8iZP8mutMCiUOOgKMlKrqX1+KJYEOX96sf+dcA6vuke1Vjhf9BWHt/8A9Dt87oFK/jmoq4gp04ctYuqyVrbh3mlFoyJSRFzM9Fmi9RJSwkrJbh8YHKhc0TSO2aJh2xue7zrU4LncSKTc4N3IYtYSo2cYIksr+MZ7D0rmXExkUXhofT+WjE1yobpnUKphHAPBe0xV3He2qhjGwG4/IJUBkZjVkt0wIuqGYC0fPHlGvXjI+5c3SGWQ2XLVJ2Y+sDpvMErix55FM6OZFyF8N5YFtq41WThsoxECnIvEraOuKmZ1XSjo3hNUZkieGMpY0lQNWWpCSmQiurJoEdA5kkOmS6VjOpOZRnscEkGFSxkjMlqVtUhkUw57aiCFnjGOhCiQUhODJ8WEloacHBKHT5Zuoxh7T7crmrBqirxZLRcl8ic4QorFpThGnr+4RmpoqoJyqZE4BNJYmrah3ztcP6JWcyQewXSqriRhGFk1hiRAykDGIkVNxGNlpkbRTV0QIz2pTkidePGy47xqePHykraxnJ4tmM81265HZFHGgJP7+XPwSAAcu9uvPu8H7aQxxUGsVIkBy0kwjjtSjseD+eHgbYyBSd90+Pihq30o3g4/7/AzYvDlgFm3VJWlshWVsdRVhTUZoR3eJ0Y34AIMY8CHTDZz1GyFsQYmMn7RreXfuaji1aIqIsRdPqLzHqPttH0Vjp+UksvLZ8xS5G/8rb/JX3/2Q5arc/70F36aP/+v/BmUj6TNNd3tDVJb6tpS24pu3/PBs+f87e+8z69/+/usdw6rFf/df+3P8C//wjdZtRqpDCF1ZCGZLeYoO+fqdsPKKv7YN79Gcm+S/J5MYnBlVGqNQopMXWskEe9dicuhQZgWY22ZnIwjJhXNYZiseEJKqrrknPZ9j/SJuW1oraUfO7JzIBIqS9TM0i1qNJ5KJWa1KuP/6DACcp4K3+gRLrFfb7l3dkoWpTMvQ8KV/Cm0sWRTYaqKprbsr6/RvaPW4F1goHw/RzFVHbqOP+r63BRWr45uhBDHwurVkQ/chcK+Kh6HA47hbiQYcp5uysksmw/Mht/+cxEJraspKkHix8i+H4gpYyrLYrng6ua62K2VRmtNW1vOT0+mXDBDyol93xFzaS8mKcvcXQiMMIwRhpS53u8ZciII2HcdC62wumLf7dnt9pyfn/Mn/6Vf4t2332LYrtmPPaEb+OjxU77//vdRU9ZhAOqm4ezslNvbW5781iXOhaLfUgFrBa/d/w7zdoZLoKsW7UeePHnC22+/zcXFfSDT32z4uZ/9JivTcGJq9g6CC4haMGtrciwasrZuaCYivZreg0Vb4VJx5vgY8cEzOIcjokSJiGkqy+lqyayp8WNfOFRao5jq5lw0cVKWdrCAAnEERA4lzFcUiNunF6FPH6GL3gFSuivCP218mPhbh5GglEcrffn7u8//PGwiUkrms4at0cSQiH7A2gaSn4S4ntttx8nZObXVBOcL2HbruFrvUEKTvceITFvb0ikJheLvved2t5+eM8lMt1TGsNusaWqNcyOORFsVqCUpY03JIOz2A91+mCKDymnTB08/9PTDgLZgvSwJ9LYhS8NHjy+RZsHLl1s+evqSexf36LZrGl3RLiQXj5Ys6kh0Ahdg9D3r2w0xZtpZi6klTV1yD8fRobQtbfwkIPtjp9v5wOgcRitMVeNDZPBltJsyVCnixoA1DUYreh9pa8OssjS1Yux6cvA0rUWrRGVKwLIfA/04kIj44Cjd
q1jwHdPiXQTMJXImpEw3OEYXGF2AnNBTFz74QFNXRSMnBF0/8uLlLU3dsDpZIIkUPLyiFZY+OLa7HQpLPzjWuz3LmcX5UMa2JmKNLsLaFFi0mm6MeBeJeGJI5HDAkGRSKG7cClNcs0YRpeZq07EbPfculjTtjH4fUeLATvp8tKsyEKdOkMygpSyvrVEYq6mbipwK+TxTqkKpFEwjwrK3HEjnEVUEU9R1hXcB70paYHGTxtIZJFFbxXJWNtzZbFa6Xqp0o0IMJDESleXldccwRrLQKGWJGIQVYBS2aoq5KRc3c+GTJWSenM65SCfkQR+aRcGVxJEQR/Ymc20V2Q/F+UpZN4Uu+ar9eks8PeXp9paf/1f/LIuz1/iv/+O/yp/66a9TyTW+e8nQ7ajbBWGokKbl448+4q/+v/4aH8Ul8Ytf4qs/8zX+xv/9/8p/8p/+Vb70xkPEg1NCtyf0G4IKNJVCy5rVrMIvKnos9fIUPypyHEtWqI+kOMGHXSxRVpHyeiSJFApV1YzOIesGW9mSz0fpDk5jJGrbEJUlO4+pa+q6pnal2KqqIo2J+54375/x8uGWX7XlIKikJWVBFvG4nhdorGW7H5CyNEScj7gQcaGgebwPRCuIAjSa0/NT1KZD7QecD5Aioyg4pdOzs2OM0o+6PleF1aviY/GZvzsUXK+OgA4iv8PGDByJ3sWt9+nuVmbKF5+KLYFAG0PbzoseJEOMgd1uh3OuZK41lhA81loEsoSwKnmED9ZVTYiZ1o2M3hUMgChOx5xKEGhWgvXQ8cPLS267jp1zrHd94VkZRYye2/Utxhh+6Zd+iXfefouLB/f4wf6GrXM8fv4xTz5+zte++lWeP3/BjYDbdcEFbPe3PHt+xfVth8oNX3336yTV8fjZ+3z/8VNSStimaKu892it2e12hFBS4Gd1zW99+1f59m/8BkN3hRAeH0uMj5KKti7kcykEOXgQ+XDvo7TARoGWCq0ERhcAo0sCFxMpRPbe8XK7pa4NjS1hqO100rNGIy2oCaYnhMDoUlaVDmMRfgqRiaoUvZ89qU53COJw4hGTtu6Vk2a5LzhCXA/3WdE36E+PFT8nMw9BprEFtpll5OxsRj/usUqilCWkxPp6z3Y98M7br9M0Cq0rhNZc3W6QqUICbz+4T6UF49BzkwS32x3GSjIlqkOFyNuv38eqhEoRkBijSc4hpmdKSTWFGTus1owSzCTSDSkTp0VeT8LuMSail0jgk8tLsrQEJ3n/o5cEVXO5TYz7wP1lTd1YRjfiuo7FvCVOIyhlDM28RiuJNqKM9HzR3sUJqzGMI6QBbQx2VhP8VFCEhEqe3X6PnhZVOQVJj86BtZN4WWCnEWfXDQzjQKUkOSSsralNhRs9XdcX04kUSGFpGkPfrcs9qsv9H1NAV4bk06TdcKQspsBvTQielBN1XfRiSknGrkOKzBuvXxBj4TKN3uNHX5hfSeJFRlmND5CEYtcN1FP3K3mPChFtyinaSElyHYZyAExCE5NEF0MuAMkXPqCKAmImhgxoIpr9EPGXVywWAwpLzHnCyXw+rpwL1gZRTC0CCqxZiok3VdZbUsKNDpBoZTFqGuUpgAPCIKNSxFhDbQzZe+IhhjlHZrOaxmpW8xnztqIx5eeMEXwIuGGgHwaGceT07IzVxQn981vGIKiqumQQiuIOF9piq7sosSwyB/SfyAUVU4qtifQ9dRdjDAz7LcSBsVF0uwZyxPmSEUqGmCInqyXvvv0OUiZiHPnht7/N+fma89rg1895GUeqSkDIZG8Iww6cZ3f7HLe+plYV+12HHh2t8yjvEMPI5uoK3AbXb9jHjuBGVhdw9XxN1+1ZLBdoEREikHwu+bTOMQ5hSkEp+lpjKtq2xbmEd54QInVdIbUmpIg2BsQdYFtJBXVxQvtxQEtBVTfoukLbmvl8DgLamw3bIfKtb/4MH1/e8Lf+/q+z7SNyGtvmaa/QRoKM+Jjo+zWvv34PHyLd4BkGj1NFF+cChCzLbCsrVFshYgI3FJ6fkBRa4p2e+kddn5vC6lAYHTtUOX9KuP4qnfbV7sJnhZVHZwV3n3f43KPGZhIrt7MZ1mjc2NNtt8T2Lm+wjBw5nvSqqmLCYjKfNVit2Xcd2lRlXt0VMGnu+zLymnIOmfQP192WX/ve+8xPTwhCsh96qqpESjx//oJM4ktfeo8nT57wl//yX+Fbf+jn+OjjD/kH//CXeXjxOi8ur3E5IkVmuVgQg8caTTeO9FPAbK0lv/hHfp73vvwG/8H/8X/H7X6HqYoovq4sjx49wlrLYrGgqipevHjBret5/OyD0nmzCh8TddaEUCB40gg05ZSLUXcaOEQRDDO1VUWhDFslMXmCOcbCUAohsu/LCZ6UuWGHlgpjFLZRLNoZja1obI1QZSyaUp66SnkCAio+a/o+ZHqlVErm8l6XX/C3keCPJ9XJpXiwUPPpDEMhPh8aqxAj230hlQcf6MYOkTNRV2gtqdolX3yv5fmzZ7h+x6Jpy9iaivOTE1AN280tkohCsJo31MZiFXSDw8UMUnFiFQ/PF+i8YVbradEv7+mB9RaVxjuHtZoYAkoKmsqidKY2lhAT/TBydnaCj6VbE8aIzqFsfhK8c5ycnfJ83/Ni06NCJMREd9uxDZlZbbnuR5IUpORZLud4VzqhIk0jYG0hFvBrpvyeSEXIApEEWViyLM6/LDLNfI7VuqQwOEdIkvl8wW3nSDlglKWaAKG2tbQKWl2hFSAU683I6IqT19hMrWqUqnBDpK5L+LKQCSkSPjmGTU+KGT2dwgUwW7RIqXDek7Mg5VDAqzHQ1DVKaWL0KAE5ZnzOZK2JXjCESFIFn9H7SFaGIUQ23cC8MhhTgSxMreB3JXBZZlQSGCnJKlG3M956KPieURgtOT+dEX3CDpmZ0YjgS5ck5YnID5tNj5IBnxKVEBN+5HNw4JjGeXraOBEF4joMCSkjJ8s5RsM4jKVwQk9YFSAnUiiGeaUkKgtkErj9njQOaK1YnDTUdcVs1hTmX2Z67xybfmQYRvZDPEoaiohEkU2DMDW2rhEKjKkwdV0MDTl8aq/67PXZfa38MwUpejIBCCBScdjGQAixdO8P3yAlPvjhD1nfXqNmhrNKcfX4h1wIxc9//V2auMGPHUq0zNpFyb+MIzkn3n3jPn/uX/2TfPfjDb9xfc0/+o/+I+STD/jZX/gWD07m9P0t435LSg50Jowdzz78Pi+vt4RhXxICGkvwHeN+g6RMb6xVeB+oqnrShwVyhvl8MWFkSlamNrboQoVAKQsUPaKddGgxZVKMpBBwIaKkpKoLd09JRd22dH6PQfAv/6k/zm++/wH902vkRA0IMI3wI0rBarVgddJSNxoVZIl1kSBGQJSGiE+KkBVJJ7zV5MZCbFnMF9xc35QJlDGM/ncHtn5uCitgUuqXjoKYulYH0eGr4vXD50Jpr/PK1x0+V8i7sd+rG6XWmtlshtYFlLnbbSBFhFLHNmQ5LSSMtez3+2MFnFJmPp9RWUtdFZ1Vzpm6bljMiyNncK5A63I56aUpJPi224H3mLM
zgsglc1Cqozj46z/9dW5vr/nud3+Lb/+T3+T/89f+S7z0fPVrX2G2mvH88jmb9RqAGBwPH9xnNptxvd5wenbG6RnEYeAf/MO/xm/8ekPsB2LwzFcLzs9fL5vrcolzjvv376O1pus6EoIH9yPd0LPvO2SykCQhFMK8lhI9dYOyvBu7QhGoCwQqFmtwEgU1UQMYjYsJHwsp2kdDiMWWnMj0KbF3DnzmZjdQSU2tDcv5gigDVms0mdVsRq01cWKLHd53rTXaKLQWkz7qVZbWndj00O1UU+zJAbsAUJ7pzxRrnxkh/qSuEBPbMdEPxUm0HUbiONm5K81++4LXL1a889b9MpJjQGHwfaZWFWPcY3XpN4QYGUbPXGvuL+fsrWcIJR9uoTNGBLQoyQchFzSCD74UtlKWfEVrSCEgUFhThMLWKKq2oRsGVqslSll23UCVBDFAbTVvv3EPKWEcB96IiZej5fHlNSf1CW8uZ2gCMiXSMKArDQriZHnX1gAZodKRpB1DAiGReroXpCUmQRjKmDBFgW2KIHzWVCTvaJqK9WYko+i7oQA/Kw1Z0GhFZQy968gm4/1AzJohKPohMYaAbSz3TlqSj9zedAy9o2nrqXsSqWs1QXMlwzC+sqom6rpisZgXWneMOFeYerYyhJAxSiBFgVFqVcbkYQwMzjOGhJCG/TDQ9wlTa1KKJGTJzkwJo1Wx9pMZ3Did+FuQCcGIEoZFkzEKtILViSIGxSwa3omCKDQiecZhT0wZodUk7gUohZoUh2jan+xVlue7g7Z3DmkMKUeSVUTvEQlSiBAzUktECkedGOTSeRQKLRWLel4QHZXGWo3SIJWAlNhvbhlGxxgS232PULqMoClcM2U0cuqWmWZJ1S6xVU2JC9FTEV3ybg9NgldxCncTl9+eEVg+N4IocUbkRAhTbm2IRc83DV5iKBmTH/zwB+g3L/jaO2/wJx484KyqOQWUHBlypERW1SVvdehJjCgpefedhzx65y1+djB8/PFLdi+f8qUvvYXPHbOZRceKccJVVEbx9MnH5Kyo2xn7YaSpTeG6mZrdeo81mlk7p2naEjuXoetL2PVsfoJUGh88MSeElEdoq1AahUSZTDwUoFIhpCIQENOeKhDgA9oIVGWZLzK7zvPmwzP+2C98k8v/918HrUhJTEY1ePOth/zUT32RR6/dR4tpjO8jSEEWZZpRDtQRnyMhR3wGHzOptqiU6TYdQ0zIqoZc+GP/TOJ1IcSbwH8IPKAcW/5SzvkvCiHOgP8H8A7wAfDnc843ouxKfxH414EO+Hdyzr/yT/MATT/36OZ79Yb8dHehPGQxxUkMnRHT5vnq56UpOHnetmhrGIaBzc3ttMGWDUWGdBwrel8y2vyUHO9Gd8yiglxwB0lTNW0RDwpoKstYVfiqghgnzUMqYMqJGj5bLmjbhv0wcLJcst/uqeuaBw8esL5d8+zZc9pmVizdQ89ipXn7zdfKvxGJyJmQMuMYePBgybtvv0X68CMuXzzHGIXPmh8+/aB0C2yNrQx9v+fpJwMP7z/g/OyMm9tbNrstzjkuXzxHoTk/OaXpK5IPqBSorCFLiEKQpSpOKykK70jKCWJR2GFMm+9Bw5a0QlFEiD5EfIhYUXLaXEjH056QuoBhyeSYIUaGvmfoO7wsDCtBpjJrzlYrpCoumpgSRmuapkH2GS0ly+WClEsMRU6SFPwEACxC1/l8Rt1ahikXsJxGM21TT4tenNrW8diR+0k/E5lMknX591YGUykYM/su8my9JfqReTtQmRalBQnLOAqub/ckOed6WxZTpQR1VdNazTgUcvfZqsXHktw+UyCiR+lSFHuf8C4h0HifMFoChadVVYa+G6mrGmPuIL7GVsyVYXAeWyuELoweazVCZqQoqA6N515WfOH+m6TRIYMv2ZXakGN5rno3lu+rCpFfkIm+bCpFGJ8RKJgGxjmNhFQKwhBzeWbHjDSGIZTR3eA9nUuEFOhiIAhFUzfIVIrK3o0ILUrb30hikLzceta7EWLHm++8Ruc1Ny+u8P1AUzVH/WYRMRv6vkcJWxhLRFyCYQhs9g4hLOfnc6SS9H3p/nnnyDmRokcgkVlSmwrvRmLwZKnJGkIGN3jGkCGqEpzuEiiBFh6Soqnq6fsYhMzk7MkxYVSBn371vYf8sC10/ouzGSkKfJB03tGPESNawrhgOyR2Q4eqbDloSjFpyCTHnI+f4CVkiR8rgNUytospooUq94/zRBlIMRSDkfDFUCAVbd0wm80Kk6wy5dCYFCF6QvRTSHvHvtsXHWKMSG1RtmYIGSUklbJIDFJJtDEoo4k540Mu3XihSLnEICEUiFS0WvLTWYAH3EsZ5/12yPFdZ0tQoryArJCyQqkaq4fiRs2Zbr/H7dZ8/NGHfOWdh7z39jvcW86wKbC7vWE7OtZDTyKw92Yao2UgolRGVZrlyYzXl+/wlfe+zIurJ+yGW4RNtHWLim3h2yXPMI7s1jc0dUt17z7b9Q3VvmdRSZpmTrdZ03cjTT1jPluglaXvOypbl3+jLFMHpS0hOLKQGF1kDUKoKT0jkXKcUjgEiBJRFWOJ+JFKobQp3VVygQb7jpgF3/jpr/DX/+u/x83WkYVCK8l8MefP/Rt/ltnMImQijp5xdAxDQMiRjERKjZSaLEZsKuy/FDNRa3ICryP7WIotq80kbxN8elb26evH6VgF4H+ec/4VIcQC+GUhxH8B/DvAf5Vz/gtCiH8f+PeB/wXwZ4EvTX9+Afg/TP/74z08h0r+M8XTZz/ncJVT6x3RWCtFEoIU09Ey3rZt6VANPdub20LXPdzM4pCFJY7W/wPJPYbE6APD6HCjQ0hw3hWelVIoY0je492IlpK2qvDWkrxnmDKolFKkvkchSqs+JRZNixKKblcI0U+fPmW73dK2LSDw0YMS3DtbsZrVDGPCR4obJGW6YcuHHz3h/vk9hr7nxfMXzGYlsifpmpAcMUcaWVFX5aZ+4803Jl1R4MX1FZvthu1uR20r6A8dIDNxSBTWKLJSeKCS5SaSEwvmcEkEWRw6RuGoEUhKFcoapauVdWm9GyWP7Bk5CQELdC2DUcdTnUdNOpqMC5HL6xuELJt7ymkSwCuMUGSfqOuaxWIOCIzRZXzIpHETgn50yG1xBoYQSGN5/32KRUgsJWMsbDIlfqyz+e/7M6GNwo3F9aOUwGqNiAkX4OnNFpkTbz1qud15skhkofn46QtCUvTB8eR2QIlMraGtFGerObZtcePIbnvLcjGfirVA8JkheqIIpCAI/q4jCBFty0hFW0OrFFVljxrEYlkuAEtbG0xjSZRRZrGuRxrblPGLzpgM4+AYhUKrikopfAhMZh50skRVXKwi5WNenxASpYuN3Y1hcoEptCmh2SKWwstPLLx977nddEfHV/CRYXREZYkk2kaRYk8k0adATfkZ/ei5uu14srPses/SBBYu8PTDLYvKMG8Fy3nD1dWGqq44Wa0IIeFVGXdoq0rnVNb4LDFC8vJqh4sds1nLbDYDIej3HTEGSgyUxAfo9nu2u45EZnSZpBrSpOtk6rhGLx
hiRlSW05M5yfclAzXEwlMyYCpJo0wRZHcjN+EZqZ20oYcuroi01QytIrOq5K3azqF2md0wEnPRK4lQnM6fh+swwThOK6aMURAoWRAC3m+ROlJXisW8ZTU7LS64uppQLqWY6ruB6+3Ay5cv8RMMNMay/gQpQRkqWSF1jcaijMHYGpHKzysxXaVGERR6tzKGPAR8iAh10FGJYhp4pWN1vPJdIfXb/iCQwiBEcS9KUbG57Xn2yRWbmy3poqyj69tbXr9/xqNHD1gslyxXM6oKwPP+D17y3fef4DqBJ7E6e8a3vvXzvPHGI8gRRMbnSNwOzOuEqVrMokWzR3mPtoJcz4hKk4ae6EaUMdRVxXy5ou97usGhs6QWiso2dLs92+0eayvqumEcR4wpE4Q4gWazEGhtS6E1kfFBImQmTevJYdSrjSVkkCRiLjDplIuIOQsBIpPjiPOBi9P7/MIf+UP8l3/773F+74z+9IS2rbm4d4a203jVgjEOKUdSVsSkECKQUbiUsGGK2grT4VZkgjaodoYaiiOTlH83QyDwYxRWOeenwNPpv7dCiG8DrwP/JvCnpk/7vwB/k7KJ/JvAf5jLXfT3hBAnQohH0/f5kZec2nyHEaB85TcXn/nvT2muJmfHgVd0GPdYa5nP5xhj6LqO9XpdQlcPTrRjtVnEhFN0cCkSfOFSKW1Rr8AlpRAFYBnC9ECVz5W6FBx1XVg+wzhipDwWcIei78AiqWp5DIm9vLwkpcR8Pj8K92N0NLViuVyURTtmUAJbV4zdgAsBtxm4vr0u4cKvUMUBrDE0VV3cENowX8y5d3bO0Pc8f/6czX6HC0XIrg5g1pSoqmqyHUeUsoUuK1QJn84BSTq+zgfGWJ4SwQ+I/5TSUdx32NTuGGKAUmWMKiaJ+iunz0Ox2zDd1CkRC1CeKGThicQ4jfvK5h0TjPuObT8UbULOZFUE8bO25fzsjH0/srnaE2KYXuOJzkyiaYoO4HC/tLaaxiA/2WdCIunX2+LATJkwClASrwoH6WS2YNOP9NmRRGbfd3x8uebeg0e82Lxk5wrrZjdkZqEh6Yq5D5yuFnhp2O8GBBkrFLWyaJg6KAJkOYnHnKmMRoiENoaIoJ5cUUKPJR4phBL95D0ySdpJnxKjY3OzRmtFY1UJNSYgUkakQGt1OYmmQn33MU66vIAyhc8mUsaPIyFEqqYiC1c6xDpS6QnFQQHK5hypmgYVEqN3SKXRWpFSxJjyHpsxcLsbqExNjoHsA8Nk4PAxo5XA+0hSlufbju0QaB8sebFxXF1e8+b9FWePFiX2QkRmswqlyz3eNDVdV6J/cqoAgyDwcr1huZiTh0QQI85FKmMR0tB1gZgF3gf6rkdIwereEh894+1Y3gv0dD8GfAwgDT4kUIndrkdlj1G5CHxlwZD4MWJsKFqXnBh7R98NJcMUT2UV2gi6TjAMgXZWio7FvEZXFrXrudnsilFHapTIwE8+BDiEQEoZW1XIrqA9fEp45wjOsFrMaGdz5q2iqsr7bwSQMiF4uv2O7W7LixfPKZ2gitvbddnkhUJKg6B0XGVVYesGU1XgHYLDPjRBoBMUG2gkTx0yKYvBJqdXOlS5yFUOuuF8UKgIgFfGg9whZMoWJSErclI4l7i8vOL6Oz/k+maDiIH4pYBSkp/++k/xlS++xaOHF1RVRd20NK3h/cc/4K/+zb/D9UvP0lwwpI78wye0Jw/5mW/9IoJADAO7zS1pN9Bdb6hOCg+sUgqxHwhiJFcWWylcKMX3/fsPUZODTxpLv+uppESJMDEVNSEERucxtvw+zjmqyiKNIebinLW2Qk/RPiHE6Rw+rd8U85cUkizlFHeXAFHWJe+xsnSzhCyOTz8MqFngW9/6Jh988oyv/czX+LumHJqlURO/rIGoUHog0+GTxEdJTA7hI0LJcgBUquSFSolC4IVAWou1FVVlydzlOf6o659KYyWEeAf4JvD3gQevbAzPKGMRKBvM41e+7OPpY5/aRIQQ/y7w7wIs2holy9xSimKDPxQ5r44BD9dBI/Pq35XMJo0xhuVyidaaYRi4urq6i0GhFARiyp3LKZNy+bsk0tSxgtGNKFlIsNpUR7yAlmU2Tyj6qFlVY4QgeFfgkjmz2+0wtgAO1+s1fd+TJ7F8KSICzhdR8G63O/7dIWphGAakEJwuF8zqBYvFOWHn6EePyQFlNIMbkSLRDwNNW4Cczt39DlYbFosFi8WiULNz5ubmhscffcTNzU0Zfagy3+723VHnVdelGJtXFaeLGYu2QkTHGD0yQ6MnHIY8FEFMhRXH9+Eg/H9VEyenFr5Wd2YAKXKJhlCSbPTd+5pLmnsZ0UHWZbHxk6AapY4t9Ihgkh6U34XyJ0jBarnidLWi2+25ur3BxzJbDyGUmIspvPXweyqlMOYaYzS9G3/iz0RdWeZG00+W8nHvEHPLfhxRSnB2tkJpQU6GJBRDHMHMUU2LyxFty7gshUwfIW46YqtJQjFvG9CCzWaNwdNWRXAqUEXfIASKxOBGhDRYLQipiEejEMRpoYmh0L2zUKVAcB5dZZQuxo2mqamMpbLFWdoNe4ahx/kS+SRSJuZUnHAxEskIpdBTYTV0HbP5vGhmRJos1BmlisM1xgRhosUbg7WSmDxSlvtSyoJBUEqXmBzvEcaQosTtewxQW4OPHqstKRU0ynmlmckB3c45refYmAl1y2o+o7YaLQOnZ0uMVUhZGHlKSdq2IeeefgiM4556tsRtJI+vd9xXc6qmYrvbss97lGnY9YF+9KQUkBKMLJqmZTvjxWaAEDG6KuPMkBm9Y/AepTSyktzsOlolqHRR/gQ8kYxtKnq/YyDSLlc0dfMph6dAFru4GSF4NtuOq8sNwi7RreHeWeEk3QwD5bMFn4cYZikkWlp88GShAFF0OUphdeTh+YxZHVE6EJND6xqiZ73dMLrE1fWWkASbTrNYrsg5EoRCaYtUhwxUTW1M0U9BkZZQ7jeMJcvDnpSLNlcIgveluAuenMOET5jWPsohTwJpyksVUiBUJIuA1GXcl7MlZkVIjn23Ydh1bG+39Pst49CRvEMKOF023DtTPJmK4a+1Z5ytJFoMdOsXfHxzjW0W/M3/77d5/7c6luevcxUtLy9vUVrxq7/1gj+XV6wWGrd9jraOKAf2/ce4dHnkO3UepPM0pqatDNhEtp69Tyit0HlkiJln+0TTNmjfoVTG1pY0SWgCCkxFt9tSNxZdKQi5OHczWGVKg8LoEnaeHZJy8FVCT4ffsi6leAjDLvuldwlJhcgN1eyEfVjjU+RkteCPfOvnaE/m5OupQGvn6GaGFpqQBco4RF6Dy8gKZABUGb8npYhKkDQQMkIXc5TUknlVUTcCZIeO+o7B+DtcP3ZhJYSYA38Z+J/lnDefGcdlIf7phvA5578E/CWAB2erfMi9LSnS8FlKxGfDdQ9uL6UVUhe21KFDNQwDm81miju4GynKohYoi4QQIDOR6WelQwF6CAkuAMnO7+n7AWstja2obUVKJVahrqrJLWWPnZzFYoEPgSFGmqaZbKe+dJVCZL3eUDct6
/XmWBC+Gr1SYkMERlb4UXD5yZrrvSNiiG5PRJIojrn1dkuTMk1TWq6HrLblcsnZySlSSc5PT2mahk+ePuX58+fHwkpN9F0/3gWTVlU1jQtLtIBSBqkUQxcwonTZ6ro+FrSFASVeEVzeZQAejAhHx8u0GJWTH6VwCiW1PU0FtZmiDY5fcxCZ54zMAp3K98jcuUGTvTMb5FzIu6vzc2ZNy831Nd16Q2skXmWigmzvYIBxuoekOjgIIyncEdp/r+v385k4W87y6/dP+NCUgFlSwgfNduuotabRoPAUxZqh6xzWNmy3W3x0SOpSIBmJ0aZkS3aBISXWfU+/35Ni4KxtiQh8KvmA3jnqygIOUip2di0mXVwZs/uUUUqjbMV20yFc6TIF71hvNjhfmD9SV/gEImZCjuzGwBAyHs0wBHIOGJ0nXUV5n32cNHnO0dRNSUPIRTwbciSnqWgSmjg5rmZtjVCyuO1SsaIbUyCEIQZkhqqt0bkUgP0ugJLUUiBSj9CCMQ5UQjOvG6wVnCw0L7qRFPf4wfP62YyzGirhqa1EyKqIcKMjpYBShrouUTu2isyWDX3M9CHyYh8RtWfeJFa6IrqRza4npEylBSFMqREBhv2ARjFv50SVQSmGcSQ4X4Cioy9jJuW52ew5aSsqKahEKt2ZDHOhWZ01aJ3Y9XvGMMCDEnJOUoSQEVQI7Tm9OGXY9Dx6dMH1euDm9iXVoubsdEXsBtjuJ/L8T/4SUpaOQS4dqEBECllcpyHiQ2D0gTRF8Tw8vUfdNFxtBqSV+LwnokBVVM0CYwWDKxo3a2tynta0qav0WclJyndJEYe17rDOeeeOXamDFvcwBcnT+CtneTTXCKGIsUwDREq4cc/t9S23Ny9Zr69IMeDdWA7ws4bV/RXnpycsFjOqKnE5xavVpqKyltvba3QqztW5MGzWW1KCTz55SuciOcJsMWO2PGXfBxbLhpA1t9uetN0Rk6dui7g9hIxUNXm/JSuB0nO6fsfoeoTM1E1FkgoX9iVzM9YAaG2wxqJrhTSlODdVRTubIZQqeBedUdOEpOzp6vhay4lDGWO8c3BPru9DPm8IohTAsvQOSzGssLbGxURII4vFgqQsha4skWaGaU9pbE3OEjMMhKBwTpLSFudA6YBUA1JM+Y/TO0guI18pwViNrRt8FkXiwo8ebfxYT4wQwlA2kP9bzvmvTB++PIwzhBCPgOfTx58Ab77y5W9MH/tdfsB081Iy+UpXUBVxsxDToju1UlOcxuqlvWqMoZnNjvEbm83mUx2tu3m8mDpWx7H8dDIvAMrD6DFD6fykoWzUUpNTqcqtNsdOx6GoCKFYw0tOWin09n1Hv92VjLw0ablSpB/6wu2wFc77o/X00GkrN06xYVemRqDZbDt6nxmGSCPKuEbIktd3fXPDzHkqWxFDIOWMVprz0zN+6itf4eXLl4xDT/SOjz74gJdX14yThd55T4olc6yuK4yxpBjx44hUkq7vS1SEopzmhCCHUGBp+ZX3LB8ekkzOqZzaDjeX1kjB1CGcRr1STBmC8TjuSymRpjFjTBEm0rBQJUpE5vJzptDA6X2d3ixZ3keJwNY15+dnSCQ3L5+TRsfC2gnk5/ETaTnJPLXc74CnKWfIpezWP4Yr8Pf7mRBC8OjBCY+n16+qFNvOMewG7p0sSeOAF46YM2OE3bbn/P45L26fknwRaktZtGhojTaSLGA7OmxQhJhKHFOCm21HahLtSUtVW4TImKqmsZa2Nuhpx/beo3TRF/bjWNALugR47zdrbF0s59tuZLsfOD05Yd62CFmCtXvv2O735CQwugIhCNmhjMKgyQQSsWz8IpFjJseAEgJtNH4ozlBrSwGTU2I2m09ROp6UI5JMZey0PoBI5R4RQhB9JLqRWinaWYOMnpQESUmkLmkBkkzSgXuPFlz+8BNuu4GvvvUGr61aTB6wctLUWENIYWLDTUwlJahqAzown83pvGQ+b/joes0nL7fUEmYPVtSNBpMY+h4toLHN5L4c2O869ruRUVVUzQxlNc4PkEscS6KATZ88v6LKGakqapHZh5FFXbpzaTfQVoGzk5rT1YIhFsdzFgKhGmJIbHc9T57s0Xqg0okkR84uNPN0XtACY8+srel23bHb/5O+ylSvrP8Hl3bZOw8uSUiUDmoWMPjA7GyJnq0YB4+oZuissNmi64aTZcNu3zEODjFt8CCPB7fPOvlyKvTRV0Xox7/L+bdl/h2uWE4DpQOfSv8vZYEfMtvtmu3thv1mg+97UhhRItHUikevnXKyOmHWFo2iVgqjC43/QFzX1rLb77CjwiIxS01lK958803EP/zHxbmKYrZYcHp+xhe//FOgLD5KhGqo2xUyOba7Dft+QGqDMhVKVUV64gbUIAjRTZiTBDIzJoG2DffO77FaNoj1uhSZGWpj0XVJ6ZBSM5svCG6Y7qOyltyNQDkecHPOOOeOE5TDJxxE7EyaOiEkQpVeqhSajERJg0JS6bJ3DLGw+jIK2ZxQLe4xmy0xotAAEhUxGUKUqN4jR1e6ubJ0+gWSyV2FyKKsCxKyNISUEcohxI+GhP44rkAB/J+Bb+ec/7ev/NV/AvyPgL8w/e9ffeXj/1MhxH9MEeiufy99FQj8ZKU/zKMPAuSYii0+o0ubVcTjGGuxmGOMpe8c2+32Dto1dZw+pdo/dEGmvB9xHHZnZLoLCBZAbSvapi7RIT7TVi1j3yPbYus3xrA4PUFaw3w55/RkgdIGW9V0Xc9mu2W93uB9oGnnGAlx3JNyQlvL6B0xpaKZmE4+h05baX9C33csljN07XnzzUe8WO9pcXgAUdLb+25kPltQGY1sZ0UAP6t5843XOTs7xbmBJ5885vnzZzx+8pQkFUIbREpcnJ7y2qMHbLdXrHc9JydnyJx5/eIeo3fYqoD11jdXNFUZVRo7K26O6fEwxpJSICVxLDRBHV9rKQ9uFzGxpspbc2iJKynJk2j4zqSQCclT2uN5WkDKDa7UZDTIZWQrcwG5RTKzkxXLsxP2+x3dy+ekmI+njwITLFlwxcZcoo6YHh4hJoeH4MfaRP4gnokYE7YOFN6h5Py0Zbj13Ju3vHF2ilW5jOBEIu4lFydnnJ0a/KiZmdfo0o591xdNQtRFh2hK0RhdQMZS4I4qs6kELkeqNnLRGIieAKQYcCOFD6WLdXnXO/a+aIWMLIDdgcQoMzebkbzxzJoWiWDzyTUPLgTL5QwpYLGoaSqD6yMpSpLPDLIIh0mZGEulnGJhzCilECqTiXjnyUEhlcCPAyTPclGT8YRxoKoaMooUMyEKQkyloM+JFDMvX2yoVEUdAjMriDIzZo8QGpETMlPuEaPRtuILpxVNkKQ48taDJUY4pKzJWdG7QNNqzJTUIHN5HkLw6EoTokUnw6qCn3r3guc3Pd9/uWMYA2TFg6Wh1gEpNX5MGJlRJlLVmiQrXq4T10PizCQqk7HVjAUFpJoNrPee253H6kyi49QaZloRhaDzCeHh1GUUYI1glOBSJo+Rb39yTe8dQww8vb4lZcmsbXjy5CknqxX3L+bcvzhnGD3P
n74gyYg7HHZ/0tekS4IiF0gHEbEQSG0Lz0wZYvQIKQlJcHLxiPMu0HcDg5fc3m5IeLpuoK4UYcJ3HKS3hwIrpfipqJtDoaXUp13phwLAWFu0h6KsdSndaWtTAK0EOURcP9Lve26ur+n7DiET/X6HzJFFYzld3OPi/AwXR5Q9uNsTWuvJpLNgtjrBdnVBkIREcg4tzRSQ3nF1fcV7773Hl7/0ZX7513+rYDsEPHrtNR4+ekRGECNIUVAJ466w4HoXkBHu379P3SxYzGbsdmuGcWQYXXGKJjC2Yu8NwmQWK81ibohjzdDtSAiykEhlSMVOjhSG7EZ8KLBcMWUoHsxmUgjG4O9QS5MZSogCcs0IlLFkH4or0JTRbaaQ3HOegpVjQsjIYjFju5mmKQiEbhBmXrpWpgLd0YyJqvew3YGuSFJzKKgOcy0m84EQmpxHEGFKfEg0aoc1P/qp+HE6Vn8c+B8C/1gI8avTx/6XlM3j/ymE+J8AHwJ/fvq7/4xiK3+fYi3/H//eP2KaV1OqQzGJ2MT0jysOjLLx2aphPp+jtS4dqvVNCX4Vd7lwB61UaSemoz5AiDswZGYSFk4//5UCGYQo8SxjaT32bkTqQyBnGfctl0XDlOOSWW2Khdo7WqtYzWpeKiAFbFUzayqczBMzYxr3TQ+oVOI4BgwhHNuf3dDjXOD+/Xucv/6Q3/ruJzDUGAW1MaXbZCsW8/k0aiyi+q985cucn69Yr1/w/vvf4fnlC2LMyKyY1RVdF2kWC5q2ZbZY8uDBOT/44CPe/cIXeOPRQ+ZWM44dt7c3kBLzxrC5vcW7obgCM8eTxx1889N6KjWhLl7NdXxVxC+lxE7070N7/OjSRKCVBfLErhJFx1PK4fIe5VSYO7K4587Pz2jbluura/b7XcE3cPj5rjhIcumoHfIG5RTXcSjkDy39cv2e5/Pf92dCiEwt4+SelKzmLUNyLCrF/dUckTwxG2KO6NBzfznH5z0PVhXOKfqwYLA1Q4zs3IiPAR0kTVWAtjKDSJmh81hRkaLkybMB83DGyayhVT2KzOh6etcTckJpy9Vux2aAzW5gtVzx/MUNo4sYJWlqS2UMt92ANmWk/2L/jNViTmU0tSrFdF21KCNY728LJV1KSL50o3MC22BUhZSCFMphSxs15RZOfDVdTaPmRNu25FyK+5J2kI8Zh5Vt6AeHcyMxD8yMQIg0dY8LaiMDWgpyFGQl2d/2KBtZzDQnJwvamSC6ch96X7SN82WDoYKU8aNDK41QRWIglMGHyLw23FsavvXVN7j51Q8YdruSgdgFRCtRMhfdSRL4mOlHD8qSNey2A5UNzBtLYw1C6xJJYi2+76h05mS+wAZHTWKhNcumIpLZd1vGIeLDjLjPbAfHOAsIK7na3pJlIiuJMIaxczRSM4TMk+fXbPY7Xn/9PovlgrPzc24uX4D83Yzlf4DXpOuUCmxl8L4r6weKSCmkhC7xRLaqaOZLlLHMlydst58UXMB2yziMyJxIJy2ZMhGZkvtKkSYo3dLP6Ht/p27UcV2LkRQPkOJ8PLgJIRh3I7fPb1jf3LC9vSG6EZETQmSqRvHgdMYX3nmDHBM5JipjGTYdOUA7b5nN5yxXp8wXS2xVgZKIQZZDudLEcYorSoHNfkToFsyCn//5n2dImh98+IRvfOPnePcLX6C2mug9Q5cJwx68J6QSodOPnpgdb83mGNPgEyjTIJRByILySCmCNERpyTJyMp8Tw20Z0caafTcAAltVdK5Q8gvTq4w85fG1Oazl5XWUE9NqHMejmQgoGJzJkKaNLYdkWcaKQhetlTYVDL4YXcaBtl6itr4U3jkTgiDEQnsLSJJQJKWPJPWkBEkAh4IKUYorMRFEM9M9EsgxYFSmyWua6kfvEz+OK/Dv8KN3mv/O7/D5Gfj3fq/v++mvKZTVUghNP2xC0h/iTmprmc3nIKHvB9brLSF4pFSfAofCNPZ7Vax+GO3kuzbkHdaBY5zK4Uop0Y8DSM3gR/qx6C+kFFS2wtriODk5WRHGHpkCo3esr15ATjQqc2+1YLfvCX4kWoWtakL0OOdAqil3UJFCOI4CD7NlbTT9OBJi5ObmmsXFCaenNc8+3FPXkllT44aBN998vVDktaWuai7Oz/jZn/46Vme+991v03c7TlZLttueWdNQzWpOV3MePHjEZt/x0UePMQq2+57vfve7VErw3rd+js31c3T2PHv2lLEfaJuaoATEglQ4zFSzpIR38cqic9AiTP//EB766nXXQhfHke5RmwXHbmLZLItgOUs4ZMKVwriM/u7dv48Sgsunz4ijwwhBNtXxpOm9B6BpLeMYJiaMKKJIcRfKerxvhPg966o/iGfCakUt86TSENSV4Y2HC1L02BxIXhKjZOctfeqoasHNi4GUK1RlOakkSYAnsR06fErMq5plOyc6Tw4BLRRDs8c0Ahc0bhgZ0sD1PmEkZCFJWVI1JTg1JodqWvbbNbd9IqnAJzcFoDtrDNGPZJ0Zg8eNieADSkieXu0hJWamwhhJZRTzWc049gQvqWtVFtvkmc9qllO8VN8PaJkxugA4m9qWNv7BlJMlUh0K/MOIJiNypDKGYSyjw9rW+KHHqLJpkUvnwadIyqXr5kNEEJFT51lJwWpRc3LS0g23WFuxXfd4T3l+RbGKKyMLOmXfoaVg0+1ZzE4IJVGFSkXee33Fh1fnXL8UXJzMyeO+wEEbQU6uoEWSJCdJPzpC0tzuR+qmpfcJZaDrR/rR8fbD13j2ck13uyOHyMms5X4jWVZgrJhAo5akLB0z+u3A85s96TUBUiKMROliQJi1FZvtDlNZbNsyjI7dmPjkxYblGGg0zBcz/Gb7uRgFZpi0oMVlqpQkpUjTzHAhIpXm5PQcGBncyHqzpR8GpBS8uLyktoZZXRHHkRxDcXGKwh074F0Ed/DZg0zj9/y9pjUv5cNaIui6ju12z3q9Zncz4PoBkQONFdw/X3LvdImUGakDdaU5XWg22y3PLl+yWl6w70ds03JaL5gv71G1c9A1SZoJPVN+klKaIAQpRvb7Pd1m4OXNHvXJNbPTR3ztp75GlhXLWc3JoqHbrrm6fIafV2yuL/H9hhy2+ODY7DY8ePgag/O4ODBv5rTNkkQqbDWK1KMbBqJQBTC8XGK7PXWuEGRaX8bjtqpJyuBD0Qwz7QOlw/TqYby8bq86yA9FVc4ZqXWJJ8qgtClSBClR1pbncBSkNGnsnCMrgVWK7DtyiqSo2O92WLtDqYahMrhxYNd37IaOzvW4MOKiK3tGPHQb74rmmKc4vBwIbqAy8HAJj82Pvic+F6pEZSxnD944dpMQkGT5x8iUqLWiMprtZsN2uynkVKUK1Gs6xbzaPcm58IvENDI63IUH59iruAYxKRWP7V1KovjoHDFFNvtdEcDGcpJpm7p0SnSB5glKxtduc0vyA7VRCCO4d7rkZrNlN043i9H4fRGKV3VDOAj0xKejV6ScSNneMbgCr1suLGenNY8/sIzrNcvZjPnFGd45yInTkxV13fDo4UNWyzlhHCCWYjTFzGsPzwlhxen
5GT5mvv3d9+nGwHq7Q6RAQtAPA7/yj3b86T/5i4SdRNSaky99gU+eXnJ1fTPpziQplKrqMBuX+U4nVWCt6VjUHq5XO1aH9+jwvynfFTTFnpxBqOnBKx0tKcUk0y6GAmMMpycLzu6dsd1uePbiZenA5Hw8NU6TR4xRvP76I77wxdfYbLd88MPH3N5sicGTskJMnatXT6Ofh01ETjbi8ppmvv29H+BiZjW33Fs2zOoWISx9l0E0ZJ0ZHOy7gJADVouCPqgtK2UJsRCa97sbxq5HZTBSY+vM2WpBRuNHxelCkfzIrstshj1DzCyTZlZrwtDTu4SWiouTBfO6Yr8qjJ+60cxaQ1VpQmpJaNzgJz5VJHgPMeJixEdHPWvphoCuZwyp6Lcqq0imIqQMTIwx4tTVFTjfFeBoeYU4xEYdECHFCaiAzNAVvUg/doxjYDmrOJ3XyBTo9v3RDZwR+JhASoyxuBhpTVlvLlYrfHC4IVMtDNv9FXU1K+MXypnCDY5lOyPtO7q+nNb7rkfJFlE1KOPptyM/84XXuFlWNFKghGFwPYgy6tVKI4VmDIm8H1hvR0bneXa9oakMse9Z946QNU9f3uBTLge1lFBKcP98wdlcI6zC5cwQ4Z98/wkf7i9ZzhqkbWA6ROSU8EPh9tRKIuNIdB1GC4YxgVbsRodbB2ZaYpJEoBFFhPATvZQUrGaamOHW5ClOyJbDF4Htdsvlc0k/bMsXZMGjN/c0TVPGyiJzdn5KU1kQxdCw3W0mrWdhipUg7fLlZU0+jBvLwS5yFxRcVjkNWXNzvePFyy0vLq/YrtdsNxvCtD63Gs5OWx7ev+De+QmzxlKZ0s263uy4ur3i9dUpl7dbPBJT16gx4GPmZrNh2/WYqmKxXDGfL5jNCti4bPaJJDKdH0s2LgpdCUa34ebDHfPla3zzp36GHB3j7Y7HV99jeHaFVpkYOjabK1zcc3X9kve+9B5vvPEexlaEmAkhEWREGnB5nEwDid125Pv7gXlTczqfI3xFJRtiCjSNZTOWw4K2FT6WzlyaIoRKL6N0g3KCPEl9Ysz4kEqAMgX7kEvHZXKUlwmHNgapNEoXIHEWECnA3CwkPlE4U5S9ABFZv1yDF4R9D5XBh7Jfb9fXuL7DDx1xHAguEXzhV/pQwtRjLmL7kDNeliSSlsBXXl/yy7++/pH36ueisIop8fHz65KFFCMxRMZJFG0EvPXaQx5enNONjhjvMt4OxcjhY6860g6tqE9vkqVjBZ/uUB3HUMfvW9xJm82OrhtBCqSR1FZx7945J6cnGFsyyMa+Iw0949BTW0Mly++yWrQ8uLiH2vT4EJnN5oxjj46psDqmjpXI+RgEfOjs+OAJKXG7XnP5TLDdvIHWmcXqjE8+uOTR2QyBp+96zk9PCn183vLuO29Digz9wNA5KmM5f3jKcjnn5dVLkhvZ7Ttub24JCLTSRYsVEvt+x26/43Z9w/lyht+XRf2nvvJlrm5uePzxE9zQk7U6apFCPAQ83BVPd93B/KnX99XC6kDFLxgGeRS+H96jAv4rOV3H0830feuq5uz8jLZpubx8xssXz48aNSkkQglknFq5soj5v/yVL9HOA/fuz7l//5RPnrzg6dPnbG5HxiGUAv4zBe5P+sq5xFAjiuC/ahfUWtFYqNsKkIQgySHwaDUH1pzWhpVpCXLgNjqciIRxTyUFi8bC3BB8wOoJFBkSwY8QLUPnaIzhxNaYSnFeR7bBsBk90Q/Qe2KCpq5ociF6LypNWC1AG2IK+N7jhrEssG1Lcg4lDIKMVBlhQAhNZQwnJyuUgHsrg1EQgyangCAQxoCqa4w21FWNlBk37HGuJ2eJ0RVKmWK/To4UE30/low2YxFZE4NndD2jG2lnLYuZxShACOraljDdFApXZ+r6IlUBsUqo6zLm228GQjSEJPExMbeWLDzeFwPLbr9nVjdoY8pGKgo2ZrPvaMc5i7ZhzFtqMfLG2Yxhu8UYzdnqlEzCDwOEcs+1rWLWLslq5KQzXN7uWQ+RbnDsXClMe79lDJl6vmTc73DJEdKIqSqiFuy2Pb/14RWPbweCSrzZ1LSqFKM5JWRKaDRSNtSzzGZeQRwxMpHCSFRl45JJsNv3VCFjsjzoeH+ilxSZSgdCBrJDymKsyTmRgmOz2VBVkJNnMZ/jRs/L51d8+StfRknNvttglZq0S6JECEnwOR7XgENMWabsPyHGIwIoTVOVlBJGG4ypcYPn6SfP+fa3v8d6v8WPPVYJKiV4eHHKowcXPLxfsVwUvtthXTTGYGzDPmnqJLj32hf47geXNIszlKkxNhAPdHYyKXi67Zphv+VGKcb260Dm5faKHHuUiPiQqHWLMfCFt17n5sbx6//ot/jv/Vt/jKauSSESOkcOHh8GkhK8/qVH/Bd/6z/n2995nz/0h/4oVrU0TYsQ0O+3BR9UCUL2SGkZXWA7OLYDfP1LX0SniLYWGRvSsC2cLiULMFi3aJORIqFNTXYOcsluFFIDB6ZgmU6kqciSUqOnqYNUJWvwcFgu525djAq5SDliDvjkkcaQo2B0kd2ULqIS3Fy+JHQeN9/SS0rCxNAx9jvG/Q6/74iDw40B74tuLU6660Qu5OIMwrZYlTlRO776zuuk9OGPvFc/F4WV94HL61ukEMdYERdBa4W0moREKotS5rghi+NJtQSuykmQrKabMR5caNPGVLQ6CTnNvY/jQCBP0EJyETIOPjCMqVTExKL7ylArw+nJCacX91meniET3O5e0q2vSaGIUQUZREarzOsXJ8gsuNp0WF1RtTMCoowDSdRWkWXZrA6FRZaFz6JFKVxePN/znX/yfd5+7zUu7iWefLji5X7DyWKgMXNm7QJlMicnK+raQIqQR05Oa1577Yucn57R7fdYKbC25Wq95cnTlzx7eUMSgvXmqthUQ2I5a8gBZqsFmkv2Nzcsl4F3H54zt/C9Dz5icKXlTi5Zisl5YkrHUa4UEpkyxzopl7aqPOT8ybJIh0lMblUBEIZXMiFjDtO4bhKbJxBGs5jNuLh3RgqBjz94v+Q4TnR1GSOy6OYJURaBPwFpMvvhmsXpHCEjy1VDO3uLt956je2u4/LyBZeXL9luerxL01z9J7+LxJzZDomYSlH59sP7BAQxOrpxZHCRIcDl9Y6H6pTNTWTTjygjyUmhskZmRfABlCLmErHR9yNhLEW30ZpZbThZNMiUaOuxxKSgSZVi7Hfs1nvm7YzVvEHQEYCr2wEqy94N7EOPH3tyDsxmNavFfBL5GqwxKKlIgyOHMpYjeowKEDqc77FS0NqMsImQLbe7jEsKHSTjMFLr4raVymLM9D2AlMuzKYXAp4BSoHQmZ1dO2mRGnzGywvhM3A30WlI1NbpSzLWkmnlGn+gHz+gcw65gE/phR/WOIXSKj59d89qbp4x+RBmLqYvWJERftIDZsNt7jLLURjL6kSQVg3c8eXLD2/ohOmd2Q09tK1RTMY4DoQsE59j1ni6VxINWR1orOVnUvHPusUqTckG11FWiqpqShzj0bG6vOJ1bkpY88YpnTzturm7xoYzGfu
Hr77I6ibSNQGG5nReO3lmjWczmNHVNnwaie8RtP3J+NrK5jgzjJEmYGTKSMY1lTftciKzE1Nko3cmcmTL0CtW7naj2MZUcONtYNtv1lA/b0ne7aS/I02G8YBa22w6jJXmK7iqUlHR32JUSKS1tM0NqS9ftubq5pdt/zH67Zeh7pIC6Nrz+8JQHF6e8/uiC5bxGK5CqdLmUtmhbU7dz5ssVWhlerr/DerdF5JJnGETRF0tZuFrEQhhXxqBVSTzw3hHrwvy7enlFTsVJaJVmVnUofY+cE2erE77xMz/Ng3tnCAyud8yWLUoKfBx4+vJj/sGv/Bq/+Vvf54/8wh/lvS9+ESVLWkO/3+LcpgSE5wqrDF0I9DHhtOTdR6c8PJmj/Loc/kyFsHOELlExPkQsJYImpxJMPToHFI6k1JpyehYkDz4m/BQFRy6JDuUADz6E0nFlMrMJVYR2cBzbGWPRVpMGzycvb3j27DmjGcEF/uE//AfM5wseXlxAW2PrCi0g+hHX9yVZxfmJkxU/ZVjIOZNVxKgGKQR5+JB3vw6LBfjxn0Fj9QdxJTKjCFMcTfnHNMYiUobokeQp8PdO/PwpjMJUOB0KJfIEnxR3H3/1etXNwXEUeNftcj4wjOOxgDugGlLKjOPIYj7j3vkpN89fkrzDu4G2siUTahphISTGaFarJS5mvBuRUlHZihQj1tqiF2ha6qpYz5XS+JRIqZxohrEHEbm53vKHf+GCflzz4MEFl5cDu/2O185maCM5Pz/hjTdep61ryJGnn+x4+PABF+f3Jlq8oDIGqQzNfMF712tCFuy6oWSR9R1GK955/REnbcWTjz7kxfPnzOczrq5eUNeGedvw8P59Hj99jpISayu+8Y2fY7vdc/Xyit1ux+gcKZZsLA7gVfLxoTiI94FpzMenbuTjezh19ZAgJnv/2dkZJyenbNc3vLh8RnAlRNS+QnxPMR6NCgc6e+wSv/5r30OKL3J2dkJVSYyW2KVhebrg4Wv32W17Xry44cnHT3nx/IrfjU/yB3WlGFnv9keB9a/81geslnOsEXg34pPhZtdzfbOlXc558nJL73oiA8XolKfxhyjUYRWIIRRgbUxUJqKlYqwCyxPP7X5A15oRhUaSkmexPCFLS7/dMfY75osKHyQxiyJqJdDWgZgnaKyUbPtE1xcHzRg8xmhwESMEUmV0zqxshZYNtcmYukYqj8yO6BLbmysGF6nvnRR3kAc/TNgSMkzsqmJSkcQUqGwNtrCvYoqMLjO6RIwSJQRJSFwOyCwIOaGtQuqCXLApUTcN+96z7zwhRE5PF2SZeHl9xW4/UNkG5zpmbTNFJgm8dwVHYg39uCcZTdvU2Fpxs92yWDTcrDs++uFjzk5aYggMOVPbCrcrQmdbGW77LVf7DqTFEllWkvmiZTlr8arFiwYfFbIpKQk5Z17jEd77iSpeuGspZuZn93h4/5wwFJr8apYR9MhUDjxIiW3n+MnwUBt49/ULdj6xOHmXE/09fvD4muv9nuhycf0qi3Pj56KuCiFwdXWNj4lhGAk+o7RATR3Tk9Mz7t2bc3n5mN1+S2UtDB0hek7PTthvb8vhOxZ9VowJJTVu9Oy2HVVVU9cN1eTG01rTNnPqasYwOC6fvWC97th3O8gBayQ5Oc5OZ8ybmi+8/QavPbqP0RklA1I6pMgoU9PMlixWp9h2AboiS030oeTkSYmWJcIpxVTcd0D0EWVVEaaHRCAQXCAJNx1Yy4E4x0TIgTH0pMqRQqKSMy7OLD/zlZ+mkoqxT2RXsgGVVFytr3n24or/5lf+EafnZ/yJf+lPUNeGxbzFDzvW188Z8sBiuULLiuAEo09svaNLiZ99dA/LWEb1tkRNoRtQ/ZTlV0Z3k33zzskniyA8T8BnIQuU2PtpFCtLSDNTpzDGiPOBLEoqQt3O0NNIsrgGJyF8LkkcKWb2+55dNyBOC5m9spably+4ffkcrxRSSdqmZj5r0FKSoifFOwOVECWC5zC9SNIx9oLYe5Z2z09/7RTn9/TDP5sr8Pf9skrx9r17Uz6ZLhRsqTFqytabNRA9JydLzERdLvEG02asymb66ijwwAo+FFVH/Q537jRgEk/LgzFz+tAhruWVRVyUYmkcHUZrxm7P5vol0fc0laKyGqZOSxaSNAE1rTGcrJbs9wO9H2mqqugcRsesbo409hAnx8tENk8xkFNkPp+xXm/58INPWJwtWSw0Q3evCARbxayt+MqXvsQbrz1C5MRHH37AwwcPaJqaxWzOZr0uwZuzGd1ux3K55Bf/8M9TNy2//Gv/mKtthxCKi7Mz3nvzDZ58/7vcXr0oAv6+4+LinK7rikXXeyozjWBi4GZ/w+Lkgi/ff1Rgqn3H9fU1w+aWft8xurHkmMU45QiCmAouRHmHYr4TtktZHBhyGs9kBLoyPHz9NayueP7skt12jZSSuqlLdMQreoiD1VlJIArAAIbNredv/+1f5fR0wZe+/C6vvX4fWykqJbFWc3q2YLmc8+abD9ls9vxXv/4qJP0nc2VgP5STn/OR7z/f8Za0hbKtFUkqbrqOgcx1t+OT61vGEMmT6aNuKrKfEgf6iQuTwY9jcRMhUFIRTyueb3qurndU8yVkTyMkUnlGYAiZ1ekJMysIWbDeDTy72nKhGoLvC0U5JTrncQmSKlqh0Y+MIRRa+zCiEZhKMNMKqWYIlVhvAyengWgyi6qhbRVffHdR9CJhILqBnCcifPTkCThbnse74nwc3TGqKISEVBYX+hK0WtUEwbSRBpLIRJ/QImOVYdY0+JBoZnOWITMMjpwzu92G/T4Ut58oxbhEo6UskUlYBueo2wo3Fju7kDBrW7phj0uJ5aLm+sUNhkyWvhg5QkJXNft9dwx6HeNA14209QwfInVbgned9zxfv8QHQa71EWR7WI+sVoTp/dRSIYWkWm9YVC0vrjdIU1GriEqFmxRT5sPLWx5eLKm1hBhpbcT5kdvLNQ9OWlpb8fRmw/P1lr0LSGMJzvF58AXmnImh+EKV0ghiwbjEfMyde/Ptt7m5vSyGlxiBzH63ZbVasDlZst9uCF5S1TVuLOO9qqoJYerm+kx974zFvIj5b27WrNePWa83JX9WVggSxgiaStE2S7743jtT9NLIhx/+EGsVSpXPqSpL1WSqUbLeR1AbhK6QxkLObLs9q9WSHGLZ83QJF1ZaAcVFZ60meEcMBUYdhXqloyIgq8LiExkhFLtNx9P4nLcffJ37p/fRSbAfempTF66gyFzdXPF3/pv/HyFH/sQf/0WMgqYyeNdz+fQTuv2a2dmSgGIbFZ2D693Adtzz7hff4bSpkDmAKN24gtORaGUREwrBx4SQmqwyOSRMVR/Zjnka7cUQS3B6BmsqnHPTaDCBKJrANHUm4+TelMqgtCmjbanRypDzyDh6us7hQqR3vowZY2K325BiiWSrrEZqSQqem6seIUrBrqXEvOJkf9X8lHIsh9XU887rM87P5zz+YIfzn/OOlVGK12ctMBVAKeFDN8mV4aZbs5YlMmKxWBxhmsMw4JwjpPCpDtaBRfRqoZTSn
ZMCOJ7m0zSqOggVEbIko3NXuQpRsvCELCfUymp26xu2Ny8Jw46mNggxjfKmkyFSM1+0SGmADUpKxhwRUtJ3HbrkbtDUNdpWdF1fCOjGlkBVIZC6xCX0GR5/dMkfe/sN5vM1f/gP/yv8Z//pX2G7H/nal77M2dkJbVNze/2Sk9WCexcXZDJmynBLNpKDZ1FbbjY72qrii+++xW635eNnzxiGka9/9Stsb2+4ffJDcvRYa7l375RhGFA7hbF2EsuX1y94z3e/9x1QP8Ray3K14vzsjIcP7tO89SYxBDabDbfrNev1ms12yzgM5GlQrvjtcUXH8awQKGtYnZ1xdv8e3Tjw/e9/Hz86rJZHjdfhdzngKkpxVd5HpYvOKk4F6+gFl8/XvLj+Fd588xFf/NI7nJ0saZqKpmnIKjGbN9R1VfQ1P+FLCIGqLJkSJ2NsRT2bUauANoqQLD5uaZctHk+Ssny+nFrtVt0JXKcxa5EhlxFsDKXtruuW203HfvR0LhLHgUFItIGrwfHyZs3bD08Rc822j9zsHNebHln17He3hJTxEcZQeEeRYt3OUoCSyJyJoZgc8ImOhNEVt3vPvhuZrTwzo+i1KbFPU3e4kpa6MSghcL5oMyYAPTF51NRRQCSMqSb9i0BKaOYzktQM7qaAZ42hXVQ4PyC1xDYWZRRZFE2S85HRB0IC52PpUPQlxaBezhAyMrMVwRcQqZCmOFSPzsCa6EvsFEJiVEXXdzTNgrPTOb4fsK3Ce0c/lAzShCYmhfPgXEDpljFLnPecBcHGedbDyN57QpJYdBmP5AwiTQ4rCM6XDYZD9EwkLQWfXN4imjNmekQHgX9UxNDf/+SKrBRES6sthC2DF9TVKcL23G4+4Xy1oF0uuLxec3OzI6XwE3wSPn1JqdBCMJvN2XcbOCASlGEYHbP5gqppqCvFcrGkXZzQDx1KCoxRLBZzqsqUTMgUyRlOT8+4uHiId2Uf6fcj33//CZvNGqkmp6mILFczFk2LUpKcSsdKioQgcnN7TT+MZDJNW9JOq9qitSP6TTm8y2K4mmxKSAVKeRbNjO/85m+SJs5TTImmtVht7jb6WPJrm0YzjMVIIITA2orkBd4NEAUxgEiWr3zxZ/jG134eI1rCkEnjgK3rImJPmd2w4/W3X+NnL77KV957E0Ngv73l9mZkvb5lvpihVUuipdNzHo9bnr685RtffofXTs6ROQGJnCKjHyE4ZIpoKUvRK0siSoEzl/szTzBuxP+fuj+JtW1L9/yg3yhnsapdn/LeG3Ej4kW+97JwJoIURQMZgUQrO8hGyJZtWcoONBA0sOhA03RAlkBGKUBySqAEGZDdgAYCI5MStnEWr34viluecldrr2KWo6Ix5lpnn3jvxQsDmXGZ0rl733322cWac4zxff/vX+SAah8iPmSieEz57Mz7R/aEk2LyoSO7BiQhUdpgTIGUCh9jFseEhJQKayWic3mMejC1TrkO0EqiyAVbGMM0bs2TFCFk5nFNfokHVaKeEl3aPiFEj9Udv/XjH+DGyO/+7Bb3S4iH34nCKgTPw/b+AxQnBTJlj5osr8zmXW3T0OwbjDHUdc1isUBphQ+eYRjo+37KEAtHMjLwUdH1ONw5TSqyBKhHH0vT24NhmbWWsiyZz+b88Ie/QVkUXL/+lqFrKLVAq+wUjRRAlkxjLMZkbyprNFJILs4sN7e3pBCZz+bHMU1Z1ccAV1Ie4TAhWW3XYK3l7es7fuvHf41////xH3O3/orZfEEIHdYU9G3DVoE1movTZ0SREZ+u7QghcHN9jetbrk6X3Lx7y7fvb+l9YlYVPD1dENKCodmzvbun29xxMi8Zx4HT0xXOOdq2wfoc97LbbHClm/KwIPqBwfVc7ze8f/U1WmtsveDk9IzT01OevXjJJ9/7Hs551g9rdg8bdg8bhrZjGPrsizJdcYLFVVXw/OVL6rrm+vqa6+v32SguZaJplAKZJrSSyRfsQIhXipRCHkmqhJYJqWOG3ydS6qvXN1zfrHn25IynTy45PTthVtfUdZ1f+8cV+K/pyu7COiMhKXG2rLE6E3glkeAH3NBydr6g2d/ljyuFTxEhpyKfdHxdBJB8AJlQQk7cNsn56Qn3N1vqOo+kiyTAwRAl19sdd43nZAiINOCjJakCVInQJQ+7jl2UEy8wJwJoLXNoryAvqhRBR7RQCAyaQLlc5DF0oQkI2jHSNCNJJbZdy9IoLuuCxjVUNheWCYguZqJyTAzBURQJY2R20s+vWj54C83VYs7D9oEQBkxR0Q8d1aSS9CELFvpuIDpNjJLg8wFttKJrIwJLURjOTk/QJmY/sWnrcD4XYQjDdttAigxdy7yuCH5ETeD4ODScn63YrRNjyEiYNpa294Qo0CZn350sFgRdsO08qi7pkuC+7diOI8HonIunzaTFnZrHw31NWcZ2UEBHJZFVgTOWQVlmpUaFmEn1ErqoeWgCJY776KEYQFS0m5Grczs1eSOdc8xKw2gy5+jXzzrMa6IoSpKUzGzNMAh2jUdKRUiepm2RUvL5Dz4nhgHvBjabNWVZ8/TJFVplxNEYTQiJGBzj6NntGna7B67f3zAMI0RFcDGLGOqKujaUlWE2qxAh5AmDA6OmQ1vmBIfF2TlSGawtaLuBm4cNVWWZFSVWq0x1iQGVsgdfthogpwi4zA9u+556uWLoW2qrETpzPgsrgQqAIchJ4j75V4WIloZAJAXF1fkz/spv/Q20KMFLwuhQJLRIpORBRH77r/wm1VWJNoHz1Yyub7m/29KPI9JoZFHhg+H2vuWL3QM7ofnxX/mrfO/ZOZXvyJOf7Co/jgPCZW8wJQVSZ/Q4xVzwxKkwPDR6IcacJRjCZAosUFofQY7Mt81P+zA6fMjkjMIWzOYLbFlOySHpqK4vipLe9YjJFJqpqIJECB4ZAzG4vFaUwns/FeQq84MTuJCbqbquWa1WeO9pmoaT0zPc2FCngpQsX3/T8M27SBTf8cLqmAmUssGfnPxpDmoMMdkaKJFv1ti1dO2e29sc8zBfLpjP56xWC5zztG3LMIw4l8eFCTLnKibkNApM8eCKlA5JKccbUaqENYpxVBhjcuxLoXj67AlPLi9wTUP/sMWkDMcLcSBnZ4Rk6Ea0EzTDiC1LtNHoQtHts7O61pqqqjLs6afojhiRUlCVFudyOjgClEqQIuv7Lf+r/8X/hrF1fPEnv8P3Xz5ltw3s92sWM01ZKFaXlyijafd72qbBDwN1UXL39g2EyDd/8hO+fHfNzS7L5i8unrFe3yCAT5894+nFKQ9xxKiAsXn8MjqVR5PC042RYXCZwyZEVlEdRq1kt+IQAvv9A5vdPV99nYvE+WLO+dk55+fnfO/zT9Dqc7quZ7vZsrm5Y715oOly3M/qZMX3vvcZSkq++fIrNvdrZEokkf+QIE0qpRA/FMFi4t4pBCQxhc1O/LkkKIyeeHQJIXJHeX+7YX3/kIOnZzOePX/GJy+fP+Lf/fquGCPuQNQlsagksetJOuCVZvuQSD5RKMntdo+IAe/zyDa70ZtJCACQxRfR+Tw6kAJF
wpgcj9G6lidnS4wKKAG6FDgkwy2UtqTQNvOqrKVtPMooCitJpkBFOa2jXFQJkQ/xSETESU8tcqcoUklSHlMHUmOoS0E9r8CNYAWdh/eblkYJSiGYi0ByESkhCcUweIYhd/UIgRMhj0iKAlOWRCT7oSVut3zy8oRCarquYd82nJzMKKqS4ANdOzI6jxQJYzuqqqIsLU3X0w8epTVVYfLYUQ8oVefiyY04p+nGyH5oCSEwNAPGZE7OGCJJSCyKwhS0w56m27E4XbJ92BFTorCKqjC8Ww+8fejoh556VrJYLWnXLVW9oPUd9+3IkGTmoYhEkH5aYxmBDSGipSAMDhmyqiohGNrMw1HBwxBJKmctAkgk1iiElsjakqJgSI62H3j9dosLp0TXsjpZ4Zxn2/VIW+Ck+gsHgUKIT4C/Sw4eT8DfSSn9G0KIM+B/C3wP+Ar451JK6ym94N8gG+e2wL+cUvqHv/ybSKIuECIxjD3e9xA9ccyNVbfZcPPmDbe335Jih5SBEUNpdeba+US7H1jfP7Dfd7x6u2GzeSAFP6G62TNtVkNlNSenJ0iRkYthHFBuBAJagtJgCpUbezFy8fQMW83xLuI9DINDJkG37zBKZJ9JlVEcJgW6AKyokCI3i0LB4AIiKtpdS7N54LPPPs1+TNFjdeYHWWswKhcOpYF+9LhxRAtQJP76j/8K3z97ie8SIXW4cYtVEiMCLnjc6Pji21fcDBv+xn/hP81GdoRxg5IddaEJsyu+3M/4po3c7necloK/+duf8oOnc8rUEPEgRvADodsR+wYlMoIbRS6mtBJEP2Yn/AguSAppSTEQUpyoPFNMkBAEEtF7/DAiI6AkTd/SdH2md2jDarmiWJ6AVLgAGIg+c7CNUnih2LQdQiQKJchUNclqYWnbFoyZ6AMeaTKVIASHEnn0aky27nC+I7QDVVWhjcC5HmMNST7lH7xy9B2E5V9C69//cx/V70RhBaCF5JDELKWcFF5TJzr9J0PeuXNMKQcoj25gfT+y2eQDsqryuHCxXBFCpOu6SUnhSD6PRUIMeSQlpnGhmFy9JzcGSTa4zKRvS10WnKzmPH16jlFwc3fD2DVY9cF/yR9iH5JAkme4SkmqqsSWFUOI+M0+/65a542g69DKZD+uEKirCmNNLh4sGX3zaXLpjfzOP/595vOKwkp+9L2XGFEhCWglmc1qtFYM40jbNnz1xc+prEWsTrm7vqbd7ZFK8aMf/Yi/enLGl1++YuwdT85+wHxWUWmJa/cw1HTtDq1zurjzDqkVbnRsdg3Be8ThHolHodghIJAZclWREDJiklKk3TzQPKx5/fWXGFtS1XPOzi84P7/k6dVTXPDsmoaH7YaLy0v2D2t++pOf0LctIpHHpiJO2ZH5eYmA1JqDiehhs0pCIKdPPKgRjX7s5Ja/gFI63y8RCD5yc3PP7e09r755Rd8P/+Qe9F/1SuC7hHcBJTWn1RwTPEmN+KTomhYlJM4Ftk1EKIstsweUkBLvHAev+qM/nI9ToRVRIlFpSd97xsFnd+8oSFIRifTBMbrAoiywUhCTJIVA03W50AljDmYWufAnpdwgkJER53NQefKZWE1KeRyYp+SMXeL0zCC1RCszRdoYolZTEnv+d5EM13d95k/0fcBaBSJgYkSKSBABF3u0MSwXM6pCMHY9VmkGMvkpIVnfr7MhqLKIqBjdSEIhlcV7GEeL95JqbnKWoJSkGNlsBoxO9EOg6xLdEBiDIyWPtZoQAk3T4lyBNQadFEmMKC1p2hFjUnYFNwWmkEg/YIuC7e0DCMHcGBCCYfS40LAoYOxGkAYrFVqBMXncjfhgDWK0gtFi0QgkUQjqUnF1MkN4x0mhWdiICAOSlFG32DM0EVetsKbMOaMIkm7oo+Tufsem7Xn+5BJhLMNmjzaTguuXXx7476aU/qEQYgH8AyHE/wX4l4H/a0rpXxdC/GvAvwb894D/KvCj6c/fBP7N6e1fsCzyz9H3PcPQk5LIvkY2IyPOJ0LMAcf4iEuS12/u+eKLd7z+5hWbhw1d22ZZvcvWGFVRUM5sRq2rkkWtcWPLbFZ+SOqI+fVPJIw1LJYXzOYzfAi0fXfkysYpmmnzsCbGSFnUGGPQxqC0+khIJci+WUJmT0SEoCgLbm9vCd5hrZzEEpoYFHoyRTVJHhNGrs5XuEXJ0HX4cST0kYuzU5JzxMM25iaUph9JCmJ0/Mmf/CF/+T/3NzBC8nDbIVORieOm4H3r+HZ7g/OSHz5/xl/54UtenlTo2GWOLBERAylMbufjiLLmyLU6olQ+c47D5AcVEUffqlxPSbRWRDGN/5ynn8xASYluGHNOMJK6nrFYnU7oZFYBIgVj38KUm2isRU1m2zm/M0+/ytJmkUmXyfazejEBJgXDODCOjtlsRj+6zIu29jgOVErRth3ApOZXRFVTVNku6c+7vhOF1eFRe+xFlR/oj8nnIWay4oRB5Y1CZqWMFBLvPI3f0+4blCmo6yoXWvM5iUTfDTRNS9/3uWIOgRRD7vyO/gCAkBRWE0PCKMl8VnN1ecFqMSP4gevrN4TkSFIROJwr6cj7MYVmuTjh/PISZQt8Au08s1nH69dv2O12R5d1IfOIs6wqqvmMfdtgtGG50Ax9z8PDQ5akhkRIgd3Ow6zi9at32ax0nqHoqijx48jd7R1D39DtdzgpwTmaZkdIkeQ8vm8Iu8SLE8PQBXbbhr7fEG0e51QzQ4jFMfhaacvgA+32ge2mYei76UXK5ND46P4c7qFO2XwyQ90ZGUxMsHU/smnvuH13g7I/p5jNuHryhM8//5yT88zp8jFQL+YkQXYD934akU7F3FEB+qdFCzHln+uxJcfhevwx7z1CxmNsTl7kmt2uYxy/G2aIi8JgJnPc5yczSJ4oCoaoub/zVPMSRORksaSoqrxBJ1Ba0w4dIfgjCiuEJPpMLHcuEFxPXRUMux6bNAUa33k8AqEFG+cY3UhZW5IfUdpkTs8wUBczfN8ztC3h6JIsUCJn9QkJWhq0UIxxyGMVIIke6XPHPA4JQYGfkJds9AcuJXz0ea2rgtYHBu9o+pFhDIxDQLkBqRJqTCgtWWqBdz1zAtYkNAXWKMrSsN2CT4K79Z7CaoqiREiNCyMBxX4LzbtbuiGrkrSxcL/m+fOn1FVN1+eN3seefkw0XaLrRlbLCmtgNrd07UBRnDL0DrA0fUdMHmsUSsL6bksUsN5ueHqxopaKslAs6pqysMxrk9FfqSit5XJh2J+fElWBVhprFbLMB0dK2SA3pYTWkjCO6CAgSXxKGBW4OCkwcsG8siyrQ2agQiX4wadXjP3ITGnc4PHjQCSv91Bogi15t97ikHzvsxesgqd/iH9hWTVlX76d3t8JIf4IeAH8LeC/OH3avwX838mF1d8C/u6USvAfCCFOxBRg/qusj6MRtNKcnJ3SR0/SinbwDF7hBsl+N/D2+pr9vqVrG7wbss+UNZACy8owmy04OzmhsIYYPEZrbJkTQLQB7/KI++xsRVEWWKtyJqDKKF7b99R1fk60UCQ8IkWeXl0SI5kYHz1SyaO
a/cNZkRMDDhYPSkmWywXr+x0pBU5PL4A0eW4Z5LQvyHTg00FVaAoDVSEIoYBRE6Mn+pHkBFIYlNcQE4MYSTZyv73m8x98wsX5Ctc1jLueddczas0uNVBaPvvsKZ+fnvNsNqOyAj00KJnINhQRgmfoWvq2QUyO5cBkjaQzNzRktJnJlzCv8XDME0TmfTelmEeKzuUEhJToxpHRO0afKOsZp2cX2KIgpoTWZiKYyyn4+hjFgFKSoiiw1mY+V8rngpIZ5KhUYlaV2fAaSC4jus32gc5n6kTbthnhgiM3O8b8rJVliRDZMukXE18eX9+JwupwfVTNi5xOf7gO4ztiztw7Eh7SwWV9IqxPb4Mb2W1GHtb3SCGp6or5fMXFxQUpJfq+p9nvGYYeNw5ZrZAOoJWgKgqsKbG2YFZVXJydoBC8/uZb2ralNFNkSEy5o0hxInlmw09tNUVVUs7m2Lpmu2+5udvk79s0pJRYrVZYU9B1PVVdMTjHarWiMJaxG6isYT6r8RMRvBtGCjujrGZ07YgbPJfLGd65nOfnHDe3t7i+IY49DsFd1zK6XIXHlHD9nj70tM2eSCT5hJICkqEfelx0aGMRQnF++QSpFT//4ue0bY8bXLa9mF56rQ8BlR9UFHBsvj5S+B3yGVMiK5h07jyKouD87IzNwwNff/kVAjg/P+Xz730fYw1t12US/P2aruty8vlhtDWpDbMibEqYn973U1TQY5f3x3mFMUa0yahWVprltHQhvxs+VloLLs4089qipODqJOGCZowSWiitZnm2YLO94XtXc6zNhU8InpgiG2tJZGjbaJ0RWwcJmVGrUPHZiye0+8jJk0vOlyV+bOm6iI+SzUOLSYGrleXJvMCL/HyolJiVBb7fY6Wk8xkBBjASZrOK5bxGTpl+Q6ezcSYR5xPWFhgEMjoKVeCanjiJOvpo8CGSrGLTO9RsgUqBtm+zI7PSBJG5KFVZoDU0XZcDq5XMvLvgMLJi6FqEiAglEHIKyI05VP3m9jqPAWJC2oLZsmQuFS4E+r6HVPHw0LDb9RM/JBJkZNt5to2nKirStuHipATg6bNntE2Hc1uGcaBzA8Fn25R5VeKcY0iwa0aiu+e8VqRqwWpeslrMqCuD0BZkgVaG00rQjw5VzEiAlkCRR4B935PSyDD0DF7iBwdjmPYgCCowdJlMPwiHns/QQiAkyAgXc4tZLShMgfOJdtyxGwSVlgSfI0GSsdxsG9S7Gy5PZ9RlATS/8rMrhPge8NeB/xB48qhYekceFUIuuh7Lb19NH/vzCyuRERuIxz1ICEHXd8iqoGk7fvbzr7i9fs/6/p6+74ghUwCkTBBhuViwmNcsFzUFOX6F2CKSRURH8hJBRVEojM3qweVyyWw2m8ZImfAeYub9qCkb1fu86WmliSqhrJ6QmZC1TBOdJWuWPoTSI+IHJFJlPu+z5zVGg9GgtJy+h0CSCweLRk2KNG0yn0kZgXORKBTfvnnF989/AGOOjzIoej8SZKAPLUPsePbJE/a7LbGVlKc1pydzsDP+mecvWBQSmwasdxi3w0qDUDL7PUoQ0TO2uXknZF5VSkCclH4RkshTHCZRSOZCTeixlEd+E5PSM0RylI7zeO8ZvcenhDQlq7MJoJiaNpWy4W2IuckYxpHR5bg4IQRlWf6pKKKu6/LeF3rGLrvxA7RtmwEdbVAT9+pxbm9OK5i8xYSk78fpfodfSsX9bhRWk/rusT9VdoXko4MxxkxKDiSU1qSDOgzBsSo6ohgf0Is4JYh3TYPWGQKs6pqrixOEUHTDSNM0xwM6xMisrri8fIpSGmMlVVnS7js26x3L2QkJB+nQiYuMxqSIkIl6Xh/N6rTNTsZt12VCssjJ3eM40vc9Whm01nRtx7bZc3ZxTgieZr/LxHdrMUpyfnpK6TKBvjAFbTtglKLvetqm4S7lhd13Ha5tCROZP8SILTSJiDUKSWKzXqOLgiAFyUSUzlD13FqapiUlgZA6G6c1jvVmR4qRQmnqsjiGLKuDV8l0f47Q6DSySwfln5hUlxOikgBTFbx48pTLJ0/ZrB/46osv6NsOJST3b9+hjaac1SxPVpycnXH14yfElGianL+13Wzo+44wjh/g+pSjUeSkpHmMUB2Ug487xhiZsqty0e5jQIr4nSCvg+D27gF37pE2k26tUdgEw+C5PDtF14KHrSfpgihzZzurs5FdEQNKqiOB3Y0jcvAIqRlcgOB4viq5dg3FsmRWeUQpsVbRdAIZEj94ccmL05KZ8IwUGFnx4ukTnl49Y337Dq9KOg/ODfjQU1aaelawmhWoY2EV8aMgpsDoYbVccrosiRdznp1ZVBzwYy5Exn5gbEdCueDbuwe2O8fVvKLWUz4aAQpDIQ0iTIa+fkQEi7LZpgWRg8K9G4lxZL6YUdZL7m/XGJ39p6wtWS7nGGsntVZWMW53W1bLmqaNdF3PfrfHGM1yueJ+u2ffR7ZDpHUtn12sOF2dUZWK7WbPbrdFCLClBFsydI5231NaRV0q7tZ72iFS6gLnImNsUNKwrDWzmUYXRVZFBShLgRCevt/hnMf7EVFAUZb53hYFWleoYkb0EPuJyBuzf5Ip5pQioAgIZacmVSFEJLmRh80GYywnJ2eczksWlWG4EuzjQCsCGz+ibMH79QZbZOL8r7oihBBz4H8P/LdTSttfSLhIIrtv/uqrQIi/DfxtgKIwx3WslJ7ii2DoB0QILGdzvvnya+5urwkTFcNqgdGCuq4o7IyTkzki+WzJEgM6xdxY+5DXjhQonakG1ipWqxWz2WxSEOaIFKYzIpGfHak0Rie0LvA+UNgC7x3O5e8j5KH5zJufkpNynOz+LkRGXpWSWGOR0iDxCBEmewRQSmC0zhmJxDx4BXRRkUQ2RR2S4+5uS3v3JT98+mNOzQlVWRH8QBCOED0/e/dTGjGwMiOnl8+pFiue/fAl56eXmYPZtrBfU4QRqQXCWg7hxNF7RPK4sadvd4gYpkZ7sn3IJBEQGa1yLmJKiVQyB0yLCZ2bhE+ZyJ7TD5z3hElB2PcjQiuMLTCzBSjDdt+xXCiqssh1QIq5MIsqcx2Hga4fJoQp2yQkDZDwPuSfIUmsqSAFBp9jsJQpMMYweo9AHG2cDoWZlDJHbCWJ84G+HycDZPVLubjfjcJq4uI8UtBn9d4j4CA+OiylFJMB2cSR4uBbNZECJ+6UkgK0OiIUSmbu034c2O+2rLXGlhVFPefs9IRiyLLt0efq2hiVC5nSoJXBp4Gz0zOquuDm5j27/QYtJSIdisD8s52fXzCfnVLWNSkm7u9u2azvMi8l+jyWHAfYJap6xmwxZ7ffZySt66iKksV8xuiy6/J2u+Xy6pLL+RWLxZKH9QMSgTWWpu1pmg43jixmM0RwBDcQpxFXWZYUVUU3DBAirh8x5ZxysUAYTVFqdtsdUkhm1YxqtqRpe9YPD7x9956ma4ghsJjNmBX2w3hW5PsWw6SumIpcMXUl+bZkcYBUORMxxARKslgsef7iJWVV8fqbb3jz+g34gJGToWNKROdoNht2mw1vvv0WaXII9+
[... base64-encoded PNG output omitted: a 3x3 grid of sample Stanford Dogs images with their bounding-box annotations and breed labels, produced by the plotting cell below ...]\n", + "text/plain": [ + "<Figure size 720x720 with 9 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Show several examples\n", + "# Code adapted from https://www.kaggle.com/mclikmb4/xception-transfer-learning-120-breeds-83-acc\n", + "image_path = './data/stanford-dogs/Images/'\n", + "breed_list = sorted(os.listdir(image_path))\n", + "\n", + "plt.figure(figsize=(10, 10))\n", + "for i in range(9):\n", + " plt.subplot(331 + i)\n", + " breed = np.random.choice(breed_list)\n", + " dog = np.random.choice(os.listdir('./data/stanford-dogs/Annotation/' + breed))\n", + " img = Image.open(image_path + breed + '/' + dog + '.jpg') \n", + " tree = xml.etree.ElementTree.parse('./data/stanford-dogs/Annotation/' + breed + '/' + dog)\n", + " root = tree.getroot()\n", + " objects = root.findall('object')\n", + " plt.imshow(img)\n", + " for o in objects:\n", + " bndbox = o.find('bndbox')\n", + " xmin = int(bndbox.find('xmin').text)\n", + " ymin = int(bndbox.find('ymin').text)\n", + " xmax = int(bndbox.find('xmax').text)\n", + " ymax = int(bndbox.find('ymax').text)\n", + " plt.plot([xmin, xmax, xmax, xmin, xmin], [ymin, ymin, ymax, ymax, ymin])\n", + " plt.text(xmin, ymin, o.find('name').text, bbox={'ec': None})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Model Pre-training\n", + "First, we obtain a MobileNetV2 model on this task, which will serve as the base model for compression. Unfortunately, although this step is often called model \"pre-training\" in the model compression teminologies, we are actually finetuning a model pre-trained on ImageNet. " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# This script will save the state dict of the pretrained model to \"./pretrained_mobilenet_v2_torchhub/checkpoint_best.pt\"\n", + "\n", + "# %run pretrain.py\n", + "# %run test.py" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Compression via Pruning\n", + "In this section, we first demonstrate how to perform channel pruning with NNI pruners in three steps: \n", + "* defining a config list\n", + "* creating a Pruner instance\n", + "* calling `pruner.compress` and `pruner.export_model` to calculate and export masks\n", + "\n", + "Then, we demonstrate the common practices after pruning:\n", + "* model speedup\n", + "* further finetuning (with or without knowledge distillation)\n", + "* evaluation\n", + "\n", + "Finally, we present a grid search example to find the balance between model performance and the final model size. We include some of our results and discuss our observations. \n", + "\n", + "Note that the code blocks in this section are taken from the file `pruning_experiments.py`. You can directly run the file by specifying several command line arguments and see the end-to-end process. You can also run the file to reproduce our experiments. We will discuss that in the last section. 
\n", + "\n", + "### Using NNI Pruners" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from nni.algorithms.compression.pytorch.pruning import (\n", + " LevelPruner,\n", + " SlimPruner,\n", + " FPGMPruner,\n", + " TaylorFOWeightFilterPruner,\n", + " L1FilterPruner,\n", + " L2FilterPruner,\n", + " AGPPruner,\n", + " ActivationMeanRankFilterPruner,\n", + " ActivationAPoZRankFilterPruner\n", + ")\n", + "\n", + "pruner_name_to_class = {\n", + " 'level': LevelPruner,\n", + " 'l1': L1FilterPruner,\n", + " 'l2': L2FilterPruner,\n", + " 'slim': SlimPruner,\n", + " 'fpgm': FPGMPruner,\n", + " 'taylor': TaylorFOWeightFilterPruner,\n", + " 'agp': AGPPruner,\n", + " 'activationmeanrank': ActivationMeanRankFilterPruner,\n", + " 'apoz': ActivationAPoZRankFilterPruner\n", + "}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using cache found in /home/v-diwu4/.cache/torch/hub/pytorch_vision_v0.8.1\n" + ] + } + ], + "source": [ + "# load model from the pretrained checkpoint\n", + "model_type = 'mobilenet_v2_torchhub'\n", + "checkpoint = \"./pretrained_mobilenet_v2_torchhub/checkpoint_best.pt\"\n", + "pretrained = True \n", + "input_size = 224\n", + "n_classes = 120\n", + "\n", + "model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes,\n", + " input_size=input_size, checkpoint=checkpoint).to(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-08-31 07:17:21] INFO (nni.compression.pytorch.compressor/MainThread) Model state_dict saved to ./pruned_model.pth\n", + "[2021-08-31 07:17:21] INFO (nni.compression.pytorch.compressor/MainThread) Mask dict saved to ./mask.pth\n" + ] + } + ], + "source": [ + "# Defining the config list.\n", + "# Note that here we only prune the depthwise convolution and the last pointwise convolution. \n", + "# We will let the model speedup tool propagate the sparsity to the first pointwise convolution layer. \n", + "\n", + "pruner_name = 'l1'\n", + "sparsity = 0.5\n", + "\n", + "if pruner_name != 'slim':\n", + " config_list = [{\n", + " 'op_names': ['features.{}.conv.1.0'.format(x) for x in range(2, 18)],\n", + " 'sparsity': sparsity\n", + " },{\n", + " 'op_names': ['features.{}.conv.2'.format(x) for x in range(2, 18)],\n", + " 'sparsity': sparsity\n", + " }]\n", + "else:\n", + " # For slim pruner, we should specify BatchNorm layers instead of the corresponding Conv2d layers\n", + " config_list = [{\n", + " 'op_names': ['features.{}.conv.1.1'.format(x) for x in range(2, 18)],\n", + " 'sparsity': sparsity\n", + " },{\n", + " 'op_names': ['features.{}.conv.3'.format(x) for x in range(2, 18)],\n", + " 'sparsity': sparsity\n", + " }]\n", + "\n", + "# Different pruners require different additional parameters, so we put them together in the kwargs dict. 
\n", + "# Please check the docs for detailed information.\n", + "kwargs = {} \n", + "if pruner_name in ['slim', 'taylor', 'activationmeanrank', 'apoz', 'agp']:\n", + " from pruning_experiments import trainer_helper\n", + " train_dataloader = get_dataloader('train', './data/stanford-dogs/Processed/train', batch_size=32)\n", + " def trainer(model, optimizer, criterion, epoch):\n", + " return trainer_helper(model, criterion, optimizer, train_dataloader, device)\n", + " kwargs = {\n", + " 'trainer': trainer,\n", + " 'optimizer': torch.optim.Adam(model.parameters()),\n", + " 'criterion': nn.CrossEntropyLoss()\n", + " }\n", + " if pruner_name == 'agp':\n", + " kwargs['pruning_algorithm'] = 'l1'\n", + " kwargs['num_iterations'] = 10\n", + " kwargs['epochs_per_iteration'] = 1\n", + " if pruner_name == 'slim':\n", + " kwargs['sparsifying_training_epochs'] = 10\n", + "\n", + "# Create pruner, call pruner.compress(), and export the pruned model\n", + "pruner = pruner_name_to_class[pruner_name](model, config_list, **kwargs)\n", + "pruner.compress()\n", + "pruner.export_model('./pruned_model.pth', './mask.pth')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model Speedup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Note: must unwrap the model before speed up\n", + "pruner._unwrap_model()\n", + "\n", + "dummy_input = torch.rand(1,3,224,224).to(device)\n", + "ms = ModelSpeedup(model, dummy_input, './mask.pth')\n", + "ms.speedup_model()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "| Index | Name | Type | Weight Shape | FLOPs | #Params |\n", + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "| 0 | features.0.0 | Conv2d | (32, 3, 3, 3) | 10838016 | 864 |\n", + "| 1 | features.1.conv.0.0 | Conv2d | (32, 1, 3, 3) | 3612672 | 288 |\n", + "| 2 | features.1.conv.1 | Conv2d | (16, 32, 1, 1) | 6422528 | 512 |\n", + "| 3 | features.2.conv.0.0 | Conv2d | (48, 16, 1, 1) | 9633792 | 768 |\n", + "| 4 | features.2.conv.1.0 | Conv2d | (48, 1, 3, 3) | 1354752 | 432 |\n", + "| 5 | features.2.conv.2 | Conv2d | (16, 48, 1, 1) | 2408448 | 768 |\n", + "| 6 | features.3.conv.0.0 | Conv2d | (72, 16, 1, 1) | 3612672 | 1152 |\n", + "| 7 | features.3.conv.1.0 | Conv2d | (72, 1, 3, 3) | 2032128 | 648 |\n", + "| 8 | features.3.conv.2 | Conv2d | (16, 72, 1, 1) | 3612672 | 1152 |\n", + "| 9 | features.4.conv.0.0 | Conv2d | (72, 16, 1, 1) | 3612672 | 1152 |\n", + "| 10 | features.4.conv.1.0 | Conv2d | (72, 1, 3, 3) | 508032 | 648 |\n", + "| 11 | features.4.conv.2 | Conv2d | (25, 72, 1, 1) | 1411200 | 1800 |\n", + "| 12 | features.5.conv.0.0 | Conv2d | (96, 25, 1, 1) | 1881600 | 2400 |\n", + "| 13 | features.5.conv.1.0 | Conv2d | (96, 1, 3, 3) | 677376 | 864 |\n", + "| 14 | features.5.conv.2 | Conv2d | (25, 96, 1, 1) | 1881600 | 2400 |\n", + "| 15 | features.6.conv.0.0 | Conv2d | (96, 25, 1, 1) | 1881600 | 2400 |\n", + "| 16 | features.6.conv.1.0 | Conv2d | (96, 1, 3, 3) | 677376 | 864 |\n", + "| 17 | features.6.conv.2 | Conv2d | (25, 96, 1, 1) | 1881600 | 2400 |\n", + "| 18 | features.7.conv.0.0 | Conv2d | (96, 25, 1, 1) | 1881600 | 2400 |\n", + "| 19 | features.7.conv.1.0 | Conv2d | (96, 1, 3, 3) | 169344 | 864 |\n", + "| 20 | features.7.conv.2 | Conv2d | (59, 96, 1, 1) | 1110144 | 5664 
|\n", + "| 21 | features.8.conv.0.0 | Conv2d | (192, 59, 1, 1) | 2220288 | 11328 |\n", + "| 22 | features.8.conv.1.0 | Conv2d | (192, 1, 3, 3) | 338688 | 1728 |\n", + "| 23 | features.8.conv.2 | Conv2d | (59, 192, 1, 1) | 2220288 | 11328 |\n", + "| 24 | features.9.conv.0.0 | Conv2d | (192, 59, 1, 1) | 2220288 | 11328 |\n", + "| 25 | features.9.conv.1.0 | Conv2d | (192, 1, 3, 3) | 338688 | 1728 |\n", + "| 26 | features.9.conv.2 | Conv2d | (59, 192, 1, 1) | 2220288 | 11328 |\n", + "| 27 | features.10.conv.0.0 | Conv2d | (192, 59, 1, 1) | 2220288 | 11328 |\n", + "| 28 | features.10.conv.1.0 | Conv2d | (192, 1, 3, 3) | 338688 | 1728 |\n", + "| 29 | features.10.conv.2 | Conv2d | (59, 192, 1, 1) | 2220288 | 11328 |\n", + "| 30 | features.11.conv.0.0 | Conv2d | (192, 59, 1, 1) | 2220288 | 11328 |\n", + "| 31 | features.11.conv.1.0 | Conv2d | (192, 1, 3, 3) | 338688 | 1728 |\n", + "| 32 | features.11.conv.2 | Conv2d | (87, 192, 1, 1) | 3273984 | 16704 |\n", + "| 33 | features.12.conv.0.0 | Conv2d | (288, 87, 1, 1) | 4910976 | 25056 |\n", + "| 34 | features.12.conv.1.0 | Conv2d | (288, 1, 3, 3) | 508032 | 2592 |\n", + "| 35 | features.12.conv.2 | Conv2d | (87, 288, 1, 1) | 4910976 | 25056 |\n", + "| 36 | features.13.conv.0.0 | Conv2d | (288, 87, 1, 1) | 4910976 | 25056 |\n", + "| 37 | features.13.conv.1.0 | Conv2d | (288, 1, 3, 3) | 508032 | 2592 |\n", + "| 38 | features.13.conv.2 | Conv2d | (87, 288, 1, 1) | 4910976 | 25056 |\n", + "| 39 | features.14.conv.0.0 | Conv2d | (288, 87, 1, 1) | 4910976 | 25056 |\n", + "| 40 | features.14.conv.1.0 | Conv2d | (288, 1, 3, 3) | 127008 | 2592 |\n", + "| 41 | features.14.conv.2 | Conv2d | (134, 288, 1, 1) | 1891008 | 38592 |\n", + "| 42 | features.15.conv.0.0 | Conv2d | (480, 134, 1, 1) | 3151680 | 64320 |\n", + "| 43 | features.15.conv.1.0 | Conv2d | (480, 1, 3, 3) | 211680 | 4320 |\n", + "| 44 | features.15.conv.2 | Conv2d | (134, 480, 1, 1) | 3151680 | 64320 |\n", + "| 45 | features.16.conv.0.0 | Conv2d | (480, 134, 1, 1) | 3151680 | 64320 |\n", + "| 46 | features.16.conv.1.0 | Conv2d | (480, 1, 3, 3) | 211680 | 4320 |\n", + "| 47 | features.16.conv.2 | Conv2d | (134, 480, 1, 1) | 3151680 | 64320 |\n", + "| 48 | features.17.conv.0.0 | Conv2d | (480, 134, 1, 1) | 3151680 | 64320 |\n", + "| 49 | features.17.conv.1.0 | Conv2d | (480, 1, 3, 3) | 211680 | 4320 |\n", + "| 50 | features.17.conv.2 | Conv2d | (160, 480, 1, 1) | 3763200 | 76800 |\n", + "| 51 | features.18.0 | Conv2d | (1280, 160, 1, 1) | 10035200 | 204800 |\n", + "| 52 | classifier.1 | Linear | (120, 1280) | 153600 | 153720 |\n", + "+-------+----------------------+--------+-------------------+----------+---------+\n", + "FLOPs total: 139206976\n", + "#Params total: 1074880\n", + "MobileNetV2(\n", + " (features): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): Conv2d(32, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): 
InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(16, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(48, 48, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=48, bias=False)\n", + " (1): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(48, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(16, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(72, 72, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=72, bias=False)\n", + " (1): BatchNorm2d(72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(72, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (4): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(16, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(72, 72, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=72, bias=False)\n", + " (1): BatchNorm2d(72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(72, 25, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (5): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(25, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=96, bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(96, 25, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (6): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(25, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=96, bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " 
(2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(96, 25, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (7): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(25, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=96, bias=False)\n", + " (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(96, 59, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(59, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (8): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(59, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 59, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(59, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (9): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(59, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 59, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(59, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (10): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(59, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 59, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(59, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (11): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(59, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): 
ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)\n", + " (1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(192, 87, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(87, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (12): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(87, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(288, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=288, bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(288, 87, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(87, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (13): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(87, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(288, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=288, bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(288, 87, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(87, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (14): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(87, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(288, 288, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=288, bias=False)\n", + " (1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(288, 134, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(134, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (15): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(134, 480, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=480, bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(480, 134, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(134, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", 
+ " )\n", + " (16): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(134, 480, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=480, bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(480, 134, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(134, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (17): InvertedResidual(\n", + " (conv): Sequential(\n", + " (0): ConvBNActivation(\n", + " (0): Conv2d(134, 480, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (1): ConvBNActivation(\n", + " (0): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=480, bias=False)\n", + " (1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " (2): Conv2d(480, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (3): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (18): ConvBNActivation(\n", + " (0): Conv2d(160, 1280, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(1280, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU6(inplace=True)\n", + " )\n", + " )\n", + " (classifier): Sequential(\n", + " (0): Dropout(p=0.2, inplace=False)\n", + " (1): Linear(in_features=1280, out_features=120, bias=True)\n", + " )\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + ")\n", + "FLOPs: 139206976, params: 1074880\n" + ] + } + ], + "source": [ + "flops, params, results = count_flops_params(model, dummy_input)\n", + "print(model)\n", + "print(f\"FLOPs: {flops}, params: {params}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Fine-tuning after Pruning\n", + "\n", + "Usually, after pruning out some weights from the model, we need further fine-tuning to let the model recover its performance as much as possible. For finetuning, we can either use the same setting during pretraining, or use an additional technique called [**Knowledge Distillation**](https://arxiv.org/pdf/1503.02531.pdf). The key idea is that the model learns on both the original hard labels and the soft labels produced by a teacher model running on the same input. In our setting, **the model before pruning can conveniently serve as the teacher model**. Empirically, we found that using distillation during fine-tuning consistently improves the performance of the pruned model. 
We will further discuss related experiments in the following section.\n", + "\n", + "Note that knowledge distillation can easily be done with the following lines of code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# sample code: training with knowledge distillation\n", + "\"\"\"\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "\n", + "def train_with_distillation(student_model, teacher_model, optimizer, train_dataloader, device, alpha=0.99, temperature=8):\n", + "    student_model.train()\n", + "    for i, (inputs, labels) in enumerate(tqdm(train_dataloader)):\n", + "        optimizer.zero_grad()\n", + "        inputs, labels = inputs.float().to(device), labels.to(device)\n", + "        with torch.no_grad():\n", + "            teacher_preds = teacher_model(inputs)\n", + "\n", + "        student_preds = student_model(inputs)\n", + "        soft_loss = nn.KLDivLoss()(F.log_softmax(student_preds/temperature, dim=1),\n", + "                                   F.softmax(teacher_preds/temperature, dim=1))\n", + "        hard_loss = F.cross_entropy(student_preds, labels)\n", + "        loss = soft_loss * (alpha * temperature * temperature) + hard_loss * (1. - alpha)\n", + "\n", + "        loss.backward()\n", + "        optimizer.step()\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finetuning after pruning:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using cache found in /home/v-diwu4/.cache/torch/hub/pytorch_vision_v0.8.1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Start finetuning with distillation epoch 0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "  0%|          | 0/338 [00:00<?, ?it/s] (remaining finetuning logs truncated)\n" + ] + } + ], + "source": [ + "# finetune the pruned model with knowledge distillation;\n", + "# see run_finetune_distillation() in pruning_experiments.py for the full loop\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below, we describe how we searched for a good pruning configuration in four steps.\n", + "\n", + "### Step 1: deciding which modules to prune\n", + "Each inverted residual block of MobileNetV2 contains three convolutions: the first pointwise convolution (`conv 0.0`), the depthwise convolution (`conv 1.0`), and the last pointwise convolution (`conv 2`). In this step, we prune different combinations of these modules and compare the resulting performance.\n", + "\n", + "<img src=\"mobilenet.png\">\n", + "\n", + "<img src=\"step1.png\">\n", + "\n", + "Therefore, in the following experiments, we limit the modules to prune to the `conv 1.0`'s and the `conv 2`'s. 
Thus the config list is always written in the following way:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "config_list = [{\n", + "    'op_names': ['features.{}.conv.1.0'.format(x) for x in range(2, 18)],\n", + "    'sparsity': sparsity\n", + "},{\n", + "    'op_names': ['features.{}.conv.2'.format(x) for x in range(2, 18)],\n", + "    'sparsity': sparsity\n", + "}]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run some experiments for this step, please run `pruning_experiments.py` and specify the following arguments:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Example shell script: \n", + "\"\"\"\n", + "for sparsity in 0.2 0.4 0.6 0.8; do\n", + "    for pruning_mode in 'conv0' 'conv1' 'conv2' 'conv1andconv2' 'all'; do\n", + "        python pruning_experiments.py \\\n", + "            --experiment_dir pretrained_mobilenet_v2_torchhub/ \\\n", + "            --checkpoint_name 'checkpoint_best.pt' \\\n", + "            --sparsity $sparsity \\\n", + "            --pruning_mode $pruning_mode \\\n", + "            --pruner_name l1 \\\n", + "            --speed_up \\\n", + "            --finetune_epochs 30\n", + "    done\n", + "done\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2: trying one-shot pruners\n", + "After determining which modules to prune, we consider the next two questions:\n", + "* **Which global sparsity range should we aim at?**\n", + "* **Is there any one-shot pruning algorithm outperforming others by a large margin?**\n", + "\n", + "The first problem stems from the natural tradeoff between model size and accuracy. As long as the performance remains acceptable, we wish the model to be as small as possible. Therefore, in this step, we run some one-shot pruners with different sparsity settings and find a range of sparsities within which the model maintains acceptable performance. \n", + "\n", + "The following figure summarizes our experiments on three pruners. We perform 30 epochs of final finetuning for each experiment. Starting from the original model (with accuracy 0.8), we observe that when the sparsity is below 0.4, the pruned model can easily recover, with performance approaching that of the model before pruning. On the other hand, when the sparsity is above 0.7, the model's performance drops too much even after finetuning. Therefore, we limit our search space to sparsity settings between 0.4 and 0.7 in the experiments for the following step 3 and step 4.\n", + "\n", + "
\n", + "\n", + "
\n", + "\n", + "In addition, we observe that the slim pruner has better performance in the one-shot pruning setting. However, as we will show later, when we consider iterative pruning, the importance of choosing base pruning algorithms seem to be overwhelmed by choosing a correct pruning schedule. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run some experiments for this step, please run `pruning_experiments.py` and specify the following arguments:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Example shell script: \n", + "\"\"\"\n", + "for sparsity in 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9; do\n", + " for pruning_mode in 'conv1', 'conv1andconv2'; do\n", + " python pruning_experiments.py \\\n", + " --experiment_dir pretrained_mobilenet_v2_torchhub/ \\\n", + " --checkpoint_name 'checkpoint_best.pt' \\\n", + " --sparsity $sparsity \\\n", + " --pruning_mode $pruning_mode \\\n", + " --pruner_name l1 \\\n", + " --speed_up \\\n", + " --finetune_epochs 30\n", + " done\n", + "done\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 3: determining iterative pruning strategy\n", + "\n", + "Now that we have found a good set of modules to prune and a good range of sparsity settings to experiment on, we can shift our focus to iterative pruning. Iterative pruning interleaves pruning with finetuning, and is often shown be more performant than one-shot pruning, which prunes the model once to the target sparsity. The following figure establishes that the superiority of iterative pruning under the same other settings.\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, we consider the following two important hyperparameters for iterative pruning:\n", + "* the total number of pruning iterations\n", + "* the number of finetuning epochs between pruning iterations\n", + "\n", + "We experiment we 2, 4, and 8 iterations, with 1 or 3 intermediate finetuning epochs. The results are summarized in the following figure. We clearly observe that increasing the number of pruning iterations significantly improves the final performance, while increasing the number of epochs only helps slightly. Therefore, we recommend that you should spend effort in **determining a correct (often large) number of pruning iterations**, while need not to spend a lot of effort tuning the number of finetuning epochs in between. In our case, we found iteration numbers between 64 and 128 gives the best performance. 
\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run some experiments for this step, please run `pruning_experiments.py` and specify the following arguments:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Example shell script: \n", + "\"\"\"\n", + "for sparsity in 0.4 0.5 0.6 0.7; do\n", + " for n_iters in 2 4 8 16; do\n", + " python pruning_experiments.py \\\n", + " --experiment_dir pretrained_mobilenet_v2_torchhub/ \\\n", + " --checkpoint_name 'checkpoint_best.pt' \\\n", + " --sparsity $sparsity \\\n", + " --pruning_mode 'conv1andconv2' \\\n", + " --pruner_name 'agp' \\\n", + " --agp_n_iters $n_iters \\\n", + " --speed_up \\\n", + " --finetune_epochs 30 \\\n", + " done\n", + "done\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 4: determining finetuning strategy\n", + "Finally, after pruning the model, we recommend **using knowledge distillation for finetuning**, which only involves changing several lines of code computing the loss (if we reuse the model before pruning as the teacher model). As shown in the following figure, using knowledge distillation during finetuning can bring about 5 percentage performance improvement in our task. \n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run some experiments for this step, please run `pruning_experiments.py` and specify the following arguments:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Example shell script: \n", + "\"\"\"\n", + "for sparsity in 0.4 0.5 0.6 0.7; do\n", + " python pruning_experiments.py \\\n", + " --experiment_dir pretrained_mobilenet_v2_torchhub/ \\\n", + " --checkpoint_name 'checkpoint_best.pt' \\\n", + " --sparsity $sparsity \\\n", + " --pruning_mode 'conv1andconv2' \\\n", + " --pruner_name 'agp' \\\n", + " --speed_up \\\n", + " --finetune_epochs 80\n", + "done\n", + "\n", + "for sparsity in 0.4 0.5 0.6 0.7; do\n", + " python pruning_experiments.py \\\n", + " --experiment_dir pretrained_mobilenet_v2_torchhub/ \\\n", + " --checkpoint_name 'checkpoint_best.pt' \\\n", + " --sparsity $sparsity \\\n", + " --pruning_mode 'conv1andconv2' \\\n", + " --pruner_name 'agp' \\\n", + " --speed_up \\\n", + " --finetune_epochs 80 \\\n", + " -- kd\n", + "done\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Comparison with Baseline Methods\n", + "To confirm that using NNI Pruners indeed results in a model with good performance. We implement and compare with the following baseline methods:\n", + "1. Shrink the number of channel of all layers to half. This is a basic compression method mentioned by the MobileNet authors. We experiment with the following two settings:\n", + " * randomly initialize the weights and train with knowledge distillation\n", + " * use `L1FilterPruner` to prune the ImageNet pretrained model to 0.5 sparsity, and then train with knowledge distillation. \n", + "2. Random pruning to 0.5 sparsity. \n", + "\n", + "In the first baseline, we observe that the randomly initialized model only has 0.45 test accuracy, while the model initialized with ImageNet weights has 0.7197 test accuracy after training. However, as shown in the table at the beginning of the notebook, using NNI pruners we can achieve 0.7703 test accuracy with the same amount of finetuning with knowledge distillation. This established the superiority of our approach. As a side remark, this observation is also consistent with the AGP authors' claim that \"large sparse\" models obtained by pruning often outperform \"small dense\" models with similar amount of parameters trained from scratch. \n", + "\n", + "In the second baseline, we observe that random pruning performs worse than our one-shot baselines, giving 0.7385 validation accuracy and 0.7182 test accuracy for 0.5 sparsity. This establishes that the pruning has its unique values that cannot be replaced by the final knowledge distillation process. \n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this end-to-end example, we have shown the process of using NNI Pruners to compress MobileNetV2 on Stanford Dogs. With iterative pruning and knowledge distillation, we have pruned the MobileNetV2 architecture to 1/3 of its size, with 95% accuracy retained. In the last sections, we also introduce our approach to the problem, and wish that it could be a useful reference if you want to solve a similar problem with NNI Pruners. 
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nlp", + "language": "python", + "name": "nlp" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/final_performance.png b/examples/model_compress/pruning/mobilenetv2_end2end/final_performance.png new file mode 100644 index 0000000000000000000000000000000000000000..8c05f139c8cb279573902b002a74007e05660607 Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/final_performance.png differ diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/mobilenet.png b/examples/model_compress/pruning/mobilenetv2_end2end/mobilenet.png new file mode 100644 index 0000000000000000000000000000000000000000..9238912fdb061860b78de2e2f9aff2af58011ec3 Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/mobilenet.png differ diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/prepare_data.sh b/examples/model_compress/pruning/mobilenetv2_end2end/prepare_data.sh new file mode 100755 index 0000000000000000000000000000000000000000..d05229019c773490bca1e8e90b0858f8664a4a04 --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/prepare_data.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# download and preprocess the Stanford Dogs dataset + +mkdir -p data/stanford-dogs + +# download raw data (images, annotations, and train-test split) +cd data/stanford-dogs + +if [ ! -d './Images' ] ; then + if [ ! -f 'images.tar' ] ; then + wget http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar + fi + tar -xvf images.tar +fi + +if [ ! -d './Annotation' ] ; then + if [ ! -f 'annotation.tar' ] ; then + wget http://vision.stanford.edu/aditya86/ImageNetDogs/annotation.tar + fi + tar -xvf annotation.tar +fi + +if [ ! -f 'lists.tar' ] ; then + wget http://vision.stanford.edu/aditya86/ImageNetDogs/lists.tar +fi +tar -xvf lists.tar + +cd ../.. + +# preprocess: train-valid-test splitting and image cropping +python preprocess.py diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/preprocess.py b/examples/model_compress/pruning/mobilenetv2_end2end/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..88b72dd7ee8a98f5c8da2813deb8a0ce8395ee12 --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/preprocess.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import os +import xml.etree.ElementTree +from PIL import Image +import numpy as np +from sklearn.model_selection import train_test_split +from scipy import io + + +ROOT_DIR = './data/stanford-dogs/' +NUM_CATEGORIES = 120 +OUT_IMAGE_SIZE = (224, 224) +RANDOM_SEED = 42     # for splitting train and validation +TRAIN_RATIO = 0.9    # train / (train + validation) + + +def get_bounding_box(annotation_file): +    """ +    Parse the annotation file and return the bounding box information. + +    Parameters +    ---------- +    annotation_file: path to the annotation XML file + +    Returns +    ------- +    A dict containing bounding box information +    """ +    ret = {} +    xml_root = xml.etree.ElementTree.parse(annotation_file).getroot() +    bounding_box = xml_root.findall('object')[0].findall('bndbox')[0] +    ret['X_min'] = int(bounding_box.findall('xmin')[0].text) +    ret['X_max'] = int(bounding_box.findall('xmax')[0].text) +    ret['Y_min'] = int(bounding_box.findall('ymin')[0].text) +    ret['Y_max'] = int(bounding_box.findall('ymax')[0].text) + +    return ret + + +def main(root_dir): +    try: +        os.mkdir(root_dir + 'Processed') +        os.mkdir(root_dir + 'Processed/train') +        os.mkdir(root_dir + 'Processed/valid') +        os.mkdir(root_dir + 'Processed/test') +    except FileExistsError: +        print('Directory already exists. Nothing done.') +        exit() + +    # load train test splits +    train_metadata = io.loadmat(root_dir + 'train_list.mat') +    train_valid_file_list = [x[0][0] for x in train_metadata['file_list']] +    train_valid_annotation_list = [x[0][0] for x in train_metadata['annotation_list']] +    train_valid_labels = [x[0] - 1 for x in train_metadata['labels']] +    train_valid_lists = [x for x in zip(train_valid_file_list, train_valid_annotation_list, train_valid_labels)] +    train_lists, valid_lists = train_test_split(train_valid_lists, train_size=TRAIN_RATIO, random_state=RANDOM_SEED) +    train_file_list, train_annotation_list, train_labels = zip(*train_lists) +    valid_file_list, valid_annotation_list, valid_labels = zip(*valid_lists) + +    test_metadata = io.loadmat(root_dir + 'test_list.mat') +    test_file_list = [x[0][0] for x in test_metadata['file_list']] +    test_annotation_list = [x[0][0] for x in test_metadata['annotation_list']] +    test_labels = [x[0] - 1 for x in test_metadata['labels']] + +    label2idx = {} +    for split, file_list, annotation_list, labels in zip(['train', 'valid', 'test'], +                                                         [train_file_list, valid_file_list, test_file_list], +                                                         [train_annotation_list, valid_annotation_list, test_annotation_list], +                                                         [train_labels, valid_labels, test_labels]): +        print('Preprocessing {} set: {} cases'.format(split, len(file_list))) +        for cur_file, cur_annotation, cur_label in zip(file_list, annotation_list, labels): +            label_name = cur_file.split('/')[0].split('-')[-1].lower() +            if label_name not in label2idx: +                label2idx[label_name] = cur_label +            image = Image.open(root_dir + '/Images/' + cur_file) + +            # cropping and reshape +            annotation_file = root_dir + '/Annotation/' + cur_annotation +            bounding_box = get_bounding_box(annotation_file) +            image = image.crop([bounding_box['X_min'], bounding_box['Y_min'], +                                bounding_box['X_max'], bounding_box['Y_max']]) +            image = image.convert('RGB') +            image = image.resize(OUT_IMAGE_SIZE) + +            # Normalize and save the instance +            X = np.array(image) +            X = (X - np.mean(X, axis=(0, 1))) / np.std(X, axis=(0, 1))    # normalize each channel separately + +            # image.save(root_dir + 'Processed/' + split + '/' + image_name) +            np.save(root_dir + 'Processed/' + split + '/' + cur_file.split('/')[-1].replace('.jpg', '.npy'), +                    {'input': X, 'label': cur_label}) + +    # save 
mapping from label name to index to a dict + with open(ROOT_DIR + '/category_dict.tsv', 'w') as dict_f: + final_dict_list = sorted(list(label2idx.items()), key=(lambda x: x[-1])) + for label, index in final_dict_list: + dict_f.write('{}\t{}\n'.format(index, label)) + print(final_dict_list) + + +if __name__ == '__main__': + main(ROOT_DIR) diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/pretrain.py b/examples/model_compress/pruning/mobilenetv2_end2end/pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..ead547c9e705b2dd7fd6b5f1d81827bcb56cc8db --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/pretrain.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import argparse +from time import gmtime, strftime +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm +import numpy as np + +from utils import * + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def run_validation(model, valid_dataloader): + model.eval() + + loss_func = nn.CrossEntropyLoss() + acc_list, loss_list = [], [] + with torch.no_grad(): + for i, (inputs, labels) in enumerate(tqdm(valid_dataloader)): + inputs, labels = inputs.float().to(device), labels.to(device) + preds= model(inputs) + pred_idx = preds.max(1).indices + acc = (pred_idx == labels).sum().item() / labels.size(0) + acc_list.append(acc) + loss = loss_func(preds, labels).item() + loss_list.append(loss) + + valid_loss = np.array(loss_list).mean() + valid_acc = np.array(acc_list).mean() + + return valid_loss, valid_acc + + +def run_pretrain(args): + print(args) + torch.set_num_threads(args.n_workers) + + model_type = 'mobilenet_v2_torchhub' + pretrained = True # load imagenet weight + experiment_dir = 'pretrained_{}'.format(model_type) if args.experiment_dir is None else args.experiment_dir + os.mkdir(experiment_dir) + checkpoint = None + input_size = 224 + n_classes = 120 + + log = open(experiment_dir + '/pretrain.log', 'w') + + model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes, + input_size=input_size, checkpoint=checkpoint) + model = model.to(device) + print(model) + # count_flops(model, device=device) + + train_dataset = TrainDataset('./data/stanford-dogs/Processed/train') + train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True) + valid_dataset = EvalDataset('./data/stanford-dogs/Processed/valid') + valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False) + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay) + + best_valid_acc = 0.0 + for epoch in range(args.n_epochs): + print('Start training epoch {}'.format(epoch)) + loss_list = [] + + # train + model.train() + for i, (inputs, labels) in enumerate(tqdm(train_dataloader)): + optimizer.zero_grad() + inputs, labels = inputs.float().to(device), labels.to(device) + preds = model(inputs) + loss = criterion(preds, labels) + loss_list.append(loss.item()) + loss.backward() + optimizer.step() + + # validation + valid_loss, valid_acc = run_validation(model, valid_dataloader) + train_loss = np.array(loss_list).mean() + print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format + (epoch, train_loss, valid_loss, valid_acc)) + log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}\n'.format + 
(epoch, train_loss, valid_loss, valid_acc)) + + # save + if valid_acc > best_valid_acc: + best_valid_acc = valid_acc + torch.save(model.state_dict(), experiment_dir + '/checkpoint_best.pt') + + log.close() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Example code for pruning MobileNetV2') + + parser.add_argument('--experiment_dir', type=str, default=None, + help='directory containing the pretrained model') + parser.add_argument('--checkpoint_name', type=str, default='checkpoint_best.pt', + help='checkpoint of the pretrained model') + + # finetuning parameters + parser.add_argument('--n_workers', type=int, default=16, + help='number of threads') + parser.add_argument('--n_epochs', type=int, default=180, + help='number of epochs to train the model') + parser.add_argument('--learning_rate', type=float, default=1e-4) + parser.add_argument('--weight_decay', type=float, default=0.0) + parser.add_argument('--batch_size', type=int, default=32, + help='input batch size for training and inference') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + run_pretrain(args) diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/pruning_experiments.py b/examples/model_compress/pruning/mobilenetv2_end2end/pruning_experiments.py new file mode 100644 index 0000000000000000000000000000000000000000..193f1756c628f3eddd6a8c0564bfc586c7423a1d --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/pruning_experiments.py @@ -0,0 +1,375 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import argparse +import copy +from time import gmtime, strftime +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm +import numpy as np + +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.algorithms.compression.pytorch.pruning import ( + LevelPruner, + SlimPruner, + FPGMPruner, + TaylorFOWeightFilterPruner, + L1FilterPruner, + L2FilterPruner, + AGPPruner, + ActivationMeanRankFilterPruner, + ActivationAPoZRankFilterPruner +) + +from utils import * + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +model_type = 'mobilenet_v2_torchhub' +input_size = 224 +n_classes = 120 + +pruner_type_to_class = {'level': LevelPruner, + 'l1': L1FilterPruner, + 'l2': L2FilterPruner, + 'slim': SlimPruner, + 'fpgm': FPGMPruner, + 'taylorfo': TaylorFOWeightFilterPruner, + 'agp': AGPPruner, + 'mean_activation': ActivationMeanRankFilterPruner, + 'apoz': ActivationAPoZRankFilterPruner} + + +def run_eval(model, dataloader, device): + model.eval() + loss_func = nn.CrossEntropyLoss() + acc_list, loss_list = [], [] + with torch.no_grad(): + for i, (inputs, labels) in enumerate(tqdm(dataloader)): + inputs, labels = inputs.float().to(device), labels.to(device) + preds= model(inputs) + pred_idx = preds.max(1).indices + acc = (pred_idx == labels).sum().item() / labels.size(0) + acc_list.append(acc) + loss = loss_func(preds, labels).item() + loss_list.append(loss) + + final_loss = np.array(loss_list).mean() + final_acc = np.array(acc_list).mean() + + return final_loss, final_acc + + +def run_finetune(model, train_dataloader, valid_dataloader, device, + n_epochs=2, learning_rate=1e-4, weight_decay=0.0, log=None): + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) + + best_valid_acc = 0.0 + best_model = None + for epoch in 
range(n_epochs): + print('Start finetuning epoch {}'.format(epoch)) + loss_list = [] + + # train + model.train() + for i, (inputs, labels) in enumerate(tqdm(train_dataloader)): + optimizer.zero_grad() + inputs, labels = inputs.float().to(device), labels.to(device) + preds = model(inputs) + loss = criterion(preds, labels) + loss_list.append(loss.item()) + loss.backward() + optimizer.step() + + # validation + valid_loss, valid_acc = run_eval(model, valid_dataloader, device) + train_loss = np.array(loss_list).mean() + print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format + (epoch, train_loss, valid_loss, valid_acc)) + if log is not None: + log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format + (epoch, train_loss, valid_loss, valid_acc)) + + if valid_acc > best_valid_acc: + best_valid_acc = valid_acc + best_model = copy.deepcopy(model).to(device) + + print("Best validation accuracy: {}".format(best_valid_acc)) + if log is not None: + log.write("Best validation accuracy: {}".format(best_valid_acc)) + + model = best_model + return model + + +def run_finetune_distillation(student_model, teacher_model, train_dataloader, valid_dataloader, device, + alpha, temperature, + n_epochs=2, learning_rate=1e-4, weight_decay=0.0, log=None): + optimizer = torch.optim.Adam(student_model.parameters(), lr=learning_rate, weight_decay=weight_decay) + # optimizer = torch.optim.SGD(student_model.parameters(), lr=learning_rate, momentum=0.9) + + best_valid_acc = 0.0 + best_model = None + for epoch in range(n_epochs): + print('Start finetuning with distillation epoch {}'.format(epoch)) + loss_list = [] + + # train + student_model.train() + for i, (inputs, labels) in enumerate(tqdm(train_dataloader)): + optimizer.zero_grad() + inputs, labels = inputs.float().to(device), labels.to(device) + with torch.no_grad(): + teacher_preds = teacher_model(inputs) + + preds = student_model(inputs) + soft_loss = nn.KLDivLoss()(F.log_softmax(preds/temperature, dim=1), + F.softmax(teacher_preds/temperature, dim=1)) + hard_loss = F.cross_entropy(preds, labels) + loss = soft_loss * (alpha * temperature * temperature) + hard_loss * (1. 
- alpha) + loss_list.append(loss.item()) + loss.backward() + optimizer.step() + + # validation + valid_loss, valid_acc = run_eval(student_model, valid_dataloader, device) + train_loss = np.array(loss_list).mean() + print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format + (epoch, train_loss, valid_loss, valid_acc)) + if log is not None: + log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format + (epoch, train_loss, valid_loss, valid_acc)) + + if valid_acc > best_valid_acc: + best_valid_acc = valid_acc + best_model = copy.deepcopy(student_model).to(device) + + print("Best validation accuracy: {}".format(best_valid_acc)) + if log is not None: + log.write("Best validation accuracy: {}".format(best_valid_acc)) + + student_model = best_model + return student_model + + +def trainer_helper(model, criterion, optimizer, dataloader, device): + print("Running trainer in tuner") + for epoch in range(1): + model.train() + for i, (inputs, labels) in enumerate(tqdm(dataloader)): + optimizer.zero_grad() + inputs, labels = inputs.float().to(device), labels.to(device) + preds = model(inputs) + loss = criterion(preds, labels) + loss.backward() + optimizer.step() + + +def trainer_helper_with_distillation(model, teacher_model, alpha, temperature, optimizer, dataloader, device): + print("Running trainer in tuner") + for epoch in range(1): + model.train() + for i, (inputs, labels) in enumerate(tqdm(dataloader)): + optimizer.zero_grad() + inputs, labels = inputs.float().to(device), labels.to(device) + + with torch.no_grad(): + teacher_preds = teacher_model(inputs) + preds = model(inputs) + soft_loss = nn.KLDivLoss()(F.log_softmax(preds/temperature, dim=1), + F.softmax(teacher_preds/temperature, dim=1)) + hard_loss = F.cross_entropy(preds, labels) + loss = soft_loss * (alpha * temperature * temperature) + hard_loss * (1. 
- alpha) + loss.backward() + optimizer.step() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Example code for pruning MobileNetV2') + + parser.add_argument('--experiment_dir', type=str, required=True, + help='directory containing the pretrained model') + parser.add_argument('--checkpoint_name', type=str, default='checkpoint_best.pt', + help='checkpoint of the pretrained model') + + # pruner + parser.add_argument('--pruning_mode', type=str, default='conv1andconv2', + choices=['conv0', 'conv1', 'conv2', 'conv1andconv2', 'all']) + parser.add_argument('--sparsity', type=float, default=0.5, + help='target sparsity') + parser.add_argument('--pruner_name', type=str, default='l1', + choices=['l1', 'l2', 'slim', 'agp', + 'fpgm', 'mean_activation', 'apoz', 'taylorfo'], + help='pruner to use') + # for agp only + parser.add_argument('--agp_pruning_alg', default='l1', + choices=['l1', 'l2', 'slim', 'fpgm', + 'mean_activation', 'apoz', 'taylorfo'], + help='pruner to use for agp') + parser.add_argument('--agp_n_iters', type=int, default=64, + help='number of iterations for agp') + parser.add_argument('--agp_n_epochs_per_iter', type=int, default=1, + help='number of epochs per iteration for agp') + + # speed-up + parser.add_argument('--speed_up', action='store_true', default=False, + help='Whether to speed-up the pruned model') + + # finetuning parameters + parser.add_argument('--n_workers', type=int, default=16, + help='number of threads') + parser.add_argument('--finetune_epochs', type=int, default=180, + help='number of epochs to finetune the model') + parser.add_argument('--learning_rate', type=float, default=1e-4) + parser.add_argument('--weight_decay', type=float, default=0.0) + parser.add_argument('--batch_size', type=int, default=32, + help='input batch size for training and inference') + parser.add_argument('--kd', action='store_true', default=False, + help='Whether to use knowledge distillation') + parser.add_argument('--alpha', type=float, default=0.99, + help='Alpha for knowledge distillation loss') + parser.add_argument('--temp', type=float, default=8, + help='Temperature for knowledge distillation loss') + + args = parser.parse_args() + return args + + +def run_pruning(args): + print(args) + torch.set_num_threads(args.n_workers) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + log = open(args.experiment_dir + '/pruning_{}_{}_sparsity{}_{}.log'.format( + args.pruner_name, args.pruning_mode, args.sparsity, + strftime("%Y%m%d%H%M", gmtime())), 'w') + + train_dataset = TrainDataset('./data/stanford-dogs/Processed/train') + train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True) + train_dataset_for_pruner = EvalDataset('./data/stanford-dogs/Processed/train') + train_dataloader_for_pruner = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False) + valid_dataset = EvalDataset('./data/stanford-dogs/Processed/valid') + valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False) + test_dataset = EvalDataset('./data/stanford-dogs/Processed/test') + test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False) + + model = create_model(model_type=model_type, pretrained=False, n_classes=n_classes, + input_size=input_size, checkpoint=args.experiment_dir + '/' + args.checkpoint_name) + model = model.to(device) + + teacher_model = None + if args.kd: + teacher_model = copy.deepcopy(model) + + # evaluation before pruning + # count_flops(model, log, device) + 
initial_loss, initial_acc = run_eval(model, test_dataloader, device) + print('Before Pruning:\nLoss: {}\nAccuracy: {}'.format(initial_loss, initial_acc)) + log.write('Before Pruning:\nLoss: {}\nAccuracy: {}\n'.format(initial_loss, initial_acc)) + + # set up config list and pruner + config_list = [] + if 'conv0' in args.pruning_mode or args.pruning_mode == 'all': + if args.pruner_name == 'slim' or (args.pruner_name == 'agp' and args.agp_pruning_alg == 'slim'): + config_list.append({ + 'op_names': ['features.{}.conv.0.1'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + else: + config_list.append({ + 'op_names': ['features.{}.conv.0.0'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + if 'conv1' in args.pruning_mode or args.pruning_mode == 'all': + if args.pruner_name == 'slim' or (args.pruner_name == 'agp' and args.agp_pruning_alg == 'slim'): + config_list.append({ + 'op_names': ['features.{}.conv.1.1'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + else: + config_list.append({ + 'op_names': ['features.{}.conv.1.0'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + if 'conv2' in args.pruning_mode or args.pruning_mode == 'all': + if args.pruner_name == 'slim' or (args.pruner_name == 'agp' and args.agp_pruning_alg == 'slim'): + config_list.append({ + 'op_names': ['features.{}.conv.3'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + else: + config_list.append({ + 'op_names': ['features.{}.conv.2'.format(x) for x in range(2, 18)], + 'sparsity': args.sparsity + }) + print(config_list) + + kwargs = {} + if args.pruner_name in ['slim', 'taylorfo', 'mean_activation', 'apoz', 'agp']: + def trainer(model, optimizer, criterion, epoch): + if not args.kd: + return trainer_helper(model, criterion, optimizer, train_dataloader, device) + else: + return trainer_helper_with_distillation(model, teacher_model, args.alpha, args.temp, optimizer, train_dataloader, device) + kwargs = { + 'trainer': trainer, + 'optimizer': torch.optim.Adam(model.parameters()), + 'criterion': nn.CrossEntropyLoss() + } + if args.pruner_name == 'agp': + kwargs['pruning_algorithm'] = args.agp_pruning_alg + kwargs['num_iterations'] = args.agp_n_iters + kwargs['epochs_per_iteration'] = args.agp_n_epochs_per_iter + if args.pruner_name == 'slim': + kwargs['sparsifying_training_epochs'] = 10 + + # pruning + pruner = pruner_type_to_class[args.pruner_name](model, config_list, **kwargs) + pruner.compress() + pruner.export_model(args.experiment_dir + '/model_temp.pth', args.experiment_dir + './mask_temp.pth') + + # model speedup + pruner._unwrap_model() + if args.speed_up: + dummy_input = torch.rand(1,3,224,224).to(device) + ms = ModelSpeedup(model, dummy_input, args.experiment_dir + './mask_temp.pth') + ms.speedup_model() + print(model) + count_flops(model, log) + + intermediate_loss, intermediate_acc = run_eval(model, test_dataloader, device) + print('Before Finetuning:\nLoss: {}\nAccuracy: {}'.format(intermediate_loss, intermediate_acc)) + log.write('Before Finetuning:\nLoss: {}\nAccuracy: {}\n'.format(intermediate_loss, intermediate_acc)) + + # finetuning + if args.kd: + model = run_finetune_distillation(model, teacher_model, train_dataloader, valid_dataloader, device, + args.alpha, args.temp, n_epochs=args.finetune_epochs, + learning_rate=args.learning_rate, weight_decay=args.weight_decay) + else: + model = run_finetune(model, train_dataloader, valid_dataloader, device, n_epochs=args.finetune_epochs, + learning_rate=args.learning_rate, 
+                             weight_decay=args.weight_decay)
+
+    # final evaluation
+    final_loss, final_acc = run_eval(model, test_dataloader, device)
+    print('After Pruning:\nLoss: {}\nAccuracy: {}'.format(final_loss, final_acc))
+    log.write('After Pruning:\nLoss: {}\nAccuracy: {}\n'.format(final_loss, final_acc))
+
+    # clean up the temporary model and mask files exported during pruning
+    filePaths = [args.experiment_dir + '/model_temp.pth', args.experiment_dir + '/mask_temp.pth']
+    for f in filePaths:
+        if os.path.exists(f):
+            os.remove(f)
+
+    log.close()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    run_pruning(args)
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/step1.png b/examples/model_compress/pruning/mobilenetv2_end2end/step1.png
new file mode 100644
index 0000000000000000000000000000000000000000..bbf6086fcafe67684170b3ff06100db705773a5f
Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/step1.png differ
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/step2.png b/examples/model_compress/pruning/mobilenetv2_end2end/step2.png
new file mode 100644
index 0000000000000000000000000000000000000000..e89262e429622d985f3e67d31885c64c73666e60
Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/step2.png differ
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/step3-1.png b/examples/model_compress/pruning/mobilenetv2_end2end/step3-1.png
new file mode 100644
index 0000000000000000000000000000000000000000..be7b316b8ca27abc762d407d3d822f0c878bb997
Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/step3-1.png differ
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/step3-2.png b/examples/model_compress/pruning/mobilenetv2_end2end/step3-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..b220ad7d1c54cfcb70986412e9ebe5d200c1ee60
Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/step3-2.png differ
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/step4.png b/examples/model_compress/pruning/mobilenetv2_end2end/step4.png
new file mode 100644
index 0000000000000000000000000000000000000000..cdfefa507f6625de345f3aa7333c510b3753efba
Binary files /dev/null and b/examples/model_compress/pruning/mobilenetv2_end2end/step4.png differ
diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/test.py b/examples/model_compress/pruning/mobilenetv2_end2end/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..81909370730252d41a2def7e4299fbe20189b1ff
--- /dev/null
+++ b/examples/model_compress/pruning/mobilenetv2_end2end/test.py
@@ -0,0 +1,55 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
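+
+# Evaluation-only script: loads the checkpoint produced by pretrain.py and
+# reports the average loss and accuracy on the Stanford Dogs test set, e.g.
+#   python test.py
+# (assumes the processed dataset is available under ./data/stanford-dogs/Processed/).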
+ +import os +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm +import numpy as np + +from utils import create_model, EvalDataset, count_flops + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +model_type = 'mobilenet_v2_torchhub' # 'mobilenet_v1' 'mobilenet_v2' 'mobilenet_v2_torchhub' +pretrained = False # load imagenet weight (only for 'mobilenet_v2_torchhub') +checkpoint_dir = './pretrained_{}/'.format(model_type) +checkpoint = checkpoint_dir + '/checkpoint_best.pt' # model checkpoint produced by pretrain.py +input_size = 224 +n_classes = 120 +batch_size = 32 + + +def run_test(): + model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes, + input_size=input_size, checkpoint=checkpoint) + model = model.to(device) + print(model) + # count_flops(model, device=device) + + test_dataset = EvalDataset('./data/stanford-dogs/Processed/test') + test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False) + + model.eval() + loss_func = nn.CrossEntropyLoss() + acc_list, loss_list = [], [] + with torch.no_grad(): + for i, (inputs, labels) in enumerate(tqdm(test_dataloader)): + inputs, labels = inputs.float().to(device), labels.to(device) + preds= model(inputs) + pred_idx = preds.max(1).indices + acc = (pred_idx == labels).sum().item() / labels.size(0) + acc_list.append(acc) + loss = loss_func(preds, labels).item() + loss_list.append(loss) + + final_loss = np.array(loss_list).mean() + final_acc = np.array(acc_list).mean() + print('Test loss: {}\nTest accuracy: {}'.format(final_loss, final_acc)) + + +if __name__ == '__main__': + run_test() + diff --git a/examples/model_compress/pruning/mobilenetv2_end2end/utils.py b/examples/model_compress/pruning/mobilenetv2_end2end/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..09ca2765db07877ceeb6ec06e0c18e5d199e7ca3 --- /dev/null +++ b/examples/model_compress/pruning/mobilenetv2_end2end/utils.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
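+
+# Shared helpers for the scripts in this directory: model construction,
+# the Stanford Dogs datasets (stored as one .npy file per sample), and a
+# FLOPs/params counter wrapping nni's count_flops_params.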
+ +import os +import sys +import torch +from torch.utils.data import Dataset, DataLoader +import torchvision.transforms as transforms +import numpy as np +from nni.compression.pytorch.utils.counter import count_flops_params + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from mobilenet import MobileNet +from mobilenet_v2 import MobileNetV2 + + +def create_model(model_type=None, n_classes=120, input_size=224, checkpoint=None, pretrained=False, width_mult=1.): + if model_type == 'mobilenet_v1': + model = MobileNet(n_class=n_classes, profile='normal') + elif model_type == 'mobilenet_v2': + model = MobileNetV2(n_class=n_classes, input_size=input_size, width_mult=width_mult) + elif model_type == 'mobilenet_v2_torchhub': + model = torch.hub.load('pytorch/vision:v0.8.1', 'mobilenet_v2', pretrained=pretrained) + # model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=pretrained) + feature_size = model.classifier[1].weight.data.size()[1] + replace_classifier = torch.nn.Linear(feature_size, n_classes) + model.classifier[1] = replace_classifier + elif model_type is None: + model = None + else: + raise RuntimeError('Unknown model_type.') + + if checkpoint is not None: + model.load_state_dict(torch.load(checkpoint)) + + return model + + +def get_dataloader(dataset_type, data_path, batch_size=32, shuffle=True): + assert dataset_type in ['train', 'eval'] + if dataset_type == 'train': + ds = TrainDataset(data_path) + else: + ds = EvalDataset(data_path) + return DataLoader(ds, batch_size, shuffle=shuffle) + + +class TrainDataset(Dataset): + def __init__(self, npy_dir): + self.root_dir = npy_dir + self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)] + + transform_set = [transforms.Lambda(lambda x: x), + transforms.RandomRotation(30), + transforms.ColorJitter(), + transforms.RandomHorizontalFlip(p=1)] + self.transform = transforms.RandomChoice(transform_set) + + def __len__(self): + return len(self.case_names) + + def __getitem__(self, index): + instance = np.load(self.case_names[index], allow_pickle=True).item() + x = instance['input'].transpose(2, 0, 1) # (C, H, W) + x = torch.from_numpy(x).type(torch.float) # convert to Tensor to use torchvision.transforms + x = self.transform(x) + return x, instance['label'] + + +class EvalDataset(Dataset): + def __init__(self, npy_dir): + self.root_dir = npy_dir + self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)] + + def __len__(self): + return len(self.case_names) + + def __getitem__(self, index): + instance = np.load(self.case_names[index], allow_pickle=True).item() + x = instance['input'].transpose(2, 0, 1) + x = torch.from_numpy(x).type(torch.float) + return x, instance['label'] + + +def count_flops(model, log=None, device=None): + dummy_input = torch.rand([1, 3, 256, 256]) + if device is not None: + dummy_input = dummy_input.to(device) + flops, params, results = count_flops_params(model, dummy_input) + print(f"FLOPs: {flops}, params: {params}") + if log is not None: + log.write(f"FLOPs: {flops}, params: {params}\n") + return flops, params diff --git a/examples/model_compress/pruning/naive_prune_tf.py b/examples/model_compress/pruning/naive_prune_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..f224342d7ece15389e5dcc3524d317b14cd2a08a --- /dev/null +++ b/examples/model_compress/pruning/naive_prune_tf.py @@ -0,0 +1,168 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
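+
+# Note on the two pruners used below: LevelPruner masks individual weights,
+# while SlimPruner masks BatchNormalization scaling factors; that is why the
+# slim branch adds an L1 penalty on the BN gamma weights during pretraining.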
+ +''' +NNI example for quick start of pruning. +In this example, we use level pruner to prune the LeNet on MNIST. +''' + +import argparse + +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten, MaxPool2D, BatchNormalization) + +from nni.algorithms.compression.tensorflow.pruning import LevelPruner, SlimPruner + +class LeNet(Model): + """ + LeNet-5 Model with customizable hyper-parameters + """ + def __init__(self, conv_size=3, hidden_size=32, dropout_rate=0.5): + """ + Initialize hyper-parameters. + + Parameters + ---------- + conv_size : int + Kernel size of convolutional layers. + hidden_size : int + Dimensionality of last hidden layer. + dropout_rate : float + Dropout rate between two fully connected (dense) layers, to prevent co-adaptation. + """ + super().__init__() + self.conv1 = Conv2D(filters=32, kernel_size=conv_size, activation='relu') + self.pool1 = MaxPool2D(pool_size=2) + self.bn1 = BatchNormalization() + self.conv2 = Conv2D(filters=64, kernel_size=conv_size, activation='relu') + self.pool2 = MaxPool2D(pool_size=2) + self.bn2 = BatchNormalization() + self.flatten = Flatten() + self.fc1 = Dense(units=hidden_size, activation='relu') + self.dropout = Dropout(rate=dropout_rate) + self.fc2 = Dense(units=10, activation='softmax') + + def call(self, x): + """Override ``Model.call`` to build LeNet-5 model.""" + x = self.conv1(x) + x = self.pool1(x) + x = self.bn1(x) + x = self.conv2(x) + x = self.pool2(x) + x = self.bn2(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.dropout(x) + return self.fc2(x) + + +def get_dataset(dataset_name='mnist'): + assert dataset_name == 'mnist' + + (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() + x_train = x_train[..., tf.newaxis] / 255.0 + x_test = x_test[..., tf.newaxis] / 255.0 + return (x_train, y_train), (x_test, y_test) + + +# def create_model(model_name='naive'): +# assert model_name == 'naive' +# return tf.keras.Sequential([ +# tf.keras.layers.Conv2D(filters=20, kernel_size=5), +# tf.keras.layers.BatchNormalization(), +# tf.keras.layers.ReLU(), +# tf.keras.layers.MaxPool2D(pool_size=2), +# tf.keras.layers.Conv2D(filters=20, kernel_size=5), +# tf.keras.layers.BatchNormalization(), +# tf.keras.layers.ReLU(), +# tf.keras.layers.MaxPool2D(pool_size=2), +# tf.keras.layers.Flatten(), +# tf.keras.layers.Dense(units=500), +# tf.keras.layers.ReLU(), +# tf.keras.layers.Dense(units=10), +# tf.keras.layers.Softmax() +# ]) + +def main(args): + train_set, test_set = get_dataset('mnist') + model = LeNet() + + print('start training') + + optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9, decay=1e-4) + + if args.pruner_name == 'slim': + def slim_loss(y_true, y_pred): + loss_1 = tf.keras.losses.sparse_categorical_crossentropy(y_true=y_true, y_pred=y_pred) + weight_list = [] + for layer in [model.bn1, model.bn2]: + weight_list.append([w for w in layer.weights if '/gamma:' in w.name][0].read_value()) + loss_2 = 0.0001 * tf.reduce_sum([tf.reduce_sum(tf.abs(w)) for w in weight_list]) + return loss_1 + loss_2 + model.compile( + optimizer=optimizer, + loss=slim_loss, + metrics=['accuracy'] + ) + else: + model.compile( + optimizer=optimizer, + loss='sparse_categorical_crossentropy', + metrics=['accuracy'] + ) + + model.fit( + train_set[0], + train_set[1], + batch_size=args.batch_size, + epochs=args.pretrain_epochs, + validation_data=test_set + ) + + print('start pruning') + optimizer_finetune = tf.keras.optimizers.SGD(learning_rate=0.001, 
momentum=0.9, decay=1e-4) + + # create_pruner + if args.pruner_name == 'level': + prune_config = [{ + 'sparsity': args.sparsity, + 'op_types': ['default'], + }] + pruner = LevelPruner(model, prune_config) + elif args.pruner_name == 'slim': + prune_config = [{ + 'sparsity': args.sparsity, + 'op_types': ['BatchNormalization'], + }] + pruner = SlimPruner(model, prune_config) + + model = pruner.compress() + + model.compile( + optimizer=optimizer_finetune, + loss='sparse_categorical_crossentropy', + metrics=['accuracy'], + run_eagerly=True # NOTE: Important, model compression does not work in graph mode! + ) + + # fine-tuning + model.fit( + train_set[0], + train_set[1], + batch_size=args.batch_size, + epochs=args.prune_epochs, + validation_data=test_set + ) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--pruner_name', type=str, default='level', choices=['level', 'slim']) + parser.add_argument('--batch-size', type=int, default=256) + parser.add_argument('--pretrain_epochs', type=int, default=10) + parser.add_argument('--prune_epochs', type=int, default=10) + parser.add_argument('--sparsity', type=float, default=0.5) + + args = parser.parse_args() + main(args) diff --git a/examples/model_compress/pruning/naive_prune_torch.py b/examples/model_compress/pruning/naive_prune_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..88ff3df6d948d94ef2d083524c24318597d9a402 --- /dev/null +++ b/examples/model_compress/pruning/naive_prune_torch.py @@ -0,0 +1,153 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for quick start of pruning. +In this example, we use level pruner to prune the LeNet on MNIST. +''' + +import logging + +import argparse +import torch +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import StepLR + +from nni.algorithms.compression.pytorch.pruning import LevelPruner + +import sys +sys.path.append('../models') +from mnist.lenet import LeNet + +_logger = logging.getLogger('mnist_example') +_logger.setLevel(logging.INFO) + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item()))
+            if args.dry_run:
+                break
+
+
+def test(model, device, test_loader):
+    model.eval()
+    test_loss = 0
+    correct = 0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            test_loss += F.nll_loss(output, target, reduction='sum').item()
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+
+    test_loss /= len(test_loader.dataset)
+    acc = 100 * correct / len(test_loader.dataset)
+
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), acc))
+
+    return acc
+
+def main(args):
+    torch.manual_seed(args.seed)
+    use_cuda = not args.no_cuda and torch.cuda.is_available()
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    train_kwargs = {'batch_size': args.batch_size}
+    test_kwargs = {'batch_size': args.test_batch_size}
+    if use_cuda:
+        cuda_kwargs = {'num_workers': 1,
+                       'pin_memory': True,
+                       'shuffle': True}
+        train_kwargs.update(cuda_kwargs)
+        test_kwargs.update(cuda_kwargs)
+
+    transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.1307,), (0.3081,))
+    ])
+
+    dataset1 = datasets.MNIST('./data', train=True, download=True,
+                              transform=transform)
+    dataset2 = datasets.MNIST('./data', train=False,
+                              transform=transform)
+    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
+    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
+
+    model = LeNet().to(device)
+    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
+
+    print('start pre-training')
+    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
+    for epoch in range(1, args.epochs + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test(model, device, test_loader)
+        scheduler.step()
+
+    torch.save(model.state_dict(), "pretrain_mnist_lenet.pt")
+
+    print('start pruning')
+    optimizer_finetune = torch.optim.SGD(model.parameters(), lr=0.01)
+
+    # create pruner
+    prune_config = [{
+        'sparsity': args.sparsity,
+        'op_types': ['default'],
+    }]
+
+    pruner = LevelPruner(model, prune_config)
+    model = pruner.compress()
+
+    # fine-tuning
+    best_top1 = 0
+    for epoch in range(1, args.epochs + 1):
+        pruner.update_epoch(epoch)
+        train(args, model, device, train_loader, optimizer_finetune, epoch)
+        top1 = test(model, device, test_loader)
+
+        if top1 > best_top1:
+            best_top1 = top1
+            # Export the best model; 'model_path' stores state_dict of the pruned model,
+            # 'mask_path' stores mask_dict of the pruned model
+            pruner.export_model(model_path='pruned_mnist_lenet.pt', mask_path='mask_mnist_lenet.pt')
+
+if __name__ == '__main__':
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example for model compression')
+    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
+                        help='input batch size for testing (default: 1000)')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
+                        help='learning rate (default: 1.0)')
+    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
+                        help='Learning rate step gamma (default: 0.7)')
+    parser.add_argument('--no-cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--dry-run', action='store_true', default=False,
+                        help='quickly check a single pass')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
+                        help='how many batches to wait before logging training status')
+    parser.add_argument('--sparsity', type=float, default=0.5,
+                        help='target overall sparsity')
+    args = parser.parse_args()
+
+    main(args)
diff --git a/examples/model_compress/pruning/speedup/model_speedup.py b/examples/model_compress/pruning/speedup/model_speedup.py
new file mode 100644
index 0000000000000000000000000000000000000000..61cdd6ad1a9a2bbb3d532f0bca0d41310258b489
--- /dev/null
+++ b/examples/model_compress/pruning/speedup/model_speedup.py
@@ -0,0 +1,98 @@
+import os
+import sys
+import argparse
+import time
+import torch
+
+from pathlib import Path
+sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
+from cifar10.vgg import VGG
+from mnist.lenet import LeNet
+
+from nni.compression.pytorch import apply_compression_results, ModelSpeedup
+
+torch.manual_seed(0)
+use_mask = True
+use_speedup = True
+compare_results = True
+
+config = {
+    'apoz': {
+        'model_name': 'vgg16',
+        'input_shape': [64, 3, 32, 32],
+        'masks_file': './experiment_data/mask_vgg16_cifar10_apoz.pth'
+    },
+    'l1filter': {
+        'model_name': 'vgg16',
+        'input_shape': [64, 3, 32, 32],
+        'masks_file': './experiment_data/mask_vgg16_cifar10_l1filter.pth'
+    },
+    'fpgm': {
+        'model_name': 'vgg16',
+        'input_shape': [64, 3, 32, 32],
+        'masks_file': './experiment_data/mask_vgg16_cifar10_fpgm.pth'
+    },
+    'slim': {
+        'model_name': 'vgg19',
+        'input_shape': [64, 3, 32, 32],
+        'masks_file': './experiment_data/mask_vgg19_cifar10_slim.pth'
+    }
+}
+
+def model_inference(config):
+    masks_file = config['masks_file']
+    device = torch.device(
+        'cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+    # device = torch.device(config['device'])
+    if config['model_name'] == 'vgg16':
+        model = VGG(depth=16)
+    elif config['model_name'] == 'vgg19':
+        model = VGG(depth=19)
+    elif config['model_name'] == 'lenet':
+        model = LeNet()
+
+    model.to(device)
+    model.eval()
+
+    dummy_input = torch.randn(config['input_shape']).to(device)
+    use_mask_out = use_speedup_out = None
+    # must run use_mask before use_speedup because use_speedup modifies the model
+    if use_mask:
+        apply_compression_results(model, masks_file, device)
+        start = time.time()
+        for _ in range(32):
+            use_mask_out = model(dummy_input)
+        print('elapsed time when use mask: ', time.time() - start)
+    if use_speedup:
+        m_speedup = ModelSpeedup(model, dummy_input, masks_file, device)
+        m_speedup.speedup_model()
+        start = time.time()
+        for _ in range(32):
+            use_speedup_out = model(dummy_input)
+        print('elapsed time when use speedup: ', time.time() - start)
+    if compare_results:
+        if torch.allclose(use_mask_out, use_speedup_out, atol=1e-07):
+            print('the outputs from use_mask and use_speedup are the same')
+        else:
+            raise RuntimeError('the outputs from use_mask and use_speedup are different')
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser("speedup")
+    parser.add_argument("--example_name", type=str, default="slim", help="the name of pruning example")
+    parser.add_argument("--masks_file", type=str, default=None, help="the path of the masks file")
+    args = parser.parse_args()
+
+    if args.example_name != 'all':
+        if args.masks_file is not None:
+            config[args.example_name]['masks_file'] = args.masks_file
+        if not os.path.exists(config[args.example_name]['masks_file']):
+            msg = '{} does not exist! You should specify masks_file correctly, ' \
+                  'or use the default one generated by model_prune_torch.py'
+            raise RuntimeError(msg.format(config[args.example_name]['masks_file']))
+        model_inference(config[args.example_name])
+    else:
+        model_inference(config['fpgm'])
+        model_inference(config['slim'])
+        model_inference(config['l1filter'])
+        model_inference(config['apoz'])
diff --git a/examples/model_compress/pruning/speedup/speedup_mobilnetv2.py b/examples/model_compress/pruning/speedup/speedup_mobilnetv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..db819298e9a23fe4d64ae132080c560bd7761cef
--- /dev/null
+++ b/examples/model_compress/pruning/speedup/speedup_mobilnetv2.py
@@ -0,0 +1,21 @@
+import torch
+from torchvision.models import mobilenet_v2
+from nni.compression.pytorch import ModelSpeedup
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
+
+
+model = mobilenet_v2(pretrained=True)
+dummy_input = torch.rand(8, 3, 416, 416)
+
+cfg_list = [{'op_types':['Conv2d'], 'sparsity':0.5}]
+pruner = L1FilterPruner(model, cfg_list)
+pruner.compress()
+pruner.export_model('./model', './mask')
+# need to call _unwrap_model if you want to run speedup on the same model
+pruner._unwrap_model()
+
+# Speed up the MobileNetV2 model
+ms = ModelSpeedup(model, dummy_input, './mask')
+ms.speedup_model()
+
+model(dummy_input)
\ No newline at end of file
diff --git a/examples/model_compress/pruning/speedup/speedup_nanodet.py b/examples/model_compress/pruning/speedup/speedup_nanodet.py
new file mode 100644
index 0000000000000000000000000000000000000000..70fd208605e14f7b4a8663a4e32a9f6f6356280c
--- /dev/null
+++ b/examples/model_compress/pruning/speedup/speedup_nanodet.py
@@ -0,0 +1,39 @@
+import torch
+from nanodet.model.arch import build_model
+from nanodet.util import cfg, load_config
+
+from nni.compression.pytorch import ModelSpeedup
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
+
+"""
+NanoDet model can be installed from https://github.com/RangiLyu/nanodet.git
+"""
+
+cfg_path = r"nanodet/config/nanodet-RepVGG-A0_416.yml"
+load_config(cfg, cfg_path)
+
+model = build_model(cfg.model).cpu()
+dummy_input = torch.rand(8, 3, 416, 416)
+
+op_names = []
+# these three conv layers are followed by reshape-like functions
+# that cannot be replaced, so we skip them; you can also find
+# such layers with the `not_safe_to_prune` function
+excludes = ['head.gfl_cls.0', 'head.gfl_cls.1', 'head.gfl_cls.2']
+for name, module in model.named_modules():
+    if isinstance(module, torch.nn.Conv2d):
+        if name not in excludes:
+            op_names.append(name)
+
+cfg_list = [{'op_types':['Conv2d'], 'sparsity':0.5, 'op_names':op_names}]
+pruner = L1FilterPruner(model, cfg_list)
+pruner.compress()
+pruner.export_model('./model', './mask')
+# need to call _unwrap_model if you want to run speedup on the same model
+pruner._unwrap_model()
+
+# Speed up the NanoDet model
+ms = ModelSpeedup(model, dummy_input, './mask')
+ms.speedup_model()
+
+model(dummy_input)
\ No newline at end of file
diff --git a/examples/model_compress/pruning/speedup/speedup_yolov3.py b/examples/model_compress/pruning/speedup/speedup_yolov3.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9c7f00e4f5db443f720b8f5f7fcab2c7ce9370b
--- /dev/null
+++ b/examples/model_compress/pruning/speedup/speedup_yolov3.py
@@ -0,0 +1,36 @@
+import torch
+from pytorchyolo import models
+
+from
nni.compression.pytorch import ModelSpeedup +from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, LevelPruner +from nni.compression.pytorch.utils import not_safe_to_prune + +# The Yolo can be downloaded at https://github.com/eriklindernoren/PyTorch-YOLOv3.git +prefix = '/home/user/PyTorch-YOLOv3' # replace this path with yours +# Load the YOLO model +model = models.load_model( + "%s/config/yolov3.cfg" % prefix, + "%s/yolov3.weights" % prefix).cpu() +model.eval() +dummy_input = torch.rand(8, 3, 320, 320) +model(dummy_input) +# Generate the config list for pruner +# Filter the layers that may not be able to prune +not_safe = not_safe_to_prune(model, dummy_input) +cfg_list = [] +for name, module in model.named_modules(): + if name in not_safe: + continue + if isinstance(module, torch.nn.Conv2d): + cfg_list.append({'op_types':['Conv2d'], 'sparsity':0.6, 'op_names':[name]}) +# Prune the model +pruner = L1FilterPruner(model, cfg_list) +pruner.compress() +pruner.export_model('./model', './mask') +pruner._unwrap_model() +# Speedup the model +ms = ModelSpeedup(model, dummy_input, './mask') + +ms.speedup_model() +model(dummy_input) + diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..1032bda7690ef87354b31a2bc660129344209a6c --- /dev/null +++ b/examples/model_compress/pruning/transformers/run.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Usage: ./run.sh gpu_id glue_task + +export HIP_VISIBLE_DEVICES=$1 +TASK_NAME=$2 # "cola", "sst2", "mrpc", "stsb", "qqp", "mnli", "qnli", "rte", "wnli" +PRETRAINED_MODEL="bert-base-uncased" # "distilbert-base-uncased", "roberta-base", "bert-base-cased", ... + +# parameters for pruning +SPARSITY=0.5 +RANKING_CRITERION=l1_weight # "l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo" +NUM_ITERATIONS=1 # 1 for one-shot pruning +EPOCHS_PER_ITERATION=1 + +# other training parameters, no need to change +MAX_LENGTH=128 +BATCH_SIZE=32 +LR=2e-5 +N_EPOCHS=3 + +time=$(date "+%Y%m%d%H%M%S") +OUTDIR="models_${PRETRAINED_MODEL}_${TASK_NAME}_$time/" + +TASK_LIST=("cola" "sst2" "mrpc" "stsb" "qqp" "mnli" "qnli" "rte" "wnli") +if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then + mkdir $OUTDIR + python transformer_pruning.py \ + --sparsity $SPARSITY \ + --ranking_criterion $RANKING_CRITERION \ + --num_iterations $NUM_ITERATIONS \ + --epochs_per_iteration $EPOCHS_PER_ITERATION \ + --speed_up \ + --model_name $PRETRAINED_MODEL \ + --task_name $TASK_NAME \ + --max_length $MAX_LENGTH \ + --batch_size $BATCH_SIZE \ + --learning_rate $LR \ + --num_train_epochs $N_EPOCHS \ + --output_dir $OUTDIR \ + 2>&1 | tee "$OUTDIR/output.log" +else + echo "Unsupported task $TASK_NAME." +fi diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py new file mode 100644 index 0000000000000000000000000000000000000000..e71906a9edaebdffb4e47100633a867cf8cc6d9a --- /dev/null +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -0,0 +1,387 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
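+
+# Pipeline of this example: finetune a pretrained transformer on a GLUE task,
+# prune attention heads with TransformerHeadPruner, optionally speed the model
+# up through the transformers library's _prune_heads(), then finetune again
+# and compare FLOPs/params before and after.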
+ +import argparse +import logging +import os + +import torch +from torch.utils.data.dataloader import DataLoader +from tqdm.auto import tqdm + +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner + +import datasets +from datasets import load_dataset, load_metric +import transformers +from transformers import ( + AdamW, + AutoConfig, + AutoModelForSequenceClassification, + AutoTokenizer, + DataCollatorWithPadding, + get_scheduler, +) + + +logger = logging.getLogger("bert_pruning_example") + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Example: prune a Huggingface transformer and finetune on GLUE tasks.") + + parser.add_argument("--model_name", type=str, required=True, + help="Pretrained model architecture.") + parser.add_argument("--task_name", type=str, default=None, + help="The name of the GLUE task.", + choices=["cola", "mnli", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]) + parser.add_argument("--output_dir", type=str, default=None, + help="Where to store the model and mask.") + parser.add_argument("--sparsity", type=float, required=True, + help="Sparsity: proportion of heads to prune (should be between 0 and 1)") + parser.add_argument("--global_sort", action="store_true", default=False, + help="Rank the heads globally and prune the heads with lowest scores. If set to False, the " + "heads are only ranked within one layer") + parser.add_argument("--ranking_criterion", type=str, default="l1_weight", + choices=["l1_weight", "l2_weight", + "l1_activation", "l2_activation", "taylorfo"], + help="Criterion by which the attention heads are ranked.") + parser.add_argument("--num_iterations", type=int, default=1, + help="Number of pruning iterations (1 for one-shot pruning).") + parser.add_argument("--epochs_per_iteration", type=int, default=1, + help="Epochs to finetune before the next pruning iteration " + "(only effective if num_iterations > 1).") + parser.add_argument("--speed_up", action="store_true", default=False, + help="Whether to speed-up the pruned model") + + # parameters for model training; no need to change them for running examples + parser.add_argument("--max_length", type=int, default=128, + help=("The maximum total input sequence length after tokenization. Sequences longer than this " + "will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.")) + parser.add_argument("--batch_size", type=int, default=8, + help="Batch size.") + parser.add_argument("--learning_rate", type=float, default=5e-5, + help="Initial learning rate.") + parser.add_argument("--num_train_epochs", type=int, default=3, + help="Total number of training epochs to perform.") + parser.add_argument("--lr_scheduler_type", default="linear", + choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", + "constant_with_warmup"]) + parser.add_argument("--num_warmup_steps", type=int, default=0, + help="Number of steps for the warmup in the lr scheduler.") + + args = parser.parse_args() + + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + return args + + +def get_raw_dataset(task_name): + """ + Get a GLUE dataset using huggingface datasets. 
+ """ + raw_dataset = load_dataset("glue", task_name) + is_regression = task_name == "stsb" + num_labels = 1 if is_regression else len( + raw_dataset["train"].features["label"].names) + + return raw_dataset, is_regression, num_labels + + +def preprocess(args, tokenizer, raw_dataset): + """ + Tokenization and column renaming. + """ + assert args.task_name is not None + + task_to_keys = { + "cola": ("sentence", None), + "mnli": ("premise", "hypothesis"), + "mrpc": ("sentence1", "sentence2"), + "qnli": ("question", "sentence"), + "qqp": ("question1", "question2"), + "rte": ("sentence1", "sentence2"), + "sst2": ("sentence", None), + "stsb": ("sentence1", "sentence2"), + "wnli": ("sentence1", "sentence2"), + } + sentence1_key, sentence2_key = task_to_keys[args.task_name] + + def tokenize(data): + texts = ( + (data[sentence1_key],) if sentence2_key is None else ( + data[sentence1_key], data[sentence2_key]) + ) + result = tokenizer(*texts, padding=False, + max_length=args.max_length, truncation=True) + + if "label" in data: + result["labels"] = data["label"] + return result + + processed_datasets = raw_dataset.map( + tokenize, batched=True, remove_columns=raw_dataset["train"].column_names) + return processed_datasets + + +def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset): + data_collator = DataCollatorWithPadding(tokenizer) + train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, + batch_size=args.batch_size) + eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, + batch_size=args.batch_size) + + optimizer = AdamW(model.parameters(), lr=args.learning_rate) + + return optimizer, train_dataloader, eval_dataloader, data_collator + + +def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device): + """ + Train the model using train_dataloader and evaluate after every epoch using eval_dataloader. + This function is called before and after pruning for "pretraining" on the GLUE task and further "finetuning". + """ + train_steps = args.num_train_epochs * len(train_dataloader) + progress_bar = tqdm(range(train_steps), position=0, leave=True) + + for epoch in range(args.num_train_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) + outputs = model(**batch) + outputs.loss.backward() + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + progress_bar.update(1) + + model.eval() + for step, batch in enumerate(eval_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) if not is_regression \ + else outputs.logits.squeeze() + metric.add_batch(predictions=predictions, references=batch["labels"]) + + eval_metric = metric.compute() + logger.info(f"epoch {epoch}: {eval_metric}") + + +def trainer_helper(model, train_dataloader, optimizer, device): + """ + This function is used for to create a "trainer" that is passed to the pruner. + Finetune the model for 1 epoch. This function is called by the pruner during pruning iterations (or called to + calculate scores for pruning when ranking criterion is "taylorfo"). 
+ """ + logger.info("Training for 1 epoch...") + progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) + + train_epoch = 1 + for epoch in range(train_epoch): + for step, batch in enumerate(train_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) + outputs = model(**batch) + outputs.loss.backward() + optimizer.step() + optimizer.zero_grad() + progress_bar.update(1) + + +def forward_runner_helper(model, train_dataloader, device): + """ + This function is used for to create a "forward_runner" that is passed to the pruner. + The function just runs forward on the train set without updating the parameters. + This allows the pruner to collect data for activation-based pruning methods. + """ + logger.info("Running forward on the entire train set without updating parameters...") + progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) + + forward_epoch = 1 + for epoch in range(forward_epoch): + for step, batch in enumerate(train_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) + _ = model(**batch) + # note: no loss.backward or optimizer.step() is performed here + progress_bar.update(1) + + +def final_eval_for_mnli(args, model, processed_datasets, metric, data_collator): + """ + If the task is MNLI, perform a final evaluation on mismatched validation set + """ + eval_dataset = processed_datasets["validation_mismatched"] + eval_dataloader = DataLoader( + eval_dataset, collate_fn=data_collator, batch_size=args.batch_size + ) + + model.eval() + for step, batch in enumerate(eval_dataloader): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + metric.add_batch( + predictions=predictions, + references=batch["labels"], + ) + + eval_metric = metric.compute() + logger.info(f"mnli-mm: {eval_metric}") + + +def main(): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + args = parse_args() + + ######################################################################### + # Prepare model, tokenizer, dataset, optimizer, and the scheduler + logger.setLevel(logging.INFO) + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + + # Load dataset and tokenizer, and then preprocess the dataset + raw_dataset, is_regression, num_labels = get_raw_dataset(args.task_name) + tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_fast=True) + processed_datasets = preprocess(args, tokenizer, raw_dataset) + train_dataset = processed_datasets["train"] + eval_dataset = processed_datasets["validation_matched" if args.task_name == + "mnli" else "validation"] + + # Load pretrained model + config = AutoConfig.from_pretrained( + args.model_name, num_labels=num_labels, finetuning_task=args.task_name) + model = AutoModelForSequenceClassification.from_pretrained( + args.model_name, config=config) + model.to(device) + + ######################################################################### + # Finetune on the target GLUE task before pruning + optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, + model, + train_dataset, + eval_dataset) + train_steps = args.num_train_epochs * len(train_dataloader) + lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, + num_training_steps=train_steps) + metric = load_metric("glue", args.task_name) + + logger.info("================= Finetuning before pruning =================") + 
train_model(args, model, is_regression, train_dataloader, + eval_dataloader, optimizer, lr_scheduler, metric, device) + + if args.output_dir is not None: + torch.save(model.state_dict(), args.output_dir + "/model_before_pruning.pt") + + if args.task_name == "mnli": + final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) + + ######################################################################### + # Pruning + optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, + model, + train_dataset, + eval_dataset) + dummy_input = next(iter(train_dataloader))["input_ids"].to(device) + flops, params, results = count_flops_params(model, dummy_input) + print(f"Initial model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M") + + # Here criterion is embedded in the model. Upper levels can just pass None to trainer. + def trainer(model, optimizer, criterion, epoch): + return trainer_helper(model, train_dataloader, optimizer, device) + + def forward_runner(model): + return forward_runner_helper(model, train_dataloader, device) + + # example: prune different layers with different sparsity + attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + + kwargs = {"ranking_criterion": args.ranking_criterion, + "global_sort": args.global_sort, + "num_iterations": args.num_iterations, + "epochs_per_iteration": args.epochs_per_iteration, + "attention_name_groups": attention_name_groups, + "head_hidden_dim": 64, + "trainer": trainer, + "optimizer": optimizer, + "forward_runner": forward_runner} + + config_list = [{ + "sparsity": args.sparsity, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups[:6] for x in layer] + }, { + "sparsity": args.sparsity / 2, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups[6:] for x in layer] + }] + + pruner = TransformerHeadPruner(model, config_list, **kwargs) + pruner.compress() + + ######################################################################### + # uncomment the following part to export the pruned model masks + # model_path = os.path.join(args.output_dir, "pruned_{}_{}.pth".format(args.model_name, args.task_name)) + # mask_path = os.path.join(args.output_dir, "mask_{}_{}.pth".format(args.model_name, args.task_name)) + # pruner.export_model(model_path=model_path, mask_path=mask_path) + + ######################################################################### + # Speedup + # Currently, speeding up Transformers through NNI ModelSpeedup is not supported because of shape inference issues. + # However, if you are using the transformers library, you can use the following workaround: + # The following code gets the head pruning decisions from the pruner and calls the _prune_heads() function + # implemented in models from the transformers library to speed up the model. 
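+    # Sketch of the conversion performed below: pruner.pruned_heads maps each
+    # attention group index to the set of pruned head indices, while
+    # _prune_heads() expects {layer_index: head indices}; the layer index is
+    # recovered as the first integer appearing in the group's module names.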
+ if args.speed_up: + speedup_rules = {} + for group_idx, group in enumerate(pruner.attention_name_groups): + # get the layer index + layer_idx = None + for part in group[0].split("."): + try: + layer_idx = int(part) + break + except: + continue + if layer_idx is not None: + speedup_rules[layer_idx] = pruner.pruned_heads[group_idx] + pruner._unwrap_model() + model.bert._prune_heads(speedup_rules) + print(model) + + ######################################################################### + # After pruning, finetune again on the target task + # Get the metric function + metric = load_metric("glue", args.task_name) + + # re-initialize the optimizer and the scheduler + optimizer, _, _, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, + eval_dataset) + lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, + num_training_steps=train_steps) + + logger.info("================= Finetuning after Pruning =================") + train_model(args, model, is_regression, train_dataloader, + eval_dataloader, optimizer, lr_scheduler, metric, device) + + if args.output_dir is not None: + torch.save(model.state_dict(), args.output_dir + + "/model_after_pruning.pt") + + if args.task_name == "mnli": + final_eval_for_mnli(args, model, processed_datasets, + metric, data_collator) + + flops, params, results = count_flops_params(model, dummy_input) + print(f"Final model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M") + + +if __name__ == "__main__": + main() diff --git a/examples/model_compress/pruning/v2/activation_pruning_torch.py b/examples/model_compress/pruning/v2/activation_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..4e79bd5102da4e256d9fd92767607b1f8208aad8 --- /dev/null +++ b/examples/model_compress/pruning/v2/activation_pruning_torch.py @@ -0,0 +1,142 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported ActivationAPoZRank and ActivationMeanRank pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. 
+ +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + g_epoch += 1 + +def evaluator(model): + model.eval() + correct = 0.0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + +def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160): + optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay) + scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1) + return optimizer, scheduler + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Example for model comporession') + parser.add_argument('--pruner', type=str, default='apoz', + choices=['apoz', 'mean'], + help='pruner to use') + parser.add_argument('--pretrain-epochs', type=int, default=20, + help='number of epochs to pretrain the model') + parser.add_argument('--fine-tune-epochs', type=int, default=20, + help='number of epochs to fine tune the model') + args = parser.parse_args() + + print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50) + model = VGG().to(device) + optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs) + criterion = torch.nn.CrossEntropyLoss() + pre_best_acc = 0.0 + best_state_dict = None + + for i in range(args.pretrain_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + acc = evaluator(model) + if acc > pre_best_acc: + pre_best_acc = acc + best_state_dict = model.state_dict() + print("Best accuracy: {}".format(pre_best_acc)) + model.load_state_dict(best_state_dict) + pre_flops, 
pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
+    g_epoch = 0
+
+    # Start to prune and speed up
+    print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
+    config_list = [{
+        'total_sparsity': 0.5,
+        'op_types': ['Conv2d'],
+    }]
+
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    if 'apoz' in args.pruner:
+        pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
+    else:
+        pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
+    _, masks = pruner.compress()
+    pruner.show_pruned_weights()
+    pruner._unwrap_model()
+    ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()
+    print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
+    evaluator(model)
+
+    # The optimizer used in the pruner might be patched, so it is recommended to create a new optimizer for the fine-tuning stage.
+    print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)
+    optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs)
+
+    best_acc = 0.0
+    g_epoch = 0
+    for i in range(args.fine_tune_epochs):
+        trainer(model, optimizer, criterion)
+        scheduler.step()
+        best_acc = max(evaluator(model), best_acc)
+    flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
+    print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%')
+    print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%')
diff --git a/examples/model_compress/pruning/v2/admm_pruning_torch.py b/examples/model_compress/pruning/v2/admm_pruning_torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f37bbf71d29cbecb2fd8c581c1ae1091e2cb864b
--- /dev/null
+++ b/examples/model_compress/pruning/v2/admm_pruning_torch.py
@@ -0,0 +1,138 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+'''
+NNI example for supported ADMM pruning algorithms.
+In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
+Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
+ +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +import nni +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ADMMPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + g_epoch += 1 + +def evaluator(model): + model.eval() + correct = 0.0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + +def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160): + optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay) + scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1) + return optimizer, scheduler + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Example for model comporession') + parser.add_argument('--pretrain-epochs', type=int, default=20, + help='number of epochs to pretrain the model') + parser.add_argument('--fine-tune-epochs', type=int, default=20, + help='number of epochs to fine tune the model') + args = parser.parse_args() + + print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50) + model = VGG().to(device) + optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs) + criterion = torch.nn.CrossEntropyLoss() + pre_best_acc = 0.0 + best_state_dict = None + + for i in range(args.pretrain_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + acc = evaluator(model) + if acc > pre_best_acc: + pre_best_acc = acc + best_state_dict = model.state_dict() + print("Best accuracy: {}".format(pre_best_acc)) + model.load_state_dict(best_state_dict) + pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + g_epoch = 0 + + # Start to prune and speedup + print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY 
PRETRAINED MODEL ' + '=' * 50) + config_list = [{ + 'sparsity': 0.8, + 'op_types': ['Conv2d'], + }, { + 'sparsity': 0.92, + 'op_types': ['Conv2d'], + }] + + # make sure you have used nni.trace to wrap the optimizer class before initialize + traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=2, training_epochs=2) + _, masks = pruner.compress() + pruner.show_pruned_weights() + + # Fine-grained method does not need to speedup + print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER PRUNING ' + '=' * 50) + evaluator(model) + + # Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage. + print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + + best_acc = 0.0 + g_epoch = 0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/amc_pruning_torch.py b/examples/model_compress/pruning/v2/amc_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..94e50e55450b5d70c7608f854c9092a63297ed6f --- /dev/null +++ b/examples/model_compress/pruning/v2/amc_pruning_torch.py @@ -0,0 +1,98 @@ +import sys +from tqdm import tqdm + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +from nni.algorithms.compression.v2.pytorch.pruning import AMCPruner +from nni.compression.pytorch.utils.counter import count_flops_params + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) +criterion = torch.nn.CrossEntropyLoss() + +def trainer(model, optimizer, criterion, epoch): + model.train() + for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def finetuner(model): + model.train() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + for data, target in tqdm(iterable=train_loader, desc='Epoch PFs'): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss 
= criterion(output, target) + loss.backward() + optimizer.step() + +def evaluator(model): + model.eval() + correct = 0 + with torch.no_grad(): + for data, target in tqdm(iterable=test_loader, desc='Test'): + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + + +if __name__ == '__main__': + # model = MobileNetV2(n_class=10).to(device) + model = VGG().to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + scheduler = MultiStepLR(optimizer, milestones=[50, 75], gamma=0.1) + criterion = torch.nn.CrossEntropyLoss() + + for i in range(100): + trainer(model, optimizer, criterion, i) + pre_best_acc = evaluator(model) + + dummy_input = torch.rand(10, 3, 32, 32).to(device) + pre_flops, pre_params, _ = count_flops_params(model, dummy_input) + + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5, 'max_sparsity_per_layer': 0.8}] + + # if you just want to keep the final result as the best result, you can pass evaluator as None. + # or the result with the highest score (given by evaluator) will be the best result. + ddpg_params = {'hidden1': 300, 'hidden2': 300, 'lr_c': 1e-3, 'lr_a': 1e-4, 'warmup': 100, 'discount': 1., 'bsize': 64, + 'rmsize': 100, 'window_length': 1, 'tau': 0.01, 'init_delta': 0.5, 'delta_decay': 0.99, 'max_episode_length': 1e9, 'epsilon': 50000} + pruner = AMCPruner(400, model, config_list, dummy_input, evaluator, finetuner=finetuner, ddpg_params=ddpg_params, target='flops') + pruner.compress() + _, model, masks, best_acc, _ = pruner.get_best_result() + flops, params, _ = count_flops_params(model, dummy_input) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/auto_compress_pruner.py b/examples/model_compress/pruning/v2/auto_compress_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..b19a137a24379a995f84898696d85f0dc208be1b --- /dev/null +++ b/examples/model_compress/pruning/v2/auto_compress_pruner.py @@ -0,0 +1,94 @@ +import sys +from tqdm import tqdm + +import torch +from torchvision import datasets, transforms + +import nni +from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) +criterion = torch.nn.CrossEntropyLoss() + +epoch = 0 + +def trainer(model, optimizer, criterion): + global epoch + model.train() + for data, target in tqdm(iterable=train_loader, desc='Total Epoch 
{}'.format(epoch)): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + epoch = epoch + 1 + +def finetuner(model): + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + trainer(model, optimizer, criterion) + +def evaluator(model): + model.eval() + correct = 0 + with torch.no_grad(): + for data, target in tqdm(iterable=test_loader, desc='Test'): + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + + +if __name__ == '__main__': + model = VGG().to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + + # pre-train the model + for _ in range(10): + trainer(model, optimizer, criterion) + + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}] + dummy_input = torch.rand(10, 3, 32, 32).to(device) + + # make sure you have used nni.trace to wrap the optimizer class before initialize + traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) + admm_params = { + 'trainer': trainer, + 'traced_optimizer': traced_optimizer, + 'criterion': criterion, + 'iterations': 10, + 'training_epochs': 1 + } + sa_params = { + 'evaluator': evaluator + } + pruner = AutoCompressPruner(model, config_list, 10, admm_params, sa_params, keep_intermediate_result=True, finetuner=finetuner) + pruner.compress() + _, model, masks, _, _ = pruner.get_best_result() diff --git a/examples/model_compress/pruning/v2/fpgm_pruning_torch.py b/examples/model_compress/pruning/v2/fpgm_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..335ffb3e78cdbfc3f6d90d1d8c9933d4b6d1df6b --- /dev/null +++ b/examples/model_compress/pruning/v2/fpgm_pruning_torch.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported fpgm pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. 
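+
+An illustrative invocation (the flags match the argparse options defined below):
+    python fpgm_pruning_torch.py --pretrain-epochs 20 --fine-tune-epochs 20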
+ +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import FPGMPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + g_epoch += 1 + +def evaluator(model): + model.eval() + correct = 0.0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + +def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160): + optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay) + scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1) + return optimizer, scheduler + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Example for model comporession') + parser.add_argument('--pretrain-epochs', type=int, default=20, + help='number of epochs to pretrain the model') + parser.add_argument('--fine-tune-epochs', type=int, default=20, + help='number of epochs to fine tune the model') + args = parser.parse_args() + + print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50) + model = VGG().to(device) + optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs) + criterion = torch.nn.CrossEntropyLoss() + pre_best_acc = 0.0 + best_state_dict = None + + for i in range(args.pretrain_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + acc = evaluator(model) + if acc > pre_best_acc: + pre_best_acc = acc + best_state_dict = model.state_dict() + print("Best accuracy: {}".format(pre_best_acc)) + model.load_state_dict(best_state_dict) + pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + g_epoch = 0 + + # Start to prune and speedup + print('\n' + '=' * 50 + ' 
START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50) + config_list = [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'] + }] + pruner = FPGMPruner(model, config_list) + _, masks = pruner.compress() + pruner.show_pruned_weights() + pruner._unwrap_model() + ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model() + print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50) + evaluator(model) + + # Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage. + print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + + best_acc = 0.0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/iterative_pruning_torch.py b/examples/model_compress/pruning/v2/iterative_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..bf09678592160f5eddc08280ae7527047a684c37 --- /dev/null +++ b/examples/model_compress/pruning/v2/iterative_pruning_torch.py @@ -0,0 +1,138 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported iterative pruning algorithms. +In this example, we show the end-to-end iterative pruning process: pre-training -> pruning -> fine-tuning. 
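+
+An illustrative invocation (the flags match the argparse options defined below):
+    python iterative_pruning_torch.py --pruner lottery --total-iteration 10 --pruning-algo l1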
+ +''' +import sys +import argparse +from tqdm import tqdm + +import torch +from torchvision import datasets, transforms + +from nni.algorithms.compression.v2.pytorch.pruning import ( + LinearPruner, + AGPPruner, + LotteryTicketPruner +) + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) +criterion = torch.nn.CrossEntropyLoss() + +def trainer(model, optimizer, criterion, epoch): + model.train() + for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def finetuner(model): + model.train() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + for data, target in tqdm(iterable=train_loader, desc='Epoch PFs'): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def evaluator(model): + model.eval() + correct = 0 + with torch.no_grad(): + for data, target in tqdm(iterable=test_loader, desc='Test'): + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Iterative Example for model comporession') + parser.add_argument('--pruner', type=str, default='linear', + choices=['linear', 'agp', 'lottery'], + help='pruner to use') + parser.add_argument('--pretrain-epochs', type=int, default=10, + help='number of epochs to pretrain the model') + parser.add_argument('--total-iteration', type=int, default=10, + help='number of iteration to iteratively prune the model') + parser.add_argument('--pruning-algo', type=str, default='l1', + choices=['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', + 'mean_activation', 'taylorfo', 'admm'], + help='algorithm to evaluate weights to prune') + parser.add_argument('--speed-up', type=bool, default=False, + help='Whether to speed-up the pruned model') + parser.add_argument('--reset-weight', type=bool, default=True, + help='Whether to reset weight during each iteration') + + args = parser.parse_args() + + model = VGG().to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + + # pre-train the model + for i in range(args.pretrain_epochs): + trainer(model, optimizer, criterion, i) + evaluator(model) + + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + dummy_input = torch.rand(10, 3, 
32, 32).to(device) + + # if you just want to keep the final result as the best result, you can pass evaluator as None. + # or the result with the highest score (given by evaluator) will be the best result. + kw_args = {'pruning_algorithm': args.pruning_algo, + 'total_iteration': args.total_iteration, + 'evaluator': None, + 'finetuner': finetuner} + + if args.speed_up: + kw_args['speed_up'] = args.speed_up + kw_args['dummy_input'] = torch.rand(10, 3, 32, 32).to(device) + + if args.pruner == 'linear': + iterative_pruner = LinearPruner + elif args.pruner == 'agp': + iterative_pruner = AGPPruner + elif args.pruner == 'lottery': + kw_args['reset_weight'] = args.reset_weight + iterative_pruner = LotteryTicketPruner + + pruner = iterative_pruner(model, config_list, **kw_args) + pruner.compress() + _, model, masks, _, _ = pruner.get_best_result() + evaluator(model) diff --git a/examples/model_compress/pruning/v2/level_pruning_torch.py b/examples/model_compress/pruning/v2/level_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..1df8f66ab447545715f1ad7111c9580cf9bb2ad0 --- /dev/null +++ b/examples/model_compress/pruning/v2/level_pruning_torch.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported level pruning algorithm. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. + +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import LevelPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item()))
+    g_epoch += 1
+
+def evaluator(model):
+    model.eval()
+    correct = 0.0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+    acc = 100 * correct / len(test_loader.dataset)
+    print('Accuracy: {}%\n'.format(acc))
+    return acc
+
+def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):
+    optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)
+    scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)
+    return optimizer, scheduler
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='PyTorch Example for model compression')
+    parser.add_argument('--pretrain-epochs', type=int, default=20,
+                        help='number of epochs to pretrain the model')
+    parser.add_argument('--fine-tune-epochs', type=int, default=20,
+                        help='number of epochs to fine tune the model')
+    args = parser.parse_args()
+
+    print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)
+    model = VGG().to(device)
+    optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)
+    criterion = torch.nn.CrossEntropyLoss()
+    pre_best_acc = 0.0
+    best_state_dict = None
+
+    for i in range(args.pretrain_epochs):
+        trainer(model, optimizer, criterion)
+        scheduler.step()
+        acc = evaluator(model)
+        if acc > pre_best_acc:
+            pre_best_acc = acc
+            best_state_dict = model.state_dict()
+    print("Best accuracy: {}".format(pre_best_acc))
+    model.load_state_dict(best_state_dict)
+    pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
+
+    # Start to prune and speedup
+    print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
+    config_list = [{
+        'sparsity': 0.5,
+        'op_types': ['default']
+    }]
+    pruner = LevelPruner(model, config_list)
+    _, masks = pruner.compress()
+    pruner.show_pruned_weights()
+
+    # Fine-grained pruning does not need model speedup
+    print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER PRUNING ' + '=' * 50)
+    evaluator(model)
+
+    # The optimizer used in the pruner might be patched, so it is recommended to create a new optimizer for the fine-tuning stage.
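+
+    # A minimal sanity check (an illustrative addition, assuming `masks` maps each
+    # wrapped layer name to a dict with a binary 'weight' tensor, as returned by
+    # `pruner.compress()` above): count the zeros to confirm the requested sparsity.
+    for layer_name, layer_masks in masks.items():
+        weight_mask = layer_masks['weight']
+        real_sparsity = 1.0 - weight_mask.count_nonzero().item() / weight_mask.numel()
+        print(f'{layer_name}: mask sparsity {real_sparsity:.2f}')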
+ print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + + best_acc = 0.0 + g_epoch = 0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/movement_pruning_glue.py b/examples/model_compress/pruning/v2/movement_pruning_glue.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0418d6203f529be32683b924f330169302f657 --- /dev/null +++ b/examples/model_compress/pruning/v2/movement_pruning_glue.py @@ -0,0 +1,125 @@ +import functools +from tqdm import tqdm + +import torch +from torch.optim import Adam +from torch.utils.data import DataLoader + +from datasets import load_metric, load_dataset +from transformers import ( + BertForSequenceClassification, + BertTokenizerFast, + DataCollatorWithPadding, + set_seed +) + +import nni +from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner + + +task_to_keys = { + "cola": ("sentence", None), + "mnli": ("premise", "hypothesis"), + "mrpc": ("sentence1", "sentence2"), + "qnli": ("question", "sentence"), + "qqp": ("question1", "question2"), + "rte": ("sentence1", "sentence2"), + "sst2": ("sentence", None), + "stsb": ("sentence1", "sentence2"), + "wnli": ("sentence1", "sentence2"), +} + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +gradient_accumulation_steps = 16 + +# a fake criterion because huggingface output already has loss +def criterion(input, target): + return input.loss + +def trainer(model, optimizer, criterion, train_dataloader): + model.train() + counter = 0 + for batch in tqdm(train_dataloader): + counter += 1 + batch.to(device) + optimizer.zero_grad() + outputs = model(**batch) + # pruner may wrap the criterion, for example, loss = origin_loss + norm(weight), so call criterion to get loss here + loss = criterion(outputs, None) + loss = loss / gradient_accumulation_steps + loss.backward() + if counter % gradient_accumulation_steps == 0 or counter == len(train_dataloader): + optimizer.step() + if counter % 16000 == 0: + print('Step {}: {}'.format(counter // gradient_accumulation_steps, evaluator(model, metric, is_regression, validate_dataloader))) + +def evaluator(model, metric, is_regression, eval_dataloader): + model.eval() + for batch in tqdm(eval_dataloader): + batch.to(device) + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() + metric.add_batch( + predictions=predictions, + references=batch["labels"], + ) + return metric.compute() + +if __name__ == '__main__': + task_name = 'mnli' + is_regression = False + num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2) + train_batch_size = 8 + eval_batch_size = 8 + + set_seed(1024) + + tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') + sentence1_key, sentence2_key = task_to_keys[task_name] + + # used to preprocess the raw data + def preprocess_function(examples): + # Tokenize the texts + args = ( + (examples[sentence1_key],) if sentence2_key is None else 
(examples[sentence1_key], examples[sentence2_key]) + ) + result = tokenizer(*args, padding=False, max_length=128, truncation=True) + + if "label" in examples: + # In all cases, rename the column to labels because the model will expect that. + result["labels"] = examples["label"] + return result + + raw_datasets = load_dataset('glue', task_name, cache_dir='./data') + processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names) + + train_dataset = processed_datasets['train'] + validate_dataset = processed_datasets['validation_matched' if task_name == "mnli" else 'validation'] + + data_collator = DataCollatorWithPadding(tokenizer) + train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=train_batch_size) + validate_dataloader = DataLoader(validate_dataset, collate_fn=data_collator, batch_size=eval_batch_size) + + metric = load_metric("glue", task_name) + + model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=num_labels).to(device) + + print('Initial: {}'.format(evaluator(model, metric, is_regression, validate_dataloader))) + + config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}] + p_trainer = functools.partial(trainer, train_dataloader=train_dataloader) + + # make sure you have used nni.trace to wrap the optimizer class before initialize + traced_optimizer = nni.trace(Adam)(model.parameters(), lr=2e-5) + pruner = MovementPruner(model, config_list, p_trainer, traced_optimizer, criterion, training_epochs=10, + warm_up_step=3000, cool_down_beginning_step=27000) + + _, masks = pruner.compress() + pruner.show_pruned_weights() + + print('Final: {}'.format(evaluator(model, metric, is_regression, validate_dataloader))) + + optimizer = Adam(model.parameters(), lr=2e-5) + trainer(model, optimizer, criterion, train_dataloader) + print('After 1 epoch finetuning: {}'.format(evaluator(model, metric, is_regression, validate_dataloader))) diff --git a/examples/model_compress/pruning/v2/norm_pruning_torch.py b/examples/model_compress/pruning/v2/norm_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..a87510e84f859f9f081608a5d8571c7a6d35bfa0 --- /dev/null +++ b/examples/model_compress/pruning/v2/norm_pruning_torch.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported l1norm and l2norm pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. 
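+
+An illustrative invocation (the flags match the argparse options defined below):
+    python norm_pruning_torch.py --pruner l2norm --pretrain-epochs 20 --fine-tune-epochs 20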
+ +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import L1NormPruner, L2NormPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + g_epoch += 1 + +def evaluator(model): + model.eval() + correct = 0.0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + +def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160): + optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay) + scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1) + return optimizer, scheduler + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch Example for model comporession') + parser.add_argument('--pruner', type=str, default='l1norm', + choices=['l1norm', 'l2norm'], + help='pruner to use') + parser.add_argument('--pretrain-epochs', type=int, default=20, + help='number of epochs to pretrain the model') + parser.add_argument('--fine-tune-epochs', type=int, default=20, + help='number of epochs to fine tune the model') + args = parser.parse_args() + + print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50) + model = VGG().to(device) + optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs) + criterion = torch.nn.CrossEntropyLoss() + pre_best_acc = 0.0 + best_state_dict = None + + for i in range(args.pretrain_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + acc = evaluator(model) + if acc > pre_best_acc: + pre_best_acc = acc + best_state_dict = model.state_dict() + print("Best accuracy: {}".format(pre_best_acc)) + model.load_state_dict(best_state_dict) + pre_flops, pre_params, _ = 
count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + g_epoch = 0 + + # Start to prune and speedup + print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50) + config_list = [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'] + }] + if 'l1' in args.pruner: + pruner = L1NormPruner(model, config_list) + else: + pruner = L2NormPruner(model, config_list) + _, masks = pruner.compress() + pruner.show_pruned_weights() + pruner._unwrap_model() + ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model() + print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50) + evaluator(model) + + # Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage. + print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + + best_acc = 0.0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/scheduler_torch.py b/examples/model_compress/pruning/v2/scheduler_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..711e651cb9a4439c25d98a61bd7fd7d1b533cd1d --- /dev/null +++ b/examples/model_compress/pruning/v2/scheduler_torch.py @@ -0,0 +1,100 @@ +import sys +from tqdm import tqdm + +import torch +from torchvision import datasets, transforms + +from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner +from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator +from nni.algorithms.compression.v2.pytorch.pruning.basic_scheduler import PruningScheduler + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) +criterion = torch.nn.CrossEntropyLoss() + +def trainer(model, optimizer, criterion, epoch): + model.train() + for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def finetuner(model): + model.train() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + for data, target in tqdm(iterable=train_loader, desc='Epoch PFs'): + data, target = data.to(device), target.to(device) 
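+        # The rest of the loop body is the standard PyTorch training step:
+        # clear stale gradients, run the forward pass, compute the loss,
+        # back-propagate, and apply the optimizer update.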
+ optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + +def evaluator(model): + model.eval() + correct = 0 + with torch.no_grad(): + for data, target in tqdm(iterable=test_loader, desc='Test'): + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + acc = 100 * correct / len(test_loader.dataset) + print('Accuracy: {}%\n'.format(acc)) + return acc + + +if __name__ == '__main__': + model = VGG().to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + criterion = torch.nn.CrossEntropyLoss() + + # pre-train the model + for i in range(5): + trainer(model, optimizer, criterion, i) + + # No need to pass model and config_list to pruner during initializing when using scheduler. + pruner = L1NormPruner(None, None) + + # you can specify the log_dir, all intermediate results and best result will save under this folder. + # if you don't want to keep intermediate results, you can set `keep_intermediate_result=False`. + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + task_generator = AGPTaskGenerator(10, model, config_list, log_dir='.', keep_intermediate_result=True) + + dummy_input = torch.rand(10, 3, 32, 32).to(device) + + # if you just want to keep the final result as the best result, you can pass evaluator as None. + # or the result with the highest score (given by evaluator) will be the best result. + + # scheduler = PruningScheduler(pruner, task_generator, finetuner=finetuner, speed_up=True, dummy_input=dummy_input, evaluator=evaluator) + scheduler = PruningScheduler(pruner, task_generator, finetuner=finetuner, speed_up=True, dummy_input=dummy_input, evaluator=None, reset_weight=False) + + scheduler.compress() + + _, model, masks, _, _ = scheduler.get_best_result() diff --git a/examples/model_compress/pruning/v2/simple_pruning_torch.py b/examples/model_compress/pruning/v2/simple_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..8871d9583d459c7458ee2c0875a0238307bc29fa --- /dev/null +++ b/examples/model_compress/pruning/v2/simple_pruning_torch.py @@ -0,0 +1,88 @@ +import sys +from tqdm import tqdm + +import torch +from torchvision import datasets, transforms + +from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner +from nni.compression.pytorch.speedup import ModelSpeedup + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) +criterion = torch.nn.CrossEntropyLoss() + +def trainer(model, optimizer, criterion, epoch): + model.train() + for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = 
model(data)
+        loss = criterion(output, target)
+        loss.backward()
+        optimizer.step()
+
+def evaluator(model):
+    model.eval()
+    correct = 0
+    with torch.no_grad():
+        for data, target in tqdm(iterable=test_loader, desc='Test'):
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+    acc = 100 * correct / len(test_loader.dataset)
+    print('Accuracy: {}%\n'.format(acc))
+    return acc
+
+
+if __name__ == '__main__':
+    model = VGG().to(device)
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    criterion = torch.nn.CrossEntropyLoss()
+
+    print('\nPre-train the model:')
+    for i in range(5):
+        trainer(model, optimizer, criterion, i)
+        evaluator(model)
+
+    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
+    pruner = L1NormPruner(model, config_list)
+    _, masks = pruner.compress()
+
+    print('\nThe accuracy with masks:')
+    evaluator(model)
+
+    pruner._unwrap_model()
+    ModelSpeedup(model, dummy_input=torch.rand(10, 3, 32, 32).to(device), masks_file=masks).speedup_model()
+
+    print('\nThe accuracy after speed up:')
+    evaluator(model)
+
+    # A new optimizer is needed because the modules in the model are replaced during speedup.
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    print('\nFinetune the model after speed up:')
+    for i in range(5):
+        trainer(model, optimizer, criterion, i)
+        evaluator(model)
diff --git a/examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py b/examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b33736f9d91500ec566cf4d428289caa2f8e832
--- /dev/null
+++ b/examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py
@@ -0,0 +1,109 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+'''
+NNI example for the simulated annealing pruning algorithm.
+In this example, we show the end-to-end iterative pruning process: pre-training -> pruning -> fine-tuning.
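+
+An illustrative invocation (the flags match the argparse options defined below):
+    python simulated_anealing_pruning_torch.py --pruning-algo l1 --cool-down-rate 0.9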
+
+'''
+import sys
+import argparse
+from tqdm import tqdm
+
+import torch
+from torchvision import datasets, transforms
+
+from nni.algorithms.compression.v2.pytorch.pruning import SimulatedAnnealingPruner
+
+from pathlib import Path
+sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
+from cifar10.vgg import VGG
+
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
+
+train_loader = torch.utils.data.DataLoader(
+    datasets.CIFAR10('./data', train=True, transform=transforms.Compose([
+        transforms.RandomHorizontalFlip(),
+        transforms.RandomCrop(32, 4),
+        transforms.ToTensor(),
+        normalize,
+    ]), download=True),
+    batch_size=128, shuffle=True)
+
+test_loader = torch.utils.data.DataLoader(
+    datasets.CIFAR10('./data', train=False, transform=transforms.Compose([
+        transforms.ToTensor(),
+        normalize,
+    ])),
+    batch_size=128, shuffle=False)
+criterion = torch.nn.CrossEntropyLoss()
+
+def trainer(model, optimizer, criterion, epoch):
+    model.train()
+    for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)):
+        data, target = data.to(device), target.to(device)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = criterion(output, target)
+        loss.backward()
+        optimizer.step()
+
+def finetuner(model):
+    model.train()
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    criterion = torch.nn.CrossEntropyLoss()
+    for data, target in tqdm(iterable=train_loader, desc='Epoch PFs'):
+        data, target = data.to(device), target.to(device)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = criterion(output, target)
+        loss.backward()
+        optimizer.step()
+
+def evaluator(model):
+    model.eval()
+    correct = 0
+    with torch.no_grad():
+        for data, target in tqdm(iterable=test_loader, desc='Test'):
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+    acc = 100 * correct / len(test_loader.dataset)
+    print('Accuracy: {}%\n'.format(acc))
+    return acc
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='PyTorch Iterative Example for model compression')
+    parser.add_argument('--pretrain-epochs', type=int, default=10,
+                        help='number of epochs to pretrain the model')
+    parser.add_argument('--pruning-algo', type=str, default='l1',
+                        choices=['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz',
+                                 'mean_activation', 'taylorfo', 'admm'],
+                        help='algorithm to evaluate weights to prune')
+    parser.add_argument('--cool-down-rate', type=float, default=0.9,
+                        help='Cool down rate of the temperature.')
+
+    args = parser.parse_args()
+
+    model = VGG().to(device)
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    criterion = torch.nn.CrossEntropyLoss()
+
+    # pre-train the model
+    for i in range(args.pretrain_epochs):
+        trainer(model, optimizer, criterion, i)
+    evaluator(model)
+
+    config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]
+
+    # The evaluator passed to 'SimulatedAnnealingPruner' must not be None.
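+    # The annealing search proposes candidate per-layer sparsity allocations and
+    # uses the evaluator's score to accept or reject them as the temperature cools,
+    # so the search has no objective without an evaluator.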
+ pruner = SimulatedAnnealingPruner(model, config_list, pruning_algorithm=args.pruning_algo, + evaluator=evaluator, cool_down_rate=args.cool_down_rate, finetuner=finetuner) + pruner.compress() + _, model, masks, _, _ = pruner.get_best_result() + evaluator(model) diff --git a/examples/model_compress/pruning/v2/slim_pruning_torch.py b/examples/model_compress/pruning/v2/slim_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..33437ac106ddcacc7a35d08c2a47c765d95908e8 --- /dev/null +++ b/examples/model_compress/pruning/v2/slim_pruning_torch.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported slim pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> speedup -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. + +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import SlimPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item()))
+    g_epoch += 1
+
+def evaluator(model):
+    model.eval()
+    correct = 0.0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+    acc = 100 * correct / len(test_loader.dataset)
+    print('Accuracy: {}%\n'.format(acc))
+    return acc
+
+def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):
+    optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)
+    scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)
+    return optimizer, scheduler
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='PyTorch Example for model compression')
+    parser.add_argument('--pretrain-epochs', type=int, default=20,
+                        help='number of epochs to pretrain the model')
+    parser.add_argument('--fine-tune-epochs', type=int, default=20,
+                        help='number of epochs to fine tune the model')
+    args = parser.parse_args()
+
+    print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)
+    model = VGG().to(device)
+    optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)
+    criterion = torch.nn.CrossEntropyLoss()
+    pre_best_acc = 0.0
+    best_state_dict = None
+
+    for i in range(args.pretrain_epochs):
+        trainer(model, optimizer, criterion)
+        scheduler.step()
+        acc = evaluator(model)
+        if acc > pre_best_acc:
+            pre_best_acc = acc
+            best_state_dict = model.state_dict()
+    print("Best accuracy: {}".format(pre_best_acc))
+    model.load_state_dict(best_state_dict)
+    pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
+    g_epoch = 0
+
+    # Start to prune and speedup
+    print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
+    config_list = [{
+        'total_sparsity': 0.5,
+        'op_types': ['BatchNorm2d'],
+        'max_sparsity_per_layer': 0.9
+    }]
+
+    # make sure you have wrapped the optimizer class with nni.trace before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1, scale=0.0001, mode='global')
+    _, masks = pruner.compress()
+    pruner.show_pruned_weights()
+    pruner._unwrap_model()
+    ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()
+    print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
+    evaluator(model)
+
+    # The optimizer used in the pruner might be patched, so it is recommended to create a new optimizer for the fine-tuning stage.
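+
+    # A quick structural check (an illustrative addition): after ModelSpeedup the
+    # pruned BatchNorm2d layers are physically smaller, which can be confirmed by
+    # printing the channel count that remains in each one.
+    for module_name, module in model.named_modules():
+        if isinstance(module, torch.nn.BatchNorm2d):
+            print(f'{module_name}: {module.num_features} channels remain')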
+ print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + best_acc = 0.0 + g_epoch = 0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/pruning/v2/taylorfo_pruning_torch.py b/examples/model_compress/pruning/v2/taylorfo_pruning_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..59d1b17ef4830dab8f32cad0a0241da0303bb7b3 --- /dev/null +++ b/examples/model_compress/pruning/v2/taylorfo_pruning_torch.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +NNI example for supported TaylorFOWeight pruning algorithms. +In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning. +Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required. + +''' +import argparse +import sys + +import torch +from torchvision import datasets, transforms +from torch.optim.lr_scheduler import MultiStepLR + +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import TaylorFOWeightPruner + +from pathlib import Path +sys.path.append(str(Path(__file__).absolute().parents[2] / 'models')) +from cifar10.vgg import VGG + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +g_epoch = 0 + +train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=True, transform=transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomCrop(32, 4), + transforms.ToTensor(), + normalize, + ]), download=True), + batch_size=128, shuffle=True) + +test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + normalize, + ])), + batch_size=128, shuffle=False) + +def trainer(model, optimizer, criterion): + global g_epoch + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + if batch_idx and batch_idx % 100 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + g_epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item()))
+    g_epoch += 1
+
+def evaluator(model):
+    model.eval()
+    correct = 0.0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+    acc = 100 * correct / len(test_loader.dataset)
+    print('Accuracy: {}%\n'.format(acc))
+    return acc
+
+def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):
+    optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)
+    scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)
+    return optimizer, scheduler
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='PyTorch Example for model compression')
+    parser.add_argument('--pretrain-epochs', type=int, default=20,
+                        help='number of epochs to pretrain the model')
+    parser.add_argument('--fine-tune-epochs', type=int, default=20,
+                        help='number of epochs to fine tune the model')
+    args = parser.parse_args()
+
+    print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)
+    model = VGG().to(device)
+    optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)
+    criterion = torch.nn.CrossEntropyLoss()
+    pre_best_acc = 0.0
+    best_state_dict = None
+
+    for i in range(args.pretrain_epochs):
+        trainer(model, optimizer, criterion)
+        scheduler.step()
+        acc = evaluator(model)
+        if acc > pre_best_acc:
+            pre_best_acc = acc
+            best_state_dict = model.state_dict()
+    print("Best accuracy: {}".format(pre_best_acc))
+    model.load_state_dict(best_state_dict)
+    pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
+    g_epoch = 0
+
+    # Start to prune and speedup
+    print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
+    config_list = [{
+        'total_sparsity': 0.5,
+        'op_types': ['Conv2d'],
+    }]
+
+    # make sure you have wrapped the optimizer class with nni.trace before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
+    _, masks = pruner.compress()
+    pruner.show_pruned_weights()
+    pruner._unwrap_model()
+    ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()
+    print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
+    evaluator(model)
+
+    # The optimizer used in the pruner might be patched, so it is recommended to create a new optimizer for the fine-tuning stage.
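+
+    # An illustrative addition ('taylorfo_pruned_vgg.pth' is an arbitrary example
+    # path): the speeded-up model is a plain PyTorch module, so it can be
+    # checkpointed with the usual state_dict mechanism before fine-tuning.
+    torch.save(model.state_dict(), 'taylorfo_pruned_vgg.pth')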
+ print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50) + optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs) + + best_acc = 0.0 + g_epoch = 0 + for i in range(args.fine_tune_epochs): + trainer(model, optimizer, criterion) + scheduler.step() + best_acc = max(evaluator(model), best_acc) + flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device)) + print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%') + print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%') diff --git a/examples/model_compress/quantization/BNN_quantizer_cifar10.py b/examples/model_compress/quantization/BNN_quantizer_cifar10.py new file mode 100644 index 0000000000000000000000000000000000000000..f6d4c27316470ddca37b41bd0c7b3fd0fdaa6369 --- /dev/null +++ b/examples/model_compress/quantization/BNN_quantizer_cifar10.py @@ -0,0 +1,154 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import datasets, transforms +from nni.algorithms.compression.pytorch.quantization import BNNQuantizer + + +class VGG_Cifar10(nn.Module): + def __init__(self, num_classes=1000): + super(VGG_Cifar10, self).__init__() + self.features = nn.Sequential( + nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(128, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True), + + nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False), + nn.MaxPool2d(kernel_size=2, stride=2), + nn.BatchNorm2d(128, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True), + + nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(256, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True), + + + nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False), + nn.MaxPool2d(kernel_size=2, stride=2), + nn.BatchNorm2d(256, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True), + + nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(512, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True), + + + nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False), + nn.MaxPool2d(kernel_size=2, stride=2), + nn.BatchNorm2d(512, eps=1e-4, momentum=0.1), + nn.Hardtanh(inplace=True) + ) + + self.classifier = nn.Sequential( + nn.Linear(512 * 4 * 4, 1024, bias=False), + nn.BatchNorm1d(1024), + nn.Hardtanh(inplace=True), + nn.Linear(1024, 1024, bias=False), + nn.BatchNorm1d(1024), + nn.Hardtanh(inplace=True), + nn.Linear(1024, num_classes), # do not quantize output + nn.BatchNorm1d(num_classes, affine=False) + ) + + + def forward(self, x): + x = self.features(x) + x = x.view(-1, 512 * 4 * 4) + x = self.classifier(x) + return x + + +def train(model, device, train_loader, optimizer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.cross_entropy(output, target) + loss.backward() + optimizer.step() + for name, param in model.named_parameters(): + if name.endswith('old_weight'): + param = param.clamp(-1, 1) + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(train_loader), loss.item())) + + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = 
data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + acc = 100 * correct / len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%\n'.format( + test_loss, acc)) + return acc + +def adjust_learning_rate(optimizer, epoch): + update_list = [55, 100, 150, 200, 400, 600] + if epoch in update_list: + for param_group in optimizer.param_groups: + param_group['lr'] = param_group['lr'] * 0.1 + return + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + train_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data.cifar10', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + ])), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + ])), + batch_size=200, shuffle=False) + + model = VGG_Cifar10(num_classes=10) + model.to(device) + + configure_list = [{ + 'quant_types': ['weight'], + 'quant_bits': 1, + 'op_types': ['Conv2d', 'Linear'], + 'op_names': ['features.3', 'features.7', 'features.10', 'features.14', 'classifier.0', 'classifier.3'] + }, { + 'quant_types': ['output'], + 'quant_bits': 1, + 'op_types': ['Hardtanh'], + 'op_names': ['features.6', 'features.9', 'features.13', 'features.16', 'features.20', 'classifier.2', 'classifier.5'] + }] + + quantizer = BNNQuantizer(model, configure_list) + model = quantizer.compress() + + print('=' * 10 + 'train' + '=' * 10) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-2) + best_top1 = 0 + for epoch in range(400): + print('# Epoch {} #'.format(epoch)) + train(model, device, train_loader, optimizer) + adjust_learning_rate(optimizer, epoch) + top1 = test(model, device, test_loader) + if top1 > best_top1: + best_top1 = top1 + print(best_top1) + + +if __name__ == '__main__': + main() diff --git a/examples/model_compress/quantization/DoReFaQuantizer_torch_mnist.py b/examples/model_compress/quantization/DoReFaQuantizer_torch_mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..10de85257086233ddd8a3c916c32a67e371e6042 --- /dev/null +++ b/examples/model_compress/quantization/DoReFaQuantizer_torch_mnist.py @@ -0,0 +1,71 @@ +import torch +import torch.nn.functional as F +from torchvision import datasets, transforms +from nni.algorithms.compression.pytorch.quantization import DoReFaQuantizer + +import sys +sys.path.append('../models') +from mnist.naive import NaiveModel + + +def train(model, quantizer, device, train_loader, optimizer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(train_loader), loss.item())) + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target,
reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%\n'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=trans), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=trans), + batch_size=1000, shuffle=True) + + model = NaiveModel() + model = model.to(device) + configure_list = [{ + 'quant_types': ['weight'], + 'quant_bits': { + 'weight': 8, + }, # a plain `int` also works here because all `quant_types` share the same bit length + 'op_types':['Conv2d', 'Linear'] + }] + quantizer = DoReFaQuantizer(model, configure_list) + quantizer.compress() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.5) + for epoch in range(10): + print('# Epoch {} #'.format(epoch)) + train(model, quantizer, device, train_loader, optimizer) + test(model, device, test_loader) + + +if __name__ == '__main__': + main() diff --git a/examples/model_compress/quantization/LSQ_torch_quantizer.py b/examples/model_compress/quantization/LSQ_torch_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..449a4e179cc9d1a15046bca1d5220a297ed030a8 --- /dev/null +++ b/examples/model_compress/quantization/LSQ_torch_quantizer.py @@ -0,0 +1,142 @@ +import torch +import torch.nn.functional as F +from torchvision import datasets, transforms +from nni.algorithms.compression.pytorch.quantization import LsqQuantizer +from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT + + +class Mnist(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 20, 5, 1) + self.conv2 = torch.nn.Conv2d(20, 50, 5, 1) + self.fc1 = torch.nn.Linear(4 * 4 * 50, 500) + self.fc2 = torch.nn.Linear(500, 10) + self.relu1 = torch.nn.ReLU6() + self.relu2 = torch.nn.ReLU6() + self.relu3 = torch.nn.ReLU6() + self.max_pool1 = torch.nn.MaxPool2d(2, 2) + self.max_pool2 = torch.nn.MaxPool2d(2, 2) + + def forward(self, x): + x = self.relu1(self.conv1(x)) + x = self.max_pool1(x) + x = self.relu2(self.conv2(x)) + x = self.max_pool2(x) + x = x.view(-1, 4 * 4 * 50) + x = self.relu3(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(model, quantizer, device, train_loader, optimizer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(train_loader), loss.item())) + + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy:
{}%\n'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + + +def test_trt(engine, test_loader): + test_loss = 0 + correct = 0 + time_elapsed = 0 + for data, target in test_loader: + output, time = engine.inference(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + time_elapsed += time + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + print("Inference elapsed_time (whole dataset): {}s".format(time_elapsed)) + + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=trans), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=trans), + batch_size=1000, shuffle=True) + + model = Mnist() + configure_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv1'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8, }, + 'op_names': ['relu1'] + }, { + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['relu2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['max_pool2'] + } + ] + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + quantizer = LsqQuantizer(model, configure_list, optimizer) + quantizer.compress() + + model.to(device) + for epoch in range(40): + print('# Epoch {} #'.format(epoch)) + train(model, quantizer, device, train_loader, optimizer) + test(model, device, test_loader) + + model_path = "mnist_model.pth" + calibration_path = "mnist_calibration.pth" + calibration_config = quantizer.export_model(model_path, calibration_path) + + test(model, device, test_loader) + + print("calibration_config: ", calibration_config) + + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + + engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=batch_size) + engine.compress() + + test_trt(engine, test_loader) + + +if __name__ == '__main__': + main() diff --git a/examples/model_compress/quantization/QAT_torch_quantizer.py b/examples/model_compress/quantization/QAT_torch_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..fd174588c2c61aee097b7299b38b3a4fbe026837 --- /dev/null +++ b/examples/model_compress/quantization/QAT_torch_quantizer.py @@ -0,0 +1,115 @@ +import torch +import torch.nn.functional as F +from torchvision import datasets, transforms +from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer +from nni.compression.pytorch.quantization.settings import set_quant_scheme_dtype + +import sys +sys.path.append('../models') +from mnist.naive import NaiveModel + + +def train(model, device, train_loader, optimizer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss
{}'.format(100 * batch_idx / len(train_loader), loss.item())) + + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%\n'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=trans), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=trans), + batch_size=1000, shuffle=True) + + # Two things should be kept in mind when setting this configure_list: + # 1. When deploying the model on a backend, some layers will be fused into one layer. For example, consecutive + # conv + bn + relu layers will be fused into one big layer. If we want to execute the big layer in quantization + # mode, we should tell the backend the quantization information of the input, output, and weight tensors of + # the big layer, which correspond to conv's input, conv's weight and relu's output. + # 2. The same tensor should be quantized only once. For example, if a tensor is the output of layer A and the input + # of layer B, you should configure either {'quant_types': ['output'], 'op_names': ['a']} or + # {'quant_types': ['input'], 'op_names': ['b']} in the configure_list. + + configure_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv1', 'conv2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8, }, + 'op_names': ['relu1', 'relu2'] + }, { + 'quant_types': ['output', 'weight', 'input'], + 'quant_bits': {'output': 8, 'weight': 8, 'input': 8}, + 'op_names': ['fc1', 'fc2'], + }] + + # you can also set the quantization dtype and scheme layer-wise through configure_list like: + # configure_list = [{ + # 'quant_types': ['weight', 'input'], + # 'quant_bits': {'weight': 8, 'input': 8}, + # 'op_names': ['conv1', 'conv2'], + # 'quant_dtype': 'int', + # 'quant_scheme': 'per_channel_symmetric' + # }] + # For now quant_dtype's options are 'int' and 'uint'. And quant_scheme's options are per_tensor_affine, + # per_tensor_symmetric, per_channel_affine and per_channel_symmetric. + set_quant_scheme_dtype('weight', 'per_channel_symmetric', 'int') + set_quant_scheme_dtype('output', 'per_tensor_symmetric', 'int') + set_quant_scheme_dtype('input', 'per_tensor_symmetric', 'int') + + model = NaiveModel().to(device) + dummy_input = torch.randn(1, 1, 28, 28).to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + # To enable batch normalization folding in the training process, you should + # pass dummy_input to the QAT_Quantizer.
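+ # Added note (hedged): with dummy_input provided, the quantizer can trace the graph and simulate folding each BatchNorm into its preceding Conv while training, so the learned quantization parameters correspond to the fused layers a deployment backend would actually execute.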
+ quantizer = QAT_Quantizer(model, configure_list, optimizer, dummy_input=dummy_input) + quantizer.compress() + + model.to(device) + for epoch in range(40): + print('# Epoch {} #'.format(epoch)) + train(model, device, train_loader, optimizer) + test(model, device, test_loader) + + model_path = "mnist_model.pth" + calibration_path = "mnist_calibration.pth" + onnx_path = "mnist_model.onnx" + input_shape = (1, 1, 28, 28) + device = torch.device("cuda") + + calibration_config = quantizer.export_model(model_path, calibration_path, onnx_path, input_shape, device) + print("Generated calibration config is: ", calibration_config) + + +if __name__ == '__main__': + main() diff --git a/examples/model_compress/quantization/mixed_precision_speedup_mnist.py b/examples/model_compress/quantization/mixed_precision_speedup_mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..f43850f881feaf5725e44e15e63a10db29c27110 --- /dev/null +++ b/examples/model_compress/quantization/mixed_precision_speedup_mnist.py @@ -0,0 +1,152 @@ +import torch +import torch.nn.functional as F +from torchvision import datasets, transforms + +from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer +from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT + +import sys +sys.path.append('../models') +from mnist.naive import NaiveModel + + +def train(model, device, train_loader, optimizer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(train_loader), loss.item())) + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%\n'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + +def test_trt(engine, test_loader): + test_loss = 0 + correct = 0 + time_elapsed = 0 + for data, target in test_loader: + output, time = engine.inference(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + time_elapsed += time + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + print("Inference elapsed_time (whole dataset): {}s".format(time_elapsed)) + +def post_training_quantization_example(train_loader, test_loader, device): + model = NaiveModel() + + config = { + 'conv1':{'weight_bits':8, 'output_bits':8}, + 'conv2':{'weight_bits':32, 'output_bits':32}, + 'fc1':{'weight_bits':16, 'output_bits':16}, + 'fc2':{'weight_bits':8, 'output_bits':8} + } + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + + model.to(device) + for epoch in range(1): + print('# Epoch {} #'.format(epoch)) + train(model, device, train_loader, optimizer) + test(model, device, test_loader) + + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + + engine = ModelSpeedupTensorRT(model, input_shape,
config=config, calib_data_loader=train_loader, batchsize=batch_size) + engine.compress() + test_trt(engine, test_loader) + +def quantization_aware_training_example(train_loader, test_loader, device): + model = NaiveModel() + + configure_list = [{ + 'quant_types': ['input', 'weight'], + 'quant_bits': {'input':8, 'weight':8}, + 'op_names': ['conv1'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output':8}, + 'op_names': ['relu1'] + }, { + 'quant_types': ['input', 'weight'], + 'quant_bits': {'input':8, 'weight':8}, + 'op_names': ['conv2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output':8}, + 'op_names': ['relu2'] + } + ] + + # finetune the model by using QAT + # enable batchnorm folding mode + dummy_input = torch.randn(1, 1, 28, 28) + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + quantizer = QAT_Quantizer(model, configure_list, optimizer, dummy_input=dummy_input) + quantizer.compress() + + model.to(device) + for epoch in range(1): + print('# Epoch {} #'.format(epoch)) + train(model, device, train_loader, optimizer) + test(model, device, test_loader) + + model_path = "mnist_model.pth" + calibration_path = "mnist_calibration.pth" + calibration_config = quantizer.export_model(model_path, calibration_path) + + test(model, device, test_loader) + + print("calibration_config: ", calibration_config) + + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + + engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=batch_size) + engine.compress() + + test_trt(engine, test_loader) + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=trans), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=trans), + batch_size=1000, shuffle=True) + + # post-training quantization on TensorRT + post_training_quantization_example(train_loader, test_loader, device) + + # combine NNI quantization algorithm QAT with backend framework TensorRT + quantization_aware_training_example(train_loader, test_loader, device) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/examples/model_compress/quantization/observer_quantizer.py b/examples/model_compress/quantization/observer_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..cb8c59bd07de2e9f78c7f31a5b7b2bd020797775 --- /dev/null +++ b/examples/model_compress/quantization/observer_quantizer.py @@ -0,0 +1,117 @@ +import torch +import torch.nn.functional as F +from torchvision import datasets, transforms +from nni.algorithms.compression.pytorch.quantization import ObserverQuantizer +import sys +sys.path.append('../models') +from mnist.naive import NaiveModel + + +def train(model, device, train_loader, optimizer): + model.to(device) + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % 100 == 0: + print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(train_loader), loss.item())) + + +def test(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, 
target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(test_loader.dataset) + + print('Loss: {} Accuracy: {}%\n'.format( + test_loss, 100 * correct / len(test_loader.dataset))) + + +def calibration(model, device, test_loader): + model.eval() + with torch.no_grad(): + for data, _ in test_loader: + data = data.to(device) + model(data) + + +def main(): + torch.manual_seed(0) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=True, download=True, transform=trans), + batch_size=64, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('data', train=False, transform=trans), + batch_size=1000, shuffle=True) + + model = NaiveModel() + configure_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv1'], + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8, }, + 'op_names': ['relu1'], + }, { + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv2'], + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['relu2'], + }, { + 'quant_types': ['output'], + 'quant_bits': {'output': 8}, + 'op_names': ['max_pool2'], + } + ] + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + + # Train the model to get a baseline performance + for epoch in range(5): + print('# Epoch {} #'.format(epoch)) + train(model, device, train_loader, optimizer) + + test(model, device, test_loader) + + # Construct the ObserverQuantizer. Note that currently ObserverQuantizer only works + # in evaluation mode. + quantizer = ObserverQuantizer(model.eval(), configure_list, optimizer) + # Use the test data set to do calibration; this will not change the model parameters + calibration(model, device, test_loader) + # Obtain the quantization information and switch the model to "accuracy verification" mode + quantizer.compress() + + # Measure the accuracy of the quantized model.
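+ # Added note (hedged): after compress(), forward passes run with simulated quantization using the ranges observed during calibration, so the accuracy printed below reflects the 8-bit settings from configure_list.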
+ test(model, device, test_loader) + + model_path = "mnist_model.pth" + calibration_path = "mnist_calibration.pth" + calibration_config = quantizer.export_model(model_path, calibration_path) + print("calibration_config: ", calibration_config) + + # For now the quantization settings of ObserverQuantizer do not match TensorRT's, + # so TensorRT conversion is not supported + # current settings: + # weight : per_tensor_symmetric, qint8 + # activation : per_tensor_affine, quint8, reduce_range=True + + +if __name__ == '__main__': + main() diff --git a/examples/nas/.gitignore b/examples/nas/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f055590651c99a2b5ca30d2660ae0e07810579f9 --- /dev/null +++ b/examples/nas/.gitignore @@ -0,0 +1,10 @@ +data +checkpoints +runs +nni_auto_gen_search_space.json +checkpoint.json +_generated_model.py +_generated_model_*.py +_generated_model +generated +lightning_logs diff --git a/examples/nas/benchmarks/.gitignore b/examples/nas/benchmarks/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d04628aeea45ba822aedc594b045f710a92cdcf5 --- /dev/null +++ b/examples/nas/benchmarks/.gitignore @@ -0,0 +1,5 @@ +nasbench_full.tfrecord +a.pth +data.zip +nds_data +nlp_data \ No newline at end of file diff --git a/examples/nas/benchmarks/nasbench101.requirements.txt b/examples/nas/benchmarks/nasbench101.requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..08fa109991bc11ed24862747965d789b9e3730d9 --- /dev/null +++ b/examples/nas/benchmarks/nasbench101.requirements.txt @@ -0,0 +1,5 @@ +# nasbench claims it supports tensorflow>=1.12.0 and we have tested on 1.15.2 +tensorflow +tqdm +peewee +git+https://github.com/google-research/nasbench diff --git a/examples/nas/benchmarks/nasbench101.sh b/examples/nas/benchmarks/nasbench101.sh new file mode 100755 index 0000000000000000000000000000000000000000..606a77f1bcbbeda205777652cc81ed3fe43ba4c8 --- /dev/null +++ b/examples/nas/benchmarks/nasbench101.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +if [ -z "${NASBENCHMARK_DIR}" ]; then + NASBENCHMARK_DIR=~/.nni/nasbenchmark +fi + +echo "Downloading NAS-Bench-101..." +if [ -f "nasbench_full.tfrecord" ]; then + echo "nasbench_full.tfrecord found. Skip download." +else + wget https://storage.googleapis.com/nasbench/nasbench_full.tfrecord +fi + +echo "Generating database..." +rm -f ${NASBENCHMARK_DIR}/nasbench101.db ${NASBENCHMARK_DIR}/nasbench101.db-journal +mkdir -p ${NASBENCHMARK_DIR} +python3 -m nni.nas.benchmarks.nasbench101.db_gen nasbench_full.tfrecord +rm -f nasbench_full.tfrecord diff --git a/examples/nas/benchmarks/nasbench201.requirements.txt b/examples/nas/benchmarks/nasbench201.requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d70bf6589292b7efb5ee7af1591f6ce682bf787f --- /dev/null +++ b/examples/nas/benchmarks/nasbench201.requirements.txt @@ -0,0 +1,4 @@ +torch +gdown +tqdm +peewee diff --git a/examples/nas/benchmarks/nasbench201.sh b/examples/nas/benchmarks/nasbench201.sh new file mode 100755 index 0000000000000000000000000000000000000000..1c4a3df79fe2b173d273eb999b0c86a36a1d9a3d --- /dev/null +++ b/examples/nas/benchmarks/nasbench201.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +if [ -z "${NASBENCHMARK_DIR}" ]; then + NASBENCHMARK_DIR=~/.nni/nasbenchmark +fi + +echo "Downloading NAS-Bench-201..." +if [ -f "a.pth" ]; then + echo "a.pth found. Skip download."
+else + gdown https://drive.google.com/uc\?id\=1OOfVPpt-lA4u2HJrXbgrRd42IbfvJMyE -O a.pth +fi + +echo "Generating database..." +rm -f ${NASBENCHMARK_DIR}/nasbench201.db ${NASBENCHMARK_DIR}/nasbench201.db-journal +mkdir -p ${NASBENCHMARK_DIR} +python3 -m nni.nas.benchmarks.nasbench201.db_gen a.pth +rm -f a.pth diff --git a/examples/nas/benchmarks/nds.requirements.txt b/examples/nas/benchmarks/nds.requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5ca83aee88bd379521f97091a0b80b8b3a810e80 --- /dev/null +++ b/examples/nas/benchmarks/nds.requirements.txt @@ -0,0 +1,2 @@ +tqdm +peewee diff --git a/examples/nas/benchmarks/nds.sh b/examples/nas/benchmarks/nds.sh new file mode 100755 index 0000000000000000000000000000000000000000..781ab1007d8ed1a98cbcf8cb45caf5657fed9bbb --- /dev/null +++ b/examples/nas/benchmarks/nds.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +if [ -z "${NASBENCHMARK_DIR}" ]; then + NASBENCHMARK_DIR=~/.nni/nasbenchmark +fi + +echo "Downloading NDS..." +if [ -f "data.zip" ]; then + echo "data.zip found. Skip download." +else + wget https://dl.fbaipublicfiles.com/nds/data.zip -O data.zip +fi +unzip data.zip + +echo "Generating database..." +rm -f ${NASBENCHMARK_DIR}/nds.db ${NASBENCHMARK_DIR}/nds.db-journal +mkdir -p ${NASBENCHMARK_DIR} +python3 -m nni.nas.benchmarks.nds.db_gen nds_data +rm -rf data.zip nds_data diff --git a/examples/nas/benchmarks/nlp.requirements.txt b/examples/nas/benchmarks/nlp.requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..52b81ee53c294acf647a15dd54c63e0599e039d2 --- /dev/null +++ b/examples/nas/benchmarks/nlp.requirements.txt @@ -0,0 +1 @@ +peewee diff --git a/examples/nas/benchmarks/nlp.sh b/examples/nas/benchmarks/nlp.sh new file mode 100644 index 0000000000000000000000000000000000000000..49a6b3140db755b9d1b8ebcbb25a885c810e0f5e --- /dev/null +++ b/examples/nas/benchmarks/nlp.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -e + +if [ -z "${NASBENCHMARK_DIR}" ]; then + NASBENCHMARK_DIR=~/.nni/nasbenchmark +fi + + +mkdir -p nlp_data +cd nlp_data +echo "Downloading NLP[1/3] wikitext2_data.zip..." +if [ -f "wikitext2_data.zip" ]; then + echo "wikitext2_data.zip found. Skip download." +else + wget -O wikitext2_data.zip https://github.com/fmsnew/nas-bench-nlp-release/blob/master/train_logs_wikitext-2/logs.zip?raw=true +fi +echo "Downloading NLP[2/3] ptb_single_run_data.zip..." +if [ -f "ptb_single_run_data.zip" ]; then + echo "ptb_single_run_data.zip found. Skip download." +else + wget -O ptb_single_run_data.zip https://github.com/fmsnew/nas-bench-nlp-release/blob/master/train_logs_single_run/logs.zip?raw=true +fi +echo "Downloading NLP[3/3] ptb_multi_runs_data.zip..." +if [ -f "ptb_multi_runs_data.zip" ]; then + echo "ptb_multi_runs_data.zip found. Skip download." +else + wget -O ptb_multi_runs_data.zip https://github.com/fmsnew/nas-bench-nlp-release/blob/master/train_logs_multi_runs/logs.zip?raw=true +fi +echo "### there exist duplicate log files in ptb_single_run_data.zip and ptb_multi_runs_data.zip; you can ignore all or replace all ###" +unzip -q wikitext2_data.zip +unzip -q ptb_single_run_data.zip +unzip -q ptb_multi_runs_data.zip +cd .. + +echo "Generating database..."
+rm -f ${NASBENCHMARK_DIR}/nlp.db ${NASBENCHMARK_DIR}/nlp.db-journal +mkdir -p ${NASBENCHMARK_DIR} +python3 -m nni.nas.benchmarks.nlp.db_gen nlp_data +rm -rf nlp_data diff --git a/examples/nas/legacy/cdarts/aux_head.py b/examples/nas/legacy/cdarts/aux_head.py new file mode 100644 index 0000000000000000000000000000000000000000..9a67d09fecaf62ed3a80fdcccf1387421dce554b --- /dev/null +++ b/examples/nas/legacy/cdarts/aux_head.py @@ -0,0 +1,102 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch.nn as nn + + +class DistillHeadCIFAR(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=False): + """assuming input size 8x8 or 16x16""" + super(DistillHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0), -1)) + return x + + +class DistillHeadImagenet(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=False): + """assuming input size 7x7 or 14x14""" + super(DistillHeadImagenet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0), -1)) + return x + + +class AuxiliaryHeadCIFAR(nn.Module): + + def __init__(self, C, size=5, num_classes=10): + """assuming input size 8x8""" + super(AuxiliaryHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), -1)) + return x + + +class AuxiliaryHeadImageNet(nn.Module): + + def __init__(self, C, size=5, num_classes=1000): + """assuming input size 7x7""" + super(AuxiliaryHeadImageNet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + # NOTE: This batchnorm was omitted in my earlier implementation due to a typo. + # Commenting it out for consistency with the experiments in the paper. 
+ # nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), -1)) + return x diff --git a/examples/nas/legacy/cdarts/config.py b/examples/nas/legacy/cdarts/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f0200f39cd16542ad64c29673ca909d5a42a2ab5 --- /dev/null +++ b/examples/nas/legacy/cdarts/config.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import argparse +from functools import partial + + +def get_parser(name): + """ make default formatted parser """ + parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + # print default value always + parser.add_argument = partial(parser.add_argument, help=' ') + return parser + + +class BaseConfig(argparse.Namespace): + def print_params(self, prtf=print): + prtf("") + prtf("Parameters:") + for attr, value in sorted(vars(self).items()): + prtf("{}={}".format(attr.upper(), value)) + prtf("") + + def as_markdown(self): + """ Return configs as markdown format """ + text = "|name|value| \n|-|-| \n" + for attr, value in sorted(vars(self).items()): + text += "|{}|{}| \n".format(attr, value) + + return text + + +class SearchConfig(BaseConfig): + def build_parser(self): + parser = get_parser("Search config") + ########### basic settings ############ + parser.add_argument('--dataset', default='cifar10', choices=['cifar10', 'cifar100', 'imagenet']) + parser.add_argument('--n_classes', type=int, default=10) + parser.add_argument('--stem_multiplier', type=int, default=3) + parser.add_argument('--init_channels', type=int, default=16) + parser.add_argument('--data_dir', type=str, default='data/cifar', help='cifar dataset') + parser.add_argument('--output_path', type=str, default='./outputs', help='') + parser.add_argument('--batch_size', type=int, default=128, help='batch size') + parser.add_argument('--log_frequency', type=int, default=10, help='print frequency') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument('--workers', type=int, default=4, help='# of workers') + parser.add_argument('--steps_per_epoch', type=int, default=None, help='how many steps per epoch, use None for one pass of dataset') + + ########### learning rate ############ + parser.add_argument('--w_lr', type=float, default=0.05, help='lr for weights') + parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights') + parser.add_argument('--w_weight_decay', type=float, default=3e-4, help='weight decay for weights') + parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping for weights') + parser.add_argument('--alpha_lr', type=float, default=6e-4, help='lr for alpha') + parser.add_argument('--alpha_weight_decay', type=float, default=1e-3, help='weight decay for alpha') + parser.add_argument('--nasnet_lr', type=float, default=0.1, help='lr of nasnet') + + ########### alternate training ############ + parser.add_argument('--epochs', type=int, default=32, help='# of search epochs') + parser.add_argument('--warmup_epochs', type=int, default=2, help='# warmup epochs of super model') + parser.add_argument('--loss_alpha', type=float, default=1, help='loss alpha') + parser.add_argument('--loss_T', type=float, default=2, help='loss temperature') + parser.add_argument('--interactive_type', type=str, default='kl', choices=['kl', 'smoothl1']) + 
parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to sync bn') + parser.add_argument('--use_apex', action='store_true', default=False, help='whether to use apex') + parser.add_argument('--regular_ratio', type=float, default=0.5, help='regular ratio') + parser.add_argument('--regular_coeff', type=float, default=5, help='regular coefficient') + parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head') + parser.add_argument('--share_module', action='store_true', default=False, help='whether to share stem and aux head') + + ########### data augmentation ############ + parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight') + parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') + parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob') + parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa') + parser.add_argument('--mixup_alpha', default=1., type=float, help='mixup interpolation coefficient (default: 1)') + + ########### distributed ############ + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training') + parser.add_argument('--distributed', action='store_true', help='run model distributed mode') + + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) + + +class RetrainConfig(BaseConfig): + def build_parser(self): + parser = get_parser("Retrain config") + parser.add_argument('--dataset', default="cifar10", choices=['cifar10', 'cifar100', 'imagenet']) + parser.add_argument('--data_dir', type=str, default='data/cifar', help='cifar dataset') + parser.add_argument('--output_path', type=str, default='./outputs', help='') + parser.add_argument("--arc_checkpoint", default="epoch_02.json") + parser.add_argument('--log_frequency', type=int, default=10, help='print frequency') + + ########### model settings ############ + parser.add_argument('--n_classes', type=int, default=10) + parser.add_argument('--input_channels', type=int, default=3) + parser.add_argument('--stem_multiplier', type=int, default=3) + parser.add_argument('--batch_size', type=int, default=128, help='batch size') + parser.add_argument('--eval_batch_size', type=int, default=500, help='batch size for validation') + parser.add_argument('--lr', type=float, default=0.025, help='lr for weights') + parser.add_argument('--momentum', type=float, default=0.9, help='momentum') + parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping for weights') + parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay') + parser.add_argument('--epochs', type=int, default=600, help='# of training epochs') + parser.add_argument('--warmup_epochs', type=int, default=5, help='# warmup') + parser.add_argument('--init_channels', type=int, default=36) + parser.add_argument('--layers', type=int, default=20, help='# of layers') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument('--workers', type=int, default=4, help='# of workers') + parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight') + parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') +
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing') + parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path prob') + + ########### data augmentation ############ + parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa') + parser.add_argument('--mixup_alpha', default=1., type=float, help='mixup interpolation coefficient') + + ########### distributed ############ + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training') + parser.add_argument('--distributed', action='store_true', help='run model distributed mode') + + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) diff --git a/examples/nas/legacy/cdarts/datasets/cifar.py b/examples/nas/legacy/cdarts/datasets/cifar.py new file mode 100644 index 0000000000000000000000000000000000000000..493335f151587363f65db63bc52e4a1c5830347f --- /dev/null +++ b/examples/nas/legacy/cdarts/datasets/cifar.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np +import torch +import torchvision.datasets as dset +import torchvision.transforms as transforms + +from datasets.data_utils import CIFAR10Policy, Cutout +from datasets.data_utils import SubsetDistributedSampler + + +def data_transforms_cifar(config, cutout=False): + CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124] + CIFAR_STD = [0.24703233, 0.24348505, 0.26158768] + + if config.use_aa: + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4, fill=128), + transforms.RandomHorizontalFlip(), CIFAR10Policy(), + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + else: + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + + if cutout: + train_transform.transforms.append(Cutout(config.cutout_length)) + + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + return train_transform, valid_transform + + +def get_search_datasets(config): + dataset = config.dataset.lower() + if dataset == 'cifar10': + dset_cls = dset.CIFAR10 + n_classes = 10 + elif dataset == 'cifar100': + dset_cls = dset.CIFAR100 + n_classes = 100 + else: + raise Exception("Unsupported dataset!") + + train_transform, valid_transform = data_transforms_cifar(config, cutout=False) + train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform) + test_data = dset_cls(root=config.data_dir, train=False, download=True, transform=valid_transform) + + num_train = len(train_data) + indices = list(range(num_train)) + split_mid = int(np.floor(0.5 * num_train)) + + if config.distributed: + train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid]) + valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train]) + else: + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split_mid]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split_mid:num_train]) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, +
pin_memory=False, num_workers=config.workers) + + valid_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=valid_sampler, + pin_memory=False, num_workers=config.workers) + + return [train_loader, valid_loader], [train_sampler, valid_sampler] + + +def get_augment_datasets(config): + dataset = config.dataset.lower() + if dataset == 'cifar10': + dset_cls = dset.CIFAR10 + elif dataset == 'cifar100': + dset_cls = dset.CIFAR100 + else: + raise Exception("Unsupported dataset!") + + train_transform, valid_transform = data_transforms_cifar(config, cutout=True) + train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform) + test_data = dset_cls(root=config.data_dir, train=False, download=True, transform=valid_transform) + + if config.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) + else: + train_sampler = None + test_sampler = None + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + test_loader = torch.utils.data.DataLoader( + test_data, batch_size=config.eval_batch_size, + sampler=test_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, test_loader], [train_sampler, test_sampler] diff --git a/examples/nas/legacy/cdarts/datasets/data_utils.py b/examples/nas/legacy/cdarts/datasets/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..096b5a1fa7d79377a0a9e696a47070b81229031f --- /dev/null +++ b/examples/nas/legacy/cdarts/datasets/data_utils.py @@ -0,0 +1,400 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import math +import random + +import numpy as np +import torch +import torch.distributed as dist +from PIL import Image, ImageEnhance, ImageOps +from torch.utils.data import Sampler + + +class SubsetDistributedSampler(Sampler): + """ + Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + Dataset is assumed to be of constant size. + """ + + def __init__(self, dataset, indices, num_replicas=None, rank=None, shuffle=True): + """ + Initialization. + + Parameters + ---------- + dataset : torch.utils.data.Dataset + Dataset used for sampling. + num_replicas : int + Number of processes participating in distributed training. Default: World size. + rank : int + Rank of the current process within num_replicas. Default: Current rank. + shuffle : bool + If true (default), sampler will shuffle the indices.
+ """ + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.indices = indices + self.num_samples = int(math.ceil(len(self.indices) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + # indices = torch.randperm(len(self.dataset), generator=g).tolist() + indices = list(self.indices[i] for i in torch.randperm(len(self.indices))) + else: + # indices = list(range(len(self.dataset))) + indices = self.indices + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class data_prefetcher(): + def __init__(self, loader): + self.loader = iter(loader) + self.stream = torch.cuda.Stream() + self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1, 3, 1, 1) + self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1, 3, 1, 1) + self.preload() + + def preload(self): + try: + self.next_input, self.next_target = next(self.loader) + except StopIteration: + self.next_input = None + self.next_target = None + return + with torch.cuda.stream(self.stream): + self.next_input = self.next_input.cuda(non_blocking=True) + self.next_target = self.next_target.cuda(non_blocking=True) + self.next_input = self.next_input.float() + self.next_input = self.next_input.sub_(self.mean).div_(self.std) + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + input = self.next_input + target = self.next_target + self.preload() + return input, target + + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + + +class ImageNetPolicy(object): + """ Randomly choose one of the best 24 Sub-policies on ImageNet. 
+ Example: + >>> policy = ImageNetPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + + SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), + SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), + SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), + SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), + + SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), + SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), + SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + + SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), + SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), + SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), + SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), + SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), + + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) + ] + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment ImageNet Policy" + + +class CIFAR10Policy(object): + """ Randomly choose one of the best 25 Sub-policies on CIFAR10. 
+ Example: + >>> policy = CIFAR10Policy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> CIFAR10Policy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor), + SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor), + SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor), + SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor), + + SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor), + SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor), + SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor), + SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor), + + SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor), + SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor), + SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor), + SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor), + SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor), + + SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor), + SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor), + SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor), + SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor), + SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor), + + SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor), + SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor) + ] + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment CIFAR10 Policy" + + +class SVHNPolicy(object): + """ Randomly choose one of the best 25 Sub-policies on SVHN. 
+ Example: + >>> policy = SVHNPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> SVHNPolicy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor), + SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor), + SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor), + + SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor), + SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor), + SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor), + SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor), + + SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor), + SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor), + SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor), + SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor), + + SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor), + SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor), + SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor), + SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor), + + SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor), + SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor), + SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor), + SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor), + SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor) + ] + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment SVHN Policy" + + +class SubPolicy(object): + def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)): + ranges = { + "shearX": np.linspace(0, 0.3, 10), + "shearY": np.linspace(0, 0.3, 10), + "translateX": np.linspace(0, 150 / 331, 10), + "translateY": np.linspace(0, 150 / 331, 10), + "rotate": np.linspace(0, 30, 10), + "color": np.linspace(0.0, 0.9, 10), + "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int), # use builtin int; the `np.int` alias is deprecated and removed in recent NumPy + "solarize": np.linspace(256, 0, 10), + "contrast": np.linspace(0.0, 0.9, 10), + "sharpness": np.linspace(0.0, 0.9, 10), + "brightness": np.linspace(0.0, 0.9, 10), + "autocontrast": [0] * 10, + "equalize": [0] * 10, + "invert": [0] * 10 + } + + # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand + def rotate_with_fill(img, magnitude): + rot = img.convert("RGBA").rotate(magnitude) + return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode) + + func = { + "shearX": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0), + Image.BICUBIC, fillcolor=fillcolor), + "shearY": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0), + Image.BICUBIC, fillcolor=fillcolor), + "translateX": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] *
random.choice([-1, 1]), 0, 1, 0), + fillcolor=fillcolor), + "translateY": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])), + fillcolor=fillcolor), + "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude), + "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])), + "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude), + "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude), + "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img), + "equalize": lambda img, magnitude: ImageOps.equalize(img), + "invert": lambda img, magnitude: ImageOps.invert(img) + } + + self.p1 = p1 + self.operation1 = func[operation1] + self.magnitude1 = ranges[operation1][magnitude_idx1] + self.p2 = p2 + self.operation2 = func[operation2] + self.magnitude2 = ranges[operation2][magnitude_idx2] + + def __call__(self, img): + if random.random() < self.p1: + img = self.operation1(img, self.magnitude1) + if random.random() < self.p2: + img = self.operation2(img, self.magnitude2) + return img + + +def fast_collate(batch): + imgs = [img[0] for img in batch] + targets = torch.tensor([target[1] for target in batch], dtype=torch.int64) + w = imgs[0].size[0] + h = imgs[0].size[1] + tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8) + for i, img in enumerate(imgs): + nump_array = np.asarray(img, dtype=np.uint8) + if (nump_array.ndim < 3): + nump_array = np.expand_dims(nump_array, axis=-1) + nump_array = np.rollaxis(nump_array, 2) + + tensor[i] += torch.from_numpy(nump_array) + + return tensor, targets + + +def mixup_data(x, y, alpha=1.0, use_cuda=True): + '''Returns mixed inputs, pairs of targets, and lambda''' + if alpha > 0: + lam = np.random.beta(alpha, alpha) + else: + lam = 1 + + batch_size = x.size()[0] + if use_cuda: + index = torch.randperm(batch_size).cuda() + else: + index = torch.randperm(batch_size) + + mixed_x = lam * x + (1 - lam) * x[index, :] + y_a, y_b = y, y[index] + return mixed_x, y_a, y_b, lam + + +def mixup_criterion(criterion, pred, y_a, y_b, lam): + return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) diff --git a/examples/nas/legacy/cdarts/datasets/imagenet.py b/examples/nas/legacy/cdarts/datasets/imagenet.py new file mode 100644 index 0000000000000000000000000000000000000000..3bba3d552e81c6be4d83bcd7cbd3c10a19af9431 --- /dev/null +++ b/examples/nas/legacy/cdarts/datasets/imagenet.py @@ -0,0 +1,100 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
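+
+# Usage sketch (illustrative only; `config` is assumed to provide the fields
+# referenced below -- data_dir, batch_size, workers, distributed and, for
+# AutoAugment, use_aa):
+#
+#     loaders, samplers = get_search_datasets(config)
+#     train_loader, valid_loader = loaders    # two halves of the ImageNet train split
+#     train_sampler, valid_sampler = samplers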
+ +import os + +import numpy as np +import torch +import torchvision.datasets as dset +import torchvision.transforms as transforms + +from datasets.data_utils import ImageNetPolicy +from datasets.data_utils import SubsetDistributedSampler + + +def _imagenet_dataset(config): + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + train_dir = os.path.join(config.data_dir, "train") + test_dir = os.path.join(config.data_dir, "val") + if hasattr(config, "use_aa") and config.use_aa: + train_data = dset.ImageFolder( + train_dir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + ImageNetPolicy(), + transforms.ToTensor(), + normalize, + ])) + else: + train_data = dset.ImageFolder( + train_dir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ColorJitter( + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.2), + transforms.ToTensor(), + normalize, + ])) + + test_data = dset.ImageFolder( + test_dir, + transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])) + + return train_data, test_data + + +def get_search_datasets(config): + train_data, test_data = _imagenet_dataset(config) + num_train = len(train_data) + indices = list(range(num_train)) + split_mid = int(np.floor(0.5 * num_train)) + + if config.distributed: + train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid]) + valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train]) + else: + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split_mid]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split_mid:num_train]) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + valid_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=valid_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, valid_loader], [train_sampler, valid_sampler] + + +def get_augment_datasets(config): + train_data, test_data = _imagenet_dataset(config) + if config.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) + else: + train_sampler = test_sampler = None + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + test_loader = torch.utils.data.DataLoader( + test_data, batch_size=config.batch_size, + sampler=test_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, test_loader], [train_sampler, test_sampler] diff --git a/examples/nas/legacy/cdarts/genotypes.py b/examples/nas/legacy/cdarts/genotypes.py new file mode 100644 index 0000000000000000000000000000000000000000..0cc4d3fa6323b553bcd6809d04a93492ea559f93 --- /dev/null +++ b/examples/nas/legacy/cdarts/genotypes.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
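+
+# Note: `from_str` below rebuilds a Genotype with `eval`, so it should only be
+# applied to genotype strings from trusted checkpoints. Also note that `to_dag`
+# refers to `ops.DropPath_`, while ops.py in this example defines `DropPath`,
+# and `parse`/`parse_gumbel` assert that PRIMITIVES ends with 'none', which the
+# list in ops.py does not; keep these in sync if you call them directly.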
+ +""" +- Genotype: normal/reduce gene + normal/reduce cell output connection (concat) +- gene: discrete ops information (w/o output connection) +- dag: real ops (can be mixed or discrete, but Genotype has only discrete information itself) +""" +from collections import namedtuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import ops +from ops import PRIMITIVES + +Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') + + +def to_dag(C_in, gene, reduction, bn_affine=True): + """ generate discrete ops from gene """ + dag = nn.ModuleList() + for edges in gene: + row = nn.ModuleList() + for op_name, s_idx in edges: + # reduction cell & from input nodes => stride = 2 + stride = 2 if reduction and s_idx < 2 else 1 + op = ops.OPS[op_name](C_in, stride, bn_affine) + if not isinstance(op, ops.Identity): # Identity does not use drop path + op = nn.Sequential( + op, + ops.DropPath_() + ) + op.s_idx = s_idx + row.append(op) + dag.append(row) + + return dag + + +def from_str(s): + """ generate genotype from string + e.g. "Genotype( + normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], + [('sep_conv_3x3', 1), ('dil_conv_3x3', 2)], + [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)], + [('sep_conv_3x3', 1), ('dil_conv_3x3', 4)]], + normal_concat=range(2, 6), + reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], + [('max_pool_3x3', 0), ('skip_connect', 2)], + [('max_pool_3x3', 0), ('skip_connect', 2)], + [('max_pool_3x3', 0), ('skip_connect', 2)]], + reduce_concat=range(2, 6))" + """ + + genotype = eval(s) + + return genotype + + +def parse(alpha, beta, k): + """ + parse continuous alpha to discrete gene. + alpha is ParameterList: + ParameterList [ + Parameter(n_edges1, n_ops), + Parameter(n_edges2, n_ops), + ... + ] + + beta is ParameterList: + ParameterList [ + Parameter(n_edges1), + Parameter(n_edges2), + ... + ] + + gene is list: + [ + [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], + [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], + ... + ] + each node has two edges (k=2) in CNN. + """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # 'none' is implemented in mutator now + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... () ()] + connect_idx = [] + for edges, w in zip(alpha, beta): + # edges: Tensor(n_edges, n_ops) + edge_max, primitive_indices = torch.topk((w.view(-1, 1) * edges)[:, :-1], 1) # ignore 'none' + topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k) + node_gene = [] + node_idx = [] + for edge_idx in topk_edge_indices: + prim_idx = primitive_indices[edge_idx] + prim = PRIMITIVES[prim_idx] + node_gene.append((prim, edge_idx.item())) + node_idx.append((edge_idx.item(), prim_idx.item())) + + gene.append(node_gene) + connect_idx.append(node_idx) + + return gene, connect_idx + + +def parse_gumbel(alpha, beta, k): + """ + parse continuous alpha to discrete gene. + alpha is ParameterList: + ParameterList [ + Parameter(n_edges1, n_ops), + Parameter(n_edges2, n_ops), + ... + ] + + beta is ParameterList: + ParameterList [ + Parameter(n_edges1), + Parameter(n_edges2), + ... + ] + + gene is list: + [ + [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], + [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], + ... + ] + each node has two edges (k=2) in CNN. 
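+    Unlike `parse`, which keeps the top-k strongest edges deterministically,
+    this variant samples edges with hard Gumbel-softmax (tau=1), so repeated
+    calls may return different discrete genes for the same alpha.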
+ """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none' + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... () ()] + connect_idx = [] + for edges, w in zip(alpha, beta): + # edges: Tensor(n_edges, n_ops) + discrete_a = F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True) + for i in range(k-1): + discrete_a = discrete_a + F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True) + discrete_a = discrete_a.reshape(-1, len(PRIMITIVES)-1) + reserved_edge = (discrete_a > 0).nonzero() + + node_gene = [] + node_idx = [] + for i in range(reserved_edge.shape[0]): + edge_idx = reserved_edge[i][0].item() + prim_idx = reserved_edge[i][1].item() + prim = PRIMITIVES[prim_idx] + node_gene.append((prim, edge_idx)) + node_idx.append((edge_idx, prim_idx)) + + gene.append(node_gene) + connect_idx.append(node_idx) + + return gene, connect_idx diff --git a/examples/nas/legacy/cdarts/model.py b/examples/nas/legacy/cdarts/model.py new file mode 100644 index 0000000000000000000000000000000000000000..0514004a5e47e1af5f90c58b9a0ccc9e1091a5ae --- /dev/null +++ b/examples/nas/legacy/cdarts/model.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import ops +import numpy as np +from nni.nas.pytorch import mutables +from utils import parse_results +from aux_head import DistillHeadCIFAR, DistillHeadImagenet, AuxiliaryHeadCIFAR, AuxiliaryHeadImageNet + + +class Node(nn.Module): + def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect): + super().__init__() + self.ops = nn.ModuleList() + choice_keys = [] + for i in range(num_prev_nodes): + stride = 2 if i < num_downsample_connect else 1 + choice_keys.append("{}_p{}".format(node_id, i)) + self.ops.append(mutables.LayerChoice([ops.OPS[k](channels, stride, False) for k in ops.PRIMITIVES], + key=choice_keys[-1])) + self.drop_path = ops.DropPath() + self.input_switch = mutables.InputChoice(choose_from=choice_keys, n_chosen=2, key="{}_switch".format(node_id)) + + def forward(self, prev_nodes): + assert len(self.ops) == len(prev_nodes) + out = [op(node) for op, node in zip(self.ops, prev_nodes)] + out = [self.drop_path(o) if o is not None else None for o in out] + return self.input_switch(out) + + +class Cell(nn.Module): + + def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction): + super().__init__() + self.reduction = reduction + self.n_nodes = n_nodes + + # If previous cell is reduction cell, current input size does not match with + # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. + if reduction_p: + self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False) + else: + self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False) + self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False) + + # generate dag + self.mutable_ops = nn.ModuleList() + for depth in range(2, self.n_nodes + 2): + self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth), + depth, channels, 2 if reduction else 0)) + + def forward(self, s0, s1): + # s0, s1 are the outputs of previous previous cell and previous cell, respectively. 
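+        # Both are first projected to this cell's channel count by the
+        # preprocessing convs; every node then consumes all earlier tensors.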
+        tensors = [self.preproc0(s0), self.preproc1(s1)]
+        for node in self.mutable_ops:
+            cur_tensor = node(tensors)
+            tensors.append(cur_tensor)
+
+        output = torch.cat(tensors[2:], dim=1)
+        return output
+
+
+class Model(nn.Module):
+
+    def __init__(self, dataset, n_layers, in_channels=3, channels=16, n_nodes=4, retrain=False, shared_modules=None):
+        super().__init__()
+        assert dataset in ["cifar10", "imagenet"]
+        self.dataset = dataset
+        self.input_size = 32 if dataset == "cifar10" else 224  # "cifar10": the assert above only admits "cifar10"/"imagenet"
+        self.in_channels = in_channels
+        self.channels = channels
+        self.n_nodes = n_nodes
+        self.aux_size = {2 * n_layers // 3: self.input_size // 4}
+        if dataset == "cifar10":
+            self.n_classes = 10
+            self.aux_head_class = AuxiliaryHeadCIFAR if retrain else DistillHeadCIFAR
+            if not retrain:
+                self.aux_size = {n_layers // 3: 6, 2 * n_layers // 3: 6}
+        elif dataset == "imagenet":
+            self.n_classes = 1000
+            self.aux_head_class = AuxiliaryHeadImageNet if retrain else DistillHeadImagenet
+            if not retrain:
+                self.aux_size = {n_layers // 3: 6, 2 * n_layers // 3: 5}
+        self.n_layers = n_layers
+        self.aux_head = nn.ModuleDict()
+        self.ensemble_param = nn.Parameter(torch.rand(len(self.aux_size) + 1) / (len(self.aux_size) + 1)) \
+            if not retrain else None
+
+        stem_multiplier = 3 if dataset == "cifar10" else 1
+        c_cur = stem_multiplier * self.channels
+        self.shared_modules = {}  # do not wrap with ModuleDict
+        if shared_modules is not None:
+            self.stem = shared_modules["stem"]
+        else:
+            self.stem = nn.Sequential(
+                nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False),
+                nn.BatchNorm2d(c_cur)
+            )
+        self.shared_modules["stem"] = self.stem
+
+        # for the first cell, stem is used for both s0 and s1
+        # [!] channels_pp and channels_p is output channel size, but c_cur is input channel size.
+        channels_pp, channels_p, c_cur = c_cur, c_cur, channels
+
+        self.cells = nn.ModuleList()
+        reduction_p, reduction = False, False
+        aux_head_count = 0
+        for i in range(n_layers):
+            reduction_p, reduction = reduction, False
+            if i in [n_layers // 3, 2 * n_layers // 3]:
+                c_cur *= 2
+                reduction = True
+
+            cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction)
+            self.cells.append(cell)
+            c_cur_out = c_cur * n_nodes
+            if i in self.aux_size:
+                if shared_modules is not None:
+                    self.aux_head[str(i)] = shared_modules["aux" + str(aux_head_count)]
+                else:
+                    self.aux_head[str(i)] = self.aux_head_class(c_cur_out, self.aux_size[i], self.n_classes)
+                self.shared_modules["aux" + str(aux_head_count)] = self.aux_head[str(i)]
+                aux_head_count += 1
+            channels_pp, channels_p = channels_p, c_cur_out
+
+        self.gap = nn.AdaptiveAvgPool2d(1)
+        self.linear = nn.Linear(channels_p, self.n_classes)
+
+    def forward(self, x):
+        s0 = s1 = self.stem(x)
+        outputs = []
+
+        for i, cell in enumerate(self.cells):
+            s0, s1 = s1, cell(s0, s1)
+            if str(i) in self.aux_head:
+                outputs.append(self.aux_head[str(i)](s1))
+
+        out = self.gap(s1)
+        out = out.view(out.size(0), -1)  # flatten
+        logits = self.linear(out)
+        outputs.append(logits)
+
+        if self.ensemble_param is None:
+            assert len(outputs) == 2
+            return outputs[1], outputs[0]
+        else:
+            em_output = torch.cat([(e * o) for e, o in zip(F.softmax(self.ensemble_param, dim=0), outputs)], 0)
+            return logits, em_output
+
+    def drop_path_prob(self, p):
+        for module in self.modules():
+            if isinstance(module, ops.DropPath):
+                module.p = p
+
+    def plot_genotype(self, results, logger):
+        genotypes = parse_results(results, self.n_nodes)
+        logger.info(genotypes)
+        return genotypes
diff --git a/examples/nas/legacy/cdarts/ops.py b/examples/nas/legacy/cdarts/ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..285dc2998bd9ac4c90cfa5614e89e6c0ea6bfa92
--- /dev/null
+++ b/examples/nas/legacy/cdarts/ops.py
@@ -0,0 +1,161 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch
+import torch.nn as nn
+
+OPS = {
+    'avg_pool_3x3': lambda C, stride, affine: PoolWithoutBN('avg', C, 3, stride, 1, affine=affine),
+    'max_pool_3x3': lambda C, stride, affine: PoolWithoutBN('max', C, 3, stride, 1, affine=affine),
+    'skip_connect': lambda C, stride, affine: nn.Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
+    'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
+    'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
+    'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
+    'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),  # 5x5
+    'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),  # 9x9
+    'conv_7x1_1x7': lambda C, stride, affine: FacConv(C, C, 7, stride, 3, affine=affine)
+}
+
+PRIMITIVES = [
+    'max_pool_3x3',
+    'avg_pool_3x3',
+    'skip_connect',  # identity
+    'sep_conv_3x3',
+    'sep_conv_5x5',
+    'dil_conv_3x3',
+    'dil_conv_5x5',
+]
+
+
+class DropPath(nn.Module):
+    def __init__(self, p=0.):
+        """
+        Drop path with probability.
+
+        Parameters
+        ----------
+        p : float
+            Probability of a path to be zeroed.
+        """
+        super().__init__()
+        self.p = p
+
+    def forward(self, x):
+        if self.training and self.p > 0.:
+            keep_prob = 1. - self.p
+            # per data point mask
+            mask = torch.zeros((x.size(0), 1, 1, 1), device=x.device).bernoulli_(keep_prob)
+            return x / keep_prob * mask
+
+        return x
+
+
+class PoolWithoutBN(nn.Module):
+    """
+    AvgPool or MaxPool without BN. `pool_type` must be `max` or `avg`.
+    """
+
+    def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        if pool_type.lower() == 'max':
+            self.pool = nn.MaxPool2d(kernel_size, stride, padding)
+        elif pool_type.lower() == 'avg':
+            self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
+        else:
+            raise NotImplementedError("Pool doesn't support pooling type other than max and avg.")
+
+    def forward(self, x):
+        out = self.pool(x)
+        return out
+
+
+class StdConv(nn.Module):
+    """
+    Standard conv: ReLU - Conv - BN
+    """
+
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class FacConv(nn.Module):
+    """
+    Factorized conv: ReLU - Conv(Kx1) - Conv(1xK) - BN
+    """
+
+    def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
+            nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class DilConv(nn.Module):
+    """
+    (Dilated) depthwise separable conv.
+    ReLU - (Dilated) depthwise separable - Pointwise - BN.
+    If dilation == 2, 3x3 conv => 5x5 receptive field, 5x5 conv => 9x9 receptive field.
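+    In general, the effective kernel size is kernel_size + (kernel_size - 1) * (dilation - 1).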
+ """ + + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in, + bias=False), + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class SepConv(nn.Module): + """ + Depthwise separable conv. + DilConv(dilation=1) * 2. + """ + + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine), + DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class FactorizedReduce(nn.Module): + """ + Reduce feature map size by factorized pointwise (stride=2). + """ + + def __init__(self, C_in, C_out, affine=True): + super().__init__() + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + x = self.relu(x) + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out diff --git a/examples/nas/legacy/cdarts/retrain.py b/examples/nas/legacy/cdarts/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd320d58c3c9be4af6b90ea645fb9ca6d4419c3 --- /dev/null +++ b/examples/nas/legacy/cdarts/retrain.py @@ -0,0 +1,156 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import logging +import os +import time +from argparse import ArgumentParser + +import torch +import torch.nn as nn + +import apex # pylint: disable=import-error +import datasets +import utils +from apex.parallel import DistributedDataParallel # pylint: disable=import-error +from config import RetrainConfig +from datasets.cifar import get_augment_datasets +from model import Model +from nni.nas.pytorch.fixed import apply_fixed_architecture +from nni.nas.pytorch.utils import AverageMeterGroup + + +def train(logger, config, train_loader, model, optimizer, criterion, epoch, main_proc): + meters = AverageMeterGroup() + cur_lr = optimizer.param_groups[0]["lr"] + if main_proc: + logger.info("Epoch %d LR %.6f", epoch, cur_lr) + + model.train() + for step, (x, y) in enumerate(train_loader): + x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True) + optimizer.zero_grad() + logits, aux_logits = model(x) + loss = criterion(logits, y) + if config.aux_weight > 0.: + loss += config.aux_weight * criterion(aux_logits, y) + loss.backward() + nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} + metrics = utils.reduce_metrics(metrics, config.distributed) + meters.update(metrics) + + if main_proc and (step % config.log_frequency == 0 or step + 1 == len(train_loader)): + logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, config.epochs, step + 1, len(train_loader), meters) + + if main_proc: + logger.info("Train: [%d/%d] Final Prec@1 %.4f Prec@5 %.4f", epoch + 1, config.epochs, meters.prec1.avg, meters.prec5.avg) + + +def validate(logger, config, valid_loader, model, criterion, epoch, main_proc): + meters = 
AverageMeterGroup()
+    model.eval()
+
+    with torch.no_grad():
+        for step, (x, y) in enumerate(valid_loader):
+            x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
+            logits, _ = model(x)
+            loss = criterion(logits, y)
+            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
+            metrics = {"prec1": prec1, "prec5": prec5, "loss": loss}
+            metrics = utils.reduce_metrics(metrics, config.distributed)
+            meters.update(metrics)
+
+            if main_proc and (step % config.log_frequency == 0 or step + 1 == len(valid_loader)):
+                logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, config.epochs, step + 1, len(valid_loader), meters)
+
+    if main_proc:
+        logger.info("Valid: [%d/%d] Final Prec@1 %.4f Prec@5 %.4f", epoch + 1, config.epochs, meters.prec1.avg, meters.prec5.avg)
+    return meters.prec1.avg, meters.prec5.avg
+
+
+def main():
+    config = RetrainConfig()
+    main_proc = not config.distributed or config.local_rank == 0
+    if config.distributed:
+        torch.cuda.set_device(config.local_rank)
+        torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
+                                             rank=config.local_rank, world_size=config.world_size)
+    if main_proc:
+        os.makedirs(config.output_path, exist_ok=True)
+    if config.distributed:
+        torch.distributed.barrier()
+    logger = utils.get_logger(os.path.join(config.output_path, 'search.log'))
+    if main_proc:
+        config.print_params(logger.info)
+    utils.reset_seed(config.seed)
+
+    loaders, samplers = get_augment_datasets(config)
+    train_loader, valid_loader = loaders
+    train_sampler, valid_sampler = samplers
+
+    model = Model(config.dataset, config.layers, in_channels=config.input_channels, channels=config.init_channels, retrain=True).cuda()
+    if config.label_smooth > 0:
+        criterion = utils.CrossEntropyLabelSmooth(config.n_classes, config.label_smooth)
+    else:
+        criterion = nn.CrossEntropyLoss()
+
+    fixed_arc_path = os.path.join(config.output_path, config.arc_checkpoint)
+    with open(fixed_arc_path, "r") as f:
+        fixed_arc = json.load(f)
+    fixed_arc = utils.encode_tensor(fixed_arc, torch.device("cuda"))
+    genotypes = utils.parse_results(fixed_arc, n_nodes=4)
+    genotypes_dict = {i: genotypes for i in range(3)}
+    apply_fixed_architecture(model, fixed_arc_path)
+    param_size = utils.param_size(model, criterion, [3, 32, 32] if 'cifar' in config.dataset else [3, 224, 224])
+
+    if main_proc:
+        logger.info("Param size: %.6f", param_size)
+        logger.info("Genotype: %s", genotypes)
+
+    # change training hyper parameters according to cell type
+    if 'cifar' in config.dataset:
+        if param_size < 3.0:
+            config.weight_decay = 3e-4
+            config.drop_path_prob = 0.2
+        elif 3.0 < param_size < 3.5:
+            config.weight_decay = 3e-4
+            config.drop_path_prob = 0.3
+        else:
+            config.weight_decay = 5e-4
+            config.drop_path_prob = 0.3
+
+    if config.distributed:
+        apex.parallel.convert_syncbn_model(model)
+        model = DistributedDataParallel(model, delay_allreduce=True)
+
+    optimizer = torch.optim.SGD(model.parameters(), config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
+    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs, eta_min=1E-6)
+
+    best_top1 = best_top5 = 0.
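+
+    # Drop path probability ramps linearly from 0 to config.drop_path_prob over
+    # training; it is recomputed at the start of every epoch below.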
+ for epoch in range(config.epochs): + drop_prob = config.drop_path_prob * epoch / config.epochs + if config.distributed: + model.module.drop_path_prob(drop_prob) + else: + model.drop_path_prob(drop_prob) + # training + if config.distributed: + train_sampler.set_epoch(epoch) + train(logger, config, train_loader, model, optimizer, criterion, epoch, main_proc) + + # validation + top1, top5 = validate(logger, config, valid_loader, model, criterion, epoch, main_proc) + best_top1 = max(best_top1, top1) + best_top5 = max(best_top5, top5) + lr_scheduler.step() + + logger.info("Final best Prec@1 = %.4f Prec@5 = %.4f", best_top1, best_top5) + + +if __name__ == "__main__": + main() diff --git a/examples/nas/legacy/cdarts/run_retrain_cifar.sh b/examples/nas/legacy/cdarts/run_retrain_cifar.sh new file mode 100755 index 0000000000000000000000000000000000000000..a63d159ae75cb50f7d66c565c9543aeda3d3dba9 --- /dev/null +++ b/examples/nas/legacy/cdarts/run_retrain_cifar.sh @@ -0,0 +1,13 @@ +NGPUS=4 +SGPU=0 +EGPU=$[NGPUS+SGPU-1] +GPU_ID=`seq -s , $SGPU $EGPU` +HIP_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS retrain.py \ + --dataset cifar10 --n_classes 10 --init_channels 36 --stem_multiplier 3 \ + --arc_checkpoint 'epoch_31.json' \ + --batch_size 128 --workers 1 --log_frequency 10 \ + --world_size $NGPUS --weight_decay 5e-4 \ + --distributed --dist_url 'tcp://127.0.0.1:26443' \ + --lr 0.1 --warmup_epochs 0 --epochs 600 \ + --cutout_length 16 --aux_weight 0.4 --drop_path_prob 0.3 \ + --label_smooth 0.0 --mixup_alpha 0 diff --git a/examples/nas/legacy/cdarts/run_search_cifar.sh b/examples/nas/legacy/cdarts/run_search_cifar.sh new file mode 100755 index 0000000000000000000000000000000000000000..1ba09de12639a8eb93cdb06d62a8f41b3f2ce550 --- /dev/null +++ b/examples/nas/legacy/cdarts/run_search_cifar.sh @@ -0,0 +1,14 @@ +NGPUS=4 +SGPU=0 +EGPU=$[NGPUS+SGPU-1] +GPU_ID=`seq -s , $SGPU $EGPU` +HIP_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS search.py \ + --dataset cifar10 --n_classes 10 --init_channels 16 --stem_multiplier 3 \ + --batch_size 64 --workers 1 --log_frequency 10 \ + --distributed --world_size $NGPUS --dist_url 'tcp://127.0.0.1:23343' \ + --regular_ratio 0.2 --regular_coeff 5 \ + --loss_alpha 1 --loss_T 2 \ + --w_lr 0.2 --alpha_lr 3e-4 --nasnet_lr 0.2 \ + --w_weight_decay 0. --alpha_weight_decay 0. \ + --share_module --interactive_type kl \ + --warmup_epochs 2 --epochs 32 diff --git a/examples/nas/legacy/cdarts/search.py b/examples/nas/legacy/cdarts/search.py new file mode 100644 index 0000000000000000000000000000000000000000..4dd2581acefa95f5dc7a04c7e615c3793c9d9179 --- /dev/null +++ b/examples/nas/legacy/cdarts/search.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
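+
+# Launch sketch: this script is normally started through run_search_cifar.sh,
+# e.g. (arguments as in that script; single-GPU runs drop --distributed):
+#
+#     python -m torch.distributed.launch --nproc_per_node=4 search.py \
+#         --dataset cifar10 --n_classes 10 --init_channels 16 --distributed ...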
+ +import logging +import os +import random +import time + +import numpy as np +import torch +import torch.nn as nn + +import utils +from config import SearchConfig +from datasets.cifar import get_search_datasets +from model import Model +from nni.algorithms.nas.pytorch.cdarts import CdartsTrainer + +if __name__ == "__main__": + config = SearchConfig() + main_proc = not config.distributed or config.local_rank == 0 + if config.distributed: + torch.cuda.set_device(config.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url, + rank=config.local_rank, world_size=config.world_size) + if main_proc: + os.makedirs(config.output_path, exist_ok=True) + if config.distributed: + torch.distributed.barrier() + logger = utils.get_logger(os.path.join(config.output_path, 'search.log')) + if main_proc: + config.print_params(logger.info) + utils.reset_seed(config.seed) + + loaders, samplers = get_search_datasets(config) + model_small = Model(config.dataset, 8).cuda() + if config.share_module: + model_large = Model(config.dataset, 20, shared_modules=model_small.shared_modules).cuda() + else: + model_large = Model(config.dataset, 20).cuda() + + criterion = nn.CrossEntropyLoss() + trainer = CdartsTrainer(model_small, model_large, criterion, loaders, samplers, logger, + config.regular_coeff, config.regular_ratio, config.warmup_epochs, config.fix_head, + config.epochs, config.steps_per_epoch, config.loss_alpha, config.loss_T, config.distributed, + config.log_frequency, config.grad_clip, config.interactive_type, config.output_path, + config.w_lr, config.w_momentum, config.w_weight_decay, config.alpha_lr, config.alpha_weight_decay, + config.nasnet_lr, config.local_rank, config.share_module) + trainer.train() diff --git a/examples/nas/legacy/cdarts/utils.py b/examples/nas/legacy/cdarts/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf88fa3406045aebe08cfadca078f427d92876d --- /dev/null +++ b/examples/nas/legacy/cdarts/utils.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
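+
+# CyclicIterator usage sketch (illustrative): it wraps a DataLoader so that
+# iteration continues past epoch boundaries, re-seeding the distributed
+# sampler on every wrap-around:
+#
+#     batches = CyclicIterator(train_loader, train_sampler, distributed=True)
+#     for _ in range(num_steps):
+#         x, y = next(batches)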
+ +import json +import logging +import os +import random +from collections import namedtuple + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn + +from genotypes import Genotype +from ops import PRIMITIVES +from nni.algorithms.nas.pytorch.cdarts.utils import * + + +def get_logger(file_path): + """ Make python logger """ + logger = logging.getLogger('cdarts') + log_format = '%(asctime)s | %(message)s' + formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') + file_handler = logging.FileHandler(file_path) + file_handler.setFormatter(formatter) + # stream_handler = logging.StreamHandler() + # stream_handler.setFormatter(formatter) + + logger.addHandler(file_handler) + # logger.addHandler(stream_handler) + logger.setLevel(logging.INFO) + + return logger + + +class CyclicIterator: + def __init__(self, loader, sampler, distributed): + self.loader = loader + self.sampler = sampler + self.epoch = 0 + self.distributed = distributed + self._next_epoch() + + def _next_epoch(self): + if self.distributed: + self.sampler.set_epoch(self.epoch) + self.iterator = iter(self.loader) + self.epoch += 1 + + def __len__(self): + return len(self.loader) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self.iterator) + except StopIteration: + self._next_epoch() + return next(self.iterator) + + +class CrossEntropyLabelSmooth(nn.Module): + + def __init__(self, num_classes, epsilon): + super(CrossEntropyLabelSmooth, self).__init__() + self.num_classes = num_classes + self.epsilon = epsilon + self.logsoftmax = nn.LogSoftmax(dim=1) + + def forward(self, inputs, targets): + log_probs = self.logsoftmax(inputs) + targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1) + targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes + loss = (-targets * log_probs).mean(0).sum() + return loss + +def parse_results(results, n_nodes): + concat = range(2, 2 + n_nodes) + normal_gene = [] + reduction_gene = [] + for i in range(n_nodes): + normal_node = [] + reduction_node = [] + for j in range(2 + i): + normal_key = 'normal_n{}_p{}'.format(i + 2, j) + reduction_key = 'reduce_n{}_p{}'.format(i + 2, j) + normal_op = results[normal_key].cpu().numpy() + reduction_op = results[reduction_key].cpu().numpy() + if sum(normal_op == 1): + normal_index = np.argmax(normal_op) + normal_node.append((PRIMITIVES[normal_index], j)) + if sum(reduction_op == 1): + reduction_index = np.argmax(reduction_op) + reduction_node.append((PRIMITIVES[reduction_index], j)) + normal_gene.append(normal_node) + reduction_gene.append(reduction_node) + + genotypes = Genotype(normal=normal_gene, normal_concat=concat, + reduce=reduction_gene, reduce_concat=concat) + return genotypes + + +def param_size(model, loss_fn, input_size): + """ + Compute parameter size in MB + """ + x = torch.rand([2] + input_size).cuda() + y, _ = model(x) + target = torch.randint(model.n_classes, size=[2]).cuda() + loss = loss_fn(y, target) + loss.backward() + n_params = sum(np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head') and v.grad is not None) + return n_params / 1e6 + + +def encode_tensor(data, device): + if isinstance(data, list): + if all(map(lambda o: isinstance(o, bool), data)): + return torch.tensor(data, dtype=torch.bool, device=device) # pylint: disable=not-callable + else: + return torch.tensor(data, dtype=torch.float, device=device) # pylint: disable=not-callable + if isinstance(data, dict): + return {k: encode_tensor(v, 
device) for k, v in data.items()} + return data + + +def reset_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = True diff --git a/examples/nas/legacy/classic_nas-tf/config_ppo.yml b/examples/nas/legacy/classic_nas-tf/config_ppo.yml new file mode 100644 index 0000000000000000000000000000000000000000..8725d20a0be6afc8543eb0b2174c01565f01e125 --- /dev/null +++ b/examples/nas/legacy/classic_nas-tf/config_ppo.yml @@ -0,0 +1,18 @@ +authorName: default +experimentName: example_mnist +trialConcurrency: 1 +maxExecDuration: 100h +maxTrialNum: 1000 +#choice: local, remote, pai +trainingServicePlatform: local +#please use `nnictl ss_gen` to generate search space file first +searchSpacePath: nni_auto_gen_search_space.json +useAnnotation: False +tuner: + builtinTunerName: PPOTuner + classArgs: + optimize_mode: maximize +trial: + command: python3 train.py + codeDir: . + gpuNum: 0 diff --git a/examples/nas/legacy/classic_nas-tf/config_random_search.yml b/examples/nas/legacy/classic_nas-tf/config_random_search.yml new file mode 100644 index 0000000000000000000000000000000000000000..b7a04eb8ba04ca7bd32033305b75077812da77cb --- /dev/null +++ b/examples/nas/legacy/classic_nas-tf/config_random_search.yml @@ -0,0 +1,18 @@ +authorName: default +experimentName: example_mnist +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: local +#please use `nnictl ss_gen` to generate search space file first +searchSpacePath: nni_auto_gen_search_space.json +useAnnotation: False +tuner: + codeDir: ../../tuners/random_nas_tuner + classFileName: random_nas_tuner.py + className: RandomNASTuner +trial: + command: python3 train.py + codeDir: . 
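+  # 0 means each trial runs without requesting a GPU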
+ gpuNum: 0 diff --git a/examples/nas/legacy/classic_nas-tf/train.py b/examples/nas/legacy/classic_nas-tf/train.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ed9882b214bc7ef4c1e0960baf18f59c96e4cc --- /dev/null +++ b/examples/nas/legacy/classic_nas-tf/train.py @@ -0,0 +1,130 @@ +import argparse +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, MaxPool2D) +from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy +from tensorflow.keras.optimizers import SGD + +import nni +from nni.nas.tensorflow.mutables import LayerChoice, InputChoice +from nni.algorithms.nas.tensorflow.classic_nas import get_and_apply_next_architecture + +tf.get_logger().setLevel('ERROR') + +class Net(Model): + def __init__(self): + super().__init__() + self.conv1 = LayerChoice([ + Conv2D(6, 3, padding='same', activation='relu'), + Conv2D(6, 5, padding='same', activation='relu'), + ]) + self.pool = MaxPool2D(2) + self.conv2 = LayerChoice([ + Conv2D(16, 3, padding='same', activation='relu'), + Conv2D(16, 5, padding='same', activation='relu'), + ]) + self.conv3 = Conv2D(16, 1) + + self.skipconnect = InputChoice(n_candidates=2, n_chosen=1) + self.bn = BatchNormalization() + + self.gap = AveragePooling2D(2) + self.fc1 = Dense(120, activation='relu') + self.fc2 = Dense(84, activation='relu') + self.fc3 = Dense(10) + + def call(self, x): + bs = x.shape[0] + + t = self.conv1(x) + x = self.pool(t) + x0 = self.conv2(x) + x1 = self.conv3(x0) + + x0 = self.skipconnect([x0, None]) + if x0 is not None: + x1 += x0 + x = self.pool(self.bn(x1)) + + x = self.gap(x) + x = tf.reshape(x, [bs, -1]) + x = self.fc1(x) + x = self.fc2(x) + x = self.fc3(x) + return x + +loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) + +def loss(model, x, y, training): + # training=training is needed only if there are layers with different + # behavior during training versus inference (e.g. Dropout). + y_ = model(x, training=training) + + return loss_object(y_true=y, y_pred=y_) + +def grad(model, inputs, targets): + with tf.GradientTape() as tape: + loss_value = loss(model, inputs, targets, training=True) + return loss_value, tape.gradient(loss_value, model.trainable_variables) + +def train(net, train_dataset, optimizer, num_epochs): + train_loss_results = [] + train_accuracy_results = [] + + for epoch in range(num_epochs): + epoch_loss_avg = tf.keras.metrics.Mean() + epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy() + + for x, y in train_dataset: + loss_value, grads = grad(net, x, y) + optimizer.apply_gradients(zip(grads, net.trainable_variables)) + epoch_loss_avg.update_state(loss_value) + epoch_accuracy.update_state(y, net(x, training=True)) + + train_loss_results.append(epoch_loss_avg.result()) + train_accuracy_results.append(epoch_accuracy.result()) + + if epoch % 1 == 0: + print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch, + epoch_loss_avg.result(), + epoch_accuracy.result())) + +def test(model, test_dataset): + test_accuracy = tf.keras.metrics.Accuracy() + + for (x, y) in test_dataset: + # training=False is needed only if there are layers with different + # behavior during training versus inference (e.g. Dropout). 
+        logits = model(x, training=False)
+        prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
+        test_accuracy(prediction, y)
+
+    print("Test set accuracy: {:.3%}".format(test_accuracy.result()))
+    return test_accuracy.result()
+
+if __name__ == '__main__':
+    # Training settings
+    parser = argparse.ArgumentParser(description='TensorFlow CIFAR-10 Example')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    args, _ = parser.parse_known_args()
+
+    cifar10 = tf.keras.datasets.cifar10
+    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+    x_train, x_test = x_train / 255.0, x_test / 255.0
+    split = int(len(x_train) * 0.9)
+    dataset_train = tf.data.Dataset.from_tensor_slices((x_train[:split], y_train[:split])).batch(64)
+    dataset_valid = tf.data.Dataset.from_tensor_slices((x_train[split:], y_train[split:])).batch(64)
+    dataset_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(64)
+
+    net = Net()
+
+    get_and_apply_next_architecture(net)
+
+    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
+
+    train(net, dataset_train, optimizer, args.epochs)
+
+    acc = test(net, dataset_test)
+
+    nni.report_final_result(acc.numpy())
diff --git a/examples/nas/legacy/classic_nas/config_ppo.yml b/examples/nas/legacy/classic_nas/config_ppo.yml
new file mode 100644
index 0000000000000000000000000000000000000000..038977eb0e4ed3e736de10c292da149d9373d971
--- /dev/null
+++ b/examples/nas/legacy/classic_nas/config_ppo.yml
@@ -0,0 +1,18 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 100h
+maxTrialNum: 1000
+#choice: local, remote, pai
+trainingServicePlatform: local
+#please use `nnictl ss_gen` to generate search space file first
+searchSpacePath:
+useAnnotation: False
+tuner:
+  builtinTunerName: PPOTuner
+  classArgs:
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 0
diff --git a/examples/nas/legacy/classic_nas/config_random_search.yml b/examples/nas/legacy/classic_nas/config_random_search.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cb72aafc3c4e9804ee6ec9bbac4ba70ad658070c
--- /dev/null
+++ b/examples/nas/legacy/classic_nas/config_random_search.yml
@@ -0,0 +1,18 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: local
+#please use `nnictl ss_gen` to generate search space file first
+searchSpacePath:
+useAnnotation: False
+tuner:
+  codeDir: ../../tuners/random_nas_tuner
+  classFileName: random_nas_tuner.py
+  className: RandomNASTuner
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 0
diff --git a/examples/nas/legacy/classic_nas/mnist.py b/examples/nas/legacy/classic_nas/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..303e33cfdede66dffa8e59ac8db11d5b0e4a6f88
--- /dev/null
+++ b/examples/nas/legacy/classic_nas/mnist.py
@@ -0,0 +1,183 @@
+"""
+A deep MNIST classifier using convolutional layers.
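+The conv layers are wrapped in NNI LayerChoice/InputChoice mutables so that a
+classic NAS tuner can pick a concrete architecture for every trial.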
+ +This file is a modification of the official pytorch mnist example: +https://github.com/pytorch/examples/blob/master/mnist/main.py +""" + +import os +import argparse +import logging +from collections import OrderedDict + +import nni +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms + +from nni.nas.pytorch.mutables import LayerChoice, InputChoice +from nni.algorithms.nas.pytorch.classic_nas import get_and_apply_next_architecture + + +logger = logging.getLogger('mnist_AutoML') + + +class Net(nn.Module): + def __init__(self, hidden_size): + super(Net, self).__init__() + # two options of conv1 + self.conv1 = LayerChoice(OrderedDict([ + ("conv5x5", nn.Conv2d(1, 20, 5, 1)), + ("conv3x3", nn.Conv2d(1, 20, 3, 1)) + ]), key='first_conv') + # two options of mid_conv + self.mid_conv = LayerChoice([ + nn.Conv2d(20, 20, 3, 1, padding=1), + nn.Conv2d(20, 20, 5, 1, padding=2) + ], key='mid_conv') + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4*4*50, hidden_size) + self.fc2 = nn.Linear(hidden_size, 10) + # skip connection over mid_conv + self.input_switch = InputChoice(n_candidates=2, + n_chosen=1, + key='skip') + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + old_x = x + x = F.relu(self.mid_conv(x)) + zero_x = torch.zeros_like(old_x) + skip_x = self.input_switch([zero_x, old_x]) + x = torch.add(x, skip_x) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args['log_interval'] == 0: + logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # sum up batch loss + test_loss += F.nll_loss(output, target, reduction='sum').item() + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + accuracy = 100. 
* correct / len(test_loader.dataset) + + logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), accuracy)) + + return accuracy + + +def main(args): + use_cuda = not args['no_cuda'] and torch.cuda.is_available() + + torch.manual_seed(args['seed']) + + device = torch.device("cuda" if use_cuda else "cpu") + + kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} + + data_dir = args['data_dir'] + + train_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args['batch_size'], shuffle=True, **kwargs) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=1000, shuffle=True, **kwargs) + + hidden_size = args['hidden_size'] + + model = Net(hidden_size=hidden_size).to(device) + get_and_apply_next_architecture(model) + optimizer = optim.SGD(model.parameters(), lr=args['lr'], + momentum=args['momentum']) + + for epoch in range(1, args['epochs'] + 1): + train(args, model, device, train_loader, optimizer, epoch) + test_acc = test(args, model, device, test_loader) + + if epoch < args['epochs']: + # report intermediate result + nni.report_intermediate_result(test_acc) + logger.debug('test accuracy %g', test_acc) + logger.debug('Pipe send intermediate result done.') + else: + # report final result + nni.report_final_result(test_acc) + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + + +def get_params(): + # Training settings + parser = argparse.ArgumentParser(description='PyTorch MNIST Example') + parser.add_argument("--data_dir", type=str, + default='./data', help="data directory") + parser.add_argument('--batch_size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') + parser.add_argument("--hidden_size", type=int, default=512, metavar='N', + help='hidden layer size (default: 512)') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--momentum', type=float, default=0.5, metavar='M', + help='SGD momentum (default: 0.5)') + parser.add_argument('--epochs', type=int, default=10, metavar='N', + help='number of epochs to train (default: 10)') + parser.add_argument('--seed', type=int, default=1, metavar='S', + help='random seed (default: 1)') + parser.add_argument('--no_cuda', action='store_true', default=False, + help='disables CUDA training') + parser.add_argument('--log_interval', type=int, default=1000, metavar='N', + help='how many batches to wait before logging training status') + + args, _ = parser.parse_known_args() + return args + + +if __name__ == '__main__': + try: + params = vars(get_params()) + main(params) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/nas/legacy/cream/Cream.md b/examples/nas/legacy/cream/Cream.md new file mode 100644 index 0000000000000000000000000000000000000000..a871bddf78e97703857330597bc75c342a298547 --- /dev/null +++ b/examples/nas/legacy/cream/Cream.md @@ -0,0 +1 @@ +[Documentation](https://nni.readthedocs.io/en/latest/NAS/Cream.html) diff --git a/examples/nas/legacy/cream/Cream_zh_CN.md b/examples/nas/legacy/cream/Cream_zh_CN.md new file mode 100644 index 
0000000000000000000000000000000000000000..67d339751c9fc327e5d0f8c6c76fc09c8df7be8a --- /dev/null +++ b/examples/nas/legacy/cream/Cream_zh_CN.md @@ -0,0 +1 @@ +[文档](https://nni.readthedocs.io/zh/latest/NAS/Cream.html) diff --git a/CHANGELOG b/examples/nas/legacy/cream/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from CHANGELOG rename to examples/nas/legacy/cream/__init__.py diff --git a/examples/nas/legacy/cream/configs/retrain/114.yaml b/examples/nas/legacy/cream/configs/retrain/114.yaml new file mode 100755 index 0000000000000000000000000000000000000000..1cf1ecaa71d629ec33a889fc38002ebe4ac7f4f0 --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/114.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '112m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 470 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/14.yaml b/examples/nas/legacy/cream/configs/retrain/14.yaml new file mode 100755 index 0000000000000000000000000000000000000000..b17f43961b33861e47d0175434a46e8bc88f0811 --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/14.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '14m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 470 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/23.yaml b/examples/nas/legacy/cream/configs/retrain/23.yaml new file mode 100755 index 0000000000000000000000000000000000000000..9b872ea9ce8ee4057e9196fd0479043dd100ff78 --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/23.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '23m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: 
False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 470 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/287.yaml b/examples/nas/legacy/cream/configs/retrain/287.yaml new file mode 100755 index 0000000000000000000000000000000000000000..7bd9246833ccb9455a9ce921465f650acc2c4f1d --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/287.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '287m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 470 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/43.yaml b/examples/nas/legacy/cream/configs/retrain/43.yaml new file mode 100755 index 0000000000000000000000000000000000000000..346242fffd2bd3258f8eccc03c7f437fe4819fc8 --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/43.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '43m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 43 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/481.yaml b/examples/nas/legacy/cream/configs/retrain/481.yaml new file mode 100755 index 0000000000000000000000000000000000000000..4fd08fce62bb3ce0adfb8c246c77e479aab8f3b4 --- /dev/null +++ 
b/examples/nas/legacy/cream/configs/retrain/481.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '481m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 481 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/604.yaml b/examples/nas/legacy/cream/configs/retrain/604.yaml new file mode 100755 index 0000000000000000000000000000000000000000..f54381f6ea089970e14839f20877b7a688e8365e --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/604.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '604m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 604 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/retrain/72.yaml b/examples/nas/legacy/cream/configs/retrain/72.yaml new file mode 100755 index 0000000000000000000000000000000000000000..74d794d65a9be6a864549ed84afdd9b35d12a953 --- /dev/null +++ b/examples/nas/legacy/cream/configs/retrain/72.yaml @@ -0,0 +1,51 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '72m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './experiments/workspace/retrain' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.2 + SELECTION: 470 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9999 + + +LR: 0.064 +EPOCHS: 500 +OPT_EPS: 1e-3 +SCHED: 'cosine' +OPT: 'rmsproptf' +WARMUP_LR: 1e-6 +DECAY_EPOCHS: 2.4 +DECAY_RATE: 0.973 +WARMUP_EPOCHS: 3 +WEIGHT_DECAY: 1e-5 + +AUGMENTATION: + AA: 'rand-m9-mstd0.5' + RE_PROB: 0.2 # 
random erase prob + RE_MODE: 'pixel' # random erase mode + diff --git a/examples/nas/legacy/cream/configs/test.yaml b/examples/nas/legacy/cream/configs/test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4bf568517f86fb7ebe48aa6d608a6573f42d3b0d --- /dev/null +++ b/examples/nas/legacy/cream/configs/test.yaml @@ -0,0 +1,37 @@ +AUTO_RESUME: True +DATA_DIR: './data/imagenet' +MODEL: 'Childnet_Testing' +RESUME_PATH: './experiments/workspace/ckps/42.pth.tar' +SAVE_PATH: './' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 4 +NUM_GPU: 2 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'bilinear' # Image resize interpolation type + BATCH_SIZE: 32 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.0 + SELECTION: 42 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9998 + +OPTIMIZER: + MOMENTUM: 0.9 + WEIGHT_DECAY: 1e-3 \ No newline at end of file diff --git a/examples/nas/legacy/cream/configs/train.yaml b/examples/nas/legacy/cream/configs/train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85164e0edac183330ff4a08ed65fdbbbe3d68c8c --- /dev/null +++ b/examples/nas/legacy/cream/configs/train.yaml @@ -0,0 +1,53 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: 'Supernet_Training' +RESUME_PATH: './experiments/workspace/train/resume.pth.tar' +SAVE_PATH: './' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'bilinear' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + +NET: + GP: 'avg' + DROPOUT_RATE: 0.0 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9998 + +OPT: 'sgd' +LR: 1.0 +EPOCHS: 120 +META_LR: 1e-4 + +BATCHNORM: + SYNC_BN: False + +SUPERNET: + UPDATE_ITER: 200 + SLICE: 4 + POOL_SIZE: 10 + RESUNIT: False + DIL_CONV: False + UPDATE_2ND: True + FLOPS_MINIMUM: 0 + FLOPS_MAXIMUM: 600 + PICK_METHOD: 'meta' + META_STA_EPOCH: 20 + HOW_TO_PROB: 'pre_prob' + PRE_PROB: (0.05,0.2,0.05,0.5,0.05,0.15) \ No newline at end of file diff --git a/examples/nas/legacy/cream/lib/config.py b/examples/nas/legacy/cream/lib/config.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0cb05782088a22cbe2d20241ab22c3e7181b59 --- /dev/null +++ b/examples/nas/legacy/cream/lib/config.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
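All of the retrain/test configs above enable model EMA (NET.EMA.USE) with a decay of 0.9999 or 0.9998; the scripts take the actual implementation from timm's ModelEma. As a rough sketch of what the DECAY knob controls (illustrative only, not the timm code), each weight is tracked as an exponential moving average with an effective horizon of about 1/(1-decay) steps, i.e. roughly 10,000 updates at 0.9999:

```python
# Minimal sketch of the update implied by NET.EMA.DECAY (assumes parameter
# lists of equal length; timm's ModelEma additionally handles state_dict
# matching and the FORCE_CPU option).
import torch

@torch.no_grad()
def ema_update(ema_params, model_params, decay=0.9999):
    for e, p in zip(ema_params, model_params):
        e.mul_(decay).add_(p, alpha=1.0 - decay)  # ema = decay*ema + (1-decay)*w
```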
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from yacs.config import CfgNode as CN + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + +__C = CN() + +cfg = __C + +__C.AUTO_RESUME = True +__C.DATA_DIR = './data/imagenet' +__C.MODEL = 'cream' +__C.RESUME_PATH = './experiments/ckps/resume.pth.tar' +__C.SAVE_PATH = './experiments/ckps/' +__C.SEED = 42 +__C.LOG_INTERVAL = 50 +__C.RECOVERY_INTERVAL = 0 +__C.WORKERS = 4 +__C.NUM_GPU = 1 +__C.SAVE_IMAGES = False +__C.AMP = False +__C.ACC_GAP = 5 +__C.OUTPUT = 'output/path/' +__C.EVAL_METRICS = 'prec1' +__C.TTA = 0 # Test or inference time augmentation +__C.LOCAL_RANK = 0 +__C.VERBOSE = False + +# dataset configs +__C.DATASET = CN() +__C.DATASET.NUM_CLASSES = 1000 +__C.DATASET.IMAGE_SIZE = 224 # image patch size +__C.DATASET.INTERPOLATION = 'bilinear' # Image resize interpolation type +__C.DATASET.BATCH_SIZE = 32 # batch size +__C.DATASET.NO_PREFECHTER = False +__C.DATASET.PIN_MEM = True +__C.DATASET.VAL_BATCH_MUL = 4 + + +# model configs +__C.NET = CN() +__C.NET.SELECTION = 14 +__C.NET.GP = 'avg' # type of global pool ["avg", "max", "avgmax", "avgmaxc"] +__C.NET.DROPOUT_RATE = 0.0 # dropout rate +__C.NET.INPUT_ARCH = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + +# model ema parameters +__C.NET.EMA = CN() +__C.NET.EMA.USE = True +__C.NET.EMA.FORCE_CPU = False # force model ema to be tracked on CPU +__C.NET.EMA.DECAY = 0.9998 + +# optimizer configs +__C.OPT = 'sgd' +__C.OPT_EPS = 1e-2 +__C.MOMENTUM = 0.9 +__C.WEIGHT_DECAY = 1e-4 +__C.OPTIMIZER = CN() +__C.OPTIMIZER.NAME = 'sgd' +__C.OPTIMIZER.MOMENTUM = 0.9 +__C.OPTIMIZER.WEIGHT_DECAY = 1e-3 + +# scheduler configs +__C.SCHED = 'sgd' +__C.LR_NOISE = None +__C.LR_NOISE_PCT = 0.67 +__C.LR_NOISE_STD = 1.0 +__C.WARMUP_LR = 1e-4 +__C.MIN_LR = 1e-5 +__C.EPOCHS = 200 +__C.START_EPOCH = None +__C.DECAY_EPOCHS = 30.0 +__C.WARMUP_EPOCHS = 3 +__C.COOLDOWN_EPOCHS = 10 +__C.PATIENCE_EPOCHS = 10 +__C.DECAY_RATE = 0.1 +__C.LR = 1e-2 +__C.META_LR = 1e-4 + +# data augmentation parameters +__C.AUGMENTATION = CN() +__C.AUGMENTATION.AA = 'rand-m9-mstd0.5' +__C.AUGMENTATION.COLOR_JITTER = 0.4 +__C.AUGMENTATION.RE_PROB = 0.2 # random erase prob +__C.AUGMENTATION.RE_MODE = 'pixel' # random erase mode +__C.AUGMENTATION.MIXUP = 0.0 # mixup alpha +__C.AUGMENTATION.MIXUP_OFF_EPOCH = 0 # turn off mixup after this epoch +__C.AUGMENTATION.SMOOTHING = 0.1 # label smoothing parameters + +# batch norm parameters (only works with gen_efficientnet based models +# currently) +__C.BATCHNORM = CN() +__C.BATCHNORM.SYNC_BN = False +__C.BATCHNORM.BN_TF = False +__C.BATCHNORM.BN_MOMENTUM = 0.1 # batchnorm momentum override +__C.BATCHNORM.BN_EPS = 1e-5 # batchnorm eps override + +# supernet training hyperparameters +__C.SUPERNET = CN() +__C.SUPERNET.UPDATE_ITER = 1300 +__C.SUPERNET.SLICE = 4 +__C.SUPERNET.POOL_SIZE = 10 +__C.SUPERNET.RESUNIT = False +__C.SUPERNET.DIL_CONV = False +__C.SUPERNET.UPDATE_2ND = True +__C.SUPERNET.FLOPS_MAXIMUM = 600 +__C.SUPERNET.FLOPS_MINIMUM = 0 +__C.SUPERNET.PICK_METHOD = 'meta' # pick teacher method +__C.SUPERNET.META_STA_EPOCH = 20 # start using meta picking method +__C.SUPERNET.HOW_TO_PROB = 'pre_prob' # sample method +__C.SUPERNET.PRE_PROB = (0.05, 0.2, 0.05, 0.5, 0.05, + 0.15) # sample prob in 'pre_prob' diff 
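lib/config.py above holds every default, so the YAML files earlier in this diff only need to list the keys they override. A minimal sketch of the merge flow, using yacs' standard CfgNode API (paths illustrative):

```python
# Illustrative: load defaults, then overlay one experiment YAML.
from lib.config import cfg

cfg.merge_from_file('./configs/train.yaml')  # e.g. overrides LR, OPT, SUPERNET.*
cfg.freeze()                                 # guard against accidental edits
print(cfg.SUPERNET.PICK_METHOD)              # 'meta' unless the YAML changed it
```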
--git a/examples/nas/legacy/cream/lib/core/retrain.py b/examples/nas/legacy/cream/lib/core/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..7468db2bb5b9db36544aa5e4009aa0a78e4f5e13 --- /dev/null +++ b/examples/nas/legacy/cream/lib/core/retrain.py @@ -0,0 +1,135 @@ +import os +import time +import torch +import torchvision + +from collections import OrderedDict + +from lib.utils.util import AverageMeter, accuracy, reduce_tensor + +def train_epoch( + epoch, model, loader, optimizer, loss_fn, cfg, + lr_scheduler=None, saver=None, output_dir='', use_amp=False, + model_ema=None, logger=None, writer=None, local_rank=0): + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + num_updates = epoch * len(loader) + optimizer.zero_grad() + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + + input = input.cuda() + target = target.cuda() + output = model(input) + + loss = loss_fn(output, target) + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + if model_ema is not None: + model_ema.update(model) + num_updates += 1 + + batch_time_m.update(time.time() - end) + if last_batch or batch_idx % cfg.LOG_INTERVAL == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if local_rank == 0: + logger.info( + 'Train: {} [{:>4d}/{}] ' + 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e} ' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, + len(loader), + loss=losses_m, + top1=prec1_m, + top5=prec5_m, + batch_time=batch_time_m, + rate=input.size(0) * + cfg.NUM_GPU / + batch_time_m.val, + rate_avg=input.size(0) * + cfg.NUM_GPU / + batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + + writer.add_scalar( + 'Loss/train', + losses_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Accuracy/train', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Learning_Rate', + optimizer.param_groups[0]['lr'], + epoch * len(loader) + batch_idx) + + if cfg.SAVE_IMAGES and output_dir: + torchvision.utils.save_image( + input, os.path.join( + output_dir, 'train-batch-%d.jpg' % + batch_idx), padding=0, normalize=True) + + if saver is not None and cfg.RECOVERY_INTERVAL and ( + last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): + saver.save_recovery( + model, + optimizer, + cfg, + epoch, + model_ema=model_ema, + use_amp=use_amp, + batch_idx=batch_idx) + + if lr_scheduler is not None: + lr_scheduler.step_update( + num_updates=num_updates, + metric=losses_m.avg) + + end = time.time() + # end for + + if hasattr(optimizer, 'sync_lookahead'): + optimizer.sync_lookahead() + + return
OrderedDict([('loss', losses_m.avg)]) diff --git a/examples/nas/legacy/cream/lib/core/test.py b/examples/nas/legacy/cream/lib/core/test.py new file mode 100644 index 0000000000000000000000000000000000000000..7ab69b57c0d29908b7709262fd0fe564b7254be7 --- /dev/null +++ b/examples/nas/legacy/cream/lib/core/test.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import time +import torch + +from collections import OrderedDict +from lib.utils.util import AverageMeter, accuracy, reduce_tensor + + +def validate(epoch, model, loader, loss_fn, cfg, log_suffix='', logger=None, writer=None, local_rank=0): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + + output = model(input) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = cfg.TTA + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if local_rank == 0 and (last_batch or batch_idx % cfg.LOG_INTERVAL == 0): + log_name = 'Test' + log_suffix + logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, + batch_time=batch_time_m, loss=losses_m, + top1=prec1_m, top5=prec5_m)) + + writer.add_scalar( + 'Loss' + log_suffix + '/valid', + losses_m.avg, + epoch * len(loader) + batch_idx) + writer.add_scalar( + 'Accuracy' + + log_suffix + + '/valid', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + + metrics = OrderedDict( + [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) + + return metrics diff --git a/examples/nas/legacy/cream/lib/models/blocks/__init__.py b/examples/nas/legacy/cream/lib/models/blocks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83a19f2b91b5206cbeecb5eeb15a79e839fd21a2 --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/blocks/__init__.py @@ -0,0 +1,2 @@ +from lib.models.blocks.residual_block import get_Bottleneck, get_BasicBlock +from lib.models.blocks.inverted_residual_block import InvertedResidual \ No newline at end of file diff --git a/examples/nas/legacy/cream/lib/models/blocks/inverted_residual_block.py b/examples/nas/legacy/cream/lib/models/blocks/inverted_residual_block.py new file mode 100644 index 0000000000000000000000000000000000000000..2f501b561b4ec56d0982f62db12a12c163dbaf17 --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/blocks/inverted_residual_block.py @@
-0,0 +1,113 @@ +# This file is downloaded from +# https://github.com/rwightman/pytorch-image-models + +import torch.nn as nn + +from timm.models.layers import create_conv2d +from timm.models.efficientnet_blocks import make_divisible, resolve_se_args, \ + SqueezeExcite, drop_path + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__( + self, + in_chs, + out_chs, + dw_kernel_size=3, + stride=1, + dilation=1, + pad_type='', + act_layer=nn.ReLU, + noskip=False, + exp_ratio=1.0, + exp_kernel_size=1, + pw_kernel_size=1, + se_ratio=0., + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + conv_kwargs=None, + drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d( + in_chs, + mid_chs, + exp_kernel_size, + padding=pad_type, + **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d( + mid_chs, + out_chs, + pw_kernel_size, + padding=pad_type, + **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict( + module='conv_pwl', + hook_type='forward_pre', + num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict( + module='', + hook_type='', + num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += residual + + return x diff --git a/examples/nas/legacy/cream/lib/models/blocks/residual_block.py b/examples/nas/legacy/cream/lib/models/blocks/residual_block.py new file mode 100644 index 0000000000000000000000000000000000000000..75892eee79a8cffb27af93fad504270f8d0962bf --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/blocks/residual_block.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
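The block above follows the standard MobileNetV2/EfficientNet layout: point-wise expansion, depth-wise convolution, optional squeeze-and-excitation, then a linear point-wise projection, with a skip connection only when shapes allow it. A small shape-level smoke test (hypothetical values, assuming the pinned timm 0.1.20 helpers it imports):

```python
# Hypothetical usage of the InvertedResidual defined above.
import torch
from lib.models.blocks import InvertedResidual

blk = InvertedResidual(in_chs=24, out_chs=24, dw_kernel_size=3,
                       stride=1, exp_ratio=4.0, se_ratio=0.25)
x = torch.randn(2, 24, 56, 56)
assert blk.has_residual             # in_chs == out_chs and stride == 1
assert blk(x).shape == x.shape      # stride-1 block keeps the feature map size
```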
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=True) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + + def __init__(self, inplanes, planes, stride=1, expansion=4): + super(Bottleneck, self).__init__() + planes = int(planes / expansion) + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=True) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, + planes * expansion, + kernel_size=1, + bias=True) + self.bn3 = nn.BatchNorm2d(planes * expansion) + self.relu = nn.ReLU(inplace=True) + self.stride = stride + self.expansion = expansion + if inplanes != planes * self.expansion: + self.downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * self.expansion, + kernel_size=1, stride=stride, bias=True), + nn.BatchNorm2d(planes * self.expansion), + ) + else: + self.downsample = None + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +def get_Bottleneck(in_c, out_c, stride): + return Bottleneck(in_c, out_c, stride=stride) + + +def get_BasicBlock(in_c, out_c, stride): + return BasicBlock(in_c, out_c, stride=stride) diff --git a/examples/nas/legacy/cream/lib/models/builders/build_childnet.py b/examples/nas/legacy/cream/lib/models/builders/build_childnet.py new file mode 100755 index 0000000000000000000000000000000000000000..8ddfb40024053f511e00b4656e3638e16689c1db --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/builders/build_childnet.py @@ -0,0 +1,181 @@ +from lib.utils.util import * + +from timm.models.efficientnet_blocks import * + + +class ChildNetBuilder: + def __init__( + self, + channel_multiplier=1.0, + channel_divisor=8, + channel_min=None, + output_stride=32, + pad_type='', + act_layer=None, + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + drop_path_rate=0., + feature_location='', + verbose=False, + logger=None): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = 
drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + self.in_chs = None + self.features = OrderedDict() + self.logger = logger + + def _round_channels(self, chs): + return round_channels( + chs, + self.channel_multiplier, + self.channel_divisor, + self.channel_min) + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' InvertedResidual {}, Args: {}'.format( + block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' DepthwiseSeparable {}, Args: {}'.format( + block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + self.logger.info( + ' ConvBnAct {}, Args: {}'.format( + block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Unknown block type (%s) while building model.' % bt + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + self.logger.info( + 'Building model trunk with %d stages...'
% + len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some + # conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + self.logger.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + extract_features = '' # No features extracted + if self.verbose: + self.logger.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + do_extract = False + if self.feature_location == 'pre_pwl': + if last_block: + next_stage_idx = stage_idx + 1 + if next_stage_idx >= len(model_block_args): + do_extract = True + else: + do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 + elif self.feature_location == 'post_exp': + if block_args['stride'] > 1 or (last_stack and last_block): + do_extract = True + if do_extract: + extract_features = self.feature_location + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + self.logger.info( + ' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block( + block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature + # extraction + if extract_features: + feature_module = block.feature_module(extract_features) + if feature_module: + feature_module = 'blocks.{}.{}.'.format( + stage_idx, block_idx) + feature_module + feature_channels = block.feature_channels(extract_features) + self.features[feature_idx] = dict( + name=feature_module, + num_chs=feature_channels + ) + feature_idx += 1 + + # incr global block idx (across all stacks) + total_block_idx += 1 + stages.append(nn.Sequential(*blocks)) + return stages diff --git a/examples/nas/legacy/cream/lib/models/builders/build_supernet.py b/examples/nas/legacy/cream/lib/models/builders/build_supernet.py new file mode 100644 index 0000000000000000000000000000000000000000..37d9c575c872abad3a3f11e7480d756bf14dc895 --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/builders/build_supernet.py @@ -0,0 +1,214 @@ +from copy import deepcopy + +from lib.utils.builder_util import modify_block_args +from lib.models.blocks import get_Bottleneck, InvertedResidual + +from timm.models.efficientnet_blocks import * + +from nni.nas.pytorch import mutables + +class SuperNetBuilder: + """ Build Trunk Blocks + """ + + def __init__( + self, + choices, + channel_multiplier=1.0, + channel_divisor=8, + channel_min=None, + output_stride=32, + pad_type='', + act_layer=None, + 
se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + drop_path_rate=0., + feature_location='', + verbose=False, + resunit=False, + dil_conv=False, + logger=None): + + # dict + # choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + self.choices = [[x, y] for x in choices['kernel_size'] + for y in choices['exp_ratio']] + self.choice_num = len(self.choices) # refined per stage in __call__ + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + self.resunit = resunit + self.dil_conv = dil_conv + self.logger = logger + + # state updated during build, consumed by model + self.in_chs = None + + def _round_channels(self, chs): + return round_channels( + chs, + self.channel_multiplier, + self.channel_divisor, + self.channel_min) + + def _make_block( + self, + ba, + choice_idx, + block_idx, + block_count, + resunit=False, + dil_conv=False): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input + # filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' InvertedResidual {}, Args: {}'.format( + block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' DepthwiseSeparable {}, Args: {}'.format( + block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + self.logger.info( + ' ConvBnAct {}, Args: {}'.format( + block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Unknown block type (%s) while building model.' % bt + if choice_idx == self.choice_num - 1: + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + self.logger.info('Building model trunk with %d stages...'
% len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + self.logger.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + # blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + if self.verbose: + self.logger.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + + if stage_idx==0 or stage_idx==6: + self.choice_num = 1 + else: + self.choice_num = len(self.choices) + + if self.dil_conv: + self.choice_num += 2 + + choice_blocks = [] + block_args_copy = deepcopy(block_args) + if self.choice_num == 1: + # create the block + block = self._make_block(block_args, 0, total_block_idx, total_block_count) + choice_blocks.append(block) + else: + for choice_idx, choice in enumerate(self.choices): + # create the block + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, choice[0], choice[1]) + block = self._make_block(block_args, choice_idx, total_block_idx, total_block_count) + choice_blocks.append(block) + if self.dil_conv: + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 3, 0) + block = self._make_block(block_args, self.choice_num - 2, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) + choice_blocks.append(block) + + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 5, 0) + block = self._make_block(block_args, self.choice_num - 1, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) + choice_blocks.append(block) + + if self.resunit: + block = get_Bottleneck(block.conv_pw.in_channels, + block.conv_pwl.out_channels, + block.conv_dw.stride[0]) + choice_blocks.append(block) + + choice_block = mutables.LayerChoice(choice_blocks) + stages.append(choice_block) + # create the block + # block = self._make_block(block_args, total_block_idx, total_block_count) + total_block_idx += 1 # incr global block idx (across all stacks) + + # stages.append(blocks) + return stages diff --git a/examples/nas/legacy/cream/lib/models/structures/childnet.py b/examples/nas/legacy/cream/lib/models/structures/childnet.py new file mode 100755 index 0000000000000000000000000000000000000000..668b92e1572ee28729b0f5eba8c5acf0a4bdb0e7 --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/structures/childnet.py @@ -0,0 +1,145 @@ +from lib.utils.builder_util import * +from lib.models.builders.build_childnet import * + +from 
timm.models.layers import SelectAdaptivePool2d +from timm.models.layers.activations import hard_sigmoid + + +class ChildNet(nn.Module): + + def __init__( + self, + block_args, + num_classes=1000, + in_chans=3, + stem_size=16, + num_features=1280, + head_bias=True, + channel_multiplier=1.0, + pad_type='', + act_layer=nn.ReLU, + drop_rate=0., + drop_path_rate=0., + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + global_pool='avg', + logger=None, + verbose=False): + super(ChildNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.logger = logger + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d( + self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = ChildNetBuilder( + channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=verbose) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + # self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d( + self._in_chs, + self.num_features, + 1, + padding=pad_type, + bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear( + self.num_features * + self.global_pool.feat_mult(), + self.num_classes) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), + num_classes) if self.num_classes else None + + def forward_features(self, x): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def gen_childnet(arch_list, arch_def, **kwargs): + # arch_list = [[0], [], [], [], [], [0]] + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + choices_list = [[x, y] for x in choices['kernel_size'] + for y in choices['exp_ratio']] + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + + new_arch = [] + # change to child arch_def + for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): + if len(layer_arch) == 1: + new_arch.append(layer_arch) + continue + else: + new_layer = [] + for j, (block_choice, block_arch) in enumerate( + zip(layer_choice, layer_arch)): + kernel_size, exp_ratio = choices_list[block_choice] + elements = block_arch.split('_') + block_arch = block_arch.replace( + elements[2], 'k{}'.format(str(kernel_size))) + block_arch = block_arch.replace( + elements[4], 'e{}'.format(str(exp_ratio))) + new_layer.append(block_arch) + new_arch.append(new_layer) + + model_kwargs = dict( + block_args=decode_arch_def(new_arch), + num_features=num_features, + stem_size=16, + norm_kwargs=resolve_bn_args(kwargs), + 
act_layer=act_layer, + se_kwargs=dict( + act_layer=nn.ReLU, + gate_fn=hard_sigmoid, + reduce_mid=True, + divisor=8), + **kwargs, + ) + model = ChildNet(**model_kwargs) + return model diff --git a/examples/nas/legacy/cream/lib/models/structures/supernet.py b/examples/nas/legacy/cream/lib/models/structures/supernet.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a84ae1ea4d22c35f3cca73d3f9dba4305d744e --- /dev/null +++ b/examples/nas/legacy/cream/lib/models/structures/supernet.py @@ -0,0 +1,202 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +from lib.utils.builder_util import * +from lib.utils.search_structure_supernet import * +from lib.models.builders.build_supernet import * +from lib.utils.op_by_layer_dict import flops_op_dict + +from timm.models.layers import SelectAdaptivePool2d +from timm.models.layers.activations import hard_sigmoid + + +class SuperNet(nn.Module): + + def __init__( + self, + block_args, + choices, + num_classes=1000, + in_chans=3, + stem_size=16, + num_features=1280, + head_bias=True, + channel_multiplier=1.0, + pad_type='', + act_layer=nn.ReLU, + drop_rate=0., + drop_path_rate=0., + slice=4, + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + logger=None, + norm_kwargs=None, + global_pool='avg', + resunit=False, + dil_conv=False, + verbose=False): + super(SuperNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.logger = logger + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d( + self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = SuperNetBuilder( + choices, + channel_multiplier, + 8, + None, + 32, + pad_type, + act_layer, + se_kwargs, + norm_layer, + norm_kwargs, + drop_path_rate, + verbose=verbose, + resunit=resunit, + dil_conv=dil_conv, + logger=self.logger) + blocks = builder(self._in_chs, block_args) + self.blocks = nn.Sequential(*blocks) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d( + self._in_chs, + self.num_features, + 1, + padding=pad_type, + bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear( + self.num_features * + self.global_pool.feat_mult(), + self.num_classes) + + self.meta_layer = nn.Linear(self.num_classes * slice, 1) + efficientnet_init_weights(self) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), + num_classes) if self.num_classes else None + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + def forward_meta(self, features): + return 
self.meta_layer(features.view(1, -1)) + + def rand_parameters(self, architecture, meta=False): + for name, param in self.named_parameters(recurse=True): + if 'meta' in name and meta: + yield param + elif 'blocks' not in name and 'meta' not in name and (not meta): + yield param + + if not meta: + for choice_blocks, choice_name in zip(self.blocks, architecture): + choice_sample = architecture[choice_name] + for block, arch in zip(choice_blocks, choice_sample): + if not arch: + continue + for name, param in block.named_parameters(recurse=True): + yield param + + +class Classifier(nn.Module): + def __init__(self, num_classes=1000): + super(Classifier, self).__init__() + self.classifier = nn.Linear(num_classes, num_classes) + + def forward(self, x): + return self.classifier(x) + + +def gen_supernet(flops_minimum=0, flops_maximum=600, **kwargs): + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', + 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + + sta_num, arch_def, resolution = search_for_layer( + flops_op_dict, arch_def, flops_minimum, flops_maximum) + + if sta_num is None or arch_def is None or resolution is None: + raise ValueError('Invalid FLOPs Settings') + + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + choices=choices, + num_features=num_features, + stem_size=16, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict( + act_layer=nn.ReLU, + gate_fn=hard_sigmoid, + reduce_mid=True, + divisor=8), + **kwargs, + ) + model = SuperNet(**model_kwargs) + return model, sta_num, resolution diff --git a/examples/nas/legacy/cream/lib/utils/builder_util.py b/examples/nas/legacy/cream/lib/utils/builder_util.py new file mode 100644 index 0000000000000000000000000000000000000000..cbdc05782846440e327dc3a1e979dfd8659f5033 --- /dev/null +++ b/examples/nas/legacy/cream/lib/utils/builder_util.py @@ -0,0 +1,275 @@ +import re +import math +import torch.nn as nn + +from copy import deepcopy +from timm.utils import * +from timm.models.layers.activations import Swish +from timm.models.layers import CondConv2d, get_condconv_initializer + + +def parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def decode_arch_def( + arch_def, + depth_multiplier=1.0, + depth_trunc='ceil', + experts_multiplier=1): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + 
stack_args.append(ba) + repeats.append(rep) + arch_args.append( + scale_stage_depth( + stack_args, + repeats, + depth_multiplier, + depth_trunc)) + return arch_args + + +def modify_block_args(block_args, kernel_size, exp_ratio): + block_type = block_args['block_type'] + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + + +def decode_block_str(block_str): + """ Decode block definition string + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. + Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they + # grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = nn.ReLU + elif v == 'r6': + value = nn.ReLU6 + elif v == 'sw': + value = Swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be + # used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = parse_ksize(options['p']) if 'p' in options else 1 + # FIXME hack to deal with in_chs issue in TPU def + fake_in_chs = int(options['fc']) if 'fc' in options else 0 + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + 
stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def scale_stage_depth( + stack_args, + repeats, + depth_multiplier=1.0, + depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as + # long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every + # stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch + # definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None): + """ Weight initialization as per Tensorflow official implementations. 
+ Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer(lambda w: w.data.normal_( + 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + if last_bn and n in last_bn: + # zero-gamma init: the last BN of each block starts as identity + m.weight.data.zero_() + m.bias.data.zero_() + else: + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def efficientnet_init_weights( + model: nn.Module, + init_fn=None, + zero_gamma=False): + last_bn = [] + if zero_gamma: + prev_n = '' + for n, m in model.named_modules(): + if isinstance(m, nn.BatchNorm2d): + if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]): + last_bn.append(prev_n) + prev_n = n + last_bn.append(prev_n) + + init_fn = init_fn or init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n, last_bn=last_bn) diff --git a/examples/nas/legacy/cream/lib/utils/flops_table.py b/examples/nas/legacy/cream/lib/utils/flops_table.py new file mode 100644 index 0000000000000000000000000000000000000000..254241a075561834bf277f157cd96807a72cf8ed --- /dev/null +++ b/examples/nas/legacy/cream/lib/utils/flops_table.py @@ -0,0 +1,79 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License.
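init_weight_goog above reproduces the TensorFlow EfficientNet initialization; the conv rule in isolation is just a fan-out-scaled normal draw (sketch below, values illustrative). The zero_gamma path additionally zeroes the gamma of the last BatchNorm in each residual branch so that every block starts out close to an identity mapping, which tends to stabilize early training.

```python
# Restatement of the conv branch of init_weight_goog (illustrative only).
import math
import torch.nn as nn

conv = nn.Conv2d(16, 32, kernel_size=3, padding=1)
fan_out = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels  # 3*3*32
conv.weight.data.normal_(0, math.sqrt(2.0 / fan_out))  # N(0, 2/fan_out)
if conv.bias is not None:
    conv.bias.data.zero_()
```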
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import torch + +from ptflops import get_model_complexity_info + + +class FlopsEst(object): + def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'): + self.block_num = len(model.blocks) + self.choice_num = len(model.blocks[0]) + self.flops_dict = {} + self.params_dict = {} + + if device == 'cpu': + model = model.cpu() + else: + model = model.cuda() + + self.params_fixed = 0 + self.flops_fixed = 0 + + input = torch.randn(input_shape) + + flops, params = get_model_complexity_info( + model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + input = model.conv_stem(input) + + for block_id, block in enumerate(model.blocks): + self.flops_dict[block_id] = {} + self.params_dict[block_id] = {} + for module_id, module in enumerate(block): + flops, params = get_model_complexity_info(module, tuple( + input.shape[1:]), as_strings=False, print_per_layer_stat=False) + # Flops(M) + self.flops_dict[block_id][module_id] = flops / 1e6 + # Params(M) + self.params_dict[block_id][module_id] = params / 1e6 + + input = module(input) + + # global pooling + flops, params = get_model_complexity_info(model.global_pool, tuple( + input.shape[1:]), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + input = model.global_pool(input) + + # conv_head (last conv) + flops, params = get_model_complexity_info(model.conv_head, tuple( + input.shape[1:]), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + # return params (M) + def get_params(self, arch): + params = 0 + for block_id, block in enumerate(arch): + if block == -1: + continue + params += self.params_dict[block_id][block] + return params + self.params_fixed + + # return flops (M) + def get_flops(self, arch): + flops = 0 + for block_id, block in enumerate(arch): + if block == 'LayerChoice1' or block == 'LayerChoice23': + continue + for idx, choice in enumerate(arch[block]): + flops += self.flops_dict[block_id][idx] * (1 if choice else 0) + return flops + self.flops_fixed diff --git a/examples/nas/legacy/cream/lib/utils/op_by_layer_dict.py b/examples/nas/legacy/cream/lib/utils/op_by_layer_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..47ca509ce4529ba21fd22b417c5e9c8bd525309f --- /dev/null +++ b/examples/nas/legacy/cream/lib/utils/op_by_layer_dict.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +# This dictionary is generated by profiling each operation of each layer, to quickly search for layers.
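FlopsEst above walks the supernet once, profiling every (block, choice) pair with ptflops and caching the results, so scoring a candidate path is a table lookup rather than a re-profile. A hedged usage sketch (the all-zeros choice vector is illustrative, and it assumes the estimator can enumerate each mutable block's candidates as written; get_params expects one choice index per block, with -1 meaning the block is skipped):

```python
# Hypothetical lookup flow for the estimator defined above.
from lib.models.structures.supernet import gen_supernet
from lib.utils.flops_table import FlopsEst

model, sta_num, resolution = gen_supernet(flops_minimum=0, flops_maximum=600)
est = FlopsEst(model)        # one-off profiling pass on CPU
arch = [0] * est.block_num   # always take the first candidate op (illustrative)
print('params (M):', est.get_params(arch))
```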
+# flops_op_dict[which_stage][which_operation] = +# (flops_of_operation_with_stride1, flops_of_operation_with_stride2) + +flops_op_dict = {} +for i in range(5): + flops_op_dict[i] = {} +flops_op_dict[0][0] = (21.828704, 18.820752) +flops_op_dict[0][1] = (32.669328, 28.16048) +flops_op_dict[0][2] = (25.039968, 23.637648) +flops_op_dict[0][3] = (37.486224, 35.385824) +flops_op_dict[0][4] = (29.856864, 30.862992) +flops_op_dict[0][5] = (44.711568, 46.22384) +flops_op_dict[1][0] = (11.808656, 11.86712) +flops_op_dict[1][1] = (17.68624, 17.780848) +flops_op_dict[1][2] = (13.01288, 13.87416) +flops_op_dict[1][3] = (19.492576, 20.791408) +flops_op_dict[1][4] = (14.819216, 16.88472) +flops_op_dict[1][5] = (22.20208, 25.307248) +flops_op_dict[2][0] = (8.198, 10.99632) +flops_op_dict[2][1] = (12.292848, 16.5172) +flops_op_dict[2][2] = (8.69976, 11.99984) +flops_op_dict[2][3] = (13.045488, 18.02248) +flops_op_dict[2][4] = (9.4524, 13.50512) +flops_op_dict[2][5] = (14.174448, 20.2804) +flops_op_dict[3][0] = (12.006112, 15.61632) +flops_op_dict[3][1] = (18.028752, 23.46096) +flops_op_dict[3][2] = (13.009632, 16.820544) +flops_op_dict[3][3] = (19.534032, 25.267296) +flops_op_dict[3][4] = (14.514912, 18.62688) +flops_op_dict[3][5] = (21.791952, 27.9768) +flops_op_dict[4][0] = (11.307456, 15.292416) +flops_op_dict[4][1] = (17.007072, 23.1504) +flops_op_dict[4][2] = (11.608512, 15.894528) +flops_op_dict[4][3] = (17.458656, 24.053568) +flops_op_dict[4][4] = (12.060096, 16.797696) +flops_op_dict[4][5] = (18.136032, 25.40832) \ No newline at end of file diff --git a/examples/nas/legacy/cream/lib/utils/search_structure_supernet.py b/examples/nas/legacy/cream/lib/utils/search_structure_supernet.py new file mode 100644 index 0000000000000000000000000000000000000000..b13491c2c73e959ccdfd2a081327ec1fb9b98421 --- /dev/null +++ b/examples/nas/legacy/cream/lib/utils/search_structure_supernet.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +def search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum): + sta_num = [1, 1, 1, 1, 1] + order = [2, 3, 4, 1, 0, 2, 3, 4, 1, 0] + limits = [3, 3, 3, 2, 2, 4, 4, 4, 4, 4] + size_factor = 224 // 32 + base_min_flops = sum([flops_op_dict[i][0][0] for i in range(5)]) + base_max_flops = sum([flops_op_dict[i][5][0] for i in range(5)]) + + if base_min_flops > flops_maximum: + while base_min_flops > flops_maximum and size_factor >= 2: + size_factor = size_factor - 1 + flops_minimum = flops_minimum * (7. / size_factor) + flops_maximum = flops_maximum * (7. 
/ size_factor) + if size_factor < 2: + return None, None, None + elif base_max_flops < flops_minimum: + cur_ptr = 0 + while base_max_flops < flops_minimum and cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + \ + flops_op_dict[order[cur_ptr]][5][1] + sta_num[order[cur_ptr]] += 1 + if cur_ptr > 7 and base_max_flops < flops_minimum: + return None, None, None + + cur_ptr = 0 + while cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] + if base_max_flops <= flops_maximum: + sta_num[order[cur_ptr]] += 1 + else: + break + + arch_def = [item[:i] for i, item in zip([1] + sta_num + [1], arch_def)] + # print(arch_def) + + return sta_num, arch_def, size_factor * 32 diff --git a/examples/nas/legacy/cream/lib/utils/util.py b/examples/nas/legacy/cream/lib/utils/util.py new file mode 100644 index 0000000000000000000000000000000000000000..7017c0953ec67114dbbd8488b529ef56b11f25e2 --- /dev/null +++ b/examples/nas/legacy/cream/lib/utils/util.py @@ -0,0 +1,180 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import sys +import logging +import torch +import argparse +import torch.nn as nn + +from torch import optim as optim +from thop import profile, clever_format + +from timm.utils import * + +from lib.config import cfg + + +def get_path_acc(model, path, val_loader, args, val_iters=50): + prec1_m = AverageMeter() + prec5_m = AverageMeter() + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(val_loader): + if batch_idx >= val_iters: + break + if not args.prefetcher: + input = input.cuda() + target = target.cuda() + + output = model(input, path) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = args.tta + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + torch.cuda.synchronize() + + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + return (prec1_m.avg, prec5_m.avg) + + +def get_logger(file_path): + """ Make python logger """ + log_format = '%(asctime)s | %(message)s' + logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt='%m/%d %I:%M:%S %p') + logger = logging.getLogger('') + + formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') + file_handler = logging.FileHandler(file_path) + file_handler.setFormatter(formatter) + + logger.addHandler(file_handler) + + return logger + + +def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + meta_layer_no_decay = [] + meta_layer_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith( + ".bias") or name in skip_list: + if 'meta_layer' in name: + meta_layer_no_decay.append(param) + else: + no_decay.append(param) + else: + if 'meta_layer' in name: + meta_layer_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, + {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, + {'params': meta_layer_no_decay, 
+        {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr},
+    ]
+
+
+def create_optimizer_supernet(args, model, has_apex, filter_bias_and_bn=True):
+    opt_lower = args.opt.lower()
+    weight_decay = args.weight_decay
+    if 'adamw' in opt_lower or 'radam' in opt_lower:
+        weight_decay /= args.lr
+    if weight_decay and filter_bias_and_bn:
+        parameters = add_weight_decay_supernet(model, args, weight_decay)
+        weight_decay = 0.
+    else:
+        parameters = model.parameters()
+
+    if 'fused' in opt_lower:
+        assert has_apex and torch.cuda.is_available(
+        ), 'APEX and CUDA required for fused optimizers'
+
+    opt_split = opt_lower.split('_')
+    opt_lower = opt_split[-1]
+    if opt_lower == 'sgd' or opt_lower == 'nesterov':
+        optimizer = optim.SGD(
+            parameters,
+            momentum=args.momentum,
+            weight_decay=weight_decay,
+            nesterov=True)
+    elif opt_lower == 'momentum':
+        optimizer = optim.SGD(
+            parameters,
+            momentum=args.momentum,
+            weight_decay=weight_decay,
+            nesterov=False)
+    elif opt_lower == 'adam':
+        optimizer = optim.Adam(
+            parameters, weight_decay=weight_decay, eps=args.opt_eps)
+    else:
+        raise ValueError('Invalid optimizer: %s' % opt_lower)
+
+    return optimizer
+
+
+def convert_lowercase(cfg):
+    keys = cfg.keys()
+    lowercase_keys = [key.lower() for key in keys]
+    values = [cfg.get(key) for key in keys]
+    for lowercase_key, value in zip(lowercase_keys, values):
+        cfg.setdefault(lowercase_key, value)
+    return cfg
+
+
+def parse_config_args(exp_name):
+    parser = argparse.ArgumentParser(description=exp_name)
+    parser.add_argument(
+        '--cfg',
+        type=str,
+        default='../experiments/workspace/retrain/retrain.yaml',
+        help='configuration of cream')
+    parser.add_argument('--local_rank', type=int, default=0,
+                        help='local_rank')
+    args = parser.parse_args()
+
+    cfg.merge_from_file(args.cfg)
+    converted_cfg = convert_lowercase(cfg)
+
+    return args, converted_cfg
+
+
+def get_model_flops_params(model, input_size=(1, 3, 224, 224)):
+    input = torch.randn(input_size)
+    macs, params = profile(deepcopy(model), inputs=(input,), verbose=False)
+    macs, params = clever_format([macs, params], "%.3f")
+    return macs, params
+
+
+def cross_entropy_loss_with_soft_target(pred, soft_target):
+    logsoftmax = nn.LogSoftmax(dim=1)  # explicit dim, the implicit default is deprecated
+    return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
+
+
+def create_supernet_scheduler(cfg, optimizer):
+    ITERS = cfg.EPOCHS * \
+        (1280000 / (cfg.NUM_GPU * cfg.DATASET.BATCH_SIZE))
+    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: (
+        cfg.LR - step / ITERS) if step <= ITERS else 0, last_epoch=-1)
+    return lr_scheduler, cfg.EPOCHS
diff --git a/examples/nas/legacy/cream/requirements b/examples/nas/legacy/cream/requirements
new file mode 100644
index 0000000000000000000000000000000000000000..5ddae72e4c57f826fb5db3308654b52a6e2c93ab
--- /dev/null
+++ b/examples/nas/legacy/cream/requirements
@@ -0,0 +1,12 @@
+yacs
+numpy==1.17
+opencv-python==4.0.1.24
+torchvision==0.2.1
+thop
+git+https://github.com/sovrasov/flops-counter.pytorch.git
+pillow==6.1.0
+torch==1.2
+timm==0.1.20
+tensorboardx==1.2
+tensorboard
+future
\ No newline at end of file
diff --git a/examples/nas/legacy/cream/retrain.py b/examples/nas/legacy/cream/retrain.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91f2c7432372e6f6413a5a01c295840756a823c
--- /dev/null
+++ b/examples/nas/legacy/cream/retrain.py
@@ -0,0 +1,321 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
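+#
+# Usage sketch (an assumed invocation, not taken from the original docs):
+# this script initializes torch.distributed with init_method='env://', so it
+# expects the environment variables set by a launcher such as
+# torch.distributed.launch. The --cfg default below is the one declared in
+# lib/utils/util.py; point it at your own experiment file as needed.
+#
+#     python -m torch.distributed.launch --nproc_per_node=1 retrain.py \
+#         --cfg ../experiments/workspace/retrain/retrain.yaml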
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import numpy as np +import torch.nn as nn + +from torchscope import scope +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.optim import create_optimizer +from timm.models import resume_checkpoint +from timm.scheduler import create_scheduler +from timm.data import Dataset, create_loader +from timm.utils import ModelEma, update_summary +from timm.loss import LabelSmoothingCrossEntropy + +# import apex as distributed package +try: + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.core.retrain import train_epoch +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('nni.cream.childnet') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'retrain.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # retrain model selection + if cfg.NET.SELECTION == 481: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 43: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 287: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 604: + arch_list = [ + [0], [ + 3, 3, 2, 3, 3], [ + 3, 2, 3, 2, 3], [ + 3, 2, 3, 2, 3], [ + 3, 3, 2, 2, 3, 3], [ + 3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == -1: + arch_list = cfg.NET.INPUT_ARCH + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Retrain Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + # initialize training parameters + eval_metric = cfg.EVAL_METRICS + best_metric, best_epoch, saver = None, None, None + + # initialize distributed 
parameters + distributed = cfg.NUM_GPU > 1 + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + 'Training on Process {} with {} GPUs.'.format( + args.local_rank, cfg.NUM_GPU)) + + # fix random seeds + torch.manual_seed(cfg.SEED) + torch.cuda.manual_seed_all(cfg.SEED) + np.random.seed(cfg.SEED) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # get parameters and FLOPs of model + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # create optimizer + model = model.cuda() + optimizer = create_optimizer(cfg, model) + + # optionally resume from a checkpoint + resume_state, resume_epoch = {}, None + if cfg.AUTO_RESUME: + resume_state, resume_epoch = resume_checkpoint(model, cfg.RESUME_PATH) + optimizer.load_state_dict(resume_state['optimizer']) + del resume_state + + model_ema = None + if cfg.NET.EMA.USE: + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH if cfg.AUTO_RESUME else None) + + if distributed: + if cfg.BATCHNORM.SYNC_BN: + try: + if HAS_APEX: + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm( + model) + if args.local_rank == 0: + logger.info( + 'Converted model to use Synchronized BatchNorm.') + except Exception as e: + if args.local_rank == 0: + logger.error( + 'Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1 with exception {}'.format(e)) + if HAS_APEX: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info( + "Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") + # can use device str in Torch >= 1.1 + model = DDP(model, device_ids=[args.local_rank]) + + # imagenet train dataset + train_dir = os.path.join(cfg.DATA_DIR, 'train') + if not os.path.exists(train_dir) and args.local_rank == 0: + logger.error('Training folder does not exist at: {}'.format(train_dir)) + exit(1) + dataset_train = Dataset(train_dir) + loader_train = create_loader( + dataset_train, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.BATCH_SIZE, + is_training=True, + color_jitter=cfg.AUGMENTATION.COLOR_JITTER, + auto_augment=cfg.AUGMENTATION.AA, + num_aug_splits=0, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + collate_fn=None, + pin_memory=cfg.DATASET.PIN_MEM, + interpolation='random', + re_mode=cfg.AUGMENTATION.RE_MODE, + re_prob=cfg.AUGMENTATION.RE_PROB + ) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + interpolation=cfg.DATASET.INTERPOLATION, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + pin_memory=cfg.DATASET.PIN_MEM + ) + + # whether to use label smoothing + if cfg.AUGMENTATION.SMOOTHING > 0.: + train_loss_fn = LabelSmoothingCrossEntropy( + smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + else: + train_loss_fn = nn.CrossEntropyLoss().cuda() + validate_loss_fn = train_loss_fn + + # create learning rate scheduler + lr_scheduler, num_epochs = create_scheduler(cfg, optimizer) + start_epoch = resume_epoch if resume_epoch is not None else 0 + if start_epoch > 0: + lr_scheduler.step(start_epoch) + if args.local_rank == 0: + logger.info('Scheduled epochs: {}'.format(num_epochs)) + + try: + best_record, best_ep = 0, 0 + for epoch in range(start_epoch, num_epochs): + if distributed: + loader_train.sampler.set_epoch(epoch) + + train_metrics = train_epoch( + epoch, + model, + loader_train, + optimizer, + train_loss_fn, + cfg, + lr_scheduler=lr_scheduler, + saver=saver, + output_dir=output_dir, + model_ema=model_ema, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + eval_metrics = validate( + epoch, + model, + loader_eval, + validate_loss_fn, + cfg, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + if model_ema is not None and not cfg.NET.EMA.FORCE_CPU: + ema_eval_metrics = validate( + epoch, + model_ema.ema, + loader_eval, + validate_loss_fn, + cfg, + log_suffix='_EMA', + logger=logger, + writer=writer) + eval_metrics = ema_eval_metrics + + if lr_scheduler is not None: + lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) + + update_summary(epoch, train_metrics, eval_metrics, os.path.join( + output_dir, 'summary.csv'), write_header=best_metric is None) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint( + model, optimizer, cfg, + epoch=epoch, model_ema=model_ema, metric=save_metric) + + if best_record < 
eval_metrics[eval_metric]: + best_record = eval_metrics[eval_metric] + best_ep = epoch + + if args.local_rank == 0: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_record, best_ep)) + + except KeyboardInterrupt: + pass + + if best_metric is not None: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/legacy/cream/test.py b/examples/nas/legacy/cream/test.py new file mode 100644 index 0000000000000000000000000000000000000000..986be3e1f4b6d146c416f66332c6138c47ac0612 --- /dev/null +++ b/examples/nas/legacy/cream/test.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import torch.nn as nn + +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.utils import ModelEma +from timm.models import resume_checkpoint +from timm.data import Dataset, create_loader + +# import apex as distributed package +try: + from apex.parallel import convert_syncbn_model + from apex.parallel import DistributedDataParallel as DDP + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('child net testing') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'test.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # retrain model selection + if cfg.NET.SELECTION == 481: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 43: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 287: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 604: + arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], + [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Test Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in 
range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # initialize distributed parameters + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + "Training on Process {} with {} GPUs.".format( + args.local_rank, cfg.NUM_GPU)) + + # resume model from checkpoint + assert cfg.AUTO_RESUME is True and os.path.exists(cfg.RESUME_PATH) + _, __ = resume_checkpoint(model, cfg.RESUME_PATH) + + model = model.cuda() + + model_ema = None + if cfg.NET.EMA.USE: + # Important to create EMA model after cuda(), DP wrapper, and AMP but + # before SyncBN and DDP wrapper + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + num_workers=cfg.WORKERS, + distributed=True, + pin_memory=cfg.DATASET.PIN_MEM, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD + ) + + # only test accuracy of model-EMA + validate_loss_fn = nn.CrossEntropyLoss().cuda() + validate(0, model_ema.ema, loader_eval, validate_loss_fn, cfg, + log_suffix='_EMA', logger=logger, + writer=writer, local_rank=args.local_rank) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/legacy/cream/train.py b/examples/nas/legacy/cream/train.py new file mode 100644 index 0000000000000000000000000000000000000000..50d340c1ef274c9b62df55d2ba55ed5bc799a29f --- /dev/null +++ b/examples/nas/legacy/cream/train.py @@ -0,0 +1,213 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
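+#
+# Usage sketch (assumed invocation; the supernet config path below is
+# hypothetical): like retrain.py, this script relies on env:// distributed
+# initialization, so launch it through a distributed launcher, e.g.
+#
+#     python -m torch.distributed.launch --nproc_per_node=8 train.py \
+#         --cfg ../experiments/workspace/train/train.yaml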
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import sys +import datetime +import torch +import numpy as np +import torch.nn as nn + +# import timm packages +from timm.loss import LabelSmoothingCrossEntropy +from timm.data import Dataset, create_loader +from timm.models import resume_checkpoint + +# import apex as distributed package +try: + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + USE_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + USE_APEX = False + +# import models and training functions +from lib.utils.flops_table import FlopsEst +from lib.models.structures.supernet import gen_supernet +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from lib.utils.util import parse_config_args, get_logger, \ + create_optimizer_supernet, create_supernet_scheduler + +from nni.nas.pytorch.callbacks import LRSchedulerCallback +from nni.nas.pytorch.callbacks import ModelCheckpoint +from nni.algorithms.nas.pytorch.cream import CreamSupernetTrainer +from nni.algorithms.nas.pytorch.random import RandomMutator + +def main(): + args, cfg = parse_config_args('nni.cream.supernet') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, "train.log")) + else: + logger = None + + # initialize distributed parameters + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + 'Training on Process %d with %d GPUs.', + args.local_rank, cfg.NUM_GPU) + + # fix random seeds + torch.manual_seed(cfg.SEED) + torch.cuda.manual_seed_all(cfg.SEED) + np.random.seed(cfg.SEED) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # generate supernet + model, sta_num, resolution = gen_supernet( + flops_minimum=cfg.SUPERNET.FLOPS_MINIMUM, + flops_maximum=cfg.SUPERNET.FLOPS_MAXIMUM, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP, + resunit=cfg.SUPERNET.RESUNIT, + dil_conv=cfg.SUPERNET.DIL_CONV, + slice=cfg.SUPERNET.SLICE, + verbose=cfg.VERBOSE, + logger=logger) + + # number of choice blocks in supernet + choice_num = len(model.blocks[7]) + if args.local_rank == 0: + logger.info('Supernet created, param count: %d', ( + sum([m.numel() for m in model.parameters()]))) + logger.info('resolution: %d', (resolution)) + logger.info('choice number: %d', (choice_num)) + + # initialize flops look-up table + model_est = FlopsEst(model) + flops_dict, flops_fixed = model_est.flops_dict, model_est.flops_fixed + + # optionally resume from a checkpoint + optimizer_state = None + resume_epoch = None + if cfg.AUTO_RESUME: + optimizer_state, resume_epoch = resume_checkpoint( + model, cfg.RESUME_PATH) + + # create optimizer and resume from checkpoint + optimizer = create_optimizer_supernet(cfg, model, USE_APEX) + if optimizer_state is not None: + optimizer.load_state_dict(optimizer_state['optimizer']) + model = model.cuda() + + # convert model to distributed mode + if cfg.BATCHNORM.SYNC_BN: + try: + if USE_APEX: + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if args.local_rank 
== 0: + logger.info('Converted model to use Synchronized BatchNorm.') + except Exception as exception: + logger.info( + 'Failed to enable Synchronized BatchNorm. ' + 'Install Apex or Torch >= 1.1 with Exception %s', exception) + if USE_APEX: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info( + "Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.") + # can use device str in Torch >= 1.1 + model = DDP(model, device_ids=[args.local_rank]) + + # create learning rate scheduler + lr_scheduler, num_epochs = create_supernet_scheduler(cfg, optimizer) + + start_epoch = resume_epoch if resume_epoch is not None else 0 + if start_epoch > 0: + lr_scheduler.step(start_epoch) + + if args.local_rank == 0: + logger.info('Scheduled epochs: %d', num_epochs) + + # imagenet train dataset + train_dir = os.path.join(cfg.DATA_DIR, 'train') + if not os.path.exists(train_dir): + logger.info('Training folder does not exist at: %s', train_dir) + sys.exit() + + dataset_train = Dataset(train_dir) + loader_train = create_loader( + dataset_train, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.BATCH_SIZE, + is_training=True, + use_prefetcher=True, + re_prob=cfg.AUGMENTATION.RE_PROB, + re_mode=cfg.AUGMENTATION.RE_MODE, + color_jitter=cfg.AUGMENTATION.COLOR_JITTER, + interpolation='random', + num_workers=cfg.WORKERS, + distributed=True, + collate_fn=None, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD + ) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.isdir(eval_dir): + logger.info('Validation folder does not exist at: %s', eval_dir) + sys.exit() + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=4 * cfg.DATASET.BATCH_SIZE, + is_training=False, + use_prefetcher=True, + num_workers=cfg.WORKERS, + distributed=True, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + interpolation=cfg.DATASET.INTERPOLATION + ) + + # whether to use label smoothing + if cfg.AUGMENTATION.SMOOTHING > 0.: + train_loss_fn = LabelSmoothingCrossEntropy( + smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + else: + train_loss_fn = nn.CrossEntropyLoss().cuda() + validate_loss_fn = train_loss_fn + + mutator = RandomMutator(model) + + trainer = CreamSupernetTrainer(model, train_loss_fn, validate_loss_fn, + optimizer, num_epochs, loader_train, loader_eval, + mutator=mutator, batch_size=cfg.DATASET.BATCH_SIZE, + log_frequency=cfg.LOG_INTERVAL, + meta_sta_epoch=cfg.SUPERNET.META_STA_EPOCH, + update_iter=cfg.SUPERNET.UPDATE_ITER, + slices=cfg.SUPERNET.SLICE, + pool_size=cfg.SUPERNET.POOL_SIZE, + pick_method=cfg.SUPERNET.PICK_METHOD, + choice_num=choice_num, sta_num=sta_num, acc_gap=cfg.ACC_GAP, + flops_dict=flops_dict, flops_fixed=flops_fixed, local_rank=args.local_rank, + callbacks=[LRSchedulerCallback(lr_scheduler), + ModelCheckpoint(output_dir)]) + + trainer.train() + + +if __name__ == '__main__': + main() diff --git a/examples/nas/legacy/pdarts/.gitignore b/examples/nas/legacy/pdarts/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..054c274eebe419ada82bf2b665a53fa00428e992 --- /dev/null +++ b/examples/nas/legacy/pdarts/.gitignore @@ -0,0 +1,2 @@ +data/* +log diff --git a/examples/nas/legacy/pdarts/README.md b/examples/nas/legacy/pdarts/README.md 
new file mode 100644 index 0000000000000000000000000000000000000000..15465360b1ba3cb9618b79a16e3ca5442a891412 --- /dev/null +++ b/examples/nas/legacy/pdarts/README.md @@ -0,0 +1 @@ +[Documentation](https://nni.readthedocs.io/en/latest/NAS/PDARTS.html) diff --git a/examples/nas/legacy/pdarts/README_zh_CN.md b/examples/nas/legacy/pdarts/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..e43f5c20eb14ac288f3e14aad865a8d3611cbd02 --- /dev/null +++ b/examples/nas/legacy/pdarts/README_zh_CN.md @@ -0,0 +1 @@ +[文档](https://nni.readthedocs.io/zh/latest/NAS/PDARTS.html) diff --git a/examples/nas/legacy/pdarts/search.py b/examples/nas/legacy/pdarts/search.py new file mode 100644 index 0000000000000000000000000000000000000000..ac3ac741b6b8124967fb69d2fbf4d6b7eb28f2a8 --- /dev/null +++ b/examples/nas/legacy/pdarts/search.py @@ -0,0 +1,71 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import sys +import time +from argparse import ArgumentParser + +import torch +import torch.nn as nn + +from nni.nas.pytorch.callbacks import ArchitectureCheckpoint +from nni.algorithms.nas.pytorch.pdarts import PdartsTrainer + +# prevent it to be reordered. +if True: + sys.path.append('../../oneshot/darts') + from utils import accuracy + from model import CNN + import datasets + + +logger = logging.getLogger('nni') + + +if __name__ == "__main__": + parser = ArgumentParser("pdarts") + parser.add_argument('--add_layers', action='append', type=int, + help='add layers, default: [0, 6, 12]') + parser.add_argument('--dropped_ops', action='append', type=int, + help='drop ops, default: [3, 2, 1]') + parser.add_argument("--nodes", default=4, type=int) + parser.add_argument("--init_layers", default=5, type=int) + parser.add_argument("--channels", default=16, type=int) + parser.add_argument("--batch-size", default=64, type=int) + parser.add_argument("--log-frequency", default=1, type=int) + parser.add_argument("--epochs", default=50, type=int) + parser.add_argument("--unrolled", default=False, action="store_true") + args = parser.parse_args() + if args.add_layers is None: + args.add_layers = [0, 6, 12] + if args.dropped_ops is None: + args.dropped_ops = [3, 2, 1] + + logger.info("loading data") + dataset_train, dataset_valid = datasets.get_dataset("cifar10") + + def model_creator(layers): + model = CNN(32, 3, args.channels, 10, layers, n_nodes=args.nodes) + criterion = nn.CrossEntropyLoss() + + optim = torch.optim.SGD(model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, args.epochs, eta_min=0.001) + + return model, criterion, optim, lr_scheduler + + logger.info("initializing trainer") + trainer = PdartsTrainer(model_creator, + init_layers=args.init_layers, + metrics=lambda output, target: accuracy(output, target, topk=(1,)), + pdarts_num_layers=args.add_layers, + pdarts_num_to_drop=args.dropped_ops, + num_epochs=args.epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + batch_size=args.batch_size, + log_frequency=args.log_frequency, + unrolled=args.unrolled, + callbacks=[ArchitectureCheckpoint("./checkpoints")]) + logger.info("training") + trainer.train() diff --git a/examples/nas/legacy/textnas/README.md b/examples/nas/legacy/textnas/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f8ebe24afd19d654c7e5be58294279db47548cfa --- /dev/null +++ b/examples/nas/legacy/textnas/README.md @@ -0,0 +1,49 @@ +# TextNAS: A Neural Architecture 
Search Space tailored for Text Representation + +TextNAS by MSRA. Official Release. + +[Paper link](https://arxiv.org/abs/1912.10729) + +## Preparation + +Prepare the word vectors and SST dataset, and organize them in data directory as shown below: + +``` +textnas +├── data +│ ├── sst +│ │ └── trees +│ │ ├── dev.txt +│ │ ├── test.txt +│ │ └── train.txt +│ └── glove.840B.300d.txt +├── dataloader.py +├── model.py +├── ops.py +├── README.md +├── search.py +└── utils.py +``` + +The following link might be helpful for finding and downloading the corresponding dataset: + +* [GloVe: Global Vectors for Word Representation](https://nlp.stanford.edu/projects/glove/) +* [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https://nlp.stanford.edu/sentiment/) + +## Search + +``` +python search.py +``` + +After each search epoch, 10 sampled architectures will be tested directly. Their performances are expected to be 40% - 42% after 10 epochs. + +By default, 20 sampled architectures will be exported into `checkpoints` directory for next step. + +## Retrain + +``` +sh run_retrain.sh +``` + +By default, the script will retrain the architecture provided by the author on the SST-2 dataset. diff --git a/examples/nas/legacy/textnas/README_zh_CN.md b/examples/nas/legacy/textnas/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..6a43913ffbac52686279574a55dcb2ce4216140b --- /dev/null +++ b/examples/nas/legacy/textnas/README_zh_CN.md @@ -0,0 +1,49 @@ +# TextNAS: A Neural Architecture Search Space tailored for Text Representation + +TextNAS 由 MSRA 提出 正式版本。 + +[论文链接](https://arxiv.org/abs/1912.10729) + +## 准备 + +准备词向量和 SST 数据集,并按如下结构放到 data 目录中: + +``` +textnas +├── data +│ ├── sst +│ │ └── trees +│ │ ├── dev.txt +│ │ ├── test.txt +│ │ └── train.txt +│ └── glove.840B.300d.txt +├── dataloader.py +├── model.py +├── ops.py +├── README.md +├── search.py +└── utils.py +``` + +以下链接有助于查找和下载相应的数据集: + +* [GloVe: Global Vectors for Word Representation](https://nlp.stanford.edu/projects/glove/) +* [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https://nlp.stanford.edu/sentiment/) + +## 搜索 + +``` +python search.py +``` + +在每个搜索 Epoch 后,会直接测试 10 个采样的结构。 10 个 Epoch 后的性能预计为 40% - 42%。 + +默认情况下,20 个采样结构会被导出到 `checkpoints` 目录中,以便进行下一步处理。 + +## 重新训练 + +``` +sh run_retrain.sh +``` + +默认情况下,脚本会重新训练 SST-2 数据集上作者所提供的网络结构。 diff --git a/examples/nas/legacy/textnas/arc/final_arc.json b/examples/nas/legacy/textnas/arc/final_arc.json new file mode 100644 index 0000000000000000000000000000000000000000..c1e12c2d4bd6b3acf0080d841920eb45e3578812 --- /dev/null +++ b/examples/nas/legacy/textnas/arc/final_arc.json @@ -0,0 +1,212 @@ +{ + "LayerChoice1": [ + false, false, false, false, false, true, false, false + ], + "InputChoice2": [ + true + ], + "LayerChoice3": [ + false, false, false, false, false, false, false, true + ], + "InputChoice4": [ + false + ], + "InputChoice5": [ + true, false + ], + "LayerChoice6": [ + false, false, false, true, false, false, false, false + ], + "InputChoice7": [ + false, false + ], + "InputChoice8": [ + false, false, true + ], + "LayerChoice9": [ + false, false, false, false, false, false, true, false + ], + "InputChoice10": [ + false, true, true + ], + "InputChoice11": [ + false, false, true, false + ], + "LayerChoice12": [ + false, true, false, false, false, false, false, false + ], + "InputChoice13": [ + false, true, false, false + ], + "InputChoice14": [ + false, false, false, false, true + ], + "LayerChoice15": [ + 
false, true, false, false, false, false, false, false + ], + "InputChoice16": [ + false, false, true, false, true + ], + "InputChoice17": [ + false, false, false, false, true + ], + "LayerChoice18": [ + true, false, false, false, false, false, false, false + ], + "InputChoice19": [ + false, false, true, true, true, true + ], + "InputChoice20": [ + true, false, false, false, false + ], + "LayerChoice21": [ + false, false, false, false, false, false, true, false + ], + "InputChoice22": [ + false, true, true, false, false, false, false + ], + "InputChoice23": [ + false, true, false, false, false + ], + "LayerChoice24": [ + false, false, false, false, false, true, false, false + ], + "InputChoice25": [ + false, true, false, true, true, false, true, true + ], + "InputChoice26": [ + false, false, true, false, false + ], + "LayerChoice27": [ + false, false, false, false, false, true, false, false + ], + "InputChoice28": [ + false, false, false, false, false, true, false, true, true + ], + "InputChoice29": [ + true, false, false, false, false + ], + "LayerChoice30": [ + false, false, false, false, false, false, false, true + ], + "InputChoice31": [ + true, true, false, false, true, false, false, true, true, false + ], + "InputChoice32": [ + true, false, false, false, false + ], + "LayerChoice33": [ + false, false, false, false, true, false, false, false + ], + "InputChoice34": [ + true, false, false, true, true, true, true, false, false, false, false + ], + "InputChoice35": [ + false, false, false, true, false + ], + "LayerChoice36": [ + false, true, false, false, false, false, false, false + ], + "InputChoice37": [ + true, true, false, true, false, true, false, false, true, false, false, false + ], + "InputChoice38": [ + false, false, false, true, false + ], + "LayerChoice39": [ + false, false, true, false, false, false, false, false + ], + "InputChoice40": [ + true, true, false, false, false, false, true, false, false, true, true, false, true + ], + "InputChoice41": [ + false, false, false, true, false + ], + "LayerChoice42": [ + true, false, false, false, false, false, false, false + ], + "InputChoice43": [ + false, false, true, false, false, false, true, true, true, false, true, true, false, false + ], + "InputChoice44": [ + false, false, false, false, true + ], + "LayerChoice45": [ + false, false, false, true, false, false, false, false + ], + "InputChoice46": [ + true, false, false, false, false, false, true, false, false, false, true, true, false, false, true + ], + "InputChoice47": [ + false, false, false, true, false + ], + "LayerChoice48": [ + false, false, true, false, false, false, false, false + ], + "InputChoice49": [ + false, false, false, false, false, false, false, false, false, true, true, false, true, false, true, false + ], + "InputChoice50": [ + false, false, false, false, true + ], + "LayerChoice51": [ + false, false, false, false, true, false, false, false + ], + "InputChoice52": [ + false, true, true, true, true, false, false, true, false, true, false, false, false, false, true, false, false + ], + "InputChoice53": [ + false, false, true, false, false + ], + "LayerChoice54": [ + false, false, false, true, false, false, false, false + ], + "InputChoice55": [ + false, false, false, false, false, true, false, false, false, false, false, false, false, true, true, true, false, true + ], + "InputChoice56": [ + false, false, true, false, false + ], + "LayerChoice57": [ + false, false, false, true, false, false, false, false + ], + "InputChoice58": [ + false, false, false, true, 
false, false, false, false, false, false, true, false, false, false, true, false, false, false, false + ], + "InputChoice59": [ + false, true, false, false, false + ], + "LayerChoice60": [ + false, false, false, false, false, true, false, false + ], + "InputChoice61": [ + true, true, false, false, false, false, false, false, false, false, true, true, false, false, true, true, true, true, false, false + ], + "InputChoice62": [ + true, false, false, false, false + ], + "LayerChoice63": [ + false, false, false, false, false, false, false, true + ], + "InputChoice64": [ + false, true, true, true, false, false, false, true, false, true, true, true, true, false, true, false, false, false, false, false, false + ], + "InputChoice65": [ + false, false, false, false, true + ], + "LayerChoice66": [ + false, false, false, false, false, false, false, true + ], + "InputChoice67": [ + false, false, true, true, true, true, false, true, false, true, true, false, false, false, false, true, false, false, false, false, false, true + ], + "InputChoice68": [ + false, false, false, true, false + ], + "LayerChoice69": [ + false, false, false, true, false, false, false, false + ], + "InputChoice70": [ + true, false, false, true, false, false, false, true, false, false, false, false, true, false, false, false, true, false, false, false, false, false, false + ] +} diff --git a/examples/nas/legacy/textnas/dataloader.py b/examples/nas/legacy/textnas/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..083f1c7413028f9e0f7f304119f68d94b71df528 --- /dev/null +++ b/examples/nas/legacy/textnas/dataloader.py @@ -0,0 +1,335 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import pickle +from collections import Counter + +import numpy as np +import torch +from torch.utils import data + +logger = logging.getLogger("nni.textnas") + + +class PTBTree: + WORD_TO_WORD_MAPPING = { + "{": "-LCB-", + "}": "-RCB-" + } + + def __init__(self): + self.subtrees = [] + self.word = None + self.label = "" + self.parent = None + self.span = (-1, -1) + self.word_vector = None # HOS, store dx1 RNN word vector + self.prediction = None # HOS, store Kx1 prediction vector + + def is_leaf(self): + return len(self.subtrees) == 0 + + def set_by_text(self, text, pos=0, left=0): + depth = 0 + right = left + for i in range(pos + 1, len(text)): + char = text[i] + # update the depth + if char == "(": + depth += 1 + if depth == 1: + subtree = PTBTree() + subtree.parent = self + subtree.set_by_text(text, i, right) + right = subtree.span[1] + self.span = (left, right) + self.subtrees.append(subtree) + elif char == ")": + depth -= 1 + if len(self.subtrees) == 0: + pos = i + for j in range(i, 0, -1): + if text[j] == " ": + pos = j + break + self.word = text[pos + 1:i] + self.span = (left, left + 1) + + # we've reached the end of the category that is the root of this subtree + if depth == 0 and char == " " and self.label == "": + self.label = text[pos + 1:i] + # we've reached the end of the scope for this bracket + if depth < 0: + break + + # Fix some issues with variation in output, and one error in the treebank + # for a word with a punctuation POS + self.standardise_node() + + def standardise_node(self): + if self.word in self.WORD_TO_WORD_MAPPING: + self.word = self.WORD_TO_WORD_MAPPING[self.word] + + def __repr__(self, single_line=True, depth=0): + ans = "" + if not single_line and depth > 0: + ans = "\n" + depth * "\t" + ans += "(" + self.label + if self.word is not 
None:
+            ans += " " + self.word
+        for subtree in self.subtrees:
+            if single_line:
+                ans += " "
+            ans += subtree.__repr__(single_line, depth + 1)
+        ans += ")"
+        return ans
+
+
+def read_tree(source):
+    cur_text = []
+    depth = 0
+    while True:
+        line = source.readline()
+        # Check if we are out of input
+        if line == "":
+            return None
+        # strip whitespace and only use if this contains something
+        line = line.strip()
+        if line == "":
+            continue
+        cur_text.append(line)
+        # Update depth
+        for char in line:
+            if char == "(":
+                depth += 1
+            elif char == ")":
+                depth -= 1
+        # At depth 0 we have a complete tree
+        if depth == 0:
+            tree = PTBTree()
+            tree.set_by_text(" ".join(cur_text))
+            return tree
+    return None
+
+
+def read_trees(source, max_sents=-1):
+    with open(source) as fp:
+        trees = []
+        while True:
+            tree = read_tree(fp)
+            if tree is None:
+                break
+            trees.append(tree)
+            if len(trees) >= max_sents > 0:
+                break
+        return trees
+
+
+class SSTDataset(data.Dataset):
+    def __init__(self, sents, mask, labels):
+        self.sents = sents
+        self.labels = labels
+        self.mask = mask
+
+    def __getitem__(self, index):
+        return (self.sents[index], self.mask[index]), self.labels[index]
+
+    def __len__(self):
+        return len(self.sents)
+
+
+def sst_get_id_input(content, word_id_dict, max_input_length):
+    words = content.split(" ")
+    sentence = [word_id_dict["<pad>"]] * max_input_length
+    mask = [0] * max_input_length
+    unknown = word_id_dict["<unknown>"]
+    for i, word in enumerate(words[:max_input_length]):
+        sentence[i] = word_id_dict.get(word, unknown)
+        mask[i] = 1
+    return sentence, mask
+
+
+def sst_get_phrases(trees, sample_ratio=1.0, is_binary=False, only_sentence=False):
+    all_phrases = []
+    for tree in trees:
+        if only_sentence:
+            sentence = get_sentence_by_tree(tree)
+            label = int(tree.label)
+            pair = (sentence, label)
+            all_phrases.append(pair)
+        else:
+            phrases = get_phrases_by_tree(tree)
+            sentence = get_sentence_by_tree(tree)
+            pair = (sentence, int(tree.label))
+            all_phrases.append(pair)
+            all_phrases += phrases
+    if sample_ratio < 1.:
+        np.random.shuffle(all_phrases)
+    result_phrases = []
+    for pair in all_phrases:
+        if is_binary:
+            phrase, label = pair
+            if label <= 1:
+                pair = (phrase, 0)
+            elif label >= 3:
+                pair = (phrase, 1)
+            else:
+                continue
+        if sample_ratio == 1.:
+            result_phrases.append(pair)
+        else:
+            rand_portion = np.random.random()
+            if rand_portion < sample_ratio:
+                result_phrases.append(pair)
+    return result_phrases
+
+
+def get_phrases_by_tree(tree):
+    phrases = []
+    if tree is None:
+        return phrases
+    if tree.is_leaf():
+        pair = (tree.word, int(tree.label))
+        phrases.append(pair)
+        return phrases
+    left_child_phrases = get_phrases_by_tree(tree.subtrees[0])
+    right_child_phrases = get_phrases_by_tree(tree.subtrees[1])
+    phrases.extend(left_child_phrases)
+    phrases.extend(right_child_phrases)
+    sentence = get_sentence_by_tree(tree)
+    pair = (sentence, int(tree.label))
+    phrases.append(pair)
+    return phrases
+
+
+def get_sentence_by_tree(tree):
+    if tree is None:
+        return ""
+    if tree.is_leaf():
+        return tree.word
+    left_sentence = get_sentence_by_tree(tree.subtrees[0])
+    right_sentence = get_sentence_by_tree(tree.subtrees[1])
+    sentence = left_sentence + " " + right_sentence
+    return sentence.strip()
+
+
+def get_word_id_dict(word_num_dict, word_id_dict, min_count):
+    z = [k for k in sorted(word_num_dict.keys())]
+    for word in z:
+        count = word_num_dict[word]
+        if count >= min_count:
+            index = len(word_id_dict)
+            if word not in word_id_dict:
+                word_id_dict[word] = index
+    return word_id_dict
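+
+
+# Vocabulary convention (a summary of read_data_sst below, with a hypothetical
+# worked example): id 0 is reserved for the "<pad>" token and id 1 for
+# "<unknown>"; get_word_id_dict above then assigns the next free ids to every
+# word whose corpus frequency reaches min_count:
+#
+#     word_id_dict = {"<pad>": 0, "<unknown>": 1}
+#     get_word_id_dict({"movie": 3, "rare": 1}, word_id_dict, min_count=2)
+#     # -> {"<pad>": 0, "<unknown>": 1, "movie": 2}  ("rare" is below min_count)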
+def load_word_num_dict(phrases, word_num_dict):
+    for sentence, _ in phrases:
+        words = sentence.split(" ")
+        for cur_word in words:
+            word = cur_word.strip()
+            word_num_dict[word] += 1
+    return word_num_dict
+
+
+def init_trainable_embedding(embedding_path, word_id_dict, embed_dim=300):
+    word_embed_model = load_glove_model(embedding_path, embed_dim)
+    assert word_embed_model["pool"].shape[1] == embed_dim
+    embedding = np.random.random([len(word_id_dict), embed_dim]).astype(np.float32) / 2.0 - 0.25
+    embedding[0] = np.zeros(embed_dim)  # PAD
+    embedding[1] = (np.random.rand(embed_dim) - 0.5) / 2  # UNK
+    for word in sorted(word_id_dict.keys()):
+        idx = word_id_dict[word]
+        if idx == 0 or idx == 1:
+            continue
+        if word in word_embed_model["mapping"]:
+            embedding[idx] = word_embed_model["pool"][word_embed_model["mapping"][word]]
+        else:
+            embedding[idx] = np.random.rand(embed_dim) / 2.0 - 0.25
+    return embedding
+
+
+def sst_get_trainable_data(phrases, word_id_dict, max_input_length):
+    texts, labels, mask = [], [], []
+
+    for phrase, label in phrases:
+        if not phrase.split():
+            continue
+        phrase_split, mask_split = sst_get_id_input(phrase, word_id_dict, max_input_length)
+        texts.append(phrase_split)
+        labels.append(int(label))
+        mask.append(mask_split)  # field_input is mask
+    labels = np.array(labels, dtype=np.int64)
+    texts = np.reshape(texts, [-1, max_input_length]).astype(np.int32)
+    mask = np.reshape(mask, [-1, max_input_length]).astype(np.int32)
+
+    return SSTDataset(texts, mask, labels)
+
+
+def load_glove_model(filename, embed_dim):
+    if os.path.exists(filename + ".cache"):
+        logger.info("Found cache. Loading...")
+        with open(filename + ".cache", "rb") as fp:
+            return pickle.load(fp)
+    embedding = {"mapping": dict(), "pool": []}
+    with open(filename) as f:
+        for i, line in enumerate(f):
+            line = line.rstrip("\n")
+            vocab_word, *vec = line.rsplit(" ", maxsplit=embed_dim)
+            assert len(vec) == embed_dim, "Unexpected line: '%s'" % line
+            embedding["pool"].append(np.array(list(map(float, vec)), dtype=np.float32))
+            embedding["mapping"][vocab_word] = i
+    embedding["pool"] = np.stack(embedding["pool"])
+    with open(filename + ".cache", "wb") as fp:
+        pickle.dump(embedding, fp)
+    return embedding
+
+
+def read_data_sst(data_path, max_input_length=64, min_count=1, train_with_valid=False,
+                  train_ratio=1., valid_ratio=1., is_binary=False, only_sentence=False):
+    word_id_dict = dict()
+    word_num_dict = Counter()
+
+    sst_path = os.path.join(data_path, "sst")
+    logger.info("Reading SST data...")
+    train_file_name = os.path.join(sst_path, "trees", "train.txt")
+    valid_file_name = os.path.join(sst_path, "trees", "dev.txt")
+    test_file_name = os.path.join(sst_path, "trees", "test.txt")
+    train_trees = read_trees(train_file_name)
+    train_phrases = sst_get_phrases(train_trees, train_ratio, is_binary, only_sentence)
+    logger.info("Finish load train phrases.")
+    valid_trees = read_trees(valid_file_name)
+    valid_phrases = sst_get_phrases(valid_trees, valid_ratio, is_binary, only_sentence)
+    logger.info("Finish load valid phrases.")
+    if train_with_valid:
+        train_phrases += valid_phrases
+    test_trees = read_trees(test_file_name)
+    test_phrases = sst_get_phrases(test_trees, valid_ratio, is_binary, only_sentence=True)
+    logger.info("Finish load test phrases.")
+
+    # get word_id_dict
+    word_id_dict["<pad>"] = 0
+    word_id_dict["<unknown>"] = 1
+    load_word_num_dict(train_phrases, word_num_dict)
+    logger.info("Finish load train words: %d.", len(word_num_dict))
+    load_word_num_dict(valid_phrases, word_num_dict)
+
load_word_num_dict(test_phrases, word_num_dict) + logger.info("Finish load valid+test words: %d.", len(word_num_dict)) + word_id_dict = get_word_id_dict(word_num_dict, word_id_dict, min_count) + logger.info("After trim vocab length: %d.", len(word_id_dict)) + + logger.info("Loading embedding...") + embedding = init_trainable_embedding(os.path.join(data_path, "glove.840B.300d.txt"), word_id_dict) + logger.info("Finish initialize word embedding.") + + dataset_train = sst_get_trainable_data(train_phrases, word_id_dict, max_input_length) + logger.info("Loaded %d training samples.", len(dataset_train)) + dataset_valid = sst_get_trainable_data(valid_phrases, word_id_dict, max_input_length) + logger.info("Loaded %d validation samples.", len(dataset_valid)) + dataset_test = sst_get_trainable_data(test_phrases, word_id_dict, max_input_length) + logger.info("Loaded %d test samples.", len(dataset_test)) + + return dataset_train, dataset_valid, dataset_test, torch.from_numpy(embedding) diff --git a/examples/nas/legacy/textnas/model.py b/examples/nas/legacy/textnas/model.py new file mode 100644 index 0000000000000000000000000000000000000000..631c0e134d391ab707f25c0d3a47a55840f4a164 --- /dev/null +++ b/examples/nas/legacy/textnas/model.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np +import torch +import torch.nn as nn +from nni.nas.pytorch import mutables + +from ops import ConvBN, LinearCombine, AvgPool, MaxPool, RNN, Attention, BatchNorm +from utils import GlobalMaxPool, GlobalAvgPool + + +class Layer(mutables.MutableScope): + def __init__(self, key, prev_keys, hidden_units, choose_from_k, cnn_keep_prob, lstm_keep_prob, att_keep_prob, att_mask): + super(Layer, self).__init__(key) + + def conv_shortcut(kernel_size): + return ConvBN(kernel_size, hidden_units, hidden_units, cnn_keep_prob, False, True) + + self.n_candidates = len(prev_keys) + if self.n_candidates: + self.prec = mutables.InputChoice(choose_from=prev_keys[-choose_from_k:], n_chosen=1) + else: + # first layer, skip input choice + self.prec = None + self.op = mutables.LayerChoice([ + conv_shortcut(1), + conv_shortcut(3), + conv_shortcut(5), + conv_shortcut(7), + AvgPool(3, False, True), + MaxPool(3, False, True), + RNN(hidden_units, lstm_keep_prob), + Attention(hidden_units, 4, att_keep_prob, att_mask) + ]) + if self.n_candidates: + self.skipconnect = mutables.InputChoice(choose_from=prev_keys) + else: + self.skipconnect = None + self.bn = BatchNorm(hidden_units, False, True) + + def forward(self, last_layer, prev_layers, mask): + # pass an extra last_layer to deal with layer 0 (prev_layers is empty) + if self.prec is None: + prec = last_layer + else: + prec = self.prec(prev_layers[-self.prec.n_candidates:]) # skip first + out = self.op(prec, mask) + if self.skipconnect is not None: + connection = self.skipconnect(prev_layers[-self.skipconnect.n_candidates:]) + if connection is not None: + out += connection + out = self.bn(out, mask) + return out + + +class Model(nn.Module): + def __init__(self, embedding, hidden_units=256, num_layers=24, num_classes=5, choose_from_k=5, + lstm_keep_prob=0.5, cnn_keep_prob=0.5, att_keep_prob=0.5, att_mask=True, + embed_keep_prob=0.5, final_output_keep_prob=1.0, global_pool="avg"): + super(Model, self).__init__() + + self.embedding = nn.Embedding.from_pretrained(embedding, freeze=False) + self.hidden_units = hidden_units + self.num_layers = num_layers + self.num_classes = num_classes + + self.init_conv = ConvBN(1, 
self.embedding.embedding_dim, hidden_units, cnn_keep_prob, False, True) + + self.layers = nn.ModuleList() + candidate_keys_pool = [] + for layer_id in range(self.num_layers): + k = "layer_{}".format(layer_id) + self.layers.append(Layer(k, candidate_keys_pool, hidden_units, choose_from_k, + cnn_keep_prob, lstm_keep_prob, att_keep_prob, att_mask)) + candidate_keys_pool.append(k) + + self.linear_combine = LinearCombine(self.num_layers) + self.linear_out = nn.Linear(self.hidden_units, self.num_classes) + + self.embed_dropout = nn.Dropout(p=1 - embed_keep_prob) + self.output_dropout = nn.Dropout(p=1 - final_output_keep_prob) + + assert global_pool in ["max", "avg"] + if global_pool == "max": + self.global_pool = GlobalMaxPool() + elif global_pool == "avg": + self.global_pool = GlobalAvgPool() + + def forward(self, inputs): + sent_ids, mask = inputs + seq = self.embedding(sent_ids.long()) + seq = self.embed_dropout(seq) + + seq = torch.transpose(seq, 1, 2) # from (N, L, C) -> (N, C, L) + + x = self.init_conv(seq, mask) + prev_layers = [] + + for layer in self.layers: + x = layer(x, prev_layers, mask) + prev_layers.append(x) + + x = self.linear_combine(torch.stack(prev_layers)) + x = self.global_pool(x, mask) + x = self.output_dropout(x) + x = self.linear_out(x) + return x diff --git a/examples/nas/legacy/textnas/ops.py b/examples/nas/legacy/textnas/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4a890e60aee974c7844901ee74d6c39d4a0cc098 --- /dev/null +++ b/examples/nas/legacy/textnas/ops.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn.functional as F +from torch import nn + +from utils import get_length, INF + + +class Mask(nn.Module): + def forward(self, seq, mask): + # seq: (N, C, L) + # mask: (N, L) + seq_mask = torch.unsqueeze(mask, 2) + seq_mask = torch.transpose(seq_mask.repeat(1, 1, seq.size()[1]), 1, 2) + return seq.where(torch.eq(seq_mask, 1), torch.zeros_like(seq)) + + +class BatchNorm(nn.Module): + def __init__(self, num_features, pre_mask, post_mask, eps=1e-5, decay=0.9, affine=True): + super(BatchNorm, self).__init__() + self.mask_opt = Mask() + self.pre_mask = pre_mask + self.post_mask = post_mask + self.bn = nn.BatchNorm1d(num_features, eps=eps, momentum=1.0 - decay, affine=affine) + + def forward(self, seq, mask): + if self.pre_mask: + seq = self.mask_opt(seq, mask) + seq = self.bn(seq) + if self.post_mask: + seq = self.mask_opt(seq, mask) + return seq + + +class ConvBN(nn.Module): + def __init__(self, kernal_size, in_channels, out_channels, cnn_keep_prob, + pre_mask, post_mask, with_bn=True, with_relu=True): + super(ConvBN, self).__init__() + self.mask_opt = Mask() + self.pre_mask = pre_mask + self.post_mask = post_mask + self.with_bn = with_bn + self.with_relu = with_relu + self.conv = nn.Conv1d(in_channels, out_channels, kernal_size, 1, bias=True, padding=(kernal_size - 1) // 2) + self.dropout = nn.Dropout(p=(1 - cnn_keep_prob)) + + if with_bn: + self.bn = BatchNorm(out_channels, not post_mask, True) + + if with_relu: + self.relu = nn.ReLU() + + def forward(self, seq, mask): + if self.pre_mask: + seq = self.mask_opt(seq, mask) + seq = self.conv(seq) + if self.post_mask: + seq = self.mask_opt(seq, mask) + if self.with_bn: + seq = self.bn(seq, mask) + if self.with_relu: + seq = self.relu(seq) + seq = self.dropout(seq) + return seq + + +class AvgPool(nn.Module): + def __init__(self, kernal_size, pre_mask, post_mask): + super(AvgPool, self).__init__() + self.avg_pool = 
nn.AvgPool1d(kernal_size, 1, padding=(kernal_size - 1) // 2) + self.pre_mask = pre_mask + self.post_mask = post_mask + self.mask_opt = Mask() + + def forward(self, seq, mask): + if self.pre_mask: + seq = self.mask_opt(seq, mask) + seq = self.avg_pool(seq) + if self.post_mask: + seq = self.mask_opt(seq, mask) + return seq + + +class MaxPool(nn.Module): + def __init__(self, kernal_size, pre_mask, post_mask): + super(MaxPool, self).__init__() + self.max_pool = nn.MaxPool1d(kernal_size, 1, padding=(kernal_size - 1) // 2) + self.pre_mask = pre_mask + self.post_mask = post_mask + self.mask_opt = Mask() + + def forward(self, seq, mask): + if self.pre_mask: + seq = self.mask_opt(seq, mask) + seq = self.max_pool(seq) + if self.post_mask: + seq = self.mask_opt(seq, mask) + return seq + + +class Attention(nn.Module): + def __init__(self, num_units, num_heads, keep_prob, is_mask): + super(Attention, self).__init__() + self.num_heads = num_heads + self.keep_prob = keep_prob + + self.linear_q = nn.Linear(num_units, num_units) + self.linear_k = nn.Linear(num_units, num_units) + self.linear_v = nn.Linear(num_units, num_units) + + self.bn = BatchNorm(num_units, True, is_mask) + self.dropout = nn.Dropout(p=1 - self.keep_prob) + + def forward(self, seq, mask): + in_c = seq.size()[1] + seq = torch.transpose(seq, 1, 2) # (N, L, C) + queries = seq + keys = seq + num_heads = self.num_heads + + # T_q = T_k = L + Q = F.relu(self.linear_q(seq)) # (N, T_q, C) + K = F.relu(self.linear_k(seq)) # (N, T_k, C) + V = F.relu(self.linear_v(seq)) # (N, T_k, C) + + # Split and concat + Q_ = torch.cat(torch.split(Q, in_c // num_heads, dim=2), dim=0) # (h*N, T_q, C/h) + K_ = torch.cat(torch.split(K, in_c // num_heads, dim=2), dim=0) # (h*N, T_k, C/h) + V_ = torch.cat(torch.split(V, in_c // num_heads, dim=2), dim=0) # (h*N, T_k, C/h) + + # Multiplication + outputs = torch.matmul(Q_, K_.transpose(1, 2)) # (h*N, T_q, T_k) + # Scale + outputs = outputs / (K_.size()[-1] ** 0.5) + # Key Masking + key_masks = mask.repeat(num_heads, 1) # (h*N, T_k) + key_masks = torch.unsqueeze(key_masks, 1) # (h*N, 1, T_k) + key_masks = key_masks.repeat(1, queries.size()[1], 1) # (h*N, T_q, T_k) + + paddings = torch.ones_like(outputs) * (-INF) # extremely small value + outputs = torch.where(torch.eq(key_masks, 0), paddings, outputs) + + query_masks = mask.repeat(num_heads, 1) # (h*N, T_q) + query_masks = torch.unsqueeze(query_masks, -1) # (h*N, T_q, 1) + query_masks = query_masks.repeat(1, 1, keys.size()[1]).float() # (h*N, T_q, T_k) + + att_scores = F.softmax(outputs, dim=-1) * query_masks # (h*N, T_q, T_k) + att_scores = self.dropout(att_scores) + + # Weighted sum + x_outputs = torch.matmul(att_scores, V_) # (h*N, T_q, C/h) + # Restore shape + x_outputs = torch.cat( + torch.split(x_outputs, x_outputs.size()[0] // num_heads, dim=0), + dim=2) # (N, T_q, C) + + x = torch.transpose(x_outputs, 1, 2) # (N, C, L) + x = self.bn(x, mask) + + return x + + +class RNN(nn.Module): + def __init__(self, hidden_size, output_keep_prob): + super(RNN, self).__init__() + self.hidden_size = hidden_size + self.bid_rnn = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + self.output_keep_prob = output_keep_prob + + self.out_dropout = nn.Dropout(p=(1 - self.output_keep_prob)) + + def forward(self, seq, mask): + # seq: (N, C, L) + # mask: (N, L) + max_len = seq.size()[2] + length = get_length(mask) + seq = torch.transpose(seq, 1, 2) # to (N, L, C) + packed_seq = nn.utils.rnn.pack_padded_sequence(seq, length, batch_first=True, + enforce_sorted=False) + 
outputs, _ = self.bid_rnn(packed_seq) + outputs = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True, + total_length=max_len)[0] + outputs = outputs.view(-1, max_len, 2, self.hidden_size).sum(2) # (N, L, C) + outputs = self.out_dropout(outputs) # output dropout + return torch.transpose(outputs, 1, 2) # back to: (N, C, L) + + +class LinearCombine(nn.Module): + def __init__(self, layers_num, trainable=True, input_aware=False, word_level=False): + super(LinearCombine, self).__init__() + self.input_aware = input_aware + self.word_level = word_level + + if input_aware: + raise NotImplementedError("Input aware is not supported.") + self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 / layers_num), + requires_grad=trainable) + + def forward(self, seq): + nw = F.softmax(self.w, dim=0) + seq = torch.mul(seq, nw) + seq = torch.sum(seq, dim=0) + return seq diff --git a/examples/nas/legacy/textnas/retrain.py b/examples/nas/legacy/textnas/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..ab8f5c661cb1c1834eb51543944209d853965afa --- /dev/null +++ b/examples/nas/legacy/textnas/retrain.py @@ -0,0 +1,536 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import sys +import os +import logging +import pickle +import shutil +import random +import math + +import time +import datetime +import argparse +import distutils.util + +import numpy as np +import torch +from torch import nn +from torch import optim +from torch.utils.data import DataLoader +import torch.nn.functional as Func + +from model import Model +from nni.nas.pytorch.fixed import apply_fixed_architecture +from dataloader import read_data_sst + + +logger = logging.getLogger("nni.textnas") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--reset_output_dir", + type=distutils.util.strtobool, + default=True, + help="Whether to clean the output dir if existed. (default: %(default)s)") + parser.add_argument( + "--child_fixed_arc", + type=str, + required=True, + help="Architecture json file. (default: %(default)s)") + parser.add_argument( + "--data_path", + type=str, + default="data", + help="Directory containing the dataset and embedding file. (default: %(default)s)") + parser.add_argument( + "--output_dir", + type=str, + default="output", + help="The output directory. (default: %(default)s)") + parser.add_argument( + "--child_lr_decay_scheme", + type=str, + default="cosine", + help="Learning rate annealing strategy, only 'cosine' supported. (default: %(default)s)") + parser.add_argument( + "--batch_size", + type=int, + default=128, + help="Number of samples each batch for training. (default: %(default)s)") + parser.add_argument( + "--eval_batch_size", + type=int, + default=128, + help="Number of samples each batch for evaluation. (default: %(default)s)") + parser.add_argument( + "--class_num", + type=int, + default=5, + help="The number of categories. (default: %(default)s)") + parser.add_argument( + "--global_seed", + type=int, + default=1234, + help="Seed for reproduction. (default: %(default)s)") + parser.add_argument( + "--max_input_length", + type=int, + default=64, + help="The maximum length of the sentence. (default: %(default)s)") + parser.add_argument( + "--num_epochs", + type=int, + default=10, + help="The number of training epochs. (default: %(default)s)") + parser.add_argument( + "--child_num_layers", + type=int, + default=24, + help="The layer number of the architecture. 
(default: %(default)s)")
+    parser.add_argument(
+        "--child_out_filters",
+        type=int,
+        default=256,
+        help="The dimension of hidden states. (default: %(default)s)")
+    parser.add_argument(
+        "--child_out_filters_scale",
+        type=int,
+        default=1,
+        help="The scale of hidden state dimension. (default: %(default)s)")
+    parser.add_argument(
+        "--child_lr_T_0",
+        type=int,
+        default=10,
+        help="The length of one cycle. (default: %(default)s)")
+    parser.add_argument(
+        "--child_lr_T_mul",
+        type=int,
+        default=2,
+        help="The multiplication factor per cycle. (default: %(default)s)")
+    parser.add_argument(
+        "--min_count",
+        type=int,
+        default=1,
+        help="The threshold to cut off low-frequency words. (default: %(default)s)")
+    parser.add_argument(
+        "--train_ratio",
+        type=float,
+        default=1.0,
+        help="The sample ratio for the training set. (default: %(default)s)")
+    parser.add_argument(
+        "--valid_ratio",
+        type=float,
+        default=1.0,
+        help="The sample ratio for the dev set. (default: %(default)s)")
+    parser.add_argument(
+        "--child_grad_bound",
+        type=float,
+        default=5.0,
+        help="The threshold for gradient clipping. (default: %(default)s)")
+    parser.add_argument(
+        "--child_lr",
+        type=float,
+        default=0.02,
+        help="The initial learning rate. (default: %(default)s)")
+    parser.add_argument(
+        "--cnn_keep_prob",
+        type=float,
+        default=0.8,
+        help="Keep prob for the CNN layer. (default: %(default)s)")
+    parser.add_argument(
+        "--final_output_keep_prob",
+        type=float,
+        default=1.0,
+        help="Keep prob for the last output layer. (default: %(default)s)")
+    parser.add_argument(
+        "--lstm_out_keep_prob",
+        type=float,
+        default=0.8,
+        help="Keep prob for the RNN layer. (default: %(default)s)")
+    parser.add_argument(
+        "--embed_keep_prob",
+        type=float,
+        default=0.8,
+        help="Keep prob for the embedding layer. (default: %(default)s)")
+    parser.add_argument(
+        "--attention_keep_prob",
+        type=float,
+        default=0.8,
+        help="Keep prob for the self-attention layer. (default: %(default)s)")
+    parser.add_argument(
+        "--child_l2_reg",
+        type=float,
+        default=3e-6,
+        help="Weight decay factor. (default: %(default)s)")
+    parser.add_argument(
+        "--child_lr_max",
+        type=float,
+        default=0.002,
+        help="The max learning rate. (default: %(default)s)")
+    parser.add_argument(
+        "--child_lr_min",
+        type=float,
+        default=0.001,
+        help="The min learning rate. (default: %(default)s)")
+    parser.add_argument(
+        "--child_optim_algo",
+        type=str,
+        default="adam",
+        help="Optimization algorithm. (default: %(default)s)")
+    parser.add_argument(
+        "--checkpoint_dir",
+        type=str,
+        default="best_checkpoint",
+        help="Path for saved checkpoints. (default: %(default)s)")
+    parser.add_argument(
+        "--output_type",
+        type=str,
+        default="avg",
+        help="Operator type for the time-step reduction. (default: %(default)s)")
+    parser.add_argument(
+        "--multi_path",
+        type=distutils.util.strtobool,
+        default=False,
+        help="Search for multiple paths in the architecture. (default: %(default)s)")
+    parser.add_argument(
+        "--is_binary",
+        type=distutils.util.strtobool,
+        default=False,
+        help="Binary label for the SST dataset. (default: %(default)s)")
+    parser.add_argument(
+        "--is_cuda",
+        type=distutils.util.strtobool,
+        default=True,
+        help="Specify the device type. (default: %(default)s)")
+    parser.add_argument(
+        "--is_mask",
+        type=distutils.util.strtobool,
+        default=True,
+        help="Apply mask. (default: %(default)s)")
+    parser.add_argument(
+        "--fixed_seed",
+        type=distutils.util.strtobool,
+        default=True,
+        help="Fix the seed. (default: %(default)s)")
+    parser.add_argument(
+        "--load_checkpoint",
+        type=distutils.util.strtobool,
+        default=False,
+        help="Whether to load a checkpoint. (default: %(default)s)")
+    parser.add_argument(
+        "--log_every",
+        type=int,
+        default=50,
+        help="How many steps between logs. (default: %(default)s)")
+    parser.add_argument(
+        "--eval_every_epochs",
+        type=int,
+        default=1,
+        help="How many epochs between evaluations. (default: %(default)s)")
+
+    global FLAGS
+
+    FLAGS = parser.parse_args()
+
+
+def set_random_seed(seed):
+    logger.info("set random seed for data reading: {}".format(seed))
+    random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if FLAGS.is_cuda:
+        torch.cuda.manual_seed(seed)
+        torch.backends.cudnn.deterministic = True
+
+
+def get_model(embedding, num_layers):
+    logger.info("num layers: {0}".format(num_layers))
+    assert FLAGS.child_fixed_arc is not None, "Architecture should be provided."
+
+    child_model = Model(
+        embedding=embedding,
+        hidden_units=FLAGS.child_out_filters_scale * FLAGS.child_out_filters,
+        num_layers=num_layers,
+        num_classes=FLAGS.class_num,
+        choose_from_k=5 if FLAGS.multi_path else 1,
+        lstm_keep_prob=FLAGS.lstm_out_keep_prob,
+        cnn_keep_prob=FLAGS.cnn_keep_prob,
+        att_keep_prob=FLAGS.attention_keep_prob,
+        att_mask=FLAGS.is_mask,
+        embed_keep_prob=FLAGS.embed_keep_prob,
+        final_output_keep_prob=FLAGS.final_output_keep_prob,
+        global_pool=FLAGS.output_type)
+
+    apply_fixed_architecture(child_model, FLAGS.child_fixed_arc)
+    return child_model
+
+
+def eval_once(child_model, device, eval_set, criterion, valid_dataloader=None, test_dataloader=None):
+    if eval_set == "test":
+        assert test_dataloader is not None
+        dataloader = test_dataloader
+    elif eval_set == "valid":
+        assert valid_dataloader is not None
+        dataloader = valid_dataloader
+    else:
+        raise NotImplementedError("Unknown eval_set '{}'".format(eval_set))
+
+    tot_acc = 0
+    tot = 0
+    losses = []
+
+    with torch.no_grad():  # save memory
+        for batch in dataloader:
+            (sent_ids, mask), labels = batch
+
+            sent_ids = sent_ids.to(device, non_blocking=True)
+            mask = mask.to(device, non_blocking=True)
+            labels = labels.to(device, non_blocking=True)
+
+            logits = child_model((sent_ids, mask))  # run
+
+            loss = criterion(logits, labels.long())
+            loss = loss.mean()
+            preds = logits.argmax(dim=1).long()
+            acc = torch.eq(preds, labels.long()).long().sum().item()
+
+            losses.append(loss)
+            tot_acc += acc
+            tot += len(labels)
+
+    losses = torch.tensor(losses)
+    loss = losses.mean()
+    if tot > 0:
+        final_acc = float(tot_acc) / tot
+    else:
+        final_acc = 0
+        logger.info("Error in calculating final_acc")
+    return final_acc, loss
+
+
+def print_user_flags(FLAGS, line_limit=80):
+    log_strings = "\n" + "-" * line_limit + "\n"
+    for flag_name in sorted(vars(FLAGS)):
+        value = "{}".format(getattr(FLAGS, flag_name))
+        log_string = flag_name
+        log_string += "."
* (line_limit - len(flag_name) - len(value)) + log_string += value + log_strings = log_strings + log_string + log_strings = log_strings + "\n" + log_strings += "-" * line_limit + logger.info(log_strings) + + +def count_model_params(trainable_params): + num_vars = 0 + for var in trainable_params: + num_vars += np.prod([dim for dim in var.size()]) + return num_vars + + +def update_lr( + optimizer, + epoch, + l2_reg=1e-4, + lr_warmup_val=None, + lr_init=0.1, + lr_decay_scheme="cosine", + lr_max=0.002, + lr_min=0.000000001, + lr_T_0=4, + lr_T_mul=1, + sync_replicas=False, + num_aggregate=None, + num_replicas=None): + if lr_decay_scheme == "cosine": + assert lr_max is not None, "Need lr_max to use lr_cosine" + assert lr_min is not None, "Need lr_min to use lr_cosine" + assert lr_T_0 is not None, "Need lr_T_0 to use lr_cosine" + assert lr_T_mul is not None, "Need lr_T_mul to use lr_cosine" + + T_i = lr_T_0 + t_epoch = epoch + last_reset = 0 + while True: + t_epoch -= T_i + if t_epoch < 0: + break + last_reset += T_i + T_i *= lr_T_mul + + T_curr = epoch - last_reset + + def _update(): + rate = T_curr / T_i * 3.1415926 + lr = lr_min + 0.5 * (lr_max - lr_min) * (1.0 + math.cos(rate)) + return lr + + learning_rate = _update() + else: + raise ValueError("Unknown learning rate decay scheme {}".format(lr_decay_scheme)) + + #update lr in optimizer + for params_group in optimizer.param_groups: + params_group['lr'] = learning_rate + return learning_rate + + +def train(device, data_path, output_dir, num_layers): + logger.info("Build dataloader") + train_dataset, valid_dataset, test_dataset, embedding = \ + read_data_sst(data_path, + FLAGS.max_input_length, + FLAGS.min_count, + train_ratio=FLAGS.train_ratio, + valid_ratio=FLAGS.valid_ratio, + is_binary=FLAGS.is_binary) + train_dataloader = DataLoader(train_dataset, batch_size=FLAGS.batch_size, shuffle=True, pin_memory=True) + test_dataloader = DataLoader(test_dataset, batch_size=FLAGS.eval_batch_size, pin_memory=True) + valid_dataloader = DataLoader(valid_dataset, batch_size=FLAGS.eval_batch_size, pin_memory=True) + + logger.info("Build model") + child_model = get_model(embedding, num_layers) + logger.info("Finish build model") + + #for name, var in child_model.named_parameters(): + # logger.info(name, var.size(), var.requires_grad) # output all params + + num_vars = count_model_params(child_model.parameters()) + logger.info("Model has {} params".format(num_vars)) + + for m in child_model.modules(): # initializer + if isinstance(m, (nn.Conv1d, nn.Linear)): + nn.init.xavier_uniform_(m.weight) + + criterion = nn.CrossEntropyLoss() + + # get optimizer + if FLAGS.child_optim_algo == "adam": + optimizer = optim.Adam(child_model.parameters(), eps=1e-3, weight_decay=FLAGS.child_l2_reg) # with L2 + else: + raise ValueError("Unknown optim_algo {}".format(FLAGS.child_optim_algo)) + + child_model.to(device) + criterion.to(device) + + logger.info("Start training") + start_time = time.time() + step = 0 + + # save path + model_save_path = os.path.join(FLAGS.output_dir, "model.pth") + best_model_save_path = os.path.join(FLAGS.output_dir, "best_model.pth") + best_acc = 0 + start_epoch = 0 + if FLAGS.load_checkpoint: + if os.path.isfile(model_save_path): + checkpoint = torch.load(model_save_path, map_location = torch.device('cpu')) + step = checkpoint['step'] + start_epoch = checkpoint['epoch'] + child_model.load_state_dict(checkpoint['child_model_state_dict']) + optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + + for epoch in range(start_epoch, 
FLAGS.num_epochs):
+        lr = update_lr(optimizer,
+                       epoch,
+                       l2_reg=FLAGS.child_l2_reg,
+                       lr_warmup_val=None,
+                       lr_init=FLAGS.child_lr,
+                       lr_decay_scheme=FLAGS.child_lr_decay_scheme,
+                       lr_max=FLAGS.child_lr_max,
+                       lr_min=FLAGS.child_lr_min,
+                       lr_T_0=FLAGS.child_lr_T_0,
+                       lr_T_mul=FLAGS.child_lr_T_mul)
+        child_model.train()
+        for batch in train_dataloader:
+            (sent_ids, mask), labels = batch
+
+            sent_ids = sent_ids.to(device, non_blocking=True)
+            mask = mask.to(device, non_blocking=True)
+            labels = labels.to(device, non_blocking=True)
+
+            step += 1
+
+            logits = child_model((sent_ids, mask))  # run
+
+            loss = criterion(logits, labels.long())
+            loss = loss.mean()
+            preds = logits.argmax(dim=1).long()
+            acc = torch.eq(preds, labels.long()).long().sum().item()
+
+            optimizer.zero_grad()
+            loss.backward()
+            grad_norm = 0
+            # materialize into a list: parameters() returns a generator, which
+            # would be exhausted by the first clipping call below, so the
+            # per-parameter clipping loop would silently do nothing
+            trainable_params = list(child_model.parameters())
+
+            assert FLAGS.child_grad_bound is not None, "Need grad_bound to clip gradients."
+            # compute the gradient norm value (the huge bound means no actual clipping here)
+            grad_norm = nn.utils.clip_grad_norm_(trainable_params, 99999999)
+            for param in trainable_params:
+                nn.utils.clip_grad_norm_(param, FLAGS.child_grad_bound)  # clip grad
+
+            optimizer.step()
+
+            if step % FLAGS.log_every == 0:
+                curr_time = time.time()
+                log_string = ""
+                log_string += "epoch={:<6d}".format(epoch)
+                log_string += "ch_step={:<6d}".format(step)
+                log_string += " loss={:<8.6f}".format(loss)
+                log_string += " lr={:<8.4f}".format(lr)
+                log_string += " |g|={:<8.4f}".format(grad_norm)
+                log_string += " tr_acc={:<3d}/{:>3d}".format(acc, logits.size()[0])
+                log_string += " mins={:<10.2f}".format(float(curr_time - start_time) / 60)
+                logger.info(log_string)
+
+        epoch += 1
+        save_state = {
+            'step': step,
+            'epoch': epoch,
+            'child_model_state_dict': child_model.state_dict(),
+            'optimizer_state_dict': optimizer.state_dict()}
+        torch.save(save_state, model_save_path)
+        child_model.eval()
+        logger.info("Epoch {}: Eval".format(epoch))
+        eval_acc, eval_loss = eval_once(child_model, device, "test", criterion, test_dataloader=test_dataloader)
+        logger.info("ch_step={} {}_accuracy={:<6.4f} {}_loss={:<6.4f}".format(step, "test", eval_acc, "test", eval_loss))
+        if eval_acc > best_acc:
+            best_acc = eval_acc
+            logger.info("Save best model")
+            save_state = {
+                'step': step,
+                'epoch': epoch,
+                'child_model_state_dict': child_model.state_dict(),
+                'optimizer_state_dict': optimizer.state_dict()}
+            torch.save(save_state, best_model_save_path)
+
+    return eval_acc
+
+
+def main():
+    parse_args()
+    if not os.path.isdir(FLAGS.output_dir):
+        logger.info("Path {} does not exist. Creating.".format(FLAGS.output_dir))
+        os.makedirs(FLAGS.output_dir)
+    elif FLAGS.reset_output_dir:
+        logger.info("Path {} exists. Remove and remake.".format(FLAGS.output_dir))
+        shutil.rmtree(FLAGS.output_dir, ignore_errors=True)
+        os.makedirs(FLAGS.output_dir)
+
+    print_user_flags(FLAGS)
+
+    if FLAGS.fixed_seed:
+        set_random_seed(FLAGS.global_seed)
+
+    device = torch.device("cuda" if FLAGS.is_cuda else "cpu")
+    train(device, FLAGS.data_path, FLAGS.output_dir, FLAGS.child_num_layers)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/nas/legacy/textnas/run_retrain.sh b/examples/nas/legacy/textnas/run_retrain.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1427d772cac170e78907c7ddaea48f417aec6494
--- /dev/null
+++ b/examples/nas/legacy/textnas/run_retrain.sh
@@ -0,0 +1,40 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
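+
+# Usage sketch (assuming the SST data has been prepared under ./data and a
+# searched architecture exported to ./arc/final_arc.json, which the defaults
+# below point at). Extra flags are forwarded to retrain.py via "$@", e.g.:
+#
+#   bash run_retrain.sh --num_epochs=20 --output_dir=output_sst2_long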
+ +export PYTHONPATH="$(pwd)" + +python3 -u retrain.py \ + --train_ratio=1.0 \ + --valid_ratio=1.0 \ + --min_count=1 \ + --is_mask=True \ + --is_binary=True \ + --child_lr_decay_scheme="cosine" \ + --data_path="data" \ + --class_num=2 \ + --child_optim_algo="adam" \ + --output_dir="output_sst2" \ + --global_seed=1234 \ + --max_input_length=64 \ + --batch_size=128 \ + --eval_batch_size=128 \ + --num_epochs=10 \ + --log_every=50 \ + --eval_every_epochs=1 \ + --child_num_layers=24 \ + --child_out_filters=256 \ + --child_l2_reg=1e-6 \ + --cnn_keep_prob=0.8 \ + --final_output_keep_prob=1.0 \ + --embed_keep_prob=0.8 \ + --lstm_out_keep_prob=0.8 \ + --attention_keep_prob=0.8 \ + --child_lr=0.02 \ + --child_lr_max=0.002 \ + --child_lr_min=5e-6 \ + --child_lr_T_0=10 \ + --child_lr_T_mul=2 \ + --multi_path=True \ + --child_fixed_arc="./arc/final_arc.json" \ + --fixed_seed=True \ + "$@" diff --git a/examples/nas/legacy/textnas/search.py b/examples/nas/legacy/textnas/search.py new file mode 100644 index 0000000000000000000000000000000000000000..82bf9c365dee9ae5dcbad4710fc79f2f59c7ce22 --- /dev/null +++ b/examples/nas/legacy/textnas/search.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import random +from argparse import ArgumentParser +from itertools import cycle + +import numpy as np +import torch +import torch.nn as nn + +from nni.algorithms.nas.pytorch.enas import EnasMutator, EnasTrainer +from nni.nas.pytorch.callbacks import LRSchedulerCallback + +from dataloader import read_data_sst +from model import Model +from utils import accuracy + + +logger = logging.getLogger("nni.textnas") + + +class TextNASTrainer(EnasTrainer): + def __init__(self, *args, train_loader=None, valid_loader=None, test_loader=None, **kwargs): + super().__init__(*args, **kwargs) + self.train_loader = train_loader + self.valid_loader = valid_loader + self.test_loader = test_loader + + def init_dataloader(self): + pass + + +if __name__ == "__main__": + parser = ArgumentParser("textnas") + parser.add_argument("--batch-size", default=128, type=int) + parser.add_argument("--log-frequency", default=50, type=int) + parser.add_argument("--seed", default=1234, type=int) + parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--lr", default=5e-3, type=float) + args = parser.parse_args() + + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + np.random.seed(args.seed) + random.seed(args.seed) + torch.backends.cudnn.deterministic = True + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + train_dataset, valid_dataset, test_dataset, embedding = read_data_sst("data") + train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True) + valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True) + test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=4) + train_loader, valid_loader = cycle(train_loader), cycle(valid_loader) + model = Model(embedding) + + mutator = EnasMutator(model, temperature=None, tanh_constant=None, entropy_reduction="mean") + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, eps=1e-3, weight_decay=2e-6) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-5) + + trainer = TextNASTrainer(model, + loss=criterion, + 
metrics=lambda output, target: {"acc": accuracy(output, target)},
+                             reward_function=accuracy,
+                             optimizer=optimizer,
+                             callbacks=[LRSchedulerCallback(lr_scheduler)],
+                             batch_size=args.batch_size,
+                             num_epochs=args.epochs,
+                             dataset_train=None,
+                             dataset_valid=None,
+                             train_loader=train_loader,
+                             valid_loader=valid_loader,
+                             test_loader=test_loader,
+                             log_frequency=args.log_frequency,
+                             mutator=mutator,
+                             mutator_lr=2e-3,
+                             mutator_steps=500,
+                             mutator_steps_aggregate=1,
+                             child_steps=3000,
+                             baseline_decay=0.99,
+                             test_arc_per_epoch=10)
+    trainer.train()
+    os.makedirs("checkpoints", exist_ok=True)
+    for i in range(20):
+        trainer.export(os.path.join("checkpoints", "architecture_%02d.json" % i))
diff --git a/examples/nas/legacy/textnas/utils.py b/examples/nas/legacy/textnas/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5f3627295dee35bf32368a2c629395e26359cfa
--- /dev/null
+++ b/examples/nas/legacy/textnas/utils.py
@@ -0,0 +1,67 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+
+import torch
+import torch.nn as nn
+
+INF = 1E10
+EPS = 1E-12
+
+logger = logging.getLogger("nni.textnas")
+
+
+def get_length(mask):
+    length = torch.sum(mask, 1)
+    length = length.long().cpu()
+    return length
+
+
+class GlobalAvgPool(nn.Module):
+    def forward(self, x, mask):
+        x = torch.sum(x, 2)
+        length = torch.sum(mask, 1, keepdim=True).float()
+        length += torch.eq(length, 0.0).float() * EPS
+        length = length.repeat(1, x.size()[1])
+        x /= length
+        return x
+
+
+class GlobalMaxPool(nn.Module):
+    def forward(self, x, mask):
+        # mark padded positions with 1.0 (kept float so it can scale INF), then
+        # push them to -INF before taking the max; the mask is applied exactly
+        # once and x is not modified in place
+        mask = torch.eq(mask.float(), 0.0).float()
+        mask = torch.unsqueeze(mask, dim=1).repeat(1, x.size()[1], 1)
+        x, _ = torch.max(x - mask * INF, 2)
+        return x
+
+
+class IteratorWrapper:
+    def __init__(self, loader):
+        self.loader = loader
+        self.iterator = None
+
+    def __iter__(self):
+        self.iterator = iter(self.loader)
+        return self
+
+    def __len__(self):
+        return len(self.loader)
+
+    def __next__(self):
+        data = next(self.iterator)
+        text, length = data.text
+        max_length = text.size(1)
+        label = data.label - 1
+        bs = label.size(0)
+        mask = torch.arange(max_length, device=length.device).unsqueeze(0).repeat(bs, 1)
+        mask = mask < length.unsqueeze(-1).repeat(1, max_length)
+        return (text, mask), label
+
+
+def accuracy(output, target):
+    batch_size = target.size(0)
+    _, predicted = torch.max(output.data, 1)
+    return (predicted == target).sum().item() / batch_size
diff --git a/examples/nas/multi-trial/mnasnet/base_mnasnet.py b/examples/nas/multi-trial/mnasnet/base_mnasnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..f431812e3cb07ab3ff43f8344d43dcf7faca0d96
--- /dev/null
+++ b/examples/nas/multi-trial/mnasnet/base_mnasnet.py
@@ -0,0 +1,298 @@
+from nni.retiarii import basic_unit
+import nni.retiarii.nn.pytorch as nn
+import warnings
+
+import torch
+import torch.nn as torch_nn
+from torchvision.models.utils import load_state_dict_from_url
+import torch.nn.functional as F
+
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).resolve().parents[2]))
+
+# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is
+# 1.0 - tensorflow.
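+# For example: a TensorFlow decay of 0.9997 keeps 99.97% of the running
+# statistics per step, while PyTorch's momentum weights the *new* observation,
+# so the equivalent setting is 1 - 0.9997 = 0.0003 (the _BN_MOMENTUM below).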
+_BN_MOMENTUM = 1 - 0.9997 +_FIRST_DEPTH = 32 +_MOBILENET_V2_FILTERS = [16, 24, 32, 64, 96, 160, 320] +_MOBILENET_V2_NUM_LAYERS = [1, 2, 3, 4, 3, 3, 1] + + +class _ResidualBlock(nn.Module): + def __init__(self, net): + super().__init__() + self.net = net + + def forward(self, x): + return self.net(x) + x + + +class _InvertedResidual(nn.Module): + + def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, skip, bn_momentum=0.1): + super(_InvertedResidual, self).__init__() + assert stride in [1, 2] + assert kernel_size in [3, 5] + mid_ch = in_ch * expansion_factor + self.apply_residual = skip and in_ch == out_ch and stride == 1 + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. + nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum)) + + def forward(self, input): + if self.apply_residual: + ret = self.layers(input) + input + else: + ret = self.layers(input) + return ret + + +def _stack_inverted_residual(in_ch, out_ch, kernel_size, skip, stride, exp_factor, repeats, bn_momentum): + """ Creates a stack of inverted residuals. """ + assert repeats >= 1 + # First one has no skip, because feature map size changes. + first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, skip, bn_momentum=bn_momentum) + remaining = [] + for _ in range(1, repeats): + remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, skip, bn_momentum=bn_momentum)) + return nn.Sequential(first, *remaining) + + +def _stack_normal_conv(in_ch, out_ch, kernel_size, skip, dconv, stride, repeats, bn_momentum): + assert repeats >= 1 + stack = [] + for i in range(repeats): + s = stride if i == 0 else 1 + if dconv: + modules = [ + nn.Conv2d(in_ch, in_ch, kernel_size, padding=kernel_size // 2, stride=s, groups=in_ch, bias=False), + nn.BatchNorm2d(in_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + nn.Conv2d(in_ch, out_ch, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum) + ] + else: + modules = [ + nn.Conv2d(in_ch, out_ch, kernel_size, padding=kernel_size // 2, stride=s, bias=False), + nn.ReLU(inplace=True), + nn.BatchNorm2d(out_ch, momentum=bn_momentum) + ] + if skip and in_ch == out_ch and s == 1: + # use different implementation for skip and noskip to align with pytorch + stack.append(_ResidualBlock(nn.Sequential(*modules))) + else: + stack += modules + in_ch = out_ch + return stack + + +def _round_to_multiple_of(val, divisor, round_up_bias=0.9): + """ Asymmetric rounding to make `val` divisible by `divisor`. With default + bias, will round up, unless the number is no more than 10% greater than the + smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """ + assert 0.0 < round_up_bias < 1.0 + new_val = max(divisor, int(val + divisor / 2) // divisor * divisor) + return new_val if new_val >= round_up_bias * val else new_val + divisor + + +def _get_depths(depths, alpha): + """ Scales tensor depths as in reference MobileNet code, prefers rouding up + rather than down. """ + return [_round_to_multiple_of(depth * alpha, 8) for depth in depths] + + +class MNASNet(nn.Module): + """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. 
This + implements the B1 variant of the model. + >>> model = MNASNet(1000, 1.0) + >>> x = torch.rand(1, 3, 224, 224) + >>> y = model(x) + >>> y.dim() + 1 + >>> y.nelement() + 1000 + """ + # Version 2 adds depth scaling in the initial stages of the network. + _version = 2 + + def __init__(self, alpha, depths, convops, kernel_sizes, num_layers, + skips, num_classes=1000, dropout=0.2): + super().__init__() + assert alpha > 0.0 + assert len(depths) == len(convops) == len(kernel_sizes) == len(num_layers) == len(skips) == 7 + self.alpha = alpha + self.num_classes = num_classes + depths = _get_depths([_FIRST_DEPTH] + depths, alpha) + base_filter_sizes = [16, 24, 40, 80, 96, 192, 320] + exp_ratios = [3, 3, 3, 6, 6, 6, 6] + strides = [1, 2, 2, 2, 1, 2, 1] + layers = [ + # First layer: regular conv. + nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + ] + count = 0 + # for conv, prev_depth, depth, ks, skip, stride, repeat, exp_ratio in \ + # zip(convops, depths[:-1], depths[1:], kernel_sizes, skips, strides, num_layers, exp_ratios): + for filter_size, exp_ratio, stride in zip(base_filter_sizes, exp_ratios, strides): + # TODO: restrict that "choose" can only be used within mutator + ph = nn.Placeholder(label=f'mutable_{count}', **{ + 'kernel_size_options': [1, 3, 5], + 'n_layer_options': [1, 2, 3, 4], + 'op_type_options': ['__mutated__.base_mnasnet.RegularConv', + '__mutated__.base_mnasnet.DepthwiseConv', + '__mutated__.base_mnasnet.MobileConv'], + # 'se_ratio_options': [0, 0.25], + 'skip_options': ['identity', 'no'], + 'n_filter_options': [int(filter_size*x) for x in [0.75, 1.0, 1.25]], + 'exp_ratio': exp_ratio, + 'stride': stride, + 'in_ch': depths[0] if count == 0 else None + }) + layers.append(ph) + '''if conv == "mconv": + # MNASNet blocks: stacks of inverted residuals. + layers.append(_stack_inverted_residual(prev_depth, depth, ks, skip, + stride, exp_ratio, repeat, _BN_MOMENTUM)) + else: + # Normal conv and depth-separated conv + layers += _stack_normal_conv(prev_depth, depth, ks, skip, conv == "dconv", + stride, repeat, _BN_MOMENTUM)''' + count += 1 + if count >= 2: + break + layers += [ + # Final mapping to classifier input. + nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + ] + self.layers = nn.Sequential(*layers) + self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), + nn.Linear(1280, num_classes)) + self._initialize_weights() + #self.for_test = 10 + + def forward(self, x): + # if self.for_test == 10: + x = self.layers(x) + # Equivalent to global avgpool and removing H and W dimensions. 
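+        # e.g. a (N, C, H, W) feature map becomes (N, C); taking the mean over
+        # dims 2 and 3 is a cheap stand-in for AdaptiveAvgPool2d(1) plus flatten.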
+ x = x.mean([2, 3]) + x = F.relu(x) + return self.classifier(x) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + torch_nn.init.kaiming_normal_(m.weight, mode="fan_out", + nonlinearity="relu") + if m.bias is not None: + torch_nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + torch_nn.init.ones_(m.weight) + torch_nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + torch_nn.init.kaiming_uniform_(m.weight, mode="fan_out", + nonlinearity="sigmoid") + torch_nn.init.zeros_(m.bias) + + +def test_model(model): + model(torch.randn(2, 3, 224, 224)) + + +# ====================definition of candidate op classes +BN_MOMENTUM = 1 - 0.9997 + + +class RegularConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + self.conv = nn.Conv2d(in_ch, out_ch, kernel_size, padding=kernel_size // 2, stride=stride, bias=False) + self.relu = nn.ReLU(inplace=True) + self.bn = nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM) + + def forward(self, x): + out = self.bn(self.relu(self.conv(x))) + if self.skip == 'identity': + out = out + x + return out + + +class DepthwiseConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + self.conv1 = nn.Conv2d(in_ch, in_ch, kernel_size, padding=kernel_size // 2, stride=stride, groups=in_ch, bias=False) + self.bn1 = nn.BatchNorm2d(in_ch, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(in_ch, out_ch, 1, padding=0, stride=1, bias=False) + self.bn2 = nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM) + + def forward(self, x): + out = self.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + if self.skip == 'identity': + out = out + x + return out + + +class MobileConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + mid_ch = in_ch * exp_ratio + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=(kernel_size - 1) // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. 
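+            # (as in MobileNetV2's linear bottleneck: a ReLU after this 1x1
+            # projection would discard information in the low-dimensional output)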
+ nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM)) + + def forward(self, x): + out = self.layers(x) + if self.skip == 'identity': + out = out + x + return out + + +# mnasnet0_5 +ir_module = _InvertedResidual(16, 16, 3, 1, 1, True) diff --git a/examples/nas/multi-trial/mnasnet/mutator.py b/examples/nas/multi-trial/mnasnet/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..1a55d673af0cbd15bc694f14e830fb85686af97c --- /dev/null +++ b/examples/nas/multi-trial/mnasnet/mutator.py @@ -0,0 +1,64 @@ +import logging +import sys +from pathlib import Path +sys.path.append(str(Path(__file__).resolve().parents[2])) +from nni.retiarii import Mutator + +from base_mnasnet import RegularConv, DepthwiseConv, MobileConv + +_logger = logging.getLogger(__name__) + +class BlockMutator(Mutator): + def __init__(self, target: str): + super(BlockMutator, self).__init__() + self.target = target + + def mutate(self, model): + nodes = model.get_nodes_by_label(self.target) + assert len(nodes) == 1 + node = nodes[0] + graph = node.graph + + related_info = node.operation.parameters + kernel_size = self.choice(related_info['kernel_size_options']) + op_type = self.choice(related_info['op_type_options']) + #self.choice(related_info['se_ratio_options']) + skip = self.choice(related_info['skip_options']) + n_filter = self.choice(related_info['n_filter_options']) + + if related_info['in_ch'] is not None: + in_ch = related_info['in_ch'] + else: + assert len(node.predecessors) == 1 + the_node = node.predecessors[0] + _logger.debug(repr(the_node.operation.parameters)) + _logger.debug(the_node.__repr__()) + in_ch = the_node.operation.parameters['out_ch'] + + # update the placeholder to be a new operation + node.update_operation(op_type, { + 'kernel_size': kernel_size, + 'in_ch': in_ch, + 'out_ch': n_filter, + 'skip': 'no', + 'exp_ratio': related_info['exp_ratio'], + 'stride': related_info['stride'] + }) + + # insert new nodes after the placeholder + n_layer = self.choice(related_info['n_layer_options']) + for i in range(1, n_layer): + node = graph.insert_node_on_edge(node.outgoing_edges[0], + '{}_{}'.format(self.target, i), + op_type, + {'kernel_size': kernel_size, + 'in_ch': n_filter, + 'out_ch': n_filter, + 'skip': skip, + 'exp_ratio': related_info['exp_ratio'], + 'stride': 1}) + + # fix possible shape mismatch + # TODO: use formal method function to update parameters + if len(node.successors) == 1 and 'in_channels' in node.successors[0].operation.parameters: + node.successors[0].operation.parameters['in_channels'] = n_filter \ No newline at end of file diff --git a/examples/nas/multi-trial/mnasnet/search.py b/examples/nas/multi-trial/mnasnet/search.py new file mode 100644 index 0000000000000000000000000000000000000000..79c6cd316134a7efe792542363c74c02f678f3b5 --- /dev/null +++ b/examples/nas/multi-trial/mnasnet/search.py @@ -0,0 +1,58 @@ +import os +import sys +import torch +from pathlib import Path + +import nni.retiarii.evaluator.pytorch.lightning as pl +from nni.retiarii import serialize +from base_mnasnet import MNASNet +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from nni.retiarii.strategy import TPEStrategy +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from mutator import BlockMutator + +if __name__ == '__main__': + _DEFAULT_DEPTHS = [16, 24, 40, 80, 96, 192, 320] + _DEFAULT_CONVOPS = ["dconv", "mconv", "mconv", "mconv", "mconv", "mconv", "mconv"] + _DEFAULT_SKIPS = [False, True, 
True, True, True, True, True] + _DEFAULT_KERNEL_SIZES = [3, 3, 5, 5, 3, 5, 3] + _DEFAULT_NUM_LAYERS = [1, 3, 3, 3, 2, 4, 1] + + base_model = MNASNet(0.5, _DEFAULT_DEPTHS, _DEFAULT_CONVOPS, _DEFAULT_KERNEL_SIZES, + _DEFAULT_NUM_LAYERS, _DEFAULT_SKIPS) + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + train_dataset = serialize(CIFAR10, root='data/cifar10', train=True, download=True, transform=train_transform) + test_dataset = serialize(CIFAR10, root='data/cifar10', train=False, download=True, transform=valid_transform) + trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2) + + applied_mutators = [ + BlockMutator('mutable_0'), + BlockMutator('mutable_1') + ] + + simple_strategy = TPEStrategy() + + exp = RetiariiExperiment(base_model, trainer, applied_mutators, simple_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'mnasnet_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 10 + exp_config.training_service.use_active_gpu = False + exp_config.execution_engine = 'base' + + exp.run(exp_config, 8097) diff --git a/examples/nas/multi-trial/mnist/search.py b/examples/nas/multi-trial/mnist/search.py new file mode 100644 index 0000000000000000000000000000000000000000..d7b41d4a64b1297daf93867cbccf2d519a2a1c19 --- /dev/null +++ b/examples/nas/multi-trial/mnist/search.py @@ -0,0 +1,137 @@ +import random + +import nni +import torch +import torch.nn.functional as F +# remember to import nni.retiarii.nn.pytorch as nn, instead of torch.nn as nn +import nni.retiarii.nn.pytorch as nn +import nni.retiarii.strategy as strategy +from nni.retiarii import model_wrapper +from nni.retiarii.evaluator import FunctionalEvaluator +from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment, debug_mutated_model +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.datasets import MNIST + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, in_ch, out_ch): + super().__init__() + self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, groups=in_ch) + self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1) + + def forward(self, x): + return self.pointwise(self.depthwise(x)) + + +@model_wrapper +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + # LayerChoice is used to select a layer between Conv2d and DwConv. + self.conv2 = nn.LayerChoice([ + nn.Conv2d(32, 64, 3, 1), + DepthwiseSeparableConv(32, 64) + ]) + # ValueChoice is used to select a dropout rate. + # ValueChoice can be used as parameter of modules wrapped in `nni.retiarii.nn.pytorch` + # or customized modules wrapped with `@basic_unit`. 
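+        # A hypothetical custom module taking a ValueChoice as a constructor
+        # argument could look like this (sketch, names are illustrative):
+        #
+        #   @basic_unit
+        #   class MyOp(nn.Module):
+        #       def __init__(self, channels): ...
+        #
+        #   self.op = MyOp(nn.ValueChoice([16, 32]))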
+ self.dropout1 = nn.Dropout(nn.ValueChoice([0.25, 0.5, 0.75])) + self.dropout2 = nn.Dropout(0.5) + feature = nn.ValueChoice([64, 128, 256]) + # Same value choice can be used multiple times + self.fc1 = nn.Linear(9216, feature) + self.fc2 = nn.Linear(feature, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(self.conv2(x), 2) + x = torch.flatten(self.dropout1(x), 1) + x = self.fc2(self.dropout2(F.relu(self.fc1(x)))) + return x + + +def train_epoch(model, device, train_loader, optimizer, epoch): + loss_fn = torch.nn.CrossEntropyLoss() + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + if batch_idx % 10 == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + + +def test_epoch(model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + accuracy = 100. * correct / len(test_loader.dataset) + + print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format( + correct, len(test_loader.dataset), accuracy)) + + return accuracy + + +def evaluate_model(model_cls): + # "model_cls" is a class, need to instantiate + model = model_cls() + + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + transf = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_loader = DataLoader(MNIST('data/mnist', download=True, transform=transf), batch_size=64, shuffle=True) + test_loader = DataLoader(MNIST('data/mnist', download=True, train=False, transform=transf), batch_size=64) + + device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + + for epoch in range(3): + # train the model for one epoch + train_epoch(model, device, train_loader, optimizer, epoch) + # test the model for one epoch + accuracy = test_epoch(model, device, test_loader) + # call report intermediate result. 
Result can be float or dict + nni.report_intermediate_result(accuracy) + + # report final test result + nni.report_final_result(accuracy) + + +if __name__ == '__main__': + base_model = Net() + + search_strategy = strategy.Random() + model_evaluator = FunctionalEvaluator(evaluate_model) + + exp = RetiariiExperiment(base_model, model_evaluator, [], search_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'mnist_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 20 + exp_config.training_service.use_active_gpu = False + export_formatter = 'dict' + + # uncomment this for graph-based execution engine + # exp_config.execution_engine = 'base' + # export_formatter = 'code' + + exp.run(exp_config, 8081 + random.randint(0, 100)) + print('Final model:') + for model_code in exp.export_top_models(formatter=export_formatter): + print(model_code) diff --git a/examples/nas/multi-trial/nasbench101/base_ops.py b/examples/nas/multi-trial/nasbench101/base_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..abe2c2730d919fb9caa5a77361e279417a825a94 --- /dev/null +++ b/examples/nas/multi-trial/nasbench101/base_ops.py @@ -0,0 +1,51 @@ +import math + +import torch.nn as nn + + +def truncated_normal_(tensor, mean=0, std=1): + # https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/15 + size = tensor.shape + tmp = tensor.new_empty(size + (4,)).normal_() + valid = (tmp < 2) & (tmp > -2) + ind = valid.max(-1, keepdim=True)[1] + tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1)) + tensor.data.mul_(std).add_(mean) + + +class ConvBnRelu(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0): + super(ConvBnRelu, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.conv_bn_relu = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ) + self.reset_parameters() + + def reset_parameters(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels + truncated_normal_(m.weight.data, mean=0., std=math.sqrt(1. 
/ fan_in)) + if isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def forward(self, x): + return self.conv_bn_relu(x) + + +class Conv3x3BnRelu(ConvBnRelu): + def __init__(self, in_channels, out_channels): + super(Conv3x3BnRelu, self).__init__(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + + +class Conv1x1BnRelu(ConvBnRelu): + def __init__(self, in_channels, out_channels): + super(Conv1x1BnRelu, self).__init__(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + + +Projection = Conv1x1BnRelu diff --git a/examples/nas/multi-trial/nasbench101/network.py b/examples/nas/multi-trial/nasbench101/network.py new file mode 100644 index 0000000000000000000000000000000000000000..62bd58dc578b7bb5504fcf2e0740c5b85718a491 --- /dev/null +++ b/examples/nas/multi-trial/nasbench101/network.py @@ -0,0 +1,178 @@ +import click +import nni +import nni.retiarii.evaluator.pytorch.lightning as pl +import torch.nn as nn +import torchmetrics +from nni.retiarii import model_wrapper, serialize +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from nni.retiarii.nn.pytorch import NasBench101Cell +from nni.retiarii.strategy import Random +from pytorch_lightning.callbacks import LearningRateMonitor +from timm.optim import RMSpropTF +from torch.optim.lr_scheduler import CosineAnnealingLR +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from base_ops import Conv3x3BnRelu, Conv1x1BnRelu, Projection + + +@model_wrapper +class NasBench101(nn.Module): + def __init__(self, + stem_out_channels: int = 128, + num_stacks: int = 3, + num_modules_per_stack: int = 3, + max_num_vertices: int = 7, + max_num_edges: int = 9, + num_labels: int = 10, + bn_eps: float = 1e-5, + bn_momentum: float = 0.003): + super().__init__() + + op_candidates = { + 'conv3x3-bn-relu': lambda num_features: Conv3x3BnRelu(num_features, num_features), + 'conv1x1-bn-relu': lambda num_features: Conv1x1BnRelu(num_features, num_features), + 'maxpool3x3': lambda num_features: nn.MaxPool2d(3, 1, 1) + } + + # initial stem convolution + self.stem_conv = Conv3x3BnRelu(3, stem_out_channels) + + layers = [] + in_channels = out_channels = stem_out_channels + for stack_num in range(num_stacks): + if stack_num > 0: + downsample = nn.MaxPool2d(kernel_size=2, stride=2) + layers.append(downsample) + out_channels *= 2 + for _ in range(num_modules_per_stack): + cell = NasBench101Cell(op_candidates, in_channels, out_channels, + lambda cin, cout: Projection(cin, cout), + max_num_vertices, max_num_edges, label='cell') + layers.append(cell) + in_channels = out_channels + + self.features = nn.ModuleList(layers) + self.gap = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(out_channels, num_labels) + + for module in self.modules(): + if isinstance(module, nn.BatchNorm2d): + module.eps = bn_eps + module.momentum = bn_momentum + + def forward(self, x): + bs = x.size(0) + out = self.stem_conv(x) + for layer in self.features: + out = layer(out) + out = self.gap(out).view(bs, -1) + out = self.classifier(out) + return out + + def reset_parameters(self): + for module in self.modules(): + if isinstance(module, nn.BatchNorm2d): + module.eps = self.config.bn_eps + module.momentum = self.config.bn_momentum + + +class AccuracyWithLogits(torchmetrics.Accuracy): + def update(self, pred, target): + return super().update(nn.functional.softmax(pred), target) + + +@nni.trace +class NasBench101TrainingModule(pl.LightningModule): + def __init__(self, max_epochs=108, 
learning_rate=0.1, weight_decay=1e-4): + super().__init__() + self.save_hyperparameters('learning_rate', 'weight_decay', 'max_epochs') + self.criterion = nn.CrossEntropyLoss() + self.accuracy = AccuracyWithLogits() + + def forward(self, x): + y_hat = self.model(x) + return y_hat + + def training_step(self, batch, batch_idx): + x, y = batch + y_hat = self(x) + loss = self.criterion(y_hat, y) + self.log('train_loss', loss, prog_bar=True) + self.log('train_accuracy', self.accuracy(y_hat, y), prog_bar=True) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + y_hat = self(x) + self.log('val_loss', self.criterion(y_hat, y), prog_bar=True) + self.log('val_accuracy', self.accuracy(y_hat, y), prog_bar=True) + + def configure_optimizers(self): + optimizer = RMSpropTF(self.parameters(), lr=self.hparams.learning_rate, + weight_decay=self.hparams.weight_decay, + momentum=0.9, alpha=0.9, eps=1.0) + return { + 'optimizer': optimizer, + 'scheduler': CosineAnnealingLR(optimizer, self.hparams.max_epochs) + } + + def on_validation_epoch_end(self): + nni.report_intermediate_result(self.trainer.callback_metrics['val_accuracy'].item()) + + def teardown(self, stage): + if stage == 'fit': + nni.report_final_result(self.trainer.callback_metrics['val_accuracy'].item()) + + +@click.command() +@click.option('--epochs', default=108, help='Training length.') +@click.option('--batch_size', default=256, help='Batch size.') +@click.option('--port', default=8081, help='On which port the experiment is run.') +@click.option('--benchmark', is_flag=True, default=False) +def _multi_trial_test(epochs, batch_size, port, benchmark): + # initalize dataset. Note that 50k+10k is used. It's a little different from paper + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize([0.49139968, 0.48215827, 0.44653124], [0.24703233, 0.24348505, 0.26158768]) + ] + train_dataset = serialize(CIFAR10, 'data', train=True, download=True, transform=transforms.Compose(transf + normalize)) + test_dataset = serialize(CIFAR10, 'data', train=False, transform=transforms.Compose(normalize)) + + # specify training hyper-parameters + training_module = NasBench101TrainingModule(max_epochs=epochs) + # FIXME: need to fix a bug in serializer for this to work + # lr_monitor = serialize(LearningRateMonitor, logging_interval='step') + trainer = pl.Trainer(max_epochs=epochs, gpus=1) + lightning = pl.Lightning( + lightning_module=training_module, + trainer=trainer, + train_dataloader=pl.DataLoader(train_dataset, batch_size=batch_size, shuffle=True), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=batch_size), + ) + + strategy = Random() + + model = NasBench101() + + exp = RetiariiExperiment(model, lightning, [], strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 20 + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = False + + if benchmark: + exp_config.benchmark = 'nasbench101' + exp_config.execution_engine = 'benchmark' + + exp.run(exp_config, port) + + +if __name__ == '__main__': + _multi_trial_test() diff --git a/examples/nas/multi-trial/nasbench201/base_ops.py b/examples/nas/multi-trial/nasbench201/base_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..31427fa5f8fffe137a5dcb46f3fbca64c09bff0e --- /dev/null +++ b/examples/nas/multi-trial/nasbench201/base_ops.py @@ -0,0 +1,138 @@ +import torch +import 
torch.nn as nn + + +OPS_WITH_STRIDE = { + 'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride), + 'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'), + 'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'), + 'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)), + 'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)), + 'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out + else FactorizedReduce(C_in, C_out, stride), +} + +PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3'] + + +class ReLUConvBN(nn.Module): + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation): + super(ReLUConvBN, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_out, kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(C_out) + ) + + def forward(self, x): + return self.op(x) + + +class SepConv(nn.Module): + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation): + super(SepConv, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=C_in, bias=False), + nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(C_out), + ) + + def forward(self, x): + return self.op(x) + + +class Pooling(nn.Module): + def __init__(self, C_in, C_out, stride, mode): + super(Pooling, self).__init__() + if C_in == C_out: + self.preprocess = None + else: + self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1) + if mode == 'avg': + self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) + elif mode == 'max': + self.op = nn.MaxPool2d(3, stride=stride, padding=1) + else: + raise ValueError('Invalid mode={:} in Pooling'.format(mode)) + + def forward(self, x): + if self.preprocess: + x = self.preprocess(x) + return self.op(x) + + +class Zero(nn.Module): + def __init__(self, C_in, C_out, stride): + super(Zero, self).__init__() + self.C_in = C_in + self.C_out = C_out + self.stride = stride + self.is_zero = True + + def forward(self, x): + if self.C_in == self.C_out: + if self.stride == 1: + return x.mul(0.) + else: + return x[:, :, ::self.stride, ::self.stride].mul(0.) 
+ else: + shape = list(x.shape) + shape[1] = self.C_out + zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device) + return zeros + + +class FactorizedReduce(nn.Module): + def __init__(self, C_in, C_out, stride): + super(FactorizedReduce, self).__init__() + self.stride = stride + self.C_in = C_in + self.C_out = C_out + self.relu = nn.ReLU(inplace=False) + if stride == 2: + C_outs = [C_out // 2, C_out - C_out // 2] + self.convs = nn.ModuleList() + for i in range(2): + self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False)) + self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0) + else: + raise ValueError('Invalid stride : {:}'.format(stride)) + self.bn = nn.BatchNorm2d(C_out) + + def forward(self, x): + x = self.relu(x) + y = self.pad(x) + out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out + + +class ResNetBasicblock(nn.Module): + def __init__(self, inplanes, planes, stride): + super(ResNetBasicblock, self).__init__() + assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride) + self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1) + self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1) + if stride == 2: + self.downsample = nn.Sequential( + nn.AvgPool2d(kernel_size=2, stride=2, padding=0), + nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)) + elif inplanes != planes: + self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1) + else: + self.downsample = None + self.in_dim = inplanes + self.out_dim = planes + self.stride = stride + self.num_conv = 2 + + def forward(self, inputs): + basicblock = self.conv_a(inputs) + basicblock = self.conv_b(basicblock) + + if self.downsample is not None: + inputs = self.downsample(inputs) # residual + return inputs + basicblock diff --git a/examples/nas/multi-trial/nasbench201/network.py b/examples/nas/multi-trial/nasbench201/network.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c0e24798074cc147194f61afff71333f734d90 --- /dev/null +++ b/examples/nas/multi-trial/nasbench201/network.py @@ -0,0 +1,167 @@ +import click +import nni +import nni.retiarii.evaluator.pytorch.lightning as pl +import torch.nn as nn +import torchmetrics +from nni.retiarii import model_wrapper, serialize +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from nni.retiarii.nn.pytorch import NasBench201Cell +from nni.retiarii.strategy import Random +from pytorch_lightning.callbacks import LearningRateMonitor +from timm.optim import RMSpropTF +from torch.optim.lr_scheduler import CosineAnnealingLR +from torchvision import transforms +from torchvision.datasets import CIFAR100 + +from base_ops import ResNetBasicblock, PRIMITIVES, OPS_WITH_STRIDE + + +@model_wrapper +class NasBench201(nn.Module): + def __init__(self, + stem_out_channels: int = 16, + num_modules_per_stack: int = 5, + num_labels: int = 100): + super().__init__() + self.channels = C = stem_out_channels + self.num_modules = N = num_modules_per_stack + self.num_labels = num_labels + + self.stem = nn.Sequential( + nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(C) + ) + + layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N + layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N + + C_prev = C + self.cells = nn.ModuleList() + for C_curr, reduction in zip(layer_channels, layer_reductions): + if reduction: + cell = ResNetBasicblock(C_prev, C_curr, 2) + else: + cell = 
NasBench201Cell({prim: lambda C_in, C_out, prim=prim: OPS_WITH_STRIDE[prim](C_in, C_out, 1) for prim in PRIMITIVES},
+                                       C_prev, C_curr, label='cell')  # prim=prim binds each primitive eagerly; a bare closure would late-bind every op to the last primitive
+            self.cells.append(cell)
+            C_prev = C_curr
+
+        self.lastact = nn.Sequential(
+            nn.BatchNorm2d(C_prev),
+            nn.ReLU(inplace=True)
+        )
+        self.global_pooling = nn.AdaptiveAvgPool2d(1)
+        self.classifier = nn.Linear(C_prev, self.num_labels)
+
+    def forward(self, inputs):
+        feature = self.stem(inputs)
+        for cell in self.cells:
+            feature = cell(feature)
+
+        out = self.lastact(feature)
+        out = self.global_pooling(out)
+        out = out.view(out.size(0), -1)
+        logits = self.classifier(out)
+
+        return logits
+
+
+class AccuracyWithLogits(torchmetrics.Accuracy):
+    def update(self, pred, target):
+        return super().update(nn.functional.softmax(pred, dim=-1), target)
+
+
+@nni.trace
+class NasBench201TrainingModule(pl.LightningModule):
+    def __init__(self, max_epochs=200, learning_rate=0.1, weight_decay=5e-4):
+        super().__init__()
+        self.save_hyperparameters('learning_rate', 'weight_decay', 'max_epochs')
+        self.criterion = nn.CrossEntropyLoss()
+        self.accuracy = AccuracyWithLogits()
+
+    def forward(self, x):
+        # self.model is attached by the Retiarii evaluator before training
+        y_hat = self.model(x)
+        return y_hat
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        y_hat = self(x)
+        loss = self.criterion(y_hat, y)
+        self.log('train_loss', loss, prog_bar=True)
+        self.log('train_accuracy', self.accuracy(y_hat, y), prog_bar=True)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        x, y = batch
+        y_hat = self(x)
+        self.log('val_loss', self.criterion(y_hat, y), prog_bar=True)
+        self.log('val_accuracy', self.accuracy(y_hat, y), prog_bar=True)
+
+    def configure_optimizers(self):
+        optimizer = RMSpropTF(self.parameters(), lr=self.hparams.learning_rate,
+                              weight_decay=self.hparams.weight_decay,
+                              momentum=0.9, alpha=0.9, eps=1.0)
+        return {
+            'optimizer': optimizer,
+            'lr_scheduler': CosineAnnealingLR(optimizer, self.hparams.max_epochs)  # Lightning expects the key 'lr_scheduler'
+        }
+
+    def on_validation_epoch_end(self):
+        nni.report_intermediate_result(self.trainer.callback_metrics['val_accuracy'].item())
+
+    def teardown(self, stage):
+        if stage == 'fit':
+            nni.report_final_result(self.trainer.callback_metrics['val_accuracy'].item())
+
+
+@click.command()
+@click.option('--epochs', default=12, help='Training length.')
+@click.option('--batch_size', default=256, help='Batch size.')
+@click.option('--port', default=8081, help='Port on which the experiment runs.')
+@click.option('--benchmark', is_flag=True, default=False)
+def _multi_trial_test(epochs, batch_size, port, benchmark):
+    # initialize dataset; note that 50k (train) + 10k (test) is used, which differs slightly from the paper
+    transf = [
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip()
+    ]
+    normalize = [
+        transforms.ToTensor(),
+        transforms.Normalize([x / 255 for x in [129.3, 124.1, 112.4]], [x / 255 for x in [68.2, 65.4, 70.4]])
+    ]
+    train_dataset = serialize(CIFAR100, 'data', train=True, download=True, transform=transforms.Compose(transf + normalize))
+    test_dataset = serialize(CIFAR100, 'data', train=False, transform=transforms.Compose(normalize))
+
+    # specify training hyper-parameters
+    training_module = NasBench201TrainingModule(max_epochs=epochs)
+    # FIXME: need to fix a bug in serializer for this to work
+    # lr_monitor = serialize(LearningRateMonitor, logging_interval='step')
+    trainer = pl.Trainer(max_epochs=epochs, gpus=1)
+    lightning = pl.Lightning(
+        lightning_module=training_module,
+        trainer=trainer,
+        train_dataloader=pl.DataLoader(train_dataset, batch_size=batch_size, shuffle=True),
+        val_dataloaders=pl.DataLoader(test_dataset, batch_size=batch_size),
+    )
+
+    strategy = Random()
+
+    model = NasBench201()
+
+    exp = RetiariiExperiment(model, lightning, [], strategy)
+
+    exp_config = RetiariiExeConfig('local')
+    exp_config.trial_concurrency = 2
+    exp_config.max_trial_number = 20
+    exp_config.trial_gpu_number = 1
+    exp_config.training_service.use_active_gpu = False
+
+    if benchmark:
+        exp_config.benchmark = 'nasbench201-cifar100'
+        exp_config.execution_engine = 'benchmark'
+
+    exp.run(exp_config, port)
+
+
+if __name__ == '__main__':
+    _multi_trial_test()
diff --git a/examples/nas/multi-trial/transformer/README.md b/examples/nas/multi-trial/transformer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8b170dbbf0935a46450bc9fdc4a8d5d364265ac
--- /dev/null
+++ b/examples/nas/multi-trial/transformer/README.md
@@ -0,0 +1,19 @@
+# Tuning Transformer with Retiarii
+
+This demo is adapted from the PyTorch Transformer tutorial.
+Here, we show how to use the functions provided by Retiarii to tune the Transformer's hyper-parameters for better performance.
+This demo is tested with PyTorch 1.9, torchtext == 0.10, and nni == 2.4.
+Please change the configurations (starting on line 196) accordingly and then run: `python retiarii_transformer_demo.py`
+
+We use WikiText-2, a built-in dataset provided by torchtext, to evaluate the Transformer on language modeling. We tune two hyper-parameters: the number of encoder layers (`n_layer`), whose default value in the original paper is 6, and the dropout rate shared by all encoder layers (`p_dropout`), whose default value is 0.1. We report validation perplexity as the metric (lower is better).
+
+We first tune each hyper-parameter with the other fixed to its default value. The results are:
+![separate](https://user-images.githubusercontent.com/22978940/136937420-80aecee9-43cc-4f8d-b282-18aec0ad3929.png)
+
+Then we tune the two hyper-parameters jointly. The results are:
+

+ +
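+The two hyper-parameters are declared directly in the model with `nn.ValueChoice`; the
+snippet below is copied from `retiarii_transformer_demo.py`:
+
+```python
+p_dropout = nn.ValueChoice([0.1, 0.2, 0.3, 0.4, 0.5], label='p_dropout')
+n_layer = nn.ValueChoice([5, 6, 7, 8, 9], label='n_layer')
+```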

+
+As we can observe, the search finds better hyper-parameters (`n_layer = 8`, `p_dropout = 0.2`) than the default values.
+
diff --git a/examples/nas/multi-trial/transformer/retiarii_transformer_demo.py b/examples/nas/multi-trial/transformer/retiarii_transformer_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e302da12ca9fd1405d3542cb9d4cf29669168fa
--- /dev/null
+++ b/examples/nas/multi-trial/transformer/retiarii_transformer_demo.py
@@ -0,0 +1,207 @@
+###############################################################
+# This demo is adapted from the PyTorch Transformer tutorial.
+# Here we show how to use the functions provided by Retiarii to tune the Transformer's
+# hyper-parameters in order to achieve better performance.
+# This demo is tested with PyTorch 1.9, torchtext == 0.10, and nni == 2.4.
+import torch
+import torch.nn.functional as F
+import nni.retiarii.nn.pytorch as nn
+from nni.retiarii import model_wrapper
+import nni
+import nni.retiarii.strategy as strategy
+from nni.retiarii.evaluator import FunctionalEvaluator
+from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig
+
+import math
+
+from torchtext.datasets import WikiText2
+from torchtext.data.utils import get_tokenizer
+from torchtext.vocab import build_vocab_from_iterator
+
+class PositionalEncoding(nn.Module):
+
+    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
+        super().__init__()
+        self.dropout = nn.Dropout(p=dropout)
+
+        position = torch.arange(max_len).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
+        pe = torch.zeros(max_len, 1, d_model)
+        pe[:, 0, 0::2] = torch.sin(position * div_term)
+        pe[:, 0, 1::2] = torch.cos(position * div_term)
+        self.register_buffer('pe', pe)
+
+    def forward(self, x):
+        """
+        Args:
+            x: Tensor, with size [seq_len, batch_size, embedding_dim]
+        """
+        x = x + self.pe[:x.size(0)]
+        return self.dropout(x)
+
+###############################################################
+# PyTorch already provides modules for the Transformer: nn.TransformerEncoderLayer and
+# nn.TransformerEncoder, so we can use them directly. Note that to enable Retiarii
+# functions, we need to replace "import torch.nn as nn" with "import nni.retiarii.nn.pytorch as nn".
+#
+# We use nn.ValueChoice to make the number of encoder layers (the default is 6) and the
+# dropout rate mutable. For the other hyper-parameters, we follow the setting in the
+# original paper "Attention is All You Need".
+@model_wrapper  # This decorator should be put on the top-level module.
+class Transformer(nn.Module):
+
+    def __init__(self, n_token: int, n_head: int = 8,
+                 d_model: int = 512, d_ff: int = 2048):
+        super().__init__()
+        p_dropout = nn.ValueChoice([0.1, 0.2, 0.3, 0.4, 0.5], label='p_dropout')
+        n_layer = nn.ValueChoice([5, 6, 7, 8, 9], label='n_layer')
+        self.encoder = nn.TransformerEncoder(
+            nn.TransformerEncoderLayer(d_model, n_head, d_ff, p_dropout),
+            n_layer
+        )
+        self.d_model = d_model
+        self.decoder = nn.Linear(d_model, n_token)
+        self.embeddings = nn.Embedding(n_token, d_model)
+        self.position = PositionalEncoding(d_model)
+
+    def forward(self, src, src_mask):
+        """
+        Args:
+            src: Tensor, with size [seq_len, batch_size]
+            src_mask: Tensor, with size [seq_len, seq_len]
+
+        Returns:
+            output: Tensor, with size [seq_len, batch_size, n_token]
+        """
+        src = self.embeddings(src) * math.sqrt(self.d_model)
+        src = self.position(src)
+        output = self.encoder(src, src_mask)
+        output = self.decoder(output)
+        return output
+
+###############################################################
+# We wrap the whole training procedure in the fit function.
+# This function takes one positional argument model_cls, which represents one exploration (i.e., one trial).
+# model_cls is automatically generated and passed in by Retiarii, and we should instantiate it
+# through model = model_cls()
+def fit(model_cls):
+
+    train_iter = WikiText2(split='train')
+    tokenizer = get_tokenizer('basic_english')
+    vocab = build_vocab_from_iterator(map(tokenizer, train_iter), specials=['<unk>'])
+    vocab.set_default_index(vocab['<unk>'])
+
+    def process_data(raw_text_iter):
+        """Converts raw text into a flat Tensor."""
+        data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
+        return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
+
+    train_iter, val_iter, _ = WikiText2()
+    train_data = process_data(train_iter)
+    val_data = process_data(val_iter)
+
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    def generate_batches(data, bsz):
+        """Divides the data into bsz separate sequences."""
+        seq_len = data.size(0) // bsz
+        data = data[:seq_len * bsz]
+        data = data.view(bsz, seq_len).t().contiguous()
+        return data.to(device)
+
+    batch_size = 20
+    eval_batch_size = 10
+    train_data = generate_batches(train_data, batch_size)
+    val_data = generate_batches(val_data, eval_batch_size)
+
+    seq_len = 35
+    def get_seq(source, i):
+        """
+        Args:
+            source: Tensor, with size [full_seq_len, batch_size]
+            i: int
+
+        Returns:
+            tuple (data, target): data has size [seq_len, batch_size]
+            and target has size [seq_len * batch_size]
+        """
+        part_len = min(seq_len, len(source) - 1 - i)
+        data = source[i:i+part_len]
+        target = source[i+1:i+1+part_len].reshape(-1)
+        return data, target
+
+    def generate_square_subsequent_mask(sz):
+        """Generates an upper-triangular matrix of -inf, with zeros on the diagonal."""
+        return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)
+
+    model = model_cls().to(device)
+    lr = 5.0
+    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
+    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
+
+    def train(model):
+        model.train()
+        src_mask = generate_square_subsequent_mask(seq_len).to(device)
+        for i in range(0, train_data.size(0) - 1, seq_len):
+            data, target = get_seq(train_data, i)
+            part_len = data.size(0)
+            if part_len != seq_len:  # only the last batch is shorter
+                src_mask = src_mask[:part_len, :part_len]
+            output = model(data, src_mask)
+            loss = F.cross_entropy(output.view(-1, output.size(-1)), target)
+
+            optimizer.zero_grad()
+            loss.backward()
+            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
+            optimizer.step()
+
+    def evaluate(model, eval_data):
+        model.eval()
+        src_mask = generate_square_subsequent_mask(seq_len).to(device)
+        total_loss = 0.
+        with torch.no_grad():
+            for i in range(0, eval_data.size(0) - 1, seq_len):
+                data, target = get_seq(eval_data, i)
+                part_len = data.size(0)
+                if part_len != seq_len:
+                    src_mask = src_mask[:part_len, :part_len]
+                output = model(data, src_mask)
+                output_flat = output.view(-1, output.size(-1))
+                total_loss += part_len * F.cross_entropy(output_flat, target).item()
+        return total_loss / (len(eval_data) - 1)
+
+    best_val_loss = float('inf')
+
+    for epoch in range(20):
+        train(model)
+        val_loss = evaluate(model, val_data)
+        if val_loss < best_val_loss:
+            best_val_loss = val_loss
+        scheduler.step()
+
+    best_val_ppl = math.exp(best_val_loss)
+    nni.report_final_result(best_val_ppl)  # reports the best validation perplexity to nni as the final result of one trial
+
+if __name__ == "__main__":
+
+    train_iter = WikiText2(split='train')
+    tokenizer = get_tokenizer('basic_english')
+    vocab = build_vocab_from_iterator(map(tokenizer, train_iter), specials=['<unk>'])
+    vocab.set_default_index(vocab['<unk>'])
+
+    n_token = len(vocab)
+    base_model = Transformer(n_token)
+
+    evaluator = FunctionalEvaluator(fit)
+    exp = RetiariiExperiment(base_model, evaluator, [], strategy.Random())
+    exp_config = RetiariiExeConfig('local')
+    exp_config.experiment_name = 'transformer tuning'
+    exp_config.trial_concurrency = 3  # please change the configurations accordingly
+    exp_config.max_trial_number = 25
+    exp_config.trial_gpu_number = 1
+    exp_config.training_service.use_active_gpu = False
+    export_formatter = 'dict'
+
+    exp.run(exp_config, 8081)
+    print('Final model:')
+    for model_code in exp.export_top_models(optimize_mode='minimize', formatter=export_formatter):
+        print(model_code)
diff --git a/examples/nas/oneshot/darts/README.md b/examples/nas/oneshot/darts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6977be71eff85166585ce81cc015671b03837e70
--- /dev/null
+++ b/examples/nas/oneshot/darts/README.md
@@ -0,0 +1 @@
+[Documentation](https://nni.readthedocs.io/en/latest/NAS/DARTS.html)
diff --git a/examples/nas/oneshot/darts/README_zh_CN.md b/examples/nas/oneshot/darts/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c5096308320824c39d4a5dac9227d91ad6feb4b
--- /dev/null
+++ b/examples/nas/oneshot/darts/README_zh_CN.md
@@ -0,0 +1 @@
+[Documentation](https://nni.readthedocs.io/zh/latest/NAS/DARTS.html)
diff --git a/examples/nas/oneshot/darts/datasets.py b/examples/nas/oneshot/darts/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..f19f5691a130df12e900d386473c75a2c9e3f102
--- /dev/null
+++ b/examples/nas/oneshot/darts/datasets.py
@@ -0,0 +1,56 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import numpy as np
+import torch
+from torchvision import transforms
+from torchvision.datasets import CIFAR10
+
+
+class Cutout(object):
+    def __init__(self, length):
+        self.length = length
+
+    def __call__(self, img):
+        h, w = img.size(1), img.size(2)
+        mask = np.ones((h, w), np.float32)
+        y = np.random.randint(h)
+        x = np.random.randint(w)
+
+        y1 = np.clip(y - self.length // 2, 0, h)
+        y2 = np.clip(y + self.length // 2, 0, h)
+        x1 = np.clip(x - self.length // 2, 0, w)
+        x2 = np.clip(x + self.length // 2, 0, w)
+
+        mask[y1: y2, x1: x2] = 0.
+ mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + + +def get_dataset(cls, cutout_length=0): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + cutout = [] + if cutout_length > 0: + cutout.append(Cutout(cutout_length)) + + train_transform = transforms.Compose(transf + normalize + cutout) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid diff --git a/examples/nas/oneshot/darts/model.py b/examples/nas/oneshot/darts/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c4135463ae9f3cc355e4c8eaa117d6d6f0863a39 --- /dev/null +++ b/examples/nas/oneshot/darts/model.py @@ -0,0 +1,160 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from collections import OrderedDict + +import torch +import torch.nn as nn + +import ops +from nni.retiarii.nn.pytorch import LayerChoice, InputChoice + + +class AuxiliaryHead(nn.Module): + """ Auxiliary head in 2/3 place of network to let the gradient flow well """ + + def __init__(self, input_size, C, n_classes): + """ assuming input size 7x7 or 8x8 """ + assert input_size in [7, 8] + super().__init__() + self.net = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False), # 2x2 out + nn.Conv2d(C, 128, kernel_size=1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, kernel_size=2, bias=False), # 1x1 out + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.linear = nn.Linear(768, n_classes) + + def forward(self, x): + out = self.net(x) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + return logits + + +class Node(nn.Module): + def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect): + super().__init__() + self.ops = nn.ModuleList() + choice_keys = [] + for i in range(num_prev_nodes): + stride = 2 if i < num_downsample_connect else 1 + choice_keys.append("{}_p{}".format(node_id, i)) + self.ops.append( + LayerChoice(OrderedDict([ + ("maxpool", ops.PoolBN('max', channels, 3, stride, 1, affine=False)), + ("avgpool", ops.PoolBN('avg', channels, 3, stride, 1, affine=False)), + ("skipconnect", nn.Identity() if stride == 1 else ops.FactorizedReduce(channels, channels, affine=False)), + ("sepconv3x3", ops.SepConv(channels, channels, 3, stride, 1, affine=False)), + ("sepconv5x5", ops.SepConv(channels, channels, 5, stride, 2, affine=False)), + ("dilconv3x3", ops.DilConv(channels, channels, 3, stride, 2, 2, affine=False)), + ("dilconv5x5", ops.DilConv(channels, channels, 5, stride, 4, 2, affine=False)) + ]), label=choice_keys[-1])) + self.drop_path = ops.DropPath() + self.input_switch = InputChoice(n_candidates=len(choice_keys), n_chosen=2, label="{}_switch".format(node_id)) + + def forward(self, prev_nodes): + assert len(self.ops) == len(prev_nodes) + out = [op(node) for op, node in zip(self.ops, prev_nodes)] + out = [self.drop_path(o) if o is not None else None for o in out] + return self.input_switch(out) + + +class Cell(nn.Module): + + def 
__init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction): + super().__init__() + self.reduction = reduction + self.n_nodes = n_nodes + + # If previous cell is reduction cell, current input size does not match with + # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. + if reduction_p: + self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False) + else: + self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False) + self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False) + + # generate dag + self.mutable_ops = nn.ModuleList() + for depth in range(2, self.n_nodes + 2): + self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth), + depth, channels, 2 if reduction else 0)) + + def forward(self, s0, s1): + # s0, s1 are the outputs of previous previous cell and previous cell, respectively. + tensors = [self.preproc0(s0), self.preproc1(s1)] + for node in self.mutable_ops: + cur_tensor = node(tensors) + tensors.append(cur_tensor) + + output = torch.cat(tensors[2:], dim=1) + return output + + +class CNN(nn.Module): + + def __init__(self, input_size, in_channels, channels, n_classes, n_layers, n_nodes=4, + stem_multiplier=3, auxiliary=False): + super().__init__() + self.in_channels = in_channels + self.channels = channels + self.n_classes = n_classes + self.n_layers = n_layers + self.aux_pos = 2 * n_layers // 3 if auxiliary else -1 + + c_cur = stem_multiplier * self.channels + self.stem = nn.Sequential( + nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(c_cur) + ) + + # for the first cell, stem is used for both s0 and s1 + # [!] channels_pp and channels_p is output channel size, but c_cur is input channel size. + channels_pp, channels_p, c_cur = c_cur, c_cur, channels + + self.cells = nn.ModuleList() + reduction_p, reduction = False, False + for i in range(n_layers): + reduction_p, reduction = reduction, False + # Reduce featuremap size and double channels in 1/3 and 2/3 layer. + if i in [n_layers // 3, 2 * n_layers // 3]: + c_cur *= 2 + reduction = True + + cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction) + self.cells.append(cell) + c_cur_out = c_cur * n_nodes + channels_pp, channels_p = channels_p, c_cur_out + + if i == self.aux_pos: + self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.linear = nn.Linear(channels_p, n_classes) + + def forward(self, x): + s0 = s1 = self.stem(x) + + aux_logits = None + for i, cell in enumerate(self.cells): + s0, s1 = s1, cell(s0, s1) + if i == self.aux_pos and self.training: + aux_logits = self.aux_head(s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + + if aux_logits is not None: + return logits, aux_logits + return logits + + def drop_path_prob(self, p): + for module in self.modules(): + if isinstance(module, ops.DropPath): + module.p = p diff --git a/examples/nas/oneshot/darts/ops.py b/examples/nas/oneshot/darts/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..863334e8183f999d097991ec3e83d24b369bfc30 --- /dev/null +++ b/examples/nas/oneshot/darts/ops.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn + + +class DropPath(nn.Module): + def __init__(self, p=0.): + """ + Drop path with probability. 
+
+        Parameters
+        ----------
+        p : float
+            Probability of a path being zeroed.
+        """
+        super().__init__()
+        self.p = p
+
+    def forward(self, x):
+        if self.training and self.p > 0.:
+            keep_prob = 1. - self.p
+            # per data point mask
+            mask = torch.zeros((x.size(0), 1, 1, 1), device=x.device).bernoulli_(keep_prob)
+            return x / keep_prob * mask
+
+        return x
+
+
+class PoolBN(nn.Module):
+    """
+    AvgPool or MaxPool with BN. `pool_type` must be `max` or `avg`.
+    """
+    def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        if pool_type.lower() == 'max':
+            self.pool = nn.MaxPool2d(kernel_size, stride, padding)
+        elif pool_type.lower() == 'avg':
+            self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
+        else:
+            raise ValueError("pool_type must be 'max' or 'avg', got {}".format(pool_type))
+
+        self.bn = nn.BatchNorm2d(C, affine=affine)
+
+    def forward(self, x):
+        out = self.pool(x)
+        out = self.bn(out)
+        return out
+
+
+class StdConv(nn.Module):
+    """
+    Standard conv: ReLU - Conv - BN
+    """
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class FacConv(nn.Module):
+    """
+    Factorized conv: ReLU - Conv(Kx1) - Conv(1xK) - BN
+    """
+    def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
+            nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class DilConv(nn.Module):
+    """
+    (Dilated) depthwise separable conv.
+    ReLU - (Dilated) depthwise separable - Pointwise - BN.
+    If dilation == 2, a 3x3 conv has a 5x5 receptive field and a 5x5 conv has a 9x9 receptive field.
+    """
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in,
+                      bias=False),
+            nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class SepConv(nn.Module):
+    """
+    Depthwise separable conv.
+    Equivalent to DilConv(dilation=1) applied twice.
+    """
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine),
+            DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class FactorizedReduce(nn.Module):
+    """
+    Reduce the feature map size by a factorized pointwise convolution (stride=2).
+ """ + def __init__(self, C_in, C_out, affine=True): + super().__init__() + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + x = self.relu(x) + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out diff --git a/examples/nas/oneshot/darts/retrain.py b/examples/nas/oneshot/darts/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..30765fea57feb9271294738817a3859e9450725a --- /dev/null +++ b/examples/nas/oneshot/darts/retrain.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import time +from argparse import ArgumentParser + +import torch +import torch.nn as nn +from torch.utils.tensorboard import SummaryWriter + +import datasets +import utils +from model import CNN +from nni.nas.pytorch.utils import AverageMeter +from nni.retiarii import fixed_arch + +logger = logging.getLogger('nni') + + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +writer = SummaryWriter() + + +def train(config, train_loader, model, optimizer, criterion, epoch): + top1 = AverageMeter("top1") + top5 = AverageMeter("top5") + losses = AverageMeter("losses") + + cur_step = epoch * len(train_loader) + cur_lr = optimizer.param_groups[0]["lr"] + logger.info("Epoch %d LR %.6f", epoch, cur_lr) + writer.add_scalar("lr", cur_lr, global_step=cur_step) + + model.train() + + for step, (x, y) in enumerate(train_loader): + x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True) + bs = x.size(0) + + optimizer.zero_grad() + logits, aux_logits = model(x) + loss = criterion(logits, y) + if config.aux_weight > 0.: + loss += config.aux_weight * criterion(aux_logits, y) + loss.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip) + optimizer.step() + + accuracy = utils.accuracy(logits, y, topk=(1, 5)) + losses.update(loss.item(), bs) + top1.update(accuracy["acc1"], bs) + top5.update(accuracy["acc5"], bs) + writer.add_scalar("loss/train", loss.item(), global_step=cur_step) + writer.add_scalar("acc1/train", accuracy["acc1"], global_step=cur_step) + writer.add_scalar("acc5/train", accuracy["acc5"], global_step=cur_step) + + if step % config.log_frequency == 0 or step == len(train_loader) - 1: + logger.info( + "Train: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch + 1, config.epochs, step, len(train_loader) - 1, losses=losses, + top1=top1, top5=top5)) + + cur_step += 1 + + logger.info("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg)) + + +def validate(config, valid_loader, model, criterion, epoch, cur_step): + top1 = AverageMeter("top1") + top5 = AverageMeter("top5") + losses = AverageMeter("losses") + + model.eval() + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + bs = X.size(0) + + logits = model(X) + loss = criterion(logits, y) + + accuracy = utils.accuracy(logits, y, topk=(1, 5)) + losses.update(loss.item(), bs) + top1.update(accuracy["acc1"], bs) + top5.update(accuracy["acc5"], bs) + + if step % config.log_frequency == 0 or step == len(valid_loader) - 1: + logger.info( + "Valid: [{:3d}/{}] Step {:03d}/{:03d} Loss 
{losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch + 1, config.epochs, step, len(valid_loader) - 1, losses=losses, + top1=top1, top5=top5)) + + writer.add_scalar("loss/test", losses.avg, global_step=cur_step) + writer.add_scalar("acc1/test", top1.avg, global_step=cur_step) + writer.add_scalar("acc5/test", top5.avg, global_step=cur_step) + + logger.info("Valid: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg)) + + return top1.avg + + +if __name__ == "__main__": + parser = ArgumentParser("darts") + parser.add_argument("--layers", default=20, type=int) + parser.add_argument("--batch-size", default=96, type=int) + parser.add_argument("--log-frequency", default=10, type=int) + parser.add_argument("--epochs", default=600, type=int) + parser.add_argument("--aux-weight", default=0.4, type=float) + parser.add_argument("--drop-path-prob", default=0.2, type=float) + parser.add_argument("--workers", default=4) + parser.add_argument("--grad-clip", default=5., type=float) + parser.add_argument("--arc-checkpoint", default="./checkpoints/epoch_0.json") + + args = parser.parse_args() + dataset_train, dataset_valid = datasets.get_dataset("cifar10", cutout_length=16) + + with fixed_arch(args.arc_checkpoint): + model = CNN(32, 3, 36, 10, args.layers, auxiliary=True) + criterion = nn.CrossEntropyLoss() + + model.to(device) + criterion.to(device) + + optimizer = torch.optim.SGD(model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=1E-6) + + train_loader = torch.utils.data.DataLoader(dataset_train, + batch_size=args.batch_size, + shuffle=True, + num_workers=args.workers, + pin_memory=True) + valid_loader = torch.utils.data.DataLoader(dataset_valid, + batch_size=args.batch_size, + shuffle=False, + num_workers=args.workers, + pin_memory=True) + + best_top1 = 0. + for epoch in range(args.epochs): + drop_prob = args.drop_path_prob * epoch / args.epochs + model.drop_path_prob(drop_prob) + + # training + train(args, train_loader, model, optimizer, criterion, epoch) + + # validation + cur_step = (epoch + 1) * len(train_loader) + top1 = validate(args, valid_loader, model, criterion, epoch, cur_step) + best_top1 = max(best_top1, top1) + + lr_scheduler.step() + + logger.info("Final best Prec@1 = {:.4%}".format(best_top1)) diff --git a/examples/nas/oneshot/darts/search.py b/examples/nas/oneshot/darts/search.py new file mode 100644 index 0000000000000000000000000000000000000000..8cb41d6d35d5793324a5d36936ba9444441f840a --- /dev/null +++ b/examples/nas/oneshot/darts/search.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
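+#
+# DARTS search entry point: builds the CNN super-network defined in model.py, runs
+# either the legacy DartsTrainer (--v1) or the Retiarii one-shot DartsTrainer, and
+# writes the exported architecture to disk (./checkpoints for --v1, checkpoint.json
+# otherwise) for retraining with retrain.py.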
+
+import json
+import logging
+import time
+from argparse import ArgumentParser
+
+import torch
+import torch.nn as nn
+
+import datasets
+from model import CNN
+from nni.nas.pytorch.callbacks import ArchitectureCheckpoint, LRSchedulerCallback
+from utils import accuracy
+
+
+logger = logging.getLogger('nni')
+
+if __name__ == "__main__":
+    parser = ArgumentParser("darts")
+    parser.add_argument("--layers", default=8, type=int)
+    parser.add_argument("--batch-size", default=64, type=int)
+    parser.add_argument("--log-frequency", default=10, type=int)
+    parser.add_argument("--epochs", default=50, type=int)
+    parser.add_argument("--channels", default=16, type=int)
+    parser.add_argument("--unrolled", default=False, action="store_true")
+    parser.add_argument("--visualization", default=False, action="store_true")
+    parser.add_argument("--v1", default=False, action="store_true")
+    args = parser.parse_args()
+
+    dataset_train, dataset_valid = datasets.get_dataset("cifar10")
+
+    model = CNN(32, 3, args.channels, 10, args.layers)
+    criterion = nn.CrossEntropyLoss()
+
+    optim = torch.optim.SGD(model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4)
+    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, args.epochs, eta_min=0.001)
+
+    if args.v1:
+        from nni.algorithms.nas.pytorch.darts import DartsTrainer
+        trainer = DartsTrainer(model,
+                               loss=criterion,
+                               metrics=lambda output, target: accuracy(output, target, topk=(1,)),
+                               optimizer=optim,
+                               num_epochs=args.epochs,
+                               dataset_train=dataset_train,
+                               dataset_valid=dataset_valid,
+                               batch_size=args.batch_size,
+                               log_frequency=args.log_frequency,
+                               unrolled=args.unrolled,
+                               callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")])
+        if args.visualization:
+            trainer.enable_visualization()
+
+        trainer.train()
+    else:
+        from nni.retiarii.oneshot.pytorch import DartsTrainer
+        trainer = DartsTrainer(
+            model=model,
+            loss=criterion,
+            metrics=lambda output, target: accuracy(output, target, topk=(1,)),
+            optimizer=optim,
+            num_epochs=args.epochs,
+            dataset=dataset_train,
+            batch_size=args.batch_size,
+            log_frequency=args.log_frequency,
+            unrolled=args.unrolled
+        )
+        trainer.fit()
+        final_architecture = trainer.export()
+        print('Final architecture:', final_architecture)
+        json.dump(final_architecture, open('checkpoint.json', 'w'))
diff --git a/examples/nas/oneshot/darts/utils.py b/examples/nas/oneshot/darts/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ae80d9e5826261a060655e90c741d66f4906533
--- /dev/null
+++ b/examples/nas/oneshot/darts/utils.py
@@ -0,0 +1,21 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
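+#
+# Metric helpers shared by search.py and retrain.py: accuracy() computes top-k
+# precision and returns a dict keyed by "acc{k}", e.g. topk=(1, 5) yields
+# {"acc1": ..., "acc5": ...}.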
+ +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res \ No newline at end of file diff --git a/examples/nas/oneshot/enas-tf/datasets.py b/examples/nas/oneshot/enas-tf/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..873848966b789133f80d06a3a6e7eb28e726a50c --- /dev/null +++ b/examples/nas/oneshot/enas-tf/datasets.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import tensorflow as tf + +def get_dataset(): + (x_train, y_train), (x_valid, y_valid) = tf.keras.datasets.cifar10.load_data() + x_train, x_valid = x_train / 255.0, x_valid / 255.0 + train_set = (x_train, y_train) + valid_set = (x_valid, y_valid) + return train_set, valid_set diff --git a/examples/nas/oneshot/enas-tf/macro.py b/examples/nas/oneshot/enas-tf/macro.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d73c2e69f9a29ea9c3988155dd3b09861fedc3 --- /dev/null +++ b/examples/nas/oneshot/enas-tf/macro.py @@ -0,0 +1,142 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import tensorflow as tf +from tensorflow.keras import Model, Sequential +from tensorflow.keras.layers import ( + AveragePooling2D, + BatchNormalization, + Conv2D, + Dense, + Dropout, + GlobalAveragePooling2D, + MaxPool2D, + ReLU, + SeparableConv2D, +) + +from nni.nas.tensorflow.mutables import InputChoice, LayerChoice, MutableScope + + +def build_conv(filters, kernel_size, name=None): + return Sequential([ + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + Conv2D(filters, kernel_size, padding='same'), + BatchNormalization(trainable=False), + ReLU(), + ], name) + +def build_separable_conv(filters, kernel_size, name=None): + return Sequential([ + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + SeparableConv2D(filters, kernel_size, padding='same', use_bias=False), + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + ], name) + +def build_avg_pool(filters, name=None): + return Sequential([ + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + AveragePooling2D(pool_size=3, strides=1, padding='same'), + BatchNormalization(trainable=False), + ], name) + +def build_max_pool(filters, name=None): + return Sequential([ + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + MaxPool2D(pool_size=3, strides=1, padding='same'), + BatchNormalization(trainable=False), + ], name) + + +class FactorizedReduce(Model): + def __init__(self, filters): + super().__init__() + self.conv1 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False) + self.conv2 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False) + self.bn = BatchNormalization(trainable=False) + + def call(self, x): + out1 = self.conv1(x) + out2 = self.conv2(x[:, 1:, 1:, :]) + out = tf.concat([out1, out2], axis=3) + out = self.bn(out) + return out + + +class ENASLayer(MutableScope): 
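+    """One ENAS macro layer: a LayerChoice over six conv/pool branches, plus an
+    InputChoice that adds outputs of earlier layers as skip connections."""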
+ def __init__(self, key, prev_labels, filters): + super().__init__(key) + self.mutable = LayerChoice([ + build_conv(filters, 3, 'conv3'), + build_separable_conv(filters, 3, 'sepconv3'), + build_conv(filters, 5, 'conv5'), + build_separable_conv(filters, 5, 'sepconv5'), + build_avg_pool(filters, 'avgpool'), + build_max_pool(filters, 'maxpool'), + ]) + if len(prev_labels) > 0: + self.skipconnect = InputChoice(choose_from=prev_labels, n_chosen=None) + else: + self.skipconnect = None + self.batch_norm = BatchNormalization(trainable=False) + + def call(self, prev_layers): + out = self.mutable(prev_layers[-1]) + if self.skipconnect is not None: + connection = self.skipconnect(prev_layers[:-1]) + if connection is not None: + out += connection + return self.batch_norm(out) + + +class GeneralNetwork(Model): + def __init__(self, num_layers=12, filters=24, num_classes=10, dropout_rate=0.0): + super().__init__() + self.num_layers = num_layers + + self.stem = Sequential([ + Conv2D(filters, kernel_size=3, padding='same', use_bias=False), + BatchNormalization() + ]) + + labels = ['layer_{}'.format(i) for i in range(num_layers)] + self.enas_layers = [] + for i in range(num_layers): + layer = ENASLayer(labels[i], labels[:i], filters) + self.enas_layers.append(layer) + + pool_num = 2 + self.pool_distance = num_layers // (pool_num + 1) + self.pool_layers = [FactorizedReduce(filters) for _ in range(pool_num)] + + self.gap = GlobalAveragePooling2D() + self.dropout = Dropout(dropout_rate) + self.dense = Dense(num_classes) + + def call(self, x): + cur = self.stem(x) + prev_outputs = [cur] + + for i, layer in enumerate(self.enas_layers): + if i > 0 and i % self.pool_distance == 0: + pool = self.pool_layers[i // self.pool_distance - 1] + prev_outputs = [pool(tensor) for tensor in prev_outputs] + cur = prev_outputs[-1] + + cur = layer(prev_outputs) + prev_outputs.append(cur) + + cur = self.gap(cur) + cur = self.dropout(cur) + logits = self.dense(cur) + return logits diff --git a/examples/nas/oneshot/enas-tf/micro.py b/examples/nas/oneshot/enas-tf/micro.py new file mode 100644 index 0000000000000000000000000000000000000000..8c52f4b441d1a5e18beb44a2d13682a790a72b10 --- /dev/null +++ b/examples/nas/oneshot/enas-tf/micro.py @@ -0,0 +1,176 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
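+#
+# Micro (cell-based) ENAS search space for TensorFlow: each Node holds two Cells,
+# and each Cell picks one input (InputChoice) and one op (LayerChoice); outputs of
+# nodes not consumed by later nodes are passed through 1x1 convs and summed into
+# the layer output.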
+ +import tensorflow as tf +from tensorflow.keras import Model, Sequential +from tensorflow.keras.layers import ( + AveragePooling2D, + BatchNormalization, + Conv2D, + Dense, + Dropout, + GlobalAveragePooling2D, + MaxPool2D, + ReLU, + SeparableConv2D, +) + +from nni.nas.tensorflow.mutables import InputChoice, LayerChoice, MutableScope + + +def build_conv_1x1(filters, name=None): + return Sequential([ + Conv2D(filters, kernel_size=1, use_bias=False), + BatchNormalization(trainable=False), + ReLU(), + ], name) + +def build_sep_conv(filters, kernel_size, name=None): + return Sequential([ + ReLU(), + SeparableConv2D(filters, kernel_size, padding='same'), + BatchNormalization(trainable=True), + ], name) + + +class FactorizedReduce(Model): + def __init__(self, filters): + super().__init__() + self.conv1 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False) + self.conv2 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False) + self.bn = BatchNormalization(trainable=False) + + def call(self, x): + out1 = self.conv1(x) + out2 = self.conv2(x[:, 1:, 1:, :]) + out = tf.concat([out1, out2], axis=3) + out = self.bn(out) + return out + + +class ReductionLayer(Model): + def __init__(self, filters): + super().__init__() + self.reduce0 = FactorizedReduce(filters) + self.reduce1 = FactorizedReduce(filters) + + def call(self, prevprev, prev): + return self.reduce0(prevprev), self.reduce1(prev) + + +class Calibration(Model): + def __init__(self, filters): + super().__init__() + self.filters = filters + self.process = None + + def build(self, shape): + assert len(shape) == 4 # batch_size, width, height, filters + if shape[3] != self.filters: + self.process = build_conv_1x1(self.filters) + + def call(self, x): + if self.process is None: + return x + return self.process(x) + + +class Cell(Model): + def __init__(self, cell_name, prev_labels, filters): + super().__init__() + self.input_choice = InputChoice(choose_from=prev_labels, n_chosen=1, return_mask=True, key=cell_name + '_input') + self.op_choice = LayerChoice([ + build_sep_conv(filters, 3), + build_sep_conv(filters, 5), + AveragePooling2D(pool_size=3, strides=1, padding='same'), + MaxPool2D(pool_size=3, strides=1, padding='same'), + Sequential(), # Identity + ], key=cell_name + '_op') + + def call(self, prev_layers): + chosen_input, chosen_mask = self.input_choice(prev_layers) + cell_out = self.op_choice(chosen_input) + return cell_out, chosen_mask + + +class Node(MutableScope): + def __init__(self, node_name, prev_node_names, filters): + super().__init__(node_name) + self.cell_x = Cell(node_name + '_x', prev_node_names, filters) + self.cell_y = Cell(node_name + '_y', prev_node_names, filters) + + def call(self, prev_layers): + out_x, mask_x = self.cell_x(prev_layers) + out_y, mask_y = self.cell_y(prev_layers) + return out_x + out_y, mask_x | mask_y + + +class ENASLayer(Model): + def __init__(self, num_nodes, filters, reduction): + super().__init__() + self.preproc0 = Calibration(filters) + self.preproc1 = Calibration(filters) + + self.nodes = [] + node_labels = [InputChoice.NO_KEY, InputChoice.NO_KEY] + name_prefix = 'reduce' if reduction else 'normal' + for i in range(num_nodes): + node_labels.append('{}_node_{}'.format(name_prefix, i)) + self.nodes.append(Node(node_labels[-1], node_labels[:-1], filters)) + + self.conv_ops = [Conv2D(filters, kernel_size=1, padding='same', use_bias=False) for _ in range(num_nodes + 2)] + self.bn = BatchNormalization(trainable=False) + + def call(self, prevprev, prev): + prev_nodes_out = 
[self.preproc0(prevprev), self.preproc1(prev)] + nodes_used_mask = tf.zeros(len(self.nodes) + 2, dtype=tf.bool) + for i, node in enumerate(self.nodes): + node_out, mask = node(prev_nodes_out) + nodes_used_mask |= tf.pad(mask, [[0, nodes_used_mask.shape[0] - mask.shape[0]]]) + prev_nodes_out.append(node_out) + + outputs = [] + for used, out, conv in zip(nodes_used_mask.numpy(), prev_nodes_out, self.conv_ops): + if not used: + outputs.append(conv(out)) + out = tf.add_n(outputs) + return prev, self.bn(out) + + +class MicroNetwork(Model): + def __init__(self, num_layers=6, num_nodes=5, out_channels=20, num_classes=10, dropout_rate=0.1): + super().__init__() + self.num_layers = num_layers + self.stem = Sequential([ + Conv2D(out_channels * 3, kernel_size=3, padding='same', use_bias=False), + BatchNormalization(), + ]) + + pool_distance = num_layers // 3 + pool_layer_indices = [pool_distance, 2 * pool_distance + 1] + + self.enas_layers = [] + + filters = out_channels + for i in range(num_layers + 2): + if i in pool_layer_indices: + reduction = True + filters *= 2 + self.enas_layers.append(ReductionLayer(filters)) + else: + reduction = False + self.enas_layers.append(ENASLayer(num_nodes, filters, reduction)) + + self.gap = GlobalAveragePooling2D() + self.dropout = Dropout(dropout_rate) + self.dense = Dense(num_classes) + + def call(self, x): + prev = cur = self.stem(x) + for layer in self.enas_layers: + prev, cur = layer(prev, cur) + cur = tf.keras.activations.relu(cur) + cur = self.gap(cur) + cur = self.dropout(cur) + logits = self.dense(cur) + return logits diff --git a/examples/nas/oneshot/enas-tf/search.py b/examples/nas/oneshot/enas-tf/search.py new file mode 100644 index 0000000000000000000000000000000000000000..b7d5ee93d274d6632f545fb3f42f6a97ace61cd4 --- /dev/null +++ b/examples/nas/oneshot/enas-tf/search.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + + +from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy +from tensorflow.keras.optimizers import SGD + +from nni.algorithms.nas.tensorflow import enas + +import datasets +from macro import GeneralNetwork +from micro import MicroNetwork +from utils import accuracy, accuracy_metrics + + +# TODO: argparse + + +dataset_train, dataset_valid = datasets.get_dataset() +#model = GeneralNetwork() +model = MicroNetwork() + +loss = SparseCategoricalCrossentropy(from_logits=True, reduction=Reduction.NONE) +optimizer = SGD(learning_rate=0.05, momentum=0.9) + +trainer = enas.EnasTrainer(model, + loss=loss, + metrics=accuracy_metrics, + reward_function=accuracy, + optimizer=optimizer, + batch_size=64, + num_epochs=310, + dataset_train=dataset_train, + dataset_valid=dataset_valid) +trainer.train() diff --git a/examples/nas/oneshot/enas-tf/utils.py b/examples/nas/oneshot/enas-tf/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dc924a96f3f13fcf09737f8e77032e638196782b --- /dev/null +++ b/examples/nas/oneshot/enas-tf/utils.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
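+#
+# Eager-mode metric helpers for the ENAS TensorFlow trainer: accuracy() returns a
+# plain Python float, and accuracy_metrics() wraps it in the dict that the trainer
+# logs ({'enas_acc': ...}); accuracy() also serves as the controller's reward.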
+ +import tensorflow as tf + + +def accuracy_metrics(y_true, logits): + return {'enas_acc': accuracy(y_true, logits)} + +def accuracy(y_true, logits): + # y_true: shape=(batch_size) or (batch_size,1), type=integer + # logits: shape=(batch_size, num_of_classes), type=float + # returns float + batch_size = y_true.shape[0] + y_true = tf.squeeze(y_true) + y_pred = tf.math.argmax(logits, axis=1) + y_pred = tf.cast(y_pred, y_true.dtype) + equal = tf.cast(y_pred == y_true, tf.int32) + return tf.math.reduce_sum(equal).numpy() / batch_size diff --git a/examples/nas/oneshot/enas/README.md b/examples/nas/oneshot/enas/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c942ff41ad0beefe98150da165a91dfadaaab095 --- /dev/null +++ b/examples/nas/oneshot/enas/README.md @@ -0,0 +1 @@ +[Documentation](https://nni.readthedocs.io/en/latest/NAS/ENAS.html) diff --git a/examples/nas/oneshot/enas/README_zh_CN.md b/examples/nas/oneshot/enas/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..03919b0f682473de2aceef6e1fd3dbfc9a50a82c --- /dev/null +++ b/examples/nas/oneshot/enas/README_zh_CN.md @@ -0,0 +1 @@ +[文档](https://nni.readthedocs.io/zh/latest/NAS/ENAS.html) diff --git a/examples/nas/oneshot/enas/datasets.py b/examples/nas/oneshot/enas/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5128a8a91478676f6dbf2b256d8ab1849b7dc0 --- /dev/null +++ b/examples/nas/oneshot/enas/datasets.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from torchvision import transforms +from torchvision.datasets import CIFAR10 + + +def get_dataset(cls): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + + train_transform = transforms.Compose(transf + normalize) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid diff --git a/examples/nas/oneshot/enas/macro.py b/examples/nas/oneshot/enas/macro.py new file mode 100644 index 0000000000000000000000000000000000000000..198e62b87aba1091e435916462644ed059280191 --- /dev/null +++ b/examples/nas/oneshot/enas/macro.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
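+#
+# Macro ENAS search space (PyTorch): a stack of ENASLayers, each choosing among six
+# conv/pool branches (LayerChoice) with optional skip connections from all earlier
+# layers (InputChoice); FactorizedReduce downsamples at 1/3 and 2/3 of the depth.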
+ +import torch.nn as nn + +from nni.nas.pytorch import mutables +from ops import FactorizedReduce, ConvBranch, PoolBranch + + +class ENASLayer(mutables.MutableScope): + + def __init__(self, key, prev_labels, in_filters, out_filters): + super().__init__(key) + self.in_filters = in_filters + self.out_filters = out_filters + self.mutable = mutables.LayerChoice([ + ConvBranch(in_filters, out_filters, 3, 1, 1, separable=False), + ConvBranch(in_filters, out_filters, 3, 1, 1, separable=True), + ConvBranch(in_filters, out_filters, 5, 1, 2, separable=False), + ConvBranch(in_filters, out_filters, 5, 1, 2, separable=True), + PoolBranch('avg', in_filters, out_filters, 3, 1, 1), + PoolBranch('max', in_filters, out_filters, 3, 1, 1) + ]) + if len(prev_labels) > 0: + self.skipconnect = mutables.InputChoice(choose_from=prev_labels, n_chosen=None) + else: + self.skipconnect = None + self.batch_norm = nn.BatchNorm2d(out_filters, affine=False) + + def forward(self, prev_layers): + out = self.mutable(prev_layers[-1]) + if self.skipconnect is not None: + connection = self.skipconnect(prev_layers[:-1]) + if connection is not None: + out = out + connection + return self.batch_norm(out) + + +class GeneralNetwork(nn.Module): + def __init__(self, num_layers=12, out_filters=24, in_channels=3, num_classes=10, + dropout_rate=0.0): + super().__init__() + self.num_layers = num_layers + self.num_classes = num_classes + self.out_filters = out_filters + + self.stem = nn.Sequential( + nn.Conv2d(in_channels, out_filters, 3, 1, 1, bias=False), + nn.BatchNorm2d(out_filters) + ) + + pool_distance = self.num_layers // 3 + self.pool_layers_idx = [pool_distance - 1, 2 * pool_distance - 1] + self.dropout_rate = dropout_rate + self.dropout = nn.Dropout(self.dropout_rate) + + self.layers = nn.ModuleList() + self.pool_layers = nn.ModuleList() + labels = [] + for layer_id in range(self.num_layers): + labels.append("layer_{}".format(layer_id)) + if layer_id in self.pool_layers_idx: + self.pool_layers.append(FactorizedReduce(self.out_filters, self.out_filters)) + self.layers.append(ENASLayer(labels[-1], labels[:-1], self.out_filters, self.out_filters)) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.dense = nn.Linear(self.out_filters, self.num_classes) + + def forward(self, x): + bs = x.size(0) + cur = self.stem(x) + + layers = [cur] + + for layer_id in range(self.num_layers): + cur = self.layers[layer_id](layers) + layers.append(cur) + if layer_id in self.pool_layers_idx: + for i, layer in enumerate(layers): + layers[i] = self.pool_layers[self.pool_layers_idx.index(layer_id)](layer) + cur = layers[-1] + + cur = self.gap(cur).view(bs, -1) + cur = self.dropout(cur) + logits = self.dense(cur) + return logits diff --git a/examples/nas/oneshot/enas/micro.py b/examples/nas/oneshot/enas/micro.py new file mode 100644 index 0000000000000000000000000000000000000000..83b8f61d52164fab572fb6a40088006b0e1ef526 --- /dev/null +++ b/examples/nas/oneshot/enas/micro.py @@ -0,0 +1,193 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
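+#
+# Micro (cell-based) ENAS search space (PyTorch): each Node combines two Cells that
+# each pick an input and an op; outputs of nodes unused by later nodes are
+# concatenated and fused by the 1x1 convolution final_conv_w.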
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nni.nas.pytorch import mutables +from ops import FactorizedReduce, StdConv, SepConvBN, Pool + + +class AuxiliaryHead(nn.Module): + def __init__(self, in_channels, num_classes): + super().__init__() + self.in_channels = in_channels + self.num_classes = num_classes + self.pooling = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(5, 3, 2) + ) + self.proj = nn.Sequential( + StdConv(in_channels, 128), + StdConv(128, 768) + ) + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(768, 10, bias=False) + + def forward(self, x): + bs = x.size(0) + x = self.pooling(x) + x = self.proj(x) + x = self.avg_pool(x).view(bs, -1) + x = self.fc(x) + return x + + +class Cell(nn.Module): + def __init__(self, cell_name, prev_labels, channels): + super().__init__() + self.input_choice = mutables.InputChoice(choose_from=prev_labels, n_chosen=1, return_mask=True, + key=cell_name + "_input") + self.op_choice = mutables.LayerChoice([ + SepConvBN(channels, channels, 3, 1), + SepConvBN(channels, channels, 5, 2), + Pool("avg", 3, 1, 1), + Pool("max", 3, 1, 1), + nn.Identity() + ], key=cell_name + "_op") + + def forward(self, prev_layers): + from nni.retiarii.oneshot.pytorch.random import PathSamplingInputChoice + out = self.input_choice(prev_layers) + if isinstance(self.input_choice, PathSamplingInputChoice): + # Retiarii pattern + return out, self.input_choice.mask + else: + chosen_input, chosen_mask = out + cell_out = self.op_choice(chosen_input) + return cell_out, chosen_mask + + +class Node(mutables.MutableScope): + def __init__(self, node_name, prev_node_names, channels): + super().__init__(node_name) + self.cell_x = Cell(node_name + "_x", prev_node_names, channels) + self.cell_y = Cell(node_name + "_y", prev_node_names, channels) + + def forward(self, prev_layers): + out_x, mask_x = self.cell_x(prev_layers) + out_y, mask_y = self.cell_y(prev_layers) + return out_x + out_y, mask_x | mask_y + + +class Calibration(nn.Module): + def __init__(self, in_channels, out_channels): + super().__init__() + self.process = None + if in_channels != out_channels: + self.process = StdConv(in_channels, out_channels) + + def forward(self, x): + if self.process is None: + return x + return self.process(x) + + +class ReductionLayer(nn.Module): + def __init__(self, in_channels_pp, in_channels_p, out_channels): + super().__init__() + self.reduce0 = FactorizedReduce(in_channels_pp, out_channels, affine=False) + self.reduce1 = FactorizedReduce(in_channels_p, out_channels, affine=False) + + def forward(self, pprev, prev): + return self.reduce0(pprev), self.reduce1(prev) + + +class ENASLayer(nn.Module): + def __init__(self, num_nodes, in_channels_pp, in_channels_p, out_channels, reduction): + super().__init__() + self.preproc0 = Calibration(in_channels_pp, out_channels) + self.preproc1 = Calibration(in_channels_p, out_channels) + + self.num_nodes = num_nodes + name_prefix = "reduce" if reduction else "normal" + self.nodes = nn.ModuleList() + node_labels = [mutables.InputChoice.NO_KEY, mutables.InputChoice.NO_KEY] + for i in range(num_nodes): + node_labels.append("{}_node_{}".format(name_prefix, i)) + self.nodes.append(Node(node_labels[-1], node_labels[:-1], out_channels)) + self.final_conv_w = nn.Parameter(torch.zeros(out_channels, self.num_nodes + 2, out_channels, 1, 1), requires_grad=True) + self.bn = nn.BatchNorm2d(out_channels, affine=False) + self.reset_parameters() + + def reset_parameters(self): + nn.init.kaiming_normal_(self.final_conv_w) + + def 
forward(self, pprev, prev): + pprev_, prev_ = self.preproc0(pprev), self.preproc1(prev) + + prev_nodes_out = [pprev_, prev_] + nodes_used_mask = torch.zeros(self.num_nodes + 2, dtype=torch.bool, device=prev.device) + for i in range(self.num_nodes): + node_out, mask = self.nodes[i](prev_nodes_out) + nodes_used_mask[:mask.size(0)] |= mask.to(node_out.device) + prev_nodes_out.append(node_out) + + unused_nodes = torch.cat([out for used, out in zip(nodes_used_mask, prev_nodes_out) if not used], 1) + unused_nodes = F.relu(unused_nodes) + conv_weight = self.final_conv_w[:, ~nodes_used_mask, :, :, :] + conv_weight = conv_weight.view(conv_weight.size(0), -1, 1, 1) + out = F.conv2d(unused_nodes, conv_weight) + return prev, self.bn(out) + + +class MicroNetwork(nn.Module): + def __init__(self, num_layers=2, num_nodes=5, out_channels=24, in_channels=3, num_classes=10, + dropout_rate=0.0, use_aux_heads=False): + super().__init__() + self.num_layers = num_layers + self.use_aux_heads = use_aux_heads + + self.stem = nn.Sequential( + nn.Conv2d(in_channels, out_channels * 3, 3, 1, 1, bias=False), + nn.BatchNorm2d(out_channels * 3) + ) + + pool_distance = self.num_layers // 3 + pool_layers = [pool_distance, 2 * pool_distance + 1] + self.dropout = nn.Dropout(dropout_rate) + + self.layers = nn.ModuleList() + c_pp = c_p = out_channels * 3 + c_cur = out_channels + for layer_id in range(self.num_layers + 2): + reduction = False + if layer_id in pool_layers: + c_cur, reduction = c_p * 2, True + self.layers.append(ReductionLayer(c_pp, c_p, c_cur)) + c_pp = c_p = c_cur + self.layers.append(ENASLayer(num_nodes, c_pp, c_p, c_cur, reduction)) + if self.use_aux_heads and layer_id == pool_layers[-1] + 1: + self.layers.append(AuxiliaryHead(c_cur, num_classes)) + c_pp, c_p = c_p, c_cur + + self.gap = nn.AdaptiveAvgPool2d(1) + self.dense = nn.Linear(c_cur, num_classes) + + self.reset_parameters() + + def reset_parameters(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + def forward(self, x): + bs = x.size(0) + prev = cur = self.stem(x) + aux_logits = None + + for layer in self.layers: + if isinstance(layer, AuxiliaryHead): + if self.training: + aux_logits = layer(cur) + else: + prev, cur = layer(prev, cur) + + cur = self.gap(F.relu(cur)).view(bs, -1) + cur = self.dropout(cur) + logits = self.dense(cur) + + if aux_logits is not None: + return logits, aux_logits + return logits diff --git a/examples/nas/oneshot/enas/ops.py b/examples/nas/oneshot/enas/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..05cfa7d7cae2255d194209aa4be1079701399979 --- /dev/null +++ b/examples/nas/oneshot/enas/ops.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
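+#
+# Candidate operations for the ENAS search spaces: ConvBranch/PoolBranch/StdConv for
+# the macro space, SepConvBN/Pool for the micro space, and FactorizedReduce for
+# stride-2 downsampling.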
+ +import torch +import torch.nn as nn + + +class StdConv(nn.Module): + def __init__(self, C_in, C_out): + super(StdConv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=False), + nn.ReLU() + ) + + def forward(self, x): + return self.conv(x) + + +class PoolBranch(nn.Module): + def __init__(self, pool_type, C_in, C_out, kernel_size, stride, padding, affine=False): + super().__init__() + self.preproc = StdConv(C_in, C_out) + self.pool = Pool(pool_type, kernel_size, stride, padding) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + out = self.preproc(x) + out = self.pool(out) + out = self.bn(out) + return out + + +class SeparableConv(nn.Module): + def __init__(self, C_in, C_out, kernel_size, stride, padding): + super(SeparableConv, self).__init__() + self.depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, padding=padding, stride=stride, + groups=C_in, bias=False) + self.pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, bias=False) + + def forward(self, x): + out = self.depthwise(x) + out = self.pointwise(out) + return out + + +class ConvBranch(nn.Module): + def __init__(self, C_in, C_out, kernel_size, stride, padding, separable): + super(ConvBranch, self).__init__() + self.preproc = StdConv(C_in, C_out) + if separable: + self.conv = SeparableConv(C_out, C_out, kernel_size, stride, padding) + else: + self.conv = nn.Conv2d(C_out, C_out, kernel_size, stride=stride, padding=padding) + self.postproc = nn.Sequential( + nn.BatchNorm2d(C_out, affine=False), + nn.ReLU() + ) + + def forward(self, x): + out = self.preproc(x) + out = self.conv(out) + out = self.postproc(out) + return out + + +class FactorizedReduce(nn.Module): + def __init__(self, C_in, C_out, affine=False): + super().__init__() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out + + +class Pool(nn.Module): + def __init__(self, pool_type, kernel_size, stride, padding): + super().__init__() + if pool_type.lower() == 'max': + self.pool = nn.MaxPool2d(kernel_size, stride, padding) + elif pool_type.lower() == 'avg': + self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False) + else: + raise ValueError() + + def forward(self, x): + return self.pool(x) + + +class SepConvBN(nn.Module): + def __init__(self, C_in, C_out, kernel_size, padding): + super().__init__() + self.relu = nn.ReLU() + self.conv = SeparableConv(C_in, C_out, kernel_size, 1, padding) + self.bn = nn.BatchNorm2d(C_out, affine=True) + + def forward(self, x): + x = self.relu(x) + x = self.conv(x) + x = self.bn(x) + return x diff --git a/examples/nas/oneshot/enas/search.py b/examples/nas/oneshot/enas/search.py new file mode 100644 index 0000000000000000000000000000000000000000..6ee0813e27fec6c6a396ab61199a09f71cedf004 --- /dev/null +++ b/examples/nas/oneshot/enas/search.py @@ -0,0 +1,80 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
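+# Typical invocations (illustrative):
+#     python search.py --search-for macro               # macro search space, 310 epochs by default
+#     python search.py --search-for micro --epochs 150  # micro (cell-based) search space
+#     python search.py --search-for micro --v1          # legacy v1 EnasTrainer instead of Retiarii
+# The flags map directly onto the ArgumentParser defined below.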
+ +import logging +import time +from argparse import ArgumentParser + +import torch +import torch.nn as nn + +import datasets +from macro import GeneralNetwork +from micro import MicroNetwork +from nni.algorithms.nas.pytorch import enas +from nni.nas.pytorch.callbacks import (ArchitectureCheckpoint, + LRSchedulerCallback) +from utils import accuracy, reward_accuracy + +logger = logging.getLogger('nni') + + +if __name__ == "__main__": + parser = ArgumentParser("enas") + parser.add_argument("--batch-size", default=128, type=int) + parser.add_argument("--log-frequency", default=10, type=int) + parser.add_argument("--search-for", choices=["macro", "micro"], default="macro") + parser.add_argument("--epochs", default=None, type=int, help="Number of epochs (default: macro 310, micro 150)") + parser.add_argument("--visualization", default=False, action="store_true") + parser.add_argument("--v1", default=False, action="store_true") + args = parser.parse_args() + + dataset_train, dataset_valid = datasets.get_dataset("cifar10") + mutator = None + ctrl_kwargs = {} + if args.search_for == "macro": + model = GeneralNetwork() + num_epochs = args.epochs or 310 + elif args.search_for == "micro": + model = MicroNetwork(num_layers=6, out_channels=20, num_nodes=5, dropout_rate=0.1, use_aux_heads=False) + num_epochs = args.epochs or 150 + if args.v1: + mutator = enas.EnasMutator(model, tanh_constant=1.1, cell_exit_extra_step=True) + else: + ctrl_kwargs = {"tanh_constant": 1.1} + else: + raise AssertionError + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), 0.05, momentum=0.9, weight_decay=1.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=0.001) + + if args.v1: + trainer = enas.EnasTrainer(model, + loss=criterion, + metrics=accuracy, + reward_function=reward_accuracy, + optimizer=optimizer, + callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")], + batch_size=args.batch_size, + num_epochs=num_epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + log_frequency=args.log_frequency, + mutator=mutator) + if args.visualization: + trainer.enable_visualization() + trainer.train() + else: + from nni.retiarii.oneshot.pytorch.enas import EnasTrainer + trainer = EnasTrainer(model, + loss=criterion, + metrics=accuracy, + reward_function=reward_accuracy, + optimizer=optimizer, + batch_size=args.batch_size, + num_epochs=num_epochs, + dataset=dataset_train, + log_frequency=args.log_frequency, + ctrl_kwargs=ctrl_kwargs) + trainer.fit() diff --git a/examples/nas/oneshot/enas/utils.py b/examples/nas/oneshot/enas/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..712b98bec4efb642492752e665d597684a36e434 --- /dev/null +++ b/examples/nas/oneshot/enas/utils.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
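+# Usage sketch (illustrative): both helpers take raw logits and integer labels.
+#
+#     >>> import torch
+#     >>> logits = torch.tensor([[2.0, 1.0, 0.1], [0.2, 3.0, 0.5]])
+#     >>> target = torch.tensor([0, 1])
+#     >>> accuracy(logits, target)
+#     {'acc1': 1.0}
+#     >>> reward_accuracy(logits, target)
+#     1.0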
+ +import torch + + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res + + +def reward_accuracy(output, target, topk=(1,)): + batch_size = target.size(0) + _, predicted = torch.max(output.data, 1) + return (predicted == target).sum().item() / batch_size diff --git a/examples/nas/oneshot/naive-tf/train.py b/examples/nas/oneshot/naive-tf/train.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b2062a8954ca01b8b6e9ef11b2dfe99ca3e815 --- /dev/null +++ b/examples/nas/oneshot/naive-tf/train.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, MaxPool2D) +from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy +from tensorflow.keras.optimizers import SGD + +from nni.nas.tensorflow.mutables import LayerChoice, InputChoice +from nni.algorithms.nas.tensorflow.enas import EnasTrainer + + +class Net(Model): + def __init__(self): + super().__init__() + self.conv1 = LayerChoice([ + Conv2D(6, 3, padding='same', activation='relu'), + Conv2D(6, 5, padding='same', activation='relu'), + ]) + self.pool = MaxPool2D(2) + self.conv2 = LayerChoice([ + Conv2D(16, 3, padding='same', activation='relu'), + Conv2D(16, 5, padding='same', activation='relu'), + ]) + self.conv3 = Conv2D(16, 1) + + self.skipconnect = InputChoice(n_candidates=1) + self.bn = BatchNormalization() + + self.gap = AveragePooling2D(2) + self.fc1 = Dense(120, activation='relu') + self.fc2 = Dense(84, activation='relu') + self.fc3 = Dense(10) + + def call(self, x): + bs = x.shape[0] + + t = self.conv1(x) + x = self.pool(t) + x0 = self.conv2(x) + x1 = self.conv3(x0) + + x0 = self.skipconnect([x0]) + if x0 is not None: + x1 += x0 + x = self.pool(self.bn(x1)) + + x = self.gap(x) + x = tf.reshape(x, [bs, -1]) + x = self.fc1(x) + x = self.fc2(x) + x = self.fc3(x) + return x + + +def accuracy(truth, logits): + truth = tf.reshape(truth, (-1, )) + predicted = tf.cast(tf.math.argmax(logits, axis=1), truth.dtype) + equal = tf.cast(predicted == truth, tf.int32) + return tf.math.reduce_sum(equal).numpy() / equal.shape[0] + +def accuracy_metrics(truth, logits): + acc = accuracy(truth, logits) + return {'accuracy': acc} + + +if __name__ == '__main__': + cifar10 = tf.keras.datasets.cifar10 + (x_train, y_train), (x_valid, y_valid) = cifar10.load_data() + x_train, x_valid = x_train / 255.0, x_valid / 255.0 + train_set = (x_train, y_train) + valid_set = (x_valid, y_valid) + + net = Net() + + trainer = EnasTrainer( + net, + loss=SparseCategoricalCrossentropy(from_logits=True, reduction=Reduction.NONE), + metrics=accuracy_metrics, + reward_function=accuracy, + optimizer=SGD(learning_rate=0.001, momentum=0.9), + batch_size=64, + num_epochs=2, + dataset_train=train_set, + dataset_valid=valid_set + ) + + trainer.train() diff --git a/examples/nas/oneshot/naive/.gitignore b/examples/nas/oneshot/naive/.gitignore new file mode 100644 index 
0000000000000000000000000000000000000000..2c68a240baa5e07fc458e2983fa87688b48dfade --- /dev/null +++ b/examples/nas/oneshot/naive/.gitignore @@ -0,0 +1 @@ +checkpoint.json diff --git a/examples/nas/oneshot/naive/README.md b/examples/nas/oneshot/naive/README.md new file mode 100644 index 0000000000000000000000000000000000000000..871d7f0fdd3eb3c72ef703103ab246ff3b83347e --- /dev/null +++ b/examples/nas/oneshot/naive/README.md @@ -0,0 +1 @@ +This is a naive example that demonstrates how to use NNI interface to implement a NAS search space. \ No newline at end of file diff --git a/examples/nas/oneshot/naive/README_zh_CN.md b/examples/nas/oneshot/naive/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..86c93d519d7c4ba0211c15c95e665c7e42e4aa62 --- /dev/null +++ b/examples/nas/oneshot/naive/README_zh_CN.md @@ -0,0 +1 @@ +这是一个简单示例,演示如何使用 NNI 接口实现 NAS 搜索空间。 \ No newline at end of file diff --git a/examples/nas/oneshot/naive/train.py b/examples/nas/oneshot/naive/train.py new file mode 100644 index 0000000000000000000000000000000000000000..eddab1c914c35cf7bbda1e52ad4f66c42029b007 --- /dev/null +++ b/examples/nas/oneshot/naive/train.py @@ -0,0 +1,73 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms + +from nni.nas.pytorch.mutables import LayerChoice, InputChoice +from nni.algorithms.nas.pytorch.darts import DartsTrainer + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)]) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)]) + self.conv3 = nn.Conv2d(16, 16, 1) + + self.skipconnect = InputChoice(n_candidates=1) + self.bn = nn.BatchNorm2d(16) + + self.gap = nn.AdaptiveAvgPool2d(4) + self.fc1 = nn.Linear(16 * 4 * 4, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + bs = x.size(0) + + x = self.pool(F.relu(self.conv1(x))) + x0 = F.relu(self.conv2(x)) + x1 = F.relu(self.conv3(x0)) + + x0 = self.skipconnect([x0]) + if x0 is not None: + x1 += x0 + x = self.pool(self.bn(x1)) + + x = self.gap(x).view(bs, -1) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +def accuracy(output, target): + batch_size = target.size(0) + _, predicted = torch.max(output.data, 1) + return {"acc1": (predicted == target).sum().item() / batch_size} + + +if __name__ == "__main__": + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + dataset_train = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform) + dataset_valid = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform) + + net = Net() + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + + trainer = DartsTrainer(net, + loss=criterion, + metrics=accuracy, + optimizer=optimizer, + num_epochs=2, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + batch_size=64, + log_frequency=10) + trainer.enable_visualization() + trainer.train() + trainer.export("checkpoint.json") diff --git a/examples/nas/oneshot/pfld/__init__.py b/examples/nas/oneshot/pfld/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/examples/nas/oneshot/pfld/datasets.py b/examples/nas/oneshot/pfld/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..41564f60445cfb76cf2323483bfc976bd47dd57f --- /dev/null +++ b/examples/nas/oneshot/pfld/datasets.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import cv2 +import os + +import numpy as np + +from torch.utils import data + + +class PFLDDatasets(data.Dataset): + """ Dataset to manage the data loading, augmentation and generation. """ + + def __init__(self, file_list, transforms=None, data_root="", img_size=112): + """ + Parameters + ---------- + file_list : list + a list of file path and annotations + transforms : function + function for data augmentation + data_root : str + the root path of dataset + img_size : int + the size of image height or width + """ + self.line = None + self.path = None + self.img_size = img_size + self.land = None + self.angle = None + self.data_root = data_root + self.transforms = transforms + with open(file_list, "r") as f: + self.lines = f.readlines() + + def __getitem__(self, index): + """ Get the data sample and labels with the index. """ + self.line = self.lines[index].strip().split() + # load image + if self.data_root: + self.img = cv2.imread(os.path.join(self.data_root, self.line[0])) + else: + self.img = cv2.imread(self.line[0]) + # resize + self.img = cv2.resize(self.img, (self.img_size, self.img_size)) + # obtain gt labels + self.land = np.asarray(self.line[1: (106 * 2 + 1)], dtype=np.float32) + self.angle = np.asarray(self.line[(106 * 2 + 1):], dtype=np.float32) + + # augmentation + if self.transforms: + self.img = self.transforms(self.img) + + return self.img, self.land, self.angle + + def __len__(self): + """ Get the size of dataset. """ + return len(self.lines) diff --git a/examples/nas/oneshot/pfld/export.py b/examples/nas/oneshot/pfld/export.py new file mode 100644 index 0000000000000000000000000000000000000000..78ab54d6d9f3568f0894096e2e6c0c12cbf8c0f3 --- /dev/null +++ b/examples/nas/oneshot/pfld/export.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
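+# Example invocation (illustrative; checkpoint paths are placeholders):
+#     python export.py --supernet ./models/supernet/checkpoint_best.pth \
+#         --resume ./models/subnet/checkpoint_best.pth
+# The supernet checkpoint supplies the sampled architecture ("arch_sample");
+# --resume points at the fine-tuned subnet weights to export.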
+ +from __future__ import absolute_import, division, print_function + +import argparse +import onnx +import onnxsim +import os +import torch + +from lib.builder import search_space +from lib.ops import PRIMITIVES +from nni.algorithms.nas.pytorch.fbnet import ( + LookUpTable, + NASConfig, + model_init, +) + + +parser = argparse.ArgumentParser(description="Export the ONNX model") +parser.add_argument("--net", default="subnet", type=str) +parser.add_argument("--supernet", default="", type=str, metavar="PATH") +parser.add_argument("--resume", default="", type=str, metavar="PATH") +parser.add_argument("--num_points", default=106, type=int) +parser.add_argument("--img_size", default=112, type=int) +parser.add_argument("--onnx", default="./output/pfld.onnx", type=str) +parser.add_argument("--onnx_sim", default="./output/subnet.onnx", type=str) +args = parser.parse_args() + +os.makedirs("./output", exist_ok=True) + +if args.net == "subnet": + from lib.subnet import PFLDInference +else: + raise ValueError("Network is not implemented") + +check = torch.load(args.supernet, map_location=torch.device("cpu")) +sampled_arch = check["arch_sample"] + +nas_config = NASConfig(search_space=search_space) +lookup_table = LookUpTable(config=nas_config, primitives=PRIMITIVES) +pfld_backbone = PFLDInference(lookup_table, sampled_arch, args.num_points) + +pfld_backbone.eval() +check_sub = torch.load(args.resume, map_location=torch.device("cpu")) +param_dict = check_sub["pfld_backbone"] +model_init(pfld_backbone, param_dict) + +print("Convert PyTorch model to ONNX.") +dummy_input = torch.randn(1, 3, args.img_size, args.img_size) +input_names = ["input"] +output_names = ["output"] +torch.onnx.export( + pfld_backbone, + dummy_input, + args.onnx, + verbose=True, + input_names=input_names, + output_names=output_names, +) + +print("Check ONNX model.") +model = onnx.load(args.onnx) + +print("Simplifying the ONNX model.") +model_opt, check = onnxsim.simplify(args.onnx) +assert check, "Simplified ONNX model could not be validated" +onnx.save(model_opt, args.onnx_sim) +print("Onnx model simplify Ok!") diff --git a/examples/nas/oneshot/pfld/lib/__init__.py b/examples/nas/oneshot/pfld/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/nas/oneshot/pfld/lib/builder.py b/examples/nas/oneshot/pfld/lib/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4a5527dcb836dbf6ff92a502ec548bc9912d27 --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/builder.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
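+# A rough sense of scale (derived from the dict below): stage_0 offers
+# 9 candidate ops for each of its 2 layers and stage_1 offers 9 candidates
+# for each of its 3 layers, so a sampled subnet is one of 9 ** 5 = 59049
+# possible op sequences over the 5 searchable layers.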
+ +from __future__ import absolute_import, division, print_function + + +search_space = { + # multi-stage definition for candidate layers + # here two stages are defined for PFLD searching + "stages": { + "stage_0": { + "ops": [ + "mb_k3_res", + "mb_k3_e2_res", + "mb_k3_res_d3", + "mb_k5_res", + "mb_k5_e2_res", + "sep_k3", + "sep_k5", + "gh_k3", + "gh_k5", + ], + "layer_num": 2, + }, + "stage_1": { + "ops": [ + "mb_k3_e2_res", + "mb_k3_e4_res", + "mb_k3_e2_res_se", + "mb_k3_res_d3", + "mb_k5_res", + "mb_k5_e2_res", + "mb_k5_res_se", + "mb_k5_e2_res_se", + "gh_k5", + ], + "layer_num": 3, + }, + }, + # necessary information of layers for NAS + # the basic information is as (input_channels, height, width) + "input_shape": [ + (32, 14, 14), + (32, 14, 14), + (32, 14, 14), + (64, 7, 7), + (64, 7, 7), + ], + # output channels for each layer + "channel_size": [32, 32, 64, 64, 64], + # stride for each layer + "strides": [1, 1, 2, 1, 1], + # height of feature map for each layer + "fm_size": [14, 14, 7, 7, 7], +} diff --git a/examples/nas/oneshot/pfld/lib/ops.py b/examples/nas/oneshot/pfld/lib/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..22478771095c69eb708c44e6df806a5d979a617d --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/ops.py @@ -0,0 +1,456 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import torch + +import torch.nn as nn +import torch.nn.functional as F + + +# Basic primitives as the network path +PRIMITIVES = { + "skip": lambda c_in, c_out, stride, **kwargs: Identity( + c_in, c_out, stride, **kwargs + ), + "conv1x1": lambda c_in, c_out, stride, **kwargs: Conv1x1( + c_in, c_out, stride, **kwargs + ), + "depth_conv": lambda c_in, c_out, stride, **kwargs: DepthConv( + c_in, c_out, stride, **kwargs + ), + "sep_k3": lambda c_in, c_out, stride, **kwargs: SeparableConv( + c_in, c_out, stride, **kwargs + ), + "sep_k5": lambda c_in, c_out, stride, **kwargs: SeparableConv( + c_in, c_out, stride, kernel=5, **kwargs + ), + "gh_k3": lambda c_in, c_out, stride, **kwargs: GhostModule( + c_in, c_out, stride, **kwargs + ), + "gh_k5": lambda c_in, c_out, stride, **kwargs: GhostModule( + c_in, c_out, stride, kernel=5, **kwargs + ), + "mb_k3": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=1, **kwargs + ), + "mb_k3_e2": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=2, **kwargs + ), + "mb_k3_e4": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=4, **kwargs + ), + "mb_k3_res": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=1, res=True, **kwargs + ), + "mb_k3_e2_res": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=2, res=True, **kwargs + ), + "mb_k3_e4_res": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=3, expand=4, res=True, **kwargs + ), + "mb_k3_d2": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=2, + res=False, + dilation=2, + **kwargs, + ), + "mb_k3_d3": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=2, + res=False, + dilation=3, + **kwargs, + ), + "mb_k3_res_d2": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=2, + res=True, + dilation=2, + **kwargs, + ), + "mb_k3_res_d3": lambda c_in, c_out, stride, 
**kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=2, + res=True, + dilation=3, + **kwargs, + ), + "mb_k3_res_se": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=1, + res=True, + dilation=1, + se=True, + **kwargs, + ), + "mb_k3_e2_res_se": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=2, + res=True, + dilation=1, + se=True, + **kwargs, + ), + "mb_k3_e4_res_se": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=3, + expand=4, + res=True, + dilation=1, + se=True, + **kwargs, + ), + "mb_k5": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=5, expand=1, **kwargs + ), + "mb_k5_e2": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=5, expand=2, **kwargs + ), + "mb_k5_res": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=5, expand=1, res=True, **kwargs + ), + "mb_k5_e2_res": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, c_out, stride, kernel=5, expand=2, res=True, **kwargs + ), + "mb_k5_res_se": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=5, + expand=1, + res=True, + dilation=1, + se=True, + **kwargs, + ), + "mb_k5_e2_res_se": lambda c_in, c_out, stride, **kwargs: MBBlock( + c_in, + c_out, + stride, + kernel=5, + expand=2, + res=True, + dilation=1, + se=True, + **kwargs, + ), +} + + +def conv_bn(inp, oup, kernel, stride, pad=1, groups=1): + return nn.Sequential( + nn.Conv2d(inp, oup, kernel, stride, pad, groups=groups, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + +class SeparableConv(nn.Module): + """Separable convolution.""" + + def __init__(self, in_ch, out_ch, stride=1, kernel=3, fm_size=7): + super(SeparableConv, self).__init__() + assert stride in [1, 2], "stride should be in [1, 2]" + pad = kernel // 2 + + self.conv = nn.Sequential( + conv_bn(in_ch, in_ch, kernel, stride, pad=pad, groups=in_ch), + conv_bn(in_ch, out_ch, 1, 1, pad=0), + ) + + def forward(self, x): + return self.conv(x) + + +class Conv1x1(nn.Module): + """1x1 convolution.""" + + def __init__(self, in_ch, out_ch, stride=1, kernel=1, fm_size=7): + super(Conv1x1, self).__init__() + assert stride in [1, 2], "stride should be in [1, 2]" + padding = kernel // 2 + + self.conv = nn.Sequential( + nn.Conv2d(in_ch, out_ch, kernel, stride, padding), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + return self.conv(x) + + +class DepthConv(nn.Module): + """depth convolution.""" + + def __init__(self, in_ch, out_ch, stride=1, kernel=3, fm_size=7): + super(DepthConv, self).__init__() + assert stride in [1, 2], "stride should be in [1, 2]" + padding = kernel // 2 + + self.conv = nn.Sequential( + nn.Conv2d(in_ch, in_ch, kernel, stride, padding, groups=in_ch), + nn.ReLU(inplace=True), + nn.Conv2d(in_ch, out_ch, 1, 1, 0), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + return self.conv(x) + + +class GhostModule(nn.Module): + """Gost module.""" + + def __init__(self, in_ch, out_ch, stride=1, kernel=3, fm_size=7): + super(GhostModule, self).__init__() + mid_ch = out_ch // 2 + self.primary_conv = conv_bn(in_ch, mid_ch, 1, stride, pad=0) + self.cheap_operation = conv_bn( + mid_ch, mid_ch, kernel, 1, kernel // 2, mid_ch + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + return torch.cat([x1, x2], dim=1) + + +class StemBlock(nn.Module): + def __init__(self, in_ch=3, init_ch=32, bottleneck=True): + 
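+ # Stem: a stride-2 3x3 conv, then two parallel branches (a 1x1 conv into a
+ # stride-2 separable conv, and a 2x2 max-pool) whose outputs are concatenated
+ # and fused by a 1x1 conv, giving 4x downsampling overall.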
super(StemBlock, self).__init__() + self.stem_1 = conv_bn(in_ch, init_ch, 3, 2, 1) + mid_ch = int(init_ch // 2) if bottleneck else init_ch + self.stem_2a = conv_bn(init_ch, mid_ch, 1, 1, 0) + self.stem_2b = SeparableConv(mid_ch, init_ch, 2, 1) + self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2) + self.stem_3 = conv_bn(init_ch * 2, init_ch, 1, 1, 0) + + def forward(self, x): + stem_1_out = self.stem_1(x) + + stem_2a_out = self.stem_2a(stem_1_out) + stem_2b_out = self.stem_2b(stem_2a_out) + + stem_2p_out = self.stem_2p(stem_1_out) + + out = self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1)) + return out, stem_1_out + + +class Identity(nn.Module): + """ Identity module.""" + + def __init__(self, in_ch, out_ch, stride=1, fm_size=7): + super(Identity, self).__init__() + self.conv = ( + conv_bn(in_ch, out_ch, kernel=1, stride=stride, pad=0) + if in_ch != out_ch or stride != 1 + else None + ) + + def forward(self, x): + if self.conv: + out = self.conv(x) + else: + out = x + # Add dropout to avoid overfit on Identity (PDARTS) + out = nn.functional.dropout(out, p=0.5) + return out + + +class Hsigmoid(nn.Module): + """Hsigmoid activation function.""" + + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 + + +class eSEModule(nn.Module): + """ The improved SE Module.""" + + def __init__(self, channel, fm_size=7, se=True): + super(eSEModule, self).__init__() + self.se = se + + if self.se: + self.avg_pool = nn.Conv2d( + channel, channel, fm_size, 1, 0, groups=channel, bias=False + ) + self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) + self.hsigmoid = Hsigmoid() + + def forward(self, x): + if self.se: + input = x + x = self.avg_pool(x) + x = self.fc(x) + x = self.hsigmoid(x) + return input * x + else: + return x + + +class ChannelShuffle(nn.Module): + """Procedure: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W].""" + + def __init__(self, groups): + super(ChannelShuffle, self).__init__() + self.groups = groups + + def forward(self, x): + if self.groups == 1: + return x + + N, C, H, W = x.size() + g = self.groups + assert C % g == 0, "group size {} is not for channel {}".format(g, C) + return ( + x.view(N, g, int(C // g), H, W) + .permute(0, 2, 1, 3, 4) + .contiguous() + .view(N, C, H, W) + ) + + +class MBBlock(nn.Module): + """The Inverted Residual Block, with channel shuffle or eSEModule.""" + + def __init__( + self, + in_ch, + out_ch, + stride=1, + kernel=3, + expand=1, + res=False, + dilation=1, + se=False, + fm_size=7, + group=1, + mid_ch=-1, + ): + super(MBBlock, self).__init__() + assert stride in [1, 2], "stride should be in [1, 2]" + assert kernel in [3, 5], "kernel size should be in [3, 5]" + assert dilation in [1, 2, 3, 4], "dilation should be in [1, 2, 3, 4]" + assert group in [1, 2], "group should be in [1, 2]" + + self.use_res_connect = res and (stride == 1) + padding = kernel // 2 + (dilation - 1) + mid_ch = mid_ch if mid_ch > 0 else (in_ch * expand) + + # Basic Modules + conv_layer = nn.Conv2d + norm_layer = nn.BatchNorm2d + activation_layer = nn.ReLU + channel_suffle = ChannelShuffle + se_layer = eSEModule + + self.ir_block = nn.Sequential( + # pointwise convolution + conv_layer(in_ch, mid_ch, 1, 1, 0, bias=False, groups=group), + norm_layer(mid_ch), + activation_layer(inplace=True), + # channel shuffle if necessary + channel_suffle(group), + # depthwise convolution + conv_layer( + mid_ch, + mid_ch, + kernel, + stride, + padding=padding, 
+ dilation=dilation, + groups=mid_ch, + bias=False, + ), + norm_layer(mid_ch), + # eSEModule if necessary + se_layer(mid_ch, fm_size, se), + activation_layer(inplace=True), + # pointwise convolution + conv_layer(mid_ch, out_ch, 1, 1, 0, bias=False, groups=group), + norm_layer(out_ch), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.ir_block(x) + else: + return self.ir_block(x) + + +class SingleOperation(nn.Module): + """Single operation for sampled path.""" + + def __init__(self, layers_configs, stage_ops, sampled_op=""): + """ + Parameters + ---------- + layers_configs : list + the layer config: [input_channel, output_channel, stride, height] + stage_ops : dict + the pairs of op name and layer operator + sampled_op : str + the searched layer name + """ + super(SingleOperation, self).__init__() + fm = {"fm_size": layers_configs[3]} + ops_names = [op_name for op_name in stage_ops] + sampled_op = sampled_op if sampled_op else ops_names[0] + + # define the single op + self.op = stage_ops[sampled_op](*layers_configs[0:3], **fm) + + def forward(self, x): + return self.op(x) + + +def choice_blocks(layers_configs, stage_ops): + """ + Create list of layer candidates for NNI one-shot NAS. + + Parameters + ---------- + layers_configs : list + the layer config: [input_channel, output_channel, stride, height] + stage_ops : dict + the pairs of op name and layer operator + + Returns + ------- + output: list + list of layer operators + """ + ops_names = [op for op in stage_ops] + fm = {"fm_size": layers_configs[3]} + op_list = [stage_ops[op](*layers_configs[0:3], **fm) for op in ops_names] + + return op_list diff --git a/examples/nas/oneshot/pfld/lib/subnet.py b/examples/nas/oneshot/pfld/lib/subnet.py new file mode 100644 index 0000000000000000000000000000000000000000..b179d9a64a5681c66c18df4fb98dc9ebe91094db --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/subnet.py @@ -0,0 +1,164 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import torch +import torch.nn as nn + +from lib.ops import ( + MBBlock, + SeparableConv, + SingleOperation, + StemBlock, + conv_bn, +) +from torch.nn import init + +INIT_CH = 16 + + +class PFLDInference(nn.Module): + """ The subnet with the architecture of PFLD. 
""" + + def __init__(self, lookup_table, sampled_ops, num_points=106): + """ + Parameters + ---------- + lookup_table : class + to manage the candidate ops, layer information and layer perf + sampled_ops : list of str + the searched layer names of the subnet + num_points : int + the number of landmarks for prediction + """ + super(PFLDInference, self).__init__() + + stage_names = [stage_name for stage_name in lookup_table.layer_num] + stage_n = [lookup_table.layer_num[stage] for stage in stage_names] + self.stem = StemBlock(init_ch=INIT_CH, bottleneck=False) + + self.block4_1 = MBBlock(INIT_CH, 32, stride=2, mid_ch=32) + stages_0 = [ + SingleOperation( + lookup_table.layer_configs[layer_id], + lookup_table.lut_ops[stage_names[0]], + sampled_ops[layer_id], + ) + for layer_id in range(stage_n[0]) + ] + + stages_1 = [ + SingleOperation( + lookup_table.layer_configs[layer_id], + lookup_table.lut_ops[stage_names[1]], + sampled_ops[layer_id], + ) + for layer_id in range(stage_n[0], stage_n[0] + stage_n[1]) + ] + + blocks = stages_0 + stages_1 + self.blocks = nn.Sequential(*blocks) + + self.avg_pool1 = nn.Conv2d( + INIT_CH, INIT_CH, 9, 8, 1, groups=INIT_CH, bias=False + ) + self.avg_pool2 = nn.Conv2d(32, 32, 3, 2, 1, groups=32, bias=False) + + self.block6_1 = nn.Conv2d(96 + INIT_CH, 64, 1, 1, 0, bias=False) + self.block6_2 = MBBlock(64, 64, res=True, se=True, mid_ch=128) + self.block6_3 = SeparableConv(64, 128, 1) + + self.conv7 = nn.Conv2d(128, 128, 7, 1, 0, groups=128, bias=False) + self.fc = nn.Conv2d(128, num_points * 2, 1, 1, 0, bias=True) + + # init params + self.init_params() + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode="fan_out") + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, x): + """ + Parameters + ---------- + x : tensor + input image + + Returns + ------- + output: tensor + the predicted landmarks + output: tensor + the intermediate features + """ + x, y1 = self.stem(x) + out1 = x + + x = self.block4_1(x) + for i, block in enumerate(self.blocks): + x = block(x) + if i == 1: + y2 = x + elif i == 4: + y3 = x + + y1 = self.avg_pool1(y1) + y2 = self.avg_pool2(y2) + multi_scale = torch.cat([y3, y2, y1], 1) + + y = self.block6_1(multi_scale) + y = self.block6_2(y) + y = self.block6_3(y) + y = self.conv7(y) + landmarks = self.fc(y) + + return landmarks, out1 + + +class AuxiliaryNet(nn.Module): + """ AuxiliaryNet to predict pose angles. 
""" + + def __init__(self): + super(AuxiliaryNet, self).__init__() + self.conv1 = conv_bn(INIT_CH, 64, 3, 2) + self.conv2 = conv_bn(64, 64, 3, 1) + self.conv3 = conv_bn(64, 32, 3, 2) + self.conv4 = conv_bn(32, 64, 7, 1) + self.max_pool1 = nn.MaxPool2d(3) + self.fc1 = nn.Linear(64, 32) + self.fc2 = nn.Linear(32, 3) + + def forward(self, x): + """ + Parameters + ---------- + x : tensor + input intermediate features + + Returns + ------- + output: tensor + the predicted pose angles + """ + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.conv4(x) + x = self.max_pool1(x) + x = x.view(x.size(0), -1) + x = self.fc1(x) + x = self.fc2(x) + + return x diff --git a/examples/nas/oneshot/pfld/lib/supernet.py b/examples/nas/oneshot/pfld/lib/supernet.py new file mode 100644 index 0000000000000000000000000000000000000000..df3015413ee782338dbf2b07f893c1dcc0574572 --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/supernet.py @@ -0,0 +1,164 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import torch +import torch.nn as nn + +from lib.ops import ( + MBBlock, + SeparableConv, + StemBlock, + choice_blocks, + conv_bn, +) +from nni.nas.pytorch import mutables +from torch.nn import init + +INIT_CH = 16 + + +class PFLDInference(nn.Module): + """ PFLD model for facial landmark.""" + + def __init__(self, lookup_table, num_points=106): + """ + Parameters + ---------- + lookup_table : class + to manage the candidate ops, layer information and layer perf + num_points : int + the number of landmarks for prediction + """ + super(PFLDInference, self).__init__() + + stage_names = [stage for stage in lookup_table.layer_num] + stage_lnum = [lookup_table.layer_num[stage] for stage in stage_names] + self.stem = StemBlock(init_ch=INIT_CH, bottleneck=False) + + self.block4_1 = MBBlock(INIT_CH, 32, stride=2, mid_ch=32) + + stages_0 = [ + mutables.LayerChoice( + choice_blocks( + lookup_table.layer_configs[layer_id], + lookup_table.lut_ops[stage_names[0]], + ) + ) + for layer_id in range(stage_lnum[0]) + ] + stages_1 = [ + mutables.LayerChoice( + choice_blocks( + lookup_table.layer_configs[layer_id], + lookup_table.lut_ops[stage_names[1]], + ) + ) + for layer_id in range(stage_lnum[0], stage_lnum[0] + stage_lnum[1]) + ] + blocks = stages_0 + stages_1 + self.blocks = nn.Sequential(*blocks) + + self.avg_pool1 = nn.Conv2d( + INIT_CH, INIT_CH, 9, 8, 1, groups=INIT_CH, bias=False + ) + self.avg_pool2 = nn.Conv2d(32, 32, 3, 2, 1, groups=32, bias=False) + + self.block6_1 = nn.Conv2d(96 + INIT_CH, 64, 1, 1, 0, bias=False) + self.block6_2 = MBBlock(64, 64, res=True, se=True, mid_ch=128) + self.block6_3 = SeparableConv(64, 128, 1) + + self.conv7 = nn.Conv2d(128, 128, 7, 1, 0, groups=128, bias=False) + self.fc = nn.Conv2d(128, num_points * 2, 1, 1, 0, bias=True) + + # init params + self.init_params() + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode="fan_out") + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, x): + """ + Parameters + ---------- + x : tensor + input image + + Returns + ------- + output: tensor + the predicted landmarks + output: tensor + the intermediate features + """ + x, y1 = self.stem(x) + 
out1 = x + + x = self.block4_1(x) + for i, block in enumerate(self.blocks): + x = block(x) + if i == 1: + y2 = x + elif i == 4: + y3 = x + + y1 = self.avg_pool1(y1) + y2 = self.avg_pool2(y2) + multi_scale = torch.cat([y3, y2, y1], 1) + + y = self.block6_1(multi_scale) + y = self.block6_2(y) + y = self.block6_3(y) + y = self.conv7(y) + landmarks = self.fc(y) + + return landmarks, out1 + + +class AuxiliaryNet(nn.Module): + """ AuxiliaryNet to predict pose angles. """ + + def __init__(self): + super(AuxiliaryNet, self).__init__() + self.conv1 = conv_bn(INIT_CH, 64, 3, 2) + self.conv2 = conv_bn(64, 64, 3, 1) + self.conv3 = conv_bn(64, 32, 3, 2) + self.conv4 = conv_bn(32, 64, 7, 1) + self.max_pool1 = nn.MaxPool2d(3) + self.fc1 = nn.Linear(64, 32) + self.fc2 = nn.Linear(32, 3) + + def forward(self, x): + """ + Parameters + ---------- + x : tensor + input intermediate features + + Returns + ------- + output: tensor + the predicted pose angles + """ + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.conv4(x) + x = self.max_pool1(x) + x = x.view(x.size(0), -1) + x = self.fc1(x) + x = self.fc2(x) + + return x diff --git a/examples/nas/oneshot/pfld/lib/trainer.py b/examples/nas/oneshot/pfld/lib/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..17321aa2ee0fba371af803493b34489a7edb5587 --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/trainer.py @@ -0,0 +1,297 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import os +import time +import torch + +import numpy as np + +from nni.algorithms.nas.pytorch.fbnet import FBNetTrainer +from nni.nas.pytorch.utils import AverageMeter +from .utils import accuracy + + +class PFLDTrainer(FBNetTrainer): + def __init__( + self, + model, + auxiliarynet, + model_optim, + criterion, + device, + device_ids, + config, + lookup_table, + train_loader, + valid_loader, + n_epochs=300, + load_ckpt=False, + arch_path=None, + logger=None, + ): + """ + Parameters + ---------- + model : pytorch model + the user model, which has mutables + auxiliarynet : pytorch model + the auxiliarynet to regress angle + model_optim : pytorch optimizer + the user defined optimizer + criterion : pytorch loss + the main task loss + device : pytorch device + the devices to train/search the model + device_ids : list of int + the indexes of devices used for training + config : class + configuration object for fbnet training + lookup_table : class + lookup table object for fbnet training + train_loader : pytorch data loader + data loader for the training set + valid_loader : pytorch data loader + data loader for the validation set + n_epochs : int + number of epochs to train/search + load_ckpt : bool + whether load checkpoint + arch_path : str + the path to store chosen architecture + logger : logger + the logger + """ + + super(PFLDTrainer, self).__init__( + model, + model_optim, + criterion, + device, + device_ids, + lookup_table, + train_loader, + valid_loader, + n_epochs, + load_ckpt, + arch_path, + logger, + ) + + # DataParallel of the AuxiliaryNet to PFLD + self.auxiliarynet = auxiliarynet + self.auxiliarynet = torch.nn.DataParallel( + self.auxiliarynet, device_ids=device_ids + ) + self.auxiliarynet.to(device) + + def _validate(self): + """ + Do validation. During validation, LayerChoices use the mixed-op. 
+ + Returns + ------- + float, float + average loss, average nme + """ + + # test on validation set under eval mode + self.model.eval() + self.auxiliarynet.eval() + + losses, nme = list(), list() + batch_time = AverageMeter("batch_time") + end = time.time() + with torch.no_grad(): + for i, (img, land_gt, angle_gt) in enumerate(self.valid_loader): + img = img.to(self.device, non_blocking=True) + landmark_gt = land_gt.to(self.device, non_blocking=True) + angle_gt = angle_gt.to(self.device, non_blocking=True) + + landmark, _ = self.model(img) + + # compute the l2 loss + landmark = landmark.squeeze() + l2_diff = torch.sum((landmark_gt - landmark) ** 2, axis=1) + loss = torch.mean(l2_diff) + losses.append(loss.cpu().detach().numpy()) + + # compute the accuracy + landmark = landmark.cpu().detach().numpy() + landmark = landmark.reshape(landmark.shape[0], -1, 2) + landmark_gt = landmark_gt.cpu().detach().numpy() + landmark_gt = landmark_gt.reshape(landmark_gt.shape[0], -1, 2) + _, nme_i = accuracy(landmark, landmark_gt) + for item in nme_i: + nme.append(item) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + self.logger.info("===> Evaluate:") + self.logger.info( + "Eval set: Average loss: {:.4f} nme: {:.4f}".format( + np.mean(losses), np.mean(nme) + ) + ) + return np.mean(losses), np.mean(nme) + + def _train_epoch(self, epoch, optimizer, arch_train=False): + """ + Train one epoch. + """ + # switch to train mode + self.model.train() + self.auxiliarynet.train() + + batch_time = AverageMeter("batch_time") + data_time = AverageMeter("data_time") + losses = AverageMeter("losses") + + data_loader = self.valid_loader if arch_train else self.train_loader + end = time.time() + for i, (img, landmark_gt, angle_gt) in enumerate(data_loader): + data_time.update(time.time() - end) + img = img.to(self.device, non_blocking=True) + landmark_gt = landmark_gt.to(self.device, non_blocking=True) + angle_gt = angle_gt.to(self.device, non_blocking=True) + + lands, feats = self.model(img) + landmarks = lands.squeeze() + angle = self.auxiliarynet(feats) + + # task loss + weighted_loss, l2_loss = self.criterion( + landmark_gt, angle_gt, angle, landmarks + ) + loss = l2_loss if arch_train else weighted_loss + + # hardware-aware loss + perf_cost = self._get_perf_cost(requires_grad=True) + regu_loss = self.reg_loss(perf_cost) + if self.mode.startswith("mul"): + loss = loss * regu_loss + elif self.mode.startswith("add"): + loss = loss + regu_loss + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + # measure accuracy and record loss + losses.update(np.squeeze(loss.cpu().detach().numpy()), img.size(0)) + + if i % 10 == 0: + batch_log = ( + "Train [{0}][{1}]\t" + "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" + "Data {data_time.val:.3f} ({data_time.avg:.3f})\t" + "Loss {losses.val:.4f} ({losses.avg:.4f})".format( + epoch + 1, + i, + batch_time=batch_time, + data_time=data_time, + losses=losses, + ) + ) + self.logger.info(batch_log) + + def _warm_up(self): + """ + Warm up the model, while the architecture weights are not trained. 
+ """ + for epoch in range(self.epoch, self.start_epoch): + self.logger.info("\n--------Warmup epoch: %d--------\n", epoch + 1) + self._train_epoch(epoch, self.model_optim) + # adjust learning rate + self.scheduler.step() + + # validation + _, _ = self._validate() + if epoch % 10 == 0: + filename = os.path.join( + self.config.model_dir, "checkpoint_%s.pth" % epoch + ) + self.save_checkpoint(epoch, filename) + + def _train(self): + """ + Train the model, it trains model weights and architecute weights. + Architecture weights are trained according to the schedule. + Before updating architecture weights, ```requires_grad``` is enabled. + Then, it is disabled after the updating, in order not to update + architecture weights when training model weights. + """ + arch_param_num = self.mutator.num_arch_params() + self.logger.info("#arch_params: {}".format(arch_param_num)) + self.epoch = max(self.start_epoch, self.epoch) + + ckpt_path = self.config.model_dir + choice_names = None + val_nme = 1e6 + + for epoch in range(self.epoch, self.n_epochs): + # update the weight parameters + self.logger.info("\n--------Train epoch: %d--------\n", epoch + 1) + self._train_epoch(epoch, self.model_optim) + # adjust learning rate + self.scheduler.step() + + # update the architecture parameters + self.logger.info("Update architecture parameters") + self.mutator.arch_requires_grad() + self._train_epoch(epoch, self.arch_optimizer, True) + self.mutator.arch_disable_grad() + # temperature annealing + self.temp = self.temp * self.exp_anneal_rate + self.mutator.set_temperature(self.temp) + # sample the architecture of sub-network + choice_names = self._layer_choice_sample() + + # validate + _, nme = self._validate() + + if epoch % 10 == 0: + filename = os.path.join(ckpt_path, "checkpoint_%s.pth" % epoch) + self.save_checkpoint(epoch, filename, choice_names) + + if nme < val_nme: + filename = os.path.join(ckpt_path, "checkpoint_best.pth") + self.save_checkpoint(epoch, filename, choice_names) + val_nme = nme + self.logger.info("Best nme: {:.4f}".format(val_nme)) + + def save_checkpoint(self, epoch, filename, choice_names=None): + """ + Save checkpoint of the whole model. + Saving model weights and architecture weights as ```filename```, + and saving currently chosen architecture in ```arch_path```. + """ + state = { + "pfld_backbone": self.model.state_dict(), + "auxiliarynet": self.auxiliarynet.state_dict(), + "optim": self.model_optim.state_dict(), + "epoch": epoch, + "arch_sample": choice_names, + } + torch.save(state, filename) + self.logger.info("Save checkpoint to {0:}".format(filename)) + + if self.arch_path: + self.export(self.arch_path) + + def load_checkpoint(self, filename): + """ + Load the checkpoint from ```filename```. + """ + ckpt = torch.load(filename) + self.epoch = ckpt["epoch"] + self.model.load_state_dict(ckpt["pfld_backbone"]) + self.auxiliarynet.load_state_dict(ckpt["auxiliarynet"]) + self.model_optim.load_state_dict(ckpt["optim"]) diff --git a/examples/nas/oneshot/pfld/lib/utils.py b/examples/nas/oneshot/pfld/lib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..732a54277ebcb5bf53ff655f274e51d513bf869b --- /dev/null +++ b/examples/nas/oneshot/pfld/lib/utils.py @@ -0,0 +1,127 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import torch + +import numpy as np +import torch.nn as nn + + +def accuracy(preds, target): + """ + Calculate the NME (Normalized Mean Error). 
+ + Parameters + ---------- + preds : numpy array + the predicted landmarks + target : numpy array + the ground truth of landmarks + + Returns + ------- + output: float32 + the nme value + output: list + the list of l2 distances + """ + N = preds.shape[0] + L = preds.shape[1] + rmse = np.zeros(N).astype(np.float32) + + for i in range(N): + pts_pred, pts_gt = ( + preds[i], + target[i], + ) + if L == 19: + # aflw + interocular = 34 + elif L == 29: + # cofw + interocular = np.linalg.norm(pts_gt[8] - pts_gt[9]) + elif L == 68: + # interocular + interocular = np.linalg.norm(pts_gt[36] - pts_gt[45]) + elif L == 98: + # euclidean dis from left eye to right eye + interocular = np.linalg.norm(pts_gt[60] - pts_gt[72]) + elif L == 106: + # euclidean dis from left eye to right eye + interocular = np.linalg.norm(pts_gt[35] - pts_gt[93]) + else: + raise ValueError("Number of landmarks is wrong") + + pred_dis = np.sum(np.linalg.norm(pts_pred - pts_gt, axis=1)) + rmse[i] = pred_dis / (interocular * L) + + return np.mean(rmse), rmse + + +class PFLDLoss(nn.Module): + """Weighted loss of L2 distance with the pose angle for PFLD.""" + + def __init__(self): + super(PFLDLoss, self).__init__() + + def forward(self, landmark_gt, euler_angle_gt, angle, landmarks): + """ + Calculate weighted L2 loss for PFLD. + + Parameters + ---------- + landmark_gt : tensor + the ground truth of landmarks + euler_angle_gt : tensor + the ground truth of pose angle + angle : tensor + the predicted pose angle + landmarks : float32 + the predicted landmarks + + Returns + ------- + output: tensor + the weighted L2 loss + output: tensor + the normal L2 loss + """ + weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1) + l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1) + + return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant) + + +def bounded_regress_loss( + landmark_gt, landmarks_t, landmarks_s, reg_m=0.5, br_alpha=0.05 +): + """ + Calculate the Bounded Regression Loss for Knowledge Distillation. + + Parameters + ---------- + landmark_gt : tensor + the ground truth of landmarks + landmarks_t : tensor + the predicted landmarks of teacher + landmarks_s : tensor + the predicted landmarks of student + reg_m : float32 + the value to control the regresion constraint + br_alpha : float32 + the balance value for kd loss + + Returns + ------- + output: tensor + the bounded regression loss + """ + l2_dis_s = (landmark_gt - landmarks_s).pow(2).sum(1) + l2_dis_s_m = l2_dis_s + reg_m + + l2_dis_t = (landmark_gt - landmarks_t).pow(2).sum(1) + br_loss = l2_dis_s[l2_dis_s_m > l2_dis_t].sum() + + return br_loss * br_alpha diff --git a/examples/nas/oneshot/pfld/retrain.py b/examples/nas/oneshot/pfld/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..fa8ca362eedb194f4d368e0edcbc495306f11753 --- /dev/null +++ b/examples/nas/oneshot/pfld/retrain.py @@ -0,0 +1,313 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
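+# Example invocation (illustrative; paths are placeholders):
+#     python retrain.py --supernet ./models/supernet/checkpoint_best.pth \
+#         --data_root /dataset --dev_id 0
+# The script reads the sampled architecture ("arch_sample") from the supernet
+# checkpoint, inherits the matching supernet weights, then fine-tunes the subnet.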
+ +from __future__ import absolute_import, division, print_function + +import argparse +import logging +import os +import time +import torch +import torchvision + +import numpy as np + +from datasets import PFLDDatasets +from lib.builder import search_space +from lib.ops import PRIMITIVES +from lib.utils import PFLDLoss, accuracy +from nni.algorithms.nas.pytorch.fbnet import ( + LookUpTable, + NASConfig, + supernet_sample, +) +from nni.nas.pytorch.utils import AverageMeter +from torch.utils.data import DataLoader + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +def validate(model, auxiliarynet, valid_loader, device, logger): + """Do validation.""" + model.eval() + auxiliarynet.eval() + + losses, nme = list(), list() + with torch.no_grad(): + for i, (img, land_gt, angle_gt) in enumerate(valid_loader): + img = img.to(device, non_blocking=True) + landmark_gt = land_gt.to(device, non_blocking=True) + angle_gt = angle_gt.to(device, non_blocking=True) + + landmark, _ = model(img) + + # compute the l2 loss + landmark = landmark.squeeze() + l2_diff = torch.sum((landmark_gt - landmark) ** 2, axis=1) + loss = torch.mean(l2_diff) + losses.append(loss.cpu().detach().numpy()) + + # compute the accuracy + landmark = landmark.cpu().detach().numpy() + landmark = landmark.reshape(landmark.shape[0], -1, 2) + landmark_gt = landmark_gt.cpu().detach().numpy() + landmark_gt = landmark_gt.reshape(landmark_gt.shape[0], -1, 2) + _, nme_i = accuracy(landmark, landmark_gt) + for item in nme_i: + nme.append(item) + + logger.info("===> Evaluate:") + logger.info( + "Eval set: Average loss: {:.4f} nme: {:.4f}".format( + np.mean(losses), np.mean(nme) + ) + ) + return np.mean(losses), np.mean(nme) + + +def train_epoch( + model, + auxiliarynet, + criterion, + train_loader, + device, + epoch, + optimizer, + logger, +): + """Train one epoch.""" + model.train() + auxiliarynet.train() + + batch_time = AverageMeter("batch_time") + data_time = AverageMeter("data_time") + losses = AverageMeter("losses") + + end = time.time() + for i, (img, landmark_gt, angle_gt) in enumerate(train_loader): + data_time.update(time.time() - end) + img = img.to(device, non_blocking=True) + landmark_gt = landmark_gt.to(device, non_blocking=True) + angle_gt = angle_gt.to(device, non_blocking=True) + + lands, feats = model(img) + landmarks = lands.squeeze() + angle = auxiliarynet(feats) + + # task loss + weighted_loss, _ = criterion( + landmark_gt, angle_gt, angle, landmarks + ) + loss = weighted_loss + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + # measure accuracy and record loss + losses.update(np.squeeze(loss.cpu().detach().numpy()), img.size(0)) + + if i % 10 == 0: + batch_log = ( + "Train [{0}][{1}]\t" + "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" + "Data {data_time.val:.3f} ({data_time.avg:.3f})\t" + "Loss {losses.val:.4f} ({losses.avg:.4f})".format( + epoch + 1, + i, + batch_time=batch_time, + data_time=data_time, + losses=losses, + ) + ) + logger.info(batch_log) + + +def save_checkpoint(model, auxiliarynet, optimizer, filename, logger): + """Save checkpoint of the whole model.""" + state = { + "pfld_backbone": model.state_dict(), + "auxiliarynet": auxiliarynet.state_dict(), + "optim": optimizer.state_dict(), + } + torch.save(state, filename) + logger.info("Save checkpoint to {0:}".format(filename)) + + +def main(args): + """ The main function for supernet pre-training 
and subnet fine-tuning. """ + logging.basicConfig( + format="[%(asctime)s] [p%(process)s] [%(pathname)s\ + :%(lineno)d] [%(levelname)s] %(message)s", + level=logging.INFO, + handlers=[ + logging.FileHandler(args.log_file, mode="w"), + logging.StreamHandler(), + ], + ) + + # print the information of arguments + for arg in vars(args): + s = arg + ": " + str(getattr(args, arg)) + logging.info(s) + + # for 106 landmarks + num_points = 106 + # list of device ids, and the number of workers for data loading + device_ids = [int(id) for id in args.dev_id.split(",")] + dev_num = len(device_ids) + num_workers = 4 * dev_num + + # import subnet for fine-tuning + from lib.subnet import PFLDInference, AuxiliaryNet + + # the configuration for training control + nas_config = NASConfig( + model_dir=args.snapshot, + search_space=search_space, + ) + # look-up table with information of search space, flops per block, etc. + lookup_table = LookUpTable(config=nas_config, primitives=PRIMITIVES) + + check = torch.load(args.supernet, map_location=torch.device("cpu")) + sampled_arch = check["arch_sample"] + logging.info(sampled_arch) + # create subnet + pfld_backbone = PFLDInference(lookup_table, sampled_arch, num_points) + + # pre-load the weights from pre-trained supernet + state_dict = check["pfld_backbone"] + supernet_sample(pfld_backbone, state_dict, sampled_arch, lookup_table) + + # the auxiliary-net of PFLD to predict the pose angle + auxiliarynet = AuxiliaryNet() + + # DataParallel + pfld_backbone = torch.nn.DataParallel(pfld_backbone, device_ids=device_ids) + pfld_backbone.to(device) + auxiliarynet = torch.nn.DataParallel(auxiliarynet, device_ids=device_ids) + auxiliarynet.to(device) + + # main task loss + criterion = PFLDLoss() + + # optimizer / scheduler for weight train + optimizer = torch.optim.RMSprop( + [ + {"params": pfld_backbone.parameters()}, + {"params": auxiliarynet.parameters()}, + ], + lr=args.base_lr, + momentum=0.0, + weight_decay=args.weight_decay, + ) + + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + optimizer, T_max=args.end_epoch, last_epoch=-1 + ) + + # data argmentation and dataloader + transform = torchvision.transforms.Compose( + [torchvision.transforms.ToTensor()] + ) + # the landmark dataset with 106 points is default used + train_dataset = PFLDDatasets( + os.path.join(args.data_root, "train_data/list.txt"), + transform, + data_root=args.data_root, + img_size=args.img_size, + ) + dataloader = DataLoader( + train_dataset, + batch_size=args.train_batchsize, + shuffle=True, + num_workers=num_workers, + pin_memory=True, + drop_last=False, + ) + + val_dataset = PFLDDatasets( + os.path.join(args.data_root, "test_data/list.txt"), + transform, + data_root=args.data_root, + img_size=args.img_size, + ) + val_dataloader = DataLoader( + val_dataset, + batch_size=args.val_batchsize, + shuffle=False, + num_workers=num_workers, + pin_memory=True, + ) + + # start finetune + ckpt_path = args.snapshot + val_nme = 1e6 + + for epoch in range(0, args.end_epoch): + logging.info("\n--------Train epoch: %d--------\n", epoch + 1) + # update the weight parameters + train_epoch( + pfld_backbone, + auxiliarynet, + criterion, + dataloader, + device, + epoch, + optimizer, + logging, + ) + # adjust learning rate + scheduler.step() + + # validate + _, nme = validate( + pfld_backbone, auxiliarynet, val_dataloader, device, logging + ) + + if epoch % 10 == 0: + filename = os.path.join(ckpt_path, "checkpoint_%s.pth" % epoch) + save_checkpoint( + pfld_backbone, auxiliarynet, optimizer, filename, logging + ) 
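+ # keep the best subnet checkpoint, selected by validation NME (lower is better)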
+        if nme < val_nme:
+            filename = os.path.join(ckpt_path, "checkpoint_best.pth")
+            save_checkpoint(
+                pfld_backbone, auxiliarynet, optimizer, filename, logging
+            )
+            val_nme = nme
+        logging.info("Best nme: {:.4f}".format(val_nme))
+
+
+def parse_args():
+    """ Parse the user arguments. """
+    parser = argparse.ArgumentParser(description="Finetuning for PFLD")
+    parser.add_argument("--dev_id", dest="dev_id", default="0", type=str)
+    parser.add_argument("--base_lr", default=0.0001, type=float)
+    parser.add_argument("--weight-decay", "--wd", default=1e-6, type=float)
+    parser.add_argument("--img_size", default=112, type=int)
+    parser.add_argument("--supernet", default="", type=str, metavar="PATH")
+    parser.add_argument("--end_epoch", default=300, type=int)
+    parser.add_argument(
+        "--snapshot", default="models", type=str, metavar="PATH"
+    )
+    parser.add_argument("--log_file", default="train.log", type=str)
+    parser.add_argument(
+        "--data_root", default="/dataset", type=str, metavar="PATH"
+    )
+    parser.add_argument("--train_batchsize", default=256, type=int)
+    parser.add_argument("--val_batchsize", default=128, type=int)
+    args = parser.parse_args()
+    args.snapshot = os.path.join(args.snapshot, 'subnet')
+    args.log_file = os.path.join(args.snapshot, "{}.log".format('subnet'))
+    os.makedirs(args.snapshot, exist_ok=True)
+    return args
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    main(args)
diff --git a/examples/nas/oneshot/pfld/train.py b/examples/nas/oneshot/pfld/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2530faf8732dabe90096d11bcc3019d751934d4
--- /dev/null
+++ b/examples/nas/oneshot/pfld/train.py
@@ -0,0 +1,186 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import logging
+import os
+import torch
+import torchvision
+
+import numpy as np
+
+from datasets import PFLDDatasets
+from lib.builder import search_space
+from lib.ops import PRIMITIVES
+from lib.trainer import PFLDTrainer
+from lib.utils import PFLDLoss
+from nni.algorithms.nas.pytorch.fbnet import LookUpTable, NASConfig
+from torch.utils.data import DataLoader
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+def main(args):
+    """ The main function for supernet pre-training and subnet fine-tuning. """
+    logging.basicConfig(
+        format="[%(asctime)s] [p%(process)s] [%(pathname)s"
+        ":%(lineno)d] [%(levelname)s] %(message)s",
+        level=logging.INFO,
+        handlers=[
+            logging.FileHandler(args.log_file, mode="w"),
+            logging.StreamHandler(),
+        ],
+    )
+
+    # log the value of every argument
+    for arg in vars(args):
+        s = arg + ": " + str(getattr(args, arg))
+        logging.info(s)
+
+    # for 106 landmarks
+    num_points = 106
+    # list of device ids, and the number of workers for data loading
+    device_ids = [int(id) for id in args.dev_id.split(",")]
+    dev_num = len(device_ids)
+    num_workers = 4 * dev_num
+
+    # random seed
+    manual_seed = 1
+    np.random.seed(manual_seed)
+    torch.manual_seed(manual_seed)
+    torch.cuda.manual_seed_all(manual_seed)
+
+    # import the supernet for block-wise DNAS pre-training
+    from lib.supernet import PFLDInference, AuxiliaryNet
+
+    # the configuration for training control
+    nas_config = NASConfig(
+        model_dir=args.snapshot,
+        nas_lr=args.theta_lr,
+        mode=args.mode,
+        alpha=args.alpha,
+        beta=args.beta,
+        search_space=search_space,
+        start_epoch=args.start_epoch,
+    )
+    # look-up table with information of the search space, FLOPs per block, etc.
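+    # (Presumably this table is what makes the search hardware-aware: the DNAS
+    # loss can scale the task loss by the estimated cost of the sampled blocks,
+    # with --mode selecting a multiplicative or additive penalty and
+    # --alpha/--beta controlling its strength.)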
+    lookup_table = LookUpTable(config=nas_config, primitives=PRIMITIVES)
+
+    # create the supernet
+    pfld_backbone = PFLDInference(lookup_table, num_points)
+    # the auxiliary net of PFLD, which predicts the pose angle
+    auxiliarynet = AuxiliaryNet()
+
+    # main task loss
+    criterion = PFLDLoss()
+
+    # optimizer for weight training
+    if args.opt == "adam":
+        optimizer = torch.optim.AdamW(
+            [
+                {"params": pfld_backbone.parameters()},
+                {"params": auxiliarynet.parameters()},
+            ],
+            lr=args.base_lr,
+            weight_decay=args.weight_decay,
+        )
+    elif args.opt == "rms":
+        optimizer = torch.optim.RMSprop(
+            [
+                {"params": pfld_backbone.parameters()},
+                {"params": auxiliarynet.parameters()},
+            ],
+            lr=args.base_lr,
+            momentum=0.0,
+            weight_decay=args.weight_decay,
+        )
+    else:
+        raise ValueError("unsupported optimizer: %s" % args.opt)
+
+    # data augmentation and dataloaders
+    transform = torchvision.transforms.Compose(
+        [torchvision.transforms.ToTensor()]
+    )
+    # the 106-point landmark dataset is used by default
+    train_dataset = PFLDDatasets(
+        os.path.join(args.data_root, "train_data/list.txt"),
+        transform,
+        data_root=args.data_root,
+        img_size=args.img_size,
+    )
+    dataloader = DataLoader(
+        train_dataset,
+        batch_size=args.train_batchsize,
+        shuffle=True,
+        num_workers=num_workers,
+        pin_memory=True,
+        drop_last=False,
+    )
+
+    val_dataset = PFLDDatasets(
+        os.path.join(args.data_root, "test_data/list.txt"),
+        transform,
+        data_root=args.data_root,
+        img_size=args.img_size,
+    )
+    val_dataloader = DataLoader(
+        val_dataset,
+        batch_size=args.val_batchsize,
+        shuffle=False,
+        num_workers=num_workers,
+        pin_memory=True,
+    )
+
+    # create the trainer, then search/finetune
+    trainer = PFLDTrainer(
+        pfld_backbone,
+        auxiliarynet,
+        optimizer,
+        criterion,
+        device,
+        device_ids,
+        nas_config,
+        lookup_table,
+        dataloader,
+        val_dataloader,
+        n_epochs=args.end_epoch,
+        logger=logging,
+    )
+    trainer.train()
+
+
+def parse_args():
""" + parser = argparse.ArgumentParser(description="FBNet for PFLD") + parser.add_argument("--dev_id", dest="dev_id", default="0", type=str) + parser.add_argument("--opt", default="rms", type=str) + parser.add_argument("--base_lr", default=0.0001, type=int) + parser.add_argument("--weight-decay", "--wd", default=1e-6, type=float) + parser.add_argument("--img_size", default=112, type=int) + parser.add_argument("--theta-lr", "--tlr", default=0.01, type=float) + parser.add_argument( + "--mode", default="mul", type=str, choices=["mul", "add"] + ) + parser.add_argument("--alpha", default=0.25, type=float) + parser.add_argument("--beta", default=0.6, type=float) + parser.add_argument("--start_epoch", default=50, type=int) + parser.add_argument("--end_epoch", default=300, type=int) + parser.add_argument( + "--snapshot", default="models", type=str, metavar="PATH" + ) + parser.add_argument("--log_file", default="train.log", type=str) + parser.add_argument( + "--data_root", default="/dataset", type=str, metavar="PATH" + ) + parser.add_argument("--train_batchsize", default=256, type=int) + parser.add_argument("--val_batchsize", default=128, type=int) + args = parser.parse_args() + args.snapshot = os.path.join(args.snapshot, 'supernet') + args.log_file = os.path.join(args.snapshot, "{}.log".format('supernet')) + os.makedirs(args.snapshot, exist_ok=True) + return args + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/examples/nas/oneshot/proxylessnas/datasets.py b/examples/nas/oneshot/proxylessnas/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..b9390057498d12114847d850069f222e487a6cf1 --- /dev/null +++ b/examples/nas/oneshot/proxylessnas/datasets.py @@ -0,0 +1,188 @@ +import os +import numpy as np +import torch.utils.data +import torchvision.transforms as transforms +import torchvision.datasets as datasets + +def get_split_list(in_dim, child_num): + in_dim_list = [in_dim // child_num] * child_num + for _i in range(in_dim % child_num): + in_dim_list[_i] += 1 + return in_dim_list + +class DataProvider: + VALID_SEED = 0 # random seed for the validation set + + @staticmethod + def name(): + """ Return name of the dataset """ + raise NotImplementedError + + @property + def data_shape(self): + """ Return shape as python list of one data entry """ + raise NotImplementedError + + @property + def n_classes(self): + """ Return `int` of num classes """ + raise NotImplementedError + + @property + def save_path(self): + """ local path to save the data """ + raise NotImplementedError + + @property + def data_url(self): + """ link to download the data """ + raise NotImplementedError + + @staticmethod + def random_sample_valid_set(train_labels, valid_size, n_classes): + train_size = len(train_labels) + assert train_size > valid_size + + g = torch.Generator() + g.manual_seed(DataProvider.VALID_SEED) # set random seed before sampling validation set + rand_indexes = torch.randperm(train_size, generator=g).tolist() + + train_indexes, valid_indexes = [], [] + per_class_remain = get_split_list(valid_size, n_classes) + + for idx in rand_indexes: + label = train_labels[idx] + if isinstance(label, float): + label = int(label) + elif isinstance(label, np.ndarray): + label = np.argmax(label) + else: + assert isinstance(label, int) + if per_class_remain[label] > 0: + valid_indexes.append(idx) + per_class_remain[label] -= 1 + else: + train_indexes.append(idx) + return train_indexes, valid_indexes + + +class ImagenetDataProvider(DataProvider): + + def __init__(self, 
save_path=None, train_batch_size=256, test_batch_size=512, valid_size=None, + n_worker=32, resize_scale=0.08, distort_color=None): + + self._save_path = save_path + train_transforms = self.build_train_transform(distort_color, resize_scale) + train_dataset = datasets.ImageFolder(self.train_path, train_transforms) + + if valid_size is not None: + if isinstance(valid_size, float): + valid_size = int(valid_size * len(train_dataset)) + else: + assert isinstance(valid_size, int), 'invalid valid_size: %s' % valid_size + train_indexes, valid_indexes = self.random_sample_valid_set( + [cls for _, cls in train_dataset.samples], valid_size, self.n_classes, + ) + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indexes) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indexes) + + valid_dataset = datasets.ImageFolder(self.train_path, transforms.Compose([ + transforms.Resize(self.resize_value), + transforms.CenterCrop(self.image_size), + transforms.ToTensor(), + self.normalize, + ])) + + self.train = torch.utils.data.DataLoader( + train_dataset, batch_size=train_batch_size, sampler=train_sampler, + num_workers=n_worker, pin_memory=True, + ) + self.valid = torch.utils.data.DataLoader( + valid_dataset, batch_size=test_batch_size, sampler=valid_sampler, + num_workers=n_worker, pin_memory=True, + ) + else: + self.train = torch.utils.data.DataLoader( + train_dataset, batch_size=train_batch_size, shuffle=True, + num_workers=n_worker, pin_memory=True, + ) + self.valid = None + + self.test = torch.utils.data.DataLoader( + datasets.ImageFolder(self.valid_path, transforms.Compose([ + transforms.Resize(self.resize_value), + transforms.CenterCrop(self.image_size), + transforms.ToTensor(), + self.normalize, + ])), batch_size=test_batch_size, shuffle=False, num_workers=n_worker, pin_memory=True, + ) + + if self.valid is None: + self.valid = self.test + + @staticmethod + def name(): + return 'imagenet' + + @property + def data_shape(self): + return 3, self.image_size, self.image_size # C, H, W + + @property + def n_classes(self): + return 1000 + + @property + def save_path(self): + if self._save_path is None: + self._save_path = '/dataset/imagenet' + return self._save_path + + @property + def data_url(self): + raise ValueError('unable to download ImageNet') + + @property + def train_path(self): + return os.path.join(self.save_path, 'train') + + @property + def valid_path(self): + return os.path.join(self._save_path, 'val') + + @property + def normalize(self): + return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + def build_train_transform(self, distort_color, resize_scale): + print('Color jitter: %s' % distort_color) + if distort_color == 'strong': + color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1) + elif distort_color == 'normal': + color_transform = transforms.ColorJitter(brightness=32. 
/ 255., saturation=0.5) + else: + color_transform = None + if color_transform is None: + train_transforms = transforms.Compose([ + transforms.RandomResizedCrop(self.image_size, scale=(resize_scale, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + self.normalize, + ]) + else: + train_transforms = transforms.Compose([ + transforms.RandomResizedCrop(self.image_size, scale=(resize_scale, 1.0)), + transforms.RandomHorizontalFlip(), + color_transform, + transforms.ToTensor(), + self.normalize, + ]) + return train_transforms + + @property + def resize_value(self): + return 256 + + @property + def image_size(self): + return 224 \ No newline at end of file diff --git a/examples/nas/oneshot/proxylessnas/main.py b/examples/nas/oneshot/proxylessnas/main.py new file mode 100644 index 0000000000000000000000000000000000000000..866e518962c756aa55f751c50c2a7e5e41d986c1 --- /dev/null +++ b/examples/nas/oneshot/proxylessnas/main.py @@ -0,0 +1,141 @@ +import json +import logging +import os +import sys +from argparse import ArgumentParser + +import torch +from torchvision import transforms +from nni.retiarii.fixed import fixed_arch + +import datasets +from model import SearchMobileNet +from putils import LabelSmoothingLoss, accuracy, get_parameters +from retrain import Retrain + +logger = logging.getLogger('nni_proxylessnas') + +if __name__ == "__main__": + parser = ArgumentParser("proxylessnas") + # configurations of the model + parser.add_argument("--n_cell_stages", default='4,4,4,4,4,1', type=str) + parser.add_argument("--stride_stages", default='2,2,2,1,2,1', type=str) + parser.add_argument("--width_stages", default='24,40,80,96,192,320', type=str) + parser.add_argument("--bn_momentum", default=0.1, type=float) + parser.add_argument("--bn_eps", default=1e-3, type=float) + parser.add_argument("--dropout_rate", default=0, type=float) + parser.add_argument("--no_decay_keys", default='bn', type=str, choices=[None, 'bn', 'bn#bias']) + parser.add_argument('--grad_reg_loss_type', default='add#linear', type=str, choices=['add#linear', 'mul#log']) + parser.add_argument('--grad_reg_loss_lambda', default=1e-1, type=float) # grad_reg_loss_params + parser.add_argument('--grad_reg_loss_alpha', default=0.2, type=float) # grad_reg_loss_params + parser.add_argument('--grad_reg_loss_beta', default=0.3, type=float) # grad_reg_loss_params + parser.add_argument("--applied_hardware", default=None, type=str, help='the hardware to predict model latency') + parser.add_argument("--reference_latency", default=None, type=float, help='the reference latency in specified hardware') + # configurations of imagenet dataset + parser.add_argument("--data_path", default='/data/imagenet/', type=str) + parser.add_argument("--train_batch_size", default=256, type=int) + parser.add_argument("--test_batch_size", default=500, type=int) + parser.add_argument("--n_worker", default=32, type=int) + parser.add_argument("--resize_scale", default=0.08, type=float) + parser.add_argument("--distort_color", default='normal', type=str, choices=['normal', 'strong', 'None']) + # configurations for training mode + parser.add_argument("--train_mode", default='search', type=str, choices=['search', 'retrain']) + # configurations for search + parser.add_argument("--checkpoint_path", default='./search_mobile_net.pt', type=str) + parser.add_argument("--arch_path", default='./arch_path.pt', type=str) + parser.add_argument("--no-warmup", dest='warmup', action='store_false') + # configurations for retrain + parser.add_argument("--exported_arch_path", 
+                        default=None, type=str)
+
+    args = parser.parse_args()
+    if args.train_mode == 'retrain' and args.exported_arch_path is None:
+        logger.error('When --train_mode is retrain, --exported_arch_path must be specified.')
+        sys.exit(-1)
+
+    model_kwargs = dict(width_stages=[int(i) for i in args.width_stages.split(',')],
+                        n_cell_stages=[int(i) for i in args.n_cell_stages.split(',')],
+                        stride_stages=[int(i) for i in args.stride_stages.split(',')],
+                        n_classes=1000,
+                        dropout_rate=args.dropout_rate,
+                        bn_param=(args.bn_momentum, args.bn_eps))
+    if args.train_mode == 'retrain':
+        assert os.path.isfile(args.exported_arch_path), \
+            "exported_arch_path {} should be a file.".format(args.exported_arch_path)
+        # build the model with its architecture choices fixed to the exported ones
+        with fixed_arch(args.exported_arch_path):
+            model = SearchMobileNet(**model_kwargs)
+    else:
+        model = SearchMobileNet(**model_kwargs)
+    logger.info('SearchMobileNet model create done')
+    model.init_model()
+    logger.info('SearchMobileNet model init done')
+
+    # move the network to GPU if available
+    if torch.cuda.is_available():
+        device = torch.device('cuda')
+    else:
+        device = torch.device('cpu')
+
+    logger.info('Creating data provider...')
+    data_provider = datasets.ImagenetDataProvider(save_path=args.data_path,
+                                                  train_batch_size=args.train_batch_size,
+                                                  test_batch_size=args.test_batch_size,
+                                                  valid_size=None,
+                                                  n_worker=args.n_worker,
+                                                  resize_scale=args.resize_scale,
+                                                  distort_color=args.distort_color)
+    logger.info('Creating data provider done')
+
+    if args.no_decay_keys:
+        keys = args.no_decay_keys
+        momentum, nesterov = 0.9, True
+        optimizer = torch.optim.SGD([
+            {'params': get_parameters(model, keys, mode='exclude'), 'weight_decay': 4e-5},
+            {'params': get_parameters(model, keys, mode='include'), 'weight_decay': 0},
+        ], lr=0.05, momentum=momentum, nesterov=nesterov)
+    else:
+        momentum, nesterov = 0.9, True
+        optimizer = torch.optim.SGD(get_parameters(model), lr=0.05, momentum=momentum, nesterov=nesterov, weight_decay=4e-5)
+
+    if args.grad_reg_loss_type == 'add#linear':
+        grad_reg_loss_params = {'lambda': args.grad_reg_loss_lambda}
+    elif args.grad_reg_loss_type == 'mul#log':
+        grad_reg_loss_params = {
+            'alpha': args.grad_reg_loss_alpha,
+            'beta': args.grad_reg_loss_beta,
+        }
+    else:
+        grad_reg_loss_params = None
+
+    if args.train_mode == 'search':
+        from nni.retiarii.oneshot.pytorch import ProxylessTrainer
+        from torchvision.datasets import ImageNet
+        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                         std=[0.229, 0.224, 0.225])
+        dataset = ImageNet(args.data_path, transform=transforms.Compose([
+            transforms.RandomResizedCrop(224),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            normalize,
+        ]))
+        trainer = ProxylessTrainer(model,
+                                   loss=LabelSmoothingLoss(),
+                                   dataset=dataset,
+                                   optimizer=optimizer,
+                                   metrics=lambda output, target: accuracy(output, target, topk=(1, 5,)),
+                                   num_epochs=120,
+                                   log_frequency=10,
+                                   grad_reg_loss_type=args.grad_reg_loss_type,
+                                   grad_reg_loss_params=grad_reg_loss_params,
+                                   applied_hardware=args.applied_hardware, dummy_input=(1, 3, 224, 224),
+                                   ref_latency=args.reference_latency)
+        trainer.fit()
+        print('Final architecture:', trainer.export())
+        json.dump(trainer.export(), open('checkpoint.json', 'w'))
+    elif args.train_mode == 'retrain':
+        # retrain the exported architecture from scratch
+        trainer = Retrain(model, optimizer, device, data_provider, n_epochs=300)
+        trainer.run()
diff --git a/examples/nas/oneshot/proxylessnas/model.py b/examples/nas/oneshot/proxylessnas/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b914a7b4127eb7ea9105ee264e9c5aaf1760450
--- /dev/null
+++ b/examples/nas/oneshot/proxylessnas/model.py
@@ -0,0 +1,127 @@
+import torch
+import nni.retiarii.nn.pytorch as nn
+import math
+
+import ops
+import putils
+from nni.retiarii.nn.pytorch import LayerChoice
+
+class SearchMobileNet(nn.Module):
+    def __init__(self,
+                 width_stages=[24,40,80,96,192,320],
+                 n_cell_stages=[4,4,4,4,4,1],
+                 stride_stages=[2,2,2,1,2,1],
+                 width_mult=1, n_classes=1000,
+                 dropout_rate=0, bn_param=(0.1, 1e-3)):
+        """
+        Parameters
+        ----------
+        width_stages: list of int
+            width (output channels) of each cell stage in the block
+        n_cell_stages: list of int
+            number of cells in each cell stage
+        stride_stages: list of int
+            stride of each cell stage in the block
+        width_mult : int
+            the scale factor of width
+        """
+        super(SearchMobileNet, self).__init__()
+
+        input_channel = putils.make_divisible(32 * width_mult, 8)
+        first_cell_width = putils.make_divisible(16 * width_mult, 8)
+        for i in range(len(width_stages)):
+            width_stages[i] = putils.make_divisible(width_stages[i] * width_mult, 8)
+        # first conv
+        first_conv = ops.ConvLayer(3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='relu6', ops_order='weight_bn_act')
+        # first block
+        first_block_conv = ops.OPS['3x3_MBConv1'](input_channel, first_cell_width, 1)
+        first_block = first_block_conv
+
+        input_channel = first_cell_width
+
+        blocks = [first_block]
+
+        stage_cnt = 0
+        for width, n_cell, s in zip(width_stages, n_cell_stages, stride_stages):
+            for i in range(n_cell):
+                if i == 0:
+                    stride = s
+                else:
+                    stride = 1
+                op_candidates = [ops.OPS['3x3_MBConv3'](input_channel, width, stride),
+                                 ops.OPS['3x3_MBConv6'](input_channel, width, stride),
+                                 ops.OPS['5x5_MBConv3'](input_channel, width, stride),
+                                 ops.OPS['5x5_MBConv6'](input_channel, width, stride),
+                                 ops.OPS['7x7_MBConv3'](input_channel, width, stride),
+                                 ops.OPS['7x7_MBConv6'](input_channel, width, stride)]
+                if stride == 1 and input_channel == width:
+                    # not the first cell of a stage: allow the block to be skipped
+                    op_candidates += [ops.OPS['Zero'](input_channel, width, stride)]
+                conv_op = LayerChoice(op_candidates, label="s{}_c{}".format(stage_cnt, i))
+                # shortcut
+                if stride == 1 and input_channel == width:
+                    # not the first cell of a stage: residual connection
+                    shortcut = ops.IdentityLayer(input_channel, input_channel)
+                else:
+                    shortcut = None
+                inverted_residual_block = ops.MobileInvertedResidualBlock(conv_op, shortcut, op_candidates)
+                blocks.append(inverted_residual_block)
+                input_channel = width
+            stage_cnt += 1
+
+        # feature mix layer
+        last_channel = putils.make_divisible(1280 * width_mult, 8) if width_mult > 1.0 else 1280
+        feature_mix_layer = ops.ConvLayer(input_channel, last_channel, kernel_size=1, use_bn=True, act_func='relu6', ops_order='weight_bn_act')
+        classifier = ops.LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
+
+        self.first_conv = first_conv
+        self.blocks = nn.ModuleList(blocks)
+        self.feature_mix_layer = feature_mix_layer
+        self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
+        self.classifier = classifier
+
+        # set bn param
+        self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
+
+    def forward(self, x):
+        x = self.first_conv(x)
+        for block in self.blocks:
+            x = block(x)
+        x = self.feature_mix_layer(x)
+        x = self.global_avg_pooling(x)
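+        # flatten (N, C, 1, 1) -> (N, C) before the final linear classifier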
x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + + def set_bn_param(self, momentum, eps): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): + m.momentum = momentum + m.eps = eps + return + + def init_model(self, model_init='he_fout', init_div_groups=False): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + if model_init == 'he_fout': + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if init_div_groups: + n /= m.groups + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif model_init == 'he_fin': + n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels + if init_div_groups: + n /= m.groups + m.weight.data.normal_(0, math.sqrt(2. / n)) + else: + raise NotImplementedError + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + stdv = 1. / math.sqrt(m.weight.size(1)) + m.weight.data.uniform_(-stdv, stdv) + if m.bias is not None: + m.bias.data.zero_() diff --git a/examples/nas/oneshot/proxylessnas/ops.py b/examples/nas/oneshot/proxylessnas/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8a237e6f54aabb15c3f4618819b4ded0beeb869e --- /dev/null +++ b/examples/nas/oneshot/proxylessnas/ops.py @@ -0,0 +1,334 @@ +from collections import OrderedDict +from nni.retiarii.serializer import basic_unit +import torch +import nni.retiarii.nn.pytorch as nn + +from putils import get_same_padding, build_activation + + +OPS = { + 'Identity': lambda in_C, out_C, stride: IdentityLayer(in_C, out_C, ops_order='weight_bn_act'), + 'Zero': lambda in_C, out_C, stride: ZeroLayer(stride=stride), + '3x3_MBConv1': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 1), + '3x3_MBConv2': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 2), + '3x3_MBConv3': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 3), + '3x3_MBConv4': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 4), + '3x3_MBConv5': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 5), + '3x3_MBConv6': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 3, stride, 6), + '5x5_MBConv1': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 1), + '5x5_MBConv2': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 2), + '5x5_MBConv3': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 3), + '5x5_MBConv4': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 4), + '5x5_MBConv5': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 5), + '5x5_MBConv6': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 5, stride, 6), + '7x7_MBConv1': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 1), + '7x7_MBConv2': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 2), + '7x7_MBConv3': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 3), + '7x7_MBConv4': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 4), + '7x7_MBConv5': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 5), + '7x7_MBConv6': lambda in_C, out_C, stride: MBInvertedConvLayer(in_C, out_C, 7, stride, 6) +} + + +class MobileInvertedResidualBlock(nn.Module): + + def __init__(self, mobile_inverted_conv, shortcut, op_candidates_list): + super(MobileInvertedResidualBlock, self).__init__() + + 
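+        # `mobile_inverted_conv` is the searchable main branch (a LayerChoice
+        # over MBConv candidates), `shortcut` the optional identity path;
+        # ZeroLayerModule decides at runtime whether the main branch produced
+        # the all-zero output of a sampled Zero op and applies the residual.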
self.mobile_inverted_conv = mobile_inverted_conv + self.op_candidates_list = op_candidates_list + self.zero_layer_module = ZeroLayerModule(shortcut) + + def forward(self, x): + out = self.mobile_inverted_conv(x) + return self.zero_layer_module(x, out) + + +@basic_unit +class ZeroLayerModule(nn.Module): + def __init__(self, shortcut): + super().__init__() + self.shortcut = shortcut + + def forward(self, x, out): + if torch.sum(torch.abs(out)).item() == 0: + if x.size() == out.size(): + # is zero layer + return x + if self.shortcut is None: + return out + return out + self.shortcut(x) + + +class ShuffleLayer(nn.Module): + def __init__(self, groups): + super(ShuffleLayer, self).__init__() + self.groups = groups + + def forward(self, x): + batchsize, num_channels, height, width = x.size() + channels_per_group = num_channels // self.groups + # reshape + x = x.view(batchsize, self.groups, channels_per_group, height, width) + # noinspection PyUnresolvedReferences + x = torch.transpose(x, 1, 2).contiguous() + # flatten + x = x.view(batchsize, -1, height, width) + return x + +class Base2DLayer(nn.Module): + + def __init__(self, in_channels, out_channels, + use_bn=True, act_func='relu', dropout_rate=0, ops_order='weight_bn_act'): + super(Base2DLayer, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + self.use_bn = use_bn + self.act_func = act_func + self.dropout_rate = dropout_rate + self.ops_order = ops_order + + """ modules """ + modules = {} + # batch norm + if self.use_bn: + if self.bn_before_weight: + modules['bn'] = nn.BatchNorm2d(in_channels) + else: + modules['bn'] = nn.BatchNorm2d(out_channels) + else: + modules['bn'] = None + # activation + modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act') + # dropout + if self.dropout_rate > 0: + modules['dropout'] = nn.Dropout2d(self.dropout_rate, inplace=True) + else: + modules['dropout'] = None + # weight + modules['weight'] = self.weight_op() + + # add modules + for op in self.ops_list: + if modules[op] is None: + continue + elif op == 'weight': + if modules['dropout'] is not None: + self.add_module('dropout', modules['dropout']) + for key in modules['weight']: + self.add_module(key, modules['weight'][key]) + else: + self.add_module(op, modules[op]) + self.sequence = nn.Sequential(self._modules) + + @property + def ops_list(self): + return self.ops_order.split('_') + + @property + def bn_before_weight(self): + for op in self.ops_list: + if op == 'bn': + return True + elif op == 'weight': + return False + raise ValueError(f'Invalid ops_order: {self.ops_order}') + + def weight_op(self): + raise NotImplementedError + + def forward(self, x): + x = self.sequence(x) + return x + + @staticmethod + def is_zero_layer(): + return False + + +class ConvLayer(Base2DLayer): + + def __init__(self, in_channels, out_channels, + kernel_size=3, stride=1, dilation=1, groups=1, bias=False, has_shuffle=False, + use_bn=True, act_func='relu', dropout_rate=0, ops_order='weight_bn_act'): + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.bias = bias + self.has_shuffle = has_shuffle + + super(ConvLayer, self).__init__(in_channels, out_channels, use_bn, act_func, dropout_rate, ops_order) + + def weight_op(self): + padding = get_same_padding(self.kernel_size) + if isinstance(padding, int): + padding *= self.dilation + else: + padding[0] *= self.dilation + padding[1] *= self.dilation + + weight_dict = OrderedDict() + weight_dict['conv'] = nn.Conv2d( + 
self.in_channels, self.out_channels, kernel_size=self.kernel_size, stride=self.stride, padding=padding, + dilation=self.dilation, groups=self.groups, bias=self.bias + ) + if self.has_shuffle and self.groups > 1: + weight_dict['shuffle'] = ShuffleLayer(self.groups) + + return weight_dict + + +class IdentityLayer(Base2DLayer): + + def __init__(self, in_channels, out_channels, + use_bn=False, act_func=None, dropout_rate=0, ops_order='weight_bn_act'): + super(IdentityLayer, self).__init__(in_channels, out_channels, use_bn, act_func, dropout_rate, ops_order) + + def weight_op(self): + return None + + +class LinearLayer(nn.Module): + + def __init__(self, in_features, out_features, bias=True, + use_bn=False, act_func=None, dropout_rate=0, ops_order='weight_bn_act'): + super(LinearLayer, self).__init__() + + self.in_features = in_features + self.out_features = out_features + self.bias = bias + + self.use_bn = use_bn + self.act_func = act_func + self.dropout_rate = dropout_rate + self.ops_order = ops_order + + """ modules """ + modules = {} + # batch norm + if self.use_bn: + if self.bn_before_weight: + modules['bn'] = nn.BatchNorm1d(in_features) + else: + modules['bn'] = nn.BatchNorm1d(out_features) + else: + modules['bn'] = None + # activation + modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act') + # dropout + if self.dropout_rate > 0: + modules['dropout'] = nn.Dropout(self.dropout_rate, inplace=True) + else: + modules['dropout'] = None + # linear + modules['weight'] = {'linear': nn.Linear(self.in_features, self.out_features, self.bias)} + + # add modules + for op in self.ops_list: + if modules[op] is None: + continue + elif op == 'weight': + if modules['dropout'] is not None: + self.add_module('dropout', modules['dropout']) + for key in modules['weight']: + self.add_module(key, modules['weight'][key]) + else: + self.add_module(op, modules[op]) + self.sequence = nn.Sequential(self._modules) + + @property + def ops_list(self): + return self.ops_order.split('_') + + @property + def bn_before_weight(self): + for op in self.ops_list: + if op == 'bn': + return True + elif op == 'weight': + return False + raise ValueError(f'Invalid ops_order: {self.ops_order}') + + def forward(self, x): + x = self.sequence(x) + return x + + @staticmethod + def is_zero_layer(): + return False + + +class MBInvertedConvLayer(nn.Module): + """ + This layer is introduced in section 4.2 in the paper https://arxiv.org/pdf/1812.00332.pdf + """ + def __init__(self, in_channels, out_channels, + kernel_size=3, stride=1, expand_ratio=6, mid_channels=None): + super(MBInvertedConvLayer, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.kernel_size = kernel_size + self.stride = stride + self.expand_ratio = expand_ratio + self.mid_channels = mid_channels + + if self.mid_channels is None: + feature_dim = round(self.in_channels * self.expand_ratio) + else: + feature_dim = self.mid_channels + + if self.expand_ratio == 1: + self.inverted_bottleneck = nn.Sequential() + else: + self.inverted_bottleneck = nn.Sequential(OrderedDict([ + ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)), + ('bn', nn.BatchNorm2d(feature_dim)), + ('act', nn.ReLU6(inplace=True)), + ])) + + pad = get_same_padding(self.kernel_size) + self.depth_conv = nn.Sequential(OrderedDict([ + ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)), + ('bn', nn.BatchNorm2d(feature_dim)), + ('act', nn.ReLU6(inplace=True)), + ])) + + 
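+        # 1x1 point-wise projection back to `out_channels`; deliberately has no
+        # activation afterwards (the "linear bottleneck" of MobileNetV2).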
self.point_linear = nn.Sequential(OrderedDict([ + ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)), + ('bn', nn.BatchNorm2d(out_channels)), + ])) + + def forward(self, x): + x = self.inverted_bottleneck(x) + x = self.depth_conv(x) + x = self.point_linear(x) + return x + + @staticmethod + def is_zero_layer(): + return False + + +class ZeroLayer(nn.Module): + + def __init__(self, stride): + super(ZeroLayer, self).__init__() + self.stride = stride + + def forward(self, x): + '''n, c, h, w = x.size() + h //= self.stride + w //= self.stride + device = x.get_device() if x.is_cuda else torch.device('cpu') + # noinspection PyUnresolvedReferences + padding = torch.zeros(n, c, h, w, device=device, requires_grad=False) + return padding''' + return x * 0 + + @staticmethod + def is_zero_layer(): + return True diff --git a/examples/nas/oneshot/proxylessnas/putils.py b/examples/nas/oneshot/proxylessnas/putils.py new file mode 100644 index 0000000000000000000000000000000000000000..d723517f264126365eac5b421bd2492c012653b1 --- /dev/null +++ b/examples/nas/oneshot/proxylessnas/putils.py @@ -0,0 +1,107 @@ +import torch +import nni.retiarii.nn.pytorch as nn + + +def get_parameters(model, keys=None, mode='include'): + if keys is None: + for name, param in model.named_parameters(): + yield param + elif mode == 'include': + for name, param in model.named_parameters(): + flag = False + for key in keys: + if key in name: + flag = True + break + if flag: + yield param + elif mode == 'exclude': + for name, param in model.named_parameters(): + flag = True + for key in keys: + if key in name: + flag = False + break + if flag: + yield param + else: + raise ValueError('do not support: %s' % mode) + + +def get_same_padding(kernel_size): + if isinstance(kernel_size, tuple): + assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size + p1 = get_same_padding(kernel_size[0]) + p2 = get_same_padding(kernel_size[1]) + return p1, p2 + assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`' + assert kernel_size % 2 > 0, 'kernel size should be odd number' + return kernel_size // 2 + + +def build_activation(act_func, inplace=True): + if act_func == 'relu': + return nn.ReLU(inplace=inplace) + elif act_func == 'relu6': + return nn.ReLU6(inplace=inplace) + elif act_func == 'tanh': + return nn.Tanh() + elif act_func == 'sigmoid': + return nn.Sigmoid() + elif act_func is None: + return None + else: + raise ValueError('do not support: %s' % act_func) + + +def make_divisible(v, divisor, min_val=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_val is None: + min_val = divisor + new_v = max(min_val, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
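+    # e.g. make_divisible(30, 8) -> 32 (round to the nearest multiple), while
+    # make_divisible(11, 8) -> 16 rather than 8, since 8 would be a >10% drop.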
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res + + +class LabelSmoothingLoss(nn.Module): + def __init__(self, smoothing=0.1, dim=-1): + super(LabelSmoothingLoss, self).__init__() + self.confidence = 1.0 - smoothing + self.smoothing = smoothing + self.dim = dim + + def forward(self, pred, target): + pred = pred.log_softmax(dim=self.dim) + num_classes = pred.size(self.dim) + with torch.no_grad(): + true_dist = torch.zeros_like(pred) + true_dist.fill_(self.smoothing / (num_classes - 1)) + true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) + return torch.mean(torch.sum(-true_dist * pred, dim=self.dim)) diff --git a/examples/nas/oneshot/proxylessnas/retrain.py b/examples/nas/oneshot/proxylessnas/retrain.py new file mode 100644 index 0000000000000000000000000000000000000000..1f6210a1441b15cd49443a7ae87b843fcbcef22e --- /dev/null +++ b/examples/nas/oneshot/proxylessnas/retrain.py @@ -0,0 +1,183 @@ +import time +import math +from datetime import timedelta +import torch +from torch import nn as nn +from nni.nas.pytorch.utils import AverageMeter + +def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1): + logsoftmax = nn.LogSoftmax() + n_classes = pred.size(1) + # convert to one-hot + target = torch.unsqueeze(target, 1) + soft_target = torch.zeros_like(pred) + soft_target.scatter_(1, target, 1) + # label smoothing + soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + +def accuracy(output, target, topk=(1,)): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +class Retrain: + def __init__(self, model, optimizer, device, data_provider, n_epochs): + self.model = model + self.optimizer = optimizer + self.device = device + self.train_loader = data_provider.train + self.valid_loader = data_provider.valid + self.test_loader = data_provider.test + self.n_epochs = n_epochs + self.criterion = nn.CrossEntropyLoss() + + def run(self): + self.model = torch.nn.DataParallel(self.model) + self.model.to(self.device) + # train + self.train() + # validate + self.validate(is_test=False) + # test + self.validate(is_test=True) + + def train_one_epoch(self, adjust_lr_func, train_log_func, label_smoothing=0.1): + batch_time = AverageMeter('batch_time') + data_time = AverageMeter('data_time') + losses = AverageMeter('losses') + top1 = AverageMeter('top1') + top5 = AverageMeter('top5') + self.model.train() + end = time.time() + for i, (images, labels) in enumerate(self.train_loader): + data_time.update(time.time() - end) + new_lr = adjust_lr_func(i) + images, labels = images.to(self.device), labels.to(self.device) + output = self.model(images) + if label_smoothing > 0: + loss = 
cross_entropy_with_label_smoothing(output, labels, label_smoothing) + else: + loss = self.criterion(output, labels) + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss, images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + + # compute gradient and do SGD step + self.model.zero_grad() # or self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0 or i + 1 == len(self.train_loader): + batch_log = train_log_func(i, batch_time, data_time, losses, top1, top5, new_lr) + print(batch_log) + return top1, top5 + + def train(self, validation_frequency=1): + best_acc = 0 + nBatch = len(self.train_loader) + + def train_log_func(epoch_, i, batch_time, data_time, losses, top1, top5, lr): + batch_log = 'Train [{0}][{1}/{2}]\t' \ + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \ + 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' \ + 'Loss {losses.val:.4f} ({losses.avg:.4f})\t' \ + 'Top-1 acc {top1.val:.3f} ({top1.avg:.3f})'. \ + format(epoch_ + 1, i, nBatch - 1, + batch_time=batch_time, data_time=data_time, losses=losses, top1=top1) + batch_log += '\tTop-5 acc {top5.val:.3f} ({top5.avg:.3f})'.format(top5=top5) + batch_log += '\tlr {lr:.5f}'.format(lr=lr) + return batch_log + + def adjust_learning_rate(n_epochs, optimizer, epoch, batch=0, nBatch=None): + """ adjust learning of a given optimizer and return the new learning rate """ + # cosine + T_total = n_epochs * nBatch + T_cur = epoch * nBatch + batch + # init_lr = 0.05 + new_lr = 0.5 * 0.05 * (1 + math.cos(math.pi * T_cur / T_total)) + for param_group in optimizer.param_groups: + param_group['lr'] = new_lr + return new_lr + + for epoch in range(self.n_epochs): + print('\n', '-' * 30, 'Train epoch: %d' % (epoch + 1), '-' * 30, '\n') + end = time.time() + train_top1, train_top5 = self.train_one_epoch( + lambda i: adjust_learning_rate(self.n_epochs, self.optimizer, epoch, i, nBatch), + lambda i, batch_time, data_time, losses, top1, top5, new_lr: + train_log_func(epoch, i, batch_time, data_time, losses, top1, top5, new_lr), + ) + time_per_epoch = time.time() - end + seconds_left = int((self.n_epochs - epoch - 1) * time_per_epoch) + print('Time per epoch: %s, Est. 
complete in: %s' % ( + str(timedelta(seconds=time_per_epoch)), + str(timedelta(seconds=seconds_left)))) + + if (epoch + 1) % validation_frequency == 0: + val_loss, val_acc, val_acc5 = self.validate(is_test=False) + is_best = val_acc > best_acc + best_acc = max(best_acc, val_acc) + val_log = 'Valid [{0}/{1}]\tloss {2:.3f}\ttop-1 acc {3:.3f} ({4:.3f})'.\ + format(epoch + 1, self.n_epochs, val_loss, val_acc, best_acc) + val_log += '\ttop-5 acc {0:.3f}\tTrain top-1 {top1.avg:.3f}\ttop-5 {top5.avg:.3f}'.\ + format(val_acc5, top1=train_top1, top5=train_top5) + print(val_log) + else: + is_best = False + + def validate(self, is_test=True): + if is_test: + data_loader = self.test_loader + else: + data_loader = self.valid_loader + self.model.eval() + batch_time = AverageMeter('batch_time') + losses = AverageMeter('losses') + top1 = AverageMeter('top1') + top5 = AverageMeter('top5') + + end = time.time() + with torch.no_grad(): + for i, (images, labels) in enumerate(data_loader): + images, labels = images.to(self.device), labels.to(self.device) + # compute output + output = self.model(images) + loss = self.criterion(output, labels) + # measure accuracy and record loss + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss, images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0 or i + 1 == len(data_loader): + if is_test: + prefix = 'Test' + else: + prefix = 'Valid' + test_log = prefix + ': [{0}/{1}]\t'\ + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'\ + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'\ + 'Top-1 acc {top1.val:.3f} ({top1.avg:.3f})'.\ + format(i, len(data_loader) - 1, batch_time=batch_time, loss=losses, top1=top1) + test_log += '\tTop-5 acc {top5.val:.3f} ({top5.avg:.3f})'.format(top5=top5) + print(test_log) + return losses.avg, top1.avg, top5.avg \ No newline at end of file diff --git a/examples/nas/oneshot/spos/README.md b/examples/nas/oneshot/spos/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e9d3fafc865620521d2423709aee47915bd5db17 --- /dev/null +++ b/examples/nas/oneshot/spos/README.md @@ -0,0 +1 @@ +[Documentation](https://nni.readthedocs.io/en/latest/NAS/SPOS.html) diff --git a/examples/nas/oneshot/spos/README_zh_CN.md b/examples/nas/oneshot/spos/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..6feb1adbbf0f2607efb2368010455a5449b032fa --- /dev/null +++ b/examples/nas/oneshot/spos/README_zh_CN.md @@ -0,0 +1 @@ +[文档](https://nni.readthedocs.io/zh/latest/NAS/SPOS.html) \ No newline at end of file diff --git a/examples/nas/oneshot/spos/architecture_final.json b/examples/nas/oneshot/spos/architecture_final.json new file mode 100644 index 0000000000000000000000000000000000000000..4bdcc2d5071bc8ccf14ea77c3bc8a12cbac6f1c4 --- /dev/null +++ b/examples/nas/oneshot/spos/architecture_final.json @@ -0,0 +1,22 @@ +{ + "LayerChoice1": "2", + "LayerChoice2": "1", + "LayerChoice3": "0", + "LayerChoice4": "1", + "LayerChoice5": "2", + "LayerChoice6": "0", + "LayerChoice7": "2", + "LayerChoice8": "0", + "LayerChoice9": "2", + "LayerChoice10": "0", + "LayerChoice11": "2", + "LayerChoice12": "3", + "LayerChoice13": "0", + "LayerChoice14": "0", + "LayerChoice15": "0", + "LayerChoice16": "0", + "LayerChoice17": "3", + "LayerChoice18": "2", + "LayerChoice19": "3", + "LayerChoice20": "3" + } \ No newline at end of file diff --git a/examples/nas/oneshot/spos/blocks.py 
b/examples/nas/oneshot/spos/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..0c7e5c8ed730419f7cb0ea854dcd5a19dcdb4862 --- /dev/null +++ b/examples/nas/oneshot/spos/blocks.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import nni.retiarii.nn.pytorch as nn + + +class ShuffleNetBlock(nn.Module): + """ + When stride = 1, the block receives input with 2 * inp channels. Otherwise inp channels. + """ + + def __init__(self, inp, oup, mid_channels, ksize, stride, sequence="pdp", affine=True): + super().__init__() + assert stride in [1, 2] + assert ksize in [3, 5, 7] + self.channels = inp // 2 if stride == 1 else inp + self.inp = inp + self.oup = oup + self.mid_channels = mid_channels + self.ksize = ksize + self.stride = stride + self.pad = ksize // 2 + self.oup_main = oup - self.channels + self._affine = affine + assert self.oup_main > 0 + + self.branch_main = nn.Sequential(*self._decode_point_depth_conv(sequence)) + + # FIXME: restore before merging into master + # remove if stride == 2 for torchscript + self.branch_proj = nn.Sequential( + # dw + nn.Conv2d(self.channels, self.channels, ksize, stride, self.pad, + groups=self.channels, bias=False), + nn.BatchNorm2d(self.channels, affine=affine), + # pw-linear + nn.Conv2d(self.channels, self.channels, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.channels, affine=affine), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + if self.stride == 2: + x_proj, x = self.branch_proj(x), x + else: + x_proj, x = self._channel_shuffle(x) + return torch.cat((x_proj, self.branch_main(x)), 1) + + def _decode_point_depth_conv(self, sequence): + result = [] + first_depth = first_point = True + pc = c = self.channels + for i, token in enumerate(sequence): + # compute output channels of this conv + if i + 1 == len(sequence): + assert token == "p", "Last conv must be point-wise conv." + c = self.oup_main + elif token == "p" and first_point: + c = self.mid_channels + if token == "d": + # depth-wise conv + assert pc == c, "Depth-wise conv must not change channels." + result.append(nn.Conv2d(pc, c, self.ksize, self.stride if first_depth else 1, self.pad, + groups=c, bias=False)) + result.append(nn.BatchNorm2d(c, affine=self._affine)) + first_depth = False + elif token == "p": + # point-wise conv + result.append(nn.Conv2d(pc, c, 1, 1, 0, bias=False)) + result.append(nn.BatchNorm2d(c, affine=self._affine)) + result.append(nn.ReLU(inplace=True)) + first_point = False + else: + raise ValueError("Conv sequence must be d and p.") + pc = c + return result + + def _channel_shuffle(self, x): + bs, num_channels, height, width = x.size() + x = x.reshape(bs * num_channels // 2, 2, height * width) + x = x.permute(1, 0, 2) + x = x.reshape(2, -1, num_channels // 2, height, width) + return x[0], x[1] + + +class ShuffleXceptionBlock(ShuffleNetBlock): + + def __init__(self, inp, oup, mid_channels, stride, affine=True): + super().__init__(inp, oup, mid_channels, 3, stride, "dpdpdp", affine) diff --git a/examples/nas/oneshot/spos/evaluation.py b/examples/nas/oneshot/spos/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4a50c4acbdfcdf3fe93db28c9a9f1cce6dfc16 --- /dev/null +++ b/examples/nas/oneshot/spos/evaluation.py @@ -0,0 +1,156 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
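+
+# --- Illustration (added commentary, not part of the original example) -------
+# `CrossEntropyLabelSmooth` is imported from utils.py, which this diff does not
+# show. A minimal sketch of the label-smoothing loss it is assumed to compute
+# (the same variant as putils.LabelSmoothingLoss in the proxylessnas example:
+# probability 1 - eps on the true class, eps / (K - 1) on every other class):
+import torch
+import torch.nn.functional as F
+
+def _label_smooth_ce_sketch(logits, target, num_classes, eps=0.1):
+    log_probs = F.log_softmax(logits, dim=-1)
+    with torch.no_grad():
+        # smoothed one-hot target: 1 - eps on the label, eps / (K - 1) elsewhere
+        dist = torch.full_like(log_probs, eps / (num_classes - 1))
+        dist.scatter_(1, target.unsqueeze(1), 1.0 - eps)
+    # expected negative log-likelihood under the smoothed target distribution
+    return (-dist * log_probs).sum(dim=-1).mean()
+# ------------------------------------------------------------------------------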
+ +import os +import argparse +import logging +import random + +import numpy as np +import torch +import torch.nn as nn +import torchvision.transforms as transforms +import torchvision.datasets as datasets +from nni.retiarii import fixed_arch +from nni.retiarii.oneshot.pytorch.utils import AverageMeterGroup +from torch.utils.tensorboard import SummaryWriter + +from network import ShuffleNetV2OneShot +from utils import CrossEntropyLabelSmooth, accuracy, ToBGRTensor + +logger = logging.getLogger("nni.spos.scratch") + + +def train(epoch, model, criterion, optimizer, loader, writer, args): + model.train() + meters = AverageMeterGroup() + cur_lr = optimizer.param_groups[0]["lr"] + + for step, (x, y) in enumerate(loader): + x, y = x.to('cuda'), y.to('cuda') + cur_step = len(loader) * epoch + step + optimizer.zero_grad() + logits = model(x) + loss = criterion(logits, y) + loss.backward() + optimizer.step() + + metrics = accuracy(logits, y) + metrics["loss"] = loss.item() + meters.update(metrics) + + writer.add_scalar("lr", cur_lr, global_step=cur_step) + writer.add_scalar("loss/train", loss.item(), global_step=cur_step) + writer.add_scalar("acc1/train", metrics["acc1"], global_step=cur_step) + writer.add_scalar("acc5/train", metrics["acc5"], global_step=cur_step) + + if step % args.log_frequency == 0 or step + 1 == len(loader): + logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, + args.epochs, step + 1, len(loader), meters) + + logger.info("Epoch %d training summary: %s", epoch + 1, meters) + + +def validate(epoch, model, criterion, loader, writer, args): + model.eval() + meters = AverageMeterGroup() + with torch.no_grad(): + for step, (x, y) in enumerate(loader): + x, y = x.to('cuda'), y.to('cuda') + logits = model(x) + loss = criterion(logits, y) + metrics = accuracy(logits, y) + metrics["loss"] = loss.item() + meters.update(metrics) + + if step % args.log_frequency == 0 or step + 1 == len(loader): + logger.info("Epoch [%d/%d] Validation Step [%d/%d] %s", epoch + 1, + args.epochs, step + 1, len(loader), meters) + + writer.add_scalar("loss/test", meters.loss.avg, global_step=epoch) + writer.add_scalar("acc1/test", meters.acc1.avg, global_step=epoch) + writer.add_scalar("acc5/test", meters.acc5.avg, global_step=epoch) + + logger.info("Epoch %d validation: top1 = %f, top5 = %f", epoch + 1, meters.acc1.avg, meters.acc5.avg) + + +def dump_checkpoint(model, epoch, checkpoint_dir): + if isinstance(model, nn.DataParallel): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + if not os.path.exists(checkpoint_dir): + os.makedirs(checkpoint_dir) + dest_path = os.path.join(checkpoint_dir, "epoch_{}.pth.tar".format(epoch)) + logger.info("Saving model to %s", dest_path) + torch.save(state_dict, dest_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser("SPOS Training From Scratch") + parser.add_argument("--imagenet-dir", type=str, default="./data/imagenet") + parser.add_argument("--tb-dir", type=str, default="runs") + parser.add_argument("--architecture", type=str, default="architecture_final.json") + parser.add_argument("--workers", type=int, default=4) + parser.add_argument("--batch-size", type=int, default=1024) + parser.add_argument("--epochs", type=int, default=240) + parser.add_argument("--learning-rate", type=float, default=0.5) + parser.add_argument("--momentum", type=float, default=0.9) + parser.add_argument("--weight-decay", type=float, default=4E-5) + parser.add_argument("--label-smooth", type=float, default=0.1) + 
parser.add_argument("--log-frequency", type=int, default=10) + parser.add_argument("--lr-decay", type=str, default="linear") + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--spos-preprocessing", default=False, action="store_true") + parser.add_argument("--label-smoothing", type=float, default=0.1) + + args = parser.parse_args() + + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + np.random.seed(args.seed) + random.seed(args.seed) + torch.backends.cudnn.deterministic = True + + with fixed_arch(args.architecture): + model = ShuffleNetV2OneShot(affine=True) + model.cuda() + if torch.cuda.device_count() > 1: # exclude last gpu, saving for data preprocessing on gpu + model = nn.DataParallel(model, device_ids=list(range(0, torch.cuda.device_count() - 1))) + criterion = CrossEntropyLabelSmooth(1000, args.label_smoothing) + optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, + momentum=args.momentum, weight_decay=args.weight_decay) + if args.lr_decay == "linear": + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, + lambda step: (1.0 - step / args.epochs) + if step <= args.epochs else 0, + last_epoch=-1) + elif args.lr_decay == "cosine": + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, 1E-3) + else: + raise ValueError("'%s' not supported." % args.lr_decay) + writer = SummaryWriter(log_dir=args.tb_dir) + + if args.spos_preprocessing: + trans = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), + transforms.RandomHorizontalFlip(0.5), + ToBGRTensor(), + ]) + else: + trans = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.ToTensor() + ]) + train_dataset = datasets.ImageNet(args.imagenet_dir, split='train', transform=trans) + train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers) + val_dataset = datasets.ImageNet(args.imagenet_dir, split='val', transform=trans) + valid_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.workers) + for epoch in range(args.epochs): + train(epoch, model, criterion, optimizer, train_loader, writer, args) + validate(epoch, model, criterion, valid_loader, writer, args) + scheduler.step() + dump_checkpoint(model, epoch, "scratch_checkpoints") + + writer.close() diff --git a/examples/nas/oneshot/spos/network.py b/examples/nas/oneshot/spos/network.py new file mode 100644 index 0000000000000000000000000000000000000000..5ea3ffd2e13f55cf889ae8962a23ce3959dd9849 --- /dev/null +++ b/examples/nas/oneshot/spos/network.py @@ -0,0 +1,135 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import os +import pickle +import re + +import torch +import nni.retiarii.nn.pytorch as nn +from nni.retiarii.nn.pytorch import LayerChoice + +from blocks import ShuffleNetBlock, ShuffleXceptionBlock + + +class ShuffleNetV2OneShot(nn.Module): + block_keys = [ + 'shufflenet_3x3', + 'shufflenet_5x5', + 'shufflenet_7x7', + 'xception_3x3', + ] + + def __init__(self, input_size=224, first_conv_channels=16, last_conv_channels=1024, + n_classes=1000, affine=False): + super().__init__() + + assert input_size % 32 == 0 + self.stage_blocks = [4, 4, 8, 4] + self.stage_channels = [64, 160, 320, 640] + self._input_size = input_size + self._feature_map_size = input_size + self._first_conv_channels = first_conv_channels + self._last_conv_channels = last_conv_channels + self._n_classes = n_classes + self._affine = affine + self._layerchoice_count = 0 + + # building first layer + self.first_conv = nn.Sequential( + nn.Conv2d(3, first_conv_channels, 3, 2, 1, bias=False), + nn.BatchNorm2d(first_conv_channels, affine=affine), + nn.ReLU(inplace=True), + ) + self._feature_map_size //= 2 + + p_channels = first_conv_channels + features = [] + for num_blocks, channels in zip(self.stage_blocks, self.stage_channels): + features.extend(self._make_blocks(num_blocks, p_channels, channels)) + p_channels = channels + self.features = nn.Sequential(*features) + + self.conv_last = nn.Sequential( + nn.Conv2d(p_channels, last_conv_channels, 1, 1, 0, bias=False), + nn.BatchNorm2d(last_conv_channels, affine=affine), + nn.ReLU(inplace=True), + ) + self.globalpool = nn.AvgPool2d(self._feature_map_size) + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Sequential( + nn.Linear(last_conv_channels, n_classes, bias=False), + ) + + self._initialize_weights() + + def _make_blocks(self, blocks, in_channels, channels): + result = [] + for i in range(blocks): + stride = 2 if i == 0 else 1 + inp = in_channels if i == 0 else channels + oup = channels + + base_mid_channels = channels // 2 + mid_channels = int(base_mid_channels) # prepare for scale + self._layerchoice_count += 1 + choice_block = LayerChoice([ + ShuffleNetBlock(inp, oup, mid_channels=mid_channels, ksize=3, stride=stride, affine=self._affine), + ShuffleNetBlock(inp, oup, mid_channels=mid_channels, ksize=5, stride=stride, affine=self._affine), + ShuffleNetBlock(inp, oup, mid_channels=mid_channels, ksize=7, stride=stride, affine=self._affine), + ShuffleXceptionBlock(inp, oup, mid_channels=mid_channels, stride=stride, affine=self._affine) + ], label="LayerChoice" + str(self._layerchoice_count)) + result.append(choice_block) + + if stride == 2: + self._feature_map_size //= 2 + return result + + def forward(self, x): + bs = x.size(0) + x = self.first_conv(x) + x = self.features(x) + x = self.conv_last(x) + x = self.globalpool(x) + + x = self.dropout(x) + x = x.contiguous().view(bs, -1) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'first' in name: + torch.nn.init.normal_(m.weight, 0, 0.01) + else: + torch.nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1]) + if m.bias is not None: + torch.nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + if m.weight is not None: + torch.nn.init.constant_(m.weight, 1) + if m.bias is not None: + torch.nn.init.constant_(m.bias, 0.0001) + torch.nn.init.constant_(m.running_mean, 0) + elif isinstance(m, nn.BatchNorm1d): + torch.nn.init.constant_(m.weight, 1) + if m.bias is not None: + torch.nn.init.constant_(m.bias, 0.0001) + 
torch.nn.init.constant_(m.running_mean, 0) + elif isinstance(m, nn.Linear): + torch.nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + torch.nn.init.constant_(m.bias, 0) + + +def load_and_parse_state_dict(filepath="./data/checkpoint-150000.pth.tar"): + checkpoint = torch.load(filepath, map_location=torch.device("cpu")) + if "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + result = dict() + for k, v in checkpoint.items(): + if k.startswith("module."): + k = k[len("module."):] + result[k] = v + return result diff --git a/examples/nas/oneshot/spos/search.py b/examples/nas/oneshot/spos/search.py new file mode 100644 index 0000000000000000000000000000000000000000..a6896a3cdba77507758941a36bf1643cc548c0f4 --- /dev/null +++ b/examples/nas/oneshot/spos/search.py @@ -0,0 +1,85 @@ +# This file demonstrates the usage of multi-trial NAS with the SPOS search space. + +import click +import json +import nni.retiarii.evaluator.pytorch as pl +import nni.retiarii.strategy as strategy +from nni.retiarii import serialize +from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment +from torchvision import transforms +from torchvision.datasets import CIFAR10 +from nn_meter import load_latency_predictor + +from network import ShuffleNetV2OneShot +from utils import get_archchoice_by_model + + +class LatencyFilter: + def __init__(self, threshold, predictor, predictor_version=None, reverse=False): + """ + Filter the models according to predicted latency. + + Parameters + ---------- + threshold: `float` + the latency threshold + predictor, predictor_version: + determine the targeted device for latency prediction + reverse: `bool` + if reverse is `False`, the filter returns `True` when `latency < threshold`, + and the opposite otherwise + """ + self.predictors = load_latency_predictor(predictor, predictor_version) + self.threshold = threshold + self.reverse = reverse + + def __call__(self, ir_model): + latency = self.predictors.predict(ir_model, 'nni-ir') + if self.reverse: + return latency >= self.threshold + return latency < self.threshold + + +@click.command() +@click.option('--port', default=8081, help='On which port the experiment is run.') +def _main(port): + base_model = ShuffleNetV2OneShot(32) + base_predictor = 'cortexA76cpu_tflite21' + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize([0.49139968, 0.48215827, 0.44653124], [0.24703233, 0.24348505, 0.26158768]) + ] + # FIXME + # CIFAR10 is used here temporarily. + # Ideally we should load weights from the supernet and evaluate on ImageNet.
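+ # ``serialize`` records the dataset class together with its constructor arguments, + # so each trial process can re-instantiate the dataset on its own rather than + # receive a live object from this process.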
+ train_dataset = serialize(CIFAR10, 'data', train=True, download=True, transform=transforms.Compose(transf + normalize)) + test_dataset = serialize(CIFAR10, 'data', train=False, transform=transforms.Compose(normalize)) + + trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=64), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=64), + max_epochs=2, gpus=1) + + simple_strategy = strategy.RegularizedEvolution(model_filter=LatencyFilter(threshold=100, predictor=base_predictor), + sample_size=1, population_size=2, cycles=2) + exp = RetiariiExperiment(base_model, trainer, strategy=simple_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.trial_concurrency = 2 + # exp_config.max_trial_number = 2 + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = False + exp_config.execution_engine = 'base' + exp_config.dummy_input = [1, 3, 32, 32] + + exp.run(exp_config, port) + + print('Exported models:') + for i, model in enumerate(exp.export_top_models(formatter='dict')): + print(model) + with open(f'architecture_final_{i}.json', 'w') as f: + json.dump(get_archchoice_by_model(model), f, indent=4) + + +if __name__ == '__main__': + _main() diff --git a/examples/nas/oneshot/spos/supernet.py b/examples/nas/oneshot/spos/supernet.py new file mode 100644 index 0000000000000000000000000000000000000000..902d2a490a389ba63bc35fa2679936fa68e77578 --- /dev/null +++ b/examples/nas/oneshot/spos/supernet.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import argparse +import logging +import random + +import numpy as np +import torch +import torch.nn as nn +import torchvision.transforms as transforms +import torchvision.datasets as datasets +from nni.retiarii.oneshot.pytorch import SinglePathTrainer + +from network import ShuffleNetV2OneShot, load_and_parse_state_dict +from utils import CrossEntropyLabelSmooth, accuracy, ToBGRTensor + +logger = logging.getLogger("nni.spos.supernet") + +if __name__ == "__main__": + parser = argparse.ArgumentParser("SPOS Supernet Training") + parser.add_argument("--imagenet-dir", type=str, default="./data/imagenet") + parser.add_argument("--load-checkpoint", action="store_true", default=False) + parser.add_argument("--spos-preprocessing", action="store_true", default=False, + help="When true, image values will range from 0 to 255 and use BGR " + "(as in original repo).") + parser.add_argument("--workers", type=int, default=4) + parser.add_argument("--batch-size", type=int, default=768) + parser.add_argument("--epochs", type=int, default=120) + parser.add_argument("--learning-rate", type=float, default=0.5) + parser.add_argument("--momentum", type=float, default=0.9) + parser.add_argument("--weight-decay", type=float, default=4E-5) + parser.add_argument("--log-frequency", type=int, default=10) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--label-smoothing", type=float, default=0.1) + + args = parser.parse_args() + + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + np.random.seed(args.seed) + random.seed(args.seed) + torch.backends.cudnn.deterministic = True + + model = ShuffleNetV2OneShot() + if args.load_checkpoint: + if not args.spos_preprocessing: + logger.warning("You might want to use SPOS preprocessing if you are loading their checkpoints.") + # load the checkpoint's state_dict and merge it into the current weights + model_dict = model.state_dict() +
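# keys present in the checkpoint overwrite the fresh initialization; anything the checkpoint lacks keeps its initial value, so load_state_dict below sees a complete dict +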
model_dict.update(load_and_parse_state_dict()) + model.load_state_dict(model_dict) + logger.info(f'Model loaded from ./data/checkpoint-150000.pth.tar') + model.cuda() + if torch.cuda.device_count() > 1: # exclude last gpu, saving for data preprocessing on gpu + model = nn.DataParallel(model, device_ids=list(range(0, torch.cuda.device_count() - 1))) + criterion = CrossEntropyLabelSmooth(1000, args.label_smoothing) + optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, + momentum=args.momentum, weight_decay=args.weight_decay) + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, + lambda step: (1.0 - step / args.epochs) + if step <= args.epochs else 0, + last_epoch=-1) + if args.spos_preprocessing: + trans = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), + transforms.RandomHorizontalFlip(0.5), + ToBGRTensor(), + ]) + else: + trans = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.ToTensor() + ]) + train_dataset = datasets.ImageNet(args.imagenet_dir, split='train', transform=trans) + val_dataset = datasets.ImageNet(args.imagenet_dir, split='val', transform=trans) + trainer = SinglePathTrainer(model, criterion, accuracy, optimizer, + args.epochs, train_dataset, val_dataset, + batch_size=args.batch_size, + log_frequency=args.log_frequency, workers=args.workers) + trainer.fit() diff --git a/examples/nas/oneshot/spos/utils.py b/examples/nas/oneshot/spos/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a5800aa5bf3a38ef355f0aa80b0d394946df49a9 --- /dev/null +++ b/examples/nas/oneshot/spos/utils.py @@ -0,0 +1,64 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import numpy as np +import PIL + + +class CrossEntropyLabelSmooth(nn.Module): + + def __init__(self, num_classes, epsilon): + super(CrossEntropyLabelSmooth, self).__init__() + self.num_classes = num_classes + self.epsilon = epsilon + self.logsoftmax = nn.LogSoftmax(dim=1) + + def forward(self, inputs, targets): + log_probs = self.logsoftmax(inputs) + targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1) + targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes + loss = (-targets * log_probs).mean(0).sum() + return loss + + +def accuracy(output, target, topk=(1, 5)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res + + +class ToBGRTensor(object): + + def __call__(self, img): + assert isinstance(img, (np.ndarray, PIL.Image.Image)) + if isinstance(img, PIL.Image.Image): + img = np.asarray(img) + img = img[:,:, ::-1] # 2 BGR + img = np.transpose(img, [2, 0, 1]) # 2 (3, H, W) + img = np.ascontiguousarray(img) + img = torch.from_numpy(img).float() + return img + + +def get_archchoice_by_model(model): + result = {} + for k, v in model.items(): + assert k in v + result[k] = model[k].split("_")[-1] + return result diff --git a/examples/nas/search_space_zoo/darts_example.py b/examples/nas/search_space_zoo/darts_example.py new file mode 100644 index 
0000000000000000000000000000000000000000..3526d7cc9045a88b62aea5fde8f4caf1cd9497b2 --- /dev/null +++ b/examples/nas/search_space_zoo/darts_example.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import time +from argparse import ArgumentParser + +import torch +import torch.nn as nn + +import datasets +from nni.nas.pytorch.callbacks import ArchitectureCheckpoint, LRSchedulerCallback +from nni.algorithms.nas.pytorch.darts import DartsTrainer +from utils import accuracy + +from nni.nas.pytorch.search_space_zoo import DartsCell +from darts_stack_cells import DartsStackedCells + +logger = logging.getLogger('nni') + +if __name__ == "__main__": + parser = ArgumentParser("darts") + parser.add_argument("--layers", default=8, type=int) + parser.add_argument("--batch-size", default=64, type=int) + parser.add_argument("--log-frequency", default=10, type=int) + parser.add_argument("--epochs", default=50, type=int) + parser.add_argument("--channels", default=16, type=int) + parser.add_argument("--unrolled", default=False, action="store_true") + parser.add_argument("--visualization", default=False, action="store_true") + args = parser.parse_args() + + dataset_train, dataset_valid = datasets.get_dataset("cifar10") + + model = DartsStackedCells(3, args.channels, 10, args.layers, DartsCell) + criterion = nn.CrossEntropyLoss() + + optim = torch.optim.SGD(model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, args.epochs, eta_min=0.001) + + trainer = DartsTrainer(model, + loss=criterion, + metrics=lambda output, target: accuracy(output, target, topk=(1,)), + optimizer=optim, + num_epochs=args.epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + batch_size=args.batch_size, + log_frequency=args.log_frequency, + unrolled=args.unrolled, + callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")]) + if args.visualization: + trainer.enable_visualization() + trainer.train() diff --git a/examples/nas/search_space_zoo/darts_stack_cells.py b/examples/nas/search_space_zoo/darts_stack_cells.py new file mode 100644 index 0000000000000000000000000000000000000000..04277acf439cedff7d76903335c446b83f3c1482 --- /dev/null +++ b/examples/nas/search_space_zoo/darts_stack_cells.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch.nn as nn +from nni.nas.pytorch.search_space_zoo.darts_ops import DropPath + + +class DartsStackedCells(nn.Module): + """ + Built-in DARTS search space. + Compared to the DARTS example, DartsStackedCells removes the auxiliary head, which + is considered a training trick rather than part of the model. + + Attributes + --- + in_channels: int + the number of input channels + channels: int + the number of initial channels expected + n_classes: int + the number of classes for final classification + n_layers: int + the number of cells contained in this network + factory_func: function + a callable that constructs the demanded cell structure;
+ it is invoked with the ``__init__`` parameters required by the cell class (see ``DartsCell`` for details) + n_nodes: int + the number of nodes contained in each cell + stem_multiplier: int + multiplier applied to ``channels`` to obtain the stem's output channels + """ + + def __init__(self, in_channels, channels, n_classes, n_layers, factory_func, n_nodes=4, + stem_multiplier=3): + super().__init__() + self.in_channels = in_channels + self.channels = channels + self.n_classes = n_classes + self.n_layers = n_layers + + c_cur = stem_multiplier * self.channels + self.stem = nn.Sequential( + nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(c_cur) + ) + + # for the first cell, stem is used for both s0 and s1 + # [!] channels_pp and channels_p are output channel sizes, while c_cur is the input channel size. + channels_pp, channels_p, c_cur = c_cur, c_cur, channels + + self.cells = nn.ModuleList() + reduction_p, reduction = False, False + for i in range(n_layers): + reduction_p, reduction = reduction, False + # Reduce the feature map size and double the channels at 1/3 and 2/3 of the layers. + if i in [n_layers // 3, 2 * n_layers // 3]: + c_cur *= 2 + reduction = True + + cell = factory_func(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction) + self.cells.append(cell) + c_cur_out = c_cur * n_nodes + channels_pp, channels_p = channels_p, c_cur_out + + self.gap = nn.AdaptiveAvgPool2d(1) + self.linear = nn.Linear(channels_p, n_classes) + + def forward(self, x): + s0 = s1 = self.stem(x) + + for cell in self.cells: + s0, s1 = s1, cell(s0, s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + + return logits + + def drop_path_prob(self, p): + for module in self.modules(): + if isinstance(module, DropPath): + module.p = p diff --git a/examples/nas/search_space_zoo/datasets.py b/examples/nas/search_space_zoo/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..f19f5691a130df12e900d386473c75a2c9e3f102 --- /dev/null +++ b/examples/nas/search_space_zoo/datasets.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR10 + + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0.
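+ # the square patch is now zeroed; the mask is converted to a tensor below so it + # can be expanded across channels and multiplied into the image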
+ mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + + +def get_dataset(cls, cutout_length=0): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + cutout = [] + if cutout_length > 0: + cutout.append(Cutout(cutout_length)) + + train_transform = transforms.Compose(transf + normalize + cutout) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid diff --git a/examples/nas/search_space_zoo/enas_macro_example.py b/examples/nas/search_space_zoo/enas_macro_example.py new file mode 100644 index 0000000000000000000000000000000000000000..86aced1ecb5b2de855ace5a29b47373364aaab50 --- /dev/null +++ b/examples/nas/search_space_zoo/enas_macro_example.py @@ -0,0 +1,87 @@ +import torch +import logging +import torch.nn as nn +import torch.nn.functional as F + +from argparse import ArgumentParser +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from nni.nas.pytorch import mutables +from nni.algorithms.nas.pytorch import enas +from utils import accuracy, reward_accuracy +from nni.nas.pytorch.callbacks import (ArchitectureCheckpoint, + LRSchedulerCallback) +from nni.nas.pytorch.search_space_zoo import ENASMacroLayer +from nni.nas.pytorch.search_space_zoo import ENASMacroGeneralModel + +logger = logging.getLogger('nni') + + +def get_dataset(cls): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + + train_transform = transforms.Compose(transf + normalize) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid + + +class FactorizedReduce(nn.Module): + def __init__(self, C_in, C_out, affine=False): + super().__init__() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out + + +if __name__ == "__main__": + parser = ArgumentParser("enas") + parser.add_argument("--batch-size", default=128, type=int) + parser.add_argument("--log-frequency", default=10, type=int) + parser.add_argument("--epochs", default=None, type=int, help="Number of epochs (default: macro 310, micro 150)") + parser.add_argument("--visualization", default=False, action="store_true") + args = parser.parse_args() + + dataset_train, dataset_valid = get_dataset("cifar10") + model = ENASMacroGeneralModel() + num_epochs = args.epochs or 310 + mutator = None + + criterion = nn.CrossEntropyLoss() + optimizer = 
torch.optim.SGD(model.parameters(), 0.05, momentum=0.9, weight_decay=1.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=0.001) + trainer = enas.EnasTrainer(model, + loss=criterion, + metrics=accuracy, + reward_function=reward_accuracy, + optimizer=optimizer, + callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")], + batch_size=args.batch_size, + num_epochs=num_epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + log_frequency=args.log_frequency, + mutator=mutator) + if args.visualization: + trainer.enable_visualization() + trainer.train() diff --git a/examples/nas/search_space_zoo/enas_micro_example.py b/examples/nas/search_space_zoo/enas_micro_example.py new file mode 100644 index 0000000000000000000000000000000000000000..eec4942ac87648362e9e5a4647814104abce3991 --- /dev/null +++ b/examples/nas/search_space_zoo/enas_micro_example.py @@ -0,0 +1,130 @@ +import torch +import logging +import torch.nn as nn +import torch.nn.functional as F + +from argparse import ArgumentParser +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from nni.algorithms.nas.pytorch import enas +from utils import accuracy, reward_accuracy +from nni.nas.pytorch.callbacks import (ArchitectureCheckpoint, + LRSchedulerCallback) + +from nni.nas.pytorch.search_space_zoo import ENASMicroLayer + +logger = logging.getLogger('nni') + + +def get_dataset(cls): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + + train_transform = transforms.Compose(transf + normalize) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid + + +class MicroNetwork(nn.Module): + def __init__(self, num_layers=2, num_nodes=5, out_channels=24, in_channels=3, num_classes=10, + dropout_rate=0.0): + super().__init__() + self.num_layers = num_layers + + self.stem = nn.Sequential( + nn.Conv2d(in_channels, out_channels * 3, 3, 1, 1, bias=False), + nn.BatchNorm2d(out_channels * 3) + ) + + pool_distance = self.num_layers // 3 + pool_layers = [pool_distance, 2 * pool_distance + 1] + self.dropout = nn.Dropout(dropout_rate) + + self.layers = nn.ModuleList() + c_pp = c_p = out_channels * 3 + c_cur = out_channels + for layer_id in range(self.num_layers + 2): + reduction = False + if layer_id in pool_layers: + c_cur, reduction = c_p * 2, True + self.layers.append(ENASMicroLayer(num_nodes, c_pp, c_p, c_cur, reduction)) + if reduction: + c_pp = c_p = c_cur + c_pp, c_p = c_p, c_cur + + self.gap = nn.AdaptiveAvgPool2d(1) + self.dense = nn.Linear(c_cur, num_classes) + + self.reset_parameters() + + def reset_parameters(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + def forward(self, x): + bs = x.size(0) + prev = cur = self.stem(x) + # aux_logits = None + + for layer in self.layers: + prev, cur = layer(prev, cur) + + cur = self.gap(F.relu(cur)).view(bs, -1) + cur = self.dropout(cur) + logits = self.dense(cur) + + # if aux_logits is not None: + # return logits, aux_logits + return logits + + 
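+# Note: the mutable ENASMicroLayer choices above only become concrete once a mutator +# is attached (EnasMutator below) or a fixed architecture is applied, so the network +# is not meant to be run standalone before that step. +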
+if __name__ == "__main__": + parser = ArgumentParser("enas") + parser.add_argument("--batch-size", default=128, type=int) + parser.add_argument("--log-frequency", default=10, type=int) + parser.add_argument("--epochs", default=None, type=int, help="Number of epochs (default: macro 310, micro 150)") + parser.add_argument("--visualization", default=False, action="store_true") + args = parser.parse_args() + + dataset_train, dataset_valid = get_dataset("cifar10") + + model = MicroNetwork(num_layers=6, out_channels=20, num_nodes=5, dropout_rate=0.1) + num_epochs = args.epochs or 150 + mutator = enas.EnasMutator(model, tanh_constant=1.1, cell_exit_extra_step=True) + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), 0.05, momentum=0.9, weight_decay=1.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=0.001) + + trainer = enas.EnasTrainer(model, + loss=criterion, + metrics=accuracy, + reward_function=reward_accuracy, + optimizer=optimizer, + callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")], + batch_size=args.batch_size, + num_epochs=num_epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + log_frequency=args.log_frequency, + mutator=mutator) + if args.visualization: + trainer.enable_visualization() + trainer.train() + diff --git a/examples/nas/search_space_zoo/fixed-architecture.json b/examples/nas/search_space_zoo/fixed-architecture.json new file mode 100644 index 0000000000000000000000000000000000000000..ae728408748b1741e4d3f8ff0b0d790136918b0a --- /dev/null +++ b/examples/nas/search_space_zoo/fixed-architecture.json @@ -0,0 +1,8 @@ +{ + "0_1": "avg_pool_3x3", + "0_2": "conv_1x1", + "1_2": "skip_connect", + "0_3": "conv_1x1", + "1_3": "skip_connect", + "2_3": "skip_connect" +} diff --git a/examples/nas/search_space_zoo/nasbench201.py b/examples/nas/search_space_zoo/nasbench201.py new file mode 100644 index 0000000000000000000000000000000000000000..38263ac932c1aef87d3617fcb8e41571e67d90dc --- /dev/null +++ b/examples/nas/search_space_zoo/nasbench201.py @@ -0,0 +1,205 @@ +import argparse +import json +import logging +import os +import pprint + +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import DataLoader +from nni.algorithms.nas.pytorch.darts import DartsTrainer +from nni.algorithms.nas.pytorch import enas +from nni.nas.pytorch.utils import AverageMeterGroup +from nni.nas.pytorch.nasbench201 import NASBench201Cell +from nni.nas.pytorch.fixed import apply_fixed_architecture +from nni.nas.benchmarks.nasbench201 import query_nb201_trial_stats +from nni.nas.pytorch.callbacks import ArchitectureCheckpoint, LRSchedulerCallback +from utils import accuracy, reward_accuracy + +import datasets + +logger = logging.getLogger('nni') + +class ReLUConvBN(nn.Module): + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, + bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True): + super(ReLUConvBN, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_out, kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(C_out, affine=bn_affine, momentum=bn_momentum, + track_running_stats=bn_track_running_stats) + ) + + def forward(self, x): + return self.op(x) + + +class ResNetBasicBlock(nn.Module): + def __init__(self, inplanes, planes, stride, bn_affine=True, + bn_momentum=0.1, 
bn_track_running_stats=True): + super(ResNetBasicBlock, self).__init__() + assert stride == 1 or stride == 2, "invalid stride {:}".format(stride) + self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, bn_affine, bn_momentum, bn_track_running_stats) + self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1, bn_affine, bn_momentum, bn_track_running_stats) + if stride == 2: + self.downsample = nn.Sequential( + nn.AvgPool2d(kernel_size=2, stride=2, padding=0), + nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)) + elif inplanes != planes: + self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, bn_affine, bn_momentum, bn_track_running_stats) + else: + self.downsample = None + self.in_dim = inplanes + self.out_dim = planes + self.stride = stride + self.num_conv = 2 + + def forward(self, inputs): + basicblock = self.conv_a(inputs) + basicblock = self.conv_b(basicblock) + + if self.downsample is not None: + inputs = self.downsample(inputs) + return inputs + basicblock + + +class NASBench201Network(nn.Module): + def __init__(self, stem_out_channels, num_modules_per_stack, bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True): + super(NASBench201Network, self).__init__() + self.channels = C = stem_out_channels + self.num_modules = N = num_modules_per_stack + self.num_labels = 10 + + self.bn_momentum = bn_momentum + self.bn_affine = bn_affine + self.bn_track_running_stats = bn_track_running_stats + + self.stem = nn.Sequential( + nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(C, momentum=self.bn_momentum) + ) + + layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N + layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N + + C_prev = C + self.cells = nn.ModuleList() + for i, (C_curr, reduction) in enumerate(zip(layer_channels, layer_reductions)): + if reduction: + cell = ResNetBasicBlock(C_prev, C_curr, 2, self.bn_affine, self.bn_momentum, self.bn_track_running_stats) + else: + cell = NASBench201Cell(i, C_prev, C_curr, 1, self.bn_affine, self.bn_momentum, self.bn_track_running_stats) + self.cells.append(cell) + C_prev = C_curr + + self.lastact = nn.Sequential( + nn.BatchNorm2d(C_prev, momentum=self.bn_momentum), + nn.ReLU(inplace=True) + ) + self.global_pooling = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(C_prev, self.num_labels) + + def forward(self, inputs): + feature = self.stem(inputs) + for cell in self.cells: + feature = cell(feature) + + out = self.lastact(feature) + out = self.global_pooling(out) + out = out.view(out.size(0), -1) + logits = self.classifier(out) + + return logits + + +def train(args, model, train_dataloader, valid_dataloader, criterion, optimizer, device): + model = model.to(device) + model.train() + for epoch in range(args.epochs): + for batch_idx, (data, target) in enumerate(train_dataloader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + + if batch_idx % args.log_frequency == 0: + logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_dataloader.dataset), + 100. 
* batch_idx / len(train_dataloader), loss.item())) + model.eval() + correct = 0 + test_loss = 0.0 + for data, target in valid_dataloader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += criterion(output, target).item() + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + test_loss /= len(valid_dataloader.dataset) + accuracy = 100. * correct / len(valid_dataloader.dataset) + logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, + len(valid_dataloader.dataset), accuracy)) + model.train() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser("nb201") + parser.add_argument('--stem_out_channels', default=16, type=int) + parser.add_argument('--unrolled', default=False, action='store_true') + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=50, type=int) + parser.add_argument('--num_modules_per_stack', default=5, type=int) + parser.add_argument('--log-frequency', default=10, type=int) + parser.add_argument('--bn_momentum', default=0.1, type=float) + parser.add_argument('--bn_affine', default=True, type=bool) + parser.add_argument('--bn_track_running_stats', default=True, type=bool) + parser.add_argument('--arch', default=None, help='JSON file describing an architecture in the NAS-Bench-201 format') + parser.add_argument('--visualization', default=False, action='store_true') + args = parser.parse_args() + + dataset_train, dataset_valid = datasets.get_dataset("cifar10") + model = NASBench201Network(stem_out_channels=args.stem_out_channels, + num_modules_per_stack=args.num_modules_per_stack, + bn_affine=args.bn_affine, + bn_momentum=args.bn_momentum, + bn_track_running_stats=args.bn_track_running_stats) + + optim = torch.optim.SGD(model.parameters(), 0.025) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, args.epochs, eta_min=0.001) + criterion = nn.CrossEntropyLoss() + + if args.arch is not None: + logger.info('model retraining...') + with open(args.arch, 'r') as f: + arch = json.load(f) + for trial in query_nb201_trial_stats(arch, 200, 'cifar100'): + pprint.pprint(trial) + apply_fixed_architecture(model, args.arch) + dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=0) + dataloader_valid = DataLoader(dataset_valid, batch_size=args.batch_size, shuffle=True, num_workers=0) + train(args, model, dataloader_train, dataloader_valid, criterion, optim, + torch.device('cuda' if torch.cuda.is_available() else 'cpu')) + exit(0) + + trainer = enas.EnasTrainer(model, + loss=criterion, + metrics=lambda output, target: accuracy(output, target, topk=(1,)), + reward_function=reward_accuracy, + optimizer=optim, + callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")], + batch_size=args.batch_size, + num_epochs=args.epochs, + dataset_train=dataset_train, + dataset_valid=dataset_valid, + log_frequency=args.log_frequency) + + if args.visualization: + trainer.enable_visualization() + trainer.train() diff --git a/examples/nas/search_space_zoo/utils.py b/examples/nas/search_space_zoo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f680db479f687cfbf3a1e71c8f6553c326f6a9c6 --- /dev/null +++ b/examples/nas/search_space_zoo/utils.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license.
+ +import torch + + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res + + +def reward_accuracy(output, target, topk=(1,)): + batch_size = target.size(0) + _, predicted = torch.max(output.data, 1) + return (predicted == target).sum().item() / batch_size diff --git a/examples/notebooks/Retiarii_example_multi-trial_NAS.ipynb b/examples/notebooks/Retiarii_example_multi-trial_NAS.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..72b24626899158c3affe9a204f6138efadc51845 --- /dev/null +++ b/examples/notebooks/Retiarii_example_multi-trial_NAS.ipynb @@ -0,0 +1,953 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retiarii Example - Multi-trial NAS" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This example will show Retiarii's ability to **express** and **explore** the model space for Neural Architecture Search and Hyper-Parameter Tuning in a simple way. The video demo is in [YouTube](https://youtu.be/eQUlABCO0o8) and [Bilibili](https://www.bilibili.com/video/BV14h411v7kZ/).\n", + "\n", + "Let's start the journey with Retiarii!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Express the Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Model space is defined by users to express a set of models that they want to explore, which contains potentially good-performing models. In Retiarii framework, a model space is defined with two parts: a base model and possible mutations on the base model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 1.1: Define the Base Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Defining a base model is almost the same as defining a PyTorch (or TensorFlow) model. Usually, you only need to replace the code ``import torch.nn as nn`` with ``import nni.retiarii.nn.pytorch as nn`` to use NNI wrapped PyTorch modules. Below is a very simple example of defining a base model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "import nni.retiarii.nn.pytorch as nn\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.conv1 = nn.Conv2d(3, 6, 3, padding=1)\n", + " self.pool = nn.MaxPool2d(2, 2)\n", + " self.conv2 = nn.Conv2d(6, 16, 3, padding=1)\n", + " self.conv3 = nn.Conv2d(16, 16, 1)\n", + "\n", + " self.bn = nn.BatchNorm2d(16)\n", + "\n", + " self.gap = nn.AdaptiveAvgPool2d(4)\n", + " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", + " self.fc2 = nn.Linear(120, 84)\n", + " self.fc3 = nn.Linear(84, 10)\n", + "\n", + " def forward(self, x):\n", + " bs = x.size(0)\n", + "\n", + " x = self.pool(F.relu(self.conv1(x)))\n", + " x0 = F.relu(self.conv2(x))\n", + " x1 = F.relu(self.conv3(x0))\n", + "\n", + " x1 += x0\n", + " x = self.pool(self.bn(x1))\n", + "\n", + " x = self.gap(x).view(bs, -1)\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + " \n", + "model = Net()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 1.2: Define the Model Mutations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A base model is only one concrete model, not a model space. NNI provides APIs and primitives for users to express how the base model can be mutated, i.e., a model space that includes many models. The following will use the inline mutation API ``LayerChoice`` to choose a layer from candidate operations and ``InputChoice`` to try out a skip connection." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "import nni.retiarii.nn.pytorch as nn\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " # self.conv1 = nn.Conv2d(3, 6, 3, padding=1)\n", + " self.conv1 = nn.LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)])\n", + " self.pool = nn.MaxPool2d(2, 2)\n", + " # self.conv2 = nn.Conv2d(6, 16, 3, padding=1)\n", + " self.conv2 = nn.LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)])\n", + " self.conv3 = nn.Conv2d(16, 16, 1)\n", + "\n", + " self.skipconnect = nn.InputChoice(n_candidates=2)\n", + " self.bn = nn.BatchNorm2d(16)\n", + "\n", + " self.gap = nn.AdaptiveAvgPool2d(4)\n", + " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", + " self.fc2 = nn.Linear(120, 84)\n", + " self.fc3 = nn.Linear(84, 10)\n", + "\n", + " def forward(self, x):\n", + " bs = x.size(0)\n", + "\n", + " x = self.pool(F.relu(self.conv1(x)))\n", + " x0 = F.relu(self.conv2(x))\n", + " x1 = F.relu(self.conv3(x0))\n", + "\n", + " x1 = self.skipconnect([x1, x1+x0])\n", + " x = self.pool(self.bn(x1))\n", + "\n", + " x = self.gap(x).view(bs, -1)\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + " \n", + "model = Net()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Explore the Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will demo the **multi-trial** NAS method first. In the multi-trial NAS process, the search strategy repeatedly generates new models, and the model evaluator trains and validates each generated model. 
The obtained performance of a generated model is collected and sent to the search strategy for generating better models. \n", + "\n", + "Users can choose a proper search strategy to explore the model space, and use a chosen or user-defined model evaluator to evaluate the performance of each sampled model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.1: Choose or Write a Search Strategy\n", + "\n", + "Currently, Retiarii supports many common strategies, such as Random, Regularized Evolution, and TPE. With Retiarii's APIs, you can also customize a new strategy easily; here we use the TPE strategy as an example." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import nni.retiarii.strategy as strategy\n", + "\n", + "simple_strategy = strategy.TPEStrategy() # choice: Random, GridSearch, RegularizedEvolution, TPEStrategy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.2: Choose or Write a Model Evaluator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model evaluator should correctly identify the use case of the model and the optimization goal. For example, for a classification task, a dataset is needed, the loss function could be cross entropy, and the optimized metric could be accuracy.\n", + "\n", + "Retiarii provides two ways for users to write a new model evaluator. In the context of PyTorch, Retiarii has provided two built-in model evaluators, designed for simple use cases: classification and regression. These two evaluators are built upon the awesome library PyTorch-Lightning. Here we take a classification task on the CIFAR10 dataset as an example." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:15:27] INFO (lightning/MainThread) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:15:27] INFO (lightning/MainThread) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:15:27] INFO (lightning/MainThread) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + } + ], + "source": [ + "from torchvision import transforms\n", + "from torchvision.datasets import CIFAR10\n", + "from nni.retiarii import serialize\n", + "import nni.retiarii.evaluator.pytorch.lightning as pl\n", + "\n", + "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", + "train_dataset = serialize(CIFAR10, root=\"./data\", train=True, download=True, transform=transform)\n", + "test_dataset = serialize(CIFAR10, root=\"./data\", train=False, download=True, transform=transform)\n", + "\n", + "trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=64),\n", + " 
val_dataloaders=pl.DataLoader(test_dataset, batch_size=64),\n", + " max_epochs=2, gpus=[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.3: Configure the Experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After all the above are prepared, it is time to configure an experiment to do the model search. The basic experiment configuration is as follows, and advanced configuration is documented on [this page](https://nni.readthedocs.io/en/stable/reference/experiment_config.html).\n", + "\n", + "NNI allows users to run experiments in different training platforms to speed up the search, like Local Machine, Remote Servers, OpenPAI, Kubeflow, FrameworkController on K8S, DLWorkspace, Azure Machine Learning, AdaptDL, other cloud options, and even Hybrid mode. Here we use local mode with GPU acceleration." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment\n", + "\n", + "exp = RetiariiExperiment(model, trainer, [], simple_strategy)\n", + "\n", + "exp_config = RetiariiExeConfig('local')\n", + "exp_config.experiment_name = 'Retiarii example'\n", + "exp_config.trial_concurrency = 2\n", + "exp_config.max_trial_number = 10\n", + "exp_config.trial_gpu_number = 2\n", + "exp_config.max_experiment_duration = '5m'\n", + "exp_config.execution_engine = 'base'\n", + "exp_config.training_service.use_active_gpu = True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.4: Run and View the Experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can launch the experiment now! " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:15:34] INFO (nni.experiment/MainThread) Creating experiment, Experiment ID: d9cseb3g\n", + "[2021-06-07 11:15:34] INFO (nni.experiment/MainThread) Connecting IPC pipe...\n", + "[2021-06-07 11:15:34] INFO (nni.experiment/MainThread) Statring web server...\n", + "[2021-06-07 11:15:35] INFO (nni.experiment/MainThread) Setting up...\n", + "[2021-06-07 11:15:36] INFO (nni.runtime.msg_dispatcher_base/Thread-6) Dispatcher started\n", + "[2021-06-07 11:15:36] INFO (nni.retiarii.experiment.pytorch/MainThread) Web UI URLs: http://127.0.0.1:8745\n", + "[2021-06-07 11:15:36] INFO (nni.retiarii.experiment.pytorch/MainThread) Start strategy...\n", + "[2021-06-07 11:15:36] INFO (nni.retiarii.strategy.tpe_strategy/MainThread) TPE strategy has been started.\n", + "[2021-06-07 11:15:36] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.001164 seconds\n", + "[2021-06-07 11:15:36] INFO (hyperopt.tpe/MainThread) TPE using 0 trials\n", + "[2021-06-07 11:15:36] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.001256 seconds\n", + "[2021-06-07 11:15:36] INFO (hyperopt.tpe/MainThread) TPE using 0 trials\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:31] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:31] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": 
[ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:31] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:16:33] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002677 seconds\n", + "[2021-06-07 11:16:33] INFO (hyperopt.tpe/MainThread) TPE using 1/1 trials with best loss 0.626600\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:36] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:36] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:16:36] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:16:37] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002730 seconds\n", + "[2021-06-07 11:16:37] INFO (hyperopt.tpe/MainThread) TPE using 1/1 trials with best loss 0.626600\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:17:26] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:17:26] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:17:26] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:17:27] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.003051 seconds\n", + "[2021-06-07 11:17:27] INFO (hyperopt.tpe/MainThread) TPE using 2/2 trials with best loss 0.594700\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:17:31] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:17:31] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"[2021-06-07 11:17:31] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:17:31] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002537 seconds\n", + "[2021-06-07 11:17:31] INFO (hyperopt.tpe/MainThread) TPE using 3/3 trials with best loss 0.594700\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:21] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:21] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:21] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:18:22] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002532 seconds\n", + "[2021-06-07 11:18:22] INFO (hyperopt.tpe/MainThread) TPE using 4/4 trials with best loss 0.594700\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:26] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:26] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:18:26] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:18:28] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002615 seconds\n", + "[2021-06-07 11:18:28] INFO (hyperopt.tpe/MainThread) TPE using 6/6 trials with best loss 0.594700\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:16] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:16] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:16] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and 
verified\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:19:18] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002395 seconds\n", + "[2021-06-07 11:19:18] INFO (hyperopt.tpe/MainThread) TPE using 7/7 trials with best loss 0.594700\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:21] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:21] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:19:21] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:19:23] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002959 seconds\n", + "[2021-06-07 11:19:23] INFO (hyperopt.tpe/MainThread) TPE using 7/7 trials with best loss 0.594700\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:12] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:12] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:12] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:20:13] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.003336 seconds\n", + "[2021-06-07 11:20:13] INFO (hyperopt.tpe/MainThread) TPE using 8/8 trials with best loss 0.594700\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: True\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:22] INFO (lightning/Thread-5) GPU available: True, used: True\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:22] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:20:22] INFO (lightning/Thread-5) LOCAL_RANK: 0 - HIP_VISIBLE_DEVICES: [0,1,2,3]\n", + "[2021-06-07 11:20:22] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002093 seconds\n", + "[2021-06-07 11:20:22] INFO (hyperopt.tpe/MainThread) TPE using 
9/9 trials with best loss 0.593200\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n", + "[2021-06-07 11:20:26] INFO (nni.retiarii.experiment.pytorch/Thread-7) Stopping experiment, please wait...\n", + "[2021-06-07 11:20:26] INFO (nni.runtime.msg_dispatcher_base/Thread-6) Dispatcher exiting...\n", + "[2021-06-07 11:20:26] INFO (nni.retiarii.experiment.pytorch/MainThread) Strategy exit\n", + "[2021-06-07 11:20:26] INFO (nni.retiarii.experiment.pytorch/MainThread) Waiting for experiment to become DONE (you can ctrl+c if there is no running trial jobs)...\n", + "[2021-06-07 11:20:27] INFO (nni.retiarii.experiment.pytorch/Thread-7) Experiment stopped\n", + "[2021-06-07 11:20:29] INFO (nni.runtime.msg_dispatcher_base/Thread-6) Dispatcher terminiated\n" + ] + } + ], + "source": [ + "exp.run(exp_config, 8745)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Besides, NNI provides WebUI to help users view the experiment results and make more advanced analysis." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.5: Export the top Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After searching, exporting the top model script is also very convenient." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final model:\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "\n", + "import nni.retiarii.nn.pytorch\n", + "\n", + "import nni\n", + "import torch\n", + "\n", + "\n", + "class _model__conv1(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.layerchoice__mutation_1_0 = torch.nn.modules.conv.Conv2d(padding=1, in_channels=3, out_channels=6, kernel_size=3)\n", + "\n", + " def forward(self, *_inputs):\n", + " layerchoice__mutation_1_0 = self.layerchoice__mutation_1_0(_inputs[0])\n", + " return layerchoice__mutation_1_0\n", + "\n", + "\n", + "\n", + "class _model__conv2(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.layerchoice__mutation_2_1 = torch.nn.modules.conv.Conv2d(padding=2, in_channels=6, out_channels=16, kernel_size=5)\n", + "\n", + " def forward(self, *_inputs):\n", + " layerchoice__mutation_2_1 = self.layerchoice__mutation_2_1(_inputs[0])\n", + " return layerchoice__mutation_2_1\n", + "\n", + "\n", + "\n", + "class _model(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.__conv1 = _model__conv1()\n", + " self.__pool = torch.nn.modules.pooling.MaxPool2d(kernel_size=2, stride=2)\n", + " self.__conv2 = _model__conv2()\n", + " self.__conv3 = torch.nn.modules.conv.Conv2d(in_channels=16, out_channels=16, kernel_size=1)\n", + " self.__skipconnect = nni.retiarii.nn.pytorch.ChosenInputs(chosen=[1], reduction='sum')\n", + " self.__bn = torch.nn.modules.batchnorm.BatchNorm2d(num_features=16)\n", + " self.__gap = torch.nn.modules.pooling.AdaptiveAvgPool2d(output_size=4)\n", + " self.__fc1 = torch.nn.modules.linear.Linear(in_features=256, out_features=120)\n", + " self.__fc2 = torch.nn.modules.linear.Linear(in_features=120, out_features=84)\n", + " self.__fc3 = torch.nn.modules.linear.Linear(in_features=84, out_features=10)\n", + "\n", + " def forward(self, x__1):\n", + " __Constant1 = -1\n", + " __Constant2 = 1\n", + " __Constant4 = False\n", + " __Constant5 = 0\n", + " __conv1 = 
self.__conv1(x__1)\n", + " __aten__size6 = x__1.size(dim=__Constant5)\n", + " __relu9 = F.relu(__conv1, __Constant4)\n", + " __ListConstruct21 = [__aten__size6, __Constant1]\n", + " __pool = self.__pool(__relu9)\n", + " __conv2 = self.__conv2(__pool)\n", + " __relu11 = F.relu(__conv2, __Constant4)\n", + " __conv3 = self.__conv3(__relu11)\n", + " __relu13 = F.relu(__conv3, __Constant4)\n", + " __aten__add15 = __relu13.add(other=__relu11, alpha=__Constant2)\n", + " __ListConstruct16 = [__relu13, __aten__add15]\n", + " __skipconnect = self.__skipconnect(__ListConstruct16)\n", + " __bn = self.__bn(__skipconnect)\n", + " __pool__19 = self.__pool(__bn)\n", + " __gap = self.__gap(__pool__19)\n", + " __aten__view22 = __gap.view(size=__ListConstruct21)\n", + " __fc1 = self.__fc1(__aten__view22)\n", + " __relu24 = F.relu(__fc1, __Constant4)\n", + " __fc2 = self.__fc2(__relu24)\n", + " __relu26 = F.relu(__fc2, __Constant4)\n", + " __fc3 = self.__fc3(__relu26)\n", + " return __fc3\n" + ] + } + ], + "source": [ + "print('Final model:')\n", + "for model_code in exp.export_top_models():\n", + " print(model_code)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/notebooks/Retiarii_example_one-shot_NAS.ipynb b/examples/notebooks/Retiarii_example_one-shot_NAS.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..11497c9420b7f0f6fa1ebe2301c1e29c6f628b6c --- /dev/null +++ b/examples/notebooks/Retiarii_example_one-shot_NAS.ipynb @@ -0,0 +1,354 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retiarii Example - One-shot NAS" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This example will show Retiarii's ability to **express** and **explore** the model space for Neural Architecture Search and Hyper-Parameter Tuning in a simple way. The video demo is available on [YouTube](https://youtu.be/3nEx9GMHYEk) and [Bilibili](https://www.bilibili.com/video/BV1c54y1V7vx/).\n", + "\n", + "Let's start the journey with Retiarii!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Express the Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A model space is defined by users to express a set of models that they want to explore, which contains potentially well-performing models. In the Retiarii framework, a model space is defined with two parts: a base model and possible mutations on the base model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 1.1: Define the Base Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Defining a base model is almost the same as defining a PyTorch (or TensorFlow) model. Usually, you only need to replace the code ``import torch.nn as nn`` with ``import nni.retiarii.nn.pytorch as nn`` to use NNI-wrapped PyTorch modules. Below is a very simple example of defining a base model."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "import nni.retiarii.nn.pytorch as nn\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + " self.conv1 = nn.Conv2d(3, 6, 3, padding=1)\n", + " self.pool = nn.MaxPool2d(2, 2)\n", + " self.conv2 = nn.Conv2d(6, 16, 3, padding=1)\n", + " self.conv3 = nn.Conv2d(16, 16, 1)\n", + "\n", + " self.bn = nn.BatchNorm2d(16)\n", + "\n", + " self.gap = nn.AdaptiveAvgPool2d(4)\n", + " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", + " self.fc2 = nn.Linear(120, 84)\n", + " self.fc3 = nn.Linear(84, 10)\n", + "\n", + " def forward(self, x):\n", + " bs = x.size(0)\n", + "\n", + " x = self.pool(F.relu(self.conv1(x)))\n", + " x0 = F.relu(self.conv2(x))\n", + " x1 = F.relu(self.conv3(x0))\n", + "\n", + " x1 += x0\n", + " x = self.pool(self.bn(x1))\n", + "\n", + " x = self.gap(x).view(bs, -1)\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + " \n", + "model = Net()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 1.2: Define the Model Mutations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A base model is only one concrete model, not a model space. NNI provides APIs and primitives for users to express how the base model can be mutated, i.e., a model space that includes many models. The following will use the inline mutation APIs: ``LayerChoice`` to choose a layer from candidate operations, and ``InputChoice`` to try out a skip connection." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "import nni.retiarii.nn.pytorch as nn\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + " # self.conv1 = nn.Conv2d(3, 6, 3, padding=1)\n", + " self.conv1 = nn.LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)])\n", + " self.pool = nn.MaxPool2d(2, 2)\n", + " # self.conv2 = nn.Conv2d(6, 16, 3, padding=1)\n", + " self.conv2 = nn.LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)])\n", + " self.conv3 = nn.Conv2d(16, 16, 1)\n", + "\n", + " self.skipconnect = nn.InputChoice(n_candidates=2)\n", + " self.bn = nn.BatchNorm2d(16)\n", + "\n", + " self.gap = nn.AdaptiveAvgPool2d(4)\n", + " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", + " self.fc2 = nn.Linear(120, 84)\n", + " self.fc3 = nn.Linear(84, 10)\n", + "\n", + " def forward(self, x):\n", + " bs = x.size(0)\n", + "\n", + " x = self.pool(F.relu(self.conv1(x)))\n", + " x0 = F.relu(self.conv2(x))\n", + " x1 = F.relu(self.conv3(x0))\n", + "\n", + " x1 = self.skipconnect([x1, x1+x0])\n", + " x = self.pool(self.bn(x1))\n", + "\n", + " x = self.gap(x).view(bs, -1)\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + " \n", + "model = Net()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Explore the Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With a defined model space, users can explore the space in two ways. One is the multi-trial NAS method, which searches by evaluating each sampled model independently. The other is using one-shot weight-sharing based search, which consumes far fewer computational resources than the first one. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this part, we focus on this **one-shot** approach. The principle of the One-shot approach is combining all the models in a model space into one big model (usually called super-model or super-graph). It takes charge of both search, training and testing, by training and evaluating this big model.\n", + "\n", + "Retiarii has supported some classic one-shot trainers, like DARTS trainer, ENAS trainer, ProxylessNAS trainer, Single-path trainer, and users can customize a new one-shot trainer according to the APIs provided by Retiarii conveniently.\n", + "\n", + "Here, we show an example to use DARTS trainer manually." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "[2021-06-07 11:12:22] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [1/391] acc1 0.093750 (0.093750) loss 2.286068 (2.286068)\n", + "[2021-06-07 11:12:22] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [11/391] acc1 0.093750 (0.089489) loss 2.328799 (2.309416)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [21/391] acc1 0.093750 (0.092262) loss 2.302527 (2.309082)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [31/391] acc1 0.109375 (0.099294) loss 2.294730 (2.304962)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [41/391] acc1 0.203125 (0.103277) loss 2.284227 (2.302716)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [51/391] acc1 0.078125 (0.106618) loss 2.308704 (2.300639)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [61/391] acc1 0.203125 (0.110143) loss 2.258595 (2.298042)\n", + "[2021-06-07 11:12:23] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [71/391] acc1 0.078125 (0.112896) loss 2.276706 (2.294709)\n", + "[2021-06-07 11:12:24] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [81/391] acc1 0.078125 (0.116898) loss 2.309119 (2.292235)\n", + "[2021-06-07 11:12:24] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [91/391] acc1 0.093750 (0.118304) loss 2.263757 (2.289659)\n", + "[2021-06-07 11:12:24] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [101/391] acc1 0.109375 (0.119431) loss 2.260739 (2.287132)\n", + "[2021-06-07 11:12:24] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [111/391] acc1 0.109375 (0.121481) loss 2.279930 (2.284314)\n", + "[2021-06-07 11:12:24] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [121/391] acc1 0.046875 (0.122934) loss 2.270205 (2.281701)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [131/391] acc1 0.156250 (0.125477) loss 2.270163 (2.278612)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [141/391] acc1 0.171875 (0.126551) loss 2.233467 (2.276326)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [151/391] acc1 0.109375 (0.127897) loss 2.264694 (2.274296)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [161/391] acc1 0.250000 
(0.132279) loss 2.259590 (2.271723)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [171/391] acc1 0.093750 (0.134868) loss 2.240986 (2.269037)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [181/391] acc1 0.218750 (0.137690) loss 2.218153 (2.266567)\n", + "[2021-06-07 11:12:25] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [191/391] acc1 0.078125 (0.140134) loss 2.260816 (2.264373)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [201/391] acc1 0.156250 (0.144123) loss 2.191213 (2.261285)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [211/391] acc1 0.125000 (0.146919) loss 2.245425 (2.258747)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [221/391] acc1 0.218750 (0.150028) loss 2.216708 (2.255553)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [231/391] acc1 0.250000 (0.153003) loss 2.195549 (2.252894)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [241/391] acc1 0.234375 (0.155666) loss 2.169693 (2.249465)\n", + "[2021-06-07 11:12:26] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [251/391] acc1 0.218750 (0.158989) loss 2.174878 (2.246355)\n", + "[2021-06-07 11:12:27] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [261/391] acc1 0.312500 (0.162775) loss 2.117693 (2.243113)\n", + "[2021-06-07 11:12:27] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [271/391] acc1 0.265625 (0.166686) loss 2.136203 (2.239288)\n", + "[2021-06-07 11:12:27] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [281/391] acc1 0.234375 (0.169095) loss 2.213463 (2.236377)\n", + "[2021-06-07 11:12:27] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [291/391] acc1 0.218750 (0.171338) loss 2.114096 (2.232892)\n", + "[2021-06-07 11:12:27] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [301/391] acc1 0.203125 (0.173432) loss 2.134074 (2.229637)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [311/391] acc1 0.265625 (0.175291) loss 2.041354 (2.225920)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [321/391] acc1 0.250000 (0.176840) loss 2.081122 (2.222280)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [331/391] acc1 0.140625 (0.178578) loss 2.124206 (2.219168)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [341/391] acc1 0.250000 (0.180169) loss 2.077291 (2.215540)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [351/391] acc1 0.250000 (0.182381) loss 2.077531 (2.211650)\n", + "[2021-06-07 11:12:28] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [361/391] acc1 0.312500 (0.185033) loss 2.016619 (2.207455)\n", + "[2021-06-07 11:12:29] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [371/391] acc1 0.250000 (0.187163) loss 2.139604 (2.202785)\n", + "[2021-06-07 11:12:29] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [381/391] acc1 0.281250 (0.189099) loss 2.033739 (2.198564)\n", + "[2021-06-07 
11:12:29] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [1/2] Step [391/391] acc1 0.275000 (0.190441) loss 1.988353 (2.194509)\n", + "[2021-06-07 11:12:29] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [1/391] acc1 0.296875 (0.296875) loss 2.083627 (2.083627)\n", + "[2021-06-07 11:12:30] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [11/391] acc1 0.265625 (0.251420) loss 2.042856 (2.050898)\n", + "[2021-06-07 11:12:30] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [21/391] acc1 0.234375 (0.273065) loss 2.005307 (2.021047)\n", + "[2021-06-07 11:12:30] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [31/391] acc1 0.375000 (0.269657) loss 1.934093 (2.014375)\n", + "[2021-06-07 11:12:30] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [41/391] acc1 0.265625 (0.277439) loss 2.007705 (2.003260)\n", + "[2021-06-07 11:12:30] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [51/391] acc1 0.218750 (0.278799) loss 2.014602 (2.001039)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [61/391] acc1 0.187500 (0.278945) loss 2.088407 (1.995837)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [71/391] acc1 0.343750 (0.285651) loss 1.894479 (1.988130)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [81/391] acc1 0.281250 (0.289159) loss 1.869002 (1.979012)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [91/391] acc1 0.265625 (0.291552) loss 1.848354 (1.971483)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [101/391] acc1 0.406250 (0.290996) loss 1.840711 (1.964297)\n", + "[2021-06-07 11:12:31] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [111/391] acc1 0.390625 (0.294764) loss 1.905811 (1.958954)\n", + "[2021-06-07 11:12:32] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [121/391] acc1 0.250000 (0.296617) loss 1.935214 (1.952315)\n", + "[2021-06-07 11:12:32] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [131/391] acc1 0.281250 (0.299618) loss 1.901846 (1.944634)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-07 11:12:32] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [141/391] acc1 0.312500 (0.302970) loss 1.854658 (1.939751)\n", + "[2021-06-07 11:12:32] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [151/391] acc1 0.218750 (0.305257) loss 1.927818 (1.934704)\n", + "[2021-06-07 11:12:32] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [161/391] acc1 0.343750 (0.307648) loss 1.820810 (1.927533)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [171/391] acc1 0.312500 (0.307383) loss 1.800313 (1.924665)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [181/391] acc1 0.484375 (0.307925) loss 1.637479 (1.920402)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [191/391] acc1 0.359375 (0.306692) loss 1.732374 (1.917680)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [201/391] acc1 0.406250 (0.309624) loss 1.870701 (1.911484)\n", + 
"[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [211/391] acc1 0.328125 (0.311982) loss 1.785704 (1.905039)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [221/391] acc1 0.265625 (0.312712) loss 1.738683 (1.901547)\n", + "[2021-06-07 11:12:33] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [231/391] acc1 0.359375 (0.315409) loss 1.827117 (1.894860)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [241/391] acc1 0.375000 (0.317881) loss 1.717454 (1.888916)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [251/391] acc1 0.328125 (0.318663) loss 1.873310 (1.886883)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [261/391] acc1 0.390625 (0.320163) loss 1.657088 (1.881767)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [271/391] acc1 0.421875 (0.321264) loss 1.710897 (1.877521)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [281/391] acc1 0.421875 (0.321230) loss 1.760745 (1.875136)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [291/391] acc1 0.375000 (0.321413) loss 1.669255 (1.872129)\n", + "[2021-06-07 11:12:34] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [301/391] acc1 0.328125 (0.322051) loss 1.728873 (1.868047)\n", + "[2021-06-07 11:12:35] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [311/391] acc1 0.375000 (0.323000) loss 1.754761 (1.864783)\n", + "[2021-06-07 11:12:35] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [321/391] acc1 0.437500 (0.324864) loss 1.666240 (1.859164)\n", + "[2021-06-07 11:12:35] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [331/391] acc1 0.421875 (0.325954) loss 1.661471 (1.856318)\n", + "[2021-06-07 11:12:35] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [341/391] acc1 0.328125 (0.326475) loss 1.737106 (1.853075)\n", + "[2021-06-07 11:12:35] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [351/391] acc1 0.343750 (0.327724) loss 1.789253 (1.849491)\n", + "[2021-06-07 11:12:36] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [361/391] acc1 0.250000 (0.328558) loss 1.773805 (1.846033)\n", + "[2021-06-07 11:12:36] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [371/391] acc1 0.312500 (0.329094) loss 1.901358 (1.844091)\n", + "[2021-06-07 11:12:36] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [381/391] acc1 0.250000 (0.330011) loss 1.863921 (1.841390)\n", + "[2021-06-07 11:12:36] INFO (nni.retiarii.oneshot.pytorch.darts/MainThread) Epoch [2/2] Step [391/391] acc1 0.325000 (0.331514) loss 1.729926 (1.837162)\n" + ] + } + ], + "source": [ + "import torch\n", + "from utils import accuracy\n", + "from torchvision import transforms\n", + "from torchvision.datasets import CIFAR10\n", + "from nni.retiarii.oneshot.pytorch import DartsTrainer\n", + "\n", + "criterion = torch.nn.CrossEntropyLoss()\n", + "optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n", + "\n", + "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", + "train_dataset = CIFAR10(root=\"./data\", 
train=True, download=True, transform=transform)\n", + "\n", + "trainer = DartsTrainer(\n", + " model=model,\n", + " loss=criterion,\n", + " metrics=lambda output, target: accuracy(output, target),\n", + " optimizer=optimizer,\n", + " num_epochs=2,\n", + " dataset=train_dataset,\n", + " batch_size=64,\n", + " log_frequency=10\n", + " )\n", + "\n", + "trainer.fit()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similarly, the discovered optimal structure can be exported." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final architecture: {'_mutation_1': 1, '_mutation_2': 1, '_mutation_3': [1]}\n" + ] + } + ], + "source": [ + "print('Final architecture:', trainer.export())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/notebooks/tabular_data_classification_in_AML.ipynb b/examples/notebooks/tabular_data_classification_in_AML.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..5d9127994f3bea9874b61e258c45cd7d0c4857cb --- /dev/null +++ b/examples/notebooks/tabular_data_classification_in_AML.ipynb @@ -0,0 +1,1073 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tabular Data Classification with NNI in AML" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This simple example uses the NNI NAS 2.0 (Retiarii) framework to search for the best neural architecture for a tabular data classification task on the Azure Machine Learning training platform.\n", + "\n", + "The video demo is available on [YouTube](https://www.youtube.com/watch?v=PDVqBmm7Cro) and [Bilibili](https://www.bilibili.com/video/BV1oy4y1W7GF)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Prepare the dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first step is to prepare the dataset. Here we use the Titanic dataset as an example." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import TitanicDataset\n", + "from nni.retiarii import serialize\n", + "\n", + "train_dataset = serialize(TitanicDataset, root='./data', train=True)\n", + "test_dataset = serialize(TitanicDataset, root='./data', train=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Define the Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A model space is defined by users to express a set of models that they want to explore, which contains potentially well-performing models. In the Retiarii (NNI NAS 2.0) framework, a model space is defined with two parts: a base model and possible mutations on the base model."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.1: Define the Base Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Defining a base model is almost the same as defining a PyTorch (or TensorFlow) model. Usually, you only need to replace the code ``import torch.nn as nn`` with ``import nni.retiarii.nn.pytorch as nn`` to use NNI-wrapped PyTorch modules. Below is a very simple example of defining a base model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import nni.retiarii.nn.pytorch as nn\n", + "import torch.nn.functional as F\n", + "\n", + "class Net(nn.Module):\n", + "\n", + " def __init__(self, input_size):\n", + " super().__init__()\n", + "\n", + " self.fc1 = nn.Linear(input_size, 16)\n", + " self.bn1 = nn.BatchNorm1d(16)\n", + " self.dropout1 = nn.Dropout(0.0)\n", + "\n", + " self.fc2 = nn.Linear(16, 16)\n", + " self.bn2 = nn.BatchNorm1d(16)\n", + " self.dropout2 = nn.Dropout(0.0)\n", + "\n", + " self.fc3 = nn.Linear(16, 2)\n", + "\n", + " def forward(self, x):\n", + "\n", + " x = self.dropout1(F.relu(self.bn1(self.fc1(x))))\n", + " x = self.dropout2(F.relu(self.bn2(self.fc2(x))))\n", + " x = F.sigmoid(self.fc3(x))\n", + " return x\n", + " \n", + "model_space = Net(len(train_dataset.__getitem__(0)[0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2.2: Define the Model Mutations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A base model is only one concrete model, not a model space. NNI provides APIs and primitives for users to express how the base model can be mutated, i.e., a model space that includes many models. The following will use the inline mutation APIs as a simple example. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import nni.retiarii.nn.pytorch as nn\n", + "import torch.nn.functional as F\n", + "\n", + "class Net(nn.Module):\n", + "\n", + " def __init__(self, input_size):\n", + " super().__init__()\n", + "\n", + " self.hidden_dim1 = nn.ValueChoice(\n", + " [16, 32, 64, 128, 256, 512, 1024], label='hidden_dim1')\n", + " self.hidden_dim2 = nn.ValueChoice(\n", + " [16, 32, 64, 128, 256, 512, 1024], label='hidden_dim2')\n", + "\n", + " self.fc1 = nn.Linear(input_size, self.hidden_dim1)\n", + " self.bn1 = nn.BatchNorm1d(self.hidden_dim1)\n", + " self.dropout1 = nn.Dropout(nn.ValueChoice([0.0, 0.25, 0.5]))\n", + "\n", + " self.fc2 = nn.Linear(self.hidden_dim1, self.hidden_dim2)\n", + " self.bn2 = nn.BatchNorm1d(self.hidden_dim2)\n", + " self.dropout2 = nn.Dropout(nn.ValueChoice([0.0, 0.25, 0.5]))\n", + "\n", + " self.fc3 = nn.Linear(self.hidden_dim2, 2)\n", + "\n", + " def forward(self, x):\n", + "\n", + " x = self.dropout1(F.relu(self.bn1(self.fc1(x))))\n", + " x = self.dropout2(F.relu(self.bn2(self.fc2(x))))\n", + " x = F.sigmoid(self.fc3(x))\n", + " return x\n", + "\n", + "model_space = Net(len(train_dataset.__getitem__(0)[0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Besides inline mutations, Retiarii also provides ``mutator``, a more general approach to expressing a complex model space, as sketched below."
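+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a rough illustration (this sketch is not part of the search performed in this notebook, and it assumes Retiarii's graph-mutation APIs ``Mutator``, ``get_nodes_by_label``, ``choice`` and ``update_operation``; the node label ``'fc2'`` and the candidate widths are hypothetical placeholders), a custom mutator subclasses ``Mutator`` and rewrites matched nodes of the base model's graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from nni.retiarii import Mutator\n", + "\n", + "class WidthMutator(Mutator):\n", + "    # For every graph node labeled 'fc2' (hypothetical label), sample a\n", + "    # hidden width and rewrite the node's operation with the chosen value.\n", + "    def mutate(self, model):\n", + "        for node in model.get_nodes_by_label('fc2'):\n", + "            width = self.choice([64, 128, 256])\n", + "            params = dict(node.operation.parameters)\n", + "            params['out_features'] = width\n", + "            node.update_operation(node.operation.type, params)"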
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3: Explore the Defined Model Space" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the NAS process, the search strategy repeatedly generates new models, and the model evaluator trains and validates each generated model. The obtained performance of a generated model is collected and sent to the search strategy for generating better models.\n", + "\n", + "Users can choose a proper search strategy to explore the model space, and use a chosen or user-defined model evaluator to evaluate the performance of each sampled model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 3.1: Choose a Search Strategy" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import nni.retiarii.strategy as strategy\n", + "\n", + "simple_strategy = strategy.TPEStrategy()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 3.2: Choose or Write a Model Evaluator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the context of PyTorch, Retiarii provides two built-in model evaluators designed for simple use cases: classification and regression. These two evaluators are built upon the PyTorch-Lightning library." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 09:56:10] INFO (lightning/MainThread) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 09:56:10] INFO (lightning/MainThread) TPU available: None, using: 0 TPU cores\n" + ] + } + ], + "source": [ + "import nni.retiarii.evaluator.pytorch.lightning as pl\n", + "\n", + "trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=16),\n", + " val_dataloaders=pl.DataLoader(\n", + " test_dataset, batch_size=16),\n", + " max_epochs=20)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 4: Configure the Experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After all the above are prepared, it is time to configure an experiment to do the model search. The basic experiment configuration is as follows; see [this page](https://nni.readthedocs.io/en/stable/reference/experiment_config.html) for advanced configuration options."
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment\n", + "\n", + "exp = RetiariiExperiment(model_space, trainer, [], simple_strategy)\n", + "\n", + "exp_config = RetiariiExeConfig('aml')\n", + "exp_config.experiment_name = 'titanic_example'\n", + "exp_config.trial_concurrency = 2\n", + "exp_config.max_trial_number = 20\n", + "exp_config.max_experiment_duration = '2h'\n", + "exp_config.execution_engine = 'base'\n", + "exp_config.nni_manager_ip = '' # your nni_manager_ip" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before running experiments on the AML (Azure Machine Learning) training service, you need to set up the corresponding environment (refer to the [AML mode doc](https://nni.readthedocs.io/en/stable/TrainingService/AMLMode.html)) and configure the following additional fields:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Authenticate to your Azure subscription from the CLI.\n", + "# If you have already logged in, you can skip this step.\n", + "!az login" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "exp_config.training_service.subscription_id = '' # your subscription id\n", + "exp_config.training_service.resource_group = '' # your resource group\n", + "exp_config.training_service.workspace_name = '' # your workspace name\n", + "exp_config.training_service.compute_target = '' # your compute target\n", + "exp_config.training_service.docker_image = 'msranni/nni:latest' # your docker image" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 5: Run and View the Experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can launch the experiment now! \n", + "\n", + "Besides, NNI provides a WebUI to help users view the experiment results and perform more advanced analysis."
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 09:56:54] INFO (nni.experiment/MainThread) Creating experiment, Experiment ID: 46den9qr\n", + "[2021-06-08 09:56:55] INFO (nni.experiment/MainThread) Connecting IPC pipe...\n", + "[2021-06-08 09:56:58] INFO (nni.experiment/MainThread) Starting web server...\n", + "[2021-06-08 09:57:00] INFO (nni.experiment/MainThread) Setting up...\n", + "[2021-06-08 09:57:05] INFO (nni.runtime.msg_dispatcher_base/Thread-8) Dispatcher started\n", + "[2021-06-08 09:57:05] INFO (nni.retiarii.experiment.pytorch/MainThread) Web UI URLs: http://127.0.0.1:8745\n", + "[2021-06-08 09:57:05] INFO (nni.retiarii.experiment.pytorch/MainThread) Start strategy...\n", + "[2021-06-08 09:57:05] INFO (nni.retiarii.strategy.tpe_strategy/MainThread) TPE strategy has been started.\n", + "[2021-06-08 09:57:05] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.001999 seconds\n", + "[2021-06-08 09:57:05] INFO (hyperopt.tpe/MainThread) TPE using 0 trials\n", + "[2021-06-08 09:57:10] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002029 seconds\n", + "[2021-06-08 09:57:10] INFO (hyperopt.tpe/MainThread) TPE using 0 trials\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:03:55] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:03:55] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:03:56] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.000000 seconds\n", + "[2021-06-08 10:03:56] INFO (hyperopt.tpe/MainThread) TPE using 1/1 trials with best loss 0.795455\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:04:46] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:04:46] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:04:46] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.000000 seconds\n", + "[2021-06-08 10:04:46] INFO (hyperopt.tpe/MainThread) TPE using 2/2 trials with best loss 0.795455\n", + "[2021-06-08 10:04:50] WARNING (nni.runtime.msg_dispatcher_base/Thread-8) assessor queue length: 20\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:04:51] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:04:51] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:04:52] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.000000 seconds\n", + 
"[2021-06-08 10:04:52] INFO (hyperopt.tpe/MainThread) TPE using 3/3 trials with best loss 0.795455\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:05:46] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:05:46] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:05:48] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002999 seconds\n", + "[2021-06-08 10:05:48] INFO (hyperopt.tpe/MainThread) TPE using 4/4 trials with best loss 0.791667\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:05:56] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:05:56] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:05:56] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.000000 seconds\n", + "[2021-06-08 10:05:56] INFO (hyperopt.tpe/MainThread) TPE using 5/5 trials with best loss 0.791667\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:06:26] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:06:26] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:06:27] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.004991 seconds\n", + "[2021-06-08 10:06:27] INFO (hyperopt.tpe/MainThread) TPE using 6/6 trials with best loss 0.791667\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:07:06] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:07:06] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:07:07] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.006043 seconds\n", + "[2021-06-08 10:07:07] INFO (hyperopt.tpe/MainThread) TPE using 7/7 trials with best loss 0.784091\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:07:56] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "[2021-06-08 10:07:56] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:07:57] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.006004 seconds\n", + "[2021-06-08 10:07:57] INFO (hyperopt.tpe/MainThread) TPE using 8/8 trials with best loss 0.731061\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:08:01] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:08:01] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:08:01] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.005000 seconds\n", + "[2021-06-08 10:08:01] INFO (hyperopt.tpe/MainThread) TPE using 9/9 trials with best loss 0.731061\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:08:56] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:08:56] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:08:58] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.004962 seconds\n", + "[2021-06-08 10:08:58] INFO (hyperopt.tpe/MainThread) TPE using 10/10 trials with best loss 0.731061\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:09:01] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:09:01] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:09:03] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.003043 seconds\n", + "[2021-06-08 10:09:03] INFO (hyperopt.tpe/MainThread) TPE using 11/11 trials with best loss 0.731061\n", + "[2021-06-08 10:10:24] WARNING (nni.runtime.msg_dispatcher_base/Thread-8) assessor queue length: 20\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:10:27] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:10:27] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:10:28] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002005 seconds\n", + "[2021-06-08 10:10:28] INFO (hyperopt.tpe/MainThread) TPE using 12/12 trials with best loss 0.731061\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: 
True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:10:52] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:10:52] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:10:53] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.122046 seconds\n", + "[2021-06-08 10:10:53] INFO (hyperopt.tpe/MainThread) TPE using 13/13 trials with best loss 0.731061\n", + "[2021-06-08 10:14:52] WARNING (nni.runtime.msg_dispatcher_base/Thread-8) assessor queue length: 20\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:14:52] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:14:52] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:14:53] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002038 seconds\n", + "[2021-06-08 10:14:53] INFO (hyperopt.tpe/MainThread) TPE using 14/14 trials with best loss 0.731061\n", + "[2021-06-08 10:14:56] WARNING (nni.runtime.msg_dispatcher_base/Thread-8) assessor queue length: 20\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:14:57] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:14:57] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:14:58] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.005870 seconds\n", + "[2021-06-08 10:14:58] INFO (hyperopt.tpe/MainThread) TPE using 15/15 trials with best loss 0.731061\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:07] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:07] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:16:08] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.004999 seconds\n", + "[2021-06-08 10:16:08] INFO (hyperopt.tpe/MainThread) TPE using 16/16 trials with best loss 0.712121\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:48] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:48] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:16:48] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002000 seconds\n", + "[2021-06-08 10:16:48] INFO (hyperopt.tpe/MainThread) TPE using 17/17 trials with best loss 0.712121\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:53] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:16:53] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:16:55] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002010 seconds\n", + "[2021-06-08 10:16:55] INFO (hyperopt.tpe/MainThread) TPE using 18/18 trials with best loss 0.712121\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:17:43] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:17:43] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:17:44] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.006001 seconds\n", + "[2021-06-08 10:17:44] INFO (hyperopt.tpe/MainThread) TPE using 19/19 trials with best loss 0.712121\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True, used: False\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:18:03] INFO (lightning/Thread-5) GPU available: True, used: False\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TPU available: None, using: 0 TPU cores\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-06-08 10:18:03] INFO (lightning/Thread-5) TPU available: None, using: 0 TPU cores\n", + "[2021-06-08 10:18:04] INFO (hyperopt.tpe/MainThread) tpe_transform took 0.002009 seconds\n", + "[2021-06-08 10:18:04] INFO (hyperopt.tpe/MainThread) TPE using 20/20 trials with best loss 0.712121\n", + "[2021-06-08 10:18:12] INFO (nni.retiarii.experiment.pytorch/Thread-9) Stopping experiment, please wait...\n", + "[2021-06-08 10:18:14] INFO (nni.runtime.msg_dispatcher_base/Thread-8) Dispatcher exiting...\n", + "[2021-06-08 10:18:14] INFO (nni.retiarii.experiment.pytorch/MainThread) Strategy exit\n", + "[2021-06-08 10:18:14] INFO (nni.retiarii.experiment.pytorch/MainThread) Waiting for experiment to become DONE (you can ctrl+c if there is no running trial jobs)...\n", + "[2021-06-08 10:18:15] INFO (nni.retiarii.experiment.pytorch/Thread-9) Experiment stopped\n", + "[2021-06-08 10:18:16] INFO (nni.runtime.msg_dispatcher_base/Thread-8) Dispatcher terminiated\n" + ] + } + ], + "source": [ + "exp.run(exp_config, 8745)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 6: Export the top Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Exporting the top 
model script is also very convenient." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final model:\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "\n", + "import nni.retiarii.nn.pytorch\n", + "\n", + "import torch\n", + "\n", + "\n", + "class _model(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.__fc1 = torch.nn.modules.linear.Linear(in_features=9, out_features=512)\n", + " self.__bn1 = torch.nn.modules.batchnorm.BatchNorm1d(num_features=512)\n", + " self.__dropout1 = torch.nn.modules.dropout.Dropout(p=0.0)\n", + " self.__fc2 = torch.nn.modules.linear.Linear(in_features=512, out_features=128)\n", + " self.__bn2 = torch.nn.modules.batchnorm.BatchNorm1d(num_features=128)\n", + " self.__dropout2 = torch.nn.modules.dropout.Dropout(p=0.25)\n", + " self.__fc3 = torch.nn.modules.linear.Linear(in_features=128, out_features=2)\n", + "\n", + " def forward(self, x__1):\n", + " __Constant3 = False\n", + " __fc1 = self.__fc1(x__1)\n", + " __bn1 = self.__bn1(__fc1)\n", + " __relu7 = F.relu(__bn1, __Constant3)\n", + " __dropout1 = self.__dropout1(__relu7)\n", + " __fc2 = self.__fc2(__dropout1)\n", + " __bn2 = self.__bn2(__fc2)\n", + " __relu11 = F.relu(__bn2, __Constant3)\n", + " __dropout2 = self.__dropout2(__relu11)\n", + " __fc3 = self.__fc3(__dropout2)\n", + " __sigmoid13 = F.sigmoid(__fc3)\n", + " return __sigmoid13\n" + ] + } + ], + "source": [ + "print('Final model:')\n", + "for model_code in exp.export_top_models():\n", + " print(model_code)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/examples/notebooks/utils.py b/examples/notebooks/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64efb244711e06b7191d35e6b18f4f87f7f534db --- /dev/null +++ b/examples/notebooks/utils.py @@ -0,0 +1,32 @@ +import os +import torch +import pandas as pd + +from sklearn.preprocessing import LabelEncoder +from torchvision.datasets.utils import download_url + +class TitanicDataset(torch.utils.data.Dataset): + def __init__(self, root: str, train: bool = True): + filename = 'train.csv' if train else 'eval.csv' + if not os.path.exists(os.path.join(root, filename)): + download_url(os.path.join('https://storage.googleapis.com/tf-datasets/titanic/', filename), root, filename) + + df = pd.read_csv(os.path.join(root, filename)) + object_columns = df.select_dtypes(include='object').columns.values + for idx in df.columns: + if idx in object_columns: + df[idx] = LabelEncoder().fit_transform(df[idx]) + + self.x = df.iloc[:, 1:].values + self.y = df.iloc[:, 0].values + + def __len__(self): + return len(self.y) + + def __getitem__(self, idx): + return torch.Tensor(self.x[idx]), self.y[idx] + +def accuracy(output, target): + batch_size = target.size(0) + _, predicted = torch.max(output.data, 1) + return {"acc1": (predicted == target).sum().item() / batch_size} 
\ No newline at end of file diff --git a/examples/trials/README.md b/examples/trials/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8377f29729a264187609a688b737d55c401ae49f --- /dev/null +++ b/examples/trials/README.md @@ -0,0 +1,284 @@ +# How to write a Trial running on NNI? + +*A trial receives the hyper-parameter/architecture configuration from the Tuner, and sends intermediate results to the Assessor and the final result to the Tuner.* + +So when a user wants to write a trial running on NNI, she/he should: + +**1) Have an original trial that can run** + +A trial's code can be any machine learning code that can run locally. Here we use `mnist-keras.py` as an example: + +```python +import argparse +import logging +import keras +import numpy as np +from keras import backend as K +from keras.datasets import mnist +from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D +from keras.models import Sequential + +K.set_image_data_format('channels_last') + +H, W = 28, 28 +NUM_CLASSES = 10 + +def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES): + layers = [ + Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape), + Conv2D(64, (3, 3), activation='relu'), + MaxPooling2D(pool_size=(2, 2)), + Flatten(), + Dense(100, activation='relu'), + Dense(num_classes, activation='softmax') + ] + + model = Sequential(layers) + + if hyper_params['optimizer'] == 'Adam': + optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate']) + else: + optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9) + model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy']) + + return model + +def load_mnist_data(args): + (x_train, y_train), (x_test, y_test) = mnist.load_data() + + x_train = (np.expand_dims(x_train, -1).astype(np.float32) / 255.)[:args.num_train] + x_test = (np.expand_dims(x_test, -1).astype(np.float32) / 255.)[:args.num_test] + y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] + y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] + + return x_train, y_train, x_test, y_test + +class SendMetrics(keras.callbacks.Callback): + def on_epoch_end(self, epoch, logs={}): + pass + +def train(args, params): + x_train, y_train, x_test, y_test = load_mnist_data(args) + model = create_mnist_model(params) + + model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1, + validation_data=(x_test, y_test), callbacks=[SendMetrics()]) + + _, acc = model.evaluate(x_test, y_test, verbose=0) + +def generate_default_params(): + return { + 'optimizer': 'Adam', + 'learning_rate': 0.001 + } + +if __name__ == '__main__': + PARSER = argparse.ArgumentParser() + PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False) + PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False) + PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False) + PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False) + + ARGS, UNKNOWN = PARSER.parse_known_args() + PARAMS = generate_default_params() + train(ARGS, PARAMS) +``` + +**2) Get the configuration from the Tuner** + +The user imports `nni` and uses `nni.get_next_parameter()` to receive the configuration. Please note lines **10**, **24** and **25** in the following code. 
+
+
+**3) Send intermediate results**
+
+Use `nni.report_intermediate_result` to send intermediate results to the Assessor. Note line **5** in the following code.
+
+
+```python
+...
+
+class SendMetrics(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs={}):
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+
+...
+```
+**4) Send the final result**
+
+Use `nni.report_final_result` to send the final result to the Tuner. Note line **15** in the following code.
+
+```python
+...
+
+class SendMetrics(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs={}):
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+    nni.report_final_result(acc)
+...
+```
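
A note on metric formats before the full example (our reading of the `nni` API, not something this README states): both reporting functions appear to accept either a plain number or a dict carrying a numeric `default` key, so the calls above could also be written as in this sketch:

```python
import nni

acc = 0.93  # stand-in for the value computed by model.evaluate above

# A bare number is the simplest form:
nni.report_intermediate_result(acc)

# A dict also works; NNI reads the metric from the numeric 'default' key
# and keeps the remaining entries for display:
nni.report_final_result({'default': acc, 'epochs': 10})
```
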
+
+Here is the complete example:
+
+
+```python
+import argparse
+import logging
+
+import keras
+import numpy as np
+from keras import backend as K
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
+from keras.models import Sequential
+
+import nni
+
+LOG = logging.getLogger('mnist_keras')
+K.set_image_data_format('channels_last')
+
+H, W = 28, 28
+NUM_CLASSES = 10
+
+def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
+    '''
+    Create simple convolutional model
+    '''
+    layers = [
+        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
+        Conv2D(64, (3, 3), activation='relu'),
+        MaxPooling2D(pool_size=(2, 2)),
+        Flatten(),
+        Dense(100, activation='relu'),
+        Dense(num_classes, activation='softmax')
+    ]
+
+    model = Sequential(layers)
+
+    if hyper_params['optimizer'] == 'Adam':
+        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
+    else:
+        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
+    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
+
+    return model
+
+def load_mnist_data(args):
+    '''
+    Load MNIST dataset
+    '''
+    (x_train, y_train), (x_test, y_test) = mnist.load_data()
+
+    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
+    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
+    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
+    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
+
+    LOG.debug('x_train shape: %s', (x_train.shape,))
+    LOG.debug('x_test shape: %s', (x_test.shape,))
+
+    return x_train, y_train, x_test, y_test
+
+class SendMetrics(keras.callbacks.Callback):
+    '''
+    Keras callback to send metrics to NNI framework
+    '''
+    def on_epoch_end(self, epoch, logs={}):
+        '''
+        Run on end of each epoch
+        '''
+        LOG.debug(logs)
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    '''
+    Train model
+    '''
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+    LOG.debug('Final result is: %.4f', acc)
+    nni.report_final_result(acc)
+
+def generate_default_params():
+    '''
+    Generate default hyper parameters
+    '''
+    return {
+        'optimizer': 'Adam',
+        'learning_rate': 0.001
+    }
+
+if __name__ == '__main__':
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
+    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
+    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
+    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)
+
+    ARGS, UNKNOWN = PARSER.parse_known_args()
+
+    try:
+        # get parameters from tuner
+        RECEIVED_PARAMS = nni.get_next_parameter()
+        LOG.debug(RECEIVED_PARAMS)
+        PARAMS = generate_default_params()
+        PARAMS.update(RECEIVED_PARAMS)
+        # train
+        train(ARGS, PARAMS)
+    except Exception as e:
+        LOG.exception(e)
+        raise
+
+```
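
The trial file alone does not launch anything; an experiment config is also needed. Below is a sketch modeled on the `auto-gbdt/config.yml` that appears later in this diff, adapted to this example; the `maximize` mode and the file names are assumptions:

```yaml
searchSpaceFile: search_space.json
trialCommand: python3 mnist-keras.py
trialConcurrency: 1
maxTrialNumber: 10
tuner:
  name: TPE
  classArgs:
    optimize_mode: maximize  # the reported final metric is accuracy
trainingService:
  platform: local
```

The experiment is then started with `nnictl create --config config.yml`.
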
diff --git a/examples/trials/README_zh_CN.md b/examples/trials/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..f28a3d2c4ac44ed5258f98daebc7aa307f19d527
--- /dev/null
+++ b/examples/trials/README_zh_CN.md
@@ -0,0 +1,281 @@
+# How to implement Trial code in NNI?
+
+*A Trial receives the hyper-parameter and architecture configuration from the Tuner, sends intermediate results to the Assessor, and sends the final result to the Tuner.*
+
+When you want to run a Trial on NNI, you need to:
+
+**1) Write the original training code.**
+
+The Trial code can be any machine learning code that runs locally. Here we use `mnist-keras.py` as an example:
+
+```python
+import argparse
+import logging
+import keras
+import numpy as np
+from keras import backend as K
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
+from keras.models import Sequential
+
+K.set_image_data_format('channels_last')
+
+H, W = 28, 28
+NUM_CLASSES = 10
+
+def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
+    layers = [
+        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
+        Conv2D(64, (3, 3), activation='relu'),
+        MaxPooling2D(pool_size=(2, 2)),
+        Flatten(),
+        Dense(100, activation='relu'),
+        Dense(num_classes, activation='softmax')
+    ]
+
+    model = Sequential(layers)
+
+    if hyper_params['optimizer'] == 'Adam':
+        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
+    else:
+        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
+    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
+
+    return model
+
+def load_mnist_data(args):
+    (x_train, y_train), (x_test, y_test) = mnist.load_data()
+
+    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
+    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
+    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
+    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
+
+    return x_train, y_train, x_test, y_test
+
+class SendMetrics(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs={}):
+        pass
+
+def train(args, params):
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+
+def generate_default_params():
+    return {
+        'optimizer': 'Adam',
+        'learning_rate': 0.001
+    }
+
+if __name__ == '__main__':
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
+    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
+    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
+    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)
+
+    ARGS, UNKNOWN = PARSER.parse_known_args()
+    PARAMS = generate_default_params()
+    train(ARGS, PARAMS)
+```
+
+**2) Get the configuration from the Tuner**
+
+Import `nni` and use `nni.get_next_parameter()` to receive the parameters. Note lines **10**, **24** and **25** in the code.
+
+```python
+import argparse
+import logging
+import keras
+import numpy as np
+from keras import backend as K
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
+from keras.models import Sequential
+
+import nni
+
+...
+
+if __name__ == '__main__':
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
+    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
+    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
+    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)
+
+    ARGS, UNKNOWN = PARSER.parse_known_args()
+
+    PARAMS = generate_default_params()
+    RECEIVED_PARAMS = nni.get_next_parameter()
+    PARAMS.update(RECEIVED_PARAMS)
+    train(ARGS, PARAMS)
+```
+
+**3) Send intermediate results**
+
+Use `nni.report_intermediate_result` to send intermediate results to the Assessor. Note line **5**.
+
+```python
+...
+
+class SendMetrics(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs={}):
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+
+...
+```
+
+**4) Send the final result**
+
+Use `nni.report_final_result` to send the final result to the Tuner. Note line **15**.
+
+```python
+...
+
+class SendMetrics(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs={}):
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+    nni.report_final_result(acc)
+...
+```
+
+Here is the complete example:
+
+```python
+import argparse
+import logging
+
+import keras
+import numpy as np
+from keras import backend as K
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
+from keras.models import Sequential
+
+import nni
+
+LOG = logging.getLogger('mnist_keras')
+K.set_image_data_format('channels_last')
+
+H, W = 28, 28
+NUM_CLASSES = 10
+
+def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
+    '''
+    Create a simple convolutional model
+    '''
+    layers = [
+        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
+        Conv2D(64, (3, 3), activation='relu'),
+        MaxPooling2D(pool_size=(2, 2)),
+        Flatten(),
+        Dense(100, activation='relu'),
+        Dense(num_classes, activation='softmax')
+    ]
+
+    model = Sequential(layers)
+
+    if hyper_params['optimizer'] == 'Adam':
+        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
+    else:
+        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
+    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
+
+    return model
+
+def load_mnist_data(args):
+    '''
+    Load the MNIST dataset
+    '''
+    (x_train, y_train), (x_test, y_test) = mnist.load_data()
+
+    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
+    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
+    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
+    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
+
+    LOG.debug('x_train shape: %s', (x_train.shape,))
+    LOG.debug('x_test shape: %s', (x_test.shape,))
+
+    return x_train, y_train, x_test, y_test
+
+class SendMetrics(keras.callbacks.Callback):
+    '''
+    Keras callback to report intermediate results to NNI
+    '''
+    def on_epoch_end(self, epoch, logs={}):
+        '''
+        Run at the end of each epoch
+        '''
+        LOG.debug(logs)
+        nni.report_intermediate_result(logs)
+
+def train(args, params):
+    '''
+    Train the model
+    '''
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics()])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+    LOG.debug('Final result is: %.4f', acc)
+    nni.report_final_result(acc)
+
+def generate_default_params():
+    '''
+    Generate default hyper-parameters
+    '''
+    return {
+        'optimizer': 'Adam',
+        'learning_rate': 0.001
+    }
+
+if __name__ == '__main__':
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
+    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
+    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
+    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)
+
+    ARGS, UNKNOWN = PARSER.parse_known_args()
+
+    try:
+        # get parameters from the Tuner
+        RECEIVED_PARAMS = nni.get_next_parameter()
+        LOG.debug(RECEIVED_PARAMS)
+        PARAMS = generate_default_params()
+        PARAMS.update(RECEIVED_PARAMS)
+        # train
+        train(ARGS, PARAMS)
+    except Exception as e:
+        LOG.exception(e)
+        raise
+
+```
\ No newline at end of file
diff --git a/examples/trials/auto-gbdt/config.yml b/examples/trials/auto-gbdt/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..38bdd5b80f7151c1e5c99b59b3363b68787f51b0
--- /dev/null
+++
b/examples/trials/auto-gbdt/config.yml @@ -0,0 +1,10 @@ +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialConcurrency: 1 +maxTrialNumber: 10 +tuner: + name: TPE + classArgs: + optimize_mode: minimize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/auto-gbdt/config_metis.yml b/examples/trials/auto-gbdt/config_metis.yml new file mode 100644 index 0000000000000000000000000000000000000000..0999d8cb52fdf6bbf2a63bde96941bbd986d63a0 --- /dev/null +++ b/examples/trials/auto-gbdt/config_metis.yml @@ -0,0 +1,22 @@ +# The search space of Metis tuner is slightly different from TPE and others. +# See Metis tuner' doc for details: https://nni.readthedocs.io/en/stable/Tuner/MetisTuner.html +searchSpace: + num_leaves: + _type: choice + _value: [31, 28, 24, 20] + learning_rate: + _type: choice + _value: [0.01, 0.05, 0.1, 0.2] + bagging_freq: + _type: choice + _value: [1, 2, 4, 8, 10] + +trialCommand: python3 main.py +trialConcurrency: 1 +maxTrialNumber: 10 +tuner: + name: MetisTuner + classArgs: + optimize_mode: minimize +trainingService: + platform: local diff --git a/examples/trials/auto-gbdt/config_windows.yml b/examples/trials/auto-gbdt/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..d9b0d434a566d9c29b22285bcbc39d737588849f --- /dev/null +++ b/examples/trials/auto-gbdt/config_windows.yml @@ -0,0 +1,10 @@ +searchSpaceFile: search_space.json +trialCommand: python main.py +trialConcurrency: 1 +maxTrialNumber: 10 +tuner: + name: TPE + classArgs: + optimize_mode: minimize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/auto-gbdt/data/regression.test b/examples/trials/auto-gbdt/data/regression.test new file mode 100644 index 0000000000000000000000000000000000000000..73a6460335a53dc4a51162b235f8fd1ec678a4de --- /dev/null +++ b/examples/trials/auto-gbdt/data/regression.test @@ -0,0 +1,500 @@ +1 0.644 0.247 -0.447 0.862 0.374 0.854 -1.126 -0.790 2.173 1.015 -0.201 1.400 0.000 1.575 1.807 1.607 0.000 1.585 -0.190 -0.744 3.102 0.958 1.061 0.980 0.875 0.581 0.905 0.796 +0 0.385 1.800 1.037 1.044 0.349 1.502 -0.966 1.734 0.000 0.966 -1.960 -0.249 0.000 1.501 0.465 -0.354 2.548 0.834 -0.440 0.638 3.102 0.695 0.909 0.981 0.803 0.813 1.149 1.116 +0 1.214 -0.166 0.004 0.505 1.434 0.628 -1.174 -1.230 1.087 0.579 -1.047 -0.118 0.000 0.835 0.340 1.234 2.548 0.711 -1.383 1.355 0.000 0.848 0.911 1.043 0.931 1.058 0.744 0.696 +1 0.420 1.111 0.137 1.516 -1.657 0.854 0.623 1.605 1.087 1.511 -1.297 0.251 0.000 0.872 -0.368 -0.721 0.000 0.543 0.731 1.424 3.102 1.597 1.282 1.105 0.730 0.148 1.231 1.234 +0 0.897 -1.703 -1.306 1.022 -0.729 0.836 0.859 -0.333 2.173 1.336 -0.965 0.972 2.215 0.671 1.021 -1.439 0.000 0.493 -2.019 -0.289 0.000 0.805 0.930 0.984 1.430 2.198 1.934 1.684 +0 0.756 1.126 -0.945 2.355 -0.555 0.889 0.800 1.440 0.000 0.585 0.271 0.631 2.215 0.722 1.744 1.051 0.000 0.618 0.924 0.698 1.551 0.976 0.864 0.988 0.803 0.234 0.822 0.911 +0 1.141 -0.741 0.953 1.478 -0.524 1.197 -0.871 1.689 2.173 0.875 1.321 -0.518 1.107 0.540 0.037 -0.987 0.000 0.879 1.187 0.245 0.000 0.888 0.701 1.747 1.358 2.479 1.491 1.223 +1 0.606 -0.936 -0.384 1.257 -1.162 2.719 -0.600 0.100 2.173 3.303 -0.284 1.561 1.107 0.689 1.786 -0.326 0.000 0.780 -0.532 1.216 0.000 0.936 2.022 0.985 1.574 4.323 2.263 1.742 +1 0.603 0.429 -0.279 1.448 1.301 1.008 2.423 -1.295 0.000 0.452 1.305 0.533 0.000 1.076 1.011 1.256 2.548 2.021 1.260 -0.343 0.000 0.890 
0.969 1.281 0.763 0.652 0.827 0.785 +0 1.171 -0.962 0.521 0.841 -0.315 1.196 -0.744 -0.882 2.173 0.726 -1.305 1.377 1.107 0.643 -1.790 -1.264 0.000 1.257 0.222 0.817 0.000 0.862 0.911 0.987 0.846 1.293 0.899 0.756 +1 1.392 -0.358 0.235 1.494 -0.461 0.895 -0.848 1.549 2.173 0.841 -0.384 0.666 1.107 1.199 2.509 -0.891 0.000 1.109 -0.364 -0.945 0.000 0.693 2.135 1.170 1.362 0.959 2.056 1.842 +1 1.024 1.076 -0.886 0.851 1.530 0.673 -0.449 0.187 1.087 0.628 -0.895 1.176 2.215 0.696 -0.232 -0.875 0.000 0.411 1.501 0.048 0.000 0.842 0.919 1.063 1.193 0.777 0.964 0.807 +1 0.890 -0.760 1.182 1.369 0.751 0.696 -0.959 -0.710 1.087 0.775 -0.130 -1.409 2.215 0.701 -0.110 -0.739 0.000 0.508 -0.451 0.390 0.000 0.762 0.738 0.998 1.126 0.788 0.940 0.790 +1 0.460 0.537 0.636 1.442 -0.269 0.585 0.323 -1.731 2.173 0.503 1.034 -0.927 0.000 0.928 -1.024 1.006 2.548 0.513 -0.618 -1.336 0.000 0.802 0.831 0.992 1.019 0.925 1.056 0.833 +1 0.364 1.648 0.560 1.720 0.829 1.110 0.811 -0.588 0.000 0.408 1.045 1.054 2.215 0.319 -1.138 1.545 0.000 0.423 1.025 -1.265 3.102 1.656 0.928 1.003 0.544 0.327 0.670 0.746 +1 0.525 -0.096 1.206 0.948 -1.103 1.519 -0.582 0.606 2.173 1.274 -0.572 -0.934 0.000 0.855 -1.028 -1.222 0.000 0.578 -1.000 -1.725 3.102 0.896 0.878 0.981 0.498 0.909 0.772 0.668 +0 0.536 -0.821 -1.029 0.703 1.113 0.363 -0.711 0.022 1.087 0.325 1.503 1.249 2.215 0.673 1.041 -0.401 0.000 0.480 2.127 1.681 0.000 0.767 1.034 0.990 0.671 0.836 0.669 0.663 +1 1.789 -0.583 1.641 0.897 0.799 0.515 -0.100 -1.483 0.000 1.101 0.031 -0.326 2.215 1.195 0.001 0.126 2.548 0.768 -0.148 0.601 0.000 0.916 0.921 1.207 1.069 0.483 0.934 0.795 +1 1.332 -0.571 0.986 0.580 1.508 0.582 0.634 -0.746 1.087 1.084 -0.964 -0.489 0.000 0.785 0.274 0.343 2.548 0.779 0.721 1.489 0.000 1.733 1.145 0.990 1.270 0.715 0.897 0.915 +0 1.123 0.629 -1.708 0.597 -0.882 0.752 0.195 1.522 2.173 1.671 1.515 -0.003 0.000 0.778 0.514 0.139 1.274 0.801 1.260 1.600 0.000 1.495 0.976 0.988 0.676 0.921 1.010 0.943 +0 1.816 -0.515 0.171 0.980 -0.454 0.870 0.202 -1.399 2.173 1.130 1.066 -1.593 0.000 0.844 0.735 1.275 2.548 1.125 -1.133 0.348 0.000 0.837 0.693 0.988 1.112 0.784 1.009 0.974 +1 0.364 0.694 0.445 1.862 0.159 0.963 -1.356 1.260 1.087 0.887 -0.540 -1.533 2.215 0.658 -2.544 -1.236 0.000 0.516 -0.807 0.039 0.000 0.891 1.004 0.991 1.092 0.976 1.000 0.953 +1 0.790 -1.175 0.475 1.846 0.094 0.999 -1.090 0.257 0.000 1.422 0.854 1.112 2.215 1.302 1.004 -1.702 1.274 2.557 -0.787 -1.048 0.000 0.890 1.429 0.993 2.807 0.840 2.248 1.821 +1 0.765 -0.500 -0.603 1.843 -0.560 1.068 0.007 0.746 2.173 1.154 -0.017 1.329 0.000 1.165 1.791 -1.585 0.000 1.116 0.441 -0.886 0.000 0.774 0.982 0.989 1.102 0.633 1.178 1.021 +1 1.407 1.293 -1.418 0.502 -1.527 2.005 -2.122 0.622 0.000 1.699 1.508 -0.649 2.215 1.665 0.748 -0.755 0.000 2.555 0.811 1.423 1.551 7.531 5.520 0.985 1.115 1.881 4.487 3.379 +1 0.772 -0.186 -1.372 0.823 -0.140 0.781 0.763 0.046 2.173 1.128 0.516 1.380 0.000 0.797 -0.640 -0.134 2.548 2.019 -0.972 -1.670 0.000 2.022 1.466 0.989 0.856 0.808 1.230 0.991 +1 0.546 -0.954 0.715 1.335 -1.689 0.783 -0.443 -1.735 2.173 1.081 0.185 -0.435 0.000 1.433 -0.662 -0.389 0.000 0.969 0.924 1.099 0.000 0.910 0.879 0.988 0.683 0.753 0.878 0.865 +1 0.596 0.276 -1.054 1.358 1.355 1.444 1.813 -0.208 0.000 1.175 -0.949 -1.573 0.000 0.855 -1.228 -0.925 2.548 1.837 -0.400 0.913 0.000 0.637 0.901 1.028 0.553 0.790 0.679 0.677 +0 0.458 2.292 1.530 0.291 1.283 0.749 -0.930 -0.198 0.000 0.300 -1.560 0.990 0.000 0.811 -0.176 0.995 2.548 1.085 -0.178 -1.213 3.102 0.891 0.648 0.999 0.732 
0.655 0.619 0.620 +0 0.638 -0.575 -1.048 0.125 0.178 0.846 -0.753 -0.339 1.087 0.799 -0.727 1.182 0.000 0.888 0.283 0.717 0.000 1.051 -1.046 -1.557 3.102 0.889 0.871 0.989 0.884 0.923 0.836 0.779 +1 0.434 -1.119 -0.313 2.427 0.461 0.497 0.261 -1.177 2.173 0.618 -0.737 -0.688 0.000 1.150 -1.237 -1.652 2.548 0.757 -0.054 1.700 0.000 0.809 0.741 0.982 1.450 0.936 1.086 0.910 +1 0.431 -1.144 -1.030 0.778 -0.655 0.490 0.047 -1.546 0.000 1.583 -0.014 0.891 2.215 0.516 0.956 0.567 2.548 0.935 -1.123 -0.082 0.000 0.707 0.995 0.995 0.700 0.602 0.770 0.685 +1 1.894 0.222 1.224 1.578 1.715 0.966 2.890 -0.013 0.000 0.922 -0.703 -0.844 0.000 0.691 2.056 1.039 0.000 0.900 -0.733 -1.240 3.102 1.292 1.992 1.026 0.881 0.684 1.759 1.755 +0 0.985 -0.316 0.141 1.067 -0.946 0.819 -1.177 1.307 2.173 1.080 -0.429 0.557 1.107 1.726 1.435 -1.075 0.000 1.100 1.547 -0.647 0.000 0.873 1.696 1.179 1.146 1.015 1.538 1.270 +0 0.998 -0.187 -0.236 0.882 0.755 0.468 0.950 -0.439 2.173 0.579 -0.550 -0.624 0.000 1.847 1.196 1.384 1.274 0.846 1.273 -1.072 0.000 1.194 0.797 1.013 1.319 1.174 0.963 0.898 +0 0.515 0.246 -0.593 1.082 1.591 0.912 -0.623 -0.957 2.173 0.858 0.418 0.844 0.000 0.948 2.519 1.599 0.000 1.158 1.385 -0.095 3.102 0.973 1.033 0.988 0.998 1.716 1.054 0.901 +0 0.919 -1.001 1.506 1.389 0.653 0.507 -0.616 -0.689 2.173 0.808 0.536 -0.467 2.215 0.496 2.187 -0.859 0.000 0.822 0.807 1.163 0.000 0.876 0.861 1.088 0.947 0.614 0.911 1.087 +0 0.794 0.051 1.477 1.504 -1.695 0.716 0.315 0.264 1.087 0.879 -0.135 -1.094 2.215 1.433 -0.741 0.201 0.000 1.566 0.534 -0.989 0.000 0.627 0.882 0.974 0.807 1.130 0.929 0.925 +1 0.455 -0.946 -1.175 1.453 -0.580 0.763 -0.856 0.840 0.000 0.829 1.223 1.174 2.215 0.714 0.638 -0.466 0.000 1.182 0.223 -1.333 0.000 0.977 0.938 0.986 0.713 0.714 0.796 0.843 +1 0.662 -0.296 -1.287 1.212 -0.707 0.641 1.457 0.222 0.000 0.600 0.525 -1.700 2.215 0.784 -0.835 -0.961 2.548 0.865 1.131 1.162 0.000 0.854 0.877 0.978 0.740 0.734 0.888 0.811 +0 0.390 0.698 -1.629 1.888 0.298 0.990 1.614 -1.572 0.000 1.666 0.170 0.719 2.215 1.590 1.064 -0.886 1.274 0.952 0.305 -1.216 0.000 1.048 0.897 1.173 0.891 1.936 1.273 1.102 +0 1.014 0.117 1.384 0.686 -1.047 0.609 -1.245 -0.850 0.000 1.076 -1.158 0.814 1.107 1.598 -0.389 -0.111 0.000 0.907 1.688 -1.673 0.000 1.333 0.866 0.989 0.975 0.442 0.797 0.788 +0 1.530 -1.408 -0.207 0.440 -1.357 0.902 -0.647 1.325 1.087 1.320 -0.819 0.246 1.107 0.503 1.407 -1.683 0.000 1.189 -0.972 -0.925 0.000 0.386 1.273 0.988 0.829 1.335 1.173 1.149 +1 1.689 -0.590 0.915 2.076 1.202 0.644 -0.478 -0.238 0.000 0.809 -1.660 -1.184 0.000 1.227 -0.224 -0.808 2.548 1.655 1.047 -0.623 0.000 0.621 1.192 0.988 1.309 0.866 0.924 1.012 +0 1.102 0.402 -1.622 1.262 1.022 0.576 0.271 -0.269 0.000 0.591 0.495 -1.278 0.000 1.271 0.209 0.575 2.548 0.941 0.964 -0.685 3.102 0.989 0.963 1.124 0.857 0.858 0.716 0.718 +0 2.491 0.825 0.581 1.593 0.205 0.782 -0.815 1.499 0.000 1.179 -0.999 -1.509 0.000 0.926 0.920 -0.522 2.548 2.068 -1.021 -1.050 3.102 0.874 0.943 0.980 0.945 1.525 1.570 1.652 +0 0.666 0.254 1.601 1.303 -0.250 1.236 -1.929 0.793 0.000 1.074 0.447 -0.871 0.000 0.991 1.059 -0.342 0.000 1.703 -0.393 -1.419 3.102 0.921 0.945 1.285 0.931 0.462 0.770 0.729 +0 0.937 -1.126 1.424 1.395 1.743 0.760 0.428 -0.238 2.173 0.846 0.494 1.320 2.215 0.872 -1.826 -0.507 0.000 0.612 1.860 1.403 0.000 3.402 2.109 0.985 1.298 1.165 1.404 1.240 +1 0.881 -1.086 -0.870 0.513 0.266 2.049 -1.870 1.160 0.000 2.259 -0.428 -0.935 2.215 1.321 -0.655 -0.449 2.548 1.350 -1.766 -0.108 0.000 0.911 1.852 0.987 1.167 0.820 1.903 
1.443 +0 0.410 0.835 -0.819 1.257 1.112 0.871 -1.737 -0.401 0.000 0.927 0.158 1.253 0.000 1.183 0.405 -1.570 0.000 0.807 -0.704 -0.438 3.102 0.932 0.962 0.987 0.653 0.315 0.616 0.648 +1 0.634 0.196 -1.679 1.379 -0.967 2.260 -0.273 1.114 0.000 1.458 1.070 -0.278 1.107 1.195 0.110 -0.688 2.548 0.907 0.298 -1.359 0.000 0.949 1.129 0.984 0.675 0.877 0.938 0.824 +1 0.632 -1.254 1.201 0.496 -0.106 0.235 2.731 -0.955 0.000 0.615 -0.805 0.600 0.000 0.633 -0.934 1.641 0.000 1.407 -0.483 -0.962 1.551 0.778 0.797 0.989 0.578 0.722 0.576 0.539 +0 0.714 1.122 1.566 2.399 -1.431 1.665 0.299 0.323 0.000 1.489 1.087 -0.861 2.215 1.174 0.140 1.083 2.548 0.404 -0.968 1.105 0.000 0.867 0.969 0.981 1.039 1.552 1.157 1.173 +1 0.477 -0.321 -0.471 1.966 1.034 2.282 1.359 -0.874 0.000 1.672 -0.258 1.109 0.000 1.537 0.604 0.231 2.548 1.534 -0.640 0.827 0.000 0.746 1.337 1.311 0.653 0.721 0.795 0.742 +1 1.351 0.460 0.031 1.194 -1.185 0.670 -1.157 -1.637 2.173 0.599 -0.823 0.680 0.000 0.478 0.373 1.716 0.000 0.809 -0.919 0.010 1.551 0.859 0.839 1.564 0.994 0.777 0.971 0.826 +1 0.520 -1.442 -0.348 0.840 1.654 1.273 -0.760 1.317 0.000 0.861 2.579 -0.791 0.000 1.779 0.257 -0.703 0.000 2.154 1.928 0.457 0.000 1.629 3.194 0.992 0.730 1.107 2.447 2.747 +0 0.700 -0.308 0.920 0.438 -0.879 0.516 1.409 1.101 0.000 0.960 0.701 -0.049 2.215 1.442 -0.416 -1.439 2.548 0.628 1.009 -0.364 0.000 0.848 0.817 0.987 0.759 1.421 0.937 0.920 +1 0.720 1.061 -0.546 0.798 -1.521 1.066 0.173 0.271 1.087 1.453 0.114 1.336 1.107 0.702 0.616 -0.367 0.000 0.543 -0.386 -1.301 0.000 0.653 0.948 0.989 1.031 1.500 0.965 0.790 +1 0.735 -0.416 0.588 1.308 -0.382 1.042 0.344 1.609 0.000 0.926 0.163 -0.520 1.107 1.050 -0.427 1.159 2.548 0.834 0.613 0.948 0.000 0.848 1.189 1.042 0.844 1.099 0.829 0.843 +1 0.777 -0.396 1.540 1.608 0.638 0.955 0.040 0.918 2.173 1.315 1.116 -0.823 0.000 0.781 -0.762 0.564 2.548 0.945 -0.573 1.379 0.000 0.679 0.706 1.124 0.608 0.593 0.515 0.493 +1 0.934 0.319 -0.257 0.970 -0.980 0.726 0.774 0.731 0.000 0.896 0.038 -1.465 1.107 0.773 -0.055 -0.831 2.548 1.439 -0.229 0.698 0.000 0.964 1.031 0.995 0.845 0.480 0.810 0.762 +0 0.461 0.771 0.019 2.055 -1.288 1.043 0.147 0.261 2.173 0.833 -0.156 1.425 0.000 0.832 0.805 -0.491 2.548 0.589 1.252 1.414 0.000 0.850 0.906 1.245 1.364 0.850 0.908 0.863 +1 0.858 -0.116 -0.937 0.966 1.167 0.825 -0.108 1.111 1.087 0.733 1.163 -0.634 0.000 0.894 0.771 0.020 0.000 0.846 -1.124 -1.195 3.102 0.724 1.194 1.195 0.813 0.969 0.985 0.856 +0 0.720 -0.335 -0.307 1.445 0.540 1.108 -0.034 -1.691 1.087 0.883 -1.356 -0.678 2.215 0.440 1.093 0.253 0.000 0.389 -1.582 -1.097 0.000 1.113 1.034 0.988 1.256 1.572 1.062 0.904 +1 0.750 -0.811 -0.542 0.985 0.408 0.471 0.477 0.355 0.000 1.347 -0.875 -1.556 2.215 0.564 1.082 -0.724 0.000 0.793 -0.958 -0.020 3.102 0.836 0.825 0.986 1.066 0.924 0.927 0.883 +0 0.392 -0.468 -0.216 0.680 1.565 1.086 -0.765 -0.581 1.087 1.264 -1.035 1.189 2.215 0.986 -0.338 0.747 0.000 0.884 -1.328 -0.965 0.000 1.228 0.988 0.982 1.135 1.741 1.108 0.956 +1 0.434 -1.269 0.643 0.713 0.608 0.597 0.832 1.627 0.000 0.708 -0.422 0.079 2.215 1.533 -0.823 -1.127 2.548 0.408 -1.357 -0.828 0.000 1.331 1.087 0.999 1.075 1.015 0.875 0.809 +0 0.828 -1.803 0.342 0.847 -0.162 1.585 -1.128 -0.272 2.173 1.974 0.039 -1.717 0.000 0.900 0.764 -1.741 0.000 1.349 -0.079 1.035 3.102 0.984 0.815 0.985 0.780 1.661 1.403 1.184 +1 1.089 -0.350 -0.747 1.472 0.792 1.087 -0.069 -1.192 0.000 0.512 -0.841 -1.284 0.000 2.162 -0.821 0.545 2.548 1.360 2.243 -0.183 0.000 0.977 0.628 1.725 1.168 0.635 0.823 0.822 +1 0.444 
0.451 -1.332 1.176 -0.247 0.898 0.194 0.007 0.000 1.958 0.576 -1.618 2.215 0.584 1.203 0.268 0.000 0.939 1.033 1.264 3.102 0.829 0.886 0.985 1.265 0.751 1.032 0.948 +0 0.629 0.114 1.177 0.917 -1.204 0.845 0.828 -0.088 0.000 0.962 -1.302 0.823 2.215 0.732 0.358 -1.334 2.548 0.538 0.582 1.561 0.000 1.028 0.834 0.988 0.904 1.205 1.039 0.885 +1 1.754 -1.259 -0.573 0.959 -1.483 0.358 0.448 -1.452 0.000 0.711 0.313 0.499 2.215 1.482 -0.390 1.474 2.548 1.879 -1.540 0.668 0.000 0.843 0.825 1.313 1.315 0.939 1.048 0.871 +1 0.549 0.706 -1.437 0.894 0.891 0.680 -0.762 -1.568 0.000 0.981 0.499 -0.425 2.215 1.332 0.678 0.485 1.274 0.803 0.022 -0.893 0.000 0.793 1.043 0.987 0.761 0.899 0.915 0.794 +0 0.475 0.542 -0.987 1.569 0.069 0.551 1.543 -1.488 0.000 0.608 0.301 1.734 2.215 0.277 0.499 -0.522 0.000 1.375 1.212 0.696 3.102 0.652 0.756 0.987 0.828 0.830 0.715 0.679 +1 0.723 0.049 -1.153 1.300 0.083 0.723 -0.749 0.630 0.000 1.126 0.412 -0.384 0.000 1.272 1.256 1.358 2.548 3.108 0.777 -1.486 3.102 0.733 1.096 1.206 1.269 0.899 1.015 0.903 +1 1.062 0.296 0.725 0.285 -0.531 0.819 1.277 -0.667 0.000 0.687 0.829 -0.092 0.000 1.158 0.447 1.047 2.548 1.444 -0.186 -1.491 3.102 0.863 1.171 0.986 0.769 0.828 0.919 0.840 +0 0.572 -0.349 1.396 2.023 0.795 0.577 0.457 -0.533 0.000 1.351 0.701 -1.091 0.000 0.724 -1.012 -0.182 2.548 0.923 -0.012 0.789 3.102 0.936 1.025 0.985 1.002 0.600 0.828 0.909 +1 0.563 0.387 0.412 0.553 1.050 0.723 -0.992 -0.447 0.000 0.748 0.948 0.546 2.215 1.761 -0.559 -1.183 0.000 1.114 -0.251 1.192 3.102 0.936 0.912 0.976 0.578 0.722 0.829 0.892 +1 1.632 1.577 -0.697 0.708 -1.263 0.863 0.012 1.197 2.173 0.498 0.990 -0.806 0.000 0.627 2.387 -1.283 0.000 0.607 1.290 -0.174 3.102 0.916 1.328 0.986 0.557 0.971 0.935 0.836 +1 0.562 -0.360 0.399 0.803 -1.334 1.443 -0.116 1.628 2.173 0.750 0.987 0.135 1.107 0.795 0.298 -0.556 0.000 1.150 -0.113 -0.093 0.000 0.493 1.332 0.985 1.001 1.750 1.013 0.886 +1 0.987 0.706 -0.492 0.861 0.607 0.593 0.088 -0.184 0.000 0.802 0.894 1.608 2.215 0.782 -0.471 1.500 2.548 0.521 0.772 -0.960 0.000 0.658 0.893 1.068 0.877 0.664 0.709 0.661 +1 1.052 0.883 -0.581 1.566 0.860 0.931 1.515 -0.873 0.000 0.493 0.145 -0.672 0.000 1.133 0.935 1.581 2.548 1.630 0.695 0.923 3.102 1.105 1.087 1.713 0.948 0.590 0.872 0.883 +1 2.130 -0.516 -0.291 0.776 -1.230 0.689 -0.257 0.800 2.173 0.730 -0.274 -1.437 0.000 0.615 0.241 1.083 0.000 0.834 0.757 1.613 3.102 0.836 0.806 1.333 1.061 0.730 0.889 0.783 +1 0.742 0.797 1.628 0.311 -0.418 0.620 0.685 -1.457 0.000 0.683 1.774 -1.082 0.000 1.700 1.104 0.225 2.548 0.382 -2.184 -1.307 0.000 0.945 1.228 0.984 0.864 0.931 0.988 0.838 +0 0.311 -1.249 -0.927 1.272 -1.262 0.642 -1.228 -0.136 0.000 1.220 -0.804 -1.558 2.215 0.950 -0.828 0.495 1.274 2.149 -1.672 0.634 0.000 1.346 0.887 0.981 0.856 1.101 1.001 1.106 +0 0.660 -1.834 -0.667 0.601 1.236 0.932 -0.933 -0.135 2.173 1.373 -0.122 1.429 0.000 0.654 -0.034 -0.847 2.548 0.711 0.911 0.703 0.000 1.144 0.942 0.984 0.822 0.739 0.992 0.895 +0 3.609 -0.590 0.851 0.615 0.455 1.280 0.003 -0.866 1.087 1.334 0.708 -1.131 0.000 0.669 0.480 0.092 0.000 0.975 0.983 -1.429 3.102 1.301 1.089 0.987 1.476 0.934 1.469 1.352 +1 0.905 -0.403 1.567 2.651 0.953 1.194 -0.241 -0.567 1.087 0.308 -0.384 -0.007 0.000 0.608 -0.175 -1.163 2.548 0.379 0.941 1.662 0.000 0.580 0.721 1.126 0.895 0.544 1.097 0.836 +1 0.983 0.255 1.093 0.905 -0.874 0.863 0.060 -0.368 0.000 0.824 -0.747 -0.633 0.000 0.614 0.961 1.052 0.000 0.792 -0.260 1.632 3.102 0.874 0.883 1.280 0.663 0.406 0.592 0.645 +1 1.160 -1.027 0.274 0.460 0.322 2.085 
-1.623 -0.840 0.000 1.634 -1.046 1.182 2.215 0.492 -0.367 1.174 0.000 0.824 -0.998 1.617 0.000 0.943 0.884 1.001 1.209 1.313 1.034 0.866 +0 0.299 0.028 -1.372 1.930 -0.661 0.840 -0.979 0.664 1.087 0.535 -2.041 1.434 0.000 1.087 -1.797 0.344 0.000 0.485 -0.560 -1.105 3.102 0.951 0.890 0.980 0.483 0.684 0.730 0.706 +0 0.293 1.737 -1.418 2.074 0.794 0.679 1.024 -1.457 0.000 1.034 1.094 -0.168 1.107 0.506 1.680 -0.661 0.000 0.523 -0.042 -1.274 3.102 0.820 0.944 0.987 0.842 0.694 0.761 0.750 +0 0.457 -0.393 1.560 0.738 -0.007 0.475 -0.230 0.246 0.000 0.776 -1.264 -0.606 2.215 0.865 -0.731 -1.576 2.548 1.153 0.343 1.436 0.000 1.060 0.883 0.988 0.972 0.703 0.758 0.720 +0 0.935 -0.582 0.240 2.401 0.818 1.231 -0.618 -1.289 0.000 0.799 0.544 -0.228 2.215 0.525 -1.494 -0.969 0.000 0.609 -1.123 1.168 3.102 0.871 0.767 1.035 1.154 0.919 0.868 1.006 +1 0.902 -0.745 -1.215 1.174 -0.501 1.215 0.167 1.162 0.000 0.896 1.217 -0.976 0.000 0.585 -0.429 1.036 0.000 1.431 -0.416 0.151 3.102 0.524 0.952 0.990 0.707 0.271 0.592 0.826 +1 0.653 0.337 -0.320 1.118 -0.934 1.050 0.745 0.529 1.087 1.075 1.742 -1.538 0.000 0.585 1.090 0.973 0.000 1.091 -0.187 1.160 1.551 1.006 1.108 0.978 1.121 0.838 0.947 0.908 +0 1.157 1.401 0.340 0.395 -1.218 0.945 1.928 -0.876 0.000 1.384 0.320 1.002 1.107 1.900 1.177 -0.462 2.548 1.122 1.316 1.720 0.000 1.167 1.096 0.989 0.937 1.879 1.307 1.041 +0 0.960 0.355 -0.152 0.872 -0.338 0.391 0.348 0.956 1.087 0.469 2.664 1.409 0.000 0.756 -1.561 1.500 0.000 0.525 1.436 1.728 3.102 1.032 0.946 0.996 0.929 0.470 0.698 0.898 +1 1.038 0.274 0.825 1.198 0.963 1.078 -0.496 -1.014 2.173 0.739 -0.727 -0.151 2.215 1.035 -0.799 0.398 0.000 1.333 -0.872 -1.498 0.000 0.849 1.033 0.985 0.886 0.936 0.975 0.823 +0 0.490 0.277 0.318 1.303 0.694 1.333 -1.620 -0.563 0.000 1.459 -1.326 1.140 0.000 0.779 -0.673 -1.324 2.548 0.860 -1.247 0.043 0.000 0.857 0.932 0.992 0.792 0.278 0.841 1.498 +0 1.648 -0.688 -1.386 2.790 0.995 1.087 1.359 -0.687 0.000 1.050 -0.223 -0.261 2.215 0.613 -0.889 1.335 0.000 1.204 0.827 0.309 3.102 0.464 0.973 2.493 1.737 0.827 1.319 1.062 +0 1.510 -0.662 1.668 0.860 0.280 0.705 0.974 -1.647 1.087 0.662 -0.393 -0.225 0.000 0.610 -0.996 0.532 2.548 0.464 1.305 0.102 0.000 0.859 1.057 1.498 0.799 1.260 0.946 0.863 +1 0.850 -1.185 -0.117 0.943 -0.449 1.142 0.875 -0.030 0.000 2.223 -0.461 1.627 2.215 0.767 -1.761 -1.692 0.000 1.012 -0.727 0.639 3.102 3.649 2.062 0.985 1.478 1.087 1.659 1.358 +0 0.933 1.259 0.130 0.326 -0.890 0.306 1.136 1.142 0.000 0.964 0.705 -1.373 2.215 0.546 -0.196 -0.001 0.000 0.578 -1.169 1.004 3.102 0.830 0.836 0.988 0.837 1.031 0.749 0.655 +0 0.471 0.697 1.570 1.109 0.201 1.248 0.348 -1.448 0.000 2.103 0.773 0.686 2.215 1.451 -0.087 -0.453 2.548 1.197 -0.045 -1.026 0.000 0.793 1.094 0.987 0.851 1.804 1.378 1.089 +1 2.446 -0.701 -1.568 0.059 0.822 1.401 -0.600 -0.044 2.173 0.324 -0.001 1.344 2.215 0.913 -0.818 1.049 0.000 0.442 -1.088 -0.005 0.000 0.611 1.062 0.979 0.562 0.988 0.998 0.806 +0 0.619 2.029 0.933 0.528 -0.903 0.974 0.760 -0.311 2.173 0.825 0.658 -1.466 1.107 0.894 1.594 0.370 0.000 0.882 -0.258 1.661 0.000 1.498 1.088 0.987 0.867 1.139 0.900 0.779 +1 0.674 -0.131 -0.362 0.518 -1.574 0.876 0.442 0.145 1.087 0.497 -1.526 -1.704 0.000 0.680 2.514 -1.374 0.000 0.792 -0.479 0.773 1.551 0.573 1.198 0.984 0.800 0.667 0.987 0.832 +1 1.447 1.145 -0.937 0.307 -1.458 0.478 1.264 0.816 1.087 0.558 1.015 -0.101 2.215 0.937 -0.190 1.177 0.000 0.699 0.954 -1.512 0.000 0.877 0.838 0.990 0.873 0.566 0.646 0.713 +1 0.976 0.308 -0.844 0.436 0.610 1.253 0.149 -1.585 2.173 
1.415 0.568 0.096 2.215 0.953 -0.855 0.441 0.000 0.867 -0.650 1.643 0.000 0.890 1.234 0.988 0.796 2.002 1.179 0.977 +0 0.697 0.401 -0.718 0.920 0.735 0.958 -0.172 0.168 2.173 0.872 -0.097 -1.335 0.000 0.513 -1.192 -1.710 1.274 0.426 -1.637 1.368 0.000 0.997 1.227 1.072 0.800 1.013 0.786 0.749 +1 1.305 -2.157 1.740 0.661 -0.912 0.705 -0.516 0.759 2.173 0.989 -0.716 -0.300 2.215 0.627 -1.052 -1.736 0.000 0.467 -2.467 0.568 0.000 0.807 0.964 0.988 1.427 1.012 1.165 0.926 +0 1.847 1.663 -0.618 0.280 1.258 1.462 -0.054 1.371 0.000 0.900 0.309 -0.544 0.000 0.331 -2.149 -0.341 0.000 1.091 -0.833 0.710 3.102 1.496 0.931 0.989 1.549 0.115 1.140 1.150 +0 0.410 -0.323 1.069 2.160 0.010 0.892 0.942 -1.640 2.173 0.946 0.938 1.314 0.000 1.213 -1.099 -0.794 2.548 0.650 0.053 0.056 0.000 1.041 0.916 1.063 0.985 1.910 1.246 1.107 +1 0.576 1.092 -0.088 0.777 -1.579 0.757 0.271 0.109 0.000 0.819 0.827 -1.554 2.215 1.313 2.341 -1.568 0.000 2.827 0.239 -0.338 0.000 0.876 0.759 0.986 0.692 0.457 0.796 0.791 +1 0.537 0.925 -1.406 0.306 -0.050 0.906 1.051 0.037 0.000 1.469 -0.177 -1.320 2.215 1.872 0.723 1.158 0.000 1.313 0.227 -0.501 3.102 0.953 0.727 0.978 0.755 0.892 0.932 0.781 +0 0.716 -0.065 -0.484 1.313 -1.563 0.596 -0.242 0.678 2.173 0.426 -1.909 0.616 0.000 0.885 -0.406 -1.343 2.548 0.501 -1.327 -0.340 0.000 0.470 0.728 1.109 0.919 0.881 0.665 0.692 +1 0.624 -0.389 0.128 1.636 -1.110 1.025 0.573 -0.843 2.173 0.646 -0.697 1.064 0.000 0.632 -1.442 0.961 0.000 0.863 -0.106 1.717 0.000 0.825 0.917 1.257 0.983 0.713 0.890 0.824 +0 0.484 2.101 1.714 1.131 -0.823 0.750 0.583 -1.304 1.087 0.894 0.421 0.559 2.215 0.921 -0.063 0.282 0.000 0.463 -0.474 -1.387 0.000 0.742 0.886 0.995 0.993 1.201 0.806 0.754 +0 0.570 0.339 -1.478 0.528 0.439 0.978 1.479 -1.411 2.173 0.763 1.541 -0.734 0.000 1.375 0.840 0.903 0.000 0.965 1.599 0.364 0.000 0.887 1.061 0.992 1.322 1.453 1.013 0.969 +0 0.940 1.303 1.636 0.851 -1.732 0.803 -0.030 -0.177 0.000 0.480 -0.125 -0.954 0.000 0.944 0.709 0.296 2.548 1.342 -0.418 1.197 3.102 0.853 0.989 0.979 0.873 0.858 0.719 0.786 +1 0.599 0.544 -0.238 0.816 1.043 0.857 0.660 1.128 2.173 0.864 -0.624 -0.843 0.000 1.159 0.367 0.174 0.000 1.520 -0.543 -1.508 0.000 0.842 0.828 0.984 0.759 0.895 0.918 0.791 +1 1.651 1.897 -0.914 0.423 0.315 0.453 0.619 -1.607 2.173 0.532 -0.424 0.209 1.107 0.369 2.479 0.034 0.000 0.701 0.217 0.984 0.000 0.976 0.951 1.035 0.879 0.825 0.915 0.798 +1 0.926 -0.574 -0.763 0.285 1.094 0.672 2.314 1.545 0.000 1.124 0.415 0.809 0.000 1.387 0.270 -0.949 2.548 1.547 -0.631 -0.200 3.102 0.719 0.920 0.986 0.889 0.933 0.797 0.777 +0 0.677 1.698 -0.890 0.641 -0.449 0.607 1.754 1.720 0.000 0.776 0.372 0.782 2.215 0.511 1.491 -0.480 0.000 0.547 -0.341 0.853 3.102 0.919 1.026 0.997 0.696 0.242 0.694 0.687 +0 1.266 0.602 0.958 0.487 1.256 0.709 0.843 -1.196 0.000 0.893 1.303 -0.594 1.107 1.090 1.320 0.354 0.000 0.797 1.846 1.139 0.000 0.780 0.896 0.986 0.661 0.709 0.790 0.806 +1 0.628 -0.616 -0.329 0.764 -1.150 0.477 -0.715 1.187 2.173 1.250 0.607 1.026 2.215 0.983 -0.023 -0.583 0.000 0.377 1.344 -1.015 0.000 0.744 0.954 0.987 0.837 0.841 0.795 0.694 +1 1.035 -0.828 -1.358 1.870 -1.060 1.075 0.130 0.448 2.173 0.660 0.697 0.641 0.000 0.425 1.006 -1.035 0.000 0.751 1.055 1.364 3.102 0.826 0.822 0.988 0.967 0.901 1.077 0.906 +1 0.830 0.265 -0.150 0.660 1.105 0.592 -0.557 0.908 2.173 0.670 -1.419 -0.671 0.000 1.323 -0.409 1.644 2.548 0.850 -0.033 -0.615 0.000 0.760 0.967 0.984 0.895 0.681 0.747 0.770 +1 1.395 1.100 1.167 1.088 0.218 0.400 -0.132 0.024 2.173 0.743 0.530 -1.361 2.215 0.341 
-0.691 -0.238 0.000 0.396 -1.426 -0.933 0.000 0.363 0.472 1.287 0.922 0.810 0.792 0.656 +1 1.070 1.875 -1.298 1.215 -0.106 0.767 0.795 0.514 1.087 0.401 2.780 1.276 0.000 0.686 1.127 1.721 2.548 0.391 -0.259 -1.167 0.000 1.278 1.113 1.389 0.852 0.824 0.838 0.785 +0 1.114 -0.071 1.719 0.399 -1.383 0.849 0.254 0.481 0.000 0.958 -0.579 0.742 0.000 1.190 -0.140 -0.862 2.548 0.479 1.390 0.856 0.000 0.952 0.988 0.985 0.764 0.419 0.835 0.827 +0 0.714 0.376 -0.568 1.578 -1.165 0.648 0.141 0.639 2.173 0.472 0.569 1.449 1.107 0.783 1.483 0.361 0.000 0.540 -0.790 0.032 0.000 0.883 0.811 0.982 0.775 0.572 0.760 0.745 +0 0.401 -1.731 0.765 0.974 1.648 0.652 -1.024 0.191 0.000 0.544 -0.366 -1.246 2.215 0.627 0.140 1.008 2.548 0.810 0.409 0.429 0.000 0.950 0.934 0.977 0.621 0.580 0.677 0.650 +1 0.391 1.679 -1.298 0.605 -0.832 0.549 1.338 0.522 2.173 1.244 0.884 1.070 0.000 1.002 0.846 -1.345 2.548 0.783 -2.464 -0.237 0.000 4.515 2.854 0.981 0.877 0.939 1.942 1.489 +1 0.513 -0.220 -0.444 1.699 0.479 1.109 0.181 -0.999 2.173 0.883 -0.335 -1.716 2.215 1.075 -0.380 1.352 0.000 0.857 0.048 0.147 0.000 0.937 0.758 0.986 1.206 0.958 0.949 0.876 +0 1.367 -0.388 0.798 1.158 1.078 0.811 -1.024 -1.628 0.000 1.504 0.097 -0.999 2.215 1.652 -0.860 0.054 2.548 0.573 -0.142 -1.401 0.000 0.869 0.833 1.006 1.412 1.641 1.214 1.041 +1 1.545 -0.533 -1.517 1.177 1.289 2.331 -0.370 -0.073 0.000 1.295 -0.358 -0.891 2.215 0.476 0.756 0.985 0.000 1.945 -0.016 -1.651 3.102 1.962 1.692 1.073 0.656 0.941 1.312 1.242 +0 0.858 0.978 -1.258 0.286 0.161 0.729 1.230 1.087 2.173 0.561 2.670 -0.109 0.000 0.407 2.346 0.938 0.000 1.078 0.729 -0.658 3.102 0.597 0.921 0.982 0.579 0.954 0.733 0.769 +1 1.454 -1.384 0.870 0.067 0.394 1.033 -0.673 0.318 0.000 1.166 -0.763 -1.533 2.215 2.848 -0.045 -0.856 2.548 0.697 -0.140 1.134 0.000 0.931 1.293 0.977 1.541 1.326 1.201 1.078 +1 0.559 -0.913 0.486 1.104 -0.321 1.073 -0.348 1.345 0.000 0.901 -0.827 -0.842 0.000 0.739 0.047 -0.415 2.548 0.433 -1.132 1.268 0.000 0.797 0.695 0.985 0.868 0.346 0.674 0.623 +1 1.333 0.780 -0.964 0.916 1.202 1.822 -0.071 0.742 2.173 1.486 -0.399 -0.824 0.000 0.740 0.568 -0.134 0.000 0.971 -0.070 -1.589 3.102 1.278 0.929 1.421 1.608 1.214 1.215 1.137 +1 2.417 0.631 -0.317 0.323 0.581 0.841 1.524 -1.738 0.000 0.543 1.176 -0.325 0.000 0.827 0.700 0.866 0.000 0.834 -0.262 -1.702 3.102 0.932 0.820 0.988 0.646 0.287 0.595 0.589 +0 0.955 -1.242 0.938 1.104 0.474 0.798 -0.743 1.535 0.000 1.356 -1.357 -1.080 2.215 1.320 -1.396 -0.132 2.548 0.728 -0.529 -0.633 0.000 0.832 0.841 0.988 0.923 1.077 0.988 0.816 +1 1.305 -1.918 0.391 1.161 0.063 0.724 2.593 1.481 0.000 0.592 -1.207 -0.329 0.000 0.886 -0.836 -1.168 2.548 1.067 -1.481 -1.440 0.000 0.916 0.688 0.991 0.969 0.550 0.665 0.638 +0 1.201 0.071 -1.123 2.242 -1.533 0.702 -0.256 0.688 0.000 0.967 0.491 1.040 2.215 1.271 -0.558 0.095 0.000 1.504 0.676 -0.383 3.102 0.917 1.006 0.985 1.017 1.057 0.928 1.057 +0 0.994 -1.607 1.596 0.774 -1.391 0.625 -0.134 -0.862 2.173 0.746 -0.765 -0.316 2.215 1.131 -0.320 0.869 0.000 0.607 0.826 0.301 0.000 0.798 0.967 0.999 0.880 0.581 0.712 0.774 +1 0.482 -0.467 0.729 1.419 1.458 0.824 0.376 -0.242 0.000 1.368 0.023 1.459 2.215 0.826 0.669 -1.079 2.548 0.936 2.215 -0.309 0.000 1.883 1.216 0.997 1.065 0.946 1.224 1.526 +1 0.383 1.588 1.611 0.748 1.194 0.866 -0.279 -0.636 0.000 0.707 0.536 0.801 2.215 1.647 -1.155 0.367 0.000 1.292 0.303 -1.681 3.102 2.016 1.581 0.986 0.584 0.684 1.107 0.958 +0 0.629 0.203 0.736 0.671 -0.271 1.350 -0.486 0.761 2.173 0.496 -0.805 -1.718 0.000 2.393 0.044 -1.046 1.274 
0.651 -0.116 -0.541 0.000 0.697 1.006 0.987 1.069 2.317 1.152 0.902 +0 0.905 -0.564 -0.570 0.263 1.096 1.219 -1.397 -1.414 1.087 1.164 -0.533 -0.208 0.000 1.459 1.965 0.784 0.000 2.220 -1.421 0.452 0.000 0.918 1.360 0.993 0.904 0.389 2.118 1.707 +1 1.676 1.804 1.171 0.529 1.175 1.664 0.354 -0.530 0.000 1.004 0.691 -1.280 2.215 0.838 0.373 0.626 2.548 1.094 1.774 0.501 0.000 0.806 1.100 0.991 0.769 0.976 0.807 0.740 +1 1.364 -1.936 0.020 1.327 0.428 1.021 -1.665 -0.907 2.173 0.818 -2.701 1.303 0.000 0.716 -0.590 -1.629 2.548 0.895 -2.280 -1.602 0.000 1.211 0.849 0.989 1.320 0.864 1.065 0.949 +0 0.629 -0.626 0.609 1.828 1.280 0.644 -0.856 -0.873 2.173 0.555 1.066 -0.640 0.000 0.477 -1.364 -1.021 2.548 1.017 0.036 0.380 0.000 0.947 0.941 0.994 1.128 0.241 0.793 0.815 +1 1.152 -0.843 0.926 1.802 0.800 2.493 -1.449 -1.127 0.000 1.737 0.833 0.488 0.000 1.026 0.929 -0.990 2.548 1.408 0.689 1.142 3.102 1.171 0.956 0.993 2.009 0.867 1.499 1.474 +0 2.204 0.081 0.008 1.021 -0.679 2.676 0.090 1.163 0.000 2.210 -1.686 -1.195 0.000 1.805 0.891 -0.148 2.548 0.450 -0.502 -1.295 3.102 6.959 3.492 1.205 0.908 0.845 2.690 2.183 +1 0.957 0.954 1.702 0.043 -0.503 1.113 0.033 -0.308 0.000 0.757 -0.363 -1.129 2.215 1.635 0.068 1.048 1.274 0.415 -2.098 0.061 0.000 1.010 0.979 0.992 0.704 1.125 0.761 0.715 +0 1.222 0.418 1.059 1.303 1.442 0.282 -1.499 -1.286 0.000 1.567 0.016 -0.164 2.215 0.451 2.229 -1.229 0.000 0.660 -0.513 -0.296 3.102 2.284 1.340 0.985 1.531 0.314 1.032 1.094 +1 0.603 1.675 -0.973 0.703 -1.709 1.023 0.652 1.296 2.173 1.078 0.363 -0.263 0.000 0.734 -0.457 -0.745 1.274 0.561 1.434 -0.042 0.000 0.888 0.771 0.984 0.847 1.234 0.874 0.777 +0 0.897 0.949 -0.848 1.115 -0.085 0.522 -1.267 -1.418 0.000 0.684 -0.599 1.474 0.000 1.176 0.922 0.641 2.548 0.470 0.103 0.148 3.102 0.775 0.697 0.984 0.839 0.358 0.847 1.008 +1 0.987 1.013 -1.504 0.468 -0.259 1.160 0.476 -0.971 2.173 1.266 0.919 0.780 0.000 0.634 1.695 0.233 0.000 0.487 -0.082 0.719 3.102 0.921 0.641 0.991 0.730 0.828 0.952 0.807 +1 0.847 1.581 -1.397 1.629 1.529 1.053 0.816 -0.344 2.173 0.895 0.779 0.332 0.000 0.750 1.311 0.419 2.548 1.604 0.844 1.367 0.000 1.265 0.798 0.989 1.328 0.783 0.930 0.879 +1 0.805 1.416 -1.327 0.397 0.589 0.488 0.982 0.843 0.000 0.664 -0.999 0.129 0.000 0.624 0.613 -0.558 0.000 1.431 -0.667 -1.561 3.102 0.959 1.103 0.989 0.590 0.632 0.926 0.798 +0 1.220 -0.313 -0.489 1.759 0.201 1.698 -0.220 0.241 2.173 1.294 1.390 -1.682 0.000 1.447 -1.623 -1.296 0.000 1.710 0.872 -1.356 3.102 1.198 0.981 1.184 0.859 2.165 1.807 1.661 +0 0.772 -0.611 -0.549 0.465 -1.528 1.103 -0.140 0.001 2.173 0.854 -0.406 1.655 0.000 0.733 -1.250 1.072 0.000 0.883 0.627 -1.132 3.102 0.856 0.927 0.987 1.094 1.013 0.938 0.870 +1 1.910 0.771 0.828 0.231 1.267 1.398 1.455 -0.295 2.173 0.837 -2.564 0.770 0.000 0.540 2.189 1.287 0.000 1.345 1.311 -1.151 0.000 0.861 0.869 0.984 1.359 1.562 1.105 0.963 +1 0.295 0.832 1.399 1.222 -0.517 2.480 0.013 1.591 0.000 2.289 0.436 0.287 2.215 1.995 -0.367 -0.409 1.274 0.375 1.367 -1.716 0.000 1.356 2.171 0.990 1.467 1.664 1.855 1.705 +1 1.228 0.339 -0.575 0.417 1.474 0.480 -1.416 -1.498 2.173 0.614 -0.933 -0.961 0.000 1.189 1.690 1.003 0.000 1.690 -1.065 0.106 3.102 0.963 1.147 0.987 1.086 0.948 0.930 0.866 +0 2.877 -1.014 1.440 0.782 0.483 1.134 -0.735 -0.196 2.173 1.123 0.084 -0.596 0.000 1.796 -0.356 1.044 2.548 1.406 1.582 -0.991 0.000 0.939 1.178 1.576 0.996 1.629 1.216 1.280 +1 2.178 0.259 1.107 0.256 1.222 0.979 -0.440 -0.538 1.087 0.496 -0.760 -0.049 0.000 1.471 1.683 -1.486 0.000 0.646 0.695 -1.577 3.102 
1.093 1.070 0.984 0.608 0.889 0.962 0.866 +1 0.604 0.592 1.295 0.964 0.348 1.178 -0.016 0.832 2.173 1.626 -0.420 -0.760 0.000 0.748 0.461 -0.906 0.000 0.728 0.309 -1.269 1.551 0.852 0.604 0.989 0.678 0.949 1.021 0.878 +0 0.428 -1.352 -0.912 1.713 0.797 1.894 -1.452 0.191 2.173 2.378 2.113 -1.190 0.000 0.860 2.174 0.949 0.000 1.693 0.759 1.426 3.102 0.885 1.527 1.186 1.090 3.294 4.492 3.676 +0 0.473 0.485 0.154 1.433 -1.504 0.766 1.257 -1.302 2.173 0.414 0.119 0.238 0.000 0.805 0.242 -0.691 2.548 0.734 0.749 0.753 0.000 0.430 0.893 1.137 0.686 0.724 0.618 0.608 +1 0.763 -0.601 0.876 0.182 -1.678 0.818 0.599 0.481 2.173 0.658 -0.737 -0.553 0.000 0.857 -1.138 -1.435 0.000 1.540 -1.466 -0.447 0.000 0.870 0.566 0.989 0.728 0.658 0.821 0.726 +0 0.619 -0.273 -0.143 0.992 -1.267 0.566 0.876 -1.396 2.173 0.515 0.892 0.618 0.000 0.434 -0.902 0.862 2.548 0.490 -0.539 0.549 0.000 0.568 0.794 0.984 0.667 0.867 0.597 0.578 +0 0.793 0.970 0.324 0.570 0.816 0.761 -0.550 1.519 2.173 1.150 0.496 -0.447 0.000 0.925 0.724 1.008 1.274 1.135 -0.275 -0.843 0.000 0.829 1.068 0.978 1.603 0.892 1.041 1.059 +1 0.480 0.364 -0.067 1.906 -1.582 1.397 1.159 0.140 0.000 0.639 0.398 -1.102 0.000 1.597 -0.668 1.607 2.548 1.306 -0.797 0.288 3.102 0.856 1.259 1.297 1.022 1.032 1.049 0.939 +0 0.514 1.304 1.490 1.741 -0.220 0.648 0.155 0.535 0.000 0.562 -1.016 0.837 0.000 0.863 -0.780 -0.815 2.548 1.688 -0.130 -1.545 3.102 0.887 0.980 1.309 1.269 0.654 1.044 1.035 +0 1.225 0.333 0.656 0.893 0.859 1.037 -0.876 1.603 1.087 1.769 0.272 -0.227 2.215 1.000 0.579 -1.690 0.000 1.385 0.471 -0.860 0.000 0.884 1.207 0.995 1.097 2.336 1.282 1.145 +0 2.044 -1.472 -0.294 0.392 0.369 0.927 0.718 1.492 1.087 1.619 -0.736 0.047 2.215 1.884 -0.101 -1.540 0.000 0.548 -0.441 1.117 0.000 0.798 0.877 0.981 0.750 2.272 1.469 1.276 +0 1.037 -0.276 0.735 3.526 1.156 2.498 0.401 -0.590 1.087 0.714 -1.203 1.393 2.215 0.681 0.629 1.534 0.000 0.719 -0.355 -0.706 0.000 0.831 0.857 0.988 2.864 2.633 1.988 1.466 +1 0.651 -1.218 -0.791 0.770 -1.449 0.610 -0.535 0.960 2.173 0.380 -1.072 -0.031 2.215 0.415 2.123 -1.100 0.000 0.776 0.217 0.420 0.000 0.986 1.008 1.001 0.853 0.588 0.799 0.776 +0 1.586 -0.409 0.085 3.258 0.405 1.647 -0.674 -1.519 0.000 0.640 -1.027 -1.681 0.000 1.452 -0.444 -0.957 2.548 0.927 -0.017 1.215 3.102 0.519 0.866 0.992 0.881 0.847 1.018 1.278 +0 0.712 0.092 -0.466 0.688 1.236 0.921 -1.217 -1.022 2.173 2.236 -1.167 0.868 2.215 0.851 -1.892 -0.753 0.000 0.475 -1.216 -0.383 0.000 0.668 0.758 0.988 1.180 2.093 1.157 0.934 +0 0.419 0.471 0.974 2.805 0.235 1.473 -0.198 1.255 1.087 0.931 1.083 -0.712 0.000 1.569 1.358 -1.179 2.548 2.506 0.199 -0.842 0.000 0.929 0.991 0.992 1.732 2.367 1.549 1.430 +1 0.667 1.003 1.504 0.368 1.061 0.885 -0.318 -0.353 0.000 1.438 -1.939 0.710 0.000 1.851 0.277 -1.460 2.548 1.403 0.517 -0.157 0.000 0.883 1.019 1.000 0.790 0.859 0.938 0.841 +1 1.877 -0.492 0.372 0.441 0.955 1.034 -1.220 -0.846 1.087 0.952 -0.320 1.125 0.000 0.542 0.308 -1.261 2.548 1.018 -1.415 -1.547 0.000 1.280 0.932 0.991 1.273 0.878 0.921 0.906 +0 1.052 0.901 1.176 1.280 1.517 0.562 -1.150 -0.079 2.173 1.228 -0.308 -0.354 0.000 0.790 -1.492 -0.963 0.000 0.942 -0.672 -1.588 3.102 1.116 0.902 0.988 1.993 0.765 1.375 1.325 +1 0.518 -0.254 1.642 0.865 0.725 0.980 0.734 0.023 0.000 1.448 0.780 -1.736 2.215 0.955 0.513 -0.519 0.000 0.365 -0.444 -0.243 3.102 0.833 0.555 0.984 0.827 0.795 0.890 0.786 +0 0.870 0.815 -0.506 0.663 -0.518 0.935 0.289 -1.675 2.173 1.188 0.005 0.635 0.000 0.580 0.066 -1.455 2.548 0.580 -0.634 -0.199 0.000 0.852 0.788 0.979 
1.283 0.208 0.856 0.950 +0 0.628 1.382 0.135 0.683 0.571 1.097 0.564 -0.950 2.173 0.617 -0.326 0.371 0.000 1.093 0.918 1.667 2.548 0.460 1.221 0.708 0.000 0.743 0.861 0.975 1.067 1.007 0.843 0.762 +0 4.357 0.816 -1.609 1.845 -1.288 3.292 0.726 0.324 2.173 1.528 0.583 -0.801 2.215 0.605 0.572 1.406 0.000 0.794 -0.791 0.122 0.000 0.967 1.132 1.124 3.602 2.811 2.460 1.861 +0 0.677 -1.265 1.559 0.866 -0.618 0.823 0.260 0.185 0.000 1.133 0.337 1.589 2.215 0.563 -0.830 0.510 0.000 0.777 0.117 -0.941 3.102 0.839 0.763 0.986 1.182 0.649 0.796 0.851 +0 2.466 -1.838 -1.648 1.717 1.533 1.676 -1.553 -0.109 2.173 0.670 -0.666 0.284 0.000 0.334 -2.480 0.316 0.000 0.366 -0.804 -1.298 3.102 0.875 0.894 0.997 0.548 0.770 1.302 1.079 +1 1.403 0.129 -1.307 0.688 0.306 0.579 0.753 0.814 1.087 0.474 0.694 -1.400 0.000 0.520 1.995 0.185 0.000 0.929 -0.504 1.270 3.102 0.972 0.998 1.353 0.948 0.650 0.688 0.724 +1 0.351 1.188 -0.360 0.254 -0.346 1.129 0.545 1.691 0.000 0.652 -0.039 -0.258 2.215 1.089 0.655 0.472 2.548 0.554 -0.493 1.366 0.000 0.808 1.045 0.992 0.570 0.649 0.809 0.744 +0 1.875 -0.013 -0.128 0.236 1.163 0.902 0.426 0.590 2.173 1.251 -1.210 -0.616 0.000 1.035 1.534 0.912 0.000 1.944 1.789 -1.691 0.000 0.974 1.113 0.990 0.925 1.120 0.956 0.912 +0 0.298 0.750 -0.507 1.555 1.463 0.804 1.200 -0.665 0.000 0.439 -0.829 -0.252 1.107 0.770 -1.090 0.947 2.548 1.165 -0.166 -0.763 0.000 1.140 0.997 0.988 1.330 0.555 1.005 1.012 +0 0.647 0.342 0.245 4.340 -0.157 2.229 0.068 1.170 2.173 2.133 -0.201 -1.441 0.000 1.467 0.697 -0.532 1.274 1.457 0.583 -1.640 0.000 0.875 1.417 0.976 2.512 2.390 1.794 1.665 +1 1.731 -0.803 -1.013 1.492 -0.020 1.646 -0.541 1.121 2.173 0.459 -1.251 -1.495 2.215 0.605 -1.711 -0.232 0.000 0.658 0.634 -0.068 0.000 1.214 0.886 1.738 1.833 1.024 1.192 1.034 +0 0.515 1.416 -1.089 1.697 1.426 1.414 0.941 0.027 0.000 1.480 0.133 -1.595 2.215 1.110 0.752 0.760 2.548 1.062 0.697 -0.492 0.000 0.851 0.955 0.994 1.105 1.255 1.175 1.095 +0 1.261 0.858 1.465 0.757 0.305 2.310 0.679 1.080 2.173 1.544 2.518 -0.464 0.000 2.326 0.270 -0.841 0.000 2.163 0.839 -0.500 3.102 0.715 0.825 1.170 0.980 2.371 1.527 1.221 +1 1.445 1.509 1.471 0.414 -1.285 0.767 0.864 -0.677 2.173 0.524 1.388 0.171 0.000 0.826 0.190 0.121 2.548 0.572 1.691 -1.603 0.000 0.870 0.935 0.994 0.968 0.735 0.783 0.777 +1 0.919 -0.264 -1.245 0.681 -1.722 1.022 1.010 0.097 2.173 0.685 0.403 -1.351 0.000 1.357 -0.429 1.262 1.274 0.687 1.021 -0.563 0.000 0.953 0.796 0.991 0.873 1.749 1.056 0.917 +1 0.293 -2.258 -1.427 1.191 1.202 0.394 -2.030 1.438 0.000 0.723 0.596 -0.024 2.215 0.525 -1.678 -0.290 0.000 0.788 -0.824 -1.029 3.102 0.821 0.626 0.976 1.080 0.810 0.842 0.771 +0 3.286 0.386 1.688 1.619 -1.620 1.392 -0.009 0.280 0.000 1.179 -0.776 -0.110 2.215 1.256 0.248 -1.114 2.548 0.777 0.825 -0.156 0.000 1.026 1.065 0.964 0.909 1.249 1.384 1.395 +1 1.075 0.603 0.561 0.656 -0.685 0.985 0.175 0.979 2.173 1.154 0.584 -0.886 0.000 1.084 -0.354 -1.004 2.548 0.865 1.224 1.269 0.000 1.346 1.073 1.048 0.873 1.310 1.003 0.865 +1 1.098 -0.091 1.466 1.558 0.915 0.649 1.314 -1.182 2.173 0.791 0.073 0.351 0.000 0.517 0.940 1.195 0.000 1.150 1.187 -0.692 3.102 0.866 0.822 0.980 1.311 0.394 1.119 0.890 +1 0.481 -1.042 0.148 1.135 -1.249 1.202 -0.344 0.308 1.087 0.779 -1.431 1.581 0.000 0.860 -0.860 -1.125 0.000 0.785 0.303 1.199 3.102 0.878 0.853 0.988 1.072 0.827 0.936 0.815 +0 1.348 0.497 0.318 0.806 0.976 1.393 -0.152 0.632 2.173 2.130 0.515 -1.054 0.000 0.908 0.062 -0.780 0.000 1.185 0.687 1.668 1.551 0.720 0.898 0.985 0.683 1.292 1.320 1.131 +0 2.677 
-0.420 -1.685 1.828 1.433 2.040 -0.718 -0.039 0.000 0.400 -0.873 0.472 0.000 0.444 0.340 -0.830 2.548 0.431 0.768 -1.417 3.102 0.869 0.917 0.996 0.707 0.193 0.728 1.154 +1 1.300 0.586 -0.122 1.306 0.609 0.727 -0.556 -1.652 2.173 0.636 0.720 1.393 2.215 0.328 1.280 -0.390 0.000 0.386 0.752 -0.905 0.000 0.202 0.751 1.106 0.864 0.799 0.928 0.717 +0 0.637 -0.176 1.737 1.322 -0.414 0.702 -0.964 -0.680 0.000 1.054 -0.461 0.889 2.215 0.861 -0.267 0.225 0.000 1.910 -1.888 1.027 0.000 0.919 0.899 1.186 0.993 1.109 0.862 0.775 +1 0.723 -0.104 1.572 0.428 -0.840 0.655 0.544 1.401 2.173 1.522 -0.154 -0.452 2.215 0.996 0.190 0.273 0.000 1.906 -0.176 0.966 0.000 0.945 0.894 0.990 0.981 1.555 0.988 0.893 +0 2.016 -0.570 1.612 0.798 0.441 0.334 0.191 -0.909 0.000 0.939 0.146 0.021 2.215 0.553 -0.444 1.156 2.548 0.781 -1.545 -0.520 0.000 0.922 0.956 1.528 0.722 0.699 0.778 0.901 +0 1.352 -0.707 1.284 0.665 0.580 0.694 -1.040 -0.899 2.173 0.692 -2.048 0.029 0.000 0.545 -2.042 1.259 0.000 0.661 -0.808 -1.251 3.102 0.845 0.991 0.979 0.662 0.225 0.685 0.769 +1 1.057 -1.561 -0.411 0.952 -0.681 1.236 -1.107 1.045 2.173 1.288 -2.521 -0.521 0.000 1.361 -1.239 1.546 0.000 0.373 -1.540 0.028 0.000 0.794 0.782 0.987 0.889 0.832 0.972 0.828 +0 1.118 -0.017 -1.227 1.077 1.256 0.714 0.624 -0.811 0.000 0.800 0.704 0.387 1.107 0.604 0.234 0.986 0.000 1.306 -0.456 0.094 3.102 0.828 0.984 1.195 0.987 0.672 0.774 0.748 +1 0.602 2.201 0.212 0.119 0.182 0.474 2.130 1.270 0.000 0.370 2.088 -0.573 0.000 0.780 -0.725 -1.033 0.000 1.642 0.598 0.303 3.102 0.886 0.988 0.985 0.644 0.756 0.651 0.599 +0 1.677 -0.844 1.581 0.585 0.887 1.012 -2.315 0.752 0.000 1.077 0.748 -0.195 0.000 0.718 0.832 -1.337 1.274 1.181 -0.557 -1.006 3.102 1.018 1.247 0.988 0.908 0.651 1.311 1.120 +1 1.695 0.259 1.224 1.344 1.067 0.718 -1.752 -0.215 0.000 0.473 0.991 -0.993 0.000 0.891 1.285 -1.500 2.548 0.908 -0.131 0.288 0.000 0.945 0.824 0.979 1.009 0.951 0.934 0.833 +0 0.793 0.628 0.432 1.707 0.302 0.919 1.045 -0.784 0.000 1.472 0.175 -1.284 2.215 1.569 0.155 0.971 2.548 0.435 0.735 1.625 0.000 0.801 0.907 0.992 0.831 1.446 1.082 1.051 +1 0.537 -0.664 -0.244 1.104 1.272 1.154 0.394 1.633 0.000 1.527 0.963 0.559 2.215 1.744 0.650 -0.912 0.000 1.097 0.730 -0.368 3.102 1.953 1.319 1.045 1.309 0.869 1.196 1.126 +1 0.585 -1.469 1.005 0.749 -1.060 1.224 -0.717 -0.323 2.173 1.012 -0.201 1.268 0.000 0.359 -0.567 0.476 0.000 1.117 -1.124 1.557 3.102 0.636 1.281 0.986 0.616 1.289 0.890 0.881 +1 0.354 -1.517 0.667 2.534 -1.298 1.020 -0.375 1.254 0.000 1.119 -0.060 -1.538 2.215 1.059 -0.395 -0.140 0.000 2.609 0.199 -0.778 1.551 0.957 0.975 1.286 1.666 1.003 1.224 1.135 +1 0.691 -1.619 -1.380 0.361 1.727 1.493 -1.093 -0.289 0.000 1.447 -0.640 1.341 0.000 1.453 -0.617 -1.456 1.274 1.061 -1.481 -0.091 0.000 0.744 0.649 0.987 0.596 0.727 0.856 0.797 +0 1.336 1.293 -1.359 0.357 0.067 1.110 -0.058 -0.515 0.000 0.976 1.498 1.207 0.000 1.133 0.437 1.053 2.548 0.543 1.374 0.171 0.000 0.764 0.761 0.984 0.827 0.553 0.607 0.612 +0 0.417 -1.111 1.661 2.209 -0.683 1.931 -0.642 0.959 1.087 1.514 -2.032 -0.686 0.000 1.521 -0.539 1.344 0.000 0.978 -0.866 0.363 1.551 2.813 1.850 1.140 1.854 0.799 1.600 1.556 +0 1.058 0.390 -0.591 0.134 1.149 0.346 -1.550 0.186 0.000 1.108 -0.999 0.843 1.107 1.124 0.415 -1.514 0.000 1.067 -0.426 -1.000 3.102 1.744 1.050 0.985 1.006 1.010 0.883 0.789 +1 1.655 0.253 1.216 0.270 1.703 0.500 -0.006 -1.418 2.173 0.690 -0.350 0.170 2.215 1.045 -0.924 -0.774 0.000 0.996 -0.745 -0.123 0.000 0.839 0.820 0.993 0.921 0.869 0.725 0.708 +0 1.603 -0.850 0.564 0.829 
0.093 1.270 -1.113 -1.155 2.173 0.853 -1.021 1.248 2.215 0.617 -1.270 1.733 0.000 0.935 -0.092 0.136 0.000 1.011 1.074 0.977 0.823 1.269 1.054 0.878 +0 1.568 -0.792 1.005 0.545 0.896 0.895 -1.698 -0.988 0.000 0.608 -1.634 1.705 0.000 0.826 0.208 0.618 1.274 2.063 -1.743 -0.520 0.000 0.939 0.986 0.990 0.600 0.435 1.033 1.087 +0 0.489 -1.335 -1.102 1.738 1.028 0.628 -0.992 -0.627 0.000 0.652 -0.064 -0.215 0.000 1.072 0.173 -1.251 2.548 1.042 0.057 0.841 3.102 0.823 0.895 1.200 1.164 0.770 0.837 0.846 +1 1.876 0.870 1.234 0.556 -1.262 1.764 0.855 -0.467 2.173 1.079 1.351 0.852 0.000 0.773 0.383 0.874 0.000 1.292 0.829 -1.228 3.102 0.707 0.969 1.102 1.601 1.017 1.112 1.028 +0 1.033 0.407 -0.374 0.705 -1.254 0.690 -0.231 1.502 2.173 0.433 -2.009 -0.057 0.000 0.861 1.151 0.334 0.000 0.960 -0.839 1.299 3.102 2.411 1.480 0.982 0.995 0.377 1.012 0.994 +0 1.092 0.653 -0.801 0.463 0.426 0.529 -1.055 0.040 0.000 0.663 0.999 1.255 1.107 0.749 -1.106 1.185 2.548 0.841 -0.745 -1.029 0.000 0.841 0.743 0.988 0.750 1.028 0.831 0.868 +1 0.799 -0.285 -0.011 0.531 1.392 1.063 0.854 0.494 2.173 1.187 -1.065 -0.851 0.000 0.429 -0.296 1.072 0.000 0.942 -1.985 1.172 0.000 0.873 0.693 0.992 0.819 0.689 1.131 0.913 +0 0.503 1.973 -0.377 1.515 -1.514 0.708 1.081 -0.313 2.173 1.110 -0.417 0.839 0.000 0.712 -1.153 1.165 0.000 0.675 -0.303 -0.930 1.551 0.709 0.761 1.032 0.986 0.698 0.963 1.291 +0 0.690 -0.574 -1.608 1.182 1.118 0.557 -2.243 0.144 0.000 0.969 0.216 -1.383 1.107 1.054 0.888 -0.709 2.548 0.566 1.663 -0.550 0.000 0.752 1.528 0.987 1.408 0.740 1.290 1.123 +1 0.890 1.501 0.786 0.779 -0.615 1.126 0.716 1.541 2.173 0.887 0.728 -0.673 2.215 1.216 0.332 -0.020 0.000 0.965 1.828 0.101 0.000 0.827 0.715 1.099 1.088 1.339 0.924 0.878 +0 0.566 0.883 0.655 1.600 0.034 1.155 2.028 -1.499 0.000 0.723 -0.871 0.763 0.000 1.286 -0.696 -0.676 2.548 1.134 -0.113 1.207 3.102 4.366 2.493 0.984 0.960 0.962 1.843 1.511 +0 1.146 1.086 -0.911 0.838 1.298 0.821 0.127 -0.145 0.000 1.352 0.474 -1.580 2.215 1.619 -0.081 0.675 2.548 1.382 -0.748 0.127 0.000 0.958 0.976 1.239 0.876 1.481 1.116 1.076 +0 1.739 -0.326 -1.661 0.420 -1.705 1.193 -0.031 -1.212 2.173 1.783 -0.442 0.522 0.000 1.064 -0.692 0.027 0.000 1.314 0.359 -0.037 3.102 0.968 0.897 0.986 0.907 1.196 1.175 1.112 +1 0.669 0.194 -0.703 0.657 -0.260 0.899 -2.511 0.311 0.000 1.482 0.773 0.974 2.215 3.459 0.037 -1.299 1.274 2.113 0.067 1.516 0.000 0.740 0.871 0.979 1.361 2.330 1.322 1.046 +1 1.355 -1.033 -1.173 0.552 -0.048 0.899 -0.482 -1.287 2.173 1.422 -1.227 0.390 1.107 1.937 -0.028 0.914 0.000 0.849 -0.230 -1.734 0.000 0.986 1.224 1.017 1.051 1.788 1.150 1.009 +1 0.511 -0.202 1.029 0.780 1.154 0.816 0.532 -0.731 0.000 0.757 0.517 0.749 2.215 1.302 0.289 -1.188 0.000 0.584 1.211 -0.350 0.000 0.876 0.943 0.995 0.963 0.256 0.808 0.891 +1 1.109 0.572 1.484 0.753 1.543 1.711 -0.145 -0.746 1.087 1.759 0.631 0.845 2.215 0.945 0.542 0.003 0.000 0.378 -1.150 -0.044 0.000 0.764 1.042 0.992 1.045 2.736 1.441 1.140 +0 0.712 -0.025 0.553 0.928 -0.711 1.304 0.045 -0.300 0.000 0.477 0.720 0.969 0.000 1.727 -0.474 1.328 1.274 1.282 2.222 1.684 0.000 0.819 0.765 1.023 0.961 0.657 0.799 0.744 +1 1.131 -0.302 1.079 0.901 0.236 0.904 -0.249 1.694 2.173 1.507 -0.702 -1.128 0.000 0.774 0.565 0.284 2.548 1.802 1.446 -0.192 0.000 3.720 2.108 0.986 0.930 1.101 1.484 1.238 +0 1.392 1.253 0.118 0.864 -1.358 0.922 -0.447 -1.243 1.087 1.969 1.031 0.774 2.215 1.333 -0.359 -0.681 0.000 1.099 -0.257 1.473 0.000 1.246 0.909 1.475 1.234 2.531 1.449 1.306 +0 1.374 2.291 -0.479 1.339 -0.243 0.687 2.345 1.310 
0.000 0.467 1.081 0.772 0.000 0.656 1.155 -1.636 2.548 0.592 0.536 -1.269 3.102 0.981 0.821 1.010 0.877 0.217 0.638 0.758 +1 0.401 -1.516 0.909 2.738 0.519 0.887 0.566 -1.202 0.000 0.909 -0.176 1.682 0.000 2.149 -0.878 -0.514 2.548 0.929 -0.563 -1.555 3.102 1.228 0.803 0.980 1.382 0.884 1.025 1.172 +1 0.430 -1.589 1.417 2.158 1.226 1.180 -0.829 -0.781 2.173 0.798 1.400 -0.111 0.000 0.939 -0.878 1.076 2.548 0.576 1.335 -0.826 0.000 0.861 0.970 0.982 1.489 1.308 1.015 0.992 +1 1.943 -0.391 -0.840 0.621 -1.613 2.026 1.734 1.025 0.000 0.930 0.573 -0.912 0.000 1.326 0.847 -0.220 1.274 1.181 0.079 0.709 3.102 1.164 1.007 0.987 1.094 0.821 0.857 0.786 +1 0.499 0.436 0.887 0.859 1.509 0.733 -0.559 1.111 1.087 1.011 -0.796 0.279 2.215 1.472 -0.510 -0.982 0.000 1.952 0.379 -0.733 0.000 1.076 1.358 0.991 0.589 0.879 1.068 0.922 +0 0.998 -0.407 -1.711 0.139 0.652 0.810 -0.331 -0.721 0.000 0.471 -0.533 0.442 0.000 0.531 -1.405 0.120 2.548 0.707 0.098 -1.176 1.551 1.145 0.809 0.988 0.529 0.612 0.562 0.609 +1 1.482 0.872 0.638 1.288 0.362 0.856 0.900 -0.511 1.087 1.072 1.061 -1.432 2.215 1.770 -2.292 -1.547 0.000 1.131 1.374 0.783 0.000 6.316 4.381 1.002 1.317 1.048 2.903 2.351 +1 2.084 -0.422 1.289 1.125 0.735 1.104 -0.518 -0.326 2.173 0.413 -0.719 -0.699 0.000 0.857 0.108 -1.631 0.000 0.527 0.641 -1.362 3.102 0.791 0.952 1.016 0.776 0.856 0.987 0.836 +0 0.464 0.674 0.025 0.430 -1.703 0.982 -1.311 -0.808 2.173 1.875 1.060 0.821 2.215 0.954 -0.480 -1.677 0.000 0.567 0.702 -0.939 0.000 0.781 1.076 0.989 1.256 3.632 1.652 1.252 +1 0.457 -1.944 -1.010 1.409 0.931 1.098 -0.742 -0.415 0.000 1.537 -0.834 0.945 2.215 1.752 -0.287 -1.269 2.548 0.692 -1.537 -0.223 0.000 0.801 1.192 1.094 1.006 1.659 1.175 1.122 +0 3.260 -0.943 1.737 0.920 1.309 0.946 -0.139 -0.271 2.173 0.994 -0.952 -0.311 0.000 0.563 -0.136 -0.881 0.000 1.236 -0.507 0.906 1.551 0.747 0.869 0.985 1.769 1.034 1.179 1.042 +0 0.615 -0.778 0.246 1.861 1.619 0.560 -0.943 -0.204 2.173 0.550 -0.759 -1.342 2.215 0.578 0.076 -0.973 0.000 0.939 0.035 0.680 0.000 0.810 0.747 1.401 0.772 0.702 0.719 0.662 +1 2.370 -0.064 -0.237 1.737 0.154 2.319 -1.838 -1.673 0.000 1.053 -1.305 -0.075 0.000 0.925 0.149 0.318 1.274 0.851 -0.922 0.981 3.102 0.919 0.940 0.989 0.612 0.598 1.219 1.626 +1 1.486 0.311 -1.262 1.354 -0.847 0.886 -0.158 1.213 2.173 1.160 -0.218 0.239 0.000 1.166 0.494 0.278 2.548 0.575 1.454 -1.701 0.000 0.429 1.129 0.983 1.111 1.049 1.006 0.920 +1 1.294 1.587 -0.864 0.487 -0.312 0.828 1.051 -0.031 1.087 2.443 1.216 1.609 2.215 1.167 0.813 0.921 0.000 1.751 -0.415 0.119 0.000 1.015 1.091 0.974 1.357 2.093 1.178 1.059 +1 0.984 0.465 -1.661 0.379 -0.554 0.977 0.237 0.365 0.000 0.510 0.143 1.101 0.000 1.099 -0.662 -1.593 2.548 1.104 -0.197 -0.648 3.102 0.925 0.922 0.986 0.642 0.667 0.806 0.722 +1 0.930 -0.009 0.047 0.667 1.367 1.065 -0.231 0.815 0.000 1.199 -1.114 -0.877 2.215 0.940 0.824 -1.583 0.000 1.052 -0.407 -0.076 1.551 1.843 1.257 1.013 1.047 0.751 1.158 0.941 +0 0.767 -0.011 -0.637 0.341 -1.437 1.438 -0.425 -0.450 2.173 1.073 -0.718 1.341 2.215 0.633 -1.394 0.486 0.000 0.603 -1.945 -1.626 0.000 0.703 0.790 0.984 1.111 1.848 1.129 1.072 +1 1.779 0.017 0.432 0.402 1.022 0.959 1.480 1.595 2.173 1.252 1.365 0.006 0.000 1.188 -0.174 -1.107 0.000 1.181 0.518 -0.258 0.000 1.057 0.910 0.991 1.616 0.779 1.158 1.053 +0 0.881 0.630 1.029 1.990 0.508 1.102 0.742 -1.298 2.173 1.565 1.085 0.686 2.215 2.691 1.391 -0.904 0.000 0.499 1.388 -1.199 0.000 0.347 0.861 0.997 0.881 1.920 1.233 1.310 +0 1.754 -0.266 0.389 0.347 -0.030 0.462 -1.408 -0.957 2.173 0.515 
-2.341 -1.700 0.000 0.588 -0.797 1.355 2.548 0.608 0.329 -1.389 0.000 1.406 0.909 0.988 0.760 0.593 0.768 0.847 +0 1.087 0.311 -1.447 0.173 0.567 0.854 0.362 0.584 0.000 1.416 -0.716 -1.211 2.215 0.648 -0.358 -0.692 1.274 0.867 -0.513 0.206 0.000 0.803 0.813 0.984 1.110 0.491 0.921 0.873 +0 0.279 1.114 -1.190 3.004 -0.738 1.233 0.896 1.092 2.173 0.454 -0.374 0.117 2.215 0.357 0.119 1.270 0.000 0.458 1.343 0.316 0.000 0.495 0.540 0.988 1.715 1.139 1.618 1.183 +1 1.773 -0.694 -1.518 2.306 -1.200 3.104 0.749 0.362 0.000 1.871 0.230 -1.686 2.215 0.805 -0.179 -0.871 1.274 0.910 0.607 -0.246 0.000 1.338 1.598 0.984 1.050 0.919 1.678 1.807 +0 0.553 0.683 0.827 0.973 -0.706 1.488 0.149 1.140 2.173 1.788 0.447 -0.478 0.000 0.596 1.043 1.607 0.000 0.373 -0.868 -1.308 1.551 1.607 1.026 0.998 1.134 0.808 1.142 0.936 +1 0.397 1.101 -1.139 1.688 0.146 0.972 0.541 1.518 0.000 1.549 -0.873 -1.012 0.000 2.282 -0.151 0.314 2.548 1.174 0.033 -1.368 0.000 0.937 0.776 1.039 1.143 0.959 0.986 1.013 +1 0.840 1.906 -0.959 0.869 0.576 0.642 0.554 -1.351 0.000 0.756 0.923 -0.823 2.215 1.251 1.130 0.545 2.548 1.513 0.410 1.073 0.000 1.231 0.985 1.163 0.812 0.987 0.816 0.822 +1 0.477 1.665 0.814 0.763 -0.382 0.828 -0.008 0.280 2.173 1.213 -0.001 1.560 0.000 1.136 0.311 -1.289 0.000 0.797 1.091 -0.616 3.102 1.026 0.964 0.992 0.772 0.869 0.916 0.803 +0 2.655 0.020 0.273 1.464 0.482 1.709 -0.107 -1.456 2.173 0.825 0.141 -0.386 0.000 1.342 -0.592 1.635 1.274 0.859 -0.175 -0.874 0.000 0.829 0.946 1.003 2.179 0.836 1.505 1.176 +0 0.771 -1.992 -0.720 0.732 -1.464 0.869 -1.290 0.388 2.173 0.926 -1.072 -1.489 2.215 0.640 -1.232 0.840 0.000 0.528 -2.440 -0.446 0.000 0.811 0.868 0.993 0.995 1.317 0.809 0.714 +0 1.357 1.302 0.076 0.283 -1.060 0.783 1.559 -0.994 0.000 0.947 1.212 1.617 0.000 1.127 0.311 0.442 2.548 0.582 -0.052 1.186 1.551 1.330 0.995 0.985 0.846 0.404 0.858 0.815 +0 0.442 -0.381 -0.424 1.244 0.591 0.731 0.605 -0.713 2.173 0.629 2.762 1.040 0.000 0.476 2.693 -0.617 0.000 0.399 0.442 1.486 3.102 0.839 0.755 0.988 0.869 0.524 0.877 0.918 +0 0.884 0.422 0.055 0.818 0.624 0.950 -0.763 1.624 0.000 0.818 -0.609 -1.166 0.000 1.057 -0.528 1.070 2.548 1.691 -0.124 -0.335 3.102 1.104 0.933 0.985 0.913 1.000 0.863 1.056 +0 1.276 0.156 1.714 1.053 -1.189 0.672 -0.464 -0.030 2.173 0.469 -2.483 0.442 0.000 0.564 2.580 -0.253 0.000 0.444 -0.628 1.080 1.551 5.832 2.983 0.985 1.162 0.494 1.809 1.513 +0 1.106 -0.556 0.406 0.573 -1.400 0.769 -0.518 1.457 2.173 0.743 -0.352 -0.010 0.000 1.469 -0.550 -0.930 2.548 0.540 1.236 -0.571 0.000 0.962 0.970 1.101 0.805 1.107 0.873 0.773 +0 0.539 -0.964 -0.464 1.371 -1.606 0.667 -0.160 0.655 0.000 0.952 0.352 -0.740 2.215 0.952 0.007 1.123 0.000 1.061 -0.505 1.389 3.102 1.063 0.991 1.019 0.633 0.967 0.732 0.799 +1 0.533 -0.989 -1.608 0.462 -1.723 1.204 -0.598 -0.098 2.173 1.343 -0.460 1.632 2.215 0.577 0.221 -0.492 0.000 0.628 -0.073 0.472 0.000 0.518 0.880 0.988 1.179 1.874 1.041 0.813 +1 1.024 1.075 -0.795 0.286 -1.436 1.365 0.857 -0.309 2.173 0.804 1.532 1.435 0.000 1.511 0.722 1.494 0.000 1.778 0.903 0.753 1.551 0.686 0.810 0.999 0.900 1.360 1.133 0.978 +1 2.085 -0.269 -1.423 0.789 1.298 0.281 1.652 0.187 0.000 0.658 -0.760 -0.042 2.215 0.663 0.024 0.120 0.000 0.552 -0.299 -0.428 3.102 0.713 0.811 1.130 0.705 0.218 0.675 0.743 +1 0.980 -0.443 0.813 0.785 -1.253 0.719 0.448 -1.458 0.000 1.087 0.595 0.635 1.107 1.428 0.029 -0.995 0.000 1.083 1.562 -0.092 0.000 0.834 0.891 1.165 0.967 0.661 0.880 0.817 +1 0.903 -0.733 -0.980 0.634 -0.639 0.780 0.266 -0.287 2.173 1.264 -0.936 1.004 0.000 
1.002 -0.056 -1.344 2.548 1.183 -0.098 1.169 0.000 0.733 1.002 0.985 0.711 0.916 0.966 0.875 +0 0.734 -0.304 -1.175 2.851 1.674 0.904 -0.634 0.412 2.173 1.363 -1.050 -0.282 0.000 1.476 -1.603 0.103 0.000 2.231 -0.718 1.708 3.102 0.813 0.896 1.088 0.686 1.392 1.033 1.078 +1 1.680 0.591 -0.243 0.111 -0.478 0.326 -0.079 -1.555 2.173 0.711 0.714 0.922 2.215 0.355 0.858 1.682 0.000 0.727 1.620 1.360 0.000 0.334 0.526 1.001 0.862 0.633 0.660 0.619 +1 1.163 0.225 -0.202 0.501 -0.979 1.609 -0.938 1.424 0.000 1.224 -0.118 -1.274 0.000 2.034 1.241 -0.254 0.000 1.765 0.536 0.237 3.102 0.894 0.838 0.988 0.693 0.579 0.762 0.726 +0 1.223 1.232 1.471 0.489 1.728 0.703 -0.111 0.411 0.000 1.367 1.014 -1.294 1.107 1.524 -0.414 -0.164 2.548 1.292 0.833 0.316 0.000 0.861 0.752 0.994 0.836 1.814 1.089 0.950 +0 0.816 1.637 -1.557 1.036 -0.342 0.913 1.333 0.949 2.173 0.812 0.756 -0.628 2.215 1.333 0.470 1.495 0.000 1.204 -2.222 -1.675 0.000 1.013 0.924 1.133 0.758 1.304 0.855 0.860 +0 0.851 -0.564 -0.691 0.692 1.345 1.219 1.014 0.318 0.000 1.422 -0.262 -1.635 2.215 0.531 1.802 0.008 0.000 0.508 0.515 -1.267 3.102 0.821 0.787 1.026 0.783 0.432 1.149 1.034 +0 0.800 -0.599 0.204 0.552 -0.484 0.974 0.413 0.961 2.173 1.269 -0.984 -1.039 2.215 0.380 -1.213 1.371 0.000 0.551 0.332 -0.659 0.000 0.694 0.852 0.984 1.057 2.037 1.096 0.846 +0 0.744 -0.071 -0.255 0.638 0.512 1.125 0.407 0.844 2.173 0.860 -0.481 -0.677 0.000 1.102 0.181 -1.194 0.000 1.011 -1.081 -1.713 3.102 0.854 0.862 0.982 1.111 1.372 1.042 0.920 +1 0.400 1.049 -0.625 0.880 -0.407 1.040 2.150 -1.359 0.000 0.747 -0.144 0.847 2.215 0.560 -1.829 0.698 0.000 1.663 -0.668 0.267 0.000 0.845 0.964 0.996 0.820 0.789 0.668 0.668 +0 1.659 -0.705 -1.057 1.803 -1.436 1.008 0.693 0.005 0.000 0.895 -0.007 0.681 1.107 1.085 0.125 1.476 2.548 1.214 1.068 0.486 0.000 0.867 0.919 0.986 1.069 0.692 1.026 1.313 +0 0.829 -0.153 0.861 0.615 -0.548 0.589 1.077 -0.041 2.173 1.056 0.763 -1.737 0.000 0.639 0.970 0.725 0.000 0.955 1.227 -0.799 3.102 1.020 1.024 0.985 0.750 0.525 0.685 0.671 +1 0.920 -0.806 -0.840 1.048 0.278 0.973 -0.077 -1.364 2.173 1.029 0.309 0.133 0.000 1.444 1.484 1.618 1.274 1.419 -0.482 0.417 0.000 0.831 1.430 1.151 1.829 1.560 1.343 1.224 +1 0.686 0.249 -0.905 0.343 -1.731 0.724 -2.823 -0.901 0.000 0.982 0.303 1.312 1.107 1.016 0.245 0.610 0.000 1.303 -0.557 -0.360 3.102 1.384 1.030 0.984 0.862 1.144 0.866 0.779 +0 1.603 0.444 0.508 0.586 0.401 0.610 0.467 -1.735 2.173 0.914 0.626 -1.019 0.000 0.812 0.422 -0.408 2.548 0.902 1.679 1.490 0.000 1.265 0.929 0.990 1.004 0.816 0.753 0.851 +1 0.623 0.780 -0.203 0.056 0.015 0.899 0.793 1.326 1.087 0.803 1.478 -1.499 2.215 1.561 1.492 -0.120 0.000 0.904 0.795 0.137 0.000 0.548 1.009 0.850 0.924 0.838 0.914 0.860 +0 1.654 -2.032 -1.160 0.859 -1.583 0.689 -1.965 0.891 0.000 0.646 -1.014 -0.288 2.215 0.630 -0.815 0.402 0.000 0.638 0.316 0.655 3.102 0.845 0.879 0.993 1.067 0.625 1.041 0.958 +1 0.828 -1.269 -1.203 0.744 -0.213 0.626 -1.017 -0.404 0.000 1.281 -0.931 1.733 2.215 0.699 -0.351 1.287 0.000 1.251 -1.171 0.197 0.000 0.976 1.186 0.987 0.646 0.655 0.733 0.671 +1 0.677 0.111 1.090 1.580 1.591 1.560 0.654 -0.341 2.173 0.794 -0.266 0.702 0.000 0.823 0.651 -1.239 2.548 0.730 1.467 -1.530 0.000 1.492 1.023 0.983 1.909 1.022 1.265 1.127 +1 0.736 0.882 -1.060 0.589 0.168 1.663 0.781 1.022 2.173 2.025 1.648 -1.292 0.000 1.240 0.924 -0.421 1.274 1.354 0.065 0.501 0.000 0.316 0.925 0.988 0.664 1.736 0.992 0.807 +1 1.040 -0.822 1.638 0.974 -0.674 0.393 0.830 0.011 2.173 0.770 -0.140 -0.402 0.000 0.294 -0.133 0.030 0.000 
1.220 0.807 0.638 0.000 0.826 1.063 1.216 1.026 0.705 0.934 0.823 +1 0.711 0.602 0.048 1.145 0.966 0.934 0.263 -1.589 2.173 0.971 -0.496 -0.421 1.107 0.628 -0.865 0.845 0.000 0.661 -0.008 -0.565 0.000 0.893 0.705 0.988 0.998 1.339 0.908 0.872 +1 0.953 -1.651 -0.167 0.885 1.053 1.013 -1.239 0.133 0.000 1.884 -1.122 1.222 2.215 1.906 -0.860 -1.184 1.274 1.413 -0.668 -1.647 0.000 1.873 1.510 1.133 1.050 1.678 1.246 1.061 +1 0.986 -0.892 -1.380 0.917 1.134 0.950 -1.162 -0.469 0.000 0.569 -1.393 0.215 0.000 0.320 2.667 1.712 0.000 1.570 -0.375 1.457 3.102 0.925 1.128 1.011 0.598 0.824 0.913 0.833 +1 1.067 0.099 1.154 0.527 -0.789 1.085 0.623 -1.602 2.173 1.511 -0.230 0.022 2.215 0.269 -0.377 0.883 0.000 0.571 -0.540 -0.512 0.000 0.414 0.803 1.022 0.959 2.053 1.041 0.780 +0 0.825 -2.118 0.217 1.453 -0.493 0.819 0.313 -0.942 0.000 2.098 -0.725 1.096 2.215 0.484 1.336 1.458 0.000 0.482 0.100 1.163 0.000 0.913 0.536 0.990 1.679 0.957 1.095 1.143 +1 1.507 0.054 1.120 0.698 -1.340 0.912 0.384 0.015 1.087 0.720 0.247 -0.820 0.000 0.286 0.154 1.578 2.548 0.629 1.582 -0.576 0.000 0.828 0.893 1.136 0.514 0.632 0.699 0.709 +1 0.610 1.180 -0.993 0.816 0.301 0.932 0.758 1.539 0.000 0.726 -0.830 0.248 2.215 0.883 0.857 -1.305 0.000 1.338 1.009 -0.252 3.102 0.901 1.074 0.987 0.875 1.159 1.035 0.858 +1 1.247 -1.360 1.502 1.525 -1.332 0.618 1.063 0.755 0.000 0.582 -0.155 0.473 2.215 1.214 -0.422 -0.551 2.548 0.838 -1.171 -1.166 0.000 2.051 1.215 1.062 1.091 0.725 0.896 1.091 +0 0.373 -0.600 1.291 2.573 0.207 0.765 -0.209 1.667 0.000 0.668 0.724 -1.499 0.000 1.045 -0.338 -0.754 2.548 0.558 -0.469 0.029 3.102 0.868 0.939 1.124 0.519 0.383 0.636 0.838 +0 0.791 0.336 -0.307 0.494 1.213 1.158 0.336 1.081 2.173 0.918 1.289 -0.449 0.000 0.735 -0.521 -0.969 0.000 1.052 0.499 -1.188 3.102 0.699 1.013 0.987 0.622 1.050 0.712 0.661 +0 1.321 0.856 0.464 0.202 0.901 1.144 0.120 -1.651 0.000 0.803 0.577 -0.509 2.215 0.695 -0.114 0.423 2.548 0.621 1.852 -0.420 0.000 0.697 0.964 0.983 0.527 0.659 0.719 0.729 +0 0.563 2.081 0.913 0.982 -0.533 0.549 -0.481 -1.730 0.000 0.962 0.921 0.569 2.215 0.731 1.184 -0.679 1.274 0.918 0.931 -1.432 0.000 1.008 0.919 0.993 0.895 0.819 0.810 0.878 +1 1.148 0.345 0.953 0.921 0.617 0.991 1.103 -0.484 0.000 0.970 1.978 1.525 0.000 1.150 0.689 -0.757 2.548 0.517 0.995 1.245 0.000 1.093 1.140 0.998 1.006 0.756 0.864 0.838 +1 1.400 0.128 -1.695 1.169 1.070 1.094 -0.345 -0.249 0.000 1.224 0.364 -0.036 2.215 1.178 0.530 -1.544 0.000 1.334 0.933 1.604 0.000 0.560 1.267 1.073 0.716 0.780 0.832 0.792 +0 0.330 -2.133 1.403 0.628 0.379 1.686 -0.995 0.030 1.087 2.071 0.127 -0.457 0.000 4.662 -0.855 1.477 0.000 2.072 -0.917 -1.416 3.102 5.403 3.074 0.977 0.936 1.910 2.325 1.702 +0 0.989 0.473 0.968 1.970 1.368 0.844 0.574 -0.290 2.173 0.866 -0.345 -1.019 0.000 1.130 0.605 -0.752 0.000 0.956 -0.888 0.870 3.102 0.885 0.886 0.982 1.157 1.201 1.100 1.068 +1 0.773 0.418 0.753 1.388 1.070 1.104 -0.378 -0.758 0.000 1.027 0.397 -0.496 2.215 1.234 0.027 1.084 2.548 0.936 0.209 1.677 0.000 1.355 1.020 0.983 0.550 1.206 0.916 0.931 +0 0.319 2.015 1.534 0.570 -1.134 0.632 0.124 0.757 0.000 0.477 0.598 -1.109 1.107 0.449 0.438 -0.755 2.548 0.574 -0.659 0.691 0.000 0.440 0.749 0.985 0.517 0.158 0.505 0.522 +0 1.215 1.453 -1.386 1.276 1.298 0.643 0.570 -0.196 2.173 0.588 2.104 0.498 0.000 0.617 -0.296 -0.801 2.548 0.452 0.110 0.313 0.000 0.815 0.953 1.141 1.166 0.547 0.892 0.807 +1 1.257 -1.869 -0.060 0.265 0.653 1.527 -0.346 1.163 2.173 0.758 -2.119 -0.604 0.000 1.473 -1.133 -1.290 2.548 0.477 -0.428 -0.066 0.000 0.818 
0.841 0.984 1.446 1.729 1.211 1.054 +1 1.449 0.464 1.585 1.418 -1.488 1.540 0.942 0.087 0.000 0.898 0.402 -0.631 2.215 0.753 0.039 -1.729 0.000 0.859 0.849 -1.054 0.000 0.791 0.677 0.995 0.687 0.527 0.703 0.606 +1 1.084 -1.997 0.900 1.333 1.024 0.872 -0.864 -1.500 2.173 1.072 -0.813 -0.421 2.215 0.924 0.478 0.304 0.000 0.992 -0.398 -1.022 0.000 0.741 1.085 0.980 1.221 1.176 1.032 0.961 +0 1.712 1.129 0.125 1.120 -1.402 1.749 0.951 -1.575 2.173 1.711 0.445 0.578 0.000 1.114 0.234 -1.011 0.000 1.577 -0.088 0.086 3.102 2.108 1.312 1.882 1.597 2.009 1.441 1.308 +0 0.530 0.248 1.622 1.450 -1.012 1.221 -1.154 -0.763 2.173 1.698 -0.586 0.733 0.000 0.889 1.042 1.038 1.274 0.657 0.008 0.701 0.000 0.430 1.005 0.983 0.930 2.264 1.357 1.146 +1 0.921 1.735 0.883 0.699 -1.614 0.821 1.463 0.319 1.087 1.099 0.814 -1.600 2.215 1.375 0.702 -0.691 0.000 0.869 1.326 -0.790 0.000 0.980 0.900 0.988 0.832 1.452 0.816 0.709 +0 2.485 -0.823 -0.297 0.886 -1.404 0.989 0.835 1.615 2.173 0.382 0.588 -0.224 0.000 1.029 -0.456 1.546 2.548 0.613 -0.359 -0.789 0.000 0.768 0.977 1.726 2.007 0.913 1.338 1.180 +1 0.657 -0.069 -0.078 1.107 1.549 0.804 1.335 -1.630 2.173 1.271 0.481 0.153 1.107 1.028 0.144 -0.762 0.000 1.098 0.132 1.570 0.000 0.830 0.979 1.175 1.069 1.624 1.000 0.868 +1 2.032 0.329 -1.003 0.493 -0.136 1.159 -0.224 0.750 1.087 0.396 0.546 0.587 0.000 0.620 1.805 0.982 0.000 1.236 0.744 -1.621 0.000 0.930 1.200 0.988 0.482 0.771 0.887 0.779 +0 0.524 -1.319 0.634 0.471 1.221 0.599 -0.588 -0.461 0.000 1.230 -1.504 -1.517 1.107 1.436 -0.035 0.104 2.548 0.629 1.997 -1.282 0.000 2.084 1.450 0.984 1.084 1.827 1.547 1.213 +1 0.871 0.618 -1.544 0.718 0.186 1.041 -1.180 0.434 2.173 1.133 1.558 -1.301 0.000 0.452 -0.595 0.522 0.000 0.665 0.567 0.130 3.102 1.872 1.114 1.095 1.398 0.979 1.472 1.168 +1 3.308 1.037 -0.634 0.690 -0.619 1.975 0.949 1.280 0.000 0.826 0.546 -0.139 2.215 0.635 -0.045 0.427 0.000 1.224 0.112 1.339 3.102 1.756 1.050 0.992 0.738 0.903 0.968 1.238 +0 0.588 2.104 -0.872 1.136 1.743 0.842 0.638 0.015 0.000 0.481 0.928 1.000 2.215 0.595 0.125 1.429 0.000 0.951 -1.140 -0.511 3.102 1.031 1.057 0.979 0.673 1.064 1.001 0.891 +0 0.289 0.823 0.013 0.615 -1.601 0.177 2.403 -0.015 0.000 0.258 1.151 1.036 2.215 0.694 0.553 -1.326 2.548 0.411 0.366 0.106 0.000 0.482 0.562 0.989 0.670 0.404 0.516 0.561 +1 0.294 -0.660 -1.162 1.752 0.384 0.860 0.513 1.119 0.000 2.416 0.107 -1.342 0.000 1.398 0.361 -0.350 2.548 1.126 -0.902 0.040 1.551 0.650 1.125 0.988 0.531 0.843 0.912 0.911 +0 0.599 -0.616 1.526 1.381 0.507 0.955 -0.646 -0.085 2.173 0.775 -0.533 1.116 2.215 0.789 -0.136 -1.176 0.000 2.449 1.435 -1.433 0.000 1.692 1.699 1.000 0.869 1.119 1.508 1.303 +1 1.100 -1.174 -1.114 1.601 -1.576 1.056 -1.343 0.547 2.173 0.555 0.367 0.592 2.215 0.580 -1.862 -0.914 0.000 0.904 0.508 -0.444 0.000 1.439 1.105 0.986 1.408 1.104 1.190 1.094 +1 2.237 -0.701 1.470 0.719 -0.199 0.745 -0.132 -0.737 1.087 0.976 -0.227 0.093 2.215 0.699 0.057 1.133 0.000 0.661 0.573 -0.679 0.000 0.785 0.772 1.752 1.235 0.856 0.990 0.825 +1 0.455 -0.880 -1.482 1.260 -0.178 1.499 0.158 1.022 0.000 1.867 -0.435 -0.675 2.215 1.234 0.783 1.586 0.000 0.641 -0.454 -0.409 3.102 1.002 0.964 0.986 0.761 0.240 1.190 0.995 +1 1.158 -0.778 -0.159 0.823 1.641 1.341 -0.830 -1.169 2.173 0.840 -1.554 0.934 0.000 0.693 0.488 -1.218 2.548 1.042 1.395 0.276 0.000 0.946 0.785 1.350 1.079 0.893 1.267 1.151 +1 0.902 -0.078 -0.055 0.872 -0.012 0.843 1.276 1.739 2.173 0.838 1.492 0.918 0.000 0.626 0.904 -0.648 2.548 0.412 -2.027 -0.883 0.000 2.838 1.664 0.988 1.803 0.768 1.244 
1.280 +1 0.649 -1.028 -1.521 1.097 0.774 1.216 -0.383 -0.318 2.173 1.643 -0.285 -1.705 0.000 0.911 -0.091 0.341 0.000 0.592 0.537 0.732 3.102 0.911 0.856 1.027 1.160 0.874 0.986 0.893 +1 1.192 1.846 -0.781 1.326 -0.747 1.550 1.177 1.366 0.000 1.196 0.151 0.387 2.215 0.527 2.261 -0.190 0.000 0.390 1.474 0.381 0.000 0.986 1.025 1.004 1.392 0.761 0.965 1.043 +0 0.438 -0.358 -1.549 0.836 0.436 0.818 0.276 -0.708 2.173 0.707 0.826 0.392 0.000 1.050 1.741 -1.066 0.000 1.276 -1.583 0.842 0.000 1.475 1.273 0.986 0.853 1.593 1.255 1.226 +1 1.083 0.142 1.701 0.605 -0.253 1.237 0.791 1.183 2.173 0.842 2.850 -0.082 0.000 0.724 -0.464 -0.694 0.000 1.499 0.456 -0.226 3.102 0.601 0.799 1.102 0.995 1.389 1.013 0.851 +0 0.828 1.897 -0.615 0.572 -0.545 0.572 0.461 0.464 2.173 0.393 0.356 1.069 2.215 1.840 0.088 1.500 0.000 0.407 -0.663 -0.787 0.000 0.950 0.965 0.979 0.733 0.363 0.618 0.733 +0 0.735 1.438 1.197 1.123 -0.214 0.641 0.949 0.858 0.000 1.162 0.524 -0.896 2.215 0.992 0.454 -1.475 2.548 0.902 1.079 0.019 0.000 0.822 0.917 1.203 1.032 0.569 0.780 0.764 +0 0.437 -2.102 0.044 1.779 -1.042 1.231 -0.181 -0.515 1.087 2.666 0.863 1.466 2.215 1.370 0.345 -1.371 0.000 0.906 0.363 1.611 0.000 1.140 1.362 1.013 3.931 3.004 2.724 2.028 +1 0.881 1.814 -0.987 0.384 0.800 2.384 1.422 0.640 0.000 1.528 0.292 -0.962 1.107 2.126 -0.371 -1.401 2.548 0.700 0.109 0.203 0.000 0.450 0.813 0.985 0.956 1.013 0.993 0.774 +1 0.630 0.408 0.152 0.194 0.316 0.710 -0.824 -0.358 2.173 0.741 0.535 -0.851 2.215 0.933 0.406 1.148 0.000 0.523 -0.479 -0.625 0.000 0.873 0.960 0.988 0.830 0.921 0.711 0.661 +1 0.870 -0.448 -1.134 0.616 0.135 0.600 0.649 -0.622 2.173 0.768 0.709 -0.123 0.000 1.308 0.500 1.468 0.000 1.973 -0.286 1.462 3.102 0.909 0.944 0.990 0.835 1.250 0.798 0.776 +0 1.290 0.552 1.330 0.615 -1.353 0.661 0.240 -0.393 0.000 0.531 0.053 -1.588 0.000 0.675 0.839 -0.345 1.274 1.597 0.020 0.536 3.102 1.114 0.964 0.987 0.783 0.675 0.662 0.675 +1 0.943 0.936 1.068 1.373 0.671 2.170 -2.011 -1.032 0.000 0.640 0.361 -0.806 0.000 2.239 -0.083 0.590 2.548 1.224 0.646 -1.723 0.000 0.879 0.834 0.981 1.436 0.568 0.916 0.931 +1 0.431 1.686 -1.053 0.388 1.739 0.457 -0.471 -0.743 2.173 0.786 1.432 -0.547 2.215 0.537 -0.413 1.256 0.000 0.413 2.311 -0.408 0.000 1.355 1.017 0.982 0.689 1.014 0.821 0.715 +0 1.620 -0.055 -0.862 1.341 -1.571 0.634 -0.906 0.935 2.173 0.501 -2.198 -0.525 0.000 0.778 -0.708 -0.060 0.000 0.988 -0.621 0.489 3.102 0.870 0.956 1.216 0.992 0.336 0.871 0.889 +1 0.549 0.304 -1.443 1.309 -0.312 1.116 0.644 1.519 2.173 1.078 -0.303 -0.736 0.000 1.261 0.387 0.628 2.548 0.945 -0.190 0.090 0.000 0.893 1.043 1.000 1.124 1.077 1.026 0.886 +0 0.412 -0.618 -1.486 1.133 -0.665 0.646 0.436 1.520 0.000 0.993 0.976 0.106 2.215 0.832 0.091 0.164 2.548 0.672 -0.650 1.256 0.000 0.695 1.131 0.991 1.017 0.455 1.226 1.087 +0 1.183 -0.084 1.644 1.389 0.967 0.843 0.938 -0.670 0.000 0.480 0.256 0.123 2.215 0.437 1.644 0.491 0.000 0.501 -0.416 0.101 3.102 1.060 0.804 1.017 0.775 0.173 0.535 0.760 +0 1.629 -1.486 -0.683 2.786 -0.492 1.347 -2.638 1.453 0.000 1.857 0.208 0.873 0.000 0.519 -1.265 -1.602 1.274 0.903 -1.102 -0.329 1.551 6.892 3.522 0.998 0.570 0.477 2.039 2.006 +1 2.045 -0.671 -1.235 0.490 -0.952 0.525 -1.252 1.289 0.000 1.088 -0.993 0.648 2.215 0.975 -0.109 -0.254 2.548 0.556 -1.095 -0.194 0.000 0.803 0.861 0.980 1.282 0.945 0.925 0.811 +0 0.448 -0.058 -0.974 0.945 -1.633 1.181 -1.139 0.266 2.173 1.118 -0.761 1.502 1.107 1.706 0.585 -0.680 0.000 0.487 -1.951 0.945 0.000 2.347 1.754 0.993 1.161 1.549 1.414 1.176 +0 0.551 0.519 0.448 
2.183 1.293 1.220 0.628 -0.627 2.173 1.019 -0.002 -0.652 0.000 1.843 -0.386 1.042 2.548 0.400 -1.102 -1.014 0.000 0.648 0.792 1.049 0.888 2.132 1.262 1.096 +0 1.624 0.488 1.403 0.760 0.559 0.812 0.777 -1.244 2.173 0.613 0.589 -0.030 2.215 0.692 1.058 0.683 0.000 1.054 1.165 -0.765 0.000 0.915 0.875 1.059 0.821 0.927 0.792 0.721 +1 0.774 0.444 1.257 0.515 -0.689 0.515 1.448 -1.271 0.000 0.793 0.118 0.811 1.107 0.679 0.326 -0.426 0.000 1.066 -0.865 -0.049 3.102 0.960 1.046 0.986 0.716 0.772 0.855 0.732 +1 2.093 -1.240 1.615 0.918 -1.202 1.412 -0.541 0.640 1.087 2.019 0.872 -0.639 0.000 0.672 -0.936 0.972 0.000 0.896 0.235 0.212 0.000 0.810 0.700 1.090 0.797 0.862 1.049 0.874 +1 0.908 1.069 0.283 0.400 1.293 0.609 1.452 -1.136 0.000 0.623 0.417 -0.098 2.215 1.023 0.775 1.054 1.274 0.706 2.346 -1.305 0.000 0.744 1.006 0.991 0.606 0.753 0.796 0.753 +0 0.403 -1.328 -0.065 0.901 1.052 0.708 -0.354 -0.718 2.173 0.892 0.633 1.684 2.215 0.999 -1.205 0.941 0.000 0.930 1.072 -0.809 0.000 2.105 1.430 0.989 0.838 1.147 1.042 0.883 +0 1.447 0.453 0.118 1.731 0.650 0.771 0.446 -1.564 0.000 0.973 -2.014 0.354 0.000 1.949 -0.643 -1.531 1.274 1.106 -0.334 -1.163 0.000 0.795 0.821 1.013 1.699 0.918 1.118 1.018 +1 1.794 0.123 -0.454 0.057 1.489 0.966 -1.190 1.090 1.087 0.539 -0.535 1.035 0.000 1.096 -1.069 -1.236 2.548 0.659 -1.196 -0.283 0.000 0.803 0.756 0.985 1.343 1.109 0.993 0.806 +0 1.484 -2.047 0.813 0.591 -0.295 0.923 0.312 -1.164 2.173 0.654 -0.316 0.752 2.215 0.599 1.966 -1.128 0.000 0.626 -0.304 -1.431 0.000 1.112 0.910 1.090 0.986 1.189 1.350 1.472 +0 0.417 -2.016 0.849 1.817 0.040 1.201 -1.676 -1.394 0.000 0.792 0.537 0.641 2.215 0.794 -1.222 0.187 0.000 0.825 -0.217 1.334 3.102 1.470 0.931 0.987 1.203 0.525 0.833 0.827 +1 0.603 1.009 0.033 0.486 1.225 0.884 -0.617 -1.058 0.000 0.500 -1.407 -0.567 0.000 1.476 -0.876 0.605 2.548 0.970 0.560 1.092 3.102 0.853 1.153 0.988 0.846 0.920 0.944 0.835 +1 1.381 -0.326 0.552 0.417 -0.027 1.030 -0.835 -1.287 2.173 0.941 -0.421 1.519 2.215 0.615 -1.650 0.377 0.000 0.606 0.644 0.650 0.000 1.146 0.970 0.990 1.191 0.884 0.897 0.826 +1 0.632 1.200 -0.703 0.438 -1.700 0.779 -0.731 0.958 1.087 0.605 0.393 -1.376 0.000 0.670 -0.827 -1.315 2.548 0.626 -0.501 0.417 0.000 0.904 0.903 0.998 0.673 0.803 0.722 0.640 +1 1.561 -0.569 1.580 0.329 0.237 1.059 0.731 0.415 2.173 0.454 0.016 -0.828 0.000 0.587 0.008 -0.291 1.274 0.597 1.119 1.191 0.000 0.815 0.908 0.988 0.733 0.690 0.892 0.764 +1 2.102 0.087 0.449 1.164 -0.390 1.085 -0.408 -1.116 2.173 0.578 0.197 -0.137 0.000 1.202 0.917 1.523 0.000 0.959 -0.832 1.404 3.102 1.380 1.109 1.486 1.496 0.886 1.066 1.025 +1 1.698 -0.489 -0.552 0.976 -1.009 1.620 -0.721 0.648 1.087 1.481 -1.860 -1.354 0.000 1.142 -1.140 1.401 2.548 1.000 -1.274 -0.158 0.000 1.430 1.130 0.987 1.629 1.154 1.303 1.223 +1 1.111 -0.249 -1.457 0.421 0.939 0.646 -2.076 0.362 0.000 1.315 0.796 -1.436 2.215 0.780 0.130 0.055 0.000 1.662 -0.834 0.461 0.000 0.920 0.948 0.990 1.046 0.905 1.493 1.169 +1 0.945 0.390 -1.159 1.675 0.437 0.356 0.261 0.543 1.087 0.574 0.838 1.599 2.215 0.496 -1.220 -0.022 0.000 0.558 -2.454 1.440 0.000 0.763 0.983 1.728 1.000 0.578 0.922 1.003 +1 2.076 0.014 -1.314 0.854 -0.306 3.446 1.341 0.598 0.000 2.086 0.227 -0.747 2.215 1.564 -0.216 1.649 2.548 0.965 -0.857 -1.062 0.000 0.477 0.734 1.456 1.003 1.660 1.001 0.908 +1 1.992 0.192 -0.103 0.108 -1.599 0.938 0.595 -1.360 2.173 0.869 -1.012 1.432 0.000 1.302 0.850 0.436 2.548 0.487 1.051 -1.027 0.000 0.502 0.829 0.983 1.110 1.394 0.904 0.836 +0 0.460 1.625 1.485 1.331 1.242 0.675 -0.329 
-1.039 1.087 0.671 -1.028 -0.514 0.000 1.265 -0.788 0.415 1.274 0.570 -0.683 -1.738 0.000 0.725 0.758 1.004 1.024 1.156 0.944 0.833 +0 0.871 0.839 -1.536 0.428 1.198 0.875 -1.256 -0.466 1.087 0.684 -0.768 0.150 0.000 0.556 -1.793 0.389 0.000 0.942 -1.126 1.339 1.551 0.624 0.734 0.986 1.357 0.960 1.474 1.294 +1 0.951 1.651 0.576 1.273 1.495 0.834 0.048 -0.578 2.173 0.386 -0.056 -1.448 0.000 0.597 -0.196 0.162 2.548 0.524 1.649 1.625 0.000 0.737 0.901 1.124 1.014 0.556 1.039 0.845 +1 1.049 -0.223 0.685 0.256 -1.191 2.506 0.238 -0.359 0.000 1.510 -0.904 1.158 1.107 2.733 -0.902 1.679 2.548 0.407 -0.474 -1.572 0.000 1.513 2.472 0.982 1.238 0.978 1.985 1.510 +0 0.455 -0.028 0.265 1.286 1.373 0.459 0.331 -0.922 0.000 0.343 0.634 0.430 0.000 0.279 -0.084 -0.272 0.000 0.475 0.926 -0.123 3.102 0.803 0.495 0.987 0.587 0.211 0.417 0.445 +1 2.074 0.388 0.878 1.110 1.557 1.077 -0.226 -0.295 2.173 0.865 -0.319 -1.116 2.215 0.707 -0.835 0.722 0.000 0.632 -0.608 -0.728 0.000 0.715 0.802 1.207 1.190 0.960 1.143 0.926 +1 1.390 0.265 1.196 0.919 -1.371 1.858 0.506 0.786 0.000 1.280 -1.367 -0.720 2.215 1.483 -0.441 -0.675 2.548 1.076 0.294 -0.539 0.000 1.126 0.830 1.155 1.551 0.702 1.103 0.933 +1 1.014 -0.079 1.597 1.038 -0.281 1.135 -0.722 -0.177 2.173 0.544 -1.475 -1.501 0.000 1.257 -1.315 1.212 0.000 0.496 -0.060 1.180 1.551 0.815 0.611 1.411 1.110 0.792 0.846 0.853 +0 0.335 1.267 -1.154 2.011 -0.574 0.753 0.618 1.411 0.000 0.474 0.748 0.681 2.215 0.608 -0.446 -0.354 2.548 0.399 1.295 -0.581 0.000 0.911 0.882 0.975 0.832 0.598 0.580 0.678 +1 0.729 -0.189 1.182 0.293 1.310 0.412 0.459 -0.632 0.000 0.869 -1.128 -0.625 2.215 1.173 -0.893 0.478 2.548 0.584 -2.394 -1.727 0.000 2.016 1.272 0.995 1.034 0.905 0.966 1.038 +1 1.225 -1.215 -0.088 0.881 -0.237 0.600 -0.976 1.462 2.173 0.876 0.506 1.583 2.215 0.718 1.228 -0.031 0.000 0.653 -1.292 1.216 0.000 0.838 1.108 0.981 1.805 0.890 1.251 1.197 +1 2.685 -0.444 0.847 0.253 0.183 0.641 -1.541 -0.873 2.173 0.417 2.874 -0.551 0.000 0.706 -1.431 0.764 0.000 1.390 -0.596 -1.397 0.000 0.894 0.829 0.993 0.789 0.654 0.883 0.746 +0 0.638 -0.481 0.683 1.457 -1.024 0.707 -1.338 1.498 0.000 0.980 0.518 0.289 2.215 0.964 -0.531 -0.423 0.000 0.694 -0.654 -1.314 3.102 0.807 1.283 1.335 0.658 0.907 0.797 0.772 +1 1.789 -0.765 -0.732 0.421 -0.020 1.142 -1.353 1.439 2.173 0.725 -1.518 -1.261 0.000 0.812 -2.597 -0.463 0.000 1.203 -0.120 1.001 0.000 0.978 0.673 0.985 1.303 1.400 1.078 0.983 +1 0.784 -1.431 1.724 0.848 0.559 0.615 -1.643 -1.456 0.000 1.339 -0.513 0.040 2.215 0.394 -2.483 1.304 0.000 0.987 0.889 -0.339 0.000 0.732 0.713 0.987 0.973 0.705 0.875 0.759 +1 0.911 1.098 -1.289 0.421 0.823 1.218 -0.503 0.431 0.000 0.775 0.432 -1.680 0.000 0.855 -0.226 -0.460 2.548 0.646 -0.947 -1.243 1.551 2.201 1.349 0.985 0.730 0.451 0.877 0.825 +1 0.959 0.372 -0.269 1.255 0.702 1.151 0.097 0.805 2.173 0.993 1.011 0.767 2.215 1.096 0.185 0.381 0.000 1.001 -0.205 0.059 0.000 0.979 0.997 1.168 0.796 0.771 0.839 0.776 +0 0.283 -1.864 -1.663 0.219 1.624 0.955 -1.213 0.932 2.173 0.889 0.395 -0.268 0.000 0.597 -1.083 -0.921 2.548 0.584 1.325 -1.072 0.000 0.856 0.927 0.996 0.937 0.936 1.095 0.892 +0 2.017 -0.488 -0.466 1.029 -0.870 3.157 0.059 -0.343 2.173 3.881 0.872 1.502 1.107 3.631 1.720 0.963 0.000 0.633 -1.264 -1.734 0.000 4.572 3.339 1.005 1.407 5.590 3.614 3.110 +1 1.088 0.414 -0.841 0.485 0.605 0.860 1.110 -0.568 0.000 1.152 -0.325 1.203 2.215 0.324 1.652 -0.104 0.000 0.510 1.095 -1.728 0.000 0.880 0.722 0.989 0.977 0.711 0.888 0.762 +0 0.409 -1.717 0.712 0.809 -1.301 0.701 -1.529 -1.411 
0.000 1.191 -0.582 0.438 2.215 1.147 0.813 -0.571 2.548 1.039 0.543 0.892 0.000 0.636 0.810 0.986 0.861 1.411 0.907 0.756 +1 1.094 1.577 -0.988 0.497 -0.149 0.891 -2.459 1.034 0.000 0.646 0.792 -1.022 0.000 1.573 0.254 -0.053 2.548 1.428 0.190 -1.641 3.102 4.322 2.687 0.985 0.881 1.135 1.907 1.831 +1 0.613 1.993 -0.280 0.544 0.931 0.909 1.526 1.559 0.000 0.840 1.473 -0.483 2.215 0.856 0.352 0.408 2.548 1.058 1.733 -1.396 0.000 0.801 1.066 0.984 0.639 0.841 0.871 0.748 +0 0.958 -1.202 0.600 0.434 0.170 0.783 -0.214 1.319 0.000 0.835 -0.454 -0.615 2.215 0.658 -1.858 -0.891 0.000 0.640 0.172 -1.204 3.102 1.790 1.086 0.997 0.804 0.403 0.793 0.756 +1 1.998 -0.238 0.972 0.058 0.266 0.759 1.576 -0.357 2.173 1.004 -0.349 -0.747 2.215 0.962 0.490 -0.453 0.000 1.592 0.661 -1.405 0.000 0.874 1.086 0.990 1.436 1.527 1.177 0.993 +1 0.796 -0.171 -0.818 0.574 -1.625 1.201 -0.737 1.451 2.173 0.651 0.404 -0.452 0.000 1.150 -0.652 -0.120 0.000 1.008 -0.093 0.531 3.102 0.884 0.706 0.979 1.193 0.937 0.943 0.881 +1 0.773 1.023 0.527 1.537 -0.201 2.967 -0.574 -1.534 2.173 2.346 -0.307 0.394 2.215 1.393 0.135 -0.027 0.000 3.015 0.187 0.516 0.000 0.819 1.260 0.982 2.552 3.862 2.179 1.786 +0 1.823 1.008 -1.489 0.234 -0.962 0.591 0.461 0.996 2.173 0.568 -1.297 -0.410 0.000 0.887 2.157 1.194 0.000 2.079 0.369 -0.085 3.102 0.770 0.945 0.995 1.179 0.971 0.925 0.983 +0 0.780 0.640 0.490 0.680 -1.301 0.715 -0.137 0.152 2.173 0.616 -0.831 1.668 0.000 1.958 0.528 -0.982 2.548 0.966 -1.551 0.462 0.000 1.034 1.079 1.008 0.827 1.369 1.152 0.983 +1 0.543 0.801 1.543 1.134 -0.772 0.954 -0.849 0.410 1.087 0.851 -1.988 1.686 0.000 0.799 -0.912 -1.156 0.000 0.479 0.097 1.334 0.000 0.923 0.597 0.989 1.231 0.759 0.975 0.867 +0 1.241 -0.014 0.129 1.158 0.670 0.445 -0.732 1.739 2.173 0.918 0.659 -1.340 2.215 0.557 2.410 -1.404 0.000 0.966 -1.545 -1.120 0.000 0.874 0.918 0.987 1.001 0.798 0.904 0.937 +0 1.751 -0.266 -1.575 0.489 1.292 1.112 1.533 0.137 2.173 1.204 -0.414 -0.928 0.000 0.879 1.237 -0.415 2.548 1.479 1.469 0.913 0.000 2.884 1.747 0.989 1.742 0.600 1.363 1.293 +1 1.505 1.208 -1.476 0.995 -0.836 2.800 -1.600 0.111 0.000 2.157 1.241 1.110 2.215 1.076 2.619 -0.913 0.000 1.678 2.204 -1.575 0.000 0.849 1.224 0.990 1.412 0.976 1.271 1.105 +0 0.816 0.611 0.779 1.694 0.278 0.575 -0.787 1.592 2.173 1.148 1.076 -0.831 2.215 0.421 1.316 0.632 0.000 0.589 0.452 -1.466 0.000 0.779 0.909 0.990 1.146 1.639 1.236 0.949 +1 0.551 -0.808 0.330 1.188 -0.294 0.447 -0.035 -0.993 0.000 0.432 -0.276 -0.481 2.215 1.959 -0.288 1.195 2.548 0.638 0.583 1.107 0.000 0.832 0.924 0.993 0.723 0.976 0.968 0.895 +0 1.316 -0.093 0.995 0.860 -0.621 0.593 -0.560 -1.599 2.173 0.524 -0.318 -0.240 2.215 0.566 0.759 -0.368 0.000 0.483 -2.030 -1.104 0.000 1.468 1.041 1.464 0.811 0.778 0.690 0.722 +1 1.528 0.067 -0.855 0.959 -1.464 1.143 -0.082 1.023 0.000 0.702 -0.763 -0.244 0.000 0.935 -0.881 0.206 2.548 0.614 -0.831 1.657 3.102 1.680 1.105 0.983 1.078 0.559 0.801 0.809 +0 0.558 -0.833 -0.598 1.436 -1.724 1.316 -0.661 1.593 2.173 1.148 -0.503 -0.132 1.107 1.584 -0.125 0.380 0.000 1.110 -1.216 -0.181 0.000 1.258 0.860 1.053 0.790 1.814 1.159 1.007 +1 0.819 0.879 1.221 0.598 -1.450 0.754 0.417 -0.369 2.173 0.477 1.199 0.274 0.000 1.073 0.368 0.273 2.548 1.599 2.047 1.690 0.000 0.933 0.984 0.983 0.788 0.613 0.728 0.717 +0 0.981 -1.007 0.489 0.923 1.261 0.436 -0.698 -0.506 2.173 0.764 -1.105 -1.241 2.215 0.577 -2.573 -0.036 0.000 0.565 -1.628 1.610 0.000 0.688 0.801 0.991 0.871 0.554 0.691 0.656 +0 2.888 0.568 -1.416 1.461 -1.157 1.756 -0.900 0.522 0.000 0.657 0.409 
1.076 2.215 1.419 0.672 -0.019 0.000 1.436 -0.184 -0.980 3.102 0.946 0.919 0.995 1.069 0.890 0.834 0.856 +1 0.522 1.805 -0.963 1.136 0.418 0.727 -0.195 -1.695 2.173 0.309 2.559 -0.178 0.000 0.521 1.794 0.919 0.000 0.788 0.174 -0.406 3.102 0.555 0.729 1.011 1.385 0.753 0.927 0.832 +1 0.793 -0.162 -1.643 0.634 0.337 0.898 -0.633 1.689 0.000 0.806 -0.826 -0.356 2.215 0.890 -0.142 -1.268 0.000 1.293 0.574 0.725 0.000 0.833 1.077 0.988 0.721 0.679 0.867 0.753 +0 1.298 1.098 0.280 0.371 -0.373 0.855 -0.306 -1.186 0.000 0.977 -0.421 1.003 0.000 0.978 0.956 -1.249 2.548 0.735 0.577 -0.037 3.102 0.974 1.002 0.992 0.549 0.587 0.725 0.954 +1 0.751 -0.520 -1.653 0.168 -0.419 0.878 -1.023 -1.364 2.173 1.310 -0.667 0.863 0.000 1.196 -0.827 0.358 0.000 1.154 -0.165 -0.360 1.551 0.871 0.950 0.983 0.907 0.955 0.959 0.874 +0 1.730 0.666 -1.432 0.446 1.302 0.921 -0.203 0.621 0.000 1.171 -0.365 -0.611 1.107 0.585 0.807 1.150 0.000 0.415 -0.843 1.311 0.000 0.968 0.786 0.986 1.059 0.371 0.790 0.848 +1 0.596 -1.486 0.690 1.045 -1.344 0.928 0.867 0.820 2.173 0.610 0.999 -1.329 2.215 0.883 -0.001 -0.106 0.000 1.145 2.184 -0.808 0.000 2.019 1.256 1.056 1.751 1.037 1.298 1.518 +1 0.656 -1.993 -0.519 1.643 -0.143 0.815 0.256 1.220 1.087 0.399 -1.184 -1.458 0.000 0.738 1.361 -1.443 0.000 0.842 0.033 0.293 0.000 0.910 0.891 0.993 0.668 0.562 0.958 0.787 +1 1.127 -0.542 0.645 0.318 -1.496 0.661 -0.640 0.369 2.173 0.992 0.358 1.702 0.000 1.004 0.316 -1.109 0.000 1.616 -0.936 -0.707 1.551 0.875 1.191 0.985 0.651 0.940 0.969 0.834 +0 0.916 -1.423 -1.490 1.248 -0.538 0.625 -0.535 -0.174 0.000 0.769 -0.389 1.608 2.215 0.667 -1.138 -1.738 1.274 0.877 -0.019 0.482 0.000 0.696 0.917 1.121 0.678 0.347 0.647 0.722 +1 2.756 -0.637 -1.715 1.331 1.124 0.913 -0.296 -0.491 0.000 0.983 -0.831 0.000 2.215 1.180 -0.428 0.742 0.000 1.113 0.005 -1.157 1.551 1.681 1.096 1.462 0.976 0.917 1.009 1.040 +0 0.755 1.754 0.701 2.111 0.256 1.243 0.057 -1.502 2.173 0.565 -0.034 -1.078 1.107 0.529 1.696 -1.090 0.000 0.665 0.292 0.107 0.000 0.870 0.780 0.990 2.775 0.465 1.876 1.758 +1 0.593 -0.762 1.743 0.908 0.442 0.773 -1.357 -0.768 2.173 0.432 1.421 1.236 0.000 0.579 0.291 -0.403 0.000 0.966 -0.309 1.016 3.102 0.893 0.743 0.989 0.857 1.030 0.943 0.854 +1 0.891 -1.151 -1.269 0.504 -0.622 0.893 -0.549 0.700 0.000 0.828 -0.825 0.154 2.215 1.083 0.632 -1.141 0.000 1.059 -0.557 1.526 3.102 2.117 1.281 0.987 0.819 0.802 0.917 0.828 +1 2.358 -0.248 0.080 0.747 -0.975 1.019 1.374 1.363 0.000 0.935 0.127 -1.707 2.215 0.312 -0.827 0.017 0.000 0.737 1.059 -0.327 0.000 0.716 0.828 1.495 0.953 0.704 0.880 0.745 +0 0.660 -0.017 -1.138 0.453 1.002 0.645 0.518 0.703 2.173 0.751 0.705 -0.592 2.215 0.744 -0.909 -1.596 0.000 0.410 -1.135 0.481 0.000 0.592 0.922 0.989 0.897 0.948 0.777 0.701 +1 0.718 0.518 0.225 1.710 -0.022 1.888 -0.424 1.092 0.000 4.134 0.185 -1.366 0.000 1.415 1.293 0.242 2.548 2.351 0.264 -0.057 3.102 0.830 1.630 0.976 1.215 0.890 1.422 1.215 +1 1.160 0.203 0.941 0.594 0.212 0.636 -0.556 0.679 2.173 1.089 -0.481 -1.008 1.107 1.245 -0.056 -1.357 0.000 0.587 1.007 0.056 0.000 1.106 0.901 0.987 0.786 1.224 0.914 0.837 +1 0.697 0.542 0.619 0.985 1.481 0.745 0.415 1.644 2.173 0.903 0.495 -0.958 2.215 1.165 1.195 0.346 0.000 1.067 -0.881 -0.264 0.000 0.830 1.025 0.987 0.690 0.863 0.894 0.867 +0 1.430 0.190 -0.700 0.246 0.518 1.302 0.660 -0.247 2.173 1.185 -0.539 1.504 0.000 1.976 -0.401 1.079 0.000 0.855 -0.958 -1.110 3.102 0.886 0.953 0.993 0.889 1.400 1.376 1.119 +1 1.122 -0.795 0.202 0.397 -1.553 0.597 -1.459 -0.734 2.173 0.522 1.044 1.027 2.215 
0.783 -1.243 1.701 0.000 0.371 1.737 0.199 0.000 1.719 1.176 0.988 0.723 1.583 1.063 0.914 +0 1.153 0.526 1.236 0.266 0.001 1.139 -1.236 -0.585 2.173 1.337 -0.215 -1.356 2.215 1.780 1.129 0.902 0.000 1.608 -0.391 -0.161 0.000 1.441 1.633 0.990 1.838 1.516 1.635 1.373 +1 0.760 1.012 0.758 0.937 0.051 0.941 0.687 -1.247 2.173 1.288 -0.743 0.822 0.000 1.552 1.782 -1.533 0.000 0.767 1.349 0.168 0.000 0.716 0.862 0.988 0.595 0.359 0.697 0.623 +1 1.756 -1.469 1.395 1.345 -1.595 0.817 0.017 -0.741 2.173 0.483 -0.008 0.293 0.000 1.768 -0.663 0.438 1.274 1.202 -1.387 -0.222 0.000 1.022 1.058 0.992 1.407 1.427 1.356 1.133 +0 0.397 0.582 -0.758 1.260 -1.735 0.889 -0.515 1.139 2.173 0.973 1.616 0.460 0.000 1.308 1.001 -0.709 2.548 0.858 0.995 -0.231 0.000 0.749 0.888 0.979 1.487 1.804 1.208 1.079 +0 0.515 -0.984 0.425 1.114 -0.439 1.999 0.818 1.561 0.000 1.407 0.009 -0.380 0.000 1.332 0.230 0.397 0.000 1.356 -0.616 -1.057 3.102 0.978 1.017 0.990 1.118 0.862 0.835 0.919 +1 1.368 -0.921 -0.866 0.842 -0.598 0.456 -1.176 1.219 1.087 0.419 -1.974 -0.819 0.000 0.791 -1.640 0.881 0.000 1.295 -0.782 0.442 3.102 0.945 0.761 0.974 0.915 0.535 0.733 0.651 +0 2.276 0.134 0.399 2.525 0.376 1.111 -1.078 -1.571 0.000 0.657 2.215 -0.900 0.000 1.183 -0.662 -0.508 2.548 1.436 -0.517 0.960 3.102 0.569 0.931 0.993 1.170 0.967 0.879 1.207 +0 0.849 0.907 0.124 0.652 1.585 0.715 0.355 -1.200 0.000 0.599 -0.892 1.301 0.000 1.106 1.151 0.582 0.000 1.895 -0.279 -0.568 3.102 0.881 0.945 0.998 0.559 0.649 0.638 0.660 +1 2.105 0.248 -0.797 0.530 0.206 1.957 -2.175 0.797 0.000 1.193 0.637 -1.646 2.215 0.881 1.111 -1.046 0.000 0.872 -0.185 1.085 1.551 0.986 1.343 1.151 1.069 0.714 2.063 1.951 +1 1.838 1.060 1.637 1.017 1.370 0.913 0.461 -0.609 1.087 0.766 -0.461 0.303 2.215 0.724 -0.061 0.886 0.000 0.941 1.123 -0.745 0.000 0.858 0.847 0.979 1.313 1.083 1.094 0.910 +0 0.364 1.274 1.066 1.570 -0.394 0.485 0.012 -1.716 0.000 0.317 -1.233 0.534 2.215 0.548 -2.165 0.762 0.000 0.729 0.169 -0.318 3.102 0.892 0.944 1.013 0.594 0.461 0.688 0.715 +1 0.503 1.343 -0.031 1.134 -1.204 0.590 -0.309 0.174 2.173 0.408 2.372 -0.628 0.000 1.850 0.400 1.147 2.548 0.664 -0.458 -0.885 0.000 1.445 1.283 0.989 1.280 1.118 1.127 1.026 +0 1.873 0.258 0.103 2.491 0.530 1.678 0.644 -1.738 2.173 1.432 0.848 -1.340 0.000 0.621 1.323 -1.316 0.000 0.628 0.789 -0.206 1.551 0.426 0.802 1.125 0.688 1.079 1.338 1.239 +1 0.826 -0.732 1.587 0.582 -1.236 0.495 0.757 -0.741 2.173 0.940 1.474 0.354 2.215 0.474 1.055 -1.657 0.000 0.415 1.758 0.841 0.000 0.451 0.578 0.984 0.757 0.922 0.860 0.696 +0 0.935 -1.614 -0.597 0.299 1.223 0.707 -0.853 -1.026 0.000 0.751 0.007 -1.691 0.000 1.062 -0.125 0.976 2.548 0.877 1.275 0.646 0.000 0.962 1.074 0.980 0.608 0.726 0.741 0.662 +1 0.643 0.542 -1.285 0.474 -0.366 0.667 -0.446 1.195 2.173 1.076 0.145 -0.126 0.000 0.970 -0.661 0.394 1.274 1.218 -0.184 -1.722 0.000 1.331 1.019 0.985 1.192 0.677 0.973 0.910 +0 0.713 0.164 1.080 1.427 -0.460 0.960 -0.152 -0.940 2.173 1.427 -0.901 1.036 1.107 0.440 -1.269 -0.194 0.000 0.452 1.932 -0.532 0.000 1.542 1.210 1.374 1.319 1.818 1.220 1.050 +0 0.876 -0.463 -1.224 2.458 -1.689 1.007 -0.752 0.398 0.000 2.456 -1.285 -0.152 1.107 1.641 1.838 1.717 0.000 0.458 0.194 0.488 3.102 4.848 2.463 0.986 1.981 0.974 2.642 2.258 +1 0.384 -0.275 0.387 1.403 -0.994 0.620 -1.529 1.685 0.000 1.091 -1.644 1.078 0.000 0.781 -1.311 0.326 2.548 1.228 -0.728 -0.633 1.551 0.920 0.854 0.987 0.646 0.609 0.740 0.884 +0 0.318 -1.818 -1.008 0.977 1.268 0.457 2.451 -1.522 0.000 0.881 1.351 0.461 2.215 0.929 0.239 -0.380 2.548 
0.382 -0.613 1.330 0.000 1.563 1.193 0.994 0.829 0.874 0.901 1.026 +1 0.612 -1.120 1.098 0.402 -0.480 0.818 0.188 1.511 0.000 0.800 -0.253 0.977 0.000 1.175 0.271 -1.289 1.274 2.531 0.226 -0.409 3.102 0.889 0.947 0.979 1.486 0.940 1.152 1.119 +1 0.587 -0.737 -0.228 0.970 1.119 0.823 0.184 1.594 0.000 1.104 0.301 -0.818 2.215 0.819 0.712 -0.560 0.000 2.240 -0.419 0.340 3.102 1.445 1.103 0.988 0.715 1.363 1.019 0.926 +0 1.030 -0.694 -1.638 0.893 -1.074 1.160 -0.766 0.485 0.000 1.632 -0.698 -1.142 2.215 1.050 -1.092 0.952 0.000 1.475 0.286 0.125 3.102 0.914 1.075 0.982 0.732 1.493 1.219 1.079 +1 2.142 0.617 1.517 0.387 -0.862 0.345 1.203 -1.014 2.173 0.609 1.092 0.275 0.000 1.331 0.582 -0.183 2.548 0.557 1.540 -1.642 0.000 0.801 0.737 1.060 0.715 0.626 0.749 0.674 +0 1.076 0.240 -0.246 0.871 -1.241 0.496 0.282 0.746 2.173 1.095 -0.648 1.100 2.215 0.446 -1.756 0.764 0.000 0.434 0.788 -0.991 0.000 1.079 0.868 1.047 0.818 0.634 0.795 0.733 +0 1.400 0.901 -1.617 0.625 -0.163 0.661 -0.411 -1.616 2.173 0.685 0.524 0.425 0.000 0.881 -0.766 0.312 0.000 0.979 0.255 -0.667 3.102 0.898 1.105 1.253 0.730 0.716 0.738 0.795 +0 3.302 1.132 1.051 0.658 0.768 1.308 0.251 -0.374 1.087 1.673 0.015 -0.898 0.000 0.688 -0.535 1.363 1.274 0.871 1.325 -1.583 0.000 1.646 1.249 0.995 1.919 1.288 1.330 1.329 +0 1.757 0.202 0.750 0.767 -0.362 0.932 -1.033 -1.366 0.000 1.529 -1.012 -0.771 0.000 1.161 -0.287 0.059 0.000 2.185 1.147 1.099 3.102 0.795 0.529 1.354 1.144 1.491 1.319 1.161 +0 1.290 0.905 -1.711 1.017 -0.695 1.008 -1.038 0.693 2.173 1.202 -0.595 0.187 0.000 1.011 0.139 -1.607 0.000 0.789 -0.613 -1.041 3.102 1.304 0.895 1.259 1.866 0.955 1.211 1.200 +1 1.125 -0.004 1.694 0.373 0.329 0.978 0.640 -0.391 0.000 1.122 -0.376 1.521 2.215 0.432 2.413 -1.259 0.000 0.969 0.730 0.512 3.102 0.716 0.773 0.991 0.624 0.977 0.981 0.875 +0 1.081 0.861 1.252 1.621 1.474 1.293 0.600 0.630 0.000 1.991 -0.090 -0.675 2.215 0.861 1.105 -0.201 0.000 1.135 2.489 -1.659 0.000 1.089 0.657 0.991 2.179 0.412 1.334 1.071 +1 0.652 -0.294 1.241 1.034 0.490 1.033 0.551 -0.963 2.173 0.661 1.031 -1.654 2.215 1.376 -0.018 0.843 0.000 0.943 -0.329 -0.269 0.000 1.085 1.067 0.991 1.504 0.773 1.135 0.993 +1 1.408 -1.028 -1.018 0.252 -0.242 0.465 -0.364 -0.200 0.000 1.466 0.669 0.739 1.107 1.031 0.415 -1.468 2.548 0.457 -1.091 -1.722 0.000 0.771 0.811 0.979 1.459 1.204 1.041 0.866 +1 0.781 -1.143 -0.659 0.961 1.266 1.183 -0.686 0.119 2.173 1.126 -0.064 1.447 0.000 0.730 1.430 -1.535 0.000 1.601 0.513 1.658 0.000 0.871 1.345 1.184 1.058 0.620 1.107 0.978 +1 1.300 -0.616 1.032 0.751 -0.731 0.961 -0.716 1.592 0.000 2.079 -1.063 -0.271 2.215 0.475 0.518 1.695 1.274 0.395 -2.204 0.349 0.000 1.350 0.983 1.369 1.265 1.428 1.135 0.982 +1 0.833 0.809 1.657 1.637 1.019 0.705 1.077 -0.968 2.173 1.261 0.114 -0.298 1.107 1.032 0.017 0.236 0.000 0.640 -0.026 -1.598 0.000 0.894 0.982 0.981 1.250 1.054 1.018 0.853 +1 1.686 -1.090 -0.301 0.890 0.557 1.304 -0.284 -1.393 2.173 0.388 2.118 0.513 0.000 0.514 -0.015 0.891 0.000 0.460 0.547 0.627 3.102 0.942 0.524 1.186 1.528 0.889 1.015 1.122 +1 0.551 0.911 0.879 0.379 -0.796 1.154 -0.808 -0.966 0.000 1.168 -0.513 0.355 2.215 0.646 -1.309 0.773 0.000 0.544 -0.283 1.301 3.102 0.847 0.705 0.990 0.772 0.546 0.790 0.719 +1 1.597 0.793 -1.119 0.691 -1.455 0.370 0.337 1.354 0.000 0.646 -1.005 0.732 2.215 1.019 0.040 0.209 0.000 0.545 0.958 0.239 3.102 0.962 0.793 0.994 0.719 0.745 0.812 0.739 +0 1.033 -1.193 -0.452 0.247 0.970 0.503 -1.424 1.362 0.000 1.062 -0.416 -1.156 2.215 0.935 -0.023 0.555 2.548 0.410 -1.766 0.379 0.000 
0.590 0.953 0.991 0.717 1.081 0.763 0.690 +1 0.859 -1.004 1.521 0.781 -0.993 0.677 0.643 -0.338 2.173 0.486 0.409 1.283 0.000 0.679 0.110 0.285 0.000 0.715 -0.735 -0.157 1.551 0.702 0.773 0.984 0.627 0.633 0.694 0.643 +0 0.612 -1.127 1.074 1.225 -0.426 0.927 -2.141 -0.473 0.000 1.290 -0.927 -1.085 2.215 1.183 1.981 -1.687 0.000 2.176 0.406 -1.581 0.000 0.945 0.651 1.170 0.895 1.604 1.179 1.142 +1 0.535 0.321 -1.095 0.281 -0.960 0.876 -0.709 -0.076 0.000 1.563 -0.666 1.536 2.215 0.773 -0.321 0.435 0.000 0.682 -0.801 -0.952 3.102 0.711 0.667 0.985 0.888 0.741 0.872 0.758 +1 0.745 1.586 1.578 0.863 -1.423 0.530 1.714 1.085 0.000 1.174 0.679 1.015 0.000 1.158 0.609 -1.186 2.548 1.851 0.832 -0.248 3.102 0.910 1.164 0.983 0.947 0.858 0.928 0.823 +0 0.677 -1.014 -1.648 1.455 1.461 0.596 -2.358 0.517 0.000 0.800 0.849 -0.743 2.215 1.024 -0.282 -1.004 0.000 1.846 -0.977 0.378 3.102 2.210 1.423 0.982 1.074 1.623 1.417 1.258 +1 0.815 -1.263 0.057 1.018 -0.208 0.339 -0.347 -1.646 2.173 1.223 0.600 -1.658 2.215 1.435 0.042 0.926 0.000 0.777 1.698 -0.698 0.000 1.022 1.058 1.000 0.784 0.477 0.886 0.836 +0 3.512 -1.094 -0.220 0.338 -0.328 1.962 -1.099 1.544 1.087 1.461 -1.305 -0.922 2.215 1.219 -1.289 0.400 0.000 0.731 0.155 1.249 0.000 1.173 1.366 0.993 2.259 2.000 1.626 1.349 +0 0.904 1.248 0.325 0.317 -1.624 0.685 -0.538 1.665 2.173 0.685 -2.145 -1.106 0.000 0.632 -1.460 1.017 0.000 1.085 -0.182 0.162 3.102 0.885 0.801 0.989 0.930 0.904 1.012 0.961 \ No newline at end of file diff --git a/examples/trials/auto-gbdt/data/regression.train b/examples/trials/auto-gbdt/data/regression.train new file mode 100644 index 0000000000000000000000000000000000000000..4db47f5829260950653a3163180e5b6338cdd164 --- /dev/null +++ b/examples/trials/auto-gbdt/data/regression.train @@ -0,0 +1,7000 @@ +1 0.869 -0.635 0.226 0.327 -0.690 0.754 -0.249 -1.092 0.000 1.375 -0.654 0.930 1.107 1.139 -1.578 -1.047 0.000 0.658 -0.010 -0.046 3.102 1.354 0.980 0.978 0.920 0.722 0.989 0.877 +1 0.908 0.329 0.359 1.498 -0.313 1.096 -0.558 -1.588 2.173 0.813 -0.214 1.271 2.215 0.500 -1.261 0.732 0.000 0.399 -1.139 -0.001 0.000 0.302 0.833 0.986 0.978 0.780 0.992 0.798 +1 0.799 1.471 -1.636 0.454 0.426 1.105 1.282 1.382 0.000 0.852 1.541 -0.820 2.215 0.993 0.356 -0.209 2.548 1.257 1.129 0.900 0.000 0.910 1.108 0.986 0.951 0.803 0.866 0.780 +0 1.344 -0.877 0.936 1.992 0.882 1.786 -1.647 -0.942 0.000 2.423 -0.676 0.736 2.215 1.299 -1.431 -0.365 0.000 0.745 -0.678 -1.360 0.000 0.947 1.029 0.999 0.728 0.869 1.027 0.958 +1 1.105 0.321 1.522 0.883 -1.205 0.681 -1.070 -0.922 0.000 0.801 1.021 0.971 2.215 0.597 -0.350 0.631 0.000 0.480 -0.374 0.113 0.000 0.756 1.361 0.987 0.838 1.133 0.872 0.808 +0 1.596 -0.608 0.007 1.818 -0.112 0.848 -0.566 1.581 2.173 0.755 0.643 1.426 0.000 0.922 -1.190 -1.616 0.000 0.651 -0.654 -1.274 3.102 0.824 0.938 0.972 0.789 0.431 0.961 0.958 +1 0.409 -1.885 -1.027 1.672 -1.605 1.338 0.055 0.013 2.173 0.510 -1.038 0.708 0.000 0.747 -0.358 -1.647 0.000 0.367 0.069 1.377 3.102 0.869 1.222 1.001 0.545 0.699 0.977 0.829 +1 0.934 0.629 0.528 0.238 -0.967 0.548 -0.059 -1.707 2.173 0.941 -2.654 -0.157 0.000 1.030 -0.176 0.523 2.548 1.374 1.291 -1.467 0.000 0.902 1.084 0.980 0.783 0.849 0.894 0.775 +1 1.405 0.537 0.690 1.180 -0.110 3.202 -1.527 -1.576 0.000 2.932 0.567 -0.130 2.215 1.787 0.899 0.585 2.548 0.402 -0.151 1.163 0.000 1.667 4.039 1.176 1.045 1.543 3.535 2.741 +1 1.177 0.104 1.397 0.480 0.266 1.136 1.535 -0.253 0.000 1.027 0.534 1.180 0.000 2.406 0.088 -0.977 2.548 1.250 0.269 0.530 0.000 0.833 0.774 0.986 1.104 0.849 
0.937 0.812 +1 0.946 1.111 1.218 0.908 0.822 1.153 -0.365 -1.566 0.000 0.745 0.721 -0.376 2.215 0.609 0.308 -1.282 0.000 1.598 -0.451 0.064 3.102 0.829 0.981 0.994 0.908 0.776 0.783 0.725 +0 0.739 -0.178 0.830 0.505 -0.130 0.961 -0.356 -1.717 2.173 0.621 -0.482 -1.199 0.000 0.983 0.081 -0.290 0.000 1.065 0.774 0.399 3.102 0.945 1.026 0.982 0.542 1.251 0.830 0.761 +1 1.384 0.117 -1.180 0.763 -0.080 1.020 0.877 1.277 2.173 0.331 1.410 -1.474 0.000 1.283 0.737 -0.225 0.000 1.560 0.847 0.505 3.102 0.959 0.807 1.192 1.221 0.861 0.929 0.838 +1 1.384 0.889 0.619 1.082 0.345 0.956 0.855 -1.129 2.173 0.546 -0.308 -0.623 2.215 0.348 1.024 0.184 0.000 0.781 -1.636 1.144 0.000 0.522 0.738 0.986 1.350 0.813 0.953 0.780 +1 1.344 0.839 -1.061 2.472 -0.573 1.513 1.144 0.856 0.000 0.884 1.475 -1.361 1.107 1.587 2.235 0.078 0.000 1.609 2.396 0.757 0.000 0.934 0.845 1.078 1.400 0.948 1.008 0.901 +0 0.547 -0.350 -0.647 2.040 0.276 0.545 0.839 1.729 0.000 0.653 1.472 1.243 0.000 0.786 -0.044 -1.020 2.548 0.419 -0.629 1.571 3.102 0.689 0.867 1.082 0.664 0.354 0.580 0.817 +1 1.484 1.700 -1.059 2.700 -1.056 2.409 0.457 0.345 0.000 1.415 1.114 -1.449 0.000 1.013 -2.057 1.131 0.000 0.905 2.182 1.043 0.000 1.654 0.994 0.983 0.741 0.163 0.592 0.745 +0 1.058 -0.161 -0.195 2.705 -0.751 1.910 -1.032 0.865 0.000 1.301 0.147 -1.119 1.107 0.967 -0.367 1.108 0.000 0.555 -0.714 1.505 3.102 0.954 0.651 1.125 0.894 0.672 1.182 1.316 +0 0.675 1.121 -0.280 1.540 0.735 0.615 -0.507 0.795 2.173 0.219 -1.894 -0.581 0.000 1.246 -0.348 -0.856 2.548 0.753 -1.146 -1.375 0.000 0.907 0.898 1.120 1.269 1.089 1.015 0.915 +1 0.643 -1.430 1.519 0.941 0.887 1.615 -1.337 -0.267 1.087 1.667 0.656 -1.588 0.000 0.828 1.836 0.408 0.000 1.709 -0.347 -1.183 3.102 0.921 1.373 0.985 1.423 1.547 1.783 1.438 +1 1.102 0.427 1.717 0.934 0.776 1.279 -0.250 -0.926 2.173 1.067 0.434 0.681 0.000 1.054 0.004 0.255 0.000 0.743 1.208 -1.151 0.000 0.709 0.522 1.054 1.273 0.835 0.935 0.865 +1 1.330 0.202 1.173 0.135 -1.083 0.728 1.109 -0.540 1.087 0.462 0.133 -0.561 0.000 0.479 1.187 0.658 0.000 0.670 1.007 0.055 3.102 0.782 0.672 0.990 0.734 0.379 0.765 0.643 +0 1.290 -1.423 -0.687 0.131 -1.136 0.821 0.296 0.168 2.173 0.696 -0.469 -1.151 1.107 0.940 0.273 1.641 0.000 0.720 1.106 0.727 0.000 1.007 0.868 0.999 1.110 1.125 0.883 0.859 +1 1.048 -1.119 -0.957 0.996 -1.550 0.733 0.283 0.919 2.173 1.050 -0.041 0.109 2.215 0.943 0.320 -0.858 0.000 0.628 -0.325 1.217 0.000 0.873 0.873 0.976 1.373 0.888 1.207 0.999 +0 0.488 1.698 0.791 0.894 -0.709 1.563 -0.076 1.739 2.173 0.624 2.395 0.523 0.000 1.661 0.266 -0.218 2.548 0.947 -0.077 0.285 0.000 1.675 1.414 0.988 1.333 2.004 1.551 1.217 +0 1.413 -0.852 0.310 1.128 -1.510 0.820 1.153 -1.670 2.173 1.170 0.100 0.266 0.000 0.852 0.401 -1.334 0.000 1.370 0.960 -0.632 0.000 0.890 0.938 1.745 0.974 0.677 1.136 0.973 +1 0.770 -0.449 -0.986 0.966 -1.301 0.739 -1.033 0.875 1.087 1.369 -1.181 0.167 1.107 1.257 -0.122 -1.588 0.000 0.600 0.611 0.116 0.000 1.048 1.106 0.993 1.132 0.892 0.974 0.951 +0 2.468 0.664 1.024 0.317 1.407 0.996 -0.453 -0.500 0.000 0.348 1.016 -0.161 0.000 0.978 -2.634 -0.285 0.000 1.245 -0.472 1.464 3.102 1.006 0.795 0.996 0.945 0.322 0.735 1.470 +1 1.014 0.013 -0.485 0.695 1.701 0.597 0.076 0.143 2.173 0.917 0.685 1.713 2.215 0.531 -0.987 -1.654 0.000 0.963 1.295 0.264 0.000 1.576 1.067 1.072 0.806 1.130 0.838 0.752 +0 1.251 -0.750 1.090 0.462 -0.381 0.677 0.340 -0.711 0.000 0.601 -0.461 -1.247 0.000 0.822 0.985 -1.653 0.000 0.754 -0.907 0.279 3.102 0.848 0.842 1.021 0.666 0.411 0.607 0.638 +1 1.114 1.782 1.450 
0.653 1.513 0.825 1.851 -0.480 0.000 0.846 1.158 0.514 2.215 0.520 2.685 1.542 0.000 1.042 0.549 -0.463 1.551 1.321 1.037 0.997 0.824 0.692 0.804 0.831 +1 0.657 -0.901 -0.855 1.176 1.487 0.745 -1.236 1.649 2.173 0.661 -2.099 0.137 0.000 1.780 -1.036 -0.213 0.000 1.236 -0.185 0.784 3.102 0.861 1.016 1.045 0.759 0.898 0.849 0.765 +0 1.009 -0.660 -1.539 1.316 -1.693 1.146 2.025 0.137 0.000 1.063 -0.539 1.052 2.215 1.124 0.548 -0.887 2.548 1.017 -0.057 0.172 0.000 1.076 0.939 0.974 0.932 1.346 0.854 0.822 +0 2.122 0.792 0.723 2.438 1.064 2.692 0.361 -0.993 2.173 1.725 1.204 0.488 2.215 0.267 -0.767 -1.134 0.000 1.372 0.601 -0.568 0.000 0.727 0.981 0.989 2.837 3.398 2.152 1.568 +1 0.304 -1.425 -1.646 1.166 -1.469 1.458 -0.472 0.510 2.173 0.867 -0.309 -1.605 0.000 1.317 0.136 -0.332 2.548 0.853 0.744 -1.365 0.000 0.760 0.980 0.986 1.376 1.309 1.081 0.957 +1 1.167 0.556 -0.911 0.908 0.051 1.078 0.387 1.253 0.000 1.213 0.155 -0.673 2.215 0.489 -1.384 0.704 0.000 1.348 0.692 -1.502 3.102 0.868 0.829 1.087 0.782 0.878 0.642 0.621 +1 0.880 0.617 -0.649 1.724 1.104 1.213 -0.576 1.216 2.173 0.782 -0.913 -0.102 0.000 1.183 -0.576 -0.783 0.000 0.432 1.286 -0.204 0.000 0.879 0.616 1.706 1.435 0.598 0.911 1.007 +0 0.313 1.256 -0.904 1.002 1.290 1.383 1.295 -1.528 2.173 1.160 -0.765 0.080 1.107 1.060 2.309 -0.340 0.000 0.852 1.129 0.378 0.000 0.911 1.480 0.988 1.000 2.976 1.837 1.444 +0 1.263 0.596 0.460 1.063 1.060 0.709 -0.613 -0.688 0.000 1.464 1.079 1.174 2.215 1.411 0.369 -0.596 1.274 0.611 0.293 -0.894 0.000 1.175 1.244 0.988 0.905 1.623 1.442 1.222 +1 1.121 -0.379 1.363 1.451 0.782 1.088 -0.803 -0.793 1.087 0.515 0.368 -0.665 0.000 0.708 -1.372 1.449 0.000 0.579 0.441 0.238 3.102 1.336 0.869 0.984 1.459 0.905 0.950 0.863 +0 1.205 0.916 -1.209 0.354 -0.706 1.124 1.045 0.787 0.000 0.489 -0.457 -1.033 2.215 0.388 1.276 0.000 0.000 0.443 -0.889 1.403 0.000 0.842 0.653 0.986 0.500 0.532 0.580 0.589 +1 0.420 -0.722 0.732 0.885 -0.724 0.741 1.244 1.619 0.000 1.248 0.281 0.076 2.215 1.085 0.331 1.242 0.000 1.025 0.086 -0.955 1.551 0.919 0.927 0.989 0.744 0.824 0.923 0.798 +0 1.380 1.427 1.105 1.788 0.982 1.955 -0.205 -0.852 1.087 0.901 -0.193 0.854 0.000 1.172 0.352 -0.512 1.274 0.445 -0.158 1.421 0.000 0.403 0.882 1.000 2.450 0.804 1.608 1.272 +1 0.704 0.369 -0.230 1.167 -1.430 0.721 0.012 1.508 2.173 0.683 0.028 0.688 2.215 1.013 -0.764 -0.222 0.000 0.930 0.082 -0.753 0.000 0.865 0.748 1.107 0.835 0.696 0.681 0.604 +1 0.695 0.420 1.203 0.769 -0.911 0.830 1.168 0.076 0.000 0.394 0.392 0.510 2.215 0.747 1.559 0.835 0.000 1.090 -0.422 -1.161 3.102 0.973 0.654 0.987 0.688 0.652 0.784 0.703 +1 0.312 1.722 1.411 1.133 1.163 0.756 1.210 -0.700 2.173 0.755 -0.053 -0.139 2.215 0.812 -0.193 1.153 0.000 0.847 1.298 1.682 0.000 1.010 1.000 0.996 1.118 0.931 0.860 0.794 +0 0.431 0.572 -0.684 2.262 0.155 1.178 0.178 -1.429 2.173 0.463 0.649 0.544 2.215 0.757 0.955 1.552 0.000 0.658 1.073 1.064 0.000 0.344 0.840 0.986 0.580 1.096 0.957 0.821 +0 0.309 -1.951 -1.229 1.592 0.770 0.633 -0.197 -1.568 1.087 0.898 -1.885 -0.257 0.000 0.897 -0.933 0.931 2.548 1.280 -0.431 -0.799 0.000 0.921 0.862 0.990 0.812 0.831 1.026 0.895 +1 0.458 0.129 -0.519 1.195 0.737 0.534 -1.316 -1.729 0.000 0.687 0.351 1.103 2.215 0.911 1.049 -0.219 2.548 0.808 -1.014 -0.367 0.000 0.888 1.371 0.984 0.871 0.852 1.238 1.006 +0 0.637 -0.037 -1.732 1.254 -0.425 0.486 0.090 0.024 2.173 0.675 -1.119 1.644 0.000 0.494 -2.085 0.544 0.000 0.386 -0.239 1.092 0.000 0.913 0.912 1.144 0.698 0.525 0.741 0.726 +1 0.976 0.291 -1.128 0.668 -0.540 0.950 2.026 1.060 
0.000 0.678 -0.571 1.307 2.215 1.199 1.293 -0.273 0.000 0.602 1.124 0.825 3.102 1.891 1.026 0.990 0.814 0.693 1.131 1.181 +1 0.535 -1.391 -0.825 1.343 -1.449 1.111 -0.852 -0.484 0.000 1.677 -0.700 1.069 2.215 0.623 0.018 -1.653 0.000 0.925 0.350 0.169 0.000 0.852 1.025 0.986 1.447 0.755 1.273 1.138 +0 2.638 1.289 -0.280 0.991 0.872 1.152 -0.702 1.551 2.173 0.643 -0.767 -1.689 0.000 0.747 -2.603 0.907 0.000 1.259 0.986 -0.759 0.000 0.889 0.937 1.931 2.569 0.709 1.666 1.322 +0 1.541 0.058 1.227 1.217 0.660 0.524 1.040 -0.640 0.000 0.709 -0.226 -0.727 2.215 0.543 1.360 1.720 0.000 0.981 0.326 -0.429 3.102 0.842 0.839 0.988 0.882 0.311 0.754 0.792 +0 2.559 -0.021 -1.615 2.095 -1.335 1.720 -0.641 0.033 2.173 0.737 -0.414 -0.379 0.000 1.158 -0.598 -1.608 2.548 0.847 1.549 0.847 0.000 0.980 0.951 1.004 0.748 1.751 1.606 1.295 +1 1.925 -0.859 1.353 1.769 -1.452 0.756 -0.342 -0.809 2.173 1.734 -0.850 0.151 0.000 0.944 -0.376 0.932 0.000 0.606 0.624 -1.039 0.000 0.964 0.931 1.474 1.062 0.530 0.907 0.819 +1 1.545 0.059 -1.732 1.034 0.807 2.467 -1.237 -0.565 0.000 1.933 2.370 -1.639 0.000 3.921 -0.645 0.727 2.548 1.843 -0.219 -0.527 3.102 2.292 2.692 1.319 1.447 1.914 3.176 2.387 +0 1.200 -1.018 -1.173 0.845 -0.439 0.601 -0.814 1.627 0.000 0.706 -1.103 0.845 0.000 1.111 -0.536 0.424 2.548 1.038 -0.456 -0.630 3.102 0.923 0.890 0.990 0.887 0.667 0.658 0.694 +0 0.609 -0.521 0.287 0.650 0.198 0.511 1.237 -0.670 2.173 0.648 -1.193 -1.686 2.215 0.364 1.444 0.064 0.000 0.451 1.152 0.677 0.000 0.433 0.925 0.983 0.770 1.497 0.925 0.731 +0 0.318 -1.381 -0.250 2.482 0.957 1.383 0.001 -0.222 2.173 1.045 -1.565 1.525 2.215 0.904 2.253 1.645 0.000 1.349 -0.541 -1.383 0.000 0.992 2.146 1.091 0.821 2.375 2.313 2.267 +1 0.947 -0.329 -0.033 0.020 -1.381 1.245 0.865 0.799 2.173 1.130 -0.013 -1.688 0.000 1.371 0.681 -0.931 0.000 0.982 0.958 0.019 0.000 1.001 0.587 0.525 0.860 0.892 0.820 0.697 +0 1.147 0.502 -1.131 1.237 -1.061 0.869 0.812 0.520 0.000 1.011 0.808 1.346 2.215 0.635 1.284 -0.138 0.000 0.538 0.612 0.124 3.102 0.848 0.987 0.993 0.677 0.595 0.704 0.778 +1 1.028 -0.732 1.243 1.198 -0.032 0.756 -1.491 1.404 0.000 1.343 -1.475 -0.263 2.215 0.483 -2.591 1.686 0.000 0.707 -0.687 -1.342 1.551 0.831 0.686 1.402 1.093 0.791 0.829 0.856 +1 0.303 1.225 0.629 1.256 -0.602 0.897 0.529 0.974 2.173 0.913 -0.667 -0.299 2.215 0.991 0.560 1.376 0.000 0.534 -1.176 -0.672 0.000 0.771 1.006 0.988 0.700 1.491 0.876 0.757 +0 0.534 -0.766 -0.533 0.974 -1.501 0.797 -1.574 0.323 2.173 1.137 0.271 -0.998 2.215 2.434 2.003 1.210 0.000 1.956 0.216 -0.272 0.000 3.588 2.573 0.989 1.251 1.990 2.742 2.023 +0 0.459 -1.448 -0.858 0.262 -0.304 0.760 1.090 -0.338 2.173 1.076 -1.079 1.151 2.215 0.357 -0.614 1.522 0.000 0.506 1.609 -1.293 0.000 0.842 0.866 0.988 0.935 2.209 1.120 0.920 +0 1.076 1.912 -0.667 0.618 -0.665 0.496 -1.524 1.127 0.000 0.944 -0.870 0.103 2.215 0.935 1.243 1.271 2.548 1.235 -0.512 -1.578 0.000 0.961 1.036 0.975 0.872 1.634 1.178 1.285 +1 0.442 1.823 -1.466 0.988 -1.565 1.444 -2.428 0.846 0.000 2.252 0.525 -0.141 1.107 2.366 0.328 -1.663 0.000 1.064 -0.091 -0.788 0.000 0.657 0.900 0.991 0.834 1.460 1.053 0.845 +1 0.575 -0.588 1.555 0.501 0.137 0.407 -1.782 1.262 0.000 0.348 -1.980 0.111 0.000 0.942 -0.695 -1.028 2.548 0.607 0.406 -0.667 3.102 0.695 0.884 0.987 0.705 0.428 0.634 0.590 +0 0.999 1.633 1.532 1.019 -0.793 0.613 -0.171 1.109 1.087 0.817 0.619 0.904 0.000 1.225 0.506 -0.244 0.000 1.189 1.033 0.553 0.000 0.992 0.948 1.211 1.278 0.973 1.015 0.924 +1 1.175 -0.643 0.099 1.273 -0.627 0.584 -0.133 -1.130 0.000 0.561 
0.226 1.221 0.000 1.565 1.090 1.382 2.548 0.522 0.666 0.624 0.000 0.936 1.043 1.030 0.500 1.077 1.064 0.882 +0 0.733 -0.490 1.685 2.278 1.609 1.372 -1.278 -0.212 0.000 1.102 0.960 1.197 2.215 1.219 -0.308 -0.175 2.548 0.483 -0.242 -0.916 0.000 0.982 0.782 0.988 1.978 1.458 1.476 1.445 +1 1.792 -0.344 0.136 0.841 -0.813 1.685 0.625 1.499 0.000 0.548 0.587 -1.315 0.000 0.806 2.248 -0.160 0.000 1.011 1.329 -0.285 3.102 1.160 0.878 1.283 1.102 0.299 0.793 1.010 +1 0.641 1.633 0.001 1.118 1.010 1.013 0.750 1.516 0.000 1.438 0.526 0.358 2.215 1.649 0.175 -0.915 0.000 1.605 -0.493 -0.864 1.551 0.845 0.645 0.987 0.815 1.472 1.009 0.965 +0 0.442 0.276 0.929 1.638 -1.072 1.752 0.460 -0.802 2.173 1.436 -2.551 0.752 0.000 1.424 0.493 0.587 0.000 1.545 0.634 1.463 3.102 0.521 0.675 1.148 0.917 1.574 1.078 0.926 +1 1.152 0.873 -1.400 0.290 -0.264 0.831 0.373 -0.288 0.000 1.157 0.599 0.723 2.215 1.550 0.878 1.527 1.274 1.283 0.871 -0.714 0.000 0.798 1.181 0.988 0.758 0.975 0.987 0.872 +0 0.546 0.444 -0.292 1.429 -1.480 1.474 0.659 -1.104 2.173 2.622 0.481 0.538 0.000 0.685 -0.777 1.058 2.548 0.564 -1.013 -1.035 0.000 0.413 1.265 1.073 0.854 1.565 0.917 0.799 +1 1.274 -0.150 -0.628 1.824 -0.101 2.833 1.929 -1.628 0.000 1.361 0.040 0.111 2.215 2.690 0.230 0.574 1.274 0.776 0.382 -1.153 0.000 2.074 3.255 0.990 1.344 0.851 2.496 2.299 +1 0.625 -0.506 1.263 0.814 -1.314 1.228 -0.925 -0.091 0.000 1.217 0.430 1.588 2.215 0.976 0.010 -0.291 2.548 0.518 -1.251 0.127 0.000 0.921 0.750 0.986 0.647 1.177 1.064 0.929 +0 0.667 1.941 -0.188 0.446 0.506 1.049 0.577 1.737 1.087 1.508 0.766 -0.323 2.215 0.930 0.075 1.093 0.000 0.677 -0.442 -0.886 0.000 0.930 1.235 0.988 0.754 1.785 1.221 1.047 +1 1.864 0.056 -0.290 0.550 0.224 0.604 0.555 0.877 0.000 1.060 -0.375 1.727 2.215 0.824 -1.420 -0.485 0.000 0.817 0.925 1.318 0.000 0.510 0.916 0.990 0.821 0.441 0.842 0.785 +0 0.732 -0.712 -0.454 0.451 -0.392 1.167 0.448 0.949 2.173 0.920 0.120 1.609 0.000 0.926 1.528 -0.666 2.548 0.615 0.689 -0.687 0.000 0.930 0.983 0.987 1.117 1.539 0.967 0.852 +1 1.065 -0.611 -0.375 1.116 0.990 0.582 -1.434 -0.946 0.000 0.986 -0.550 -1.030 0.000 1.145 1.286 0.130 0.000 1.169 0.648 1.056 3.102 0.936 0.946 1.424 0.845 0.724 0.728 0.717 +1 0.910 -1.631 -0.125 1.964 -0.646 1.310 -0.927 1.357 2.173 0.445 -0.372 0.368 0.000 1.188 -1.481 0.595 0.000 1.407 -0.139 -1.529 3.102 0.984 0.993 0.996 1.619 0.930 1.159 0.979 +0 0.512 0.589 -1.486 0.552 -0.637 0.439 -0.923 -0.210 2.173 1.266 0.445 1.368 2.215 0.366 0.425 -0.052 0.000 0.641 -0.054 0.686 0.000 0.360 0.633 0.983 0.645 1.362 0.814 0.639 +1 1.377 -0.587 -0.869 1.735 -1.399 0.433 -0.277 0.236 2.173 0.921 0.321 1.152 1.107 0.330 -0.051 1.366 0.000 1.935 -2.212 0.028 0.000 0.635 0.758 0.988 0.980 0.740 0.923 0.794 +1 1.825 0.661 -0.885 1.030 0.833 1.565 2.020 -0.009 0.000 1.341 0.817 1.398 1.107 1.286 0.089 -1.706 0.000 1.295 1.032 -1.295 0.000 1.000 0.904 1.900 1.043 0.663 0.883 0.810 +1 1.477 0.870 0.367 0.643 0.024 0.425 0.141 0.632 0.000 1.340 0.221 -1.515 0.000 0.334 0.049 -1.312 2.548 1.172 1.080 -1.022 3.102 1.499 1.109 0.984 0.654 0.340 0.633 0.750 +1 1.074 -0.203 0.943 1.242 -1.727 0.952 -0.813 -0.239 2.173 0.629 -1.616 1.494 0.000 0.759 -0.793 -1.276 2.548 0.668 -0.085 -0.832 0.000 0.921 0.765 1.075 0.735 0.852 0.866 0.765 +1 0.652 0.084 -0.285 0.344 -0.839 1.105 0.260 1.644 2.173 0.700 0.765 -0.311 1.107 0.762 1.143 0.745 0.000 0.977 1.361 0.130 0.000 0.532 1.219 0.991 0.562 1.316 0.871 0.769 +1 1.748 -1.259 -1.568 1.159 -1.308 2.531 -0.895 -0.116 2.173 1.097 -0.529 1.515 1.107 1.602 0.505 
1.042 0.000 0.954 -0.732 -1.359 0.000 1.553 1.095 0.985 2.288 2.479 1.717 1.644 +1 0.653 0.816 1.491 1.173 0.353 0.999 0.795 0.099 2.173 1.032 1.716 -0.995 0.000 1.052 0.893 -1.388 0.000 1.044 -0.757 -1.378 0.000 0.849 1.122 1.037 0.773 1.037 1.016 0.879 +1 0.603 -1.305 -0.295 1.986 -0.397 1.038 0.458 1.221 2.173 0.430 0.015 1.719 2.215 0.470 0.031 -0.543 0.000 0.524 -1.371 0.515 0.000 0.682 1.045 0.984 1.363 0.480 1.875 1.364 +0 0.510 -0.400 1.364 1.352 -0.990 0.630 -0.448 0.685 2.173 0.594 -0.795 -0.770 2.215 0.600 0.602 0.801 0.000 0.456 -0.936 1.413 0.000 0.659 0.725 0.988 0.901 0.886 0.668 0.599 +1 0.664 -0.216 0.435 1.156 1.437 1.839 -2.034 0.306 0.000 2.575 0.989 -1.165 2.215 1.506 1.083 -1.623 0.000 0.631 0.661 0.674 3.102 0.839 0.945 0.988 0.541 1.154 0.998 0.837 +1 1.436 1.090 0.733 0.278 -0.823 2.421 1.483 0.320 0.000 2.447 -1.403 -1.503 2.215 2.000 2.287 -1.506 0.000 2.205 1.306 -0.221 0.000 1.660 2.246 0.983 2.974 1.665 3.841 2.825 +1 0.709 0.850 0.672 0.949 -1.138 1.241 0.417 1.582 2.173 0.957 0.470 -0.037 2.215 0.877 0.102 0.661 0.000 1.705 1.461 -0.759 0.000 0.972 0.856 1.134 0.950 1.595 1.049 0.923 +0 1.135 0.285 -1.109 1.089 -0.896 1.103 0.127 0.964 0.000 0.731 -0.489 0.048 2.215 0.754 0.464 0.380 0.000 0.715 -1.183 -0.956 1.551 0.883 0.926 0.987 1.058 0.600 0.887 0.971 +1 1.124 0.354 0.040 1.132 1.620 0.956 1.375 0.416 0.000 1.543 0.437 -0.805 2.215 1.724 1.678 -1.636 0.000 2.128 -0.175 1.562 0.000 0.852 1.251 1.546 0.743 0.139 0.718 0.746 +1 0.341 -1.223 -1.373 0.994 0.692 1.086 0.319 -1.186 0.000 1.213 1.562 0.163 2.215 1.057 0.491 1.657 2.548 0.565 1.305 0.426 0.000 1.430 0.975 0.988 1.257 1.353 1.040 0.963 +0 1.218 -0.308 -1.602 1.532 -1.007 0.556 -0.059 0.820 2.173 0.840 -1.431 0.502 0.000 0.463 -0.801 -0.215 2.548 0.407 -1.488 0.811 0.000 0.627 0.812 0.989 0.704 0.573 0.709 0.765 +1 0.352 -1.440 0.063 0.644 1.519 1.138 0.660 1.460 2.173 1.127 -0.034 -0.520 0.000 0.931 0.095 0.137 0.000 0.496 0.796 -0.591 3.102 0.883 0.568 0.995 0.906 0.773 0.907 0.786 +1 0.637 0.994 1.198 0.755 1.183 0.646 -1.285 0.844 0.000 1.288 0.139 -1.166 2.215 0.826 -1.664 -0.400 0.000 0.702 0.662 -0.712 0.000 0.925 0.712 1.001 0.989 0.705 0.810 0.732 +0 0.802 -0.507 0.519 1.249 -1.030 1.596 0.474 0.732 0.000 1.052 -0.734 -1.455 0.000 1.868 0.130 -0.997 2.548 0.906 -0.195 -0.393 3.102 0.782 0.968 1.366 0.968 0.548 1.045 0.989 +0 1.178 -1.468 -0.931 1.113 0.177 0.710 -1.096 0.586 2.173 0.659 0.755 1.437 0.000 0.792 -1.703 -0.403 0.000 1.131 -0.379 -1.623 3.102 0.736 0.788 1.333 0.960 0.921 0.798 0.689 +1 0.775 -1.014 -0.295 0.804 -1.630 0.743 -0.163 1.621 2.173 0.559 -2.107 -1.004 0.000 0.450 -1.627 0.467 0.000 0.984 0.124 0.343 3.102 0.761 0.978 1.020 0.841 0.839 0.853 0.737 +0 0.928 -0.440 -0.658 1.570 -1.652 1.047 0.218 -0.527 2.173 0.696 -1.161 1.125 0.000 1.395 -0.149 0.858 0.000 0.653 0.972 0.477 1.551 0.890 0.900 1.304 1.137 0.811 0.990 0.961 +0 0.812 -0.292 0.794 0.639 -0.177 1.133 -0.240 -0.137 1.087 1.422 -0.659 1.663 2.215 1.218 0.176 -1.177 0.000 1.184 0.265 1.585 0.000 0.903 0.981 0.995 0.817 1.910 1.048 0.878 +1 1.263 -0.147 -1.176 1.318 -1.711 1.417 0.788 -0.284 2.173 1.114 -2.680 0.734 0.000 0.449 1.895 1.423 0.000 0.646 -0.064 1.098 1.551 6.084 3.149 0.989 1.753 1.062 2.739 2.117 +0 1.277 -0.164 0.024 0.660 -1.226 0.841 -0.443 1.136 2.173 0.788 0.152 -1.739 2.215 0.597 -0.002 -0.640 0.000 0.701 2.486 -1.042 0.000 1.528 1.286 1.148 0.997 0.725 1.191 1.061 +0 1.447 -1.048 1.596 1.251 -1.497 1.650 -0.449 -0.184 0.000 1.313 -0.575 1.523 2.215 0.635 -1.123 0.221 2.548 0.422 0.784 
1.142 0.000 1.526 0.981 0.986 0.612 0.949 1.060 1.045 +1 0.923 0.825 -1.263 0.812 -0.403 0.916 0.648 1.435 2.173 0.737 1.715 -0.442 0.000 1.649 1.150 0.529 0.000 1.227 -1.071 -1.294 0.000 0.923 0.931 0.989 0.829 0.607 0.739 0.695 +0 0.863 0.002 -0.218 1.329 -1.344 0.364 1.067 -0.937 0.000 0.431 -0.404 1.713 0.000 0.611 -0.045 0.202 2.548 1.152 -0.293 1.027 0.000 0.897 0.744 1.260 0.757 0.171 0.557 0.563 +0 0.621 0.645 0.474 0.697 0.467 1.350 2.065 -1.640 0.000 1.298 0.237 -0.505 2.215 1.846 0.267 0.252 0.000 2.003 -0.829 1.240 0.000 1.049 1.014 0.994 1.248 0.588 0.902 1.115 +1 0.664 0.845 1.330 0.592 -0.368 1.532 0.473 -1.002 0.000 1.232 1.295 1.302 1.107 1.087 2.506 0.963 0.000 2.042 0.245 0.262 0.000 0.706 1.086 0.987 0.617 1.044 0.747 0.678 +1 1.302 -0.364 0.124 0.147 1.571 0.709 -1.391 0.877 0.000 0.720 0.189 -0.711 0.000 0.985 -0.285 -1.221 1.274 1.175 0.418 1.662 3.102 2.017 1.317 0.990 0.784 0.547 0.909 0.824 +0 1.073 1.932 -1.676 0.803 0.434 1.031 0.949 -1.336 2.173 1.064 0.704 0.255 2.215 0.560 -0.953 -0.640 0.000 1.275 -0.461 0.544 0.000 0.848 0.988 1.216 1.111 1.537 1.088 1.172 +1 0.415 1.473 -0.724 0.829 1.331 0.606 0.368 -0.583 0.000 0.600 -0.576 0.055 1.107 0.394 1.805 0.495 0.000 1.022 -0.054 -0.292 1.551 0.837 0.722 0.991 1.183 0.288 1.121 0.948 +1 1.565 0.915 -1.256 0.485 1.501 0.549 1.244 -0.414 0.000 1.305 1.168 0.520 2.215 0.468 -0.242 -0.108 2.548 0.459 0.355 0.876 0.000 0.920 0.893 0.986 0.723 0.801 0.836 0.726 +0 0.684 0.813 -1.557 0.770 0.439 1.436 0.458 0.763 2.173 0.908 -0.660 -1.353 2.215 1.145 2.230 -1.641 0.000 1.706 -0.599 -0.662 0.000 0.705 0.853 0.988 0.890 1.882 1.347 1.173 +1 0.818 -0.868 0.519 0.705 0.776 1.202 0.951 -1.058 2.173 0.606 0.685 1.517 2.215 0.989 0.391 0.207 0.000 1.054 0.289 0.975 0.000 0.720 0.695 0.993 1.427 0.933 0.971 0.842 +1 0.847 -0.389 0.605 0.414 -0.117 0.884 0.391 1.665 2.173 0.436 -0.325 -0.601 0.000 0.765 -1.419 0.035 0.000 0.418 0.291 1.276 0.000 0.889 0.796 0.978 1.225 0.682 0.863 0.774 +0 0.823 1.163 1.226 1.047 0.010 0.742 1.415 -1.439 2.173 0.798 -1.929 1.047 0.000 0.402 1.814 -1.194 0.000 1.063 -0.775 -0.249 3.102 0.841 0.844 1.144 0.947 1.613 1.691 1.591 +0 0.964 0.029 -0.104 0.350 0.913 0.730 0.093 -1.261 2.173 0.442 1.332 -1.143 2.215 0.661 2.238 0.373 0.000 2.534 1.524 1.156 0.000 1.033 0.915 0.987 0.883 0.578 1.050 0.935 +1 1.240 0.152 0.496 0.770 -0.247 0.771 2.734 -0.009 0.000 0.775 1.245 1.534 0.000 1.190 -0.475 1.513 2.548 1.941 0.445 -1.070 3.102 2.091 1.879 0.990 0.922 1.061 1.663 1.504 +1 0.617 1.431 -1.119 0.798 -0.421 1.166 0.250 -0.175 2.173 1.347 0.837 1.512 0.000 0.416 1.295 1.737 0.000 0.500 0.890 -0.447 0.000 0.801 0.700 0.993 0.812 1.091 1.019 0.885 +1 0.445 -1.120 -1.676 0.837 0.217 1.127 -0.084 -0.095 2.173 0.933 0.224 1.700 0.000 0.526 -0.430 1.275 0.000 0.767 0.662 1.526 0.000 0.557 1.286 0.983 0.931 0.728 0.952 0.939 +1 1.349 -1.565 -0.602 1.033 -1.097 1.380 -0.398 0.928 2.173 0.385 0.596 1.480 0.000 0.600 -0.880 -1.298 0.000 1.038 -0.545 -0.047 0.000 0.982 1.003 0.977 0.661 0.848 1.050 0.904 +1 0.524 -1.114 1.010 0.394 1.008 0.602 -1.107 0.556 2.173 1.212 -0.314 -1.062 2.215 0.666 -0.798 -0.794 0.000 0.464 -1.154 -1.287 0.000 0.307 0.675 0.975 0.940 1.351 0.836 0.680 +1 1.215 0.744 0.537 0.534 -1.342 1.031 0.398 -1.499 2.173 0.964 1.233 0.606 0.000 0.837 1.197 -1.060 2.548 0.521 -2.332 -0.709 0.000 0.617 1.543 1.108 0.994 0.706 1.188 1.068 +0 0.348 -1.520 0.332 0.486 -1.136 1.203 -0.862 -1.639 0.000 0.895 -1.623 -1.204 2.215 1.856 -0.118 0.433 2.548 0.685 -1.014 0.913 0.000 0.935 0.794 0.985 0.818 
1.792 1.106 0.912 +1 1.599 -1.367 -1.364 0.376 0.956 0.414 -0.288 0.354 0.000 0.710 0.691 -0.085 2.215 0.572 -0.480 1.130 0.000 0.617 0.713 -1.179 3.102 0.573 0.700 0.988 0.850 0.500 0.873 0.743 +1 1.038 1.267 -0.809 1.920 -1.217 2.402 -1.935 0.666 0.000 1.667 0.815 -1.069 0.000 1.486 2.160 -1.727 0.000 2.174 0.159 -0.459 1.551 0.702 0.889 0.989 1.392 0.525 1.130 1.038 +1 1.040 -0.629 -1.578 0.873 1.250 1.270 -0.737 -0.440 1.087 0.587 -1.312 -1.565 0.000 1.391 -0.061 0.561 2.548 0.701 -1.555 1.379 0.000 0.449 1.047 0.985 1.287 1.419 1.071 0.910 +0 0.488 -1.191 1.315 0.574 -0.503 0.440 0.489 1.144 1.087 0.723 0.911 1.517 0.000 1.571 0.206 -0.050 2.548 0.838 0.142 -0.965 0.000 0.892 1.049 0.988 1.256 0.923 1.148 1.103 +1 0.395 1.580 -0.387 1.730 1.123 0.801 -0.316 -0.692 2.173 0.687 0.675 1.167 1.107 0.557 -0.132 0.231 0.000 0.462 0.820 -1.510 0.000 0.884 0.829 1.120 1.598 1.230 1.081 0.927 +0 0.368 -0.881 0.232 1.259 1.729 0.726 0.981 -0.303 0.000 0.540 2.865 0.353 0.000 1.324 -0.082 1.687 1.274 0.819 1.872 -0.461 0.000 0.807 0.757 0.987 0.882 0.625 0.870 1.198 +1 0.932 0.790 1.624 0.789 -0.636 0.646 -0.122 1.068 1.087 0.691 0.694 -1.189 2.215 0.854 2.067 -0.058 0.000 1.282 0.913 0.298 0.000 0.805 0.995 1.062 0.888 0.975 0.943 0.820 +1 0.876 -0.059 -0.512 0.623 1.056 0.787 0.152 -0.092 0.000 1.525 0.151 -1.709 2.215 0.951 0.673 0.986 1.274 0.770 1.160 -0.496 0.000 0.892 0.926 1.011 0.912 0.918 0.946 0.803 +0 1.861 -0.667 -0.277 1.997 -0.758 0.721 -1.048 1.412 0.000 0.659 -1.964 1.670 0.000 1.140 0.067 0.124 2.548 1.639 -1.440 1.187 0.000 0.807 0.936 1.121 0.968 0.639 0.911 1.058 +0 0.331 1.639 1.172 0.843 1.560 0.537 -0.534 -1.733 0.000 0.811 -0.430 -0.969 0.000 2.548 0.670 0.310 2.548 1.293 0.781 -0.813 3.102 0.895 0.947 0.999 1.144 1.184 1.131 0.959 +1 0.378 -1.116 -0.289 1.450 0.283 0.884 0.966 -1.630 2.173 0.369 1.390 0.267 0.000 0.825 0.734 -0.766 2.548 0.383 0.864 1.276 0.000 0.399 0.669 0.986 1.920 0.753 2.084 1.657 +0 1.399 0.506 1.676 1.639 1.095 1.759 1.332 -1.423 2.173 1.874 0.352 -0.292 2.215 2.006 0.346 0.377 0.000 1.793 0.421 0.028 0.000 0.659 0.947 1.048 1.462 2.654 1.669 1.487 +0 1.763 -1.140 0.063 0.717 -0.207 1.013 -0.222 -1.663 0.000 0.689 -1.374 1.153 1.107 0.649 0.861 -1.090 1.274 0.903 -0.892 -1.149 0.000 0.913 0.969 0.992 1.084 1.228 0.926 0.942 +0 0.598 -1.099 -0.297 0.887 1.681 1.774 -0.119 -0.024 2.173 1.206 -0.239 -1.702 0.000 1.209 0.951 -1.688 1.274 0.810 0.557 1.213 0.000 0.879 0.810 0.987 1.298 2.139 1.309 1.136 +1 0.685 0.643 -1.637 0.794 0.241 0.645 -1.297 -0.422 2.173 0.442 -0.656 1.553 0.000 0.661 -0.569 1.007 2.548 1.208 -0.553 0.371 0.000 0.859 0.709 1.014 0.691 0.831 0.802 0.688 +0 0.381 -0.498 -0.126 0.379 -0.311 0.544 0.665 1.639 2.173 0.913 -0.448 -0.753 2.215 0.277 -0.388 -1.183 0.000 0.723 0.095 0.255 0.000 0.494 0.590 0.984 0.889 1.063 0.753 0.598 +1 0.490 -2.359 -0.439 0.467 -0.508 1.020 -1.376 -1.350 0.000 0.414 0.153 -0.294 0.000 1.583 0.262 0.810 2.548 1.155 -0.548 0.533 1.551 1.628 1.221 0.985 1.198 0.568 1.080 0.939 +0 1.974 -0.520 -0.274 1.046 -0.317 1.240 0.975 1.141 1.087 1.082 0.029 -1.722 0.000 0.638 0.247 0.819 0.000 0.732 0.334 -1.273 3.102 0.971 1.057 1.002 0.898 0.881 1.491 1.227 +1 0.464 -2.190 0.863 1.148 -0.355 0.479 0.768 0.208 2.173 0.599 -1.474 -1.680 0.000 0.808 -0.366 -1.517 2.548 0.430 -0.155 0.625 0.000 0.747 1.047 0.990 0.857 0.913 0.925 0.783 +0 3.524 0.413 1.358 0.502 1.397 1.182 0.445 -0.197 2.173 2.001 0.849 -0.630 0.000 1.133 -0.419 1.143 2.548 0.681 0.639 -0.851 0.000 0.311 0.811 0.987 0.708 1.509 1.232 1.297 +1 
1.032 -0.978 0.678 0.604 -1.027 0.743 -1.006 -0.794 1.087 1.060 -0.218 1.430 2.215 1.286 0.382 0.398 0.000 0.456 1.251 -0.015 0.000 0.570 1.009 1.093 0.820 1.298 0.992 0.883 +1 0.796 -0.391 0.418 1.192 -1.625 0.721 0.456 -0.743 0.000 0.782 0.667 0.955 2.215 0.757 1.490 -0.856 0.000 1.124 -0.781 0.166 3.102 0.838 1.091 1.301 0.825 0.944 0.923 0.897 +1 0.583 1.687 -0.278 0.980 1.532 0.654 2.241 0.179 0.000 0.916 0.640 -1.499 2.215 0.730 0.337 -0.457 2.548 0.398 1.153 0.935 0.000 0.604 0.858 1.045 0.820 0.713 0.803 0.722 +0 1.212 0.266 0.342 0.968 0.934 0.798 -0.127 -1.372 2.173 0.661 -0.117 -0.256 0.000 0.689 -0.790 -0.920 1.274 0.748 -0.763 1.364 0.000 0.985 0.905 0.993 0.855 0.506 0.797 0.703 +0 2.110 0.969 0.013 0.566 -1.518 2.075 -1.121 1.561 0.000 1.281 0.153 -0.573 2.215 0.664 -0.617 1.216 0.000 0.524 -2.282 0.079 0.000 0.971 0.762 1.487 1.092 0.546 0.840 1.086 +0 1.006 0.503 -0.135 0.709 0.959 0.661 1.545 -0.028 1.087 0.637 2.118 -1.141 0.000 0.605 1.485 1.235 1.274 1.753 -0.224 -1.704 0.000 2.243 1.345 0.988 0.800 0.716 1.039 0.913 +0 0.628 -0.317 -1.678 0.587 -0.446 1.237 0.947 1.659 2.173 1.313 0.604 0.225 0.000 1.009 -2.638 -1.139 0.000 0.412 1.205 0.642 3.102 5.249 3.002 0.979 1.526 0.629 2.510 1.855 +1 0.464 -1.107 -0.410 0.525 1.402 0.962 -1.204 -0.155 2.173 0.769 -0.136 -1.737 0.000 0.774 0.955 -1.494 0.000 0.918 2.343 0.106 0.000 0.789 0.724 0.984 0.914 1.006 1.024 1.087 +1 1.031 1.057 -0.080 1.233 -0.598 0.590 -0.332 -1.222 2.173 0.506 -0.231 0.536 0.000 1.498 0.105 1.593 0.000 1.397 1.207 0.338 0.000 0.963 0.824 0.990 1.114 0.507 0.998 1.061 +0 0.990 0.753 1.671 0.664 -0.394 0.730 -0.244 -0.925 2.173 0.966 0.689 0.358 2.215 0.345 2.219 1.238 0.000 0.618 0.003 0.968 0.000 0.795 1.088 1.076 0.836 1.287 0.824 0.731 +0 2.066 0.020 0.385 1.332 0.659 1.167 0.934 -1.575 1.087 0.818 0.447 -1.135 1.107 0.788 -2.658 -0.248 0.000 0.462 1.233 -0.762 0.000 2.981 2.199 0.974 1.680 0.658 1.993 1.799 +0 0.872 0.717 1.067 0.425 -1.153 0.856 -1.057 0.204 0.000 1.407 -1.190 -1.741 2.215 1.543 -1.330 -0.421 2.548 0.617 0.194 0.694 0.000 0.907 0.986 0.987 1.742 1.465 1.549 1.312 +0 0.625 0.276 0.272 2.293 0.929 0.991 0.450 1.653 2.173 0.437 -0.252 -1.166 0.000 0.645 -1.489 -0.402 0.000 2.805 0.934 -0.404 3.102 0.901 0.978 0.990 1.051 1.797 1.183 0.965 +1 1.067 -0.284 0.666 0.562 -0.683 0.824 -0.122 -0.604 0.000 1.345 0.099 1.294 2.215 0.612 -1.449 1.524 2.548 1.019 -1.009 -0.562 0.000 0.802 0.958 1.006 0.869 0.939 0.980 0.830 +1 0.471 0.532 1.703 0.764 -1.341 0.834 -0.732 -0.231 0.000 1.166 -0.564 1.506 1.107 1.265 -1.159 0.236 2.548 0.599 -2.162 -1.436 0.000 1.498 1.029 0.996 0.689 1.261 0.980 0.866 +0 0.923 1.977 0.963 0.348 -1.165 0.964 1.353 0.241 2.173 0.877 0.460 -1.350 0.000 0.577 1.670 -0.859 0.000 0.755 -0.619 1.732 3.102 0.928 0.879 0.991 0.829 1.439 0.958 0.842 +1 0.735 1.285 -0.067 0.604 0.201 0.385 -0.484 -1.267 2.173 0.689 0.040 -1.695 0.000 0.531 0.878 -1.477 2.548 0.480 1.001 -0.409 0.000 0.889 0.662 0.985 0.687 0.458 0.590 0.566 +1 0.470 0.019 -0.417 1.425 0.555 1.138 -0.645 -1.303 2.173 0.393 -1.135 0.464 1.107 0.640 -1.579 -0.002 0.000 0.969 -0.878 1.387 0.000 0.878 1.064 0.982 0.778 1.016 1.005 0.918 +1 0.690 -0.866 -1.684 0.493 0.323 0.739 1.529 -0.242 0.000 1.197 0.469 -1.467 2.215 0.924 1.691 0.195 0.000 0.929 -0.031 1.188 3.102 0.818 0.932 0.985 1.281 0.695 0.857 1.106 +1 1.052 0.423 -0.511 1.364 1.328 1.202 -0.309 -1.602 2.173 0.932 0.014 -0.157 0.000 0.499 0.470 1.229 2.548 1.654 -0.738 0.246 0.000 0.947 0.864 1.654 1.248 0.676 0.973 0.956 +1 0.675 -0.414 0.782 0.170 
1.003 1.559 1.024 1.680 0.000 2.118 0.665 -0.359 1.107 0.864 0.753 1.272 0.000 0.828 -1.272 -0.183 0.000 0.803 0.914 0.989 1.107 1.024 0.875 0.802 +1 1.645 -0.421 -0.496 0.371 -0.958 1.178 -0.788 1.118 2.173 0.568 -0.388 -1.170 0.000 0.632 -0.688 0.177 1.274 0.485 -0.146 -1.556 0.000 0.245 0.880 0.979 0.657 0.807 0.925 0.743 +1 0.353 1.209 0.993 0.777 0.593 0.734 -0.475 0.589 1.087 0.994 0.301 -1.078 0.000 0.716 1.057 -0.503 0.000 0.625 0.274 -1.448 0.000 0.950 0.804 0.996 0.760 0.903 0.613 0.587 +1 0.424 0.476 1.372 2.397 -1.284 0.818 -0.252 0.083 2.173 1.090 0.366 0.771 2.215 0.430 0.300 -0.351 0.000 0.523 -1.048 -1.537 0.000 0.650 0.811 0.989 1.260 0.922 1.038 0.816 +0 1.330 1.084 0.755 0.416 0.198 0.500 0.181 0.414 0.000 0.819 -0.310 -1.592 2.215 1.121 0.477 -0.994 2.548 1.493 0.929 -0.537 0.000 0.728 0.752 0.990 1.000 0.684 0.822 0.842 +1 1.274 -1.011 -0.817 0.246 -0.339 0.754 -0.031 0.790 2.173 0.961 0.020 -1.485 2.215 0.323 0.139 0.422 0.000 0.435 -2.003 1.055 0.000 0.721 0.887 0.989 1.027 1.111 0.829 0.731 +0 1.045 -1.383 -0.047 0.145 0.425 1.032 -0.769 1.020 0.000 1.052 -1.520 -0.524 2.215 1.971 0.564 -1.104 2.548 1.216 0.284 1.074 0.000 0.969 1.347 0.980 1.303 2.163 1.358 1.110 +1 0.965 0.728 -0.924 0.797 -1.577 0.774 -0.019 0.805 0.000 0.989 -0.081 0.423 2.215 1.061 0.606 -1.266 2.548 0.889 -1.991 0.894 0.000 1.884 1.313 0.997 0.564 1.165 1.148 1.047 +0 1.842 -0.780 -0.583 1.509 -1.214 0.591 -1.562 0.471 0.000 0.575 -0.674 0.889 0.000 0.701 -1.324 0.855 0.000 1.099 -0.173 -1.407 3.102 1.034 0.858 1.243 0.798 0.324 0.578 0.770 +1 0.832 1.305 1.098 1.187 0.502 0.977 2.932 0.745 0.000 1.699 0.550 -1.112 0.000 1.104 0.231 -0.695 0.000 0.945 -0.453 -1.724 3.102 0.839 0.901 0.983 1.166 0.555 1.017 1.085 +1 0.399 0.185 -1.260 1.759 -0.670 0.969 1.055 0.589 0.000 1.884 -0.363 1.360 0.000 1.489 -1.122 -0.162 2.548 1.935 -0.867 0.686 3.102 2.311 1.624 0.994 0.778 0.906 1.241 1.116 +1 0.694 -0.023 0.247 0.844 0.997 0.852 -0.371 1.057 1.087 0.578 -0.083 -0.531 0.000 1.575 -0.947 -0.407 0.000 1.338 -0.710 -1.604 3.102 0.745 0.934 0.982 0.675 0.811 0.897 0.792 +1 0.351 -1.542 -1.212 1.485 1.533 0.972 -0.071 0.116 0.000 1.430 0.320 -1.621 2.215 0.872 -0.511 -0.508 1.274 0.505 1.521 0.384 0.000 0.788 0.976 0.987 1.314 1.140 1.776 1.579 +1 0.878 -0.497 0.312 1.068 0.529 1.362 -0.567 -1.019 2.173 0.496 -1.036 0.853 0.000 0.619 -0.368 1.506 2.548 0.800 0.672 0.349 0.000 0.956 0.676 0.978 1.514 0.876 1.003 0.872 +0 0.701 -0.652 0.516 2.586 0.100 0.739 -0.411 -1.204 2.173 0.664 0.324 -1.287 0.000 0.553 -0.857 1.365 0.000 0.451 1.125 -1.626 0.000 0.897 0.681 0.974 0.923 0.659 0.969 0.906 +0 3.291 0.586 -0.841 0.297 -0.460 1.598 -1.229 1.031 0.000 0.792 -0.490 0.765 2.215 0.757 -0.612 -0.369 2.548 0.775 -0.974 1.610 0.000 0.848 1.126 0.988 1.354 0.705 0.929 1.339 +1 1.037 -0.375 -0.433 1.485 -1.244 0.969 0.344 0.853 0.000 0.798 2.616 1.576 0.000 1.285 0.920 0.123 2.548 1.324 -0.055 -0.813 3.102 0.811 1.015 1.146 1.247 0.933 0.831 0.890 +0 0.465 -0.020 0.313 1.280 -1.013 0.857 0.325 1.216 2.173 1.457 -0.428 -0.829 1.107 1.949 -1.452 0.568 0.000 0.747 -1.525 -0.120 0.000 0.789 1.608 0.994 0.965 1.711 1.409 1.169 +0 1.149 0.542 -0.779 0.412 1.454 0.634 0.510 1.730 2.173 0.806 -0.062 0.175 2.215 0.538 -0.076 1.196 0.000 0.484 -0.450 0.622 0.000 0.307 0.525 0.983 0.769 1.082 0.688 0.567 +1 1.224 -1.015 1.297 0.718 0.394 1.455 -0.730 1.726 0.000 2.353 -0.191 0.087 2.215 0.833 0.346 -0.651 2.548 0.856 -1.437 -1.468 0.000 0.982 1.277 0.987 1.243 1.017 1.457 1.187 +0 0.940 0.183 1.100 1.314 0.247 0.533 -0.899 
-0.670 0.000 1.232 0.200 1.522 2.215 1.625 0.124 -0.710 2.548 0.554 -0.782 0.893 0.000 0.819 1.086 1.070 1.061 1.361 0.942 0.861 +1 1.126 -0.754 1.556 1.080 1.743 2.352 -0.618 -0.337 0.000 1.093 -1.694 0.298 0.000 1.396 -1.219 -1.700 0.000 2.313 0.810 1.721 1.551 0.863 0.848 0.985 1.841 0.872 1.406 1.290 +1 0.380 -1.178 0.873 1.237 -1.067 0.855 0.226 0.970 0.000 1.376 -0.159 -0.635 2.215 0.499 1.295 -0.745 0.000 0.804 -0.397 0.357 1.551 1.389 0.915 0.988 0.786 0.756 0.881 0.820 +1 0.793 1.258 -1.374 0.374 0.317 0.564 0.302 1.099 2.173 0.461 2.217 0.043 0.000 0.656 1.539 1.448 0.000 0.836 -0.529 -0.186 3.102 0.950 0.880 0.989 0.700 0.754 0.655 0.600 +1 0.738 -0.628 -1.413 1.892 -0.735 0.694 -0.090 1.403 0.000 1.238 -0.707 0.465 1.107 0.466 0.374 0.372 0.000 0.553 0.863 0.929 3.102 0.860 0.954 0.985 0.983 0.806 0.928 0.860 +1 0.730 -0.971 0.273 1.771 1.435 0.726 -0.923 -0.457 0.000 0.966 0.787 -0.450 0.000 0.788 -0.719 0.811 2.548 0.994 -0.349 -1.088 0.000 0.911 0.946 1.364 0.714 0.529 0.641 0.662 +0 0.669 0.758 -0.462 0.211 0.475 1.541 -0.041 0.031 2.173 2.259 0.322 -1.715 0.000 1.302 -1.141 -1.197 2.548 1.192 2.222 -0.355 0.000 0.899 0.959 0.978 0.815 1.945 1.363 1.107 +1 1.337 0.073 -1.373 0.259 0.715 1.040 0.171 1.256 0.000 1.326 -0.272 0.207 2.215 0.615 -2.401 -0.242 0.000 1.529 0.254 -0.705 3.102 0.934 1.114 0.985 1.032 1.015 0.992 0.864 +0 0.299 -1.604 -0.905 1.220 -0.732 1.313 2.107 1.474 0.000 1.890 -0.639 -0.258 2.215 1.279 1.384 1.245 2.548 1.910 0.672 0.574 0.000 0.975 0.890 0.990 0.905 2.710 1.452 1.214 +0 0.617 -0.042 -1.005 0.378 1.614 0.672 0.984 0.245 1.087 0.383 -0.813 -0.481 0.000 0.847 -1.144 1.130 2.548 0.873 0.749 -1.374 0.000 0.917 0.964 0.989 0.662 1.460 0.958 0.828 +0 0.940 -0.703 1.332 1.113 -0.912 0.504 -0.749 -1.210 2.173 0.601 -0.754 0.402 2.215 0.513 0.242 -0.533 0.000 0.650 -0.944 0.979 0.000 0.789 0.657 1.275 0.839 0.804 0.628 0.563 +0 0.892 1.239 0.822 0.256 -1.493 0.848 0.461 -0.262 1.087 0.330 0.790 0.309 0.000 1.024 1.600 -1.499 0.000 1.017 0.236 1.575 3.102 0.991 1.084 0.978 0.579 0.983 0.758 0.684 +1 0.892 0.780 -1.602 0.373 -0.768 0.603 1.175 -1.077 2.173 0.462 -0.122 1.005 2.215 0.526 -1.449 1.257 0.000 1.674 0.657 0.181 0.000 1.053 0.973 0.985 0.725 0.921 0.780 0.763 +1 0.481 1.083 -1.015 1.993 -0.354 1.192 0.755 1.679 0.000 1.114 -0.428 -0.413 2.215 0.921 0.227 0.266 2.548 2.434 -1.435 0.893 0.000 0.890 1.097 0.983 1.730 0.725 1.186 1.202 +0 1.577 2.017 -1.364 0.560 0.205 0.470 1.358 0.410 0.000 0.607 -0.958 0.699 0.000 1.002 1.173 -0.519 2.548 0.582 0.038 0.567 0.000 0.580 0.687 1.286 0.753 0.661 0.631 0.668 +0 0.517 1.836 0.705 2.437 0.849 0.604 -1.041 -0.422 1.087 0.586 0.199 -0.563 0.000 1.296 -0.246 -1.353 2.548 0.862 1.006 -1.193 0.000 0.694 0.983 0.997 1.394 0.932 1.306 1.065 +1 0.871 0.166 -0.658 1.662 0.113 0.883 -0.065 -1.450 0.000 0.552 0.324 -0.054 2.215 1.552 0.404 0.809 0.000 2.551 -0.779 -1.530 0.000 0.880 1.017 1.068 1.031 0.837 0.759 0.837 +0 1.033 -0.488 -1.260 0.761 0.365 1.177 -1.283 -1.533 2.173 1.050 -0.422 0.232 2.215 0.597 -0.926 1.075 0.000 0.724 -2.056 -0.037 0.000 0.838 0.909 1.222 1.057 1.790 0.996 0.872 +0 2.931 0.181 -0.816 1.290 -1.261 1.699 1.014 1.091 0.000 1.008 1.223 -0.743 1.107 0.968 -0.651 0.752 2.548 1.141 0.309 0.233 0.000 0.856 1.289 1.048 0.972 1.587 1.212 1.367 +1 0.866 -1.386 1.434 0.824 0.737 0.823 0.116 -1.009 2.173 0.318 -1.644 0.333 0.000 0.422 -0.399 0.623 2.548 0.727 -0.996 -0.526 0.000 0.477 0.749 0.978 0.667 0.759 1.033 0.774 +1 0.526 -0.454 -0.254 1.847 0.634 0.835 0.982 -1.610 2.173 0.623 1.186 
-0.714 0.000 0.570 0.919 0.517 0.000 0.857 -0.324 -1.116 3.102 0.824 0.886 0.988 0.791 0.772 1.004 0.914 +0 0.697 -1.126 0.719 0.884 -0.586 0.677 0.291 0.103 0.000 1.430 -0.933 1.128 1.107 0.658 0.649 -0.868 0.000 1.880 0.246 -1.217 1.551 0.955 0.936 1.003 0.926 1.610 1.102 0.977 +1 0.877 0.080 0.958 1.354 -0.028 0.608 -0.044 1.594 0.000 0.897 0.333 0.204 0.000 0.878 2.132 -1.621 0.000 2.168 0.305 -1.249 0.000 1.024 0.774 1.171 0.851 0.682 0.813 0.818 +1 0.838 -0.008 -0.861 0.536 1.464 1.230 0.532 -0.581 2.173 1.309 0.320 1.087 1.107 0.717 0.807 1.713 0.000 0.375 2.376 1.015 0.000 0.739 0.920 0.984 0.842 1.873 1.057 0.870 +1 0.693 0.460 1.485 0.579 0.584 1.160 1.208 -0.611 2.173 0.821 2.256 1.369 0.000 1.488 2.077 0.782 0.000 0.801 -0.015 -0.918 0.000 0.855 1.276 0.993 1.405 0.778 1.178 1.257 +0 0.753 -0.642 0.580 0.686 -0.609 0.760 0.133 -1.106 2.173 0.846 0.890 1.018 1.107 0.564 0.343 0.032 0.000 0.795 -0.282 1.402 0.000 0.748 0.774 0.986 1.185 1.207 0.973 0.776 +1 0.493 -1.340 -1.280 1.537 1.615 1.157 -0.847 -0.157 2.173 0.739 0.345 0.179 2.215 1.269 0.123 0.981 0.000 0.876 -2.150 -1.471 0.000 1.185 0.839 0.996 1.672 0.963 1.468 1.594 +0 0.438 -0.827 1.361 0.702 -1.052 0.949 0.046 0.781 2.173 1.777 -0.999 -1.409 0.000 1.263 -0.628 0.189 2.548 1.393 -0.299 0.000 0.000 2.079 1.526 0.990 0.905 0.864 1.213 1.022 +0 0.585 0.462 -0.588 1.346 0.294 1.039 -0.231 -1.064 2.173 1.147 1.579 1.041 2.215 1.686 -0.438 1.187 0.000 1.818 -0.603 -0.517 0.000 0.910 0.941 0.989 1.323 2.294 1.598 1.284 +1 0.307 -1.864 0.730 0.868 -0.308 0.897 -0.021 -0.367 0.000 1.445 0.238 1.102 2.215 0.716 -0.256 -0.948 0.000 1.022 0.253 -1.404 1.551 0.744 0.726 0.981 1.047 0.848 0.894 0.797 +1 0.687 0.401 -1.718 0.229 -0.528 1.048 -0.493 0.765 1.087 0.918 0.993 -1.088 0.000 0.291 -0.495 -0.631 0.000 0.498 0.513 -0.293 3.102 0.756 1.447 0.988 0.579 0.769 0.844 0.743 +0 2.058 0.191 1.474 0.653 -0.064 0.814 1.760 -0.065 0.000 0.554 1.753 -0.800 0.000 0.467 0.595 -1.692 1.274 0.703 1.335 0.524 3.102 0.878 0.839 1.579 0.960 0.453 0.626 0.859 +1 0.647 1.255 -1.699 1.302 0.762 0.717 1.893 0.460 0.000 0.972 -0.472 -0.198 2.215 1.854 -0.757 -1.187 1.274 0.898 0.444 -1.050 0.000 1.507 1.657 1.014 1.754 1.137 1.526 1.340 +0 0.580 -0.676 1.715 1.587 1.682 1.179 1.109 -0.145 2.173 0.661 1.213 0.764 0.000 0.937 0.093 0.099 0.000 1.402 -0.245 -0.345 3.102 0.962 1.289 0.972 3.017 1.073 1.935 1.784 +1 0.288 -2.154 -0.071 1.130 -1.398 0.907 -0.507 -0.392 2.173 1.303 -1.171 0.840 0.000 1.030 -0.936 1.407 0.000 0.973 -0.773 -0.690 1.551 0.883 0.976 0.986 0.873 0.336 0.918 0.806 +1 1.423 1.049 -1.648 1.105 -0.791 0.488 1.094 -0.311 2.173 1.218 0.504 0.584 2.215 0.277 0.555 0.241 0.000 0.693 -0.047 -0.201 0.000 0.903 0.884 1.213 1.267 0.886 0.895 0.821 +0 0.759 0.896 0.919 1.366 -1.734 0.565 0.676 0.154 0.000 0.579 -0.872 -1.190 2.215 0.695 0.123 -0.635 2.548 0.961 -0.907 0.064 0.000 1.136 1.035 0.990 0.784 0.487 0.700 0.781 +1 1.109 -0.751 0.057 0.535 1.697 0.348 -2.114 0.209 0.000 0.646 -0.625 -1.280 1.107 0.476 1.509 1.123 0.000 1.036 -1.012 1.392 0.000 0.914 0.857 1.062 0.854 0.749 0.859 0.747 +1 0.624 -0.150 1.539 0.554 1.434 0.528 0.985 -1.187 0.000 1.411 0.536 -0.142 1.107 1.313 0.494 1.293 0.000 1.824 0.571 -0.790 3.102 1.135 1.180 0.990 1.515 0.804 1.167 1.044 +0 0.569 2.386 1.580 1.233 -0.223 0.426 0.838 0.922 2.173 0.451 -1.694 -1.301 0.000 0.707 -0.124 -0.199 2.548 0.863 -1.071 0.854 0.000 0.780 1.113 1.159 1.264 0.676 0.917 1.449 +0 0.863 -1.453 0.273 1.565 0.306 1.009 -0.750 -0.070 2.173 3.236 0.454 -1.606 2.215 0.696 -2.400 
1.145 0.000 0.854 0.322 0.265 0.000 0.915 1.116 0.986 4.261 3.133 2.832 2.157 +1 0.586 -1.273 1.394 0.040 0.717 0.992 -0.297 0.132 2.173 1.030 0.176 -1.342 0.000 0.988 0.370 -0.731 0.000 1.313 0.129 0.964 3.102 0.857 0.844 0.907 0.846 0.868 0.890 0.756 +1 1.674 -0.130 0.319 1.216 0.949 0.840 -0.960 -0.382 2.173 0.754 -0.284 1.270 1.107 1.872 -0.409 -1.069 0.000 0.998 0.776 1.455 0.000 1.609 1.159 1.063 1.175 1.235 1.016 1.016 +0 0.551 1.170 -0.672 1.159 0.359 0.734 0.593 1.340 0.000 0.813 -0.633 0.108 2.215 1.183 -0.654 -1.264 2.548 0.548 -1.177 -0.394 0.000 1.502 1.153 0.984 0.813 0.985 0.874 0.850 +0 0.389 -0.963 1.296 0.386 -0.569 0.804 -1.083 0.729 0.000 0.708 0.570 -0.022 2.215 0.308 2.066 -0.021 0.000 0.629 -0.614 -1.068 0.000 0.905 1.042 0.986 0.581 0.736 0.666 0.634 +1 0.782 -0.170 -0.049 0.863 1.490 1.023 0.352 -1.291 2.173 0.891 0.106 0.827 2.215 1.264 1.808 -0.002 0.000 1.432 1.041 1.642 0.000 1.143 0.971 1.119 0.956 1.337 0.819 0.759 +1 0.632 -1.118 -1.367 0.996 0.795 1.022 -0.367 -1.664 0.000 1.673 -0.159 0.051 2.215 0.512 0.009 1.316 0.000 0.430 -0.703 1.390 3.102 1.112 0.617 1.021 1.129 0.763 0.877 0.796 +1 0.680 1.366 -0.416 1.004 1.071 1.351 -0.737 0.691 0.000 2.494 1.615 -0.978 0.000 1.081 0.734 0.653 2.548 0.867 0.945 -1.476 3.102 0.671 0.595 1.114 0.697 0.706 0.873 0.810 +0 1.369 0.372 1.691 0.342 -0.556 0.510 2.220 0.416 0.000 0.746 1.225 -0.587 2.215 0.764 1.067 -1.405 2.548 1.085 0.682 0.606 0.000 0.899 0.906 0.990 0.559 0.541 0.675 0.705 +0 0.794 1.949 0.703 0.324 -1.319 0.979 -0.166 1.632 1.087 1.493 0.478 -0.283 2.215 0.972 0.729 1.614 0.000 0.518 -0.186 0.244 0.000 0.849 1.047 0.992 1.153 1.856 1.106 0.890 +0 0.534 -0.706 -1.284 1.574 -0.386 0.963 -1.186 0.795 0.000 1.090 -1.384 1.648 0.000 0.758 -0.493 1.047 0.000 1.181 0.782 -1.090 1.551 0.959 1.039 0.991 0.788 1.021 0.976 0.889 +1 1.950 -0.143 -0.222 0.999 -0.740 0.557 -0.133 1.610 0.000 1.007 0.326 0.946 2.215 1.021 0.549 -1.434 2.548 0.412 0.964 0.419 0.000 0.925 0.978 0.996 0.953 0.916 0.914 0.878 +0 0.415 1.655 -1.666 1.441 -0.762 0.630 1.022 0.873 0.000 0.365 -1.530 1.388 0.000 0.450 1.452 0.366 0.000 0.871 -0.061 -0.898 3.102 0.837 0.820 0.996 0.562 0.429 0.529 0.596 +0 0.301 -0.435 1.176 2.128 -1.580 1.009 -0.016 -1.030 2.173 0.990 -0.748 0.609 0.000 0.370 -2.559 1.214 0.000 1.733 -0.414 -0.050 0.000 0.982 0.637 0.981 1.359 0.477 0.878 0.985 +0 0.630 0.685 0.468 0.919 -1.254 0.516 2.425 0.838 0.000 1.141 -0.024 1.388 2.215 1.271 1.927 -0.691 0.000 1.019 1.328 0.085 0.000 0.874 0.501 1.054 0.854 0.701 0.980 0.822 +1 0.454 -1.220 1.122 0.662 1.024 0.894 -0.380 -0.425 0.000 0.683 -0.914 0.068 0.000 1.402 -0.409 1.181 2.548 1.717 -0.250 -1.428 3.102 0.853 1.096 0.987 0.618 0.848 0.914 0.821 +0 1.904 -0.093 -1.471 1.313 -1.123 3.175 -0.233 0.654 0.000 2.244 0.018 -1.012 2.215 1.520 -0.705 -0.819 0.000 1.227 0.523 0.844 3.102 3.983 2.262 0.989 0.780 1.560 2.031 1.754 +1 0.787 -0.891 0.909 0.873 -1.643 2.202 -0.040 -0.601 0.000 1.164 0.923 0.477 0.000 1.368 0.684 1.033 0.000 1.596 0.457 1.509 3.102 0.949 0.918 0.988 0.586 0.731 0.901 1.072 +1 1.474 0.491 0.744 0.392 -0.420 1.040 1.308 0.470 0.000 1.285 -0.710 -1.403 1.107 0.690 -0.517 -0.637 2.548 0.987 0.084 -0.828 0.000 0.626 0.692 0.987 0.819 0.643 0.904 0.738 +0 0.463 1.242 -1.314 2.791 1.720 0.748 1.157 0.372 0.000 0.601 1.056 -0.308 0.000 0.898 0.007 0.125 1.274 0.934 -1.066 -0.693 3.102 0.822 0.723 0.979 1.207 0.674 0.957 1.029 +0 1.079 -0.575 0.495 0.313 0.078 0.524 0.421 -0.528 0.000 0.869 -0.779 -0.583 0.000 2.022 -1.058 1.205 1.274 1.743 -0.434 
-1.414 3.102 0.937 0.942 0.985 1.118 1.114 1.064 0.958 +1 0.920 0.674 0.591 0.417 -1.276 0.574 1.485 1.671 0.000 1.258 1.206 -1.256 0.000 0.849 1.905 0.081 0.000 1.387 0.827 -0.189 3.102 0.899 0.980 0.989 0.654 0.569 0.770 0.688 +1 0.697 0.436 1.342 0.199 -1.430 1.067 -1.009 0.838 2.173 0.982 -1.186 -0.471 0.000 0.841 -0.398 -0.803 0.000 0.644 -0.439 -1.181 3.102 0.695 1.354 0.977 0.536 0.878 0.821 0.744 +1 1.002 0.348 -1.265 1.269 1.351 1.317 0.496 -0.267 2.173 0.646 -0.247 -1.579 0.000 0.580 0.400 0.681 0.000 0.854 -0.422 0.340 3.102 0.930 0.915 1.103 1.352 0.830 0.933 0.883 +1 0.401 2.052 -1.082 1.207 0.977 0.463 1.540 0.750 0.000 1.100 1.368 -0.910 1.107 1.013 0.296 -0.526 2.548 0.414 1.733 -0.760 0.000 0.678 0.831 0.988 0.913 0.742 0.773 0.675 +0 0.645 -1.241 0.485 2.162 1.506 1.458 -1.382 -0.385 0.000 0.868 -1.593 -1.584 2.215 1.103 0.572 0.574 2.548 0.775 -0.624 -0.384 0.000 0.533 1.190 1.302 1.419 1.742 1.274 1.213 +1 0.934 -0.787 1.514 0.149 1.322 0.499 0.464 -0.702 0.000 1.009 1.198 -0.343 2.215 0.447 0.787 0.885 2.548 0.900 -1.064 0.468 0.000 1.348 0.904 0.994 1.143 0.652 0.808 0.786 +1 0.801 0.080 -0.663 0.347 -0.820 0.847 -0.042 0.308 0.000 1.145 1.244 -1.143 2.215 1.157 0.661 0.984 0.000 1.263 0.118 -1.674 0.000 0.987 1.133 0.992 0.606 0.600 0.672 0.660 +0 1.440 -0.196 -0.599 1.265 -0.270 0.918 0.980 1.698 0.000 0.608 -0.048 0.489 2.215 0.658 0.829 0.983 0.000 0.470 1.057 -0.584 3.102 0.843 0.956 0.986 0.546 0.523 0.617 0.790 +1 0.928 0.249 -1.091 0.799 1.270 1.109 -0.534 -0.220 0.000 0.883 -0.368 1.682 2.215 0.734 0.227 0.242 0.000 1.005 0.853 1.170 1.551 0.927 1.173 1.012 0.657 0.749 0.936 0.826 +1 0.417 -0.870 -1.429 0.710 -0.594 1.157 -0.427 -1.612 2.173 1.013 0.668 0.813 0.000 1.154 -1.378 -0.811 0.000 1.113 0.527 0.270 0.000 0.820 1.207 0.979 0.831 0.948 0.944 0.823 +1 0.943 -0.893 -0.091 0.943 -1.307 0.968 -0.080 0.616 0.000 0.773 -1.127 -0.775 0.000 1.554 -0.724 -1.512 1.274 0.710 0.497 1.221 1.551 0.892 0.701 1.162 0.834 0.787 0.794 0.780 +0 0.671 -1.476 -1.489 1.633 -0.633 0.479 -1.200 0.092 0.000 0.804 -0.665 1.629 1.107 0.875 -1.185 1.122 1.274 0.666 -1.675 -0.445 0.000 0.536 0.897 1.011 0.882 0.483 0.727 0.671 +1 0.932 0.036 -1.741 1.614 -1.197 0.845 -0.520 0.913 2.173 0.668 0.388 -0.294 2.215 0.953 0.397 0.185 0.000 0.831 0.852 1.054 0.000 0.751 0.900 0.986 0.855 1.113 0.957 0.838 +1 0.810 1.075 -0.641 1.238 1.640 0.436 2.313 -0.797 0.000 0.875 0.733 0.239 2.215 0.535 2.417 0.696 0.000 0.846 0.050 1.516 3.102 0.858 1.056 1.227 0.729 0.761 0.827 0.787 +0 1.256 0.559 0.839 0.545 1.684 0.596 0.949 -0.895 2.173 0.445 0.967 -0.212 0.000 0.640 0.068 0.987 0.000 1.251 -0.582 -1.032 1.551 0.821 0.849 0.982 0.888 0.854 0.827 0.710 +0 1.305 0.256 0.817 0.433 -1.628 0.698 1.286 -0.805 0.000 1.097 -0.728 -0.882 0.000 2.087 -0.792 0.778 2.548 0.716 -1.157 -0.155 0.000 0.798 0.783 0.989 0.840 1.194 0.905 0.829 +1 0.742 0.523 1.054 1.258 -1.253 0.981 -0.809 1.417 2.173 1.091 0.634 -0.318 0.000 0.806 -0.074 0.290 2.548 0.472 2.198 -0.358 0.000 1.090 0.941 1.170 1.160 1.030 1.250 1.058 +0 0.777 2.260 1.450 1.346 -1.116 0.701 1.864 0.312 0.000 0.772 0.291 1.113 0.000 1.467 1.356 -0.605 2.548 0.846 0.553 0.381 0.000 0.765 0.939 1.046 0.941 0.914 0.801 0.841 +0 0.451 -2.143 -0.834 1.143 0.252 1.105 -0.367 0.946 1.087 1.451 -1.128 -0.730 0.000 0.413 1.078 1.694 0.000 1.474 -0.703 1.739 1.551 1.988 1.327 0.990 1.117 0.941 1.146 1.027 +0 0.818 -0.663 1.109 1.043 1.611 0.778 -2.913 -0.636 0.000 1.510 -0.061 0.308 0.000 1.067 -0.136 -0.453 2.548 2.652 0.039 -1.536 3.102 0.792 0.866 
0.976 0.760 1.072 0.975 0.878 +1 1.788 0.170 -0.595 0.320 -0.650 0.803 0.713 1.321 2.173 1.119 -0.299 0.652 1.107 0.937 0.698 -0.704 0.000 0.525 1.308 1.318 0.000 0.815 0.864 0.986 1.087 1.090 1.023 0.894 +1 0.558 -0.627 0.091 1.325 -1.043 0.614 -0.332 0.386 0.000 0.691 0.335 -0.040 0.000 0.921 2.498 1.066 0.000 1.627 0.759 1.491 3.102 0.706 0.938 1.015 1.115 0.793 0.811 0.799 +0 0.741 0.706 1.546 0.985 -0.670 0.879 1.520 -0.497 0.000 1.305 1.542 1.493 0.000 0.511 -0.274 0.652 1.274 0.966 1.260 -0.069 3.102 2.219 1.318 1.079 0.708 0.645 0.957 0.840 +1 0.744 -0.659 0.502 0.939 -0.808 1.124 0.855 0.982 0.000 1.157 -0.776 -0.976 0.000 0.662 -0.139 -0.561 2.548 0.894 -0.604 1.446 0.000 1.085 0.966 1.071 0.588 0.305 0.600 0.589 +1 1.780 0.235 -0.147 0.885 0.597 0.338 0.551 1.357 0.000 1.500 1.307 1.716 2.215 0.715 1.376 -0.480 0.000 1.220 0.869 0.342 0.000 0.983 0.940 1.080 0.761 0.960 1.047 0.871 +1 0.721 -1.425 0.859 0.327 -1.571 1.067 -1.671 -0.301 0.000 0.890 -0.866 -1.106 1.107 0.827 -1.035 1.543 2.548 0.669 -1.794 0.511 0.000 0.915 1.037 0.989 0.770 0.634 0.800 0.758 +0 1.954 -0.564 -1.073 0.619 1.349 2.851 0.887 0.839 0.000 1.725 0.325 -0.817 2.215 1.199 1.304 -0.860 0.000 0.647 0.368 -0.191 3.102 1.596 0.900 1.247 1.124 0.512 0.765 0.842 +0 1.815 -1.727 1.495 1.537 -1.659 1.308 0.802 0.074 0.000 1.014 0.513 -0.236 0.000 1.117 -1.150 0.905 2.548 1.721 0.944 -0.701 1.551 0.760 0.933 1.003 0.847 1.912 1.599 1.859 +0 0.439 1.898 0.440 0.923 -1.309 0.589 -0.361 1.084 0.000 0.874 1.102 -0.024 1.107 0.672 0.243 -0.408 2.548 0.398 0.327 0.021 0.000 0.670 0.945 0.984 1.030 0.458 0.807 0.947 +1 0.597 -1.663 -0.154 0.568 -0.946 1.420 -0.949 1.455 0.000 1.441 -0.599 -0.582 0.000 1.014 -1.290 1.064 2.548 1.675 -1.080 0.650 3.102 0.887 0.965 0.985 0.811 0.368 0.724 0.668 +0 1.971 0.855 0.367 0.062 -1.284 0.873 -0.975 -0.945 0.000 0.922 0.088 0.865 2.215 1.025 -1.606 -1.251 0.000 0.987 -0.066 -1.730 3.102 0.822 0.884 0.986 0.719 0.624 0.954 1.118 +0 2.475 -1.448 -0.413 0.593 -1.307 0.890 -0.545 1.193 2.173 0.741 -1.699 0.980 0.000 0.614 -0.074 -1.439 2.548 0.477 -0.576 -0.653 0.000 0.881 0.856 1.210 0.968 0.676 1.024 0.865 +1 0.939 -0.172 1.040 0.513 1.625 1.879 0.740 -1.057 0.000 1.489 -0.058 0.641 2.215 0.848 0.914 0.063 0.000 0.919 0.074 -0.271 0.000 0.744 1.445 0.994 0.913 0.625 1.322 1.195 +1 1.530 0.627 1.239 0.681 -1.076 0.681 -0.039 -0.386 2.173 0.470 0.094 0.035 0.000 0.452 1.011 -0.274 0.000 0.971 -0.738 1.140 3.102 0.607 0.655 1.231 1.063 0.924 0.820 0.664 +1 0.729 0.312 0.462 1.264 1.574 1.043 -0.424 -1.411 0.000 1.822 -0.677 0.286 2.215 0.552 -0.622 -0.767 0.000 0.864 -0.678 1.218 3.102 0.868 0.718 1.121 1.283 0.846 0.929 0.891 +1 0.879 0.320 -0.376 2.413 -0.045 2.062 -0.411 1.571 0.000 1.145 0.065 -0.994 2.215 1.274 -0.835 -0.179 0.000 2.082 -0.950 -1.092 3.102 0.950 1.329 0.994 1.210 0.902 1.353 1.574 +1 0.669 0.353 -1.717 0.385 -1.529 0.786 0.237 0.715 2.173 0.652 2.350 -1.499 0.000 1.169 0.929 -0.239 1.274 1.153 1.550 -0.582 0.000 0.890 0.946 0.984 0.941 1.025 1.024 1.148 +0 1.539 -0.825 -1.022 0.991 -0.049 1.007 0.232 0.363 2.173 0.778 0.959 0.655 0.000 1.578 -0.855 -1.510 1.274 1.366 0.533 1.276 0.000 0.975 0.989 1.316 1.338 1.840 1.142 1.011 +1 2.168 0.073 -0.688 0.725 -0.384 1.384 1.738 0.897 0.000 1.365 0.620 0.905 2.215 0.614 0.677 -0.853 1.274 0.715 -0.493 -1.424 0.000 0.676 0.960 0.998 0.513 0.974 0.922 0.810 +1 1.896 -0.414 -0.963 0.636 0.168 1.093 0.303 0.748 2.173 0.290 -0.549 -1.624 2.215 0.332 0.902 1.217 0.000 0.544 0.131 -0.511 0.000 0.510 0.645 1.297 0.680 0.791 0.895 
0.694 +1 0.855 -1.294 0.416 1.312 1.036 0.257 -2.689 -0.308 0.000 0.920 -1.012 -0.838 2.215 0.568 1.189 -0.558 0.000 0.543 -1.191 1.299 0.000 0.688 0.758 0.995 1.534 1.142 1.158 0.954 +1 4.335 -0.902 -0.530 0.835 -1.042 0.836 -1.622 1.248 0.000 1.913 -0.266 0.005 2.215 4.189 1.139 1.302 0.000 1.276 -0.454 -0.891 0.000 0.822 1.007 1.172 0.755 0.774 0.945 0.807 +0 0.656 0.761 -1.076 0.990 -0.311 0.513 -1.057 0.361 0.000 0.794 0.382 1.265 2.215 1.490 -0.942 -0.820 2.548 0.754 -0.484 1.629 0.000 0.894 0.932 0.990 1.005 1.421 1.246 1.129 +0 2.683 -0.605 0.494 0.890 -0.386 1.066 -0.617 -0.904 2.173 0.939 0.447 0.950 2.215 1.416 -0.399 -1.565 0.000 1.330 -1.534 1.240 0.000 1.067 0.832 1.524 1.198 1.687 1.216 1.095 +1 0.565 -0.095 0.713 0.188 -1.350 0.733 -1.734 -0.394 0.000 0.755 -0.288 -1.514 2.215 0.799 -1.464 1.537 2.548 1.071 -0.961 0.253 0.000 0.850 0.913 0.985 0.736 0.661 0.800 0.695 +1 0.621 -1.016 0.577 0.776 -0.343 0.619 0.017 1.555 0.000 0.560 1.141 -0.607 2.215 1.421 -0.690 -1.669 0.000 1.291 -0.424 0.013 3.102 0.776 1.069 0.993 0.798 0.832 0.896 0.798 +0 1.013 -0.552 1.710 0.265 -0.847 0.588 1.633 1.525 1.087 0.975 -0.568 0.404 2.215 1.554 0.756 -0.158 0.000 1.127 0.070 -1.269 0.000 1.350 1.276 0.990 1.616 1.789 1.246 1.156 +0 2.391 -1.293 0.948 1.210 0.949 1.694 -0.775 -0.618 2.173 0.754 -1.064 -1.365 1.107 0.359 -0.875 -1.729 0.000 0.637 0.448 0.233 0.000 0.675 1.017 0.996 1.130 1.068 1.480 1.157 +1 0.948 -0.014 0.759 0.496 -1.070 0.728 0.192 -0.249 1.087 0.995 0.341 -0.718 0.000 1.259 -0.109 1.494 0.000 0.670 0.740 1.408 1.551 1.617 0.975 0.988 0.770 0.782 0.777 0.699 +0 0.950 -1.377 1.522 1.153 0.926 0.463 -0.557 -0.747 0.000 0.866 0.338 -1.272 1.107 0.635 -2.558 -0.132 0.000 1.089 -0.297 0.784 3.102 1.301 1.048 0.984 1.088 0.899 0.988 0.897 +1 0.529 -0.590 -0.818 1.141 1.610 0.849 0.508 0.943 0.000 1.363 -0.856 -0.392 2.215 0.734 -0.058 1.704 0.000 0.747 0.771 -0.168 3.102 0.989 0.835 0.984 1.085 0.957 1.006 0.871 +1 0.865 -0.429 -0.818 0.711 0.995 0.885 -0.933 -0.535 0.000 0.687 -2.766 0.626 0.000 2.097 -1.141 1.208 2.548 1.836 0.759 -0.514 0.000 0.841 0.931 1.084 0.985 0.854 0.914 0.792 +0 2.584 -0.257 -0.364 2.446 -0.585 1.610 0.245 1.426 2.173 0.801 -0.660 1.577 0.000 0.482 -1.194 1.077 0.000 0.539 0.287 0.313 3.102 0.516 0.953 1.000 0.700 0.833 1.426 1.217 +1 0.626 1.159 -0.123 1.036 -1.551 0.809 0.047 0.559 0.000 0.826 -0.442 1.379 2.215 0.758 0.461 -1.064 2.548 0.590 1.088 -1.052 0.000 1.264 0.946 1.071 1.037 0.798 0.738 0.752 +1 0.685 0.490 0.258 2.455 0.996 1.325 1.383 -1.287 0.000 0.740 2.909 1.030 0.000 1.233 0.078 -0.595 2.548 1.876 0.885 -0.234 3.102 2.610 1.936 1.109 1.200 0.698 1.478 1.467 +1 1.567 0.410 -0.444 0.838 0.329 0.804 -2.841 -0.263 0.000 1.123 -0.900 -1.702 2.215 1.271 -0.449 1.271 0.000 1.168 0.468 1.398 1.551 3.238 2.198 1.020 0.882 0.914 1.662 1.689 +0 0.983 1.151 -0.541 1.881 -1.054 0.766 0.843 1.393 2.173 0.876 2.034 0.671 0.000 0.931 1.825 0.212 0.000 1.074 0.867 -1.294 0.000 0.908 1.005 0.981 0.906 0.452 0.823 0.794 +1 0.749 -0.875 -1.195 0.766 -0.553 0.394 1.605 0.657 0.000 0.879 -0.696 1.599 2.215 0.811 -0.739 0.055 2.548 1.469 -1.674 0.157 0.000 0.816 1.087 0.983 0.899 0.884 0.896 1.080 +0 0.697 0.026 -1.442 0.788 0.333 0.893 -0.052 0.780 0.000 1.273 0.831 -1.102 2.215 0.696 -0.198 -0.021 0.000 1.165 -0.198 1.723 0.000 0.945 0.850 1.026 0.909 0.570 0.903 0.766 +0 1.791 -0.412 1.684 0.433 0.714 0.749 -0.996 -0.356 1.087 0.330 1.009 0.025 2.215 0.539 -0.604 1.012 0.000 0.429 -0.283 -1.057 0.000 0.515 0.657 0.989 0.806 0.919 0.837 0.647 +1 0.880 
-1.111 -1.408 2.535 -1.146 1.650 2.276 0.851 0.000 1.416 -1.525 -0.564 0.000 1.318 0.221 0.246 2.548 0.906 -0.210 1.615 3.102 1.712 1.206 0.987 1.966 0.816 1.292 1.182 +0 1.210 0.383 -1.246 0.442 -1.513 0.971 -0.784 0.149 1.087 1.459 -0.110 1.579 2.215 0.592 -0.193 -0.015 0.000 0.932 -0.639 -0.294 0.000 0.916 1.085 0.993 1.196 1.788 1.045 0.890 +1 1.166 0.383 -0.953 0.770 -0.463 1.174 1.892 0.787 0.000 1.117 -0.070 -0.848 2.215 1.190 1.062 1.516 0.000 0.919 1.712 -0.028 0.000 1.071 0.958 0.980 0.771 0.548 1.222 1.051 +0 0.662 0.429 1.475 1.126 0.524 1.216 0.360 -1.648 1.087 1.672 0.175 0.058 2.215 0.735 -0.270 -1.222 0.000 0.488 0.375 0.582 0.000 0.706 0.899 0.989 1.065 2.106 1.136 0.894 +1 1.842 -1.461 0.324 0.521 -1.223 0.971 -0.175 1.405 2.173 0.540 -0.776 -1.251 0.000 0.760 0.386 -0.453 2.548 0.868 0.281 -1.269 0.000 0.535 0.790 1.336 1.140 1.113 1.085 0.911 +0 1.781 0.188 1.399 0.115 0.478 0.706 0.339 -0.364 2.173 0.883 0.814 0.372 2.215 0.694 1.483 -1.025 0.000 1.101 1.350 0.928 0.000 0.931 1.001 0.983 1.035 0.772 0.897 0.898 +1 1.542 0.725 -1.055 0.750 0.365 1.171 1.251 1.177 2.173 1.266 -0.753 -0.372 0.000 0.615 0.711 0.725 0.000 1.246 1.012 -1.536 3.102 0.788 0.900 1.428 0.838 0.820 0.878 0.761 +1 1.283 -1.669 0.700 0.582 -0.312 1.563 1.064 -1.179 0.000 2.011 -0.751 0.245 2.215 1.446 -1.420 -1.636 0.000 1.081 -1.181 1.058 0.000 0.908 0.720 0.986 0.894 0.831 0.959 0.830 +1 1.462 -0.491 1.483 0.307 -1.384 0.931 -1.114 -1.440 0.000 1.163 0.044 0.734 2.215 1.423 -1.297 -0.229 0.000 1.308 -0.330 -0.400 3.102 0.961 0.936 0.986 0.997 0.982 0.867 0.826 +1 0.705 -1.570 0.177 1.320 0.677 0.990 -0.449 -1.302 0.000 0.498 0.136 0.772 1.107 0.751 -0.535 -0.324 0.000 0.685 -0.037 -1.726 0.000 0.785 0.637 0.991 1.543 0.443 1.153 0.967 +1 1.210 -1.128 -0.022 0.570 -1.092 0.738 -1.743 1.661 0.000 0.603 -1.304 1.010 0.000 1.044 -0.185 -0.078 2.548 0.883 -0.846 -1.243 3.102 0.841 0.700 0.987 0.728 0.706 0.782 0.729 +1 1.165 -0.590 1.582 0.222 -0.160 0.930 -1.602 -0.033 0.000 1.069 -0.254 0.951 1.107 1.406 -0.435 -1.094 2.548 1.433 -1.433 -1.366 0.000 1.646 1.404 0.990 0.782 1.263 1.175 0.959 +1 1.449 1.070 0.521 0.371 1.673 2.089 -0.175 -0.979 0.000 2.539 0.602 0.127 2.215 2.081 -0.017 -1.639 0.000 1.040 1.750 1.238 0.000 0.935 0.767 0.982 1.070 0.915 1.183 1.015 +1 0.717 -0.863 1.272 1.338 -0.299 0.845 -0.658 0.194 0.000 1.566 0.017 -1.366 2.215 0.439 -1.509 0.879 0.000 0.841 -0.450 1.129 3.102 0.851 0.660 1.340 1.258 0.858 0.927 0.857 +0 0.837 -0.180 1.222 0.715 -1.527 0.643 -0.963 -0.231 2.173 0.678 -2.048 0.504 0.000 1.049 -0.427 -0.928 2.548 0.375 -2.198 1.707 0.000 0.607 1.020 0.990 1.125 0.656 0.814 0.950 +1 1.432 -0.539 -0.390 0.872 0.266 1.837 -0.474 1.632 0.000 1.443 -0.817 0.128 0.000 1.037 0.585 -0.899 2.548 0.887 -0.981 0.743 0.000 0.816 0.914 0.985 0.851 0.646 0.876 0.774 +0 0.371 -1.945 -1.516 1.786 -1.677 0.495 1.134 0.234 1.087 0.884 0.065 0.222 0.000 0.611 0.361 1.535 1.274 0.737 -0.802 0.224 0.000 0.598 0.664 0.975 0.662 0.684 0.899 0.745 +0 0.510 -0.044 0.945 0.957 -0.555 0.859 0.636 0.131 1.087 0.792 -0.678 -1.614 0.000 0.586 -1.731 1.503 0.000 0.462 0.642 -1.662 3.102 0.750 0.694 0.989 0.716 0.668 0.957 0.824 +1 0.731 0.709 -1.671 1.584 0.953 0.627 2.045 -1.655 0.000 1.078 -0.650 -0.547 0.000 0.685 -0.592 0.772 0.000 1.178 0.537 -0.631 3.102 1.223 0.969 1.045 0.649 0.283 0.613 0.791 +0 1.076 0.028 0.125 1.631 -0.604 0.931 0.793 1.415 0.000 1.008 0.624 -1.311 0.000 1.294 1.626 0.248 0.000 1.117 0.707 1.128 1.551 1.311 0.867 1.120 0.911 0.566 0.762 0.871 +1 0.785 -1.019 0.790 
0.688 -1.245 0.669 -2.245 -1.196 0.000 0.560 -0.492 -0.182 2.215 1.150 -0.404 1.195 2.548 0.745 -1.913 0.074 0.000 0.982 1.033 0.988 0.653 0.808 0.911 0.772 +1 0.680 0.577 0.657 0.565 -0.732 0.987 1.558 1.467 0.000 0.910 -0.330 -0.488 0.000 0.750 0.910 0.481 2.548 0.982 0.541 -1.308 3.102 0.544 0.791 0.984 0.637 0.666 0.609 0.626 +0 1.612 0.907 -0.118 1.128 0.649 0.525 -1.080 -1.293 0.000 0.386 -1.181 0.488 0.000 2.023 -0.913 1.520 2.548 1.865 0.752 -0.686 3.102 0.957 0.878 1.192 0.937 2.127 1.493 1.302 +0 1.324 0.518 -1.578 0.692 -0.548 0.973 0.664 -0.698 0.000 1.513 -0.837 0.978 0.000 1.712 -0.406 0.401 2.548 1.711 -0.303 -0.530 0.000 0.972 0.829 1.062 1.201 1.117 0.910 0.822 +0 0.952 -1.832 0.426 1.383 1.005 0.655 1.308 -0.956 0.000 0.503 -0.889 -0.591 1.107 0.277 1.172 1.346 0.000 0.759 1.305 -1.325 0.000 0.974 0.676 0.985 1.044 0.617 0.837 0.857 +1 1.674 0.081 -1.698 1.277 -1.290 0.903 0.286 0.439 2.173 0.641 0.513 1.006 0.000 1.194 -0.183 -0.444 2.548 0.431 0.327 -0.186 0.000 0.604 0.753 0.978 1.386 0.978 1.029 0.833 +0 1.436 0.264 -0.970 1.420 -0.439 1.127 -0.676 0.638 2.173 1.465 -0.458 1.314 2.215 0.629 -1.381 -1.115 0.000 0.377 0.698 -0.052 0.000 0.903 1.000 0.988 1.641 1.099 1.355 1.086 +0 1.763 1.187 0.443 0.876 0.808 0.564 -0.542 -1.087 0.000 0.703 0.665 -1.075 2.215 0.799 1.860 1.442 0.000 0.586 -2.287 -0.446 0.000 1.079 0.913 0.991 1.029 0.717 0.739 0.812 +1 0.951 -0.172 0.304 0.352 0.219 1.295 -0.804 0.925 2.173 1.121 -0.104 -0.956 0.000 1.316 -0.407 -0.269 0.000 1.345 -2.407 -1.548 0.000 0.904 0.980 0.989 0.873 0.951 0.867 0.782 +0 1.690 1.438 0.949 0.772 -1.109 0.720 1.100 0.316 1.087 0.717 -1.314 -1.479 2.215 0.553 -1.301 -0.125 0.000 0.915 -0.059 -1.219 0.000 0.881 0.807 1.520 1.002 1.966 1.437 1.233 +0 1.552 0.228 0.776 1.065 -1.439 0.885 0.266 -1.087 2.173 1.055 -0.966 1.285 0.000 0.881 -0.021 -0.509 0.000 1.628 -0.524 0.302 3.102 0.826 1.113 1.623 1.170 1.343 0.984 0.922 +1 0.730 0.076 -0.712 0.749 1.059 0.626 0.958 0.557 1.087 0.663 -0.371 1.699 0.000 0.785 -1.320 -1.239 2.548 1.186 -0.283 0.285 0.000 1.105 1.040 1.024 0.813 1.593 0.902 0.761 +1 0.562 0.301 -1.472 0.944 -0.477 1.072 0.134 -1.675 0.000 1.040 0.045 -0.025 0.000 1.083 -0.337 -0.560 0.000 0.955 1.001 0.855 3.102 0.968 0.935 0.984 0.894 1.024 0.759 0.709 +1 0.667 -0.357 -1.584 1.287 -0.108 1.336 0.390 -0.162 2.173 1.069 0.453 1.255 2.215 1.344 0.321 -1.683 0.000 0.588 1.082 -1.729 0.000 0.484 0.578 1.247 1.018 1.684 1.022 0.942 +1 0.934 0.111 -1.263 2.339 1.684 0.759 1.012 -0.094 2.173 1.016 -0.570 -0.576 0.000 0.911 0.696 0.805 0.000 1.817 2.067 0.498 0.000 0.958 1.122 0.985 0.746 0.420 0.897 0.874 +0 0.463 0.022 -1.153 0.431 -0.343 0.612 -1.019 -0.760 0.000 1.267 -0.497 0.817 2.215 1.331 0.114 -0.745 0.000 2.139 0.837 1.193 0.000 0.990 0.927 0.984 1.293 0.680 0.923 0.939 +0 0.610 -0.721 0.047 1.126 0.749 0.809 0.640 0.567 1.087 0.703 1.973 0.077 0.000 1.261 -0.972 -1.063 0.000 1.639 -1.359 -1.232 0.000 0.787 0.881 0.993 0.981 1.326 0.937 0.881 +1 1.468 0.169 0.736 0.469 1.395 0.628 -0.368 -1.245 2.173 1.108 -0.791 1.408 2.215 1.204 -0.358 -0.170 0.000 0.588 -0.948 -0.524 0.000 1.009 0.908 0.988 1.031 0.881 0.876 0.898 +1 0.380 1.267 1.541 0.874 0.535 0.950 -0.452 1.424 2.173 1.049 -2.570 -1.208 0.000 1.162 -0.563 0.083 0.000 0.815 -0.269 -0.378 0.000 1.057 0.836 0.988 2.190 0.355 1.564 1.363 +1 1.131 -0.147 0.081 2.441 -1.303 1.351 -0.135 -0.277 2.173 0.607 0.242 1.464 0.000 1.307 0.486 0.137 2.548 2.348 -0.362 1.247 0.000 0.988 1.726 2.182 1.525 0.826 1.352 1.312 +0 0.518 2.315 0.329 1.341 -1.002 
1.079 0.369 0.688 0.000 0.643 0.470 -1.616 0.000 0.734 1.039 -0.324 2.548 0.863 0.178 -0.994 3.102 0.742 0.831 1.076 0.991 0.452 0.694 0.875 +0 1.107 1.540 -1.021 0.954 -0.132 0.880 0.266 -1.645 0.000 1.130 0.018 -0.303 0.000 0.966 1.813 1.514 0.000 2.144 2.210 0.483 0.000 0.816 0.776 1.023 0.717 0.523 0.722 0.775 +0 2.144 -0.219 -1.342 0.564 -1.422 0.759 0.172 0.152 2.173 1.137 -0.799 -0.749 2.215 1.417 -0.633 0.593 0.000 2.100 0.169 0.967 0.000 1.084 0.984 0.999 1.002 1.221 1.034 1.091 +1 1.197 -0.081 0.919 0.618 -0.264 0.433 -1.525 0.096 2.173 0.618 -0.334 -0.438 0.000 0.632 -0.709 1.717 2.548 1.101 -2.163 -1.515 0.000 0.813 0.821 1.043 0.688 0.696 0.625 0.593 +1 1.061 -0.736 1.441 0.997 0.986 1.499 -0.812 -0.800 2.173 0.569 -0.875 0.247 2.215 0.916 0.182 0.750 0.000 0.448 -0.348 -1.589 0.000 0.646 1.264 0.997 0.748 1.102 1.033 0.900 +1 0.796 2.221 1.734 0.569 0.178 0.432 0.667 0.774 2.173 0.356 0.846 -1.658 0.000 0.576 0.075 1.243 0.000 1.217 -0.336 -0.537 3.102 0.449 0.686 0.991 0.735 0.833 0.823 0.679 +1 0.586 -1.090 -0.937 1.280 0.093 1.070 -0.254 -1.575 0.000 0.740 -1.108 1.684 0.000 0.939 0.358 0.158 1.274 1.243 -0.716 -0.168 0.000 0.944 0.821 0.988 0.960 0.821 0.848 0.895 +0 0.953 -0.957 0.828 1.468 1.565 0.998 -0.700 -0.762 2.173 0.474 -0.139 0.643 0.000 0.426 -0.026 -0.852 0.000 0.810 0.079 0.122 3.102 0.673 0.808 1.011 0.822 0.785 0.877 0.722 +0 1.127 1.349 -0.949 1.410 -0.310 0.641 -0.372 1.146 1.087 0.442 2.154 0.874 0.000 1.092 0.622 1.157 0.000 1.001 0.087 -0.881 3.102 0.939 0.999 0.985 1.408 0.845 0.935 0.925 +1 0.571 -0.457 -1.127 1.556 -0.934 0.586 0.263 1.002 0.000 0.785 0.834 -1.295 0.000 0.699 -0.105 0.709 2.548 1.227 1.004 1.116 3.102 0.860 0.821 0.992 0.887 0.562 0.747 0.742 +1 1.883 -0.760 1.158 1.173 1.232 0.765 -1.050 -0.035 2.173 1.043 -0.192 -0.977 2.215 0.648 -0.252 1.740 0.000 0.871 -0.488 -0.458 0.000 0.771 0.810 0.991 1.391 1.147 1.126 0.902 +1 0.609 -0.069 -1.094 0.776 0.386 1.228 -0.194 0.991 0.000 1.191 -0.244 -0.561 1.107 1.030 1.151 -1.008 0.000 1.297 -0.224 0.443 0.000 0.919 0.697 0.990 0.772 0.577 0.856 0.752 +0 0.805 1.150 1.305 1.911 0.250 0.471 -1.387 1.229 0.000 0.545 -1.035 -0.475 2.215 0.600 -0.439 -1.436 2.548 1.271 1.168 -0.627 0.000 0.907 0.885 1.399 1.137 0.497 1.029 1.465 +0 0.640 0.104 -1.140 1.011 1.345 0.592 0.328 -0.640 2.173 0.910 -1.596 0.378 2.215 0.384 0.133 0.787 0.000 0.865 -0.529 -0.620 0.000 0.658 0.784 0.988 0.845 1.514 0.929 0.731 +1 0.462 0.106 -0.497 0.753 -0.976 0.819 1.280 0.529 0.000 1.223 1.442 -1.138 2.215 0.528 1.678 0.819 0.000 1.265 0.419 1.570 3.102 0.449 0.833 0.985 0.659 0.924 0.855 0.766 +1 1.193 -0.881 -0.713 0.668 1.275 2.249 1.337 0.157 0.000 1.605 -0.887 1.577 1.107 0.660 -0.415 -1.709 2.548 2.670 -1.459 1.743 0.000 0.672 1.197 1.207 0.679 0.319 0.738 0.692 +0 1.116 1.277 0.465 0.612 -1.667 0.891 0.205 1.204 2.173 0.766 1.342 -0.306 2.215 0.514 -0.063 -1.553 0.000 1.328 0.772 -0.698 0.000 0.792 1.010 1.076 0.751 1.409 0.835 0.764 +0 0.566 -0.800 -0.360 1.095 1.021 0.571 0.507 -1.188 2.173 0.518 -1.229 -1.066 0.000 0.839 0.201 0.979 1.274 0.405 -1.411 0.356 0.000 0.586 0.874 1.033 0.677 0.809 0.708 0.661 +0 0.654 -0.280 -1.240 1.077 -0.091 0.792 0.294 0.708 2.173 0.412 -0.304 1.168 0.000 0.541 0.954 1.561 2.548 0.793 1.879 -1.109 0.000 1.407 0.813 1.000 0.903 0.647 0.710 0.766 +0 0.780 -0.675 1.168 0.783 -1.458 1.448 -0.427 0.471 1.087 0.770 0.130 -1.340 0.000 0.678 -1.095 -1.018 0.000 1.022 -1.272 -0.559 0.000 0.858 0.514 0.987 1.255 0.714 0.869 0.837 +1 0.651 -0.058 -0.599 1.620 0.657 1.273 -1.425 -0.619 
0.000 1.827 -0.251 0.944 2.215 3.026 2.116 -1.487 0.000 1.516 -0.575 -0.180 0.000 0.886 1.854 1.287 0.963 0.858 1.446 1.203 +1 0.998 0.385 -0.033 1.496 1.038 0.935 1.078 1.641 2.173 0.683 0.925 0.017 2.215 1.047 0.565 -0.977 0.000 0.876 -2.352 -0.508 0.000 0.497 0.773 1.392 0.846 1.172 0.883 0.805 +0 1.818 1.089 1.715 1.050 -0.959 2.660 0.985 0.185 2.173 2.121 0.205 -1.620 1.107 0.620 0.966 0.474 0.000 0.857 1.063 -0.285 0.000 0.516 1.318 1.280 2.142 3.762 1.971 1.439 +1 1.116 0.725 0.711 1.180 1.478 1.636 -2.434 0.743 0.000 1.676 2.760 -1.144 0.000 0.990 0.595 -1.250 0.000 2.721 0.667 -0.725 3.102 0.700 0.636 1.014 0.843 0.451 0.848 0.696 +1 0.488 0.183 -1.314 1.377 -0.839 1.020 0.497 1.353 2.173 1.009 -0.135 -0.349 0.000 0.720 0.280 0.475 2.548 0.483 0.630 -1.636 0.000 0.934 1.218 0.990 0.864 0.767 0.870 0.835 +0 0.963 0.638 1.729 0.933 0.178 1.512 -0.013 -0.216 2.173 2.022 0.660 1.005 2.215 0.805 0.215 -0.809 0.000 1.309 -0.408 1.688 0.000 0.976 1.275 1.293 1.019 2.466 1.348 1.111 +1 1.042 -0.348 -0.758 1.613 -1.110 1.710 -0.231 -0.539 2.173 1.172 -0.332 1.671 1.107 0.596 -2.224 0.319 0.000 1.442 2.169 0.928 0.000 0.930 1.995 0.999 1.087 1.904 2.008 1.918 +0 0.408 -2.071 1.086 1.695 0.551 0.520 -2.288 -1.098 0.000 0.653 -0.978 -1.530 2.215 0.623 -1.358 -0.614 2.548 0.658 -2.475 -0.044 0.000 0.787 0.879 0.979 0.762 0.525 0.660 0.747 +0 1.003 0.687 -1.158 0.648 0.335 1.169 0.570 0.912 1.087 1.743 -0.246 -0.998 2.215 0.345 2.216 0.807 0.000 0.553 -0.369 0.554 0.000 0.965 0.847 1.088 1.015 2.261 1.201 0.983 +0 0.950 -0.796 0.636 0.391 -1.445 0.262 1.049 -1.691 0.000 0.740 -0.349 -0.902 2.215 1.093 1.759 0.988 0.000 1.162 1.066 -0.425 3.102 0.771 0.813 0.988 0.743 0.841 0.857 0.862 +1 1.089 -0.397 -1.419 0.292 0.574 1.074 -0.134 1.248 0.000 0.926 -0.930 -1.088 1.107 2.335 0.291 0.078 0.000 1.343 -1.071 -0.056 0.000 1.760 1.430 0.983 0.746 0.563 1.071 0.910 +1 0.302 1.168 0.252 0.226 0.045 0.722 -0.160 -0.810 2.173 0.751 -2.118 1.693 0.000 0.544 -1.115 0.888 0.000 1.003 0.038 0.725 3.102 0.812 0.986 0.984 0.745 0.889 0.915 0.789 +1 0.652 0.543 -1.645 0.683 0.227 0.712 -0.044 1.111 2.173 0.948 1.066 -0.356 0.000 0.927 1.266 -1.276 0.000 0.785 0.993 0.411 0.000 0.944 1.058 0.986 0.568 0.737 0.724 0.642 +1 1.559 0.828 -1.298 1.116 -0.771 0.923 0.179 0.519 2.173 0.798 -0.285 -0.051 2.215 2.043 0.635 1.316 0.000 0.597 0.107 0.879 0.000 0.713 0.746 0.979 1.015 0.690 0.974 0.801 +0 0.451 1.024 -1.245 0.855 0.187 0.461 0.596 0.217 2.173 0.689 0.160 -1.070 0.000 1.022 0.988 1.031 0.000 0.826 -1.129 -1.409 1.551 1.006 1.009 0.991 0.723 0.989 0.981 0.863 +0 1.579 0.002 -1.705 0.922 1.466 0.838 -0.732 0.980 2.173 1.163 -0.666 -0.354 0.000 1.051 -1.594 -0.423 0.000 1.539 -0.131 0.212 3.102 0.952 0.972 0.987 1.113 0.845 0.928 1.150 +1 0.809 -0.003 0.685 0.716 -1.615 0.763 -0.973 -1.510 0.000 1.279 0.281 0.131 2.215 0.856 -0.457 -0.792 2.548 0.680 -0.836 1.116 0.000 0.770 0.727 0.988 0.915 0.938 0.905 0.777 +0 0.595 1.803 0.639 0.851 -0.324 0.872 -0.593 0.349 2.173 0.891 -0.767 -1.686 0.000 1.360 1.389 -1.475 2.548 0.639 -0.289 -0.446 0.000 0.910 1.059 0.979 0.889 2.196 1.241 1.098 +0 0.775 0.764 -1.566 0.592 1.375 1.671 -0.304 -0.108 2.173 1.266 -0.480 1.463 2.215 0.785 -1.017 -0.154 0.000 0.886 0.808 0.761 0.000 0.873 0.962 0.991 1.371 2.123 1.172 0.959 +1 0.544 -1.350 -0.754 0.636 0.688 1.030 -1.729 0.976 0.000 1.353 -0.723 -0.407 2.215 1.588 0.296 -1.119 0.000 0.981 -0.656 0.865 3.102 0.769 0.606 0.988 1.053 0.948 0.939 0.837 +0 1.037 0.854 0.339 0.543 0.675 1.020 0.228 1.326 2.173 0.529 -0.171 -0.546 
0.000 0.470 0.172 0.575 0.000 0.968 -1.496 -1.171 0.000 0.977 1.221 0.989 1.214 1.363 1.034 1.150 +1 0.669 -0.383 -1.047 0.892 0.760 1.403 -1.426 -1.164 0.000 1.645 -1.142 0.322 2.215 0.390 -0.969 -0.689 0.000 0.408 0.074 0.606 0.000 0.912 0.690 1.069 0.972 0.307 0.755 0.685 +0 0.552 1.123 0.951 1.039 -1.464 0.667 0.616 0.033 2.173 0.576 0.449 1.545 0.000 1.125 1.337 0.336 0.000 1.127 -0.101 -1.002 3.102 1.272 0.967 0.990 0.966 0.816 0.836 0.797 +1 0.933 -0.060 -1.544 0.469 -0.324 1.087 -1.162 0.156 2.173 1.023 -1.258 -1.330 0.000 0.733 -0.558 1.640 0.000 1.179 -0.261 0.827 3.102 0.754 0.896 0.985 1.039 0.863 0.916 0.800 +1 0.648 0.439 0.041 1.678 0.075 0.943 0.638 -0.162 0.000 0.774 0.645 1.562 2.215 1.878 -2.326 1.112 0.000 0.857 -0.085 -1.281 0.000 0.588 0.798 1.007 1.556 0.979 1.139 0.996 +0 1.038 -1.641 0.223 0.790 -0.829 1.051 1.667 1.001 0.000 0.937 0.075 -0.693 0.000 0.521 1.437 0.175 0.000 0.743 -0.313 -1.107 3.102 0.906 1.031 1.018 0.715 0.328 0.826 1.448 +1 1.106 0.303 -0.341 1.223 -0.646 0.469 -0.722 1.392 0.000 1.034 -0.496 0.599 1.107 1.321 0.236 1.545 2.548 0.371 -0.723 -0.797 0.000 0.588 0.677 0.996 1.128 1.056 0.942 0.773 +1 0.299 0.479 1.727 1.487 1.049 1.138 -0.706 -0.896 0.000 0.886 -1.420 -1.493 0.000 1.033 0.265 0.616 2.548 1.431 -1.125 0.675 0.000 1.363 1.149 0.981 0.942 0.733 1.075 1.601 +0 0.907 0.334 -0.775 0.580 -1.343 0.339 0.326 0.912 0.000 0.690 2.388 0.191 0.000 0.878 0.636 1.686 2.548 0.453 1.925 -0.183 0.000 0.855 0.695 0.986 0.562 0.219 0.503 0.591 +1 0.558 0.003 1.133 1.205 0.211 0.695 -1.330 0.741 1.087 1.294 -0.261 -1.009 2.215 0.388 -0.428 -0.165 0.000 1.152 0.337 1.688 0.000 0.881 1.172 0.990 1.224 1.602 1.251 1.090 +1 0.696 0.696 0.731 0.731 -1.701 1.863 0.404 -0.606 2.173 2.216 0.396 0.907 0.000 0.865 1.314 1.335 0.000 0.806 1.042 -0.025 0.000 0.871 0.753 0.992 1.298 0.788 0.895 0.843 +1 0.440 -0.663 0.928 1.982 -0.061 0.484 0.416 -1.585 2.173 1.092 1.215 1.643 1.107 0.512 1.192 0.099 0.000 0.416 0.329 1.392 0.000 0.526 0.644 1.006 1.067 0.519 1.164 0.895 +1 1.078 0.892 0.670 1.059 -0.855 0.948 1.322 -1.627 2.173 1.076 1.386 0.246 0.000 0.837 0.249 0.669 0.000 1.508 0.025 -1.268 1.551 0.836 0.914 1.451 1.094 0.974 0.858 0.788 +1 1.242 0.567 -0.166 1.531 1.725 0.626 1.722 -0.065 0.000 0.978 0.245 1.551 2.215 0.365 0.165 0.746 0.000 0.683 -0.294 -0.744 3.102 0.936 0.855 1.893 1.110 0.687 0.782 0.822 +1 0.538 1.149 1.727 0.914 1.430 0.532 -0.276 0.474 0.000 0.610 -0.806 -0.154 0.000 1.149 0.860 -0.788 2.548 0.721 1.107 -0.643 3.102 0.732 0.885 0.984 0.915 0.162 0.736 0.758 +1 1.693 0.392 1.691 1.251 1.241 1.189 0.564 -0.424 2.173 0.647 0.382 0.145 0.000 0.443 -2.715 -1.276 0.000 0.615 0.647 0.578 3.102 2.285 1.470 1.001 1.530 0.717 1.326 1.373 +0 0.917 -0.250 0.669 0.803 1.350 1.759 -2.767 -0.384 0.000 0.872 -0.621 1.370 0.000 1.085 -0.883 -1.464 1.274 1.234 -0.208 -1.544 3.102 1.104 0.949 0.992 0.783 0.331 0.660 0.634 +1 1.305 1.039 1.573 1.512 -1.052 0.812 0.768 0.675 2.173 0.775 1.466 -0.012 2.215 0.637 0.386 -0.719 0.000 0.454 1.074 0.389 0.000 0.564 0.690 1.365 1.095 0.807 0.948 0.746 +1 0.911 0.276 -0.320 1.795 -0.442 0.603 0.832 1.302 0.000 0.613 -0.271 0.813 2.215 0.752 -0.004 1.535 2.548 0.517 1.076 -1.603 0.000 0.783 0.635 0.976 0.954 0.448 0.750 0.757 +1 0.686 -0.500 0.939 1.350 -1.333 0.745 -0.845 -0.527 2.173 0.846 1.423 -1.663 0.000 0.998 -1.130 0.519 2.548 0.946 1.231 0.935 0.000 0.941 1.519 1.185 0.933 0.894 1.173 1.002 +0 1.154 -0.132 -1.172 1.205 -0.379 0.479 -1.096 -0.309 2.173 1.043 -0.651 0.596 1.107 0.975 -0.205 1.443 0.000 0.816 
-1.059 1.388 0.000 1.048 0.959 1.072 1.092 0.791 0.781 0.764 +1 0.895 -1.061 0.742 0.840 -1.061 1.095 0.918 -0.389 2.173 0.839 0.630 -1.238 0.000 0.934 0.439 -1.651 0.000 0.673 0.306 0.526 1.551 0.715 1.312 1.200 0.758 0.715 1.035 0.956 +1 1.776 1.181 -1.527 0.775 -1.263 0.720 -0.094 -0.055 2.173 0.604 0.422 0.387 0.000 0.604 1.440 0.657 2.548 0.448 1.799 1.255 0.000 0.815 0.870 0.984 0.858 0.915 0.924 0.795 +1 0.999 -2.310 0.531 0.385 -0.514 0.448 1.585 -1.032 0.000 0.688 -0.803 -1.696 0.000 0.773 -0.943 0.955 0.000 0.621 -2.074 1.410 0.000 0.843 0.813 0.979 0.653 0.223 0.535 0.582 +1 0.879 1.530 -0.715 0.825 0.221 0.556 1.070 1.175 2.173 0.480 1.791 0.360 0.000 1.072 0.322 -1.236 2.548 0.379 -1.707 -1.581 0.000 1.984 1.301 0.986 0.890 0.868 0.910 0.978 +1 2.368 0.796 0.542 1.064 1.237 0.410 1.171 -1.301 0.000 0.462 -0.457 1.680 0.000 1.201 0.601 -0.696 2.548 0.616 -0.965 -1.238 3.102 0.945 0.820 1.291 1.188 0.750 0.978 0.874 +1 1.663 -0.508 0.134 0.601 0.287 0.795 -0.459 1.634 0.000 1.157 0.111 -0.902 2.215 0.776 0.324 0.912 2.548 0.449 0.664 -1.671 0.000 0.631 0.923 1.002 0.701 1.013 0.797 0.802 +0 0.413 0.203 1.624 1.527 1.027 0.677 -0.037 -0.241 2.173 1.438 0.046 -1.649 2.215 1.186 1.332 -0.158 0.000 0.626 1.445 -0.770 0.000 0.923 0.949 0.983 1.040 1.387 0.999 1.078 +1 1.084 0.762 1.222 0.897 -1.221 0.584 0.973 0.305 0.000 0.424 -0.638 -0.397 0.000 0.739 0.724 -0.945 2.548 1.455 0.211 -0.150 0.000 0.886 0.859 1.104 0.672 0.797 0.789 0.882 +1 0.952 -0.757 1.033 0.522 -0.883 0.821 -0.303 1.364 2.173 0.752 -0.788 -0.168 0.000 1.113 0.237 -0.274 0.000 0.976 -1.176 -1.303 3.102 0.799 0.935 0.987 0.686 0.842 0.872 0.742 +1 1.502 1.209 0.702 0.759 1.018 0.895 1.605 -0.734 0.000 0.429 0.898 1.597 0.000 0.429 2.044 -1.154 0.000 0.453 2.215 0.697 0.000 1.074 0.777 0.983 0.613 0.241 0.480 0.681 +0 0.510 -0.845 -0.825 0.923 1.316 0.627 0.712 0.285 2.173 0.923 -1.091 -1.590 2.215 0.531 -2.126 -0.594 0.000 0.983 -0.569 0.180 0.000 0.896 0.926 0.987 0.875 1.622 1.032 0.863 +0 0.937 0.030 -0.446 0.814 1.215 0.240 0.395 1.715 0.000 0.525 -0.233 1.012 2.215 0.735 0.064 0.077 2.548 1.730 -0.389 -0.968 0.000 0.838 0.707 1.207 0.692 0.503 0.514 0.522 +1 1.154 -0.586 -0.910 1.279 -0.295 1.508 -0.209 1.276 2.173 0.619 -0.247 0.484 0.000 0.289 2.100 -0.482 0.000 0.553 -0.039 -0.364 3.102 1.186 0.709 0.984 1.547 0.965 0.977 0.926 +1 1.046 -0.419 -0.646 0.379 -0.816 0.884 -0.646 1.732 0.000 2.073 -1.193 -0.000 0.000 1.723 -0.541 1.309 2.548 0.396 -2.305 -0.722 0.000 1.230 1.440 0.988 1.139 0.682 1.164 1.099 +0 1.434 -0.903 -0.793 0.328 0.160 0.625 -0.750 -1.657 0.000 0.810 -0.641 1.026 2.215 0.901 0.306 0.206 2.548 0.445 -0.961 -0.075 0.000 0.813 0.902 0.988 0.900 0.772 0.714 0.667 +0 1.759 1.471 -1.264 1.191 1.650 0.904 -0.670 0.183 2.173 0.427 1.040 0.708 2.215 0.819 -1.790 -0.173 0.000 0.375 -1.798 1.201 0.000 0.581 0.807 0.988 0.815 0.996 1.383 1.565 +0 1.088 -0.611 1.523 0.421 -0.635 1.282 0.556 -1.466 2.173 1.269 -0.915 -0.515 0.000 2.838 -0.261 0.369 2.548 0.996 -0.471 1.077 0.000 0.872 0.920 0.993 1.208 2.586 1.363 1.051 +0 3.865 -1.225 -0.169 0.486 0.408 2.683 -0.363 1.606 1.087 1.958 -1.265 0.264 2.215 1.240 -2.103 -1.370 0.000 0.837 -0.928 -1.044 0.000 0.784 1.563 0.989 2.964 3.554 2.220 1.832 +0 0.928 0.878 -0.710 2.000 -0.880 0.622 -0.708 1.700 2.173 1.604 -0.162 0.867 0.000 0.733 -0.312 0.269 1.274 0.526 1.809 1.632 0.000 1.113 1.023 0.976 0.885 0.823 0.850 0.960 +0 1.040 0.987 1.700 1.159 -0.835 0.590 -0.071 0.455 0.000 0.777 0.459 0.033 2.215 0.614 1.220 0.784 0.000 1.232 0.106 -1.287 3.102 
0.888 0.942 1.151 0.934 0.834 0.690 0.737 +0 2.217 -0.194 0.202 0.677 -0.312 1.576 -0.361 1.544 2.173 0.499 -0.504 -0.912 2.215 0.954 0.392 -0.859 0.000 0.374 0.289 -1.345 0.000 0.281 1.065 0.999 0.800 1.048 1.146 0.916 +0 3.530 -1.640 -0.277 1.379 1.114 3.102 -0.125 1.149 1.087 1.597 -0.746 1.243 1.107 0.826 1.343 -1.556 0.000 1.646 -0.667 -0.759 0.000 1.990 1.895 2.903 3.876 1.104 2.520 2.291 +1 0.656 -0.074 -0.380 0.778 1.296 0.957 -0.518 -0.730 2.173 0.808 0.910 -1.476 0.000 1.184 0.268 1.486 0.000 0.786 1.260 0.584 0.000 0.840 0.850 0.988 0.846 0.838 0.894 0.760 +1 1.246 0.206 -1.344 0.424 -0.901 1.368 0.872 1.430 2.173 1.848 0.918 -0.372 0.000 1.497 0.392 0.981 2.548 1.154 -0.863 0.132 0.000 1.020 1.389 0.984 1.313 0.813 1.255 1.152 +0 0.607 0.109 1.623 0.180 -0.713 0.492 0.652 0.752 0.000 1.722 0.905 -0.711 2.215 0.814 0.403 -1.359 0.000 1.841 1.442 0.404 0.000 0.934 0.907 0.985 1.423 1.404 1.113 1.140 +0 0.686 -1.481 1.477 1.200 -1.048 0.755 -0.235 -1.156 2.173 0.328 0.687 0.285 0.000 1.227 -1.036 0.522 2.548 0.487 -0.995 -0.596 0.000 0.684 0.880 0.988 0.957 1.321 0.916 0.831 +1 0.899 -0.758 0.336 0.892 -1.491 0.954 -0.567 -1.630 0.000 1.337 -0.688 0.071 2.215 1.042 -0.162 -0.929 2.548 0.810 -0.912 1.466 0.000 1.103 0.837 1.237 0.928 1.039 0.878 0.787 +1 1.036 1.431 -0.434 0.735 -1.208 0.853 -0.021 0.667 2.173 0.657 0.803 -1.566 0.000 0.813 0.116 -0.345 0.000 0.803 1.398 1.111 0.000 1.077 1.105 0.986 0.465 0.651 0.733 0.685 +0 1.920 -0.262 -0.501 0.125 -1.455 1.435 1.362 0.847 0.000 0.709 -0.778 -1.399 0.000 0.803 1.271 -0.271 1.274 0.543 1.654 1.534 0.000 0.886 0.980 0.985 0.768 0.448 0.714 1.073 +1 0.935 -0.677 1.227 1.685 0.671 1.040 1.335 -0.993 1.087 0.531 1.327 1.499 0.000 1.149 0.405 -0.811 0.000 0.990 0.792 0.209 3.102 0.915 0.868 0.986 0.828 0.973 1.213 0.983 +1 0.289 -1.962 -0.349 0.403 -0.806 1.102 -0.800 0.764 2.173 0.793 -1.894 -1.180 0.000 0.611 -0.043 -1.295 2.548 0.590 -0.553 0.267 0.000 1.056 0.843 0.978 0.976 1.058 0.859 0.770 +1 0.929 -0.410 -1.322 0.358 0.787 0.907 -0.861 1.236 2.173 1.703 1.927 -0.007 0.000 0.589 -2.041 -0.842 0.000 1.586 -1.496 -1.547 0.000 0.676 1.083 0.991 0.591 0.601 0.731 0.779 +0 0.604 -0.716 -0.886 0.424 0.105 0.489 -0.521 -0.287 2.173 0.617 -0.564 0.672 1.107 1.298 -1.322 1.363 0.000 0.868 -1.460 -1.005 0.000 1.006 1.013 0.982 0.710 0.615 0.708 0.738 +0 1.520 0.177 -0.041 0.385 -1.280 0.763 -0.247 -1.729 0.000 1.466 -0.945 0.210 2.215 0.832 -0.771 -1.434 0.000 1.637 -1.318 1.574 3.102 0.576 0.761 0.986 0.943 1.387 1.006 0.971 +1 1.015 0.441 0.518 0.486 -0.352 0.638 -1.062 1.286 2.173 0.600 0.017 -0.315 0.000 1.295 -0.793 -1.104 1.274 0.554 0.676 0.687 0.000 0.665 0.964 0.987 0.947 0.951 0.808 0.722 +1 0.930 -0.507 -0.821 1.348 1.604 0.279 -0.406 -0.183 0.000 0.620 0.573 -0.425 2.215 0.952 0.460 1.508 1.274 0.958 -1.824 0.475 0.000 0.912 1.111 1.267 0.839 0.805 0.898 0.846 +0 1.520 -0.439 1.458 0.798 1.134 0.687 -0.530 -0.491 0.000 0.494 0.426 0.417 2.215 0.316 1.026 -0.442 2.548 0.606 0.683 0.074 0.000 0.866 0.694 0.986 0.731 0.329 0.565 0.661 +1 0.802 -0.480 0.089 0.713 1.225 2.125 0.779 0.466 0.000 2.200 0.875 -1.341 2.215 1.891 0.187 -1.631 2.548 1.643 1.121 -0.408 0.000 0.889 1.109 0.991 0.931 0.951 0.994 0.924 +0 0.547 -1.312 -0.111 1.370 1.409 1.299 2.403 -0.697 0.000 1.469 0.111 -1.584 2.215 2.116 0.956 0.545 0.000 0.945 0.363 0.680 3.102 3.481 2.120 1.175 1.273 0.964 1.864 2.242 +0 0.485 0.296 1.047 0.483 0.932 0.500 -0.995 -0.115 2.173 0.750 0.038 -1.036 2.215 0.655 1.879 -1.513 0.000 0.578 0.760 -1.625 0.000 0.697 0.729 0.987 
0.874 0.827 0.685 0.607 +0 0.806 -0.819 -0.676 0.840 -1.487 1.184 -0.611 -0.043 2.173 1.369 -0.494 1.612 2.215 1.187 -0.002 0.484 0.000 0.483 -0.102 1.476 0.000 0.653 0.913 0.989 1.132 1.870 1.092 0.940 +0 0.819 0.204 0.528 0.681 0.399 0.364 -0.645 -0.344 1.087 1.127 -0.122 -0.919 2.215 0.855 -0.996 1.038 0.000 1.343 -0.409 -1.585 0.000 0.904 1.033 0.990 0.902 0.530 0.859 0.903 +1 0.808 0.326 -1.705 0.460 0.211 0.703 -0.672 0.739 2.173 0.907 0.208 -0.624 0.000 1.145 -0.488 -1.145 0.000 0.995 -0.784 0.255 0.000 0.927 0.718 0.989 0.740 0.531 0.760 0.669 +1 0.794 -1.488 -0.678 0.386 -0.735 0.667 0.554 1.703 2.173 0.698 0.322 0.659 0.000 0.555 -0.807 0.380 0.000 0.616 -0.439 1.695 3.102 0.662 0.928 0.979 0.584 0.387 0.652 0.665 +1 0.299 1.251 -1.641 0.713 -0.331 0.875 -0.458 -1.640 0.000 1.323 -0.835 0.371 0.000 0.711 -1.060 1.310 2.548 1.053 -0.014 -0.401 3.102 2.266 1.302 0.991 0.546 0.773 0.889 0.779 +1 0.770 0.238 -1.724 0.857 -0.297 1.253 -0.041 0.474 1.087 1.185 1.875 1.483 0.000 1.244 -0.300 -0.849 0.000 1.078 0.390 -0.298 1.551 0.632 0.565 1.080 1.016 0.850 0.877 0.765 +1 0.729 -0.410 -0.092 0.613 -1.276 1.212 0.098 1.722 2.173 0.845 0.853 0.273 0.000 0.736 0.321 -0.329 0.000 0.539 -0.419 0.514 3.102 0.703 0.553 0.993 0.951 0.803 0.840 0.733 +1 1.380 1.749 1.237 0.261 -0.646 1.317 0.820 0.146 2.173 1.013 2.789 -1.036 0.000 0.890 1.811 -1.211 0.000 0.919 0.822 1.088 1.551 0.702 1.069 0.992 1.161 0.878 1.293 1.062 +0 1.212 -0.170 -0.364 0.344 -0.602 0.649 -1.609 0.900 0.000 1.054 -0.528 -0.579 2.215 1.369 -0.245 -1.438 0.000 2.053 0.221 0.656 3.102 1.092 1.030 0.986 1.039 1.313 0.977 0.859 +0 2.060 0.441 0.342 0.723 -0.460 1.045 0.925 -1.156 0.000 1.269 -0.149 0.971 2.215 0.617 0.134 -1.540 0.000 0.745 -0.585 -0.977 3.102 0.761 0.753 1.117 1.061 0.897 0.920 0.955 +1 0.897 2.121 0.592 1.129 1.328 1.486 0.322 -1.045 0.000 1.498 1.112 0.683 1.107 0.730 -1.578 -1.656 0.000 0.646 -1.104 -0.347 3.102 1.277 1.218 0.984 0.790 1.568 1.331 1.300 +1 0.911 1.825 1.033 0.276 0.217 0.779 0.936 -0.498 0.000 0.897 0.972 -1.069 0.000 1.146 1.040 1.347 2.548 1.413 0.870 0.427 1.551 0.875 1.004 0.999 0.625 0.719 0.818 0.741 +0 0.568 0.545 -1.368 3.152 -0.728 1.429 0.490 0.741 2.173 0.530 0.157 0.252 0.000 1.888 1.186 1.644 0.000 1.628 1.345 -0.333 3.102 0.962 1.251 1.012 1.860 1.635 1.343 1.246 +0 0.495 -0.522 0.831 0.778 -1.338 0.416 -1.958 -0.059 0.000 0.588 0.936 -1.432 1.107 0.561 0.428 -0.195 1.274 0.465 1.257 0.828 0.000 1.922 1.177 0.986 1.067 0.568 0.925 0.873 +1 2.287 -0.206 -0.855 1.496 0.296 1.100 0.074 0.650 0.000 0.942 0.181 -1.407 0.000 0.991 -0.509 1.578 2.548 0.374 -0.678 -1.486 0.000 1.023 0.933 2.208 1.345 1.049 0.985 0.934 +1 0.487 1.476 -0.178 0.848 -1.075 0.872 0.103 0.873 0.000 1.173 0.796 -0.840 2.215 1.040 0.547 1.395 0.000 1.247 0.393 0.039 3.102 0.886 0.907 0.981 0.624 0.800 0.918 0.798 +1 1.847 -0.658 -0.506 0.289 -1.092 1.252 -0.533 1.050 2.173 0.590 -1.709 1.563 0.000 0.356 -1.737 -1.208 0.000 0.644 -1.067 -1.351 3.102 0.743 0.921 0.989 0.586 0.867 0.917 0.763 +1 0.916 -1.572 1.325 0.251 -0.371 0.624 -1.203 -0.810 0.000 1.074 -0.130 1.187 2.215 1.870 -0.493 0.160 2.548 0.854 -1.943 -1.216 0.000 0.768 1.293 0.992 0.778 1.242 1.104 0.905 +1 0.632 1.011 -1.116 1.253 1.276 0.877 0.580 0.117 2.173 1.001 0.446 -0.349 0.000 1.070 0.747 -1.429 2.548 1.088 -0.787 1.509 0.000 1.703 1.239 1.028 1.034 1.196 0.965 0.906 +1 0.496 -0.645 -0.882 0.955 1.070 0.602 0.041 -1.731 0.000 1.136 -0.089 -0.037 2.215 0.682 1.005 0.572 2.548 0.638 0.757 -1.152 0.000 0.916 1.059 0.986 0.949 0.768 0.880 
0.814 +0 1.619 0.557 -1.233 1.899 -0.925 2.482 0.130 0.869 1.087 1.486 0.724 -0.510 2.215 0.758 0.402 -0.067 0.000 0.550 1.708 -1.484 0.000 0.933 0.821 0.976 2.565 2.818 1.887 1.428 +1 0.610 -1.762 -0.635 0.832 0.164 0.988 -0.941 1.129 0.000 0.933 -0.445 -0.569 2.215 0.874 0.405 -1.210 0.000 0.700 2.340 -0.587 0.000 1.158 0.946 0.996 0.661 0.712 0.628 0.639 +0 0.951 1.868 -0.565 1.429 -1.374 1.180 0.888 0.859 2.173 0.590 -0.433 0.306 2.215 0.705 0.993 -1.305 0.000 0.960 1.256 0.216 0.000 0.910 1.003 1.075 1.473 1.065 1.261 0.995 +0 1.031 0.730 0.885 1.183 -0.781 0.457 1.358 -0.144 2.173 0.907 0.032 -1.634 2.215 0.618 0.518 -0.807 0.000 0.673 1.537 1.003 0.000 0.860 0.852 1.527 0.875 1.147 0.810 0.694 +1 0.801 -0.320 -0.450 0.124 -1.296 0.862 0.546 0.511 1.087 1.553 0.375 1.646 0.000 1.094 0.281 0.029 0.000 0.956 1.089 1.405 3.102 0.861 0.879 0.993 0.728 0.785 0.690 0.643 +1 0.748 1.009 -0.466 1.164 1.214 0.684 1.051 1.639 2.173 0.649 0.309 0.300 0.000 0.696 -0.611 -0.612 1.274 0.600 -2.133 0.054 0.000 1.630 1.009 1.290 0.807 1.141 1.121 1.083 +1 1.193 -1.403 -0.085 0.816 -1.139 0.949 -0.738 0.008 2.173 1.086 -1.325 -1.530 0.000 1.686 -2.134 0.954 0.000 0.816 -0.341 -0.807 3.102 1.944 1.422 1.111 0.870 0.643 1.204 1.008 +1 1.839 -1.074 1.122 0.806 1.498 1.029 -1.038 -0.306 2.173 0.822 1.729 -0.471 0.000 1.135 -0.985 -1.399 0.000 1.271 -1.652 0.831 0.000 1.349 0.839 1.001 1.404 0.705 0.919 0.855 +1 1.063 1.222 0.546 1.183 1.151 1.248 -0.357 0.025 0.000 1.352 0.337 -1.118 2.215 0.570 0.225 1.504 2.548 0.389 1.878 0.147 0.000 1.827 1.229 0.981 1.259 0.657 1.032 1.005 +1 1.111 -1.797 0.036 0.529 -0.940 0.751 -1.832 0.580 0.000 0.509 -1.418 1.428 2.215 1.410 -0.030 -1.366 2.548 0.694 -0.463 1.197 0.000 0.961 0.685 0.986 1.125 0.874 0.877 0.822 +1 0.436 1.636 1.156 0.631 -0.584 0.744 0.446 1.137 1.087 0.955 0.376 -1.404 1.107 0.816 0.640 -0.252 0.000 0.622 0.678 0.130 0.000 0.558 0.922 0.981 0.762 0.933 0.806 0.689 +1 1.699 0.749 0.180 0.391 1.545 0.379 -1.137 -0.713 2.173 1.090 0.368 -1.426 2.215 0.672 -0.754 0.404 0.000 0.488 -0.996 1.387 0.000 0.503 0.933 1.064 1.051 0.976 0.902 0.789 +1 0.712 1.437 -0.315 0.527 -1.501 0.490 -1.566 -0.437 1.087 0.450 -0.086 0.472 0.000 0.632 -0.536 1.711 2.548 0.972 0.194 1.352 0.000 0.630 0.962 0.992 1.195 0.740 1.485 1.142 +1 1.067 0.978 0.296 0.821 1.644 1.242 0.979 1.709 2.173 0.908 0.877 -0.734 0.000 0.726 1.093 0.830 2.548 1.254 -0.110 -0.301 0.000 0.919 0.947 1.216 1.010 0.853 0.951 0.848 +1 2.015 -0.442 -0.686 0.868 0.486 0.329 1.213 -1.220 0.000 1.093 0.182 1.307 2.215 0.429 -0.187 0.882 0.000 0.395 0.669 -0.501 0.000 0.828 0.740 1.596 0.855 0.595 0.861 0.756 +1 0.369 -1.946 -1.683 0.662 0.100 0.696 -1.520 -0.623 0.000 1.639 -0.705 1.190 0.000 1.087 -2.389 -0.667 0.000 1.033 -0.630 -1.279 3.102 0.897 0.920 0.980 0.587 0.720 0.815 0.735 +1 1.133 0.679 -1.641 1.875 -1.571 1.967 1.177 0.287 0.000 0.867 2.400 1.151 0.000 1.338 -0.202 -1.070 2.548 1.564 -1.309 -0.843 3.102 1.791 1.956 1.001 1.346 0.836 1.803 1.601 +1 1.279 -0.508 -1.397 0.827 -0.518 0.955 -0.014 -0.854 1.087 0.733 -1.671 0.266 0.000 0.906 -0.311 1.592 2.548 0.952 1.784 0.625 0.000 0.826 0.916 1.015 0.691 0.953 0.943 0.818 +0 0.593 1.520 0.195 0.986 -1.719 0.901 0.099 0.068 2.173 0.869 1.364 -0.191 0.000 1.478 -0.479 1.319 2.548 1.914 -0.832 -1.567 0.000 0.769 1.118 1.047 1.300 1.376 1.167 1.046 +1 0.806 0.343 0.691 1.028 -1.710 0.876 -0.303 0.237 2.173 0.912 1.447 -0.795 0.000 0.597 -0.293 -1.563 1.274 0.397 0.279 -1.466 0.000 0.653 0.749 1.047 0.979 0.900 0.866 0.789 +1 0.645 -1.552 0.848 
0.814 -0.964 1.174 -1.072 -1.720 2.173 1.398 -0.661 -1.308 2.215 2.026 -0.015 0.443 0.000 2.718 0.270 -0.077 0.000 0.935 1.633 1.002 0.872 0.784 1.289 1.105 +1 0.862 -0.460 0.781 1.114 -0.301 0.960 -0.170 -1.666 0.000 1.435 -1.251 0.024 2.215 0.969 -1.776 1.203 0.000 0.823 -0.013 -0.717 3.102 1.978 1.305 1.123 0.867 0.902 1.168 0.998 +1 0.799 -0.496 1.631 0.505 1.055 2.129 -0.470 0.093 0.000 1.757 -0.395 -1.518 0.000 1.672 0.337 -1.476 2.548 1.597 0.961 0.797 0.000 0.937 0.959 0.980 0.732 0.636 1.127 1.001 +1 1.883 0.739 -1.420 1.654 0.888 0.328 0.103 -1.184 0.000 0.646 0.844 -0.333 0.000 1.210 -0.167 -0.383 1.274 1.231 -0.634 0.686 3.102 0.779 0.938 2.134 1.481 0.813 1.119 0.928 +0 1.042 -1.796 -1.740 0.744 -0.539 1.752 1.248 0.027 0.000 2.031 -1.125 1.676 2.215 0.516 1.901 -0.225 0.000 0.620 0.533 1.352 3.102 0.824 0.944 1.077 1.012 1.079 2.281 2.187 +1 1.763 1.357 -1.215 0.237 -0.008 0.816 1.278 0.268 2.173 0.394 0.314 0.241 0.000 0.540 2.243 1.471 0.000 0.847 0.376 1.437 1.551 1.109 0.881 0.989 0.683 0.854 0.753 0.699 +0 1.048 0.167 1.301 0.879 -0.834 0.753 0.035 -0.849 0.000 0.995 -0.663 -0.531 0.000 2.804 -1.293 1.050 0.000 1.763 -0.006 0.176 3.102 0.852 0.989 1.248 0.719 1.032 0.749 0.749 +0 0.772 -1.315 0.988 0.614 -0.389 1.371 -1.362 -0.232 2.173 0.884 -2.188 1.679 0.000 1.084 -2.520 -1.257 0.000 1.347 -0.988 1.448 3.102 0.815 0.912 0.983 0.891 1.442 1.169 0.942 +1 0.830 1.685 -0.317 1.009 0.743 0.922 -0.428 1.680 0.000 1.237 0.735 0.026 2.215 1.311 1.807 1.113 0.000 1.210 1.379 -1.370 0.000 0.807 0.943 1.035 0.855 0.754 0.954 1.042 +0 0.438 -0.581 -1.598 1.106 0.362 0.900 -0.164 -1.394 2.173 0.636 0.517 0.367 0.000 0.803 0.589 -1.005 2.548 0.736 0.008 1.362 0.000 0.735 0.980 0.989 0.747 0.572 0.683 0.647 +1 1.208 -1.938 0.301 0.617 1.694 0.651 0.087 0.301 1.087 0.737 -1.155 -1.454 0.000 0.841 -0.535 -1.140 2.548 0.465 -1.413 1.263 0.000 0.525 1.045 1.137 0.941 0.943 0.948 0.797 +0 1.009 0.489 -0.344 0.186 1.110 1.015 0.395 -1.268 1.087 0.755 0.124 0.525 1.107 0.407 -0.249 -0.562 0.000 0.779 -0.857 1.550 0.000 0.635 0.807 0.988 0.750 1.298 0.814 0.732 +0 0.332 -0.778 -0.149 1.561 1.368 0.991 -0.288 0.515 2.173 0.895 -0.173 -0.683 1.107 0.672 -0.894 -0.348 0.000 1.190 -2.350 -1.613 0.000 0.744 0.926 0.988 0.926 1.224 0.836 0.741 +1 0.363 1.356 1.085 1.256 0.275 1.007 0.365 -1.242 0.000 1.293 0.655 0.687 2.215 0.992 -0.142 -0.724 0.000 0.774 1.065 -1.575 3.102 0.941 0.766 0.986 0.672 0.853 0.945 0.859 +0 1.176 0.484 -1.026 1.331 -1.547 0.532 0.422 0.827 2.173 1.039 -1.007 1.358 1.107 1.281 -0.297 -0.239 0.000 1.843 0.669 -0.376 0.000 0.990 0.872 0.996 1.120 1.008 0.909 0.888 +1 1.259 -1.345 1.469 0.534 -0.900 1.459 -2.898 0.759 0.000 2.024 -1.099 -0.817 1.107 0.855 -0.624 1.630 0.000 2.036 -0.473 -0.282 3.102 3.096 2.726 0.990 1.076 0.998 2.072 1.571 +0 1.235 0.497 -0.416 0.199 0.552 0.635 1.995 -1.100 0.000 0.891 0.123 0.941 1.107 1.358 -1.445 0.873 0.000 0.998 0.956 -0.477 0.000 0.846 0.884 0.989 0.847 0.636 0.878 0.827 +1 0.910 1.750 -0.856 1.089 -1.368 0.842 0.083 1.142 2.173 0.472 0.358 0.473 2.215 0.292 1.583 -0.477 0.000 0.414 2.071 -0.488 0.000 0.914 0.811 0.976 1.151 0.543 1.279 1.083 +0 2.233 0.447 1.075 1.085 -0.234 2.101 -0.109 -0.154 0.000 1.976 -0.249 1.414 1.107 1.123 0.113 -0.847 0.000 2.001 0.377 -1.424 3.102 0.843 0.785 1.993 1.519 1.184 1.154 1.033 +1 0.355 -2.258 -0.727 0.916 1.169 1.029 -1.102 -0.624 2.173 0.416 -2.371 1.191 0.000 0.560 -0.656 -0.320 0.000 0.681 1.521 1.707 0.000 1.020 0.995 0.991 0.572 0.749 0.660 0.634 +0 0.312 1.518 -0.017 1.033 1.697 1.179 
0.583 0.073 2.173 0.873 -0.603 -1.708 2.215 0.631 -1.421 1.234 0.000 0.601 0.526 -0.828 0.000 1.113 0.799 0.989 1.036 1.773 1.041 0.892 +0 1.134 0.540 0.211 0.764 0.174 1.170 -0.832 -1.057 2.173 1.225 -0.467 1.597 0.000 1.099 0.245 0.907 2.548 0.786 -0.703 0.014 0.000 1.286 0.988 0.983 1.345 1.611 1.048 0.962 +1 1.043 -0.499 1.052 0.746 0.285 0.667 -0.071 -1.052 2.173 0.662 2.284 -1.676 0.000 0.873 -0.518 0.084 1.274 1.043 -2.102 0.883 0.000 0.970 0.943 0.989 0.947 0.847 0.902 0.899 +1 0.409 1.029 -0.046 1.288 0.841 0.940 0.643 -1.218 0.000 1.298 0.371 0.025 2.215 0.804 1.194 -1.713 0.000 0.495 -0.927 1.021 3.102 0.855 0.979 0.987 0.797 0.820 0.941 0.859 +0 1.599 -0.018 0.014 0.778 -1.280 0.849 -0.839 1.739 2.173 1.350 0.311 -0.859 1.107 1.771 -0.740 0.658 0.000 0.551 -0.125 1.633 0.000 0.916 1.015 1.420 0.977 1.491 1.081 0.995 +0 1.974 -0.448 -0.526 1.002 -0.852 0.876 -0.634 1.166 2.173 0.415 -1.481 0.271 2.215 0.901 0.037 1.547 0.000 0.607 -0.573 0.268 0.000 0.804 0.710 0.987 1.401 0.759 0.983 0.820 +0 1.430 0.120 0.450 1.019 -1.307 0.893 -0.345 -1.481 1.087 1.210 0.138 -0.590 2.215 0.716 1.340 1.072 0.000 0.855 -0.465 1.272 0.000 1.047 1.110 1.673 1.103 1.165 0.946 0.904 +0 1.815 0.926 -0.662 0.132 -0.394 1.203 1.021 0.705 0.000 0.851 0.540 1.095 0.000 1.735 0.749 -1.074 2.548 0.630 0.138 1.638 3.102 0.900 0.753 0.979 0.686 0.579 0.938 0.915 +0 0.841 0.636 1.168 1.073 0.417 0.836 0.149 -1.182 0.000 0.829 -0.105 -1.723 2.215 0.891 1.469 0.202 0.000 0.968 0.309 -0.344 3.102 1.924 1.125 0.986 0.824 0.790 0.852 0.823 +1 1.313 -0.893 -1.258 0.920 -0.460 0.413 0.123 1.358 0.000 1.160 0.363 0.554 2.215 0.324 0.818 0.149 2.548 0.417 0.767 -0.727 0.000 0.684 0.812 1.003 0.767 0.289 0.863 0.698 +1 0.986 -0.412 -1.647 0.678 0.520 0.910 -0.711 0.528 0.000 1.273 0.507 -1.145 2.215 0.798 -0.469 -0.136 2.548 0.373 -0.080 -1.123 0.000 0.930 0.668 1.051 0.975 1.025 0.889 0.781 +1 0.382 -1.484 -0.178 0.268 -0.258 0.791 0.137 -1.506 1.087 1.132 -0.921 0.772 2.215 1.268 0.047 -0.749 0.000 1.064 0.386 0.755 0.000 1.279 1.041 0.983 0.876 1.466 0.989 0.828 +0 1.480 -1.407 -1.710 0.685 -0.874 0.381 0.035 -0.047 2.173 0.554 -1.361 0.097 2.215 0.509 0.565 1.204 0.000 0.724 -0.637 0.939 0.000 0.526 0.704 0.986 1.012 0.536 0.742 0.704 +1 1.057 -0.750 -1.238 1.709 1.552 0.723 1.124 0.143 2.173 0.590 1.808 0.371 0.000 0.488 -0.659 1.666 0.000 0.995 -0.540 -0.599 3.102 0.931 1.075 1.094 1.717 1.068 1.143 1.120 +1 0.673 -0.030 -0.051 1.008 -1.395 0.732 -0.725 0.806 0.000 0.882 0.367 -1.089 2.215 1.510 0.104 0.578 0.000 0.791 -0.843 -0.812 3.102 0.910 0.978 1.067 0.661 0.598 0.895 0.789 +0 0.509 2.377 -1.043 0.824 1.685 1.132 0.635 -0.696 2.173 1.184 0.222 0.997 2.215 1.313 -0.632 0.780 0.000 0.709 0.377 -0.125 0.000 1.008 0.850 0.997 1.015 1.738 1.069 1.045 +0 0.674 0.092 0.431 0.362 1.076 1.068 0.104 0.043 2.173 1.022 -1.504 -1.682 0.000 0.626 -0.494 0.535 2.548 1.326 -1.342 -0.858 0.000 1.027 0.969 0.986 0.922 0.551 1.123 0.919 +1 0.557 -0.410 -0.034 0.825 0.782 1.324 0.211 -0.554 2.173 0.481 -0.954 0.556 0.000 1.195 -0.279 1.252 0.000 1.071 -0.192 -1.481 1.551 0.792 0.722 0.987 1.453 0.976 1.008 0.919 +1 0.301 2.128 1.126 1.421 0.364 0.580 -0.401 -0.190 2.173 0.538 0.145 -1.080 0.000 1.145 -1.000 -1.047 2.548 0.936 -2.157 0.859 0.000 0.868 0.839 0.981 1.411 0.795 0.996 0.851 +0 1.192 -0.449 -1.693 1.264 -1.267 1.166 1.036 0.275 0.000 0.867 0.355 -0.236 0.000 1.510 0.639 1.220 1.274 0.895 0.680 -0.886 3.102 0.814 1.119 0.988 0.665 0.843 0.724 0.749 +0 0.750 1.800 -1.700 1.218 0.896 0.631 -0.063 0.765 2.173 0.960 
0.526 -1.076 2.215 0.805 -0.068 0.109 0.000 0.411 0.958 -1.465 0.000 0.751 0.737 0.988 1.041 1.194 0.937 0.787 +1 0.705 -0.898 1.526 0.626 0.688 0.850 0.157 0.353 0.000 1.342 0.490 -1.238 2.215 0.843 -1.412 -0.721 0.000 0.681 0.196 1.248 3.102 0.678 0.604 0.995 0.993 0.686 0.826 0.735 +1 0.661 -2.283 -0.198 1.152 -0.090 1.541 -1.150 -1.728 2.173 0.944 -1.168 0.323 0.000 0.884 -0.314 0.595 2.548 0.903 -0.209 -1.041 0.000 1.127 0.778 0.996 1.496 1.399 1.094 0.936 +0 2.199 1.614 -0.241 1.571 -0.502 0.936 -0.010 1.277 2.173 0.913 1.460 1.511 0.000 0.798 1.263 -1.424 2.548 0.822 0.400 0.860 0.000 0.869 0.968 0.993 0.934 1.073 1.298 1.108 +1 1.191 0.989 0.518 1.176 1.150 0.413 -0.268 1.063 0.000 1.446 -0.464 -0.603 2.215 0.375 -0.248 -0.992 2.548 0.940 0.615 -1.255 0.000 0.966 1.125 0.990 0.825 0.281 1.133 0.941 +1 1.050 -1.317 -0.205 0.725 0.486 0.455 -0.260 0.756 0.000 1.160 -0.952 1.435 1.107 0.314 -0.962 0.007 0.000 0.669 1.544 -0.734 0.000 0.813 1.333 0.992 1.220 1.060 1.010 1.092 +0 0.580 -1.170 0.179 3.078 -0.363 1.778 -0.790 1.458 1.087 0.988 -1.800 1.164 0.000 0.672 -0.779 -0.365 1.274 0.875 -1.647 -0.785 0.000 1.193 0.938 0.985 2.092 1.358 1.323 1.211 +1 1.011 -1.861 -0.189 1.571 -0.818 1.393 -0.656 -1.475 2.173 1.303 1.281 0.233 2.215 1.160 -2.356 0.562 0.000 2.296 -0.989 1.227 0.000 0.947 1.736 0.989 2.881 3.037 2.301 1.989 +0 0.720 0.723 -0.954 0.482 -0.483 0.471 0.969 0.947 2.173 0.605 0.106 -1.387 2.215 0.400 -0.362 0.493 0.000 0.714 0.703 0.242 0.000 0.591 0.630 0.992 0.815 0.763 0.696 0.642 +1 0.561 -0.420 1.258 0.147 -0.322 0.740 0.494 -1.164 0.000 1.141 0.799 -0.712 0.000 0.679 0.136 0.328 1.274 0.663 -1.141 1.570 3.102 0.836 0.937 0.991 0.754 0.629 0.806 0.724 +1 1.002 0.494 0.151 1.539 0.876 1.007 1.178 -0.989 1.087 0.755 0.040 1.274 0.000 0.646 2.427 -0.058 0.000 1.261 0.179 -1.550 3.102 2.097 1.403 1.047 1.371 0.836 1.051 1.032 +1 0.883 0.562 1.699 1.298 1.200 1.130 -0.247 -0.472 2.173 0.774 -0.236 0.480 0.000 0.961 0.332 -1.219 1.274 0.435 -0.170 -1.701 0.000 0.698 0.934 0.989 0.838 0.905 1.117 0.937 +0 0.903 1.260 -1.738 0.151 1.045 0.684 -0.091 0.408 0.000 0.776 -0.095 -0.429 2.215 0.458 -1.085 0.819 0.000 0.377 -2.003 -1.166 0.000 0.686 0.829 0.991 0.842 0.623 0.636 0.686 +1 2.631 1.183 0.485 0.401 -0.144 0.981 0.545 1.675 2.173 1.456 -1.294 -1.231 0.000 0.747 0.623 -0.938 0.000 0.892 0.712 0.207 3.102 0.855 1.026 0.994 0.488 0.972 0.887 0.779 +0 0.402 -1.692 0.078 0.632 -1.720 0.785 -1.067 -0.747 0.000 0.898 0.773 0.695 2.215 1.291 0.563 1.532 2.548 0.542 2.256 0.179 0.000 0.893 0.899 0.995 0.940 0.790 0.758 0.807 +1 0.828 -0.961 -0.164 1.830 -1.374 1.110 -0.991 0.660 1.087 1.310 0.073 -1.368 2.215 0.786 -1.699 0.160 0.000 0.438 1.429 0.048 0.000 1.888 1.441 1.511 1.143 1.987 1.280 1.174 +0 1.285 -1.618 -0.050 0.488 -1.005 0.748 -1.306 -0.923 0.000 1.028 1.738 1.213 0.000 1.776 1.136 1.594 2.548 1.381 -0.005 0.220 3.102 4.556 2.569 0.988 2.740 1.386 1.788 2.032 +1 0.982 1.026 0.656 1.963 -1.379 0.553 0.280 0.942 2.173 0.898 2.721 -0.860 0.000 0.676 -0.067 -0.614 0.000 0.800 -0.612 -1.471 3.102 2.389 1.892 1.858 1.169 0.689 1.346 1.199 +1 1.091 1.280 1.624 1.175 -1.139 0.951 0.962 0.185 2.173 0.955 2.604 0.895 0.000 0.707 0.810 -1.679 0.000 1.522 0.267 -0.433 3.102 1.007 1.012 0.989 1.234 0.795 0.944 0.833 +1 0.966 0.273 0.402 1.043 1.419 1.141 1.634 1.460 2.173 1.050 -0.243 -0.726 0.000 1.049 1.112 0.145 0.000 0.630 1.141 -0.288 3.102 1.044 1.177 1.103 0.724 0.906 0.847 0.822 +0 0.448 1.679 1.504 1.756 -0.115 0.888 -0.269 -1.070 0.000 0.901 -0.260 0.855 2.215 0.306 
0.844 0.935 2.548 0.443 -1.456 -1.329 0.000 0.804 1.111 1.221 0.656 0.356 0.872 1.086 +0 0.879 -0.948 0.250 0.758 1.048 1.244 0.589 -1.733 1.087 1.253 -0.218 -0.373 0.000 0.380 0.778 -0.974 0.000 0.706 -0.042 0.226 3.102 0.811 0.576 0.981 1.862 1.026 1.188 1.077 +0 1.412 -0.519 0.589 0.874 -0.316 0.785 0.400 -0.827 0.000 1.766 -0.403 1.612 2.215 0.455 0.529 -0.173 1.274 0.594 1.253 0.268 0.000 1.059 0.620 1.120 1.295 1.073 0.940 0.938 +1 1.307 0.127 0.223 0.472 1.399 1.694 -2.066 0.511 0.000 2.344 0.364 -1.028 2.215 2.016 0.062 -1.515 2.548 0.932 -0.255 1.629 0.000 0.659 0.908 0.985 1.061 1.043 1.034 0.810 +1 0.994 0.562 1.612 0.748 0.186 0.504 0.049 -0.551 2.173 1.022 -0.060 -1.419 0.000 0.479 2.088 -0.185 0.000 1.839 0.059 0.436 3.102 0.768 0.965 1.146 0.792 0.791 0.793 0.721 +0 0.398 0.529 -0.313 5.039 -0.763 1.475 0.033 0.790 0.000 1.848 -0.333 1.128 0.000 1.571 1.478 -1.056 2.548 2.096 0.345 1.138 3.102 1.190 0.909 0.982 0.811 1.555 1.276 1.516 +1 1.436 0.204 0.466 1.121 1.171 1.136 0.153 -1.441 1.087 1.030 1.459 0.235 0.000 1.047 -0.051 -0.516 0.000 0.736 -0.404 -1.238 3.102 0.994 1.067 1.043 0.797 0.361 0.845 0.823 +0 0.423 -0.607 -0.638 0.273 1.480 0.721 -0.608 0.209 2.173 0.977 -0.514 -1.727 2.215 0.690 0.439 0.899 0.000 0.966 1.936 1.378 0.000 1.024 1.432 0.988 0.909 1.217 1.192 0.957 +1 1.279 1.695 0.754 0.976 -1.491 0.699 -1.401 1.398 0.000 0.778 -0.389 -0.673 2.215 0.887 0.547 -1.413 0.000 2.165 0.802 0.136 1.551 0.527 0.912 1.393 1.513 1.162 1.125 0.906 +0 1.500 -1.729 -0.283 0.019 -0.582 0.464 -0.285 -1.665 0.000 0.895 0.072 1.251 2.215 0.572 -1.110 0.721 0.000 1.075 0.147 -0.737 3.102 0.895 0.792 0.998 1.217 0.865 0.892 0.774 +0 1.405 -0.187 1.623 1.148 0.628 0.562 1.633 -1.131 0.000 0.764 1.009 0.257 2.215 0.606 0.696 1.296 0.000 1.730 -0.361 -0.255 1.551 0.981 0.928 1.374 1.078 0.954 0.903 0.960 +0 0.960 -0.237 1.295 0.514 0.684 0.414 0.725 1.453 0.000 0.805 -1.280 -0.623 0.000 1.114 -0.283 -0.315 2.548 0.631 -0.389 0.667 1.551 0.952 0.890 0.979 0.502 0.498 0.568 0.597 +1 0.524 0.870 1.305 1.065 -0.394 2.409 -0.012 -1.642 1.087 3.340 1.512 0.178 0.000 0.686 0.325 -0.480 0.000 1.006 -0.100 0.677 0.000 0.822 0.713 1.034 1.519 0.829 0.971 0.888 +1 1.047 -0.444 -0.637 0.346 0.503 0.951 -0.460 0.555 0.000 0.835 -0.422 1.266 0.000 0.888 0.164 -0.336 2.548 2.631 0.261 -1.469 3.102 1.130 1.077 0.988 1.110 0.998 1.022 0.918 +1 0.649 -1.028 0.278 0.624 -1.366 0.968 -0.061 0.972 0.000 0.731 0.831 -0.275 2.215 0.751 -0.307 -1.193 0.000 0.589 -0.952 -1.349 0.000 1.196 0.874 0.990 1.242 0.697 0.790 0.825 +0 1.609 -0.811 -1.614 0.527 -0.440 0.894 -0.232 1.216 2.173 0.788 -0.564 -0.418 0.000 0.465 0.111 0.677 1.274 1.448 -1.666 -0.272 0.000 1.089 0.938 1.112 0.960 0.403 0.947 0.865 +0 0.633 -0.569 -1.082 0.861 -0.066 0.607 1.311 -1.466 2.173 0.436 1.565 -0.016 0.000 1.002 -0.084 1.031 1.274 0.724 -1.265 -0.342 0.000 1.079 0.804 0.991 1.530 1.062 1.078 0.923 +1 2.545 1.411 -1.622 0.611 1.254 2.032 -0.686 0.063 0.000 1.202 -0.022 -1.354 2.215 0.902 1.518 1.417 0.000 1.004 0.650 -0.342 3.102 0.856 1.004 0.984 0.973 0.884 0.985 0.828 +1 2.268 1.403 -0.518 0.437 -0.299 1.423 -1.183 1.099 0.000 0.682 1.323 -0.856 0.000 0.839 -0.139 -0.911 2.548 0.770 -0.330 0.449 3.102 0.926 0.833 0.992 0.877 0.583 0.746 0.689 +1 0.522 -1.186 0.970 1.017 -1.117 1.086 2.217 -0.099 0.000 2.026 -0.854 1.623 1.107 0.789 0.406 0.328 0.000 0.914 -0.982 0.442 0.000 0.853 0.752 0.986 0.936 0.843 0.881 0.803 +0 0.844 0.260 -0.758 0.919 1.612 0.726 2.215 -0.165 0.000 0.581 -0.146 0.499 0.000 0.656 1.367 0.538 0.000 
1.443 -0.281 1.558 3.102 0.875 0.943 1.030 0.574 0.665 0.667 0.636 +0 1.722 -0.697 1.243 0.834 1.258 0.765 -0.485 0.585 2.173 1.837 0.156 -0.913 2.215 0.891 0.788 -0.280 0.000 0.482 -1.043 -0.238 0.000 0.917 0.956 0.993 0.898 1.797 1.348 1.145 +0 0.772 0.305 0.990 1.195 -0.510 1.097 -1.251 0.818 2.173 0.745 -0.892 -0.527 0.000 0.881 0.495 1.720 0.000 1.018 0.602 -1.080 0.000 0.693 0.892 1.299 1.456 1.639 1.078 0.953 +0 0.688 0.216 1.378 0.392 1.109 0.543 -0.854 -0.680 0.000 0.930 -0.549 1.728 2.215 0.844 -0.523 -0.175 2.548 0.655 -1.196 0.413 0.000 0.804 0.893 0.989 0.743 0.932 0.627 0.617 +1 1.047 -0.745 -0.368 1.504 -1.445 1.159 -0.826 1.220 2.173 1.018 -0.507 0.464 0.000 0.524 -2.457 -1.289 0.000 0.509 -1.017 -0.665 0.000 0.865 1.024 1.434 0.858 0.593 0.854 0.785 +0 0.849 0.632 0.655 0.427 -0.225 0.812 -0.749 0.041 0.000 1.112 1.177 -1.698 2.215 1.222 -1.500 -0.706 0.000 0.610 -1.398 0.818 3.102 1.132 0.754 0.994 1.087 1.596 1.430 1.162 +0 0.629 0.004 1.257 1.422 0.356 0.708 -0.664 -1.682 2.173 0.909 1.207 -1.668 0.000 1.006 0.713 -0.157 2.548 0.768 1.084 0.757 0.000 0.889 0.918 0.987 0.949 1.326 0.951 0.915 +1 0.473 -1.476 -1.269 1.489 0.857 0.538 -0.103 -1.303 0.000 0.854 0.777 -0.176 2.215 0.549 0.883 1.105 2.548 0.531 -0.308 0.855 0.000 0.767 0.890 1.094 1.096 0.668 1.092 0.891 +1 1.677 0.129 0.400 0.886 1.061 0.808 -0.882 -1.385 2.173 0.703 -0.689 0.385 0.000 0.917 1.251 -0.693 0.000 1.389 -0.369 -0.570 0.000 0.991 1.077 0.986 0.976 0.949 1.002 0.871 +1 0.403 -1.524 -0.423 0.179 0.134 0.722 -0.265 -1.027 1.087 0.855 -1.027 0.768 0.000 1.299 -0.509 1.611 2.548 1.569 -0.866 0.115 0.000 0.837 1.048 0.984 0.708 0.852 0.893 0.771 +0 0.766 1.705 1.333 1.245 0.465 1.304 0.965 -0.952 2.173 0.759 0.528 0.963 0.000 0.747 -0.319 0.129 2.548 0.500 0.209 1.525 0.000 0.406 1.184 0.991 1.213 1.343 1.194 0.983 +0 1.471 -0.052 0.010 0.417 -1.663 0.795 -0.381 0.576 0.000 1.033 0.571 -1.097 0.000 0.881 -0.509 -1.680 1.274 0.745 -0.209 1.351 3.102 0.903 0.816 1.082 0.668 0.265 0.561 0.609 +0 0.924 0.804 0.949 2.844 0.574 1.006 0.319 -1.077 0.000 0.756 -0.525 -1.423 2.215 1.035 0.808 -0.667 0.000 0.542 1.319 1.316 3.102 0.847 0.921 0.991 0.594 0.819 1.064 1.133 +0 2.039 -0.500 1.510 2.119 1.485 2.993 -0.704 1.712 2.173 3.991 -0.281 -0.192 0.000 1.751 -1.763 0.714 0.000 0.574 0.619 -1.358 0.000 2.015 1.163 0.974 1.157 1.955 2.211 1.913 +0 0.559 -1.689 -0.223 1.257 -0.142 0.458 0.998 0.525 0.000 0.379 0.721 0.878 2.215 1.139 -0.842 -1.510 0.000 0.514 1.216 -1.728 0.000 0.685 0.622 0.984 0.823 0.506 0.651 0.691 +1 1.779 -0.989 -0.983 0.410 -0.168 0.828 -0.107 1.078 2.173 0.347 -1.834 0.879 0.000 0.378 0.032 -0.703 2.548 0.626 -1.652 -0.074 0.000 0.460 0.963 0.991 0.512 0.699 0.758 0.711 +1 0.531 1.348 1.628 1.125 0.808 0.575 1.047 -0.075 0.000 1.004 0.596 -0.642 0.000 1.638 0.311 -1.616 2.548 1.627 0.952 1.172 3.102 0.868 1.144 0.985 0.835 0.895 0.936 0.832 +1 2.287 -0.327 1.010 0.560 0.648 0.492 -0.601 -0.038 0.000 0.697 -1.186 -0.752 2.215 0.822 2.214 -1.395 0.000 1.644 -0.489 -1.352 3.102 0.746 0.715 0.986 1.206 0.587 0.933 0.770 +0 0.757 -1.171 1.622 1.045 0.412 0.508 -0.373 -1.306 0.000 0.857 -1.222 0.699 2.215 0.973 0.306 -0.470 1.274 0.946 -1.543 -1.245 0.000 0.857 0.992 1.092 0.631 1.203 0.860 0.789 +0 1.840 0.130 0.400 0.929 0.338 1.292 -0.661 -1.251 2.173 0.771 -0.334 -0.309 0.000 0.740 -1.042 -1.620 0.000 1.245 -0.175 1.460 3.102 1.172 1.039 0.984 0.888 0.916 1.112 0.955 +1 0.529 -0.260 -0.615 0.935 0.768 0.845 -0.527 0.597 0.000 0.934 0.364 -1.099 0.000 0.620 -1.135 0.846 0.000 0.724 1.184 
-1.373 3.102 0.556 1.163 0.990 0.499 0.325 0.696 0.625 +1 0.670 -1.182 -1.325 0.915 1.046 0.571 -0.138 1.443 2.173 0.856 -2.268 -0.118 0.000 0.819 -0.315 -0.330 2.548 0.486 -1.855 -0.628 0.000 0.954 1.052 0.986 0.624 0.856 0.972 0.838 +0 0.413 0.291 0.041 1.193 -0.366 0.434 -0.542 0.367 1.087 0.707 -0.193 1.132 0.000 0.737 -0.859 -1.639 2.548 1.140 0.025 -1.675 0.000 0.685 0.731 0.992 0.765 0.699 0.586 0.652 +1 0.801 1.721 0.108 0.865 -1.604 0.636 -0.492 1.591 2.173 0.638 0.819 -0.945 0.000 0.720 -0.154 0.643 0.000 0.770 0.664 0.238 3.102 0.879 0.680 1.153 1.368 0.865 0.905 0.837 +1 0.725 -1.768 0.455 0.344 -0.315 1.074 -1.423 -0.064 2.173 1.658 -0.936 -1.447 2.215 0.963 -2.331 1.379 0.000 0.517 -0.513 0.694 0.000 0.990 1.196 0.982 1.066 1.919 1.141 0.947 +1 0.554 0.461 -0.626 1.569 1.021 0.508 0.505 -0.987 0.000 0.594 -0.149 -0.880 0.000 0.738 -0.986 0.728 2.548 1.126 0.326 1.213 3.102 0.923 0.957 1.287 0.669 0.634 0.666 0.670 +0 1.347 0.691 -0.587 0.128 -1.020 0.494 -0.152 0.568 1.087 1.015 -0.255 -0.891 0.000 1.171 0.323 -1.648 2.548 2.220 0.885 1.045 0.000 1.224 1.012 0.979 0.797 0.896 0.796 0.710 +1 0.619 -0.955 1.499 2.304 0.646 1.729 -1.755 -0.690 0.000 1.276 0.434 1.140 2.215 0.368 0.144 -1.174 2.548 0.711 0.580 -0.391 0.000 0.183 0.823 1.150 0.833 0.643 0.875 0.788 +0 3.324 -0.673 -0.990 0.444 0.717 2.085 -0.158 1.118 0.000 0.854 2.388 0.184 0.000 0.963 -0.667 -0.377 0.000 0.930 0.078 -1.483 0.000 0.982 0.738 1.683 0.935 0.325 0.728 0.655 +1 0.506 -0.169 0.456 1.688 1.549 2.896 -1.339 1.687 0.000 3.969 -0.389 -0.054 1.107 0.818 -1.375 -0.081 0.000 1.053 -0.051 -0.597 1.551 2.775 2.042 1.067 1.908 0.923 2.424 1.903 +0 1.529 0.612 1.162 0.925 -1.310 1.713 -0.112 1.467 2.173 2.798 -1.190 -0.305 0.000 1.462 -0.076 -0.480 2.548 1.831 -1.483 -0.042 0.000 0.873 1.127 1.306 1.092 1.938 1.782 1.668 +1 0.609 0.022 -0.221 1.247 -1.435 0.511 1.060 0.783 1.087 0.284 -0.889 0.960 0.000 0.551 1.712 -0.446 0.000 0.523 -1.789 0.760 0.000 1.290 0.907 1.072 0.607 0.793 0.661 0.667 +0 0.754 -0.922 -0.779 1.369 1.696 1.165 1.732 -0.227 0.000 0.892 -0.422 1.617 2.215 1.055 -1.325 0.714 2.548 0.899 -0.145 0.297 0.000 0.998 1.901 1.113 0.902 0.930 1.754 1.595 +1 0.801 -1.643 -1.022 0.428 -0.115 0.465 -2.446 -0.140 0.000 0.826 -1.072 1.146 2.215 0.969 -1.933 1.421 0.000 0.889 -2.385 -1.282 0.000 0.864 0.947 0.992 0.813 0.298 0.757 0.724 +1 0.558 1.595 -0.901 1.174 -0.242 0.695 0.478 0.127 0.000 1.061 -0.591 1.006 2.215 0.662 0.612 -1.430 1.274 1.482 0.055 0.898 0.000 0.909 1.069 0.982 0.965 0.946 1.571 1.237 +1 1.571 0.860 -1.020 1.754 1.663 1.193 0.637 1.047 2.173 0.600 -0.436 0.128 0.000 0.953 0.845 -0.632 0.000 0.860 0.923 -0.204 3.102 0.805 0.820 1.522 0.964 1.001 0.980 0.833 +1 0.401 2.244 -1.036 0.387 -0.595 1.097 1.629 1.600 2.173 1.537 0.234 0.146 2.215 1.315 0.016 -1.606 0.000 0.947 1.132 -0.032 0.000 0.371 0.944 0.979 0.978 2.350 1.169 0.944 +0 2.150 -0.120 -0.014 0.601 -0.461 0.843 0.469 1.338 0.000 0.786 0.436 -0.754 2.215 1.344 0.833 -1.252 2.548 1.946 1.240 1.607 0.000 1.137 1.043 0.975 0.834 0.539 0.890 1.135 +0 0.921 0.714 0.347 1.167 1.464 0.900 0.951 -1.423 2.173 0.911 1.700 -0.067 0.000 0.506 2.066 1.493 0.000 0.694 0.645 0.102 3.102 1.062 1.204 1.214 0.666 0.824 0.759 0.766 +0 0.816 -0.033 1.174 0.643 1.055 1.187 -1.315 -0.455 0.000 1.308 -1.020 -1.624 2.215 1.318 -0.503 -0.418 1.274 0.671 -1.267 0.975 0.000 0.809 0.927 0.989 1.174 1.284 1.154 0.928 +0 1.738 -0.994 -1.384 2.604 -1.554 1.229 -1.332 0.369 2.173 0.791 -1.036 0.747 0.000 1.634 -1.641 0.059 0.000 1.970 -0.542 -0.926 3.102 
1.207 0.827 0.975 1.002 1.623 1.374 1.288 +0 0.720 2.012 -1.469 0.695 -1.509 1.025 0.928 0.025 2.173 0.618 0.574 -0.727 2.215 1.040 0.832 1.399 0.000 1.053 1.524 1.587 0.000 1.011 1.048 0.983 0.685 0.762 0.784 0.743 +1 0.861 -0.006 -1.528 2.018 1.669 1.027 -0.340 0.471 1.087 0.341 -0.258 -1.085 0.000 0.662 0.621 -0.535 2.548 1.117 -1.014 0.134 0.000 0.819 0.845 0.977 0.835 0.978 1.058 0.958 +0 0.882 0.672 -0.704 0.539 1.024 0.396 2.133 1.030 0.000 0.648 -1.635 -0.751 0.000 0.641 0.322 -0.130 2.548 0.936 -1.002 1.455 0.000 0.909 0.751 0.986 0.705 0.443 0.512 0.570 +1 1.666 -0.272 -1.441 0.830 1.219 0.494 -1.346 0.426 2.173 0.650 -1.086 -1.006 0.000 0.799 0.184 0.125 2.548 0.689 -0.563 0.202 0.000 0.796 0.736 1.103 1.043 0.707 0.805 0.712 +0 0.630 -1.064 -0.570 0.612 1.687 0.523 -0.144 1.378 2.173 0.782 0.246 -0.064 0.000 0.782 0.693 0.677 0.000 1.022 0.964 -1.021 3.102 0.804 0.871 0.989 0.762 0.839 0.677 0.665 +0 0.517 -0.170 0.380 0.472 -1.000 1.124 -0.353 1.526 2.173 1.623 0.311 0.243 2.215 1.601 -0.342 -0.053 0.000 1.413 -1.872 -1.250 0.000 1.045 0.879 0.984 0.983 1.945 1.118 0.957 +0 0.385 0.705 1.303 1.534 0.256 1.033 0.751 1.573 2.173 1.228 0.430 -0.227 2.215 1.601 0.740 -1.213 0.000 1.317 -0.229 1.231 0.000 1.584 1.153 0.986 0.941 1.675 1.085 1.044 +1 0.794 0.715 1.444 0.500 -0.958 0.912 0.779 -0.365 1.087 1.066 0.237 0.747 2.215 1.990 -1.218 -1.018 0.000 1.854 1.217 0.921 0.000 0.817 1.023 0.988 0.949 1.286 0.911 0.803 +1 2.172 0.379 0.295 0.618 0.826 1.011 1.058 -1.160 1.087 0.997 0.915 1.040 2.215 1.002 -1.083 -0.797 0.000 1.070 0.184 -1.576 0.000 1.144 1.435 0.985 1.511 1.357 1.164 1.138 +1 1.744 -1.480 0.661 0.889 -0.443 1.301 -2.118 -1.511 0.000 1.756 1.506 0.675 0.000 1.648 -1.363 -1.076 2.548 1.030 -0.538 -0.445 0.000 1.097 0.926 1.447 0.719 0.761 0.789 0.855 +1 0.748 1.087 -1.668 0.021 0.322 0.524 1.945 0.958 0.000 0.776 0.106 -1.249 2.215 0.834 0.624 -0.197 1.274 1.300 1.421 0.122 0.000 0.878 0.840 0.690 0.490 0.738 0.848 0.714 +1 1.617 0.979 -1.741 1.253 -1.586 3.405 -1.444 0.365 2.173 1.975 1.107 -1.166 0.000 1.295 -0.045 -0.804 2.548 1.548 1.527 1.626 0.000 1.558 1.539 1.006 5.880 3.028 3.975 3.496 +0 1.477 -0.002 0.347 0.880 -1.315 1.873 0.939 -1.655 2.173 1.982 0.217 -0.102 2.215 0.516 0.721 0.250 0.000 0.672 0.521 0.894 0.000 0.576 1.008 1.575 1.154 2.985 1.591 1.149 +1 0.982 -2.058 -0.774 0.973 -1.436 0.958 -0.267 1.530 0.000 0.853 -1.245 0.380 0.000 1.048 0.263 -0.489 2.548 0.706 -1.503 -1.146 0.000 1.027 0.880 0.993 1.183 0.931 1.002 0.879 +1 1.477 0.231 1.666 0.759 -1.208 0.697 -0.795 0.604 2.173 0.620 -0.667 -0.180 0.000 0.873 -1.018 -0.486 2.548 0.614 -1.329 1.188 0.000 0.847 0.684 0.984 1.114 0.823 0.989 0.858 +1 0.690 -0.281 -1.113 0.448 -0.466 1.039 1.168 0.555 2.173 0.968 0.857 1.148 2.215 1.002 0.729 -1.465 0.000 0.865 -1.735 -0.748 0.000 0.974 1.028 0.994 0.881 0.783 0.819 0.741 +1 1.136 0.062 -0.641 0.160 0.421 1.112 0.948 1.643 0.000 0.890 -1.225 1.085 1.107 0.821 0.140 0.256 0.000 1.796 0.486 -0.225 0.000 0.633 0.678 0.986 1.235 0.737 0.834 0.783 +1 0.691 0.721 -1.738 0.830 -0.919 2.469 0.751 0.581 0.000 1.873 0.557 -1.002 0.000 1.120 0.660 -1.378 1.274 1.560 1.375 1.727 3.102 1.700 1.740 0.989 0.621 0.592 1.268 1.167 +0 0.849 -1.216 1.586 2.412 -1.307 1.565 -0.936 0.083 0.000 0.956 -0.382 1.445 2.215 0.693 2.635 -0.690 0.000 0.983 -1.383 0.564 0.000 1.055 0.932 1.011 0.947 0.484 0.911 1.086 +0 1.220 -0.315 0.096 2.332 0.648 2.569 0.952 -1.323 2.173 0.862 -0.556 1.176 2.215 0.453 1.568 -0.142 0.000 0.464 -1.440 0.170 0.000 1.384 1.116 1.117 2.930 2.512 
1.994 1.579 +1 1.451 -0.627 1.115 1.625 1.591 1.349 -0.182 -0.080 1.087 0.888 0.364 -0.970 2.215 0.417 -0.787 -1.001 0.000 0.517 -0.209 -1.525 0.000 0.282 0.818 0.986 1.293 1.244 1.310 0.953 +0 0.664 1.185 -0.422 1.481 -1.207 1.031 1.249 0.369 2.173 0.544 1.675 1.094 0.000 0.516 1.502 -0.633 0.000 1.680 -0.411 1.696 3.102 0.813 0.825 0.990 1.006 1.896 1.135 0.960 +0 1.043 0.372 -0.267 0.310 0.072 0.619 -0.301 0.649 0.000 0.553 0.387 -0.545 2.215 0.678 2.457 1.286 0.000 1.260 0.880 -1.509 3.102 0.882 0.834 0.996 0.579 0.628 0.596 0.650 +1 0.391 1.039 1.704 0.533 -1.271 1.051 0.932 0.418 0.000 1.285 -0.727 0.382 0.000 1.198 0.517 1.507 2.548 2.418 1.054 -1.001 1.551 0.778 1.044 0.977 1.084 1.106 0.957 0.917 +1 0.927 -1.161 1.473 1.308 -1.306 1.304 -1.396 0.051 2.173 1.694 -0.536 -1.449 0.000 2.041 -0.964 0.478 2.548 0.552 -1.847 0.517 0.000 1.013 1.733 0.990 1.380 0.843 1.414 1.241 +0 2.127 -0.129 0.684 2.429 0.732 3.401 -0.736 -0.819 1.087 4.316 -0.465 0.838 0.000 0.704 -2.230 1.401 0.000 0.506 -0.891 1.672 1.551 3.346 1.795 0.998 3.206 1.108 2.564 2.174 +0 1.605 0.629 -1.432 0.182 -0.363 0.458 1.947 1.285 0.000 1.246 -0.400 -0.050 1.107 0.748 0.302 0.453 0.000 0.686 -0.031 -1.728 3.102 1.197 0.863 0.986 1.305 0.848 0.952 0.896 +0 0.469 -0.238 -1.226 1.624 1.343 0.399 -0.451 0.614 1.087 0.544 0.182 0.110 0.000 1.182 0.726 -0.970 0.000 0.735 -0.546 -0.170 3.102 1.086 0.932 0.987 0.785 0.377 0.597 0.671 +1 0.656 0.643 1.658 0.385 -0.191 1.029 -0.732 -0.989 1.087 1.102 -0.426 0.943 0.000 0.771 -0.553 0.397 0.000 0.592 -1.219 0.749 0.000 0.679 0.604 0.991 1.483 0.653 0.952 0.968 +0 0.383 1.191 1.183 1.646 -1.455 0.734 0.222 0.737 2.173 0.790 -0.841 0.192 2.215 1.241 -0.794 -0.722 0.000 0.551 -1.286 -1.725 0.000 0.814 0.881 0.984 2.127 0.832 1.514 1.317 +1 2.841 0.463 -1.484 0.259 0.488 0.834 1.575 -0.038 2.173 0.608 1.416 0.777 0.000 0.376 0.169 -0.359 0.000 0.703 0.060 0.977 3.102 1.013 0.990 1.164 0.751 0.933 0.990 0.875 +0 1.423 -0.578 1.443 0.463 -0.952 1.136 1.436 0.083 0.000 0.708 -1.394 -1.078 0.000 0.667 0.009 0.782 2.548 0.780 1.126 -1.093 0.000 0.808 0.631 0.986 0.675 0.531 0.522 0.550 +1 0.736 -0.884 -1.632 0.530 0.101 1.294 -0.024 -0.928 2.173 0.860 0.057 0.589 0.000 1.158 -0.599 1.088 1.274 0.486 1.858 0.227 0.000 1.138 1.106 0.989 0.895 1.558 1.134 0.926 +1 0.793 -0.120 0.990 2.447 0.409 0.750 0.916 -0.711 2.173 0.486 1.452 -1.697 0.000 0.620 0.689 1.494 2.548 1.253 2.151 -1.475 0.000 0.869 0.729 0.988 0.881 0.780 1.000 0.880 +1 1.013 -0.437 -1.047 0.905 0.585 1.257 0.083 1.734 2.173 1.329 -0.690 -0.305 0.000 0.878 -0.125 0.820 0.000 0.971 -0.693 0.338 3.102 1.486 0.871 1.320 1.122 1.244 1.064 0.913 +1 1.119 -0.804 0.620 1.092 -0.100 0.924 0.228 -1.687 1.087 0.561 1.888 -1.183 0.000 0.314 -1.223 -0.359 0.000 1.009 0.563 -0.729 0.000 0.896 1.057 0.981 1.400 0.801 0.951 1.195 +1 1.152 -0.448 -0.949 1.240 -1.623 0.702 -0.169 0.634 2.173 0.838 -1.033 -0.071 2.215 0.580 -0.581 -0.467 0.000 0.802 0.094 1.236 0.000 0.953 1.009 0.986 0.965 0.848 0.874 0.788 +0 1.299 -1.578 0.576 0.506 -0.744 0.747 -0.239 -0.193 0.000 0.988 -1.110 -1.409 2.215 1.467 -0.578 0.895 2.548 0.820 0.937 -1.522 0.000 0.870 0.939 1.042 0.933 1.166 0.816 0.757 +0 1.631 0.635 -0.306 0.215 -0.452 0.550 0.697 -1.597 2.173 0.520 0.087 1.156 0.000 0.705 -1.460 1.293 0.000 1.216 -0.483 0.347 3.102 0.891 1.011 1.001 0.976 1.039 0.831 0.928 +0 1.011 -0.372 -0.521 0.203 -1.241 1.022 0.620 -1.302 0.000 1.172 -1.167 -0.005 2.215 1.998 0.005 0.858 2.548 1.364 0.729 1.280 0.000 1.308 1.061 0.985 1.041 1.544 1.072 0.953 +0 1.698 
0.254 0.090 1.329 -0.614 0.651 0.267 -1.228 0.000 1.012 -0.919 0.686 2.215 1.434 2.257 1.730 0.000 0.419 1.549 -1.509 0.000 0.730 0.624 1.232 1.267 0.703 0.897 0.890 +0 1.687 -1.122 -0.905 0.405 0.100 1.399 -0.955 -0.228 2.173 1.835 -0.159 1.432 0.000 1.108 0.552 0.820 2.548 1.018 0.322 1.717 0.000 0.671 0.879 0.989 0.883 1.842 1.399 1.270 +1 0.704 -0.200 -0.918 1.037 1.565 0.544 -1.831 0.627 0.000 0.828 -0.151 -0.573 0.000 1.228 -0.568 0.795 2.548 1.541 -0.004 -1.162 3.102 1.095 1.039 0.988 0.598 1.084 0.779 0.714 +1 0.785 -1.415 0.797 0.675 -1.088 0.358 0.167 0.618 2.173 0.638 -1.648 -1.198 0.000 0.819 -0.841 -0.793 0.000 1.364 -0.596 -1.383 3.102 0.857 0.996 1.001 0.783 0.795 0.644 0.663 +0 0.629 0.532 -1.103 0.638 0.875 0.655 -0.656 -0.131 1.087 0.517 -1.705 0.400 0.000 0.967 -0.912 1.497 2.548 0.687 -1.628 -1.119 0.000 0.765 0.780 0.985 1.048 1.001 0.916 0.946 +0 0.580 -0.427 -0.570 1.226 0.234 0.888 0.334 1.126 0.000 0.700 0.935 -1.360 2.215 0.427 -0.247 0.210 0.000 1.429 -0.134 -0.890 3.102 0.883 0.948 0.990 0.847 0.650 0.909 0.897 +1 0.406 -0.898 -0.210 0.259 0.314 1.769 -0.435 0.680 0.000 1.905 -0.717 -1.166 1.107 1.270 0.100 -1.704 2.548 0.649 0.367 -0.290 0.000 1.460 1.442 0.990 1.229 1.056 1.388 1.138 +1 0.554 0.356 -0.511 2.467 0.625 1.100 -0.547 -1.426 1.087 0.500 0.123 1.641 1.107 0.884 -0.084 0.054 0.000 1.244 0.883 -0.908 0.000 1.128 0.865 1.384 1.641 0.552 1.057 0.964 +1 0.906 0.291 -0.761 1.419 -1.296 0.613 -0.392 0.068 0.000 0.579 0.050 0.867 2.215 0.936 -0.955 0.972 2.548 0.452 -0.751 1.326 0.000 0.756 0.657 1.000 0.892 0.458 0.758 0.682 +1 0.295 1.668 -0.353 0.459 0.955 1.234 -0.381 -1.072 2.173 1.093 -0.698 1.074 0.000 1.028 -0.634 0.497 0.000 0.981 0.007 -0.667 0.000 0.808 0.688 0.990 0.995 0.900 0.950 0.814 +0 0.703 -0.281 0.924 1.084 -0.744 1.216 -0.799 -1.426 2.173 1.182 -1.027 0.247 2.215 2.000 0.072 0.222 0.000 0.627 -0.109 1.175 0.000 0.943 0.960 1.206 1.016 1.775 1.196 0.991 +0 0.837 -1.376 -0.253 0.859 -0.871 0.699 1.501 0.691 0.000 0.699 0.106 -1.391 0.000 0.772 -0.047 0.981 2.548 0.777 -1.102 -0.759 0.000 0.910 0.818 0.992 0.680 0.193 0.747 0.753 +0 0.425 1.198 -0.499 1.453 0.767 0.893 0.060 -0.704 2.173 1.395 -0.141 0.785 1.107 1.660 -0.318 -1.193 0.000 0.720 -1.126 -1.608 0.000 0.769 0.893 0.990 1.053 1.609 1.079 1.113 +1 0.591 1.441 -1.402 1.680 0.797 0.913 -1.437 -1.301 0.000 1.172 0.257 -0.222 1.107 0.610 -0.734 0.554 0.000 0.534 0.646 0.663 3.102 0.859 1.021 1.266 1.292 0.543 0.953 1.323 +1 0.703 -0.798 -1.426 0.580 -1.334 0.634 0.633 -1.243 2.173 1.110 1.030 0.336 0.000 0.552 -0.812 -0.805 2.548 0.543 -0.363 0.821 0.000 0.933 0.997 0.988 0.607 0.683 0.799 0.740 +1 0.430 1.295 -1.078 0.335 -0.265 1.109 -0.608 -1.155 2.173 1.182 0.003 0.553 2.215 0.616 1.083 0.512 0.000 0.427 -0.350 1.000 0.000 0.561 1.178 0.999 0.842 1.765 0.946 0.795 +1 0.319 0.162 -0.815 1.435 1.057 0.997 -0.224 -0.995 0.000 1.139 -1.030 -0.785 0.000 2.209 0.224 0.815 0.000 1.389 -0.245 0.181 1.551 1.061 1.152 0.987 1.067 0.905 0.818 0.964 +0 0.645 1.492 1.414 1.287 0.782 0.345 -0.197 -0.207 2.173 0.390 -0.162 0.309 2.215 0.672 1.796 -1.656 0.000 1.027 1.113 -0.733 0.000 0.748 0.837 0.988 0.649 0.242 0.596 0.635 +1 0.976 -0.919 0.680 0.233 -0.734 1.077 -0.738 1.454 2.173 1.119 -0.310 -1.023 2.215 1.600 -0.531 -0.202 0.000 0.580 0.222 1.051 0.000 1.067 1.004 0.984 0.934 1.320 0.977 0.893 +0 0.340 -0.962 0.132 1.928 -0.755 0.965 -0.793 -0.103 2.173 1.168 -0.821 1.158 0.000 1.510 -0.867 1.656 2.548 1.138 -0.579 0.490 0.000 0.858 0.865 0.989 0.878 1.508 0.976 0.962 +1 1.072 0.289 1.245 
0.781 -1.440 0.679 -0.312 1.664 2.173 0.852 1.236 0.084 0.000 0.616 -0.362 0.255 0.000 0.784 1.013 -0.987 0.000 0.876 1.088 0.988 0.725 0.849 0.922 0.832 +0 0.471 -0.701 -0.227 1.114 1.618 0.811 -0.490 0.476 0.000 1.142 -0.170 -1.430 2.215 1.490 0.476 -0.090 0.000 0.979 0.443 1.637 0.000 0.965 1.163 0.999 0.646 0.698 0.734 0.673 +0 0.706 1.344 -0.790 0.354 1.581 1.100 0.724 1.582 2.173 0.982 1.139 -0.201 1.107 1.000 2.159 0.824 0.000 1.262 1.888 -0.437 0.000 1.126 1.031 0.993 0.839 1.565 1.147 0.974 +0 0.692 0.556 0.290 1.077 0.173 0.700 0.330 -1.043 2.173 0.490 -0.794 0.859 0.000 0.725 0.636 1.539 0.000 0.752 -0.782 -1.427 3.102 0.924 0.959 0.976 1.218 0.583 0.965 0.896 +1 1.444 -0.011 0.918 0.786 0.033 2.253 -0.561 -0.087 0.000 2.255 -0.565 -1.542 2.215 1.410 0.507 -1.294 2.548 1.322 0.260 1.190 0.000 2.687 2.199 1.056 1.477 1.213 1.804 1.458 +0 0.282 0.276 -0.532 0.852 0.820 0.330 -0.286 -0.258 2.173 0.777 0.804 1.500 0.000 0.662 0.426 -1.512 2.548 0.907 1.056 -0.308 0.000 1.118 0.889 0.989 0.736 0.573 0.659 0.632 +1 0.664 -0.406 -0.535 1.312 -0.146 0.978 0.558 0.113 0.000 1.107 -1.199 1.502 0.000 1.071 0.003 1.405 0.000 0.534 1.065 1.111 0.000 0.953 0.858 0.982 1.001 0.699 0.820 0.751 +0 3.356 -0.497 0.920 2.241 1.150 1.484 -2.553 -0.746 0.000 0.621 -1.832 -0.397 0.000 0.563 -0.851 -1.497 2.548 0.824 -0.435 -1.207 3.102 0.958 1.094 0.981 1.045 0.173 0.851 1.768 +1 0.691 -1.415 1.141 0.553 0.480 1.007 -0.459 -0.737 1.087 0.853 -1.377 0.648 0.000 0.865 -0.344 0.946 0.000 1.649 -0.685 -1.487 3.102 0.797 0.999 0.977 1.031 0.887 0.943 0.814 +1 1.190 -0.865 -1.286 0.551 1.221 1.078 -1.198 0.458 0.000 0.979 -2.270 -0.730 0.000 1.116 -0.884 1.405 2.548 1.137 -0.885 -1.046 3.102 1.008 1.103 0.985 0.572 0.693 0.713 0.656 +1 0.603 -0.301 1.575 0.651 0.827 2.186 -1.834 1.026 0.000 3.067 0.263 -0.854 2.215 1.088 -0.333 -0.127 0.000 0.686 -0.094 -1.218 1.551 0.609 1.094 0.986 0.664 0.492 1.166 0.963 +1 1.447 -1.325 -0.344 0.917 0.129 1.137 -0.756 1.313 1.087 0.330 -1.542 -0.826 0.000 0.715 -1.161 -1.284 2.548 0.594 -1.498 -1.708 0.000 0.416 0.799 0.987 0.792 0.857 1.014 0.774 +0 2.057 1.335 1.524 1.329 0.278 2.270 0.973 -0.001 0.000 1.256 0.942 -1.229 0.000 1.114 -1.910 1.428 0.000 1.918 0.188 -1.473 3.102 1.383 2.253 2.064 1.208 1.569 2.755 2.870 +1 1.539 -0.726 0.540 0.785 0.528 1.167 0.037 -1.166 0.000 0.684 -1.443 1.193 0.000 1.417 0.120 0.092 2.548 0.679 -0.367 -1.647 3.102 2.263 1.198 0.974 1.036 0.781 1.015 1.053 +1 0.841 0.120 -0.965 1.057 1.716 0.914 -0.341 -1.639 1.087 0.830 -0.454 1.237 0.000 1.051 0.813 -0.410 2.548 1.269 -0.804 -0.324 0.000 0.949 0.806 0.992 0.612 1.356 0.914 0.835 +0 0.555 -0.876 -1.496 1.017 1.122 0.737 -1.337 1.015 0.000 0.506 0.767 -0.742 2.215 0.795 -0.222 0.380 0.000 1.143 -0.254 -1.058 3.102 0.872 0.843 0.988 0.694 0.440 0.578 0.598 +0 3.968 -0.884 1.743 1.228 1.674 1.135 1.306 -0.194 0.000 1.021 0.732 0.053 1.107 1.950 -0.837 1.245 2.548 2.057 -1.085 -0.549 0.000 0.824 0.666 0.962 0.915 1.928 1.559 1.909 +1 1.083 -0.136 1.574 0.804 0.047 0.779 0.070 0.346 0.000 1.254 0.487 -1.579 2.215 1.179 0.845 -0.805 1.274 1.028 1.132 0.305 0.000 0.944 1.068 1.268 0.968 0.875 0.957 0.883 +0 0.789 1.014 0.446 0.338 -1.201 0.794 1.247 -1.205 2.173 0.778 -0.132 -1.351 0.000 2.059 0.149 0.616 2.548 0.640 0.209 0.231 0.000 0.927 0.967 0.985 0.758 1.835 1.005 0.824 +0 1.148 0.037 0.824 1.506 0.223 1.823 -0.662 -1.529 0.000 1.535 -0.879 -0.542 0.000 0.947 -0.956 0.725 0.000 1.128 1.048 -0.077 3.102 1.108 1.061 0.988 0.784 0.973 0.745 0.704 +1 0.442 0.931 -1.322 1.243 0.125 1.017 
0.422 1.651 0.000 1.439 0.039 0.491 1.107 1.271 -1.064 -0.772 0.000 0.740 2.471 -0.838 0.000 0.900 1.068 0.991 0.899 0.626 0.953 0.911 +0 1.705 -0.324 -1.051 0.503 1.617 0.646 -1.234 0.642 2.173 0.729 1.340 -0.247 0.000 0.545 -2.303 -0.655 0.000 1.065 -1.820 1.396 0.000 0.821 0.872 0.987 0.856 0.760 0.849 0.952 +1 0.365 -0.571 -0.945 0.094 -1.114 0.731 0.746 -0.469 0.000 0.673 0.163 0.140 0.000 1.116 0.286 1.000 2.548 1.431 -0.532 -1.733 3.102 0.895 1.010 0.749 0.680 0.772 0.858 0.704 +1 0.815 0.519 0.070 1.116 -0.367 0.338 -1.486 -1.500 0.000 1.138 -1.056 1.444 2.215 0.707 -1.203 0.435 2.548 1.267 -1.382 -0.943 0.000 0.750 0.804 0.993 1.368 0.760 1.396 1.211 +0 0.950 -0.274 0.300 1.403 -0.356 0.601 -2.283 -0.202 0.000 1.082 -0.716 0.791 2.215 0.636 -0.696 -1.691 0.000 0.739 0.165 -0.373 1.551 1.435 1.133 0.989 1.009 0.802 0.893 0.945 +0 0.919 0.236 -0.612 1.227 0.866 0.665 1.327 0.908 2.173 0.907 1.335 -1.274 2.215 0.726 1.873 -0.761 0.000 0.777 1.537 0.308 0.000 0.686 0.792 1.429 1.128 1.054 0.902 0.822 +0 0.770 0.759 0.736 0.736 -1.027 0.330 -0.940 -0.272 2.173 0.295 0.326 1.407 0.000 0.530 1.248 -0.710 2.548 0.974 1.705 1.241 0.000 0.681 1.113 1.042 0.589 0.775 0.718 0.638 +0 1.045 0.237 -1.715 1.000 0.388 1.114 0.780 -1.023 2.173 1.398 -1.402 0.506 0.000 1.070 2.054 -1.315 0.000 0.955 -1.307 -0.241 0.000 0.943 0.807 1.342 1.162 1.755 1.551 1.295 +1 0.737 0.111 -1.599 1.047 -1.551 0.383 0.519 -1.677 0.000 0.917 -0.411 0.415 0.000 0.505 1.139 0.488 1.274 0.466 -0.195 -1.269 3.102 1.347 0.900 0.991 0.569 0.478 0.567 0.675 +1 0.430 -0.795 0.753 0.833 1.400 0.561 -0.519 1.514 1.087 0.685 1.162 -1.049 0.000 1.267 -1.348 0.172 0.000 1.024 -0.045 -0.336 3.102 0.804 0.872 0.984 0.862 0.820 0.819 1.297 +0 1.267 1.312 -0.193 0.674 1.350 0.788 2.235 1.519 0.000 0.603 1.016 -1.227 2.215 1.217 0.161 0.022 0.000 0.542 0.396 -0.487 0.000 0.956 0.902 1.259 0.752 0.347 0.568 0.654 +0 0.411 -2.314 1.243 0.286 -1.179 1.442 -0.166 0.026 0.000 1.954 -1.225 -1.559 1.107 0.416 0.637 1.702 2.548 0.632 1.717 -0.888 0.000 0.953 0.987 0.997 0.839 1.114 1.345 1.065 +0 0.568 -0.484 -0.507 0.522 -0.660 0.847 -0.634 0.902 2.173 0.953 0.712 1.556 0.000 1.203 -0.088 -0.938 2.548 1.134 -0.078 0.125 0.000 1.423 1.120 0.983 1.069 1.299 0.941 1.019 +1 0.746 0.534 -1.727 0.757 0.116 0.757 -0.347 -0.058 2.173 0.942 -0.169 0.665 1.107 1.354 0.284 -1.322 0.000 0.712 1.602 1.640 0.000 1.088 1.261 1.037 0.841 0.761 1.000 0.830 +1 0.603 1.177 -0.435 1.395 0.143 0.510 0.599 -1.483 2.173 0.647 0.824 -1.093 0.000 0.675 -0.530 0.131 0.000 2.241 -0.716 1.488 3.102 1.200 0.870 0.993 2.475 1.037 1.606 1.301 +0 0.564 -0.290 0.604 0.391 -0.364 1.268 0.363 -1.401 2.173 0.705 -1.356 0.811 0.000 0.652 -2.372 0.147 0.000 1.087 -0.631 -0.013 3.102 0.888 0.773 0.979 1.441 1.391 1.387 1.108 +1 1.091 -0.295 0.673 0.296 0.463 0.853 -0.749 -0.960 1.087 0.495 -0.678 1.098 0.000 0.962 -0.557 0.059 0.000 1.003 -1.329 1.602 0.000 0.853 0.997 0.996 0.625 0.539 0.691 0.640 +0 1.251 -0.903 -1.037 0.290 1.112 0.733 -0.671 0.212 1.087 0.796 -0.140 1.510 0.000 0.835 -0.034 0.612 0.000 0.819 1.161 -1.006 3.102 0.908 0.939 0.981 0.858 1.246 0.818 0.770 +1 0.380 2.223 -1.632 1.200 1.213 0.662 1.012 -1.054 2.173 2.048 0.629 -0.434 2.215 0.956 0.456 0.782 0.000 1.391 1.210 1.111 0.000 0.730 1.054 0.992 1.387 0.965 1.036 0.940 +1 1.337 -0.088 -1.053 0.569 -0.497 1.002 -0.563 1.051 2.173 0.578 -0.901 -1.281 0.000 1.331 -1.320 0.302 2.548 0.389 2.295 1.080 0.000 1.971 1.675 0.987 1.367 1.113 1.350 1.199 +0 2.183 -0.597 0.334 0.133 0.096 1.253 -1.244 -1.085 0.000 
0.697 0.675 0.368 0.000 1.001 -1.381 -1.556 0.000 0.939 0.149 1.238 3.102 0.869 0.848 0.987 0.738 0.581 0.777 0.922 +0 1.576 1.712 -1.165 0.511 1.131 1.031 0.142 0.366 0.000 0.437 -0.409 -1.272 2.215 0.393 0.616 -1.734 0.000 0.920 0.091 0.686 0.000 1.128 0.933 1.092 0.682 0.182 0.659 0.818 +0 1.148 -0.158 0.480 0.532 -0.813 0.656 -0.504 -1.600 2.173 0.892 -0.495 -0.264 0.000 0.808 0.685 1.512 2.548 0.452 -0.881 0.842 0.000 0.732 0.928 0.994 0.850 0.680 0.709 0.659 +1 1.229 -1.187 0.889 0.634 1.715 1.019 -0.305 -0.382 0.000 0.850 0.536 1.684 2.215 0.640 0.079 -0.059 2.548 0.883 -1.634 -1.333 0.000 0.717 0.812 0.994 0.957 0.805 0.819 0.747 +0 0.898 -0.837 -0.229 0.972 0.753 2.051 0.304 1.445 1.087 1.007 -1.240 -0.751 0.000 1.833 -0.029 -0.390 1.274 0.563 -1.022 0.596 0.000 0.920 1.031 1.002 1.647 2.442 1.546 1.273 +1 1.808 -1.086 -1.533 0.992 0.029 2.164 -1.137 1.155 2.173 2.107 1.035 -0.170 0.000 1.183 -1.559 -0.868 0.000 1.322 -0.453 -0.846 3.102 4.772 2.620 1.831 1.637 1.826 2.599 2.091 +0 1.280 -0.733 -1.638 1.119 1.336 0.774 -0.951 -0.297 0.000 0.628 -0.707 0.770 2.215 0.986 -0.025 -0.015 0.000 1.391 -0.084 -1.051 3.102 0.855 0.886 0.976 0.737 0.883 0.701 0.809 +0 1.132 1.830 1.010 5.547 1.284 2.728 0.134 -0.603 1.087 1.193 0.327 -0.146 0.000 1.829 2.140 -0.235 0.000 2.644 0.982 1.489 3.102 2.693 2.274 0.980 4.509 3.117 2.898 2.586 +0 0.283 -0.993 1.421 1.151 -1.511 0.728 -0.930 0.526 0.000 0.875 0.909 1.584 2.215 1.163 -0.685 -0.131 0.000 0.888 0.136 -0.668 1.551 0.938 0.861 0.986 0.614 0.780 0.983 0.910 +0 2.298 0.328 -0.186 0.289 0.452 0.705 0.860 0.475 2.173 1.522 0.658 1.728 2.215 0.492 0.228 1.577 0.000 1.233 1.768 -1.530 0.000 0.947 0.910 0.991 1.459 1.385 1.082 0.952 +1 0.824 -1.436 -0.667 0.897 0.891 0.973 -0.852 -1.125 0.000 1.406 0.101 0.925 2.215 0.461 -0.331 -0.634 2.548 0.453 -1.172 -0.297 0.000 0.744 0.490 1.175 1.270 0.868 0.882 0.850 +0 1.644 -1.171 -1.352 0.854 1.194 1.289 -0.954 -0.691 1.087 1.059 -0.819 1.127 2.215 1.345 0.220 0.448 0.000 1.245 -0.709 0.359 0.000 0.827 0.950 1.231 1.197 1.718 1.131 1.077 +0 2.114 -0.490 -1.230 0.143 -0.704 1.362 -0.346 0.591 1.087 0.794 -1.431 0.319 0.000 1.385 -1.097 -0.813 2.548 0.760 -0.571 1.403 0.000 0.933 0.978 0.989 0.812 1.798 1.174 1.010 +1 2.173 0.688 1.385 0.494 0.071 0.449 -0.254 -1.673 0.000 0.778 -0.364 -0.812 2.215 1.611 0.560 0.121 2.548 0.611 -0.493 0.713 0.000 0.683 0.985 1.329 1.166 1.078 0.971 0.825 +0 0.362 -0.623 -0.094 0.652 -0.853 0.726 1.193 1.301 0.000 0.446 0.007 -1.514 1.107 0.989 0.706 -0.233 2.548 0.490 -0.058 0.537 0.000 0.849 0.903 0.989 0.626 0.703 0.631 0.605 +0 0.722 -1.687 1.609 0.718 -0.895 0.614 -0.123 -1.434 2.173 0.972 -0.556 1.142 0.000 0.773 1.853 -0.171 0.000 0.712 0.842 -0.137 3.102 2.581 1.443 0.988 1.239 0.768 1.148 1.561 +0 0.501 2.095 -0.656 3.431 -1.148 0.667 0.755 0.373 2.173 0.720 1.580 1.359 0.000 1.383 1.522 0.393 0.000 0.930 0.064 1.347 3.102 1.165 1.122 0.985 1.450 0.703 1.160 2.031 +1 0.774 -1.179 0.285 0.495 -1.259 0.698 0.033 -1.026 1.087 0.933 -0.441 1.151 2.215 0.910 -1.478 -0.938 0.000 0.706 1.990 -0.314 0.000 3.264 1.910 0.989 0.897 1.136 1.327 1.259 +1 0.974 0.339 -1.483 0.262 0.377 1.363 -1.036 0.863 0.000 2.333 -0.469 -0.472 2.215 1.337 -0.766 -1.492 2.548 1.009 -0.055 0.706 0.000 0.948 1.278 0.982 1.408 1.530 1.396 1.282 +1 0.471 -1.274 -1.286 1.530 -1.608 1.679 0.282 0.426 2.173 0.470 0.560 -0.707 0.000 1.042 1.839 -1.662 0.000 0.683 -0.353 -0.568 1.551 1.173 1.002 0.982 3.275 0.975 2.001 2.262 +1 1.987 0.656 -0.793 0.319 0.549 0.832 1.639 0.155 0.000 1.178 -0.323 
-1.722 1.107 1.029 0.371 0.657 2.548 0.947 1.495 -0.278 0.000 0.974 0.893 1.032 1.123 1.079 1.102 0.992 +1 1.098 1.986 0.505 1.088 -0.151 1.104 1.758 -1.154 0.000 1.076 1.207 0.607 0.000 0.975 1.459 -0.352 2.548 1.103 -0.872 -1.398 0.000 0.826 0.885 0.985 0.701 0.779 0.738 0.801 +1 1.879 -0.505 -1.528 0.267 0.397 1.926 -2.132 0.358 0.000 1.979 -0.797 -1.689 2.215 1.056 -0.679 -0.355 0.000 1.159 -0.324 0.376 0.000 0.778 0.788 0.987 0.691 0.862 0.911 0.783 +1 0.675 0.805 1.619 1.846 -1.444 0.391 0.688 -0.920 0.000 1.352 1.136 0.148 2.215 1.016 1.491 -0.337 0.000 1.519 0.426 0.619 3.102 0.794 0.942 0.976 1.538 0.686 1.078 0.964 +0 0.554 0.619 1.040 3.079 1.734 1.070 1.061 0.049 2.173 0.653 -1.762 0.307 0.000 0.731 0.096 -0.934 2.548 0.599 1.712 0.434 0.000 0.970 0.898 1.059 1.652 1.017 1.211 1.324 +0 0.749 -0.649 -0.470 1.787 -0.549 1.492 -0.448 1.201 2.173 0.956 -1.578 -1.133 2.215 0.861 -1.604 0.803 0.000 1.030 -0.922 0.180 0.000 0.905 1.030 0.994 1.383 1.866 1.457 1.195 +1 2.447 -0.507 1.655 0.301 -1.339 0.716 -0.085 -0.443 2.173 0.818 -0.895 0.367 2.215 0.834 -0.837 -0.174 0.000 0.639 -0.099 0.999 0.000 0.776 0.700 0.986 1.098 0.893 0.952 0.789 +1 1.113 -0.430 -1.397 0.714 1.551 0.729 0.332 -0.191 2.173 0.382 -0.755 -0.532 0.000 0.760 -1.037 0.908 0.000 0.726 -0.057 1.149 3.102 0.811 0.929 0.980 0.630 0.737 0.805 0.701 +1 0.336 -1.917 0.471 0.572 -1.013 0.950 -0.600 1.223 0.000 1.313 0.083 -0.504 2.215 0.561 -0.328 0.454 0.000 0.645 0.151 -1.081 3.102 0.856 0.774 0.990 0.812 0.415 0.837 0.722 +1 0.892 0.137 -0.084 0.509 1.530 0.793 1.255 0.144 0.000 0.916 -0.066 -1.342 2.215 1.625 0.534 1.311 2.548 0.379 2.063 -0.849 0.000 0.846 1.186 0.987 0.770 0.984 0.990 0.825 +1 0.395 0.578 0.747 1.939 -0.228 0.831 0.010 -1.220 2.173 0.745 1.928 1.187 0.000 0.387 -1.868 0.355 0.000 0.587 -1.185 0.884 3.102 0.562 0.880 0.991 0.798 0.906 0.790 0.713 +0 0.424 -0.714 0.029 2.095 -0.111 0.609 -0.530 1.204 0.000 0.799 0.599 1.294 0.000 0.592 -0.348 -0.872 2.548 0.813 0.645 -1.435 3.102 0.909 0.867 0.990 0.821 0.414 0.606 0.781 +0 0.582 0.167 1.192 1.301 -0.880 0.395 0.450 -0.209 2.173 0.561 0.658 1.575 0.000 0.830 -0.414 0.728 2.548 0.610 1.342 0.048 0.000 0.834 0.808 1.154 0.695 0.630 0.608 0.607 +0 1.225 -0.893 -1.589 2.172 -1.489 1.795 0.438 0.813 2.173 2.186 -1.190 -1.400 2.215 2.578 1.482 -0.119 0.000 1.288 0.683 0.403 0.000 0.633 0.867 0.990 0.642 3.801 2.230 1.798 +1 2.054 1.082 0.131 0.331 -0.898 0.790 -0.398 0.630 0.000 0.641 0.015 -1.563 0.000 1.588 0.730 -1.139 2.548 1.213 0.538 -1.737 3.102 1.425 1.032 0.986 1.064 0.550 0.886 0.968 +0 1.319 0.241 1.057 1.784 1.604 0.810 -0.905 -0.576 0.000 0.593 -0.062 -0.008 2.215 1.083 -0.516 0.509 0.000 1.332 0.431 -1.042 1.551 1.428 0.897 1.006 0.921 0.686 0.782 0.952 +1 1.471 -1.212 -1.277 1.476 -0.918 0.295 -1.307 0.596 0.000 0.699 0.250 1.208 2.215 1.025 -0.877 0.145 0.000 0.636 0.254 0.223 3.102 0.429 0.843 0.982 1.077 0.467 1.039 0.894 +0 1.424 1.123 -1.552 0.217 -1.560 1.198 1.323 -0.368 0.000 0.558 0.936 -0.906 0.000 1.132 0.134 0.672 2.548 1.592 -0.588 1.259 0.000 0.872 0.785 0.998 0.924 0.255 0.784 0.807 +0 0.440 -0.386 -0.493 1.082 -1.375 0.707 -1.804 -0.421 0.000 1.262 0.290 0.561 2.215 1.206 -0.315 1.091 0.000 1.411 -0.188 -1.576 3.102 2.106 1.459 0.993 1.048 1.173 1.219 1.131 +0 1.237 -0.208 -0.792 0.863 -1.743 1.641 -0.479 0.974 2.173 2.302 0.330 -0.681 2.215 1.387 -0.328 0.530 0.000 0.665 0.754 0.998 0.000 0.837 0.885 1.081 1.017 3.098 1.510 1.241 +0 0.376 -1.987 -1.713 0.334 -1.229 1.472 0.897 -0.083 1.087 1.631 0.038 1.210 1.107 1.729 
-0.566 -1.217 0.000 0.508 2.080 0.333 0.000 2.562 1.945 0.979 1.494 2.330 1.661 1.353 +1 0.728 -0.776 1.739 0.607 -0.097 1.218 -1.424 1.078 0.000 1.531 0.614 -1.048 2.215 2.581 -0.009 0.154 1.274 1.906 -0.212 -1.437 0.000 2.305 2.340 0.984 1.302 1.989 1.899 1.457 +1 0.424 -2.146 -0.253 0.266 -1.690 0.755 -0.358 0.744 0.000 0.992 -0.434 -1.321 1.107 0.685 0.315 0.167 1.274 0.404 -0.654 0.898 0.000 1.013 1.008 0.995 0.712 0.924 0.692 0.651 +0 1.152 1.118 0.030 0.354 1.742 0.860 -0.251 1.570 0.000 0.899 0.847 -0.980 2.215 0.921 0.027 0.827 0.000 1.090 0.388 -0.132 3.102 1.020 1.010 0.986 0.775 0.645 0.850 0.842 +0 0.425 0.551 0.925 1.083 -0.025 1.386 0.275 1.186 2.173 1.158 0.717 -0.674 2.215 0.748 -0.445 -1.391 0.000 0.766 0.004 0.735 0.000 0.872 1.113 0.991 1.008 1.903 1.113 0.917 +1 0.892 -0.333 0.294 1.400 0.912 0.369 -0.190 -0.816 0.000 0.570 -0.783 1.161 0.000 1.675 -1.180 -1.046 1.274 0.860 -0.189 -1.234 3.102 0.999 0.948 0.984 0.796 0.537 0.967 0.809 +0 1.548 -1.045 -0.035 0.455 0.117 0.941 -0.445 1.621 0.000 0.460 -0.491 0.883 0.000 0.614 -0.933 -0.659 2.548 1.069 -0.628 -1.110 1.551 0.865 0.853 0.987 0.769 0.259 0.582 0.703 +0 2.321 -0.783 -0.100 1.382 0.565 1.046 -0.550 -0.727 1.087 1.487 -0.711 1.577 0.000 0.640 -0.143 0.451 0.000 0.974 -0.551 -1.675 0.000 0.931 0.941 1.401 1.443 1.536 1.213 0.988 +0 1.642 -0.917 -0.783 0.243 -0.061 1.615 -0.349 1.021 2.173 0.927 -0.055 -0.550 2.215 0.888 0.197 1.673 0.000 0.610 1.555 -0.138 0.000 1.104 0.997 0.987 1.497 1.799 1.134 1.061 +0 0.900 0.096 1.066 1.948 0.350 0.672 0.443 -1.085 2.173 0.918 -2.441 -1.246 0.000 0.405 -1.067 1.671 0.000 0.569 -0.688 0.607 0.000 0.816 1.816 1.103 0.743 0.672 1.390 1.360 +0 0.712 -0.227 0.050 1.660 1.078 1.040 0.764 -1.236 2.173 1.019 -2.099 0.095 0.000 1.103 1.036 -0.736 0.000 1.793 -0.762 1.545 3.102 0.956 0.967 1.205 0.886 1.616 1.102 1.043 +1 1.075 0.719 -0.168 0.938 1.391 1.006 0.266 -0.973 0.000 1.000 0.634 0.852 0.000 1.129 -0.088 1.684 2.548 1.466 0.073 0.307 3.102 2.163 1.391 1.372 0.842 0.935 0.962 0.875 +0 2.243 -1.695 -0.020 1.125 0.059 1.318 -1.387 -1.604 0.000 1.218 -1.428 -1.245 0.000 1.487 -1.289 0.735 1.274 0.626 -0.393 0.823 3.102 0.863 0.977 0.988 1.024 0.366 0.942 1.171 +1 0.531 0.554 1.743 1.299 -0.321 0.922 1.827 -0.462 0.000 0.771 1.311 1.488 0.000 1.145 1.275 1.023 2.548 0.749 0.937 0.134 0.000 0.936 0.783 1.103 0.934 0.810 0.696 0.650 +0 0.399 1.909 -1.123 1.100 1.127 0.595 0.555 1.011 2.173 0.449 2.066 -0.598 0.000 0.805 -0.167 -0.519 2.548 1.426 -0.024 -1.391 0.000 0.857 0.844 0.983 0.876 0.908 0.673 0.646 +1 0.331 0.758 1.528 1.575 -0.447 0.653 0.506 1.115 0.000 0.925 -0.416 0.914 0.000 0.893 0.449 -0.995 2.548 1.245 -0.567 -0.702 3.102 0.858 1.065 0.989 0.899 0.543 0.824 0.850 +0 0.561 -1.671 0.303 1.009 -0.794 1.524 -0.881 0.071 2.173 1.164 -0.005 -1.721 1.107 0.723 1.929 1.327 0.000 0.713 0.784 -1.152 0.000 0.796 1.070 0.986 0.892 2.153 1.618 1.377 +0 0.513 1.738 1.001 1.013 -0.600 0.654 0.783 -1.149 2.173 0.650 -0.163 1.734 0.000 2.189 0.679 0.651 2.548 1.772 0.919 -0.430 0.000 0.820 1.046 0.990 0.795 1.489 0.931 0.827 +1 0.757 0.022 -0.775 1.862 -0.070 1.395 -0.516 1.154 1.087 1.058 1.919 -1.489 0.000 0.757 0.364 -0.395 0.000 1.589 -0.477 -1.284 0.000 0.804 1.225 0.988 0.670 0.467 0.980 0.825 +0 1.177 1.021 0.804 1.538 0.044 1.845 0.051 -1.582 1.087 0.436 0.056 -0.534 1.107 0.564 2.532 0.159 0.000 0.811 -0.892 0.122 0.000 0.523 0.544 1.179 1.986 1.070 1.280 1.171 +0 3.211 -1.053 -1.274 0.813 -1.617 3.254 0.430 0.376 0.000 3.664 -0.912 -1.600 2.215 3.134 0.888 0.385 0.000 
1.896 1.036 -1.472 3.102 1.334 1.170 0.988 0.858 3.156 2.371 2.063 +0 1.917 -0.638 1.101 0.687 1.539 0.906 0.047 -0.920 0.000 1.027 0.515 -0.205 2.215 0.544 0.615 -1.685 0.000 1.233 -0.315 0.328 3.102 0.899 0.984 0.991 1.480 0.670 0.985 0.983 +1 0.573 1.144 -0.806 1.199 1.644 0.869 0.572 -0.790 2.173 1.125 0.577 -0.033 2.215 1.109 -0.573 1.332 0.000 0.737 1.324 0.771 0.000 0.774 1.101 0.992 0.976 0.918 0.925 0.979 +0 1.536 -0.761 -0.482 2.613 -0.144 1.034 -1.478 1.349 0.000 0.870 -2.741 1.663 0.000 0.733 0.152 0.849 2.548 0.573 0.084 1.225 0.000 1.047 0.943 0.990 0.936 0.433 0.861 1.078 +0 0.687 1.583 -0.708 1.385 -1.613 0.889 1.434 0.248 0.000 0.404 0.328 -0.463 2.215 0.641 -0.559 -1.331 2.548 1.164 -0.127 0.942 0.000 1.653 1.051 0.986 0.996 0.465 0.842 0.891 +0 1.155 -0.234 -0.225 1.264 -0.759 0.906 -1.571 1.637 2.173 0.784 -0.632 -1.240 0.000 1.311 -0.049 0.522 2.548 1.510 -1.779 0.649 0.000 1.817 1.301 0.983 0.985 1.620 1.135 1.086 +0 0.670 -0.980 -0.184 0.480 1.275 1.140 0.324 -0.626 2.173 0.711 0.551 1.229 0.000 0.758 1.302 0.612 0.000 0.680 -0.862 1.331 0.000 0.811 1.296 0.983 0.583 0.844 0.798 0.706 +1 0.380 -0.978 0.445 0.734 -1.036 1.221 0.040 -0.264 0.000 1.085 -0.433 0.985 0.000 0.999 -0.838 1.426 2.548 1.754 0.886 -1.355 3.102 1.062 0.736 0.982 0.776 1.321 1.042 0.876 +0 0.347 -2.251 0.510 1.561 -1.457 1.496 0.562 1.067 2.173 1.687 -1.045 -0.144 2.215 1.103 0.672 -0.846 0.000 0.902 -0.972 -1.094 0.000 0.678 0.987 1.000 2.922 2.984 2.181 1.584 +1 1.528 0.425 -0.742 0.329 -1.267 1.570 -0.698 1.034 2.173 0.489 -0.703 -0.528 0.000 0.273 2.610 -0.776 0.000 0.453 -0.663 0.060 3.102 1.168 0.741 0.989 1.915 0.688 1.188 1.038 +1 0.766 0.317 -0.285 0.773 -1.326 0.778 -0.231 1.325 2.173 0.446 1.290 -0.571 0.000 0.532 -1.283 -1.319 1.274 1.118 0.325 0.276 0.000 0.778 0.999 0.985 0.874 0.743 0.807 0.721 +0 0.514 -0.069 -0.630 0.477 -1.693 0.998 0.226 0.548 0.000 0.788 1.281 0.044 2.215 1.669 0.616 -1.390 0.000 0.448 0.797 -1.073 3.102 0.759 0.859 0.991 0.473 0.464 0.572 0.598 +0 0.940 -0.694 1.330 0.895 -0.610 0.658 0.214 -0.862 0.000 0.426 -0.015 1.434 0.000 0.898 -0.959 0.407 2.548 0.840 1.838 -0.224 0.000 0.996 1.011 1.251 0.674 0.142 0.655 0.640 +1 0.407 2.138 1.044 0.743 0.819 2.112 0.373 -1.540 0.000 2.009 1.203 0.381 0.000 1.791 0.603 -0.251 1.274 1.107 0.137 -1.202 3.102 0.841 1.017 0.995 0.778 0.857 0.883 0.806 +1 0.787 1.108 -1.695 0.872 1.703 0.854 0.754 -0.130 2.173 0.644 0.076 1.677 2.215 0.548 -0.565 -0.584 0.000 0.744 -1.106 1.149 0.000 0.897 0.779 0.995 1.313 1.154 1.033 0.842 +0 1.365 1.113 1.709 0.438 -0.509 0.784 1.249 1.034 2.173 1.186 1.278 -0.682 2.215 0.377 1.205 -0.290 0.000 1.466 0.056 0.292 0.000 0.687 0.933 0.987 0.789 1.419 0.857 0.781 +1 0.717 -0.525 -0.352 1.629 -0.280 1.262 0.201 1.026 0.000 0.930 -1.141 -0.897 0.000 0.927 -0.576 1.247 2.548 1.330 -0.767 -1.323 1.551 0.909 1.119 0.993 1.078 0.636 0.875 0.922 +1 0.908 0.115 1.149 1.646 1.724 1.225 -0.293 0.400 2.173 1.041 -0.484 -0.481 2.215 0.557 0.108 -1.742 0.000 1.101 -0.088 -1.007 0.000 0.540 1.114 0.993 1.156 1.197 1.073 0.878 +0 0.326 -1.718 -0.762 1.476 -1.081 1.009 -0.526 0.085 2.173 0.807 0.924 -1.573 1.107 0.401 2.102 0.554 0.000 0.737 -2.058 1.475 0.000 0.859 0.825 1.000 1.087 1.706 1.022 0.976 +1 0.410 -0.630 -0.621 0.106 -1.494 0.960 0.757 1.360 0.000 1.260 -0.039 -0.345 2.215 1.099 0.236 0.839 2.548 0.997 1.162 -1.681 0.000 1.058 1.246 0.894 0.728 1.111 1.100 0.872 +1 0.815 0.465 1.062 1.180 -1.456 0.486 -2.050 -0.383 0.000 1.334 0.492 -1.611 1.107 0.702 1.159 -0.504 0.000 0.596 -1.142 -0.013 
0.000 1.281 0.743 1.042 0.652 0.855 0.767 0.713 +0 0.677 -1.636 -0.558 0.793 -1.238 1.291 0.166 0.707 0.000 0.964 0.674 1.077 0.000 1.057 -0.317 -0.370 2.548 1.123 -0.462 -1.004 1.551 0.998 1.314 0.981 0.522 0.458 0.976 0.969 +1 0.716 -0.196 -1.450 0.547 0.205 1.360 0.703 1.173 2.173 1.445 1.036 -0.685 2.215 0.664 1.985 -0.341 0.000 0.908 1.186 0.695 0.000 0.979 0.779 0.987 0.947 2.084 1.108 0.918 +1 0.821 -1.303 -0.662 0.255 0.587 0.949 -1.584 0.074 0.000 1.314 -1.036 1.250 1.107 1.682 -0.910 -1.430 2.548 0.405 -2.460 1.057 0.000 1.007 1.273 0.995 0.816 1.050 1.046 0.907 +1 3.017 -0.240 -0.278 0.742 -0.499 1.309 -1.166 1.318 0.000 0.456 -0.627 -1.579 0.000 1.095 -0.002 0.621 2.548 1.182 0.617 -1.491 1.551 0.939 1.133 0.991 1.091 0.886 0.948 1.190 +1 0.525 0.090 -1.129 1.475 1.199 1.179 -0.271 0.808 1.087 1.038 -1.332 -0.208 0.000 1.146 -1.507 -0.625 0.000 1.108 -0.720 -0.937 0.000 0.924 0.923 1.053 0.872 0.798 0.960 0.900 +0 0.541 1.873 0.034 0.982 -1.227 0.820 0.544 -1.361 2.173 0.948 -1.400 0.751 0.000 1.107 1.254 -0.361 0.000 1.776 0.724 0.680 3.102 0.987 0.990 0.988 0.801 1.248 0.832 0.729 +1 0.861 1.019 -0.607 0.939 -1.434 1.869 0.577 0.981 0.000 0.782 0.873 -1.518 0.000 1.714 0.141 -0.476 2.548 1.681 1.314 -1.022 0.000 0.867 0.985 0.988 1.053 0.880 0.885 0.774 +1 0.382 -1.725 1.542 2.375 -0.686 0.412 -0.814 0.416 0.000 0.622 0.718 0.890 1.107 0.843 -1.152 1.337 0.000 0.614 -1.151 1.673 3.102 0.817 0.930 1.197 0.727 0.796 1.106 0.926 +0 1.356 -0.852 0.561 0.841 0.171 1.106 -1.225 -1.355 0.000 0.685 -0.324 -0.375 1.107 0.518 -1.494 0.743 0.000 1.197 -0.464 -1.234 3.102 1.321 0.862 0.977 0.850 0.579 0.741 0.830 +1 1.890 -0.933 0.683 0.671 1.065 0.655 0.224 -1.491 2.173 0.613 -0.680 -1.113 0.000 0.688 -1.341 -0.954 0.000 0.841 -0.399 0.284 0.000 0.896 0.827 1.000 0.760 0.741 0.914 0.781 +1 0.809 -0.053 -0.717 1.257 0.552 0.864 -0.223 1.572 0.000 0.650 0.470 -1.404 0.000 0.960 0.132 -0.166 2.548 1.513 -0.573 0.553 3.102 0.910 1.073 1.272 0.775 0.682 0.831 0.781 +0 0.655 1.421 -0.148 1.282 0.766 0.921 0.374 1.032 2.173 1.782 -0.174 -0.749 0.000 0.993 0.704 -0.810 0.000 1.219 -0.802 1.357 0.000 1.014 0.968 0.987 0.831 0.347 1.017 0.965 +1 0.589 -1.288 0.882 0.706 -1.282 0.912 -0.280 1.587 0.000 1.092 -1.156 1.702 2.215 2.027 1.958 -0.071 0.000 1.698 -1.373 0.314 0.000 2.229 1.465 0.985 1.327 1.471 1.195 1.070 +1 0.464 2.117 1.455 1.277 -0.532 0.492 0.348 1.679 0.000 0.620 -2.247 -0.951 0.000 1.811 0.032 0.499 2.548 0.626 1.016 -1.669 3.102 0.577 0.811 1.041 0.637 0.911 1.034 0.860 +0 0.883 0.282 1.525 0.923 -1.277 0.667 0.378 -0.304 0.000 0.604 -0.516 -0.880 1.107 1.025 -0.050 0.353 0.000 1.043 0.576 0.908 1.551 0.892 0.832 0.994 0.772 0.854 0.651 0.709 +0 1.139 1.250 -0.936 0.608 -1.478 2.087 0.351 1.160 1.087 2.580 0.803 -0.582 1.107 1.468 0.184 0.788 0.000 1.053 -0.524 1.014 0.000 0.649 0.844 0.987 0.961 3.510 1.749 1.419 +0 2.183 1.059 -1.225 1.576 -0.637 1.106 -1.504 0.383 2.173 0.680 -0.080 0.720 0.000 0.640 -2.077 0.819 0.000 1.254 -1.310 1.662 3.102 1.355 1.001 1.299 3.215 1.139 2.248 1.881 +1 0.950 -0.127 1.076 1.207 0.094 1.257 0.335 -1.062 2.173 0.612 1.214 0.474 0.000 0.712 -0.390 0.511 0.000 0.597 -0.307 1.683 3.102 0.954 0.739 1.148 1.321 0.657 0.850 0.824 +0 1.061 -0.398 -1.582 1.375 -1.049 0.949 0.906 0.811 0.000 0.788 -1.621 -0.817 0.000 0.569 2.087 0.815 0.000 0.930 -0.289 0.216 3.102 0.984 1.042 0.983 0.723 0.375 0.918 0.986 +1 0.625 -0.581 0.952 1.814 1.733 1.494 -0.925 -0.238 1.087 0.768 -0.362 1.299 2.215 0.441 -0.396 1.636 0.000 0.484 -1.153 -1.126 0.000 0.644 0.873 
0.989 0.543 1.613 1.133 0.867 +1 0.282 -0.038 0.753 0.129 1.713 1.088 1.135 -0.984 2.173 1.153 1.030 1.002 2.215 0.656 0.901 -0.295 0.000 0.736 -0.278 0.232 0.000 0.648 0.966 0.608 0.488 1.610 0.912 0.708 +1 0.728 1.193 1.172 1.265 -1.518 1.591 -0.702 0.414 0.000 1.005 2.186 0.308 0.000 1.749 1.475 -1.567 0.000 1.780 0.022 1.448 3.102 0.923 0.960 0.988 0.764 0.287 0.719 0.981 +0 0.521 -1.252 -1.330 1.137 -1.274 1.091 -0.228 -0.701 2.173 1.189 -1.964 0.395 0.000 0.946 -0.794 0.713 0.000 0.618 -0.506 1.521 1.551 1.088 0.866 0.995 1.823 0.806 1.170 1.186 +0 0.425 -0.942 -0.408 1.318 -1.666 0.931 2.046 -0.693 0.000 1.869 0.591 0.778 2.215 1.306 0.330 1.302 0.000 2.187 0.303 -0.676 3.102 0.738 1.018 0.989 1.878 1.777 1.458 1.139 +1 1.152 -0.412 -0.081 0.687 0.449 0.871 -1.778 -1.543 0.000 0.630 -0.600 -1.460 0.000 0.619 -1.361 0.102 0.000 1.057 -1.064 1.658 1.551 0.988 1.231 0.985 0.813 0.780 0.745 0.815 +1 1.073 1.471 -0.864 0.993 0.145 0.655 2.136 0.577 0.000 1.569 0.246 -1.630 2.215 0.466 2.203 1.451 0.000 1.315 0.437 -0.125 1.551 0.715 1.036 1.129 1.403 1.279 1.183 1.034 +0 0.688 0.289 -0.299 0.329 -1.276 0.630 2.799 0.546 0.000 0.519 -1.509 -1.140 2.215 1.299 -1.524 0.950 0.000 0.580 -0.520 -0.535 0.000 0.899 1.022 0.992 0.659 1.087 1.650 1.429 +0 1.109 0.433 -0.468 1.175 0.241 0.701 -1.039 1.730 0.000 0.377 1.571 1.412 2.215 0.591 0.098 0.501 2.548 0.502 -1.305 -0.971 0.000 0.634 0.792 0.985 0.794 0.555 0.787 0.868 +0 0.946 1.536 0.058 0.724 -0.300 1.641 1.067 0.425 2.173 1.580 -0.824 -1.154 0.000 2.171 0.012 -1.604 2.548 1.229 -2.323 1.184 0.000 2.579 2.312 0.994 0.914 2.625 2.842 2.275 +1 0.617 1.733 -0.695 1.110 1.042 1.355 0.182 -1.722 1.087 1.529 -1.026 -0.209 0.000 0.911 -0.820 0.991 2.548 0.591 1.563 -0.394 0.000 2.547 1.688 1.147 1.466 1.186 1.458 1.530 +1 1.852 0.057 -1.327 1.108 1.144 1.124 -0.445 0.673 2.173 0.650 0.023 -0.280 2.215 0.468 0.818 0.158 0.000 1.160 -1.182 -0.682 0.000 0.670 0.666 1.572 1.054 0.999 1.013 0.796 +0 0.869 0.150 -0.228 1.577 0.290 1.197 1.356 -1.683 1.087 0.635 1.798 1.358 0.000 0.743 2.004 0.641 0.000 0.851 -1.142 -1.462 3.102 0.799 1.347 0.987 1.212 1.979 1.515 1.322 +0 1.687 -0.743 0.796 1.179 1.013 2.338 0.190 -0.846 2.173 1.664 -1.004 0.541 0.000 0.810 0.648 -0.799 2.548 1.504 -0.334 -0.332 0.000 1.602 1.480 0.999 2.247 0.457 1.549 1.466 +0 1.525 0.148 -0.799 0.902 -1.485 1.169 -1.336 1.029 1.087 0.726 -1.975 0.299 0.000 1.173 -0.496 -0.983 0.000 1.220 -0.316 1.184 1.551 0.972 0.808 0.984 1.646 0.660 1.081 0.914 +0 0.348 -0.907 0.642 0.978 -0.753 1.609 0.838 -0.160 1.087 2.021 -0.391 1.533 2.215 0.926 0.143 0.604 0.000 1.303 0.069 -1.558 0.000 1.127 1.077 0.989 2.618 3.186 2.117 1.618 +0 0.701 -0.799 -0.975 0.621 -1.159 1.172 -1.742 1.184 0.000 1.609 0.733 -0.314 2.215 0.452 -0.745 0.490 0.000 0.637 -0.571 1.438 3.102 0.998 0.662 0.996 0.982 1.162 1.587 1.303 +1 0.299 1.678 -0.207 1.484 -0.899 0.966 0.987 -0.482 2.173 0.297 2.175 1.023 0.000 0.919 -0.041 1.075 0.000 0.379 1.336 1.696 3.102 0.924 0.783 0.993 0.736 0.624 0.902 0.799 +0 1.447 0.708 -0.072 0.402 0.199 1.086 0.723 -0.923 2.173 1.208 0.486 1.071 2.215 0.656 2.218 1.494 0.000 0.553 0.968 -1.527 0.000 0.527 0.928 0.989 1.044 1.653 1.042 0.912 +0 1.186 1.102 0.204 0.191 -0.201 0.836 -2.042 0.858 0.000 0.837 -0.530 -0.917 2.215 1.103 1.021 -1.524 0.000 1.402 0.403 -1.154 0.000 0.632 0.919 0.983 0.510 0.750 0.705 0.727 +1 1.178 0.413 -0.465 1.224 -1.463 1.037 0.708 0.693 0.000 0.561 -0.348 0.362 0.000 1.109 0.341 -1.441 2.548 0.671 -1.031 -0.785 3.102 1.028 1.123 1.301 0.740 0.695 0.879 
0.868 +1 0.598 -1.435 0.398 1.088 -1.164 0.684 -0.904 0.101 2.173 0.911 -1.215 1.288 0.000 1.052 -0.182 1.330 0.000 1.886 -0.185 -0.505 3.102 0.906 0.966 1.102 0.922 0.755 0.729 0.747 +0 0.847 1.179 -1.039 1.368 -1.346 1.598 1.575 0.489 2.173 1.243 -0.272 -1.603 1.107 0.324 0.160 0.237 0.000 0.768 1.562 -0.414 0.000 0.608 0.847 0.983 1.764 2.981 1.777 1.306 +0 0.714 1.317 -1.712 0.959 1.663 1.010 0.010 -1.075 2.173 0.863 1.840 0.742 0.000 1.623 0.889 0.299 2.548 0.858 1.811 0.358 0.000 0.789 0.952 0.988 0.893 1.708 1.250 1.090 +1 0.359 1.363 0.130 0.783 1.588 1.052 -1.057 0.267 0.000 0.948 0.501 -0.992 2.215 0.832 -0.601 1.344 2.548 0.524 -0.414 1.721 0.000 1.143 0.902 0.981 0.747 1.003 0.949 0.830 +1 0.790 -0.299 1.378 1.729 -1.729 1.012 -0.899 0.180 2.173 0.687 -0.128 0.039 0.000 1.620 -0.390 -1.362 2.548 0.739 -1.797 0.015 0.000 0.788 1.008 0.999 0.797 1.616 1.064 1.023 +1 0.552 1.138 0.037 1.701 -0.060 0.905 0.310 1.545 2.173 0.868 1.133 1.550 0.000 1.112 -0.243 -1.115 2.548 1.414 -0.557 -0.012 0.000 2.081 1.395 0.998 1.270 0.922 0.986 0.992 +0 1.524 1.498 -1.736 0.578 -1.106 0.594 -0.362 -0.811 0.000 0.767 0.914 1.116 1.107 1.181 0.357 -0.364 0.000 2.530 0.838 0.506 3.102 0.831 1.183 0.981 1.264 0.658 0.972 1.090 +0 0.918 -1.183 1.706 0.657 -0.908 0.633 -1.169 -0.809 0.000 0.776 -1.040 0.794 1.107 1.051 -1.412 0.211 2.548 0.740 -1.330 1.560 0.000 0.911 0.911 0.989 0.895 0.535 0.689 0.666 +1 0.957 0.098 1.073 1.421 -1.659 1.546 -0.670 -0.233 2.173 1.539 1.046 1.485 2.215 0.667 -0.376 -0.640 0.000 0.911 -0.877 0.274 0.000 0.690 0.640 1.016 0.902 3.200 1.604 1.263 +1 0.511 0.488 0.454 1.708 -0.561 0.567 0.471 0.924 0.000 1.259 -0.153 1.512 2.215 0.547 -0.798 -0.891 2.548 0.372 -1.061 0.558 0.000 0.732 0.759 1.025 0.722 0.799 0.839 0.754 +1 0.468 0.755 -0.715 0.455 -0.858 0.648 1.114 0.868 0.000 1.271 0.497 1.190 2.215 1.252 1.627 -0.608 0.000 1.438 -0.436 -0.099 0.000 1.665 1.045 0.983 1.084 0.587 0.882 0.978 +1 0.947 -1.734 1.738 0.854 -0.164 1.258 1.374 -0.534 0.000 1.244 -0.345 1.712 0.000 1.372 -1.133 1.463 2.548 1.180 -0.476 0.451 3.102 0.887 1.146 1.233 0.879 0.838 0.735 0.851 +1 0.672 -0.078 1.170 1.109 -0.142 0.923 -0.331 1.683 2.173 0.715 -0.892 -0.063 0.000 0.861 -2.060 -0.595 0.000 0.633 -0.388 -0.371 3.102 0.941 0.778 1.106 0.993 0.779 0.871 0.880 +1 1.321 -0.501 -1.721 0.785 0.955 0.419 -0.989 0.146 0.000 1.245 -1.063 -0.729 1.107 0.397 -0.758 -1.328 2.548 0.892 -0.100 0.052 0.000 0.874 1.030 0.990 0.530 0.396 0.673 0.649 +0 0.512 1.691 -1.331 0.552 0.373 1.198 0.838 1.315 2.173 1.404 0.310 -0.260 0.000 0.540 -0.149 -0.932 0.000 0.851 -0.241 1.537 1.551 0.827 0.880 0.980 0.856 0.674 0.990 0.823 +1 0.982 -0.229 -0.513 2.079 -1.097 0.958 1.377 -0.057 0.000 1.838 -0.866 1.253 1.107 0.511 0.332 -1.728 2.548 0.694 -1.417 0.006 0.000 0.704 1.081 0.993 1.624 0.825 1.040 1.113 +1 1.091 -1.010 0.509 0.501 1.338 0.808 0.018 -0.627 2.173 0.425 0.724 1.649 0.000 0.837 2.038 0.871 0.000 0.425 -0.526 1.116 0.000 0.953 0.771 0.992 1.003 0.790 0.878 0.992 +1 0.372 -1.892 0.776 0.680 -0.785 0.806 0.746 -0.040 2.173 0.646 -0.934 -1.720 0.000 1.362 -0.355 1.195 0.000 0.634 -0.010 -0.593 0.000 0.822 0.554 0.989 1.039 0.671 0.862 0.772 +0 1.050 -0.001 -1.376 0.020 0.805 0.742 0.660 0.805 0.000 0.449 -1.209 -0.417 2.215 0.679 -0.535 -0.673 1.274 0.445 -0.135 0.680 0.000 0.388 0.918 0.461 0.428 0.238 0.653 0.585 +0 0.904 1.056 0.005 0.710 0.367 1.104 0.943 1.376 0.000 0.911 1.294 -0.810 2.215 0.370 1.300 -1.609 1.274 0.767 1.655 -0.306 0.000 1.597 0.873 0.990 0.853 0.408 0.734 0.763 +1 0.541 
-1.546 -0.236 1.359 1.513 1.211 -1.154 -0.146 2.173 1.203 -1.542 1.533 0.000 1.003 -0.439 -0.550 2.548 0.660 -0.964 0.666 0.000 0.855 1.091 1.187 1.188 0.684 0.964 0.879 +0 1.036 0.784 1.434 1.159 0.645 0.872 0.520 -1.043 2.173 0.852 0.779 -0.635 0.000 1.874 0.418 0.517 1.274 1.150 -0.359 1.514 0.000 0.874 1.007 0.990 0.752 1.572 0.957 0.895 +1 0.914 -0.525 0.572 0.679 -1.285 1.191 0.337 1.619 2.173 0.820 0.434 0.604 0.000 1.828 -0.379 -0.370 0.000 1.657 -0.428 -0.949 1.551 0.399 0.587 1.086 1.021 1.274 0.962 0.836 +1 0.620 0.647 -0.146 1.161 -1.632 0.655 -1.134 0.078 0.000 1.126 -0.900 -1.117 2.215 1.084 -0.192 1.127 0.000 0.637 0.863 -0.773 3.102 1.066 1.102 1.144 1.128 0.908 0.957 0.960 +0 0.756 -0.342 -0.004 0.726 1.286 0.692 0.599 -0.880 2.173 0.485 1.135 -0.486 2.215 0.582 -0.243 0.536 0.000 0.731 1.190 1.430 0.000 0.859 0.887 0.984 0.727 0.384 0.620 0.604 +1 0.810 -0.507 0.384 0.888 1.317 0.867 -1.311 -0.965 0.000 0.920 -1.167 0.570 2.215 1.116 -0.425 -0.699 0.000 1.072 0.097 1.420 3.102 0.887 1.098 0.991 0.790 0.888 0.923 0.865 +0 1.542 1.087 0.611 0.407 0.153 0.475 -2.321 1.049 0.000 1.036 -1.229 -0.647 1.107 1.142 -0.144 -1.375 2.548 0.503 -1.309 1.622 0.000 1.177 1.130 0.982 1.068 0.975 1.145 1.194 +1 0.385 0.857 -1.130 1.611 1.466 1.363 0.351 0.016 2.173 0.393 -0.509 1.735 0.000 0.379 0.639 1.053 2.548 1.131 0.402 -0.900 0.000 0.755 1.138 0.994 0.579 0.737 1.061 0.940 +1 1.010 -0.625 -0.011 0.383 1.620 0.822 -0.547 -1.667 0.000 0.354 -1.294 1.425 0.000 0.917 0.342 0.170 2.548 0.900 0.191 -1.117 3.102 0.755 0.684 0.990 0.632 0.639 0.517 0.506 +0 0.824 -0.613 -0.018 0.446 -1.459 1.084 -0.982 -0.750 1.087 1.622 1.438 0.938 0.000 1.488 2.255 -1.242 0.000 2.901 0.512 1.231 3.102 0.938 0.959 0.990 0.765 2.483 1.806 1.545 +1 1.115 -0.809 -1.350 0.766 -0.213 1.114 -1.178 1.240 0.000 1.833 -0.548 -0.970 2.215 2.052 -0.726 0.318 0.000 1.338 -0.088 1.632 3.102 0.917 0.824 1.094 0.784 1.062 0.839 0.738 +0 2.743 0.288 -1.303 0.672 -0.736 1.816 -1.435 0.680 1.087 0.704 -1.449 0.044 1.107 1.750 -0.910 -1.157 0.000 1.002 -0.533 0.585 0.000 1.484 1.106 0.993 2.713 0.904 1.800 1.471 +0 0.418 0.799 1.673 0.528 -1.454 0.977 -0.306 -0.048 1.087 0.768 -1.166 -1.382 0.000 0.583 -1.048 0.599 0.000 0.697 0.807 1.098 1.551 1.003 0.978 0.987 0.990 0.958 0.845 0.754 +0 1.177 -1.178 -0.344 0.161 1.077 0.549 0.131 -0.908 0.000 0.851 -0.954 0.771 2.215 0.507 -0.232 1.512 0.000 0.725 -0.570 1.208 3.102 0.937 1.033 0.994 0.639 0.292 0.630 0.620 +1 0.594 0.915 0.073 0.854 -1.619 1.197 -0.380 0.974 1.087 0.848 -0.812 -1.081 0.000 0.648 -0.581 -0.152 2.548 0.519 -0.368 1.466 0.000 0.887 1.222 0.987 0.790 0.943 0.866 0.804 +1 1.288 -0.597 -1.190 2.446 -1.338 1.357 1.603 0.658 0.000 0.688 -0.030 -1.473 0.000 1.176 0.181 -0.238 0.000 1.301 -0.251 0.723 1.551 0.830 0.737 0.982 1.092 0.267 0.935 0.925 +1 1.105 1.192 1.173 0.676 0.359 0.945 0.736 1.559 2.173 1.215 2.299 -1.104 0.000 1.028 2.122 -0.095 0.000 0.922 1.277 0.574 3.102 0.791 0.653 0.985 0.930 0.865 0.887 0.782 +0 0.936 1.106 -1.311 0.717 1.476 0.441 0.054 -1.381 0.000 0.635 1.199 -0.002 1.107 1.050 -0.509 0.130 2.548 0.443 1.650 0.312 0.000 1.012 0.993 0.991 0.830 0.893 0.992 0.840 +0 0.724 -0.133 -1.375 1.353 -0.875 1.534 0.876 0.684 2.173 0.517 1.248 0.471 0.000 1.621 -0.399 -0.908 0.000 1.863 0.000 1.507 3.102 0.683 0.977 0.985 1.535 1.458 1.152 0.939 +0 0.711 0.427 1.648 0.862 1.368 0.456 -1.998 -0.954 0.000 1.134 -1.110 0.147 2.215 0.535 -1.132 -0.851 2.548 0.470 -0.463 1.201 0.000 0.867 0.918 0.992 0.700 0.649 0.752 0.701 +0 0.441 1.675 -1.415 0.804 
0.367 1.767 0.190 -0.725 2.173 1.638 1.090 0.970 2.215 0.812 1.275 0.296 0.000 1.388 0.750 -1.514 0.000 0.909 1.200 0.986 2.148 2.776 1.741 1.426 +1 1.700 -0.424 -1.294 0.572 -1.057 0.740 -0.733 -0.783 0.000 1.960 -1.319 0.250 2.215 1.264 1.533 1.297 0.000 0.891 -1.819 0.660 0.000 0.868 0.879 0.983 0.542 1.278 1.195 0.996 +0 1.246 -1.193 1.361 0.574 0.328 0.594 -0.467 -1.062 2.173 0.557 -0.922 -0.554 0.000 0.969 -0.370 0.499 2.548 0.443 -1.947 0.746 0.000 0.770 0.738 0.990 0.665 0.933 0.686 0.630 +1 0.538 0.690 -0.179 1.195 -0.928 0.419 1.353 1.032 0.000 0.581 -0.160 -0.501 2.215 1.067 -0.490 0.765 0.000 0.849 0.233 -1.287 1.551 1.357 0.970 0.987 0.874 0.436 0.711 0.831 +0 0.992 -1.496 -0.831 0.806 1.709 1.118 -0.779 -1.099 2.173 1.240 0.182 0.337 2.215 1.330 1.281 0.537 0.000 1.106 0.452 1.061 0.000 0.852 0.902 0.986 0.884 1.884 1.381 1.496 +1 1.963 0.507 0.777 1.179 -0.738 1.353 -0.155 -1.415 2.173 0.549 0.923 1.473 2.215 0.579 -0.164 -0.941 0.000 1.763 -1.087 0.212 0.000 1.004 1.143 2.063 1.677 0.984 1.115 1.090 +0 1.304 1.487 -0.030 1.077 0.961 1.363 1.330 -1.425 0.000 1.301 0.623 0.497 1.107 1.045 0.547 -0.864 2.548 0.643 0.682 1.645 0.000 0.647 0.795 1.280 0.933 1.167 1.022 0.994 +0 0.396 -0.606 0.851 0.564 -0.887 0.757 0.072 -1.294 0.000 1.554 0.412 0.504 2.215 0.460 -0.665 0.179 1.274 0.966 1.096 -1.247 0.000 0.866 0.886 0.983 1.592 0.607 0.983 1.086 +1 1.411 -2.003 -0.452 0.329 0.224 0.687 -1.568 -1.417 2.173 1.022 -0.311 1.391 2.215 0.734 -0.960 -0.794 0.000 0.823 2.281 0.925 0.000 0.613 0.849 0.975 0.868 1.091 0.947 0.777 +0 0.878 -0.751 1.327 3.579 1.408 1.228 -1.540 -0.574 0.000 1.223 1.277 -0.747 0.000 1.139 1.249 0.496 2.548 1.530 1.266 -0.142 0.000 0.942 0.998 0.987 0.852 0.746 1.038 1.340 +0 0.675 -1.009 -0.347 1.295 -0.716 0.946 1.286 1.189 0.000 0.512 2.521 -1.234 0.000 0.459 0.754 -0.861 2.548 0.894 -0.051 1.146 0.000 1.033 0.848 0.986 0.872 0.610 0.603 0.831 +1 1.667 -0.565 0.759 1.695 1.215 1.524 2.534 -0.887 0.000 1.595 -1.159 -0.227 2.215 0.680 -1.005 -1.595 2.548 1.409 -1.178 0.974 0.000 0.729 1.160 0.987 0.815 1.046 1.081 0.876 +1 0.822 0.304 -0.666 1.194 -0.132 1.024 0.116 1.673 2.173 0.587 0.497 0.754 0.000 0.488 -0.871 -0.080 2.548 0.659 0.414 1.192 0.000 0.311 0.692 0.993 0.518 1.011 0.835 0.730 +0 1.133 -0.647 -0.998 0.112 1.470 0.848 -0.343 1.082 0.000 1.305 0.181 -0.200 1.107 1.737 1.726 -1.128 0.000 1.603 0.174 0.595 3.102 0.912 0.922 0.979 0.861 0.858 0.992 0.903 +0 0.593 0.250 -0.005 0.593 -1.536 1.027 -0.787 0.746 2.173 1.521 0.237 -0.282 2.215 2.315 -1.293 -1.360 0.000 1.642 -1.183 0.918 0.000 1.905 1.676 0.987 0.876 1.779 1.613 1.448 +0 1.550 -0.517 -0.243 1.418 -1.149 1.416 -1.681 -1.687 0.000 2.057 -0.067 0.424 2.215 0.548 -0.472 -1.688 2.548 0.938 1.006 0.208 0.000 0.819 0.752 1.495 1.549 1.097 1.546 1.396 +0 0.769 -2.183 0.341 0.621 0.870 0.685 0.452 -0.699 1.087 0.925 -1.167 -1.569 2.215 0.698 -0.263 0.496 0.000 0.554 -0.487 -1.667 0.000 0.646 0.790 0.981 0.888 1.364 1.041 0.818 +0 0.767 -0.494 1.477 1.613 0.797 0.715 -1.306 -1.343 0.000 0.910 -1.513 -0.492 0.000 0.794 -0.889 1.631 1.274 1.404 -0.996 -0.028 3.102 1.029 0.871 0.987 0.721 0.809 0.730 0.775 +1 1.079 -1.245 -1.356 0.789 -1.554 1.017 -0.603 0.136 2.173 0.555 -0.534 0.685 0.000 0.703 -0.828 1.088 2.548 0.448 0.813 -1.102 0.000 0.847 0.841 0.978 0.694 0.813 0.841 0.727 +0 1.117 -1.143 1.593 1.739 -1.417 0.309 -0.462 1.325 0.000 0.831 -1.035 -0.004 2.215 0.940 0.674 0.599 2.548 0.578 -0.890 0.290 0.000 0.989 0.877 0.982 1.666 1.083 1.220 1.185 +1 0.433 -0.543 0.418 0.843 1.568 1.073 
0.753 -0.890 0.000 0.701 0.248 -0.434 2.215 0.724 -2.226 0.824 0.000 1.954 0.645 0.614 3.102 1.299 0.973 0.993 1.435 0.900 1.062 1.212 +1 1.255 -0.382 -1.548 0.619 -0.308 1.062 0.288 0.378 2.173 1.067 -1.041 -1.599 0.000 0.765 -0.695 -0.946 1.274 0.835 -0.466 0.414 0.000 1.009 0.666 1.099 1.151 1.214 0.994 0.854 +0 1.279 0.539 -1.468 0.312 0.991 0.667 -1.582 -0.684 0.000 0.945 -0.145 1.385 2.215 1.060 -0.101 -0.180 2.548 0.624 -2.206 0.456 0.000 1.002 1.129 0.984 0.662 1.050 1.015 0.981 +0 2.094 0.416 -1.638 0.462 -0.603 1.339 1.348 -0.356 2.173 1.630 1.087 1.506 0.000 1.023 -0.107 0.183 0.000 1.380 0.423 0.493 3.102 0.944 0.954 1.095 1.484 1.181 1.088 1.044 +0 0.680 -0.768 0.564 1.169 1.527 0.524 -0.053 -0.583 0.000 0.801 0.341 0.220 0.000 1.258 0.538 -1.330 2.548 1.138 -0.083 0.470 1.551 0.952 1.012 0.985 0.733 0.968 0.837 0.832 +1 2.188 0.051 -1.652 0.517 0.457 1.206 0.545 0.158 2.173 0.465 -1.881 0.693 0.000 1.532 0.412 -1.069 2.548 0.531 0.804 0.654 0.000 1.305 1.398 1.394 1.465 1.516 1.174 1.097 +1 1.760 -0.554 0.291 0.506 0.694 1.160 0.212 -1.118 2.173 0.438 0.053 -1.481 0.000 0.443 0.119 0.815 0.000 0.631 0.753 1.224 3.102 0.647 0.689 0.974 0.870 0.840 1.065 0.824 +1 1.685 0.469 -0.039 0.196 0.391 1.476 1.456 1.373 2.173 0.731 0.138 -0.689 0.000 0.820 0.594 -1.669 0.000 0.582 2.296 -0.783 0.000 0.967 0.686 0.993 1.476 1.074 0.994 0.930 +0 1.534 1.079 0.443 0.823 1.317 0.412 2.041 1.690 0.000 0.678 0.288 -0.426 2.215 0.429 0.380 -1.171 0.000 0.649 0.673 -1.384 3.102 0.946 0.949 1.104 0.714 0.481 0.647 0.653 +1 0.590 -0.970 -0.212 1.752 1.189 0.708 -0.681 -1.202 0.000 0.655 -0.142 1.271 1.107 0.765 -1.462 -0.995 0.000 1.625 2.174 -0.213 0.000 0.676 1.006 1.342 0.790 0.409 0.726 0.780 +0 1.707 0.327 -1.150 0.274 0.035 1.279 -1.306 0.536 0.000 0.510 -1.738 -0.003 0.000 0.999 -0.884 -1.137 0.000 1.211 -0.034 1.572 3.102 0.899 1.280 0.990 0.854 0.530 0.750 0.955 +0 0.422 0.755 -0.656 1.486 -0.123 0.597 0.739 1.603 2.173 0.661 -0.740 0.777 0.000 0.728 -1.106 -1.175 2.548 0.514 2.157 1.515 0.000 2.012 1.281 0.989 1.804 1.060 1.274 1.225 +0 0.962 1.619 -1.155 0.727 0.245 0.492 1.882 0.800 0.000 0.593 0.460 -1.629 2.215 0.673 0.232 -0.533 2.548 0.753 0.898 1.112 0.000 0.696 0.756 1.104 0.750 0.565 0.618 0.593 +0 0.412 0.253 -1.542 1.399 -0.477 1.000 0.007 0.499 2.173 0.800 1.572 -1.377 0.000 0.727 0.762 1.587 1.274 0.465 0.210 0.159 0.000 0.976 0.667 0.988 1.143 0.992 0.844 0.792 +1 0.953 0.033 -1.679 1.534 -1.613 0.872 0.529 0.135 1.087 1.122 0.872 0.961 2.215 0.880 -1.567 -0.103 0.000 0.937 0.480 -0.312 0.000 0.705 0.730 0.989 1.304 1.021 0.994 0.789 +0 0.297 1.121 1.505 1.861 -0.493 0.685 -0.992 1.602 0.000 0.759 0.430 0.642 2.215 0.564 -0.762 0.440 0.000 1.102 0.159 -1.512 3.102 0.973 1.009 1.003 0.813 0.777 0.727 0.937 +0 0.860 0.689 -0.496 0.426 0.476 1.227 0.027 0.623 1.087 0.886 -0.615 1.652 2.215 0.383 0.137 0.360 0.000 1.674 -0.877 -1.114 0.000 1.027 0.830 0.988 0.899 1.331 0.941 0.832 +1 0.812 0.543 0.987 0.471 -0.626 0.669 -1.155 0.297 0.000 0.908 0.179 -1.363 2.215 0.975 0.443 0.024 2.548 0.642 -1.721 -1.582 0.000 1.098 1.191 0.987 0.731 0.961 0.975 0.828 +0 0.318 1.916 -1.528 1.226 -0.246 0.461 0.163 1.165 2.173 0.359 1.899 1.459 0.000 0.665 1.098 0.421 2.548 0.536 2.082 -0.759 0.000 0.544 0.830 0.984 0.595 0.570 0.580 0.582 +0 0.901 0.831 -1.387 4.221 -1.484 1.743 -1.194 0.087 0.000 0.867 -0.058 0.357 2.215 0.780 0.780 1.346 2.548 0.887 -0.033 0.785 0.000 1.272 1.104 0.988 0.872 0.797 1.252 1.708 +1 1.020 -0.240 -0.819 1.867 0.123 1.243 2.916 1.640 0.000 1.422 -0.629 -0.174 
0.000 1.607 -0.531 1.497 0.000 1.218 0.206 -0.076 3.102 2.315 1.445 1.436 1.076 0.673 1.020 0.949 +0 0.911 -1.205 0.164 0.787 -1.690 1.524 -0.580 -0.014 1.087 1.700 0.042 -1.671 0.000 0.839 0.887 -0.367 0.000 1.374 -1.011 0.986 0.000 0.578 1.540 1.167 1.115 1.804 1.354 1.185 +1 0.792 0.613 1.578 1.155 -1.316 2.242 -0.825 -0.035 0.000 1.424 -0.087 1.543 1.107 1.393 -0.092 0.973 2.548 1.249 0.508 -1.268 0.000 0.827 0.894 0.981 0.697 0.735 0.664 0.626 +1 0.996 -0.088 -1.723 0.728 -0.582 0.423 0.613 -0.434 2.173 0.772 0.651 -1.406 2.215 0.608 -0.967 -0.144 0.000 1.086 0.429 0.340 0.000 0.870 0.995 1.011 0.683 0.646 0.653 0.624 +1 0.819 -1.525 -0.766 0.873 1.129 0.731 -0.920 -0.389 0.000 0.500 -0.073 1.730 0.000 0.974 -0.295 1.031 0.000 0.936 0.894 -1.058 3.102 0.645 0.790 1.161 0.680 1.014 0.868 0.750 +0 0.382 1.907 0.741 0.098 -0.924 0.461 0.789 0.404 0.000 1.287 0.096 -1.020 2.215 0.434 -0.826 0.254 2.548 0.821 0.044 1.141 0.000 0.681 1.071 0.990 0.639 0.836 0.700 0.636 +0 0.625 0.544 -1.069 0.506 0.281 0.560 -0.650 -1.569 0.000 0.936 -0.797 0.513 2.215 0.875 0.185 1.166 2.548 0.863 1.837 -0.844 0.000 0.835 0.856 0.985 0.764 0.742 0.859 0.840 +1 1.636 -0.107 1.626 0.151 0.679 0.668 0.901 -0.472 2.173 0.392 -0.551 1.025 0.000 1.585 0.536 0.402 2.548 1.238 -0.244 -1.031 0.000 0.881 0.965 0.988 1.001 0.932 0.871 0.792 +1 1.095 -0.028 1.404 0.586 0.748 0.653 -0.676 0.787 0.000 1.153 0.041 -1.451 2.215 1.093 0.184 0.472 0.000 1.069 0.617 -0.444 3.102 0.815 0.953 0.978 0.863 0.866 0.885 0.793 +0 1.015 0.154 1.097 0.784 1.205 1.176 0.768 -0.982 2.173 1.263 1.747 1.063 0.000 1.677 1.427 -0.314 0.000 0.875 1.182 0.679 0.000 0.861 1.159 0.991 0.811 0.326 0.991 1.065 +1 1.665 -0.167 0.929 0.991 -0.138 1.314 -1.467 -0.968 2.173 0.623 -2.753 0.752 0.000 0.679 0.065 1.592 0.000 0.685 -1.024 -1.653 3.102 2.128 1.185 1.459 1.800 0.594 1.136 1.198 +0 0.655 -0.854 -1.038 1.626 -1.230 0.813 -0.356 0.861 1.087 0.322 2.792 -0.137 0.000 0.803 -0.822 0.517 2.548 0.524 0.857 -0.547 0.000 0.600 1.371 1.003 1.497 0.414 1.086 1.649 +0 2.814 1.249 -0.195 0.544 1.183 1.116 0.659 0.255 2.173 2.117 -0.086 1.738 1.107 0.688 -2.364 1.356 0.000 0.883 1.439 -1.264 0.000 3.737 2.546 1.623 1.105 2.367 1.996 2.005 +0 0.984 -0.227 -0.807 1.883 -1.121 0.602 -1.129 0.765 0.000 0.679 0.790 0.117 2.215 0.633 0.278 0.605 0.000 0.569 -0.912 1.471 1.551 0.888 0.975 0.993 0.665 0.814 0.869 0.874 +1 0.724 -0.539 0.979 1.230 0.258 0.406 -1.160 -0.796 1.087 0.618 1.000 -1.460 0.000 0.530 -0.930 1.683 1.274 0.561 0.033 0.316 0.000 0.863 0.941 0.996 0.665 0.457 0.628 0.742 +0 1.771 1.430 -1.317 1.005 -1.657 2.017 0.973 0.329 2.173 1.638 0.053 -1.562 2.215 1.384 -0.076 -0.440 0.000 1.237 0.301 0.910 0.000 1.392 1.383 0.996 1.966 2.946 1.685 1.427 +1 0.741 -0.897 -1.012 0.570 -1.041 1.318 -0.287 -0.145 0.000 1.097 -0.642 1.584 2.215 1.767 -1.274 0.871 2.548 1.251 0.010 1.320 0.000 0.830 1.074 0.976 1.007 1.048 0.912 0.868 +0 1.203 -1.280 1.044 0.561 1.738 1.449 -0.130 0.895 2.173 1.366 -0.196 -0.703 0.000 1.280 0.381 -1.051 2.548 0.602 -0.918 -0.229 0.000 0.921 0.856 0.992 0.936 1.733 1.205 1.062 +0 0.943 -1.171 -0.799 0.607 -0.149 0.934 -1.570 -1.099 2.173 1.199 -2.631 0.883 0.000 1.309 -1.370 0.718 2.548 0.655 -0.508 -0.482 0.000 1.809 1.185 0.986 0.914 1.375 1.081 1.058 +0 0.532 -0.051 0.299 0.856 -1.708 1.207 1.661 0.543 0.000 0.793 1.166 -0.169 2.215 0.800 0.905 0.968 0.000 3.044 0.617 -1.456 3.102 0.901 0.941 0.987 0.805 1.326 1.153 0.962 +1 0.283 1.950 -0.709 0.896 1.250 1.061 1.257 -1.517 0.000 1.402 0.759 0.363 2.215 0.983 0.656 -0.450 
2.548 0.562 1.300 1.050 0.000 0.886 0.986 0.991 0.858 0.835 0.948 0.803 +1 1.039 -0.094 0.708 1.258 1.289 1.013 -0.687 -0.574 2.173 0.537 -0.562 0.516 0.000 0.551 -1.487 0.632 0.000 0.605 0.553 -1.311 3.102 0.469 0.948 0.997 0.658 0.793 0.935 0.831 +0 1.053 -0.766 -0.682 0.479 0.737 0.855 -0.834 0.172 1.087 0.464 -1.537 1.207 2.215 0.974 -1.017 -1.032 0.000 1.060 -1.535 1.701 0.000 0.913 1.114 0.986 0.709 0.822 0.759 0.670 +1 0.844 0.445 -0.041 0.150 1.562 1.254 1.260 -0.134 2.173 1.849 1.394 -1.716 2.215 0.449 1.315 -1.013 0.000 0.497 0.879 0.495 0.000 0.640 0.783 0.993 1.509 2.225 1.368 1.028 +1 0.919 -0.426 -0.336 0.495 -0.147 0.816 -0.468 1.345 0.000 0.634 -0.630 -1.334 2.215 0.712 1.031 1.021 2.548 0.909 -2.463 -0.486 0.000 2.467 1.512 0.980 1.205 0.945 1.310 1.112 +1 1.391 1.046 -0.479 1.945 -1.100 1.225 0.565 0.811 2.173 0.638 0.566 -1.656 1.107 0.375 -0.242 0.845 0.000 0.538 0.111 -0.022 0.000 0.447 0.552 1.209 0.872 1.034 1.135 0.842 +0 0.347 -1.265 -0.788 0.666 1.196 0.738 -2.518 -1.357 0.000 0.402 -2.857 -0.316 0.000 1.011 -0.839 0.566 2.548 0.629 -0.604 -0.347 0.000 0.884 0.852 0.979 0.558 0.357 0.592 0.672 +0 0.476 0.704 -0.342 1.287 0.069 0.871 -1.401 1.684 2.173 0.574 0.591 -1.326 2.215 0.587 -2.209 0.612 0.000 0.678 -0.578 -0.482 0.000 0.895 0.945 0.989 0.882 1.314 1.003 0.910 +1 0.773 0.965 -0.158 0.632 -1.162 0.660 0.879 -1.466 0.000 0.775 1.477 -0.984 1.107 1.732 0.174 0.534 2.548 0.705 0.592 0.940 0.000 0.891 1.074 0.982 1.142 1.493 0.903 0.844 +0 0.930 0.192 0.893 0.632 -0.962 0.527 -1.255 -0.858 2.173 0.759 0.773 0.234 2.215 0.813 2.611 1.099 0.000 1.362 1.627 -1.434 0.000 0.851 0.929 1.056 0.917 1.378 1.279 1.027 +1 1.297 -0.025 -0.510 0.437 -0.018 1.131 0.322 0.067 0.000 1.249 2.397 1.243 0.000 2.017 0.420 -1.127 2.548 1.207 0.985 0.101 0.000 0.950 1.211 0.984 0.678 0.713 0.739 0.695 +1 1.719 -0.291 -0.375 1.296 0.956 1.201 -0.230 -0.860 2.173 0.996 0.259 1.646 2.215 0.864 0.070 0.257 0.000 0.688 -1.330 -1.538 0.000 0.913 0.795 1.928 1.384 1.308 1.116 0.959 +0 0.843 1.415 0.073 1.151 -0.419 1.254 -1.318 -1.035 0.000 1.018 -0.151 0.908 2.215 0.964 1.235 1.555 0.000 1.038 -1.372 -1.494 0.000 0.909 1.388 0.984 1.266 0.673 1.174 1.077 +1 1.149 0.932 1.718 0.739 -0.164 1.262 0.461 0.631 0.000 1.337 -1.573 -0.825 0.000 1.944 -0.064 1.718 2.548 1.338 0.521 0.132 3.102 0.901 1.275 1.267 1.059 1.296 1.089 1.108 +0 0.805 0.035 -0.385 0.649 0.696 1.300 -0.584 -0.927 2.173 0.792 0.124 0.849 2.215 1.405 -1.619 -1.484 0.000 2.625 -1.612 0.762 0.000 1.912 1.833 0.990 0.981 1.589 1.547 1.237 +1 1.275 2.137 -1.509 1.461 -0.172 0.933 0.510 1.019 2.173 0.455 0.867 -1.676 0.000 0.726 1.134 -0.557 0.000 0.559 0.538 -0.213 1.551 0.762 0.966 1.766 0.979 0.687 1.126 0.906 +1 0.415 1.146 1.261 0.559 -0.892 0.908 -0.319 0.543 0.000 0.716 -0.645 1.135 0.000 1.925 -1.073 -1.116 1.274 0.683 -1.525 -0.713 0.000 0.919 0.854 0.992 0.974 0.966 0.995 0.858 +1 0.574 -2.051 1.238 1.484 -1.151 0.537 -2.667 -0.899 0.000 1.060 0.053 0.433 2.215 0.413 -0.213 0.076 0.000 0.397 -1.119 0.203 0.000 0.782 1.329 1.068 1.681 0.755 1.211 1.120 +1 1.169 0.480 -0.727 0.768 -0.599 1.149 -0.167 1.358 1.087 0.387 -0.989 0.083 2.215 0.436 1.265 -0.976 0.000 0.555 0.956 0.173 0.000 0.471 1.036 1.002 0.676 0.993 0.896 0.786 +0 1.162 1.122 -1.049 0.480 1.708 1.543 0.872 -0.492 2.173 1.163 -1.207 1.004 0.000 1.064 -0.916 1.492 0.000 1.125 0.296 0.678 1.551 0.755 0.969 0.983 1.011 1.268 1.639 1.351 +0 0.488 -2.176 -0.504 3.286 -0.107 1.953 -1.331 1.631 1.087 0.478 -0.872 -1.358 0.000 0.779 -0.706 0.380 2.548 0.373 -1.455 
-0.322 0.000 0.504 0.840 0.972 0.746 1.457 1.467 1.080 +1 1.404 -0.574 1.044 1.573 1.541 0.380 0.203 -0.777 0.000 0.585 -0.865 1.486 0.000 1.587 -0.374 -0.224 2.548 1.071 0.836 -0.622 3.102 1.063 0.985 0.989 1.324 0.841 1.102 0.922 +0 0.977 1.238 1.071 0.767 0.128 0.679 0.653 -1.073 2.173 0.725 0.453 0.220 2.215 0.593 0.823 -1.467 0.000 1.519 0.189 1.562 0.000 0.810 0.775 0.989 0.919 0.955 0.697 0.637 +0 0.713 -0.977 -0.273 0.562 1.301 0.801 -0.281 0.032 2.173 0.982 -1.762 -1.240 0.000 0.644 1.129 1.193 2.548 0.810 -1.071 0.628 0.000 0.826 1.421 0.991 1.174 1.078 1.195 0.999 +0 0.473 0.280 0.937 0.552 -1.172 0.824 -0.120 0.750 0.000 0.935 -0.335 1.594 0.000 0.576 -2.485 -0.541 0.000 1.131 0.080 0.314 3.102 1.301 0.929 0.985 0.659 0.572 0.714 0.662 +1 1.880 1.683 1.391 0.678 0.438 0.687 -0.475 -1.475 0.000 1.250 1.326 -0.565 2.215 1.190 0.499 0.131 2.548 1.072 1.984 0.315 0.000 0.705 0.871 1.183 1.257 0.936 1.001 0.821 +1 1.223 0.954 0.544 1.366 1.125 0.967 0.249 -1.510 2.173 1.215 -1.295 -0.534 0.000 0.364 0.059 -0.533 1.274 0.399 -0.818 -1.490 0.000 0.707 1.390 0.992 0.756 0.575 0.876 1.221 +0 2.197 -1.095 -0.518 0.249 1.504 0.968 -0.454 0.142 2.173 0.966 -0.598 -1.230 0.000 1.666 -1.038 1.355 2.548 1.039 -2.367 1.283 0.000 0.992 0.951 0.993 0.950 1.509 1.013 0.906 +0 0.747 -1.363 1.295 0.653 -0.908 0.449 -2.227 0.369 0.000 0.596 0.449 1.153 2.215 0.621 -0.001 -0.581 2.548 0.671 -1.282 -1.073 0.000 0.863 0.903 0.982 1.094 0.664 0.837 0.783 +1 0.563 0.766 -1.404 1.130 1.064 0.829 -0.451 0.429 2.173 0.792 -0.924 -0.754 2.215 0.383 -1.168 1.656 0.000 0.424 -2.226 1.678 0.000 0.334 0.884 0.984 0.982 1.086 0.840 0.773 +0 0.348 -1.274 0.396 0.933 -1.576 0.795 0.939 1.230 0.000 0.977 0.737 -0.302 0.000 1.224 -0.104 -1.431 2.548 1.868 0.413 0.191 1.551 1.847 1.241 0.991 1.252 1.204 1.346 1.581 +1 1.946 1.190 -0.132 1.177 -0.592 0.930 0.963 -1.705 1.087 1.092 1.496 0.644 2.215 0.576 1.659 1.678 0.000 0.366 2.036 -1.492 0.000 0.214 0.650 0.982 1.383 1.337 1.101 0.854 +0 1.871 0.765 -1.514 0.093 1.481 0.586 -1.006 -0.479 2.173 0.856 -1.500 1.166 0.000 0.907 0.698 -0.032 2.548 1.215 -0.918 0.327 0.000 0.958 0.984 0.977 0.892 0.988 0.939 1.056 +1 2.088 0.777 0.525 0.263 -0.637 0.729 -0.605 -1.381 2.173 0.478 0.891 -0.095 2.215 0.670 -0.218 -0.937 0.000 0.803 0.961 -1.712 0.000 0.801 0.759 0.983 0.564 1.080 0.932 0.795 +0 0.608 0.357 1.542 0.345 0.454 0.829 1.626 0.251 2.173 0.742 1.222 -0.653 0.000 1.383 0.645 -1.690 0.000 1.023 1.454 -1.327 0.000 0.828 0.698 0.990 0.795 1.106 0.865 0.736 +0 0.789 0.004 -0.300 0.997 -1.326 0.858 0.317 0.012 0.000 0.968 -0.259 1.356 2.215 0.854 -0.112 -1.243 0.000 0.724 -0.281 0.855 3.102 1.434 0.925 0.987 0.854 0.331 0.758 0.704 +1 1.450 -0.258 0.094 0.563 0.632 0.576 0.869 -0.985 2.173 0.358 -1.118 -0.906 0.000 0.551 0.514 -1.692 2.548 0.580 0.814 1.134 0.000 0.950 0.836 0.996 0.752 0.432 0.688 0.642 +1 2.154 -0.461 -0.590 0.639 -0.435 0.983 0.066 1.562 2.173 0.824 0.221 0.443 2.215 0.575 0.325 -1.633 0.000 0.807 -0.417 0.470 0.000 0.786 0.727 0.988 0.966 1.126 1.031 0.825 +0 1.407 0.002 -1.705 0.547 -1.212 0.702 -0.442 -0.424 2.173 0.615 1.400 -1.461 0.000 0.722 1.071 0.382 2.548 0.732 1.314 0.078 0.000 0.862 1.220 0.995 0.858 0.994 0.833 0.794 +1 2.178 -0.161 -1.600 0.512 -0.812 0.457 0.257 -0.280 0.000 1.012 1.729 0.320 0.000 0.344 -0.889 -0.042 2.548 0.645 0.847 0.944 3.102 1.417 0.865 0.985 0.691 0.505 0.682 0.926 +0 0.805 0.089 -0.785 1.437 -0.133 0.768 -2.669 0.096 0.000 1.747 -0.814 1.323 2.215 1.233 -1.311 -1.076 2.548 1.490 0.217 -1.565 0.000 0.876 1.013 
0.987 1.388 1.380 1.375 1.071 +1 0.916 0.395 1.343 1.113 -0.929 0.928 -1.044 1.565 2.173 0.815 -1.244 0.479 0.000 1.458 2.320 1.283 0.000 1.979 -0.948 -0.804 0.000 0.901 0.770 1.244 1.217 1.264 0.901 0.891 +0 0.666 1.839 1.718 0.602 0.661 1.133 0.581 -1.072 1.087 1.360 -0.479 0.289 0.000 0.745 -1.339 1.359 2.548 0.370 0.445 1.067 0.000 0.775 0.888 0.991 1.010 1.679 1.120 1.051 +1 0.848 -1.166 -1.525 0.438 -0.142 0.837 0.250 0.587 2.173 0.657 0.924 1.739 2.215 1.151 -0.229 -0.705 0.000 0.427 0.219 0.227 0.000 0.610 0.885 0.992 0.877 1.017 0.815 0.717 +0 1.611 -1.495 0.112 0.640 0.646 0.756 -0.446 -1.533 0.000 0.513 1.221 -0.239 0.000 0.725 0.808 1.002 2.548 1.709 0.310 -1.260 3.102 1.760 1.099 0.989 1.567 0.791 1.362 1.324 +1 0.748 0.541 1.116 1.108 0.246 0.888 -0.448 -0.682 0.000 0.502 1.019 -1.734 0.000 0.796 -1.341 -1.467 1.274 0.488 1.466 1.009 0.000 0.464 1.175 0.985 0.498 0.826 0.748 0.707 +1 0.371 -0.504 -0.644 1.314 0.718 0.728 0.555 1.234 2.173 0.864 0.594 -0.840 0.000 1.094 1.112 -1.212 0.000 1.594 0.142 0.302 3.102 0.674 1.087 0.987 0.731 0.879 0.870 0.794 +1 0.479 1.532 1.644 1.258 0.374 0.596 -1.460 -1.411 0.000 0.777 0.164 -0.827 2.215 0.426 0.755 1.325 0.000 0.922 -0.491 -0.872 3.102 1.072 0.926 0.986 1.140 0.299 0.881 0.800 +1 0.559 -1.952 -1.249 0.327 0.073 1.554 -0.992 0.946 2.173 1.168 -0.464 -0.416 0.000 0.913 1.058 -0.803 0.000 0.518 -1.177 -1.010 0.000 0.847 0.699 0.986 1.047 0.762 0.885 0.771 +0 0.920 0.594 0.958 0.506 -0.675 0.760 -0.224 0.955 0.000 0.830 -1.259 -0.470 2.215 0.632 -0.610 -1.658 0.000 0.938 0.310 -0.793 3.102 0.924 0.878 0.989 1.214 0.778 0.813 0.803 +0 0.725 -0.151 -1.293 1.845 -0.418 0.614 -0.416 0.599 2.173 0.844 0.909 1.447 0.000 0.721 -0.906 -0.983 0.000 0.850 -1.024 1.437 0.000 0.979 0.979 1.137 0.898 0.638 0.767 0.766 +0 1.142 -0.388 -1.531 0.440 0.548 0.909 -1.397 -1.004 2.173 1.388 -0.665 0.578 2.215 0.483 -2.008 -1.532 0.000 0.498 1.733 0.364 0.000 2.337 1.698 0.984 0.870 1.747 1.382 1.088 +0 0.407 1.116 1.122 0.508 1.593 0.728 -1.158 -1.629 2.173 0.693 0.003 -0.526 0.000 1.245 -0.138 0.018 0.000 0.485 1.244 0.761 3.102 0.680 0.728 0.984 0.796 1.224 0.887 0.773 +0 0.941 1.236 0.309 0.421 -1.729 1.072 -0.268 -1.018 2.173 1.029 -0.012 1.112 2.215 0.714 0.390 -0.114 0.000 0.766 0.124 0.651 0.000 0.531 0.964 0.989 1.058 1.466 1.172 0.919 +1 1.898 1.209 1.677 1.083 1.127 0.789 0.204 0.343 2.173 0.829 0.597 0.044 0.000 0.896 0.837 -1.000 2.548 0.472 1.382 -0.956 0.000 0.778 0.728 0.987 0.854 1.052 0.938 0.823 +0 2.078 0.116 1.589 0.687 -1.296 1.041 0.058 0.074 1.087 0.618 0.450 0.478 1.107 0.591 -0.480 -1.365 0.000 0.771 -0.715 -0.310 0.000 0.619 0.852 0.984 0.900 0.483 0.951 0.810 +0 1.543 0.633 -0.593 0.699 -0.723 1.187 1.320 1.010 2.173 0.527 1.689 0.190 0.000 1.164 -1.048 -1.006 1.274 1.050 -0.152 1.015 0.000 1.265 1.093 0.973 0.950 2.724 1.485 1.248 +0 1.528 -0.490 0.185 1.757 -0.094 0.714 -0.966 -0.363 2.173 1.141 0.650 1.302 0.000 1.764 -1.653 -1.292 0.000 1.726 -0.474 1.288 3.102 0.878 0.930 0.989 0.858 1.195 1.040 1.068 +1 2.925 -0.099 0.405 1.998 0.024 2.810 -0.445 -1.491 0.000 0.753 0.831 -0.707 0.000 0.887 -0.433 1.572 2.548 1.124 -0.502 -0.151 3.102 1.068 0.937 1.126 0.707 0.764 0.828 0.849 +0 0.583 -0.771 -0.660 2.118 -0.751 1.121 0.582 0.760 2.173 0.651 1.387 1.012 0.000 1.812 0.609 1.425 2.548 0.794 0.839 -0.147 0.000 0.834 0.879 0.980 1.516 1.004 1.221 1.018 +1 1.063 -1.308 0.583 1.148 0.628 1.123 -0.293 -1.074 2.173 0.592 -0.667 1.394 0.000 0.411 -1.775 0.286 0.000 1.318 -0.084 -0.206 1.551 0.815 0.838 0.983 1.907 0.917 1.345 
1.053 +1 0.688 -0.766 -0.142 2.263 -0.518 0.956 -0.227 1.537 2.173 0.352 1.092 1.367 0.000 0.784 0.362 -0.140 0.000 0.639 -0.198 -1.442 0.000 0.842 1.001 0.977 0.889 0.837 0.988 0.842 +0 0.481 -0.444 -1.645 0.562 1.072 0.723 -1.154 -0.496 2.173 0.868 -0.175 -1.482 0.000 0.819 -0.779 0.873 0.000 0.529 0.107 -1.363 3.102 1.188 1.156 0.985 0.670 0.646 0.694 0.696 +1 0.565 0.263 0.543 1.933 0.794 0.721 0.563 -1.108 2.173 1.281 2.108 -0.517 0.000 0.846 0.139 1.511 0.000 0.569 1.448 -1.175 0.000 0.841 0.714 0.994 0.757 0.722 0.938 0.848 +1 0.441 0.802 -0.334 1.660 0.779 0.985 0.869 -1.440 0.000 0.892 0.055 -0.348 2.215 0.425 2.491 -0.158 0.000 0.574 -0.522 0.822 3.102 1.636 1.272 1.000 0.908 0.605 0.971 0.892 +1 0.534 -0.669 -0.692 0.991 0.309 0.906 0.144 -1.325 0.000 1.372 0.508 0.583 2.215 0.923 0.809 -0.903 0.000 1.382 0.131 1.189 3.102 0.875 0.989 0.987 1.412 0.685 1.009 1.112 +0 0.647 -0.899 0.728 1.828 0.395 1.032 -0.623 -1.280 1.087 0.824 -0.388 1.724 0.000 1.067 -1.951 -0.370 0.000 0.593 0.244 0.752 3.102 1.971 1.278 0.996 1.435 0.892 0.963 1.060 +1 1.044 -0.750 0.623 0.229 0.514 0.900 0.209 -1.175 0.000 0.701 -1.290 1.335 2.215 0.875 0.304 -0.646 0.000 1.105 -0.078 0.451 3.102 0.740 0.948 0.976 0.809 0.771 0.879 0.802 +0 0.451 0.161 -0.277 0.826 1.434 0.494 0.729 0.237 2.173 0.472 1.295 0.603 0.000 0.663 -0.876 -1.177 2.548 0.724 0.652 -1.408 0.000 0.769 0.900 0.985 0.665 0.964 0.683 0.612 +0 0.881 0.500 0.990 2.439 0.462 1.042 2.367 -0.563 0.000 1.659 0.682 1.662 0.000 1.394 -0.484 -0.332 2.548 1.389 0.204 -1.146 0.000 0.834 0.851 0.990 1.040 0.992 1.027 0.965 +0 1.409 -0.207 0.458 1.114 -0.008 1.252 0.411 -1.519 1.087 0.700 1.425 0.797 2.215 0.447 -1.605 -0.844 0.000 0.569 1.222 -0.920 0.000 1.344 1.243 0.995 1.308 1.417 1.319 1.137 +0 0.456 1.473 -1.394 1.272 -0.052 0.935 -2.623 -1.252 0.000 1.038 -0.064 0.183 2.215 0.903 0.756 -1.410 0.000 1.317 0.516 1.177 0.000 0.880 1.103 0.988 0.583 0.460 0.680 0.713 +0 0.484 0.081 -1.641 1.783 1.640 1.009 0.608 -0.083 0.000 1.281 0.111 -0.410 0.000 0.931 0.394 1.129 2.548 0.788 0.493 -1.679 3.102 0.929 1.001 0.991 0.947 0.379 0.813 1.092 +0 1.496 -1.081 0.476 0.509 0.031 0.897 0.243 -1.641 2.173 1.461 -0.152 0.787 1.107 1.100 0.839 -1.206 0.000 1.552 -0.189 -0.703 0.000 1.078 0.978 0.992 1.165 1.414 1.256 1.263 +1 0.533 1.277 0.857 0.958 1.422 0.837 0.584 -0.682 1.087 0.913 -0.999 0.251 0.000 1.016 -0.729 1.535 2.548 0.442 -0.983 -0.898 0.000 0.714 0.810 0.981 1.028 1.357 0.911 0.863 +1 1.070 0.556 -0.033 0.922 -1.274 0.976 0.463 -0.768 2.173 0.916 1.619 0.815 0.000 1.265 1.312 1.316 0.000 1.773 0.869 0.292 0.000 0.920 0.981 1.237 0.795 0.976 0.998 0.885 +1 1.064 -0.151 1.454 0.656 -1.265 0.819 -0.729 -0.449 0.000 1.092 0.621 0.436 2.215 0.789 1.178 -1.253 0.000 1.003 0.747 0.995 0.000 0.903 0.916 0.995 0.740 0.713 0.810 0.766 +1 1.625 0.938 -0.590 0.931 -1.687 0.934 1.613 -0.024 0.000 1.573 2.497 1.499 0.000 1.336 0.678 0.011 0.000 2.557 -0.621 0.823 1.551 0.919 1.162 1.423 1.747 1.029 1.367 1.238 +0 0.396 0.388 0.182 2.253 -1.550 1.976 0.037 -0.864 2.173 2.089 0.743 0.726 0.000 1.490 -0.135 0.498 2.548 1.115 -0.152 0.919 0.000 1.036 0.886 1.309 1.315 2.024 1.605 1.379 +1 1.777 -1.437 0.545 0.324 0.447 0.584 -0.888 0.906 0.000 1.861 -0.663 -1.038 2.215 0.332 0.051 -0.936 0.000 0.402 -1.405 -1.715 3.102 0.881 1.174 0.991 0.612 0.605 0.934 0.802 +1 0.593 -0.792 0.136 1.532 -0.676 0.681 0.531 1.226 1.087 0.536 -0.048 0.391 0.000 0.997 -0.818 1.686 2.548 0.487 1.149 -0.720 0.000 0.762 0.906 0.987 1.116 0.901 0.868 0.752 +1 0.879 -1.438 0.430 0.637 
-0.212 0.715 0.352 -1.681 2.173 1.232 1.380 1.275 0.000 1.090 0.707 -0.688 0.000 0.853 0.444 0.489 3.102 1.063 1.113 0.983 0.689 0.769 0.781 0.789 +0 2.928 -0.278 0.129 0.425 1.354 1.341 -0.324 1.666 1.087 0.633 -0.717 -0.765 2.215 0.860 0.126 -1.065 0.000 0.432 0.053 0.757 0.000 0.671 0.831 1.380 0.968 1.137 1.145 0.897 +0 0.496 0.908 0.904 0.830 -0.068 1.198 0.234 0.905 2.173 2.000 0.634 -0.922 2.215 0.973 0.890 1.337 0.000 0.680 1.283 -1.458 0.000 0.995 0.993 0.986 1.319 2.321 1.379 1.088 +1 1.563 -1.398 0.235 0.742 0.283 1.325 -0.746 1.307 2.173 0.506 1.028 -0.961 0.000 1.667 -1.021 -1.202 0.000 1.395 0.609 0.019 0.000 0.907 1.355 0.983 0.786 0.705 0.887 0.880 +1 0.578 1.509 0.206 1.197 1.189 1.189 0.429 -0.579 2.173 1.082 1.004 1.388 2.215 0.472 -0.632 0.596 0.000 0.367 -0.190 -0.902 0.000 0.461 0.802 0.983 1.572 1.712 1.193 0.991 +0 1.841 2.155 0.303 0.209 0.282 1.315 -1.245 -1.571 0.000 0.625 1.725 -1.725 0.000 1.717 -0.084 -0.283 2.548 0.722 0.715 -0.385 1.551 4.184 2.371 0.986 1.601 0.427 1.611 1.885 +0 0.871 -1.296 1.016 0.655 -1.494 0.565 -1.164 -0.190 2.173 0.534 -1.752 0.463 0.000 0.780 -0.413 -1.238 2.548 1.080 -1.257 1.644 0.000 0.877 0.824 0.985 0.820 0.737 0.639 0.617 +0 0.663 -0.509 0.123 1.114 0.846 0.808 -1.095 -1.147 2.173 0.966 -0.181 -0.418 2.215 0.842 -0.571 1.143 0.000 0.610 -1.639 1.217 0.000 0.573 1.013 0.988 1.042 1.008 0.885 0.777 +1 0.756 -0.407 -0.691 0.920 -1.523 0.617 -1.247 -0.132 0.000 0.603 -1.122 1.412 2.215 0.983 -1.259 0.828 0.000 1.068 0.581 0.977 3.102 1.070 0.865 0.983 0.979 0.819 0.809 0.779 +0 0.862 0.015 -1.363 1.406 1.358 0.469 -0.987 0.583 2.173 0.865 0.850 -0.402 0.000 0.834 1.324 0.098 2.548 0.874 0.589 -0.959 0.000 0.878 1.163 0.986 1.017 1.257 0.898 0.838 +1 1.089 0.926 1.253 0.411 -0.863 0.948 0.260 0.973 0.000 1.369 0.240 -0.998 2.215 0.684 0.766 0.266 2.548 0.499 1.237 -1.226 0.000 0.956 0.696 0.987 1.020 0.984 0.831 0.781 +1 1.014 1.240 -0.514 0.689 -0.232 1.706 0.788 1.366 2.173 0.730 0.083 -0.645 0.000 0.657 2.468 -0.237 0.000 0.744 0.800 0.362 3.102 1.870 1.091 0.979 1.756 0.944 1.193 1.122 +0 0.795 -0.492 0.453 0.395 -1.642 0.408 -0.771 1.406 0.000 1.040 -0.318 -0.968 2.215 0.901 -0.754 -0.073 2.548 0.640 -0.111 1.002 0.000 0.379 0.801 0.991 0.632 0.787 0.652 0.600 +0 1.831 -0.047 -1.168 1.818 -1.282 1.438 0.037 0.238 0.000 0.794 -0.241 -0.187 2.215 1.335 -1.116 1.294 0.000 2.173 2.007 0.991 0.000 0.684 1.108 1.001 0.824 1.176 0.884 0.970 +1 1.931 0.996 0.211 0.586 -0.922 1.242 1.125 -1.636 2.173 0.755 1.642 -0.675 0.000 1.369 -0.356 1.010 0.000 0.484 0.782 0.748 0.000 0.814 1.030 1.256 0.887 0.900 0.971 0.826 +1 0.749 -0.553 0.766 0.880 -0.468 0.526 -0.102 -1.634 2.173 1.311 -0.985 1.262 2.215 1.163 -1.190 -0.574 0.000 1.058 -0.735 -0.162 0.000 0.515 0.955 1.008 0.935 0.845 0.870 0.766 +0 1.049 -1.407 1.592 0.688 1.318 0.897 0.353 -0.221 0.000 0.327 -0.336 0.303 2.215 0.296 1.299 -0.030 0.000 0.367 1.160 0.415 0.000 0.672 0.739 0.977 0.847 0.353 0.550 1.020 +0 0.654 -1.450 0.128 0.826 -1.374 0.777 -1.263 -1.186 0.000 1.035 -2.167 -1.257 0.000 1.778 1.053 0.698 1.274 1.582 -0.023 0.122 3.102 0.953 1.697 0.994 1.861 1.026 2.076 1.601 +0 1.066 -0.232 -1.486 0.040 -1.530 0.763 -0.264 0.437 1.087 0.361 1.669 0.550 0.000 1.345 0.276 -1.322 2.548 0.715 1.024 -0.832 0.000 0.648 0.979 0.692 0.435 1.313 0.821 0.705 +1 1.928 0.024 -0.101 0.851 -0.782 0.772 0.495 -1.334 2.173 0.630 0.549 -1.649 2.215 1.472 0.272 1.112 0.000 0.637 0.872 0.033 0.000 0.973 1.032 1.021 0.952 0.289 0.793 0.805 +1 1.909 2.020 0.450 0.509 1.310 0.747 1.331 -1.513 
2.173 0.864 0.538 -0.642 2.215 0.805 2.021 1.631 0.000 0.946 1.718 -0.347 0.000 0.944 1.052 0.990 1.114 0.964 0.999 0.856 +1 0.363 -1.188 -0.139 2.492 -1.121 0.929 0.323 1.157 2.173 0.643 -0.660 0.235 0.000 0.942 -0.420 1.228 0.000 1.603 0.069 0.501 0.000 0.906 0.902 1.019 1.677 1.130 1.072 0.997 +0 0.706 0.967 -1.518 1.296 1.045 0.368 1.534 -0.688 0.000 0.690 -1.752 -0.161 0.000 1.451 -1.397 1.330 2.548 1.427 -0.117 -0.691 0.000 0.897 0.872 0.988 1.605 1.279 1.139 1.021 +1 1.347 -0.565 1.673 0.457 1.554 2.792 -0.745 0.474 0.000 2.670 0.405 -0.969 2.215 1.847 0.243 -1.408 2.548 0.595 0.439 1.097 0.000 0.671 0.948 0.986 1.101 0.926 1.283 0.992 +0 0.956 0.597 -1.681 0.213 0.039 0.859 0.074 -0.077 2.173 0.829 -0.366 -1.297 2.215 0.767 0.036 1.026 0.000 0.895 0.790 1.580 0.000 0.951 0.976 0.988 0.895 1.142 0.805 0.720 +1 1.698 -0.884 -0.603 0.720 0.064 0.674 -0.693 1.060 2.173 0.636 -0.198 0.027 0.000 1.237 -0.221 -1.697 1.274 1.113 0.415 1.551 0.000 1.144 0.939 0.988 1.018 0.742 0.861 0.806 +1 1.044 -1.352 -1.229 0.896 -0.263 1.325 -0.786 -1.511 2.173 0.737 -1.496 0.552 0.000 1.154 -0.684 0.172 2.548 1.219 1.058 0.747 0.000 0.661 0.671 1.025 1.018 1.539 1.002 0.870 +1 1.321 -0.470 0.926 0.517 -0.192 1.395 -0.534 -1.425 0.000 1.694 0.479 0.037 2.215 1.220 1.999 1.167 0.000 1.883 0.593 -0.386 3.102 0.770 1.500 0.986 1.010 0.625 1.358 1.135 +1 0.787 0.460 -0.567 1.286 -1.215 1.339 -0.164 -0.732 2.173 2.103 -2.307 0.867 0.000 0.949 -0.867 1.099 0.000 1.086 -0.604 0.172 0.000 0.841 0.785 0.990 0.688 0.850 0.862 0.773 +1 2.035 -0.779 0.207 1.149 0.771 1.028 -0.347 -1.063 1.087 0.701 1.888 -0.165 0.000 0.712 -0.047 -1.640 1.274 2.066 1.162 1.706 0.000 0.993 1.066 1.031 0.979 0.553 1.016 0.852 +0 0.858 -0.158 -0.991 0.605 1.371 0.806 0.158 1.463 0.000 0.744 -0.008 0.007 2.215 1.390 1.293 -0.586 2.548 0.838 0.855 0.922 0.000 0.813 0.987 0.991 1.272 1.003 0.925 0.898 +0 1.084 0.343 1.013 1.366 1.570 0.522 -0.205 0.056 2.173 0.774 -0.523 -0.741 0.000 0.458 -2.346 -0.399 0.000 1.218 0.507 1.621 3.102 1.135 0.970 0.986 0.593 0.905 0.896 0.915 +0 1.823 -0.803 0.155 0.827 0.512 0.969 0.091 -1.276 2.173 0.530 0.507 1.288 0.000 0.437 1.185 -0.948 0.000 0.402 -0.971 1.170 3.102 0.729 0.785 0.978 0.560 0.693 0.991 0.937 +1 0.454 0.409 1.624 0.502 1.660 1.147 -1.001 1.029 2.173 1.228 -0.602 -0.428 2.215 1.590 1.235 -0.293 0.000 0.803 -0.842 -1.686 0.000 0.538 0.919 0.987 1.700 1.723 1.711 1.392 +1 1.254 0.433 -0.735 0.152 1.100 0.928 -0.161 0.625 2.173 0.399 -0.565 1.568 1.107 0.427 -2.404 1.732 0.000 0.501 -1.540 -1.247 0.000 0.306 1.219 0.986 0.652 0.698 0.744 0.782 +0 0.491 1.219 -1.519 1.551 1.009 1.063 0.179 1.736 2.173 1.348 0.804 -0.252 2.215 1.319 -0.218 -0.323 0.000 0.620 -0.769 -1.553 0.000 0.958 1.075 0.989 1.287 1.813 1.222 1.179 +0 0.569 -1.331 -1.239 1.098 1.564 1.255 -1.641 -0.270 0.000 1.206 1.206 1.414 0.000 0.876 0.350 0.811 2.548 0.839 0.617 -0.481 3.102 1.114 0.875 0.982 1.478 0.613 1.215 1.608 +1 1.307 -0.702 -1.619 1.493 -0.015 0.427 0.363 -0.015 2.173 0.552 -0.650 0.640 0.000 0.690 1.035 -0.882 2.548 1.265 -0.056 1.413 0.000 0.920 0.779 1.920 1.110 0.546 0.896 0.770 +1 1.596 -0.384 1.026 1.259 1.449 0.437 -0.998 -0.115 2.173 0.700 -1.327 -1.342 2.215 0.615 1.240 -0.582 0.000 0.390 -0.686 -1.375 0.000 0.798 1.031 0.982 0.943 0.742 0.773 0.837 +1 0.411 0.919 -1.575 1.321 0.230 0.725 1.090 0.992 2.173 0.660 1.415 -0.544 0.000 0.756 0.622 1.572 2.548 0.415 1.639 -0.396 0.000 0.886 0.906 1.020 0.708 0.499 0.602 0.591 +1 0.616 1.620 -0.370 1.340 -0.566 2.037 0.733 0.902 2.173 1.775 -2.561 -1.128 
0.000 0.893 0.510 -1.560 0.000 0.748 0.577 0.450 0.000 0.878 1.104 0.990 0.454 0.789 1.026 0.850 +0 1.293 1.119 0.048 0.432 1.649 0.875 1.609 1.048 2.173 1.277 1.158 -1.120 1.107 0.427 0.394 -0.721 0.000 0.366 0.018 0.269 0.000 0.351 0.804 1.027 0.921 1.481 0.886 0.704 +1 2.075 -1.040 -1.303 1.249 -0.448 0.669 -2.452 0.456 0.000 1.050 -1.154 0.270 2.215 1.211 0.337 1.439 0.000 0.711 0.310 -0.863 0.000 0.896 0.707 1.555 1.276 0.655 0.855 0.931 +1 1.613 1.361 1.703 0.769 1.245 0.911 1.745 -0.243 0.000 0.637 0.889 0.579 2.215 0.482 0.751 -1.353 0.000 0.510 0.226 -1.186 1.551 1.158 0.943 0.982 0.709 0.541 0.639 0.750 +0 0.842 0.981 1.457 1.649 0.522 0.521 -0.264 -1.033 0.000 0.773 0.682 -0.528 2.215 0.782 -0.671 -1.576 0.000 0.424 -0.598 1.281 0.000 0.646 0.710 1.218 0.652 0.494 0.646 0.697 +1 1.868 0.681 -1.647 0.801 -1.618 0.470 -0.395 0.517 0.000 1.147 0.901 -0.425 2.215 0.933 1.144 -0.773 0.000 1.020 0.979 0.397 3.102 0.874 0.765 0.982 0.908 0.670 0.868 0.747 +1 0.346 1.307 0.018 0.214 -1.352 0.991 0.202 -0.075 0.000 0.805 0.938 1.566 2.215 0.949 0.075 0.665 0.000 1.309 0.275 -1.189 3.102 0.910 1.094 0.984 0.643 0.641 0.742 0.665 +1 0.679 0.765 0.946 0.806 1.301 0.673 -0.101 -0.330 2.173 0.400 -0.349 0.813 0.000 0.679 1.904 -0.443 0.000 1.145 1.108 -1.406 3.102 1.439 0.989 0.988 0.938 1.053 0.823 0.839 +0 0.538 -1.744 -0.236 1.491 -1.139 0.290 -1.425 1.730 0.000 0.767 -0.734 0.362 2.215 0.522 -2.175 0.131 0.000 0.565 -1.360 0.855 3.102 0.775 0.762 0.989 0.658 0.371 0.622 0.574 +0 1.056 0.704 1.188 1.549 -1.729 1.054 -2.542 -0.295 0.000 0.831 0.013 1.146 2.215 1.535 -0.066 -0.085 0.000 1.038 0.360 -0.620 1.551 1.747 1.031 0.989 0.633 0.856 0.810 0.814 +0 1.974 -0.507 -1.319 0.254 1.283 0.744 1.521 0.952 0.000 0.890 0.823 -0.003 2.215 0.482 1.107 -1.527 0.000 0.513 0.491 -0.386 3.102 0.865 0.957 0.988 0.645 0.221 0.747 0.864 +0 1.419 0.386 0.329 3.072 0.026 1.708 1.157 -1.722 1.087 1.067 0.698 -1.057 0.000 1.743 2.727 1.667 0.000 0.895 -1.022 0.044 3.102 1.043 1.026 0.977 2.329 2.366 1.745 1.381 +1 0.893 0.521 -0.511 0.605 0.911 1.038 1.163 -0.473 2.173 1.056 1.469 -1.092 2.215 1.573 0.378 1.236 0.000 1.675 0.259 0.664 0.000 0.888 1.482 0.987 0.839 0.855 1.184 0.957 +1 0.618 -0.963 -1.669 0.662 1.741 1.219 -0.260 0.422 0.000 0.545 -0.145 -1.129 2.215 0.947 1.119 -1.358 0.000 1.085 0.558 -0.184 3.102 0.474 0.700 0.983 1.463 0.598 1.006 1.294 +0 0.439 -1.789 1.531 1.177 -0.823 0.950 -1.118 1.248 1.087 0.886 0.575 0.250 0.000 1.231 0.870 -1.059 0.000 0.494 -1.366 -0.438 0.000 1.277 0.801 0.991 0.958 1.116 0.917 0.852 +1 0.467 0.548 1.247 0.581 -0.217 2.671 1.131 -1.362 0.000 1.570 -0.929 0.798 0.000 2.021 -0.485 -0.082 2.548 2.711 -0.010 0.284 1.551 0.689 1.126 0.987 0.765 0.746 0.911 0.781 +0 0.326 -1.490 -1.234 0.604 0.494 0.686 -1.240 -0.310 2.173 0.886 -1.046 1.651 0.000 0.479 0.088 -0.726 2.548 0.956 -1.241 0.996 0.000 0.713 1.034 0.997 0.558 0.581 0.704 0.669 +1 0.837 -2.246 0.393 0.690 -0.157 0.650 -1.549 0.027 1.087 1.376 -0.913 -1.272 1.107 1.409 -1.481 -1.662 0.000 0.654 -2.086 1.566 0.000 0.538 1.028 0.974 1.141 1.356 0.864 0.831 +1 1.227 0.764 -1.256 0.257 0.513 0.852 -0.716 0.440 0.000 0.826 0.276 -1.117 2.215 0.530 0.377 0.920 0.000 0.524 -0.111 1.663 3.102 0.884 1.166 0.986 0.517 0.375 0.694 0.679 +1 0.403 1.827 0.337 0.616 -1.165 1.102 1.084 -0.087 0.000 1.028 0.895 -0.420 0.000 1.754 0.517 1.109 0.000 0.901 -0.249 1.556 0.000 0.825 0.755 0.987 0.638 0.757 0.564 0.596 +0 0.781 1.599 0.839 1.846 -0.348 0.996 -0.652 1.677 1.087 0.921 -0.492 -0.703 0.000 1.300 0.219 0.537 1.274 
0.587 -0.083 1.263 0.000 0.959 0.973 1.458 2.288 1.381 1.554 1.301 +0 2.006 0.241 -0.748 0.146 -1.374 1.470 -0.488 -1.108 2.173 1.771 -2.059 0.661 0.000 1.613 -0.483 0.148 2.548 3.528 0.089 1.173 0.000 0.906 1.192 0.992 0.839 1.737 1.259 1.025 +1 1.635 -1.594 0.945 0.478 1.327 0.892 -0.844 -0.162 2.173 1.745 -0.917 -1.204 0.000 0.430 -0.671 -0.847 0.000 1.013 0.092 0.555 3.102 0.443 1.057 0.991 1.308 0.787 1.017 1.055 +1 0.811 -0.938 0.851 0.877 -1.434 0.743 -1.007 -0.032 2.173 1.564 -0.496 -1.350 0.000 0.850 0.437 0.425 2.548 1.328 1.149 0.049 0.000 2.721 1.677 1.032 0.879 0.904 1.267 1.094 +0 1.600 -0.949 -0.531 1.396 -0.598 2.468 -1.353 -0.604 0.000 1.386 -0.191 0.995 1.107 2.251 0.605 1.290 1.274 0.976 -0.967 0.802 0.000 2.276 2.531 0.999 1.884 0.970 2.270 1.826 +0 0.482 2.308 1.577 1.585 1.346 0.935 1.048 -1.272 2.173 0.817 -1.365 0.000 0.000 1.947 -0.490 0.495 2.548 1.852 -0.219 -0.640 0.000 1.303 1.236 1.004 0.946 2.245 1.463 1.505 +0 2.797 -0.806 0.021 1.439 0.487 1.557 1.758 1.079 0.000 1.273 -1.175 -1.049 0.000 1.544 -0.657 -1.552 2.548 2.237 -0.361 -1.071 3.102 6.909 4.090 1.134 1.526 0.632 2.554 2.418 +0 0.577 -1.617 0.550 1.626 0.418 0.806 0.805 -1.389 1.087 0.512 1.808 -1.115 0.000 0.608 1.704 0.415 0.000 0.777 -0.875 -1.338 3.102 0.840 0.906 0.981 1.004 0.899 2.068 2.443 +1 1.149 1.214 -1.089 0.707 -0.564 0.576 1.837 0.412 0.000 0.545 0.172 -1.410 2.215 0.674 -0.338 1.301 0.000 1.139 0.437 0.784 3.102 1.720 1.012 0.981 0.858 0.665 0.757 0.851 +1 1.623 0.303 -0.872 0.340 -0.466 2.228 1.041 1.409 0.000 2.718 -0.044 0.143 0.000 0.804 -0.673 -0.818 2.548 1.006 2.146 -1.477 0.000 2.248 1.759 0.993 0.564 0.467 1.448 1.345 +1 0.436 -0.509 -1.555 1.165 -0.011 0.800 0.078 -0.738 0.000 0.890 0.289 1.498 1.107 0.749 1.136 -0.742 0.000 1.384 -0.087 0.693 1.551 0.861 1.090 0.989 0.857 0.695 0.837 0.752 +1 0.662 0.176 -0.210 0.819 1.440 0.913 -0.704 -1.337 2.173 0.548 -1.024 0.729 0.000 1.171 0.214 0.782 0.000 1.452 -0.949 -0.495 1.551 0.850 1.054 1.016 0.906 0.879 0.915 0.808 +0 0.617 -1.438 -1.351 1.439 -0.095 0.629 -0.615 1.157 2.173 0.767 -1.190 -0.608 1.107 0.648 -2.146 1.180 0.000 0.524 -1.539 1.741 0.000 0.353 0.736 1.182 1.026 1.068 0.765 0.682 +0 1.246 -0.426 1.734 0.712 0.462 1.037 -0.780 -0.976 2.173 1.107 2.167 0.348 0.000 0.591 -1.772 0.876 0.000 0.880 0.164 -1.067 0.000 1.290 1.085 1.189 0.677 1.037 0.766 0.730 +1 0.583 0.393 -1.245 1.586 1.218 0.877 -0.834 0.087 1.087 0.929 0.768 -0.582 0.000 0.726 -0.502 -1.354 2.548 1.071 -0.601 1.087 0.000 1.687 1.086 1.062 1.302 0.968 0.922 0.912 +1 0.950 1.222 0.941 0.366 0.017 1.032 2.527 -1.316 0.000 0.722 2.208 -0.447 0.000 1.637 1.281 0.310 2.548 1.297 0.852 1.530 1.551 1.319 1.301 0.989 0.757 1.015 1.116 1.045 +1 0.837 1.281 1.703 1.308 0.426 1.582 1.094 -0.995 0.000 1.412 0.366 0.597 0.000 0.984 1.162 0.785 1.274 1.365 1.303 -0.757 3.102 1.270 0.884 1.324 0.704 0.881 0.878 0.894 +1 1.037 1.955 0.489 0.492 -0.897 1.252 0.563 -1.509 2.173 1.371 -0.159 0.300 0.000 0.998 -1.029 0.804 0.000 1.181 0.540 -0.581 1.551 1.015 0.973 0.987 1.275 0.957 1.048 1.049 +1 1.408 0.505 0.460 1.324 -0.996 0.710 1.561 1.053 2.173 0.825 1.205 0.531 0.000 1.201 1.778 -1.342 0.000 1.353 0.037 -0.293 3.102 0.689 0.963 1.829 0.983 1.307 0.975 0.967 +0 0.627 -0.368 -0.204 1.086 -1.131 0.674 0.176 0.406 2.173 1.090 0.561 -1.586 2.215 0.871 1.295 -0.232 0.000 0.791 -1.121 1.086 0.000 1.911 1.228 0.991 1.117 1.255 0.993 0.990 +0 0.906 0.053 -1.025 1.530 -0.868 0.543 0.146 0.708 0.000 0.869 -0.134 0.025 2.215 1.025 -0.736 1.142 1.274 0.635 0.139 1.171 0.000 
0.824 0.809 0.975 1.005 0.914 0.837 0.836 +1 0.848 -0.304 0.395 0.451 1.429 0.383 0.502 0.928 0.000 0.620 1.057 -1.583 1.107 0.982 -0.149 -0.422 0.000 0.805 -0.573 -0.405 1.551 1.101 0.932 0.989 0.667 0.851 0.646 0.609 +1 0.560 -0.423 -1.118 0.705 0.651 0.681 -0.886 -1.730 2.173 1.277 -0.933 -0.917 2.215 0.999 -1.344 0.436 0.000 1.095 -1.613 0.896 0.000 0.822 0.935 0.989 0.830 0.920 0.785 0.686 +0 0.292 -1.473 0.212 2.889 -0.722 0.814 -1.387 0.609 2.173 1.048 -1.752 -1.058 0.000 0.844 -0.834 -0.199 2.548 2.633 -1.540 0.917 0.000 0.922 0.974 0.989 1.293 0.734 0.888 1.047 +0 0.660 -1.565 -0.146 1.221 0.924 0.965 -0.930 0.053 2.173 1.485 -0.516 -1.422 2.215 1.038 -1.196 1.543 0.000 0.792 -1.485 -0.900 0.000 0.841 0.891 1.022 0.850 1.748 1.077 0.910 +0 0.512 -0.689 0.559 1.678 1.386 1.204 -0.541 -1.189 2.173 0.957 0.222 0.093 2.215 0.851 -1.175 -0.108 0.000 0.865 1.165 0.767 0.000 1.820 1.166 0.986 1.175 1.576 1.139 1.027 +1 1.583 0.158 -0.597 0.434 0.915 0.736 -1.038 -1.544 2.173 0.449 -1.431 0.069 2.215 0.316 -1.781 0.437 0.000 0.454 -1.104 0.603 0.000 0.307 0.682 1.124 0.845 0.859 0.814 0.740 +1 0.929 0.634 -1.486 0.290 -0.406 1.568 -1.725 1.243 0.000 1.726 -0.290 -0.298 2.215 1.115 1.417 -0.738 0.000 1.302 0.083 0.645 0.000 0.922 1.200 0.988 1.357 0.920 1.016 0.993 +0 1.532 0.972 1.010 0.872 1.291 0.811 0.208 -1.183 0.000 0.816 1.054 -0.234 2.215 0.846 -0.674 0.228 2.548 0.386 -0.663 -0.375 0.000 0.724 0.909 0.969 1.358 0.990 1.029 0.985 +1 0.468 -0.263 0.809 0.941 -0.254 0.824 0.866 1.305 2.173 1.317 0.874 -1.557 0.000 1.041 0.901 0.192 2.548 1.156 0.116 -0.414 0.000 1.513 1.215 0.994 0.902 0.976 0.907 0.803 +1 3.280 -0.404 1.408 0.578 -0.635 0.672 -0.732 -0.046 2.173 2.028 -0.005 -0.710 0.000 0.519 -0.747 0.972 2.548 0.399 1.031 0.346 0.000 1.230 1.076 1.838 1.409 0.585 0.889 1.044 +1 0.966 -0.702 -1.111 0.749 0.882 0.524 0.466 0.339 2.173 0.294 -1.392 1.628 0.000 0.665 -2.021 0.159 0.000 0.713 0.352 -1.684 3.102 0.713 0.879 1.149 0.900 0.626 0.788 0.714 +0 2.338 -1.420 0.795 0.438 0.075 1.305 -0.920 -0.845 2.173 0.876 -1.080 -0.207 0.000 1.423 -0.980 1.453 2.548 0.642 -1.350 -1.646 0.000 0.959 1.006 0.986 0.865 1.494 1.151 0.966 +0 0.433 0.579 1.001 0.812 -1.199 1.224 1.279 1.078 1.087 1.404 -0.135 -0.652 0.000 0.740 0.493 -1.177 0.000 0.938 -0.359 0.403 3.102 0.895 0.896 0.993 1.355 1.279 1.223 1.027 +0 1.920 -0.563 0.188 0.787 0.596 0.835 -1.803 -1.487 0.000 0.900 -0.889 1.736 1.107 0.349 1.577 -1.561 0.000 0.685 -0.170 -0.885 0.000 0.682 0.899 0.986 0.687 1.195 0.872 0.802 +0 1.451 -0.442 -0.441 0.354 0.333 1.132 -0.626 -0.885 2.173 1.479 -0.780 1.024 0.000 0.770 0.958 1.257 1.274 0.513 -0.180 1.054 0.000 0.339 0.915 0.986 0.811 1.547 1.085 0.976 +1 0.768 -0.836 1.352 1.233 -0.152 0.774 -0.076 1.142 0.000 1.419 -0.195 -0.787 2.215 0.752 -0.260 0.547 0.000 0.857 -0.114 1.683 3.102 0.716 1.340 1.317 0.797 0.790 0.815 0.799 +1 0.973 0.524 1.577 0.499 1.650 1.053 0.959 0.079 0.000 0.518 1.071 1.192 1.107 0.436 -0.327 1.740 2.548 0.577 -2.212 -1.244 0.000 0.928 0.998 0.984 0.710 0.468 0.701 0.807 +1 0.607 -1.734 1.443 1.019 0.645 0.752 0.781 -1.011 2.173 0.520 -0.568 -1.528 0.000 0.629 -0.682 0.615 1.274 0.449 1.810 0.758 0.000 0.845 0.786 0.988 0.515 1.119 0.930 0.769 +1 0.695 1.537 -0.672 1.545 -0.794 0.799 1.111 0.742 2.173 1.166 0.030 1.022 2.215 0.657 1.287 -0.261 0.000 0.681 1.884 1.696 0.000 0.793 0.831 0.989 2.218 0.887 1.554 1.173 +0 2.717 -0.525 0.903 2.327 0.516 1.663 -0.042 -0.959 2.173 1.006 -0.092 -0.360 2.215 0.762 0.082 -1.431 0.000 0.945 -0.021 1.463 0.000 0.480 0.900 
1.189 1.402 0.980 1.637 1.265 +1 0.804 0.922 -0.244 0.241 0.738 1.167 0.144 -1.053 2.173 0.715 0.576 0.686 0.000 0.888 0.382 1.720 2.548 0.391 1.332 1.203 0.000 0.476 1.166 0.985 0.728 0.779 0.747 0.693 +1 0.488 1.230 1.552 0.227 1.537 0.510 0.869 -0.654 0.000 0.613 1.645 -0.152 0.000 0.612 -0.717 1.200 2.548 0.481 1.281 1.027 3.102 0.719 1.133 0.989 0.599 0.602 0.698 0.673 +0 0.999 -0.495 -0.580 0.305 0.785 1.170 2.173 -0.569 0.000 1.042 0.916 0.537 2.215 0.889 1.326 1.586 0.000 2.062 -0.591 1.339 3.102 1.842 1.641 0.988 0.879 1.503 1.814 1.715 +1 1.038 -1.460 0.819 0.586 -1.021 1.135 -0.043 -0.539 0.000 0.713 -0.447 1.647 0.000 0.944 -0.206 -1.401 2.548 1.261 0.646 0.346 0.000 1.399 0.983 1.076 0.521 0.418 0.614 0.714 +1 0.485 1.309 -0.784 0.545 -1.079 0.965 -0.525 0.158 0.000 0.934 0.702 -0.993 0.000 1.659 -0.335 1.118 2.548 1.181 0.056 -0.299 0.000 0.923 1.042 0.996 0.981 0.827 0.931 0.807 +1 1.403 1.477 1.714 0.929 -0.956 1.201 1.254 -0.256 2.173 1.164 1.502 1.196 0.000 1.003 0.255 0.500 2.548 0.919 1.642 -1.047 0.000 1.247 1.181 1.062 1.222 1.110 1.039 0.969 +1 0.538 1.221 0.603 0.786 -1.049 1.723 1.436 1.285 0.000 2.041 0.755 -0.481 0.000 1.668 0.233 -1.071 2.548 1.448 0.654 -0.090 0.000 0.772 1.111 0.990 0.844 1.028 0.982 0.925 +0 1.496 1.807 -0.762 0.586 -1.348 0.803 1.347 0.745 0.000 0.433 1.002 -0.730 2.215 0.599 1.272 1.059 0.000 1.012 0.841 1.471 1.551 0.883 0.777 0.991 0.876 0.547 0.618 0.696 +1 0.623 -2.071 -0.523 1.502 0.475 0.701 -0.932 -1.679 2.173 0.380 -1.715 0.910 0.000 1.050 -0.794 -1.222 1.274 0.409 -1.416 -1.203 0.000 0.486 0.569 1.050 1.184 0.430 0.909 0.691 +0 0.869 -1.243 -1.520 1.482 0.181 1.598 -0.898 -1.025 1.087 1.036 1.073 1.064 1.107 0.718 0.844 0.159 0.000 0.680 2.340 0.641 0.000 0.898 0.847 1.571 1.386 2.877 1.926 1.842 +0 0.601 0.889 1.639 0.792 -0.093 0.861 0.359 1.598 2.173 1.388 0.463 1.014 0.000 2.567 -0.430 -0.316 0.000 1.168 1.369 -1.719 0.000 1.459 1.054 0.987 0.662 0.950 0.871 0.766 +0 2.442 -0.542 -0.318 0.938 0.615 0.572 -1.347 0.699 0.000 0.840 -1.013 1.191 0.000 1.383 -1.054 -1.459 2.548 0.802 1.240 1.684 0.000 0.673 0.974 1.563 0.851 0.180 0.843 0.853 +0 0.748 -0.178 -0.751 1.096 -1.097 0.534 1.159 -1.022 0.000 0.552 1.885 -1.740 0.000 1.430 1.143 0.869 2.548 1.566 -0.111 0.134 3.102 0.830 1.010 0.978 0.926 1.111 0.938 0.875 +0 2.328 -0.849 -0.787 0.845 -1.232 1.507 -0.506 0.771 0.000 0.920 -1.056 -1.622 2.215 0.736 -1.174 0.017 2.548 1.017 0.271 0.583 0.000 0.874 0.940 0.978 0.834 0.875 0.920 1.084 +0 0.530 1.968 -0.666 1.719 -1.336 0.811 1.924 1.282 0.000 1.155 0.304 0.217 0.000 1.035 0.932 0.643 1.274 1.272 0.275 -0.747 1.551 2.514 1.423 0.983 1.440 0.889 1.158 1.325 +1 0.714 1.096 -1.608 1.130 -0.665 0.467 0.632 1.157 0.000 0.513 -0.905 0.889 2.215 0.483 -0.154 -1.198 0.000 1.120 0.142 -0.386 3.102 0.922 1.011 0.992 0.772 0.740 0.870 0.763 +1 1.123 1.909 -0.819 0.267 1.591 1.142 2.741 1.347 0.000 1.035 0.886 -1.704 0.000 2.641 0.299 -0.137 2.548 0.800 0.012 -0.542 0.000 0.869 1.075 0.982 0.473 0.762 0.787 0.853 +0 1.014 -1.994 1.303 0.856 0.353 0.458 -2.789 0.058 0.000 0.620 -2.831 -1.428 0.000 0.346 0.246 -0.085 0.000 0.642 -0.897 -1.050 1.551 1.103 0.872 0.988 0.843 0.309 0.800 0.743 +0 1.028 -0.064 0.594 1.347 1.069 0.815 -1.695 -1.162 0.000 1.054 -1.246 -0.660 1.107 0.443 -0.840 0.870 2.548 0.392 2.087 0.055 0.000 3.586 1.938 0.995 1.591 0.725 1.315 1.315 +1 0.590 1.583 0.880 2.002 1.030 1.245 0.454 -0.029 0.000 0.925 -1.633 -1.702 0.000 0.975 0.704 -1.381 0.000 1.409 0.543 -0.534 3.102 0.605 0.666 1.002 2.159 0.625 1.537 1.286 +0 
0.573 0.002 -1.054 1.101 -0.091 1.234 0.158 -0.794 2.173 1.285 2.792 0.701 0.000 0.913 1.425 1.564 0.000 1.650 -1.087 1.193 0.000 0.490 0.552 0.985 0.891 0.531 0.849 0.925 +0 0.728 2.079 0.378 1.072 -1.296 0.603 1.038 -0.997 2.173 0.515 -0.350 -0.123 0.000 1.324 0.646 1.181 2.548 0.833 1.004 0.568 0.000 0.891 0.908 1.222 1.095 1.046 0.855 0.855 +0 0.860 1.951 0.577 1.180 -1.724 1.987 0.010 -0.103 0.000 1.173 -2.492 1.607 0.000 0.740 0.815 -1.006 1.274 0.411 2.024 1.505 0.000 0.259 0.600 1.223 0.746 0.437 0.615 0.534 +1 0.788 -0.503 -1.554 0.544 -1.085 0.764 0.483 -0.384 1.087 0.716 -0.335 1.422 2.215 0.622 1.316 -0.784 0.000 0.985 0.401 1.459 0.000 0.892 0.880 0.983 0.834 1.182 0.734 0.681 +1 1.602 -0.482 0.950 1.322 0.472 0.677 1.982 -1.200 0.000 0.700 -1.234 -0.934 2.215 0.898 0.361 -0.506 2.548 0.811 -0.927 1.513 0.000 0.425 0.690 0.985 1.082 0.848 0.917 0.735 +1 1.225 0.116 0.092 0.736 0.873 1.149 -0.047 1.182 0.000 2.263 0.312 -0.762 0.000 0.911 0.663 0.838 2.548 1.177 2.419 -1.470 0.000 0.899 0.789 0.984 0.588 0.397 0.512 0.546 +1 1.898 -0.746 0.746 0.312 -1.590 1.130 -0.815 -0.686 2.173 0.491 0.552 -1.143 2.215 0.779 -0.315 1.072 0.000 0.374 -0.082 0.318 0.000 0.381 0.964 0.988 0.908 0.934 0.934 0.745 +0 1.794 -0.438 0.286 0.532 0.221 0.548 1.236 -1.684 0.000 1.048 0.250 -1.356 2.215 0.857 -0.319 1.316 2.548 1.275 0.023 -0.424 0.000 1.424 1.003 0.988 0.821 0.742 0.853 0.874 +1 1.048 -0.133 -0.045 1.171 -0.480 1.119 -0.481 1.593 0.000 1.116 0.704 1.170 0.000 1.673 -0.078 -0.633 2.548 0.851 0.712 0.208 3.102 0.819 0.671 0.982 0.698 0.769 0.876 0.818 +0 2.582 -1.329 1.319 1.076 1.038 1.456 -1.316 -0.651 0.000 0.828 -0.905 -0.453 0.000 0.724 -0.299 1.223 2.548 0.745 -0.345 0.074 3.102 0.624 1.217 0.993 0.967 0.484 0.760 1.087 +0 0.291 1.048 -0.077 1.333 -0.877 0.546 1.539 0.439 0.000 0.816 -0.192 1.308 2.215 0.828 1.119 1.270 0.000 1.163 0.661 -0.763 3.102 0.846 1.052 0.989 0.843 0.955 1.154 0.989 +1 2.282 -1.030 -1.371 0.678 -0.296 1.450 -1.066 1.378 2.173 1.634 -0.654 -0.119 0.000 0.751 0.007 0.628 2.548 0.701 -1.102 -0.813 0.000 0.938 0.883 1.420 1.338 1.099 1.120 1.077 +1 0.880 1.128 -1.297 0.348 -0.336 0.178 1.480 0.302 0.000 0.498 -0.573 -0.498 2.215 1.024 0.453 0.889 2.548 1.012 2.151 -1.219 0.000 0.850 1.163 0.984 0.673 0.840 1.065 0.912 +1 0.669 -1.065 0.454 1.022 1.188 1.509 0.252 1.534 0.000 2.303 -0.294 -0.544 2.215 0.437 -0.311 0.681 0.000 0.732 0.767 -0.359 3.102 1.108 1.033 0.987 1.376 0.787 1.256 1.075 +1 0.783 0.716 0.830 0.547 -0.838 1.076 0.502 1.729 2.173 0.893 0.347 -0.239 1.107 0.896 1.196 0.098 0.000 0.520 1.227 -1.239 0.000 0.706 1.060 0.985 0.685 1.417 0.837 0.719 +0 1.150 0.215 1.537 0.602 -1.123 0.707 -0.017 0.190 0.000 0.931 -0.567 -0.611 0.000 0.876 0.895 0.484 2.548 0.801 1.320 1.267 0.000 0.910 0.936 0.994 0.568 0.666 0.622 0.627 +1 1.341 -0.542 1.479 0.908 -1.407 0.429 0.149 -0.619 0.000 0.839 -0.121 -0.161 2.215 1.292 0.830 0.684 1.274 0.610 1.166 -0.352 0.000 0.552 0.813 0.986 1.006 0.971 0.907 0.788 +0 0.878 1.195 0.358 2.148 0.590 1.251 1.208 -0.915 2.173 0.741 0.016 -1.312 2.215 0.278 0.172 1.371 0.000 0.716 1.457 1.271 0.000 0.427 0.888 0.992 1.617 1.032 1.394 1.039 +1 0.652 -0.281 0.657 1.086 0.993 0.889 0.225 1.086 1.087 0.756 0.826 -0.581 1.107 1.388 1.540 -0.913 0.000 1.020 2.040 -0.675 0.000 0.581 0.700 0.994 1.056 1.262 1.100 1.572 +0 0.892 -0.959 -1.325 1.056 0.391 0.781 -0.377 0.155 2.173 0.948 0.355 -1.283 0.000 1.174 1.167 0.904 1.274 0.838 0.391 -0.029 0.000 1.052 1.068 1.344 0.920 1.337 1.083 0.982 +0 0.644 -0.473 -0.491 1.978 -0.976 
1.653 -0.471 1.029 2.173 1.675 -0.450 -0.770 1.107 0.937 -0.700 0.537 0.000 1.118 -1.380 0.549 0.000 0.864 1.243 0.992 1.837 2.445 1.463 1.229 +1 1.206 -1.561 -1.529 1.907 1.452 1.315 -1.673 -0.019 0.000 0.549 -0.500 0.583 2.215 0.700 -0.725 -1.086 2.548 0.980 -1.242 -0.838 0.000 1.179 0.997 0.994 1.050 0.664 0.782 0.961 +1 2.290 0.260 0.112 1.207 -0.719 1.533 -0.816 1.460 1.087 0.657 -0.482 -0.676 2.215 0.425 0.206 -1.051 0.000 0.488 2.195 1.268 0.000 0.879 1.013 1.568 2.128 1.405 1.400 1.277 +1 3.214 -0.930 -1.477 1.183 1.547 2.422 -0.739 0.156 0.000 0.647 -0.140 -1.317 1.107 0.373 -0.664 0.576 0.000 1.099 0.390 -0.627 0.000 0.805 0.849 1.094 0.749 0.606 0.623 0.847 +0 1.425 -0.176 -1.441 0.432 -0.142 0.677 -0.217 -0.086 2.173 0.835 -1.093 1.179 0.000 0.555 1.103 -0.518 0.000 0.692 0.468 1.065 3.102 1.370 0.849 1.001 0.842 0.688 0.714 0.685 +1 1.636 -1.937 0.185 0.694 0.468 0.998 -0.321 -0.558 2.173 0.954 0.975 -1.685 0.000 0.414 -2.065 0.633 0.000 1.420 -0.648 1.635 3.102 0.672 0.976 1.002 1.047 1.193 1.054 0.840 +0 1.002 -0.503 -1.552 0.504 -0.050 1.645 -2.624 0.577 0.000 1.578 0.388 -1.533 2.215 1.186 1.992 -0.585 0.000 0.956 0.541 0.238 3.102 0.853 0.893 0.989 0.847 1.117 0.979 0.962 +1 0.351 1.357 -0.471 0.487 -0.196 1.034 0.005 1.672 0.000 1.425 0.087 -0.229 2.215 0.977 0.567 -1.479 0.000 1.201 -1.747 0.672 0.000 0.780 0.663 0.979 0.670 0.870 0.935 0.804 +0 0.727 2.288 0.495 0.266 1.052 0.654 0.232 -0.396 2.173 0.746 -1.066 1.568 1.107 0.540 0.318 1.398 0.000 0.827 0.763 -0.684 0.000 0.734 0.908 0.991 0.956 1.247 1.101 0.861 +0 0.401 0.784 0.041 1.706 0.462 1.345 0.185 -0.932 2.173 0.651 0.167 -1.603 0.000 0.938 0.512 1.133 2.548 0.415 -0.183 0.867 0.000 0.554 0.836 0.997 0.970 1.366 1.378 1.101 +0 0.424 0.868 -0.998 1.639 1.255 1.474 0.713 -0.037 2.173 0.339 1.612 -1.394 0.000 0.834 0.376 1.566 2.548 0.570 -0.532 -1.266 0.000 0.816 1.177 1.035 0.567 1.387 0.960 0.818 +0 0.369 0.964 -1.426 0.948 0.991 0.420 -2.202 0.725 0.000 1.112 -0.077 -0.739 1.107 0.621 -0.510 -1.648 0.000 0.863 0.195 0.240 3.102 1.153 1.012 0.988 0.916 0.697 0.894 0.804 +0 0.333 0.151 -0.921 1.240 0.133 1.089 -0.398 -1.031 2.173 0.860 0.725 1.421 0.000 1.010 0.153 0.411 2.548 1.383 1.097 0.808 0.000 0.852 0.847 0.989 1.427 1.314 1.096 1.002 +0 0.861 0.869 1.377 1.051 -0.251 0.697 0.169 -1.222 0.000 0.881 -0.049 0.640 2.215 0.402 -0.966 1.587 0.000 0.776 -0.105 0.063 3.102 0.828 0.995 1.311 0.744 0.372 0.638 0.705 +0 0.954 -0.909 -1.450 0.967 1.149 0.639 0.269 0.909 1.087 1.148 -0.412 -0.625 1.107 0.790 1.038 -0.179 0.000 0.624 1.746 0.893 0.000 0.744 0.849 0.987 0.986 1.316 0.937 0.964 +1 0.540 2.262 0.484 0.673 -0.187 1.044 1.247 -0.608 0.000 1.147 1.438 1.410 2.215 1.899 0.869 1.039 2.548 0.518 0.294 -0.477 0.000 0.543 1.271 0.992 0.899 0.662 1.026 0.873 +1 1.150 0.782 -0.107 0.213 1.400 1.096 0.686 1.405 0.000 0.275 -2.784 -0.645 0.000 0.748 1.283 0.073 2.548 0.882 0.442 -1.498 0.000 0.762 0.973 0.988 0.525 0.683 0.763 0.729 +1 1.225 0.129 -0.695 0.800 -1.683 0.647 -0.224 1.404 0.000 1.348 -1.116 0.879 1.107 0.639 -2.416 -0.604 0.000 0.759 0.269 0.468 0.000 0.869 0.633 1.065 1.352 0.921 0.955 0.885 +1 0.973 -0.601 1.536 1.202 -1.158 1.513 -0.246 -1.580 2.173 2.024 -1.088 0.096 0.000 1.537 -1.033 0.859 0.000 1.341 1.201 -0.222 0.000 1.716 1.158 0.988 0.740 0.979 1.445 1.187 +1 1.394 -0.406 -1.088 0.359 -0.745 1.600 -1.160 0.445 0.000 1.849 0.376 -0.966 2.215 1.368 -0.595 1.068 0.000 0.722 0.627 1.096 0.000 0.836 0.613 0.994 0.686 0.822 0.920 0.806 +0 0.441 -0.672 1.115 1.701 -0.577 0.586 -1.313 1.345 0.000 
0.580 0.866 -1.567 1.107 0.771 -0.720 -0.032 2.548 0.744 -1.531 0.496 0.000 0.747 0.745 1.199 1.026 0.967 0.848 0.809 +0 1.050 0.816 0.893 1.093 1.623 1.096 0.049 -1.359 1.087 0.746 0.660 -0.191 2.215 1.012 -1.555 -0.394 0.000 0.969 -2.002 0.292 0.000 0.930 1.320 0.987 1.186 1.233 1.163 1.259 +1 0.687 1.388 -1.210 1.089 0.304 1.684 2.240 1.040 0.000 1.894 -0.240 -0.942 1.107 0.990 -0.276 -0.072 1.274 0.530 -0.935 -1.615 0.000 0.457 0.584 1.173 1.000 1.028 1.111 0.861 +1 0.655 1.750 -1.468 0.571 1.107 1.263 0.466 -0.936 0.000 0.535 2.260 -0.187 0.000 1.054 0.311 0.322 2.548 2.889 1.295 1.024 1.551 1.202 1.110 0.996 0.833 1.166 1.141 0.931 +0 1.265 -0.516 -1.174 0.383 -0.287 1.131 1.131 1.030 0.000 1.633 -0.958 -0.456 2.215 1.490 0.718 1.424 0.000 1.244 0.968 0.275 3.102 0.910 0.961 0.994 0.799 1.840 1.728 1.498 +0 0.362 0.894 0.866 1.503 -0.534 0.805 -0.622 0.985 1.087 1.271 0.207 1.562 2.215 1.174 -0.496 -0.513 0.000 0.869 -0.523 -0.110 0.000 0.399 1.001 0.988 1.174 0.979 1.107 1.015 +1 1.013 0.278 1.406 1.450 1.146 1.421 0.747 -0.911 1.087 0.690 2.763 0.803 0.000 1.361 0.938 -0.404 0.000 0.485 1.149 -0.266 0.000 0.864 1.474 0.988 1.762 1.408 1.309 1.435 +0 0.478 1.732 0.908 0.918 -0.729 0.779 -0.271 0.023 0.000 0.980 0.064 1.456 0.000 1.231 0.744 0.444 2.548 1.220 -0.823 -1.533 0.000 1.000 0.861 0.983 0.741 0.730 0.829 0.810 +1 0.970 1.230 0.418 0.289 0.569 1.060 0.664 -1.239 2.173 0.517 -1.119 0.592 2.215 0.632 0.260 0.822 0.000 0.756 -1.191 -1.335 0.000 1.024 1.204 0.993 0.841 1.572 0.974 0.868 +0 1.638 -0.777 -1.571 1.072 1.424 0.943 0.731 0.182 1.087 0.389 0.215 0.882 0.000 0.478 -1.303 -0.423 2.548 0.729 -0.670 -0.809 0.000 0.786 0.905 0.991 0.804 1.179 1.098 0.858 +1 3.481 0.207 -1.183 1.274 -1.497 1.965 -1.060 0.283 0.000 0.705 0.225 1.663 0.000 0.853 0.888 -0.927 2.548 0.890 2.063 0.237 0.000 1.729 1.119 0.991 1.429 0.914 0.953 1.036 +1 2.031 -0.675 -1.619 0.112 -1.193 0.588 -0.435 -0.641 2.173 0.931 0.709 -0.049 2.215 0.669 -0.902 -0.133 0.000 1.021 2.021 0.980 0.000 0.813 0.859 0.994 0.841 0.872 0.905 0.772 +1 1.005 1.425 -0.123 1.039 -1.255 1.135 1.172 0.147 2.173 1.229 1.453 1.724 0.000 0.635 2.167 -1.737 0.000 0.624 0.912 1.352 3.102 0.619 0.454 1.206 1.018 0.789 0.892 0.824 +1 1.319 -0.586 -0.225 0.620 -0.100 0.767 0.911 1.021 2.173 0.685 1.278 -1.405 2.215 0.720 0.546 1.503 0.000 0.759 1.458 -0.645 0.000 0.908 0.815 0.999 1.602 0.896 1.359 1.152 +0 1.399 -0.020 1.234 1.141 1.494 1.211 -0.726 -0.221 0.000 0.739 0.120 -0.127 0.000 0.640 0.815 1.138 2.548 1.165 -1.098 -1.364 1.551 0.928 1.079 0.980 0.752 1.026 0.755 0.790 +0 0.977 -1.463 1.416 0.427 -1.196 2.019 -1.580 1.110 1.087 1.305 -1.052 -0.989 2.215 1.271 -1.762 -0.881 0.000 1.901 1.908 -0.110 0.000 0.925 0.967 0.995 1.092 2.349 1.354 1.099 +0 0.327 -1.517 0.850 2.173 0.106 0.652 -1.360 -0.293 2.173 1.150 0.641 -1.420 0.000 0.678 0.153 0.744 2.548 0.574 2.042 -1.724 0.000 1.117 0.998 0.987 0.909 0.968 1.273 2.115 +0 0.790 -0.711 0.733 0.422 0.855 0.479 0.009 -1.261 2.173 0.483 0.195 0.324 0.000 1.651 -0.270 -0.407 2.548 1.574 -0.168 1.617 0.000 1.068 1.084 0.988 1.022 0.790 0.911 0.855 +1 0.524 1.129 0.683 1.532 0.579 1.106 0.050 -1.019 2.173 1.058 0.051 1.201 0.000 0.577 -1.155 -1.054 0.000 1.348 -0.265 -0.362 3.102 1.369 1.077 0.994 1.343 0.762 0.928 0.913 +0 0.686 1.798 0.218 1.167 0.756 0.722 0.143 1.338 0.000 0.830 0.445 -0.257 0.000 0.693 1.901 -0.683 0.000 0.569 0.784 0.271 0.000 0.898 0.812 0.976 0.618 0.395 0.564 0.588 +0 2.098 -0.024 -0.138 0.901 -0.242 0.483 -0.642 -0.965 0.000 0.853 0.180 -1.508 0.000 1.094 
1.961 1.391 0.000 1.367 -2.052 0.947 0.000 0.867 0.737 0.982 0.805 0.486 0.699 0.733 +0 1.694 0.868 -0.740 0.356 1.339 0.701 -0.015 0.206 2.173 0.571 -0.924 0.726 0.000 0.942 0.176 -1.732 2.548 0.935 -2.316 1.516 0.000 0.909 1.209 1.027 0.963 1.002 1.020 1.161 +0 3.216 0.626 -0.738 0.103 0.336 1.587 -0.781 0.943 1.087 0.888 -1.003 1.308 0.000 1.093 0.045 -0.073 2.548 0.574 -1.396 -1.298 0.000 0.730 0.908 0.986 0.847 1.481 1.572 1.362 +0 0.947 -0.204 -1.015 2.041 -1.426 1.353 0.363 0.454 2.173 0.485 -0.387 -0.335 0.000 0.426 0.817 1.357 0.000 0.459 -1.315 0.760 3.102 0.847 0.920 0.984 0.942 0.974 1.151 0.905 +0 0.830 0.657 -1.403 1.241 -0.608 0.799 -2.113 0.972 0.000 0.928 0.963 -0.805 0.000 0.868 0.457 0.499 2.548 1.060 1.036 1.233 1.551 4.552 2.654 0.984 0.843 0.530 1.757 1.450 +1 1.141 0.353 1.269 0.539 -0.392 1.197 0.839 -0.778 0.000 1.274 0.173 0.266 2.215 1.516 0.271 1.534 2.548 0.728 -0.369 0.690 0.000 1.697 1.427 1.083 0.716 1.347 1.109 0.916 +1 1.359 -0.796 1.287 0.771 -0.849 1.825 -0.994 -1.226 1.087 1.206 -0.107 0.538 0.000 1.763 0.488 0.245 0.000 0.841 -0.676 0.869 0.000 0.938 0.637 1.331 1.164 1.259 1.528 1.258 +1 1.198 1.925 1.155 0.920 0.991 2.569 0.090 -0.804 0.000 1.241 0.633 1.189 2.215 0.945 1.074 0.059 0.000 0.665 0.994 1.000 0.000 0.656 0.812 0.973 0.925 0.760 0.702 0.640 +0 0.495 0.543 0.284 1.707 1.079 0.645 -0.462 -0.194 0.000 0.817 -2.483 0.897 0.000 0.846 0.741 -1.163 2.548 1.112 -0.489 -1.396 0.000 1.146 0.963 0.989 1.019 0.992 0.851 0.799 +0 0.444 -1.225 0.682 0.373 -1.422 0.880 -0.436 1.679 0.000 0.797 -0.863 -0.174 2.215 0.560 -0.516 0.751 0.000 0.854 0.654 -0.240 3.102 0.941 0.976 0.988 0.753 0.700 0.779 0.674 +1 0.564 0.655 1.138 0.603 -0.910 0.906 0.728 -1.291 2.173 0.796 1.149 0.800 0.000 1.414 0.364 0.329 0.000 0.605 -0.859 -0.603 3.102 0.935 1.013 0.983 0.777 0.908 0.953 0.839 +0 0.647 -1.068 -1.451 1.266 -0.865 1.234 -0.729 1.448 1.087 0.781 -1.330 -0.465 0.000 1.134 -0.147 0.717 2.548 1.220 -1.522 -0.014 0.000 0.583 1.078 0.988 1.128 1.001 1.047 0.981 +1 0.966 -0.214 0.730 1.067 -1.154 0.839 0.439 0.846 0.000 0.833 -0.658 -1.447 2.215 0.596 0.216 -0.023 0.000 2.347 -0.015 -0.950 3.102 0.907 1.164 1.396 0.950 0.691 0.921 0.832 +0 0.778 1.505 -1.427 1.772 -1.601 0.478 0.946 0.815 1.087 0.675 0.519 0.119 0.000 0.901 1.209 -0.566 2.548 0.941 -0.433 0.184 0.000 0.576 0.789 0.978 1.127 0.790 0.878 1.084 +0 1.096 1.605 -0.616 0.827 0.033 0.770 0.843 -1.358 2.173 0.930 1.284 0.140 2.215 1.012 0.521 1.229 0.000 0.842 1.650 1.553 0.000 0.831 0.965 0.978 1.122 1.251 0.879 0.857 +0 0.310 0.353 -1.552 0.491 0.828 0.803 0.184 -1.105 2.173 0.789 -0.714 0.713 1.107 0.486 -1.915 0.348 0.000 0.642 -1.649 -0.862 0.000 0.548 1.206 0.984 0.638 1.295 0.881 0.756 +1 1.182 1.084 0.747 1.266 0.237 0.719 0.242 -1.707 2.173 0.542 -2.673 -1.115 0.000 1.166 -0.534 -0.824 0.000 0.590 0.708 1.301 0.000 1.119 0.900 0.985 0.663 0.991 0.894 0.944 +1 0.418 -1.167 0.756 1.154 -0.455 0.574 -0.421 1.280 2.173 0.817 -1.214 -1.259 2.215 0.623 1.179 0.160 0.000 0.535 0.682 1.707 0.000 0.645 0.809 0.989 0.810 0.871 0.822 0.756 +0 1.326 -0.350 -0.204 0.339 0.596 1.056 -0.409 0.869 2.173 1.692 0.569 -1.426 0.000 1.176 0.001 -1.036 2.548 0.627 -1.191 0.728 0.000 2.026 1.245 0.984 0.978 1.403 1.151 1.009 +1 1.242 -0.650 1.320 1.163 0.811 1.024 -0.227 -0.909 2.173 0.493 -1.095 -1.189 0.000 1.006 -0.971 -0.101 2.548 0.684 -0.608 0.243 0.000 0.742 0.772 0.985 0.966 1.003 0.983 0.795 +0 0.623 -1.176 1.542 0.480 0.318 1.275 0.446 0.087 0.000 1.404 -0.055 1.470 0.000 1.996 -0.259 -1.005 2.548 0.636 0.239 
-0.768 3.102 2.789 1.542 0.988 1.325 0.309 1.203 1.317 +0 0.636 1.164 0.589 0.595 0.662 1.077 -0.344 -1.399 0.000 1.394 -0.245 0.098 2.215 1.146 -0.175 -0.939 0.000 1.110 -0.835 1.059 3.102 0.879 0.919 1.000 0.791 0.958 0.809 0.736 +1 1.205 -1.088 0.959 1.898 1.151 0.606 -0.653 -0.869 2.173 0.862 0.206 -0.683 0.000 0.786 -0.199 0.203 1.274 0.734 -0.438 -0.342 0.000 0.501 0.560 1.005 1.187 0.733 0.845 0.820 +1 0.333 0.505 0.415 1.146 -0.152 1.701 -0.030 -0.886 0.000 1.798 -1.329 0.652 2.215 1.683 -0.953 1.162 2.548 0.668 -0.054 -1.646 0.000 1.030 1.763 0.991 3.278 0.872 2.361 2.027 +0 0.930 0.179 -0.595 0.271 0.854 0.654 -0.498 1.515 2.173 0.983 0.292 -1.281 2.215 0.667 -0.904 -0.113 0.000 1.184 0.355 0.278 0.000 0.877 0.922 0.985 0.947 0.841 0.727 0.694 +1 1.758 0.195 -1.519 0.551 0.858 1.057 -1.284 -0.245 2.173 0.770 -0.633 -1.630 0.000 1.627 -0.779 0.707 2.548 0.608 0.614 -0.852 0.000 0.883 1.102 1.146 1.586 1.289 1.213 1.020 +1 0.750 -1.544 -0.300 1.308 -1.184 0.505 -0.494 1.389 0.000 0.369 2.001 -1.651 0.000 1.821 0.203 0.657 1.274 1.298 0.184 -0.292 3.102 0.828 0.887 0.987 1.095 0.887 1.188 0.972 +1 0.413 0.727 0.878 1.121 -1.302 1.039 -0.926 0.361 2.173 0.763 -1.196 1.271 2.215 1.300 -0.632 -1.082 0.000 1.142 1.280 0.276 0.000 2.224 1.752 0.988 1.889 0.977 1.439 1.364 +1 0.907 -0.730 -0.315 1.014 0.551 0.922 0.499 -1.373 2.173 0.410 -0.888 1.098 0.000 1.207 0.233 1.535 0.000 0.951 -1.587 -0.162 0.000 0.952 0.993 0.984 0.751 0.238 0.890 0.788 +1 0.336 0.838 -0.813 1.411 1.658 1.419 -0.235 -0.961 2.173 1.061 -0.700 0.977 2.215 1.965 0.650 0.535 0.000 0.403 -2.196 -0.124 0.000 0.854 1.154 0.991 0.847 1.831 0.989 0.862 +1 1.811 -1.649 -1.623 0.461 -0.025 1.072 -0.689 0.366 2.173 0.491 1.920 -0.657 0.000 0.594 -1.202 1.233 0.000 0.416 -0.378 -1.255 3.102 0.762 0.978 1.255 0.656 0.708 0.872 0.729 +0 1.358 0.970 -1.289 1.618 -0.619 0.569 -2.297 -1.411 0.000 1.040 0.206 0.788 0.000 1.280 -0.426 0.552 0.000 1.125 0.924 1.530 3.102 0.718 0.986 1.166 1.453 1.308 1.062 1.098 +1 0.888 0.821 -1.451 0.397 0.583 1.160 -0.424 -0.610 2.173 0.838 1.161 1.378 0.000 0.573 1.619 0.951 0.000 0.713 1.575 0.076 0.000 0.995 0.791 0.982 1.368 0.984 1.076 0.940 +0 1.728 0.897 1.389 1.463 0.518 0.703 1.017 -0.954 2.173 0.653 1.356 -1.345 2.215 0.272 1.139 0.888 0.000 1.260 0.765 0.074 0.000 0.450 0.649 1.556 1.068 0.391 0.902 0.713 +0 1.155 -0.459 1.456 1.320 1.218 1.098 -0.537 -0.472 1.087 0.510 1.885 -0.063 0.000 0.885 0.894 -1.058 0.000 1.186 1.230 0.322 0.000 0.955 0.717 0.989 1.466 0.443 0.960 1.231 +1 0.804 1.268 -1.253 1.299 0.970 0.889 0.453 -0.086 2.173 1.434 0.919 1.437 0.000 0.711 -0.073 -1.488 2.548 1.054 -1.918 -0.587 0.000 0.816 0.784 1.285 1.169 0.982 0.874 0.820 +1 0.570 0.954 -0.795 1.633 -0.072 0.528 -0.750 0.228 0.000 0.716 0.529 1.389 2.215 1.341 -0.920 1.545 1.274 0.750 -0.456 -1.269 0.000 0.943 0.941 0.988 2.060 0.906 1.367 1.189 +1 0.780 0.752 -0.252 0.789 1.520 2.944 1.272 -1.181 0.000 2.788 0.759 0.525 0.000 1.074 0.627 0.020 0.000 1.434 -0.485 1.218 3.102 1.173 0.778 1.086 0.872 0.447 0.876 0.817 +1 1.029 -0.684 -0.892 0.187 -1.685 1.504 -0.338 -0.004 0.000 0.858 0.091 1.026 1.107 2.675 -0.274 -1.719 2.548 1.060 -0.625 0.528 0.000 0.974 1.149 0.977 0.917 1.047 1.272 1.049 +1 1.292 -0.407 -1.136 2.004 -1.701 0.623 2.174 0.223 0.000 1.124 0.286 0.386 2.215 0.498 1.293 -0.336 0.000 0.966 -0.994 1.201 3.102 1.020 0.983 1.084 1.434 0.992 0.995 0.918 +0 1.959 0.647 0.013 1.699 -0.083 1.780 -0.534 1.704 0.000 0.602 -0.068 -0.034 1.107 0.811 0.640 1.584 1.274 0.598 -0.803 -1.209 0.000 0.846 
0.917 0.975 0.750 0.794 0.847 1.354 +0 1.792 -0.006 -0.203 0.223 -0.563 1.338 -0.056 1.478 0.000 0.543 -2.690 -0.391 0.000 0.698 1.099 0.132 2.548 0.797 -0.662 1.096 0.000 0.800 0.892 0.998 0.821 0.599 0.802 0.897 +1 0.298 1.472 0.100 1.350 -1.572 0.931 0.243 -0.576 2.173 0.657 -1.300 0.078 0.000 1.254 1.135 0.998 0.000 1.252 0.099 1.390 3.102 0.908 0.813 0.986 1.555 1.121 1.176 0.978 +1 0.476 0.590 -0.322 2.835 0.297 0.669 -0.851 1.474 0.000 0.749 1.646 -1.262 2.215 0.975 -0.904 1.047 0.000 1.849 0.273 -1.327 0.000 1.152 0.808 0.986 0.804 0.803 1.005 0.973 +1 0.555 -0.496 -1.694 0.710 0.315 0.833 -1.168 0.761 0.000 1.086 -1.317 -1.537 1.107 0.986 -1.431 -0.815 2.548 0.788 -1.887 1.049 0.000 0.774 1.018 0.985 1.042 0.677 0.825 0.869 +1 1.117 1.341 -1.362 0.224 -1.573 1.124 0.695 1.193 2.173 0.817 2.147 -0.143 0.000 0.502 0.738 -0.313 0.000 0.444 -0.139 -0.022 0.000 0.759 0.693 0.979 0.942 0.874 0.896 0.809 +1 2.050 -0.109 -1.427 0.683 -1.557 1.620 -1.244 0.798 0.000 1.193 0.134 -1.169 0.000 1.865 -0.135 -0.078 2.548 0.999 -1.282 -0.369 0.000 0.760 0.853 0.977 1.358 0.647 0.932 1.186 +0 1.664 -0.938 0.546 1.304 -1.587 0.527 0.786 1.640 0.000 0.620 0.500 0.916 0.000 1.133 -0.974 -0.054 2.548 1.393 0.013 -0.947 3.102 0.909 0.867 1.917 1.245 0.882 0.925 0.821 +1 0.749 -1.522 -1.680 0.651 0.567 0.670 -1.083 -0.644 0.000 0.841 -0.611 1.414 2.215 1.037 -1.678 -1.167 0.000 1.864 -0.165 0.339 3.102 0.891 1.115 0.989 1.134 0.963 0.967 0.898 +0 0.682 -0.017 0.639 1.210 -1.299 1.350 0.163 1.143 2.173 1.114 -0.852 -0.364 2.215 0.961 0.098 -0.203 0.000 0.691 -1.169 -1.265 0.000 1.047 0.785 1.240 1.050 2.020 1.103 0.948 +0 4.066 0.724 -1.381 0.448 -1.100 2.328 0.087 0.316 1.087 1.025 0.946 -0.799 0.000 0.740 0.737 1.184 0.000 0.593 -0.064 1.110 3.102 0.807 1.282 0.982 0.819 0.822 1.660 1.278 +0 0.374 0.432 0.988 0.244 -1.146 0.946 -1.506 0.236 0.000 0.986 1.123 -1.166 0.000 1.221 -0.511 0.846 0.000 0.599 -1.049 0.750 1.551 0.913 1.051 0.995 0.528 0.762 0.650 0.587 +1 0.920 -0.320 -0.241 0.289 -0.303 0.985 0.635 1.418 0.000 0.599 1.390 -1.545 0.000 0.761 -0.044 -0.787 2.548 0.844 -0.936 0.236 0.000 0.986 1.037 0.999 0.737 0.371 0.686 0.998 +1 0.578 0.643 -1.119 0.853 1.090 1.110 2.041 -0.083 0.000 1.330 0.460 1.081 0.000 0.666 -0.113 1.424 0.000 1.935 -1.051 -1.242 3.102 0.981 0.779 0.987 0.968 0.815 0.971 0.808 +0 0.863 0.927 -1.666 1.122 1.691 1.484 -1.540 0.417 0.000 0.954 0.374 -0.321 2.215 0.785 0.072 1.494 0.000 1.400 0.052 -0.945 3.102 0.874 0.850 1.003 1.291 0.581 0.989 1.083 +0 0.844 -2.102 0.735 1.315 1.312 0.949 -0.769 0.747 1.087 1.383 0.451 -0.375 0.000 0.794 1.383 -1.597 0.000 2.914 0.191 -1.077 1.551 0.879 1.271 0.998 0.835 1.983 1.367 1.568 +1 1.164 0.534 -1.645 0.443 1.258 1.100 0.639 -0.056 2.173 0.711 0.444 -0.872 2.215 0.779 0.113 1.220 0.000 0.495 1.325 0.194 0.000 0.775 0.909 0.987 0.768 0.881 0.859 0.737 +0 0.781 0.153 0.493 0.344 -1.506 0.617 -0.523 -0.380 2.173 0.842 0.453 -0.690 0.000 0.869 -0.828 -1.737 2.548 0.754 0.064 1.116 0.000 0.988 0.948 0.983 0.659 0.875 0.671 0.613 +1 1.065 0.515 0.639 0.692 -1.696 0.721 -0.928 -0.742 0.000 0.752 0.683 1.414 2.215 1.197 -0.483 -0.133 0.000 1.078 -0.028 0.338 3.102 0.935 0.828 1.025 0.605 0.736 0.870 0.825 +1 1.339 -0.342 -1.533 0.894 -0.442 0.787 0.619 1.162 2.173 0.707 0.586 0.207 2.215 0.552 0.512 -0.962 0.000 1.253 -2.279 -0.121 0.000 0.714 0.756 1.262 0.991 0.833 0.882 0.788 +0 1.079 -0.630 -1.332 1.576 -0.588 0.455 -0.411 0.247 2.173 0.531 -0.255 1.211 0.000 0.675 -1.487 1.342 0.000 0.617 0.698 0.617 1.551 0.689 0.732 1.123 0.869 
0.417 0.696 0.709 +0 0.779 -1.248 0.918 1.726 0.282 0.945 -0.533 -1.302 2.173 1.093 -0.354 1.267 0.000 0.878 0.686 -1.237 0.000 1.095 0.334 -0.381 3.102 1.460 1.098 0.986 1.490 0.952 1.161 1.184 +0 0.713 1.338 1.365 0.943 -1.454 1.157 1.881 0.033 0.000 1.054 0.877 0.652 1.107 0.790 -0.573 -1.271 2.548 1.528 0.966 -1.278 0.000 2.030 1.531 0.981 1.425 1.259 1.305 1.190 +1 0.650 1.832 1.593 0.369 -1.284 0.644 -0.897 1.493 2.173 0.525 -0.149 -0.162 0.000 0.973 -0.490 0.330 2.548 0.744 0.125 -0.800 0.000 1.008 1.042 0.990 0.974 0.874 0.876 0.925 +0 0.785 -0.020 1.287 0.729 -1.372 0.852 0.733 0.200 1.087 0.647 1.203 -0.543 2.215 0.809 1.110 1.262 0.000 0.375 -1.735 1.283 0.000 0.701 0.864 0.989 1.090 0.732 0.960 0.808 +1 0.411 1.328 -0.967 0.223 0.745 0.560 -0.355 -0.856 2.173 0.969 0.739 -0.450 2.215 1.220 -0.440 0.977 0.000 0.419 -0.791 1.096 0.000 0.203 1.106 0.993 0.656 0.748 0.786 0.677 +1 1.623 0.033 0.952 1.043 0.614 0.782 -1.507 -0.790 2.173 0.587 0.255 0.297 0.000 0.563 0.193 -0.339 2.548 1.057 -0.069 -1.461 0.000 1.041 1.240 0.984 0.776 0.873 0.983 0.867 +1 0.861 -0.887 0.213 0.961 0.893 0.685 1.039 -1.252 2.173 0.598 0.611 1.670 0.000 0.703 -0.287 -0.356 2.548 1.172 0.096 -0.832 0.000 0.892 0.720 0.984 1.220 0.892 0.840 0.745 +0 0.798 0.515 -1.094 1.228 1.126 0.707 1.348 1.252 2.173 1.185 -0.773 -0.810 2.215 1.480 -1.050 -0.117 0.000 0.418 1.077 0.519 0.000 1.427 1.156 1.247 0.838 2.175 1.370 1.169 +1 1.458 -0.595 -0.506 0.854 1.555 0.533 0.683 0.622 2.173 1.355 -0.133 1.223 2.215 0.721 0.704 -1.132 0.000 0.785 0.637 -0.813 0.000 0.236 1.013 1.483 1.133 0.835 0.938 0.836 +1 1.794 -1.064 -0.431 0.537 0.679 1.514 1.337 0.798 0.000 0.644 0.409 -0.759 1.107 1.102 0.477 1.730 2.548 0.563 -1.147 -1.481 0.000 2.865 1.768 1.145 0.953 0.702 1.176 1.396 +1 0.619 -1.821 0.384 1.252 1.212 2.304 -2.713 -0.520 0.000 1.066 -0.611 -1.682 2.215 2.077 -0.602 0.972 0.000 1.120 0.534 1.383 3.102 0.980 2.564 0.993 1.000 0.763 2.300 1.787 +1 0.673 0.434 -0.059 0.832 -1.338 1.018 0.450 0.660 0.000 1.294 0.330 -0.887 2.215 0.601 1.435 0.383 0.000 1.274 0.484 1.620 3.102 0.901 0.923 0.990 0.679 0.906 0.954 0.817 +1 1.110 -0.907 -1.025 0.352 0.389 0.786 0.468 0.767 2.173 0.914 -0.388 -0.803 0.000 1.343 2.121 0.498 0.000 1.326 -0.915 -1.274 0.000 0.781 0.656 0.986 1.252 0.730 0.859 0.798 +1 0.805 -0.718 1.344 0.307 1.602 1.721 -1.175 1.516 2.173 1.713 -0.993 -0.391 0.000 2.114 -0.503 0.466 0.000 1.177 -0.125 0.184 1.551 0.914 0.822 0.984 1.194 1.625 1.306 1.045 +1 1.041 -2.224 1.377 0.756 -0.949 0.675 -0.052 -0.143 2.173 0.609 -1.055 -1.376 2.215 0.537 -0.611 1.262 0.000 0.516 -1.641 -0.054 0.000 0.673 0.794 1.064 0.739 0.987 1.004 0.789 +0 0.280 -1.592 0.254 1.711 0.531 0.563 -1.450 -0.469 0.000 0.915 -1.550 -1.166 0.000 0.760 -1.088 1.050 0.000 0.549 1.284 -0.374 3.102 0.901 0.850 1.003 0.802 0.625 0.903 0.936 +0 0.737 -1.765 -0.399 1.033 0.828 1.196 -2.173 -1.580 0.000 1.495 -0.162 0.657 2.215 2.093 -0.843 -0.938 2.548 1.491 -0.040 0.165 0.000 3.331 2.220 1.081 1.264 2.001 1.804 1.440 +0 2.636 0.430 -0.661 0.213 -0.954 1.642 0.619 1.096 2.173 0.730 -0.451 -1.251 2.215 0.493 0.143 1.195 0.000 0.423 -1.620 0.560 0.000 0.689 1.146 0.989 0.777 1.658 1.293 1.058 +0 0.975 1.711 1.031 1.228 0.575 1.385 -0.090 -0.856 2.173 0.347 -1.088 1.171 0.000 1.281 0.526 1.551 2.548 0.478 0.661 -0.260 0.000 0.779 0.975 0.990 1.296 1.483 1.813 1.446 +0 0.417 -1.488 -1.493 1.198 -0.219 1.453 -0.458 0.890 0.000 1.364 -0.271 -1.145 2.215 0.822 -0.215 -0.073 2.548 0.403 -0.244 1.291 0.000 1.008 0.981 0.986 0.884 0.926 0.899 0.906 +0 
1.421 1.507 1.074 0.793 1.700 0.577 2.405 -0.377 0.000 0.317 2.214 -1.606 0.000 0.609 -2.336 -0.280 0.000 0.530 0.335 1.053 3.102 0.815 0.987 0.991 0.520 0.404 0.669 0.711 +1 0.660 -1.796 -0.047 1.053 -0.868 0.926 -1.217 -1.465 2.173 0.937 -1.742 0.874 0.000 1.021 -0.614 -0.768 2.548 1.692 0.104 0.370 0.000 0.897 0.910 0.986 1.122 0.789 0.928 1.167 +1 1.726 -0.551 -0.581 0.320 1.573 0.635 1.003 0.851 0.000 0.386 0.410 -0.683 0.000 0.613 -0.657 0.721 2.548 0.379 0.123 0.271 3.102 1.079 0.859 0.986 0.549 0.220 0.495 0.653 +1 0.339 -1.324 -1.092 1.114 -1.029 0.905 1.047 0.626 0.000 0.852 -0.969 -1.194 2.215 1.030 -0.500 -1.503 2.548 0.447 0.066 -1.549 0.000 1.026 1.212 0.975 0.737 0.355 1.021 0.878 +0 0.889 -0.590 1.653 0.665 -0.396 0.313 -1.403 1.366 0.000 0.763 -0.568 -0.624 2.215 0.782 -1.820 0.231 0.000 0.809 0.973 1.383 1.551 0.804 0.897 1.026 0.798 0.986 0.898 0.761 +1 0.551 0.306 0.773 0.579 1.444 0.506 0.059 -0.257 0.000 0.858 0.202 -1.001 2.215 0.916 -1.411 1.528 0.000 0.374 -0.546 0.180 3.102 1.642 0.883 0.988 0.946 0.502 0.713 0.901 +1 0.688 -0.666 -1.491 0.740 -0.899 0.872 -0.831 -0.302 0.000 1.328 -0.079 1.061 2.215 0.604 0.692 -1.474 0.000 0.809 -0.812 0.272 1.551 1.622 1.001 0.993 1.312 0.748 0.912 0.935 +0 0.966 -0.670 -1.189 0.459 0.749 0.854 0.276 -0.594 2.173 0.825 0.453 0.410 2.215 0.816 -0.206 -1.144 0.000 1.007 -0.215 1.064 0.000 0.913 0.930 0.986 0.959 0.978 0.838 0.739 +1 1.076 0.378 0.100 0.844 -1.085 1.123 0.159 -0.895 2.173 1.050 1.359 0.533 2.215 1.080 -0.560 1.443 0.000 1.038 -0.181 0.682 0.000 0.906 1.455 1.156 0.995 1.860 1.366 1.108 +1 0.468 -1.378 0.424 1.023 -1.355 0.832 -1.234 1.545 2.173 1.172 0.122 -0.276 0.000 0.455 -0.727 -0.560 2.548 0.975 1.466 0.981 0.000 1.789 1.152 0.987 0.744 0.746 1.240 1.201 +0 0.548 1.570 -1.559 1.629 1.487 0.689 -0.913 0.624 2.173 0.789 -1.021 -0.852 2.215 0.881 -0.781 0.018 0.000 0.821 -0.205 -0.719 0.000 0.648 0.735 0.983 1.303 1.056 1.123 0.946 +1 0.636 1.143 0.287 0.579 -0.436 1.003 -0.497 1.733 2.173 0.761 0.424 -0.114 0.000 1.201 -0.382 0.459 0.000 1.110 0.478 -1.323 3.102 0.971 0.992 0.989 1.108 0.757 0.933 0.819 +1 1.441 -1.328 1.689 0.991 1.428 0.950 -0.965 -0.704 0.000 1.093 -1.130 0.268 2.215 0.919 -0.693 1.375 1.274 0.998 0.184 0.304 0.000 0.998 1.179 0.977 1.206 0.920 1.079 1.362 +1 1.977 1.239 -1.385 0.571 -1.705 1.065 -0.115 0.386 2.173 0.565 1.512 0.721 0.000 0.526 0.729 -0.820 2.548 0.880 0.682 -0.375 0.000 0.842 1.060 1.003 0.585 0.937 1.029 0.882 +1 1.201 -1.196 -0.715 1.085 -1.069 0.804 -0.872 0.784 1.087 0.823 -0.217 0.734 2.215 0.571 -0.263 -1.734 0.000 0.584 -0.558 -0.298 0.000 0.625 0.717 0.983 1.069 0.404 0.909 0.716 +0 0.938 -0.932 0.265 1.236 -0.339 0.574 -0.364 1.050 0.000 0.633 -1.026 1.602 1.107 0.699 -2.141 1.324 0.000 0.972 -1.145 -1.345 0.000 0.766 1.004 0.988 0.926 0.777 0.674 0.764 +0 0.577 0.473 -1.180 0.282 -0.146 0.751 1.066 0.510 2.173 0.736 0.866 -0.370 0.000 0.686 0.486 1.613 0.000 0.413 2.440 -1.324 0.000 0.927 0.938 0.984 0.803 0.849 0.703 0.665 +0 0.292 -1.476 0.440 1.559 -1.172 1.234 -1.153 0.216 2.173 1.414 -2.351 0.673 0.000 2.057 -0.359 -1.577 2.548 0.861 -1.230 -0.559 0.000 1.467 1.383 0.988 0.782 2.131 1.539 1.266 +0 1.134 -0.732 1.521 1.380 -1.253 1.184 1.167 0.517 0.000 0.840 -1.872 -0.189 0.000 1.436 -1.119 -1.041 1.274 1.050 -0.562 0.467 0.000 1.064 0.952 1.039 0.758 0.499 0.735 0.763 +1 0.489 -0.899 0.400 0.856 1.635 0.895 -1.208 1.494 0.000 1.475 -1.215 0.028 2.215 0.823 0.278 -0.513 2.548 0.711 -0.900 -1.661 0.000 0.855 1.163 0.993 1.126 1.146 1.006 0.894 +1 1.077 1.583 
0.818 1.470 0.143 1.116 1.002 -1.354 2.173 0.332 1.837 -0.894 0.000 0.326 -0.145 -0.366 0.000 0.428 0.842 -0.791 0.000 0.656 0.726 0.995 0.782 0.942 0.982 0.788 +0 0.755 1.796 -0.050 0.452 1.022 0.616 1.000 1.689 2.173 0.591 0.250 1.609 0.000 0.414 1.849 -1.662 0.000 1.932 0.907 -0.041 3.102 0.779 0.986 0.997 0.781 1.155 0.708 0.654 +1 1.098 -0.043 -0.473 0.285 1.440 1.174 0.047 0.125 2.173 1.211 -1.520 -1.220 1.107 0.987 -0.210 1.563 0.000 0.914 -1.317 1.371 0.000 0.982 1.231 0.993 1.295 2.274 1.361 1.108 +1 1.409 0.396 1.067 0.894 0.401 0.634 0.785 -1.389 2.173 0.408 -0.918 -0.078 0.000 0.930 -0.052 -0.581 2.548 0.776 -0.643 -1.739 0.000 0.733 0.930 0.981 0.889 0.763 0.797 0.723 +0 1.415 -1.155 -0.957 0.390 -0.482 1.020 -1.176 0.785 2.173 0.590 -1.812 -1.380 0.000 0.884 -1.319 0.323 0.000 0.696 0.129 -1.676 3.102 1.131 0.928 0.987 1.221 0.960 0.902 0.803 +0 0.507 0.120 -0.589 0.530 1.388 0.509 -0.890 1.412 2.173 0.521 -0.636 -0.572 0.000 0.636 -0.850 0.703 2.548 0.575 -2.096 -0.269 0.000 0.788 0.847 0.990 0.587 0.423 0.563 0.539 +0 1.720 -1.069 0.806 2.594 0.334 0.925 -0.436 -1.617 2.173 1.071 -1.275 -1.139 0.000 1.465 -1.350 -0.025 2.548 2.893 -0.426 -1.079 0.000 0.951 0.972 1.206 0.921 1.640 1.229 1.324 +0 1.059 -0.486 1.117 1.098 1.283 1.081 -0.740 -1.518 0.000 1.692 -0.664 -0.172 2.215 0.615 0.296 -1.077 2.548 0.782 0.153 -0.360 0.000 1.390 0.866 0.978 1.398 0.974 1.019 0.993 +1 0.997 0.217 -1.436 0.470 0.855 0.859 -0.056 0.203 0.000 0.530 -1.229 -0.010 0.000 0.744 1.068 -0.931 2.548 0.824 0.802 0.581 0.000 0.959 1.050 0.991 0.756 0.701 0.856 0.761 +1 1.469 1.811 -0.762 0.532 -0.972 0.833 2.170 1.116 0.000 0.490 2.798 0.060 0.000 1.047 1.418 1.273 2.548 1.315 1.150 -0.456 3.102 1.203 0.894 1.001 0.562 0.900 0.780 0.838 +1 0.388 1.212 -1.048 0.877 1.007 2.610 2.126 -1.301 0.000 3.651 1.052 0.455 2.215 0.375 0.866 0.171 0.000 0.425 1.743 0.560 0.000 1.169 1.381 0.988 1.347 1.396 2.231 1.776 +0 1.322 -0.798 0.213 1.357 1.674 1.610 -0.550 -0.248 2.173 1.051 -0.030 1.508 0.000 1.541 1.063 1.565 2.548 0.616 0.480 -1.280 0.000 0.696 0.769 1.796 1.498 2.734 1.626 1.304 +0 0.449 -1.449 0.341 0.615 -1.252 1.246 -0.923 -1.613 2.173 0.952 0.943 0.171 0.000 1.490 -0.653 0.279 2.548 0.456 -1.908 -1.633 0.000 0.915 0.927 0.982 0.758 1.692 0.895 0.754 +1 0.540 0.208 -0.465 0.992 -1.630 0.468 -0.125 1.540 0.000 0.756 0.784 0.326 2.215 0.920 -0.794 -0.477 0.000 0.932 -1.032 0.917 1.551 0.851 0.878 0.988 0.709 0.995 0.733 0.681 +0 0.915 0.081 0.770 0.242 0.399 0.345 -1.206 -0.313 0.000 1.115 -0.708 -1.016 0.000 1.005 -0.074 1.428 2.548 0.442 0.006 -1.636 3.102 0.845 1.005 0.989 0.570 0.190 0.591 0.753 +1 2.215 -1.195 0.434 1.127 1.137 1.129 -0.780 -0.969 1.087 0.488 -1.027 -0.574 0.000 0.938 -0.854 -1.714 0.000 0.768 -0.914 1.406 1.551 0.890 0.769 1.296 0.737 0.844 1.031 0.862 +1 1.592 0.091 -1.384 0.379 -0.320 0.594 -0.927 0.496 2.173 0.260 0.569 -1.073 0.000 0.862 -1.507 -0.825 2.548 0.912 0.083 0.533 0.000 0.685 0.904 0.988 1.023 0.891 0.894 0.821 +1 1.413 0.724 -1.335 0.307 1.605 1.060 1.686 -0.631 0.000 1.367 0.436 0.721 2.215 0.594 -0.047 -1.629 0.000 2.197 0.305 1.100 3.102 0.940 0.970 0.997 1.111 0.529 0.818 0.767 +1 0.700 -0.136 0.282 1.209 1.320 0.886 0.211 -0.738 2.173 0.463 -1.145 -0.021 0.000 1.031 -0.580 0.899 0.000 0.852 1.200 -1.511 1.551 0.834 1.162 1.026 1.060 0.842 0.917 0.826 +1 0.692 1.943 0.264 0.484 1.602 2.655 -1.037 -0.766 0.000 3.523 1.392 0.882 2.215 1.453 0.600 -1.687 1.274 0.712 0.861 0.108 0.000 2.995 2.608 0.979 1.021 2.010 3.562 2.610 +1 0.775 -1.310 -0.310 0.179 1.211 
1.434 -1.137 0.759 0.000 1.369 -0.684 -0.586 0.000 2.098 -0.005 1.118 0.000 4.529 -0.947 -0.989 0.000 0.726 0.991 0.987 0.560 0.727 0.793 0.736 +0 0.503 1.408 0.821 1.373 -1.309 1.176 1.725 -0.823 0.000 1.204 0.656 1.145 2.215 0.766 0.764 0.474 0.000 1.306 -0.227 0.157 3.102 1.761 1.604 1.082 0.955 1.041 1.221 1.038 +0 0.826 0.115 -0.612 1.188 -1.217 0.704 0.552 0.689 1.087 1.247 0.671 -0.916 2.215 1.110 2.340 0.588 0.000 2.232 1.033 1.251 0.000 0.585 1.190 0.990 0.902 1.371 1.130 1.335 +1 1.099 1.414 -0.113 0.379 1.688 0.803 0.439 0.908 2.173 1.053 1.122 -1.247 0.000 1.015 0.318 -0.854 2.548 0.947 1.185 0.368 0.000 0.813 0.747 0.986 0.871 1.126 0.741 0.632 +1 0.664 0.326 -1.298 0.483 1.138 1.000 -0.144 0.156 0.000 1.288 -0.319 -1.405 2.215 0.736 -0.988 -0.162 0.000 1.094 -0.586 1.047 3.102 0.871 0.858 0.990 1.029 0.884 0.924 0.963 +0 0.980 1.726 -0.762 0.966 1.267 0.445 1.965 -0.196 0.000 0.773 0.619 1.039 1.107 0.516 -0.541 -0.403 2.548 0.635 1.066 1.656 0.000 0.859 0.937 1.303 0.950 0.782 0.837 0.742 +1 0.607 -0.025 0.188 0.854 -1.272 1.279 0.472 -1.594 0.000 1.175 0.924 0.160 2.215 1.260 -0.096 0.896 2.548 1.400 2.350 -0.643 0.000 3.360 2.336 0.988 0.770 1.070 1.649 1.376 +0 0.641 1.328 -0.710 0.859 1.091 1.350 1.452 1.484 1.087 1.488 0.655 -1.371 0.000 3.466 0.598 -0.208 2.548 1.295 -0.010 -0.222 0.000 0.960 1.158 1.027 1.194 2.916 1.539 1.195 +0 0.842 -1.910 -0.245 0.222 1.665 1.245 -2.030 -0.630 0.000 2.142 -0.869 1.191 2.215 0.784 -1.486 -1.196 2.548 0.915 -1.342 0.234 0.000 1.080 0.929 0.979 1.140 1.263 0.874 0.799 +1 0.841 0.792 1.305 0.582 -0.907 0.695 0.746 -0.157 0.000 0.682 0.933 -1.063 1.107 1.514 0.407 0.484 1.274 1.723 0.358 -1.693 0.000 1.354 0.987 0.990 0.887 1.100 0.825 0.741 +1 0.658 0.433 -0.926 0.797 -0.157 0.723 1.702 -1.419 0.000 0.662 0.986 -0.297 2.215 1.090 -0.712 0.702 2.548 0.938 1.231 1.198 0.000 0.900 0.939 0.993 0.808 1.164 1.105 1.023 +1 1.935 0.637 -0.310 0.436 -1.062 0.637 0.216 1.727 0.000 0.751 -0.851 1.512 2.215 1.302 0.669 0.821 2.548 0.820 1.394 -0.150 0.000 0.772 1.085 0.991 0.999 1.123 1.025 0.841 +1 1.215 1.178 1.589 0.313 0.430 0.799 -0.674 -1.105 0.000 0.779 1.097 -0.009 0.000 0.634 0.122 0.714 2.548 0.658 -0.857 -1.037 1.551 1.003 0.831 0.986 0.820 0.579 0.813 0.739 +0 0.677 -0.195 0.490 0.996 1.322 1.217 0.296 1.308 0.000 1.638 -0.958 -0.557 0.000 0.813 1.002 1.372 2.548 1.948 0.189 -0.355 1.551 3.624 2.240 0.996 0.597 1.055 1.472 1.200 +1 2.172 0.282 -0.300 0.248 -0.357 0.688 -0.654 1.675 2.173 0.700 2.202 -1.560 0.000 1.343 -2.216 0.808 0.000 0.590 -0.253 -0.016 0.000 0.781 0.868 0.981 0.876 0.934 0.957 0.774 +0 1.415 1.665 -1.552 0.265 0.548 0.800 0.950 0.896 0.000 0.956 0.660 -0.979 2.215 1.308 1.215 -0.288 2.548 0.697 1.370 0.690 0.000 0.451 0.927 0.987 0.787 0.799 0.817 0.763 +0 0.572 0.408 -1.450 1.050 0.570 0.404 1.404 -0.035 0.000 0.482 0.914 -0.976 1.107 1.515 1.520 -0.676 2.548 1.026 1.656 -1.401 0.000 0.969 0.741 1.040 0.689 0.416 0.710 0.666 +0 0.507 -1.546 0.422 0.878 -0.233 1.277 1.155 -0.802 0.000 1.033 -0.615 1.345 2.215 1.053 -1.783 1.190 0.000 1.606 -0.296 -0.567 0.000 0.745 0.715 0.988 0.750 0.287 0.654 0.633 +0 0.694 -1.242 -0.850 0.463 -0.761 0.798 -2.154 -1.497 0.000 0.356 -1.876 1.704 0.000 1.092 -0.923 0.667 2.548 0.452 -0.442 0.546 1.551 0.723 0.618 1.003 0.572 0.143 0.563 0.530 +1 1.300 -1.294 -0.204 0.438 -0.190 1.311 -0.237 1.519 2.173 0.624 0.074 -0.847 1.107 0.471 0.725 0.945 0.000 0.600 0.559 0.207 0.000 0.364 0.871 0.984 0.733 1.145 0.993 0.822 +1 0.384 0.716 -1.380 1.174 0.370 0.712 -1.267 -0.360 0.000 1.068 
0.238 1.299 2.215 0.923 -1.585 0.358 0.000 1.144 2.175 1.577 0.000 0.929 0.691 0.990 0.922 0.709 0.778 0.988 +0 0.718 0.087 -0.395 1.370 -1.528 1.276 -0.449 0.185 2.173 1.162 0.024 -1.510 2.215 0.706 -0.399 1.613 0.000 0.886 -0.431 -0.416 0.000 0.845 0.989 1.171 0.715 1.841 1.067 0.856 +1 0.449 -1.027 -0.489 1.103 0.777 1.370 -2.122 0.362 0.000 2.044 1.304 -1.289 0.000 1.942 0.319 -0.654 0.000 1.781 0.203 1.153 3.102 1.623 1.539 0.985 0.725 0.595 1.341 1.080 +1 0.544 0.325 -1.527 0.901 0.787 0.635 0.851 0.318 2.173 0.728 1.678 -0.685 0.000 0.651 1.374 1.716 0.000 0.560 -0.688 -1.339 3.102 0.944 0.816 0.989 0.728 0.867 0.626 0.608 +0 0.956 0.657 -0.187 1.152 0.695 0.985 0.160 -1.007 2.173 0.785 -0.056 -1.605 1.107 0.621 -1.649 -0.923 0.000 0.828 0.405 1.176 0.000 0.848 1.116 1.038 0.971 0.679 0.870 0.795 +1 2.229 0.763 -0.619 0.160 -1.406 0.464 1.431 -1.562 0.000 0.653 1.267 0.301 0.000 1.207 0.755 0.837 2.548 0.943 -0.280 1.728 0.000 1.039 0.945 0.989 0.789 0.224 0.731 0.732 +0 0.584 -1.026 -1.383 1.154 0.977 1.075 -0.564 -0.839 2.173 1.317 1.527 0.455 2.215 1.015 1.157 -1.347 0.000 0.626 0.702 0.586 0.000 0.887 1.001 0.986 1.052 2.770 1.570 1.275 +1 0.580 -1.148 -0.756 1.396 0.364 0.471 -0.133 1.427 2.173 0.423 0.719 -0.365 0.000 0.773 -0.281 -1.282 2.548 1.244 1.260 0.992 0.000 0.964 0.948 1.056 0.906 0.488 0.695 0.902 +1 0.730 -1.363 0.848 1.617 -1.484 0.672 -1.202 -0.443 1.087 0.843 -0.999 -0.761 0.000 1.461 0.037 1.544 2.548 2.116 -0.173 0.233 0.000 1.545 1.048 1.299 1.125 1.461 1.026 1.019 +0 0.893 -1.485 -1.668 0.734 0.542 0.713 -1.776 -0.422 0.000 0.581 -1.913 0.212 0.000 1.169 -1.365 0.955 1.274 1.892 -0.330 -1.303 3.102 0.749 0.943 1.024 0.916 1.211 0.954 0.815 +0 0.448 0.257 -0.056 1.386 1.109 0.750 -1.301 0.226 0.000 1.330 0.450 1.655 2.215 0.781 -2.594 -1.561 0.000 1.077 -2.352 -0.309 0.000 0.915 1.473 0.989 0.873 1.301 1.765 1.449 +1 1.093 1.771 -0.293 2.109 -0.823 1.365 -2.081 1.324 0.000 0.538 0.551 -0.041 0.000 0.676 0.777 0.448 2.548 1.441 0.368 -1.703 3.102 0.895 0.916 0.985 0.957 0.721 0.942 0.796 +1 0.643 -0.142 1.359 0.685 0.623 0.386 0.180 -0.727 2.173 0.505 -2.623 0.609 0.000 0.917 -0.303 1.631 1.274 0.984 -1.565 -0.611 0.000 0.905 1.154 0.991 0.743 0.659 0.907 1.075 +0 0.772 -0.470 -1.466 0.390 -0.344 0.716 -0.303 1.083 2.173 0.785 -0.622 -0.972 0.000 1.154 -1.033 0.117 2.548 0.717 -0.331 1.560 0.000 0.750 0.879 0.984 0.820 0.992 0.774 0.702 +1 0.449 1.490 -0.194 0.734 1.296 0.788 0.953 -1.032 0.000 0.742 1.157 1.083 2.215 0.394 1.927 1.184 0.000 1.080 -0.020 0.432 3.102 1.099 0.946 0.988 1.123 0.687 0.792 0.794 +1 0.765 -0.199 -0.829 0.860 0.921 1.295 0.209 -1.616 0.000 0.735 1.490 -0.805 0.000 1.514 0.697 0.478 1.274 0.812 0.724 1.534 0.000 0.930 1.031 1.124 0.721 0.682 0.720 0.740 +0 0.503 1.577 -1.050 0.824 -0.058 0.665 -0.098 0.936 2.173 0.527 1.422 1.391 0.000 0.641 1.141 0.020 0.000 1.135 0.938 -1.248 3.102 0.847 0.929 0.978 0.644 1.038 0.710 0.659 +0 0.619 -1.216 0.456 1.224 1.275 0.932 -1.042 -0.563 2.173 1.534 0.529 0.956 1.107 1.504 -0.461 -1.186 0.000 0.610 0.788 -1.023 0.000 0.851 1.023 0.987 1.926 2.323 1.560 1.341 +0 0.391 0.134 -1.658 0.595 1.362 0.417 1.242 -1.293 1.087 0.351 0.804 -0.060 0.000 0.759 0.382 0.505 2.548 0.724 -1.061 0.932 0.000 0.971 0.970 0.981 0.876 0.761 0.880 0.763 +0 0.416 0.739 -1.653 0.235 -0.289 0.704 0.339 -0.116 2.173 0.705 -1.881 1.630 0.000 0.476 -0.671 0.696 2.548 0.581 -0.795 -1.050 0.000 0.698 0.615 0.989 0.809 0.631 0.826 0.707 +0 0.384 -0.267 -0.624 1.101 1.228 0.786 1.177 -1.282 2.173 0.987 1.181 0.239 2.215 0.635 
1.695 -0.662 0.000 0.602 2.212 0.843 0.000 0.722 0.824 0.985 1.357 1.271 1.199 1.131 +0 1.077 0.114 1.659 0.682 1.302 1.013 -1.251 0.078 0.000 0.900 -0.339 -1.071 1.107 0.735 -1.950 -0.299 0.000 0.543 -2.018 -0.931 0.000 0.912 1.173 0.988 0.596 0.441 0.674 0.737 +0 1.012 -0.965 -1.604 1.170 1.370 0.675 0.819 -0.442 0.000 0.669 0.740 0.003 0.000 0.466 -0.385 -1.542 2.548 0.998 -0.698 0.016 3.102 0.559 0.821 0.995 0.488 0.526 0.615 0.794 +0 1.015 -0.595 1.307 0.910 -1.265 0.863 2.131 -0.320 0.000 1.561 0.250 1.715 2.215 0.577 0.906 -0.700 0.000 1.449 -1.779 0.112 0.000 0.885 0.791 0.988 0.787 0.970 1.146 1.185 +0 0.780 0.545 1.715 1.604 0.482 0.804 0.389 -0.840 2.173 0.697 -0.059 0.589 0.000 0.799 -0.579 -1.157 2.548 0.445 0.174 1.333 0.000 0.734 1.005 1.388 1.045 0.606 0.864 0.774 +0 0.787 -0.604 0.796 0.847 0.961 0.646 -0.146 0.734 2.173 1.011 -0.237 -1.017 0.000 1.408 -0.452 -0.556 2.548 1.068 -0.470 -1.707 0.000 0.818 1.063 0.997 1.072 1.111 0.794 0.809 +1 1.195 -0.031 0.647 0.565 -1.480 0.861 -0.187 -1.335 2.173 0.568 -0.701 0.859 0.000 0.506 -0.865 -1.497 2.548 0.976 -0.499 -0.583 0.000 0.937 0.945 1.071 0.662 0.342 0.613 0.609 +0 0.453 -1.189 0.754 1.448 0.084 0.810 0.213 -0.608 2.173 0.659 0.171 -1.299 0.000 0.656 -0.549 -1.456 0.000 1.084 -1.114 1.152 3.102 0.583 0.786 0.994 0.888 1.305 0.822 0.749 +1 1.064 -1.090 -1.618 1.356 1.291 2.083 2.016 -0.136 0.000 1.653 0.038 1.319 2.215 0.603 -0.956 -0.314 0.000 0.826 -0.801 1.418 3.102 4.514 3.202 0.984 1.334 0.558 2.371 2.682 +1 1.932 1.712 -1.541 0.636 -1.617 0.584 1.076 0.138 2.173 0.507 1.826 -0.118 0.000 0.771 0.866 0.562 2.548 0.887 0.442 -0.346 0.000 0.833 0.608 0.991 0.919 0.317 0.809 0.692 +1 1.985 0.162 0.788 1.081 0.113 0.503 0.446 -0.006 0.000 1.587 0.052 -1.090 2.215 1.268 -0.399 1.594 2.548 0.970 -1.076 -0.470 0.000 1.138 1.137 1.158 1.518 1.065 1.119 1.006 +0 0.334 0.953 -0.717 0.591 1.219 0.594 -1.421 -1.672 0.000 0.803 -1.057 0.279 1.107 1.207 0.950 -0.127 2.548 1.359 2.431 1.483 0.000 5.986 3.388 0.992 0.748 1.390 2.338 1.748 +1 1.679 -1.408 -0.511 0.822 -1.065 1.003 -0.322 1.143 2.173 0.388 -1.111 -0.041 2.215 0.557 -0.366 -0.278 0.000 0.813 -0.366 -1.505 0.000 0.909 0.971 0.996 0.579 0.892 0.930 0.793 +1 1.207 -0.283 1.639 0.398 -0.001 1.356 1.150 -0.442 0.000 2.351 -0.712 1.393 2.215 0.693 -0.532 -0.601 0.000 0.536 0.247 0.602 1.551 0.766 0.686 0.987 0.841 0.858 0.885 0.754 +0 1.031 0.231 -0.085 0.536 -1.741 0.875 0.250 1.214 0.000 0.472 -1.161 0.472 2.215 1.505 1.172 -0.719 2.548 0.439 0.576 -0.928 0.000 0.911 0.906 1.027 0.863 1.623 0.990 0.823 +1 1.304 -0.363 0.540 0.772 -0.769 2.221 -1.045 1.057 0.000 1.280 0.156 -0.854 2.215 1.243 0.961 -0.702 2.548 1.376 -0.513 -1.092 0.000 0.687 0.813 1.284 1.032 0.646 0.858 0.714 +1 2.371 -0.559 -1.223 1.430 -0.543 0.883 -0.637 -0.823 0.000 1.121 1.482 1.128 0.000 2.190 -0.821 -0.307 2.548 4.220 0.594 1.136 0.000 1.184 1.563 1.469 1.191 0.710 1.840 1.790 +1 0.510 1.186 1.656 0.974 0.364 0.924 1.271 -1.012 1.087 0.761 -0.447 1.541 0.000 0.772 -0.257 0.749 0.000 0.772 0.162 0.042 3.102 0.777 0.684 0.986 0.982 0.889 0.922 0.795 +1 0.519 -0.106 0.006 0.694 -1.625 0.390 -1.574 -0.700 0.000 0.842 0.459 -1.033 2.215 0.351 0.323 1.234 0.000 1.326 0.860 0.340 3.102 0.999 0.963 0.984 0.681 0.941 0.843 0.735 +0 1.598 -1.168 1.597 0.933 -1.407 1.194 0.306 0.287 1.087 0.885 -0.698 -0.121 2.215 1.452 0.864 -1.351 0.000 1.601 -0.438 1.029 0.000 0.913 1.300 0.995 1.689 0.977 1.201 1.204 +0 0.858 -0.039 -0.693 1.275 -0.338 0.478 2.232 1.210 0.000 0.734 -2.713 -0.543 0.000 1.031 -0.389 1.373 
2.548 2.497 1.262 0.927 0.000 0.998 0.729 0.984 1.107 0.596 1.005 0.978 +1 1.479 -1.297 0.740 0.364 1.719 2.749 -1.327 -0.785 0.000 2.422 -0.789 1.241 2.215 0.863 -1.337 0.348 2.548 1.208 -1.938 0.231 0.000 0.929 1.298 0.987 1.013 1.221 1.686 1.372 +1 0.423 -0.785 -0.613 0.346 -0.819 0.809 0.019 0.523 2.173 0.653 -0.586 -1.320 0.000 1.366 0.112 1.636 0.000 0.499 0.470 0.196 0.000 0.900 0.914 0.989 0.565 0.798 0.679 0.646 +1 1.392 0.046 -0.482 0.210 -0.411 1.475 -0.710 0.876 0.000 0.904 -1.383 -1.196 0.000 0.868 0.751 0.208 2.548 1.133 -0.129 -0.802 0.000 1.004 1.314 1.001 0.524 0.586 0.746 0.680 +1 0.430 1.650 -0.751 0.219 -1.475 0.561 0.691 0.710 0.000 0.755 0.842 -1.543 2.215 1.864 0.578 -0.624 2.548 0.508 2.431 0.024 0.000 1.199 1.044 0.987 0.726 0.940 0.903 0.778 +0 1.652 0.570 0.108 1.134 1.387 1.038 -0.437 0.261 2.173 2.429 -0.059 -1.571 2.215 0.730 1.591 -0.153 0.000 0.860 -1.380 -1.145 0.000 2.417 1.776 1.734 1.725 2.371 1.576 1.404 +1 0.597 0.635 -0.623 0.917 0.484 0.808 0.670 -1.165 1.087 0.853 0.068 0.032 0.000 0.872 -0.869 0.983 2.548 0.851 0.666 1.542 0.000 1.162 0.970 0.985 0.916 1.363 0.873 0.758 +0 0.509 0.729 -1.274 1.057 0.635 1.161 0.682 0.586 1.087 1.301 -0.899 -0.880 2.215 1.542 1.376 -1.479 0.000 1.381 1.204 0.790 0.000 1.131 1.668 1.004 1.306 2.389 1.815 1.376 +1 0.318 2.059 1.091 0.821 0.042 0.636 -2.368 -0.946 0.000 0.883 -0.797 1.707 0.000 0.456 -0.980 0.265 1.274 1.365 1.225 0.310 3.102 0.772 0.707 0.994 0.619 0.994 0.858 0.772 +1 0.865 -1.015 -1.715 0.854 -0.252 0.566 -0.749 1.306 2.173 0.756 -1.099 -1.270 0.000 0.761 -1.120 0.438 0.000 1.983 -0.424 -0.049 3.102 1.033 0.905 1.153 0.780 1.063 0.726 0.665 +1 1.093 0.984 -0.528 1.378 -1.113 1.481 0.419 -1.042 0.000 3.647 -0.986 0.888 0.000 1.097 1.233 -0.045 0.000 1.406 0.896 -1.253 3.102 2.107 1.259 0.981 0.952 1.006 0.993 0.842 +0 2.238 -0.052 0.735 0.379 0.098 0.954 0.616 -0.864 1.087 1.076 0.135 -1.647 2.215 0.840 -1.061 1.175 0.000 1.231 0.775 -0.525 0.000 0.708 0.794 0.987 1.338 1.033 1.048 0.906 +0 0.750 1.574 -0.926 0.796 1.049 0.639 0.736 0.275 1.087 0.753 -0.857 0.257 1.107 0.937 -1.612 -1.532 0.000 0.607 2.471 -0.972 0.000 0.721 0.926 1.047 0.832 0.924 0.974 1.163 +1 1.569 -1.973 -1.496 1.589 -1.347 1.228 0.867 0.654 0.000 0.736 -1.719 -0.613 0.000 1.013 -0.502 -0.151 2.548 0.902 -0.506 1.004 3.102 0.962 0.921 0.975 1.205 0.630 0.883 1.487 +1 1.035 -0.387 0.439 1.890 0.641 0.636 0.712 1.479 0.000 1.179 -0.531 -1.299 2.215 0.373 2.291 0.398 0.000 1.061 0.174 -0.828 3.102 0.725 1.266 0.970 1.104 0.571 1.026 1.103 +1 0.720 -1.027 0.996 0.193 -0.975 0.660 0.884 1.399 0.000 0.845 -0.543 -0.456 2.215 0.559 1.153 -1.398 0.000 1.117 -1.240 0.100 3.102 0.666 1.230 0.994 0.748 0.597 1.034 0.854 +1 1.496 -0.636 -1.432 0.439 -1.604 0.445 -0.627 0.383 2.173 0.289 -2.606 0.633 0.000 0.651 -1.087 0.022 2.548 0.372 -2.326 1.192 0.000 0.207 0.646 0.977 0.860 0.287 0.689 0.730 +0 1.296 -1.759 0.877 0.657 -1.299 1.173 1.038 -0.034 2.173 0.728 -0.049 1.362 2.215 1.886 -1.972 -1.349 0.000 0.995 -1.993 -0.632 0.000 0.926 1.630 1.182 2.610 1.513 2.387 1.932 +0 0.660 -1.418 -1.155 0.338 1.442 1.167 -0.763 -0.356 1.087 1.626 1.326 1.452 0.000 0.983 -1.605 0.338 0.000 0.901 -0.465 0.989 3.102 0.957 0.986 0.995 0.612 1.023 0.718 0.686 +1 1.453 0.019 -0.545 0.610 0.792 2.542 1.641 -1.658 0.000 1.000 1.702 0.264 0.000 1.331 0.860 0.169 0.000 1.267 -1.132 0.054 3.102 1.013 0.793 1.217 0.890 0.913 1.033 0.899 +1 1.205 0.638 -1.497 1.355 -1.554 0.721 -0.710 0.143 0.000 0.482 -1.038 -1.402 2.215 1.095 1.144 -0.040 0.000 0.827 0.331 
[several hundred data rows: each "+"-prefixed line added by this diff is one space-separated sample consisting of a binary class label (0 or 1) followed by 28 floating-point feature values]
1.263 0.144 -0.429 2.548 0.455 0.664 -1.483 0.000 0.682 0.933 1.123 1.707 0.686 1.239 1.170 +0 2.024 0.862 0.082 0.600 -0.013 2.114 -0.935 1.521 0.000 1.189 -0.352 -0.533 0.000 1.043 -0.307 1.081 2.548 1.259 -1.423 -0.499 0.000 0.902 0.950 0.994 0.792 1.015 0.907 0.922 +0 1.680 -1.235 0.440 0.278 -1.025 0.782 2.034 1.254 0.000 1.182 0.046 -1.022 2.215 0.693 -0.749 -0.806 0.000 0.507 -1.843 1.314 0.000 0.789 0.950 0.990 0.742 0.472 0.812 0.720 +0 1.560 -0.136 -1.101 1.725 -1.354 2.184 1.417 0.287 0.000 0.783 1.619 0.812 0.000 2.469 -0.230 -1.647 2.548 0.870 0.956 -0.143 3.102 1.306 0.869 1.000 0.921 1.386 1.811 1.691 +1 1.057 -0.479 0.365 1.194 -0.796 0.809 -1.656 0.949 0.000 1.078 -0.285 -1.598 2.215 1.568 -0.196 -0.572 1.274 0.745 -0.643 1.230 0.000 0.655 0.819 1.346 1.049 1.104 0.814 0.709 +1 0.949 0.954 0.460 1.049 -0.383 1.384 1.044 1.711 0.000 1.100 1.386 0.781 2.215 0.963 0.253 -0.655 0.000 0.695 -1.191 -0.629 0.000 0.868 0.469 0.987 0.868 0.827 0.895 0.789 +1 0.547 -0.881 0.903 1.451 0.394 1.216 -0.659 0.597 1.087 1.434 -1.754 -1.241 0.000 1.576 -0.348 -1.162 2.548 0.396 -0.074 1.350 0.000 1.185 1.099 0.990 0.759 1.742 1.240 1.176 +1 2.122 0.908 1.423 0.475 -1.112 0.940 -0.540 -0.428 2.173 0.462 1.106 -0.319 0.000 0.720 -0.073 0.698 0.000 0.565 0.378 1.003 3.102 0.907 1.011 1.052 0.559 0.843 0.999 0.845 +1 1.506 0.590 0.447 0.967 -0.680 0.962 0.100 -1.727 2.173 0.496 -0.849 0.971 0.000 1.289 1.641 0.621 0.000 2.345 -0.075 -1.015 3.102 0.932 1.136 1.420 1.278 0.966 1.011 1.043 +1 1.296 0.129 1.413 0.500 1.144 1.311 -0.940 -0.698 2.173 0.218 -1.719 -0.582 0.000 0.487 -2.102 0.662 0.000 0.399 0.953 1.342 3.102 0.470 0.901 0.977 0.581 1.208 0.954 0.841 +1 1.390 0.412 -1.286 0.688 -0.066 0.802 0.956 1.022 2.173 1.336 0.216 -0.688 2.215 1.052 0.008 0.100 0.000 0.978 0.792 -1.543 0.000 1.241 1.020 1.208 1.060 1.629 0.942 0.836 +0 0.904 -0.231 -1.618 0.373 -1.435 1.662 -0.378 0.532 0.000 2.011 -0.354 -1.137 2.215 0.510 0.136 1.030 0.000 1.070 -0.722 0.087 3.102 0.844 0.717 0.994 0.899 1.229 1.239 1.064 +0 0.859 -0.310 0.029 1.435 -1.124 1.643 -0.524 0.892 0.000 1.372 -0.811 0.531 1.107 2.826 -1.992 -1.029 0.000 1.392 0.405 1.671 0.000 0.842 0.771 1.325 1.210 0.946 0.856 0.827 +0 0.800 -0.473 1.620 0.500 -0.271 1.017 -0.846 1.077 0.000 1.118 1.058 -0.440 0.000 1.147 -1.163 -0.098 2.548 1.323 0.666 -1.120 0.000 0.939 1.025 0.990 0.862 1.088 1.099 0.899 +1 0.912 -1.842 -1.458 0.269 0.425 0.914 -1.496 0.530 0.000 0.947 -1.245 -0.999 2.215 0.713 -0.456 0.220 2.548 0.747 0.360 -1.649 0.000 1.844 1.084 0.995 0.664 0.853 0.894 0.778 +1 0.947 0.643 0.575 1.133 -0.306 0.529 2.160 -1.050 0.000 0.479 1.283 0.190 2.215 0.956 1.329 -1.570 0.000 0.754 0.790 1.335 0.000 1.026 0.798 1.023 0.686 0.566 0.549 0.633 +1 0.827 1.568 -0.283 0.336 0.449 0.779 1.352 -1.716 0.000 0.938 -0.427 -0.034 2.215 0.569 0.323 -1.587 2.548 0.456 1.656 0.693 0.000 0.804 0.612 0.998 0.859 0.828 0.894 0.787 +0 1.547 -0.128 -0.252 0.553 1.078 0.878 0.359 0.962 2.173 1.484 -0.198 -0.851 2.215 1.164 1.171 1.385 0.000 0.378 1.479 -1.030 0.000 0.629 0.816 1.194 0.956 1.744 1.058 0.966 +1 1.194 -1.422 -0.170 0.934 -0.883 0.608 -2.400 -1.456 0.000 1.254 0.813 1.472 2.215 1.348 -0.748 0.402 1.274 0.628 -1.356 -0.689 0.000 0.715 1.207 0.986 1.773 1.713 1.612 1.341 +1 0.981 1.375 0.223 0.726 -0.516 0.344 0.021 -1.143 1.087 0.509 -1.405 1.130 2.215 0.596 -0.359 1.314 0.000 0.587 -0.274 0.452 0.000 0.458 0.521 0.983 1.892 0.737 1.243 0.982 +0 1.027 -0.277 -0.714 0.404 0.850 0.801 -0.042 1.142 2.173 0.733 -1.935 1.518 0.000 2.160 0.839 -0.170 
1.274 0.614 -0.957 -1.030 0.000 0.756 1.161 0.988 1.158 1.725 1.524 1.184 +1 1.057 0.728 1.152 0.772 0.088 1.602 -0.026 -1.359 2.173 0.777 -0.032 -0.316 0.000 0.870 2.350 -0.213 0.000 0.710 -0.045 1.047 3.102 1.470 0.990 1.024 1.369 0.933 1.088 0.961 +1 2.091 0.008 0.445 0.727 0.091 1.694 -0.339 -1.664 2.173 0.805 1.621 -0.421 0.000 0.682 0.078 0.962 2.548 1.014 -0.173 -0.531 0.000 1.279 1.047 0.991 1.854 0.979 1.255 1.200 +1 1.759 -0.752 -1.552 1.105 -1.515 0.605 -0.845 1.581 2.173 1.533 -0.400 0.146 0.000 1.216 -1.361 -0.341 0.000 1.200 -0.315 1.145 3.102 0.809 0.782 0.985 0.619 0.408 0.587 0.626 +0 0.691 -0.381 0.565 0.834 -1.226 0.863 -0.314 1.648 1.087 0.863 -1.677 -0.010 0.000 0.514 -2.066 0.400 0.000 1.375 -0.613 -0.451 3.102 0.799 1.248 1.051 0.683 1.120 0.845 0.744 +0 0.897 -0.222 1.451 0.656 -0.303 0.712 -0.261 -0.248 0.000 1.160 -0.670 0.910 2.215 0.701 -1.017 1.139 2.548 2.426 -0.595 -1.007 0.000 1.348 1.142 1.062 0.797 0.285 0.950 0.807 +1 1.434 -1.847 1.445 0.681 0.156 0.531 -1.465 0.586 0.000 0.776 -0.117 -1.068 0.000 0.959 -0.150 -0.123 2.548 0.381 -0.546 -1.678 3.102 1.687 0.910 1.256 1.172 0.470 0.741 0.800 +0 0.518 0.202 1.052 1.631 -1.446 0.410 -0.799 0.956 0.000 0.742 -1.244 0.096 0.000 1.063 -0.703 1.626 0.000 1.331 0.560 -0.138 3.102 0.868 1.060 0.991 0.803 0.585 0.691 0.777 +1 0.624 0.939 -0.487 1.463 0.139 1.027 0.883 1.537 2.173 0.639 1.261 0.486 0.000 1.135 1.083 -0.960 2.548 0.383 0.985 -1.582 0.000 0.618 0.783 0.982 0.838 1.064 0.967 0.768 +0 0.655 -0.480 -0.945 1.768 -1.636 0.962 1.347 0.031 2.173 0.503 0.586 -0.343 0.000 0.842 -0.906 1.565 0.000 1.573 -0.490 0.428 3.102 0.752 0.968 0.982 2.180 1.547 1.609 1.424 +0 1.202 0.208 -0.256 1.442 -1.018 0.697 0.066 1.056 0.000 0.896 0.730 1.485 0.000 0.682 2.002 -1.586 0.000 0.581 -0.782 0.607 3.102 0.861 0.801 1.155 0.811 0.391 0.584 0.755 +0 1.012 -0.729 -0.280 1.220 0.383 2.443 -0.434 0.100 2.173 4.423 -0.701 -1.583 0.000 0.956 0.205 1.700 2.548 0.643 0.275 0.596 0.000 2.363 1.451 0.986 0.975 1.994 2.150 1.729 +1 0.694 0.871 -0.528 1.741 -0.581 0.752 -0.304 0.933 2.173 0.591 1.207 0.624 1.107 1.455 -1.240 -1.456 0.000 0.539 0.603 1.101 0.000 1.414 1.180 0.978 0.894 0.881 1.219 1.502 +0 1.353 -1.235 -1.170 1.069 1.729 0.850 0.542 0.364 0.000 0.235 -1.177 0.040 2.215 0.504 -0.188 -0.098 0.000 0.662 0.889 -1.585 3.102 0.659 0.751 0.990 0.645 0.613 0.641 0.816 +1 0.962 -0.703 0.609 1.007 1.614 0.876 -0.962 -1.544 2.173 0.850 -1.950 -0.233 0.000 0.955 -0.060 -0.797 2.548 1.132 0.611 0.609 0.000 2.512 1.591 1.074 0.853 0.889 1.174 1.000 +0 1.840 0.612 -0.438 2.640 -0.071 1.415 1.742 -1.638 0.000 0.770 -0.474 0.104 1.107 1.455 0.913 0.779 0.000 2.160 0.304 -1.720 3.102 0.941 1.410 0.995 0.957 1.269 1.490 1.571 +1 0.479 0.668 -1.534 1.117 0.400 1.106 -0.072 1.684 0.000 1.019 -0.545 -0.010 2.215 1.410 -0.800 -0.955 2.548 1.093 0.042 1.014 0.000 0.962 1.237 0.999 0.899 0.979 1.016 0.942 +1 0.385 1.364 0.641 1.333 -0.802 1.307 1.073 1.415 0.000 1.215 0.802 0.081 2.215 1.485 0.661 -0.794 2.548 0.667 -1.656 0.180 0.000 0.810 1.268 0.988 0.942 1.015 1.025 0.933 +1 1.519 0.448 1.207 0.539 0.561 0.767 -0.684 -0.729 2.173 0.757 0.867 -1.093 0.000 1.512 1.207 1.467 0.000 1.475 0.324 0.137 3.102 0.937 0.938 0.994 0.795 1.016 0.970 0.848 +1 0.571 -1.684 -0.218 1.728 1.013 0.446 -0.528 0.429 0.000 0.600 -0.621 1.299 0.000 1.734 1.256 -0.866 0.000 1.585 0.411 -0.738 3.102 0.848 1.134 1.233 0.868 0.821 1.079 0.890 +0 2.930 -0.533 0.818 1.549 0.762 1.678 0.389 -0.805 0.000 1.130 -0.422 -1.052 1.107 1.830 0.055 0.316 2.548 1.906 0.318 
-1.308 0.000 0.952 0.885 0.992 1.127 1.491 1.255 1.201 +0 1.885 -0.389 0.009 0.220 -0.824 1.082 0.350 -0.585 1.087 2.181 0.844 1.524 0.000 1.321 0.612 -1.609 0.000 0.980 0.078 0.804 3.102 0.879 0.990 0.997 0.678 1.045 0.733 0.748 +1 2.172 0.438 -0.966 0.944 -0.172 2.939 0.942 1.304 0.000 1.460 0.414 -0.480 2.215 2.575 0.582 0.041 2.548 0.601 -0.265 1.658 0.000 1.516 2.416 1.303 1.218 0.957 1.836 1.594 +1 0.672 0.598 0.115 0.943 1.079 0.688 -0.581 -0.907 2.173 1.092 -0.843 -0.025 1.107 1.076 -0.923 1.321 0.000 1.178 0.571 0.933 0.000 1.007 0.983 0.990 0.888 0.928 0.807 0.751 +1 2.051 0.531 -0.896 0.185 -0.644 0.808 -0.219 0.918 2.173 0.938 2.256 -0.072 0.000 0.932 0.565 0.568 0.000 1.568 0.523 1.481 3.102 1.328 0.973 0.986 1.224 0.777 0.910 0.834 +1 0.610 -1.119 0.534 0.926 -0.917 0.595 -0.460 -0.039 0.000 0.773 -1.045 1.647 0.000 1.117 -1.363 -0.403 2.548 1.088 1.732 1.647 0.000 0.833 0.837 1.005 0.624 0.784 0.806 0.724 +1 0.581 1.722 0.739 1.170 1.353 0.734 1.635 1.179 0.000 0.910 0.923 -0.329 2.215 1.102 0.753 -0.829 2.548 0.911 0.882 -1.575 0.000 0.972 0.776 0.980 0.903 0.468 0.747 0.662 +0 0.634 0.622 -0.290 1.591 -0.827 0.671 -0.311 -0.235 2.173 0.420 2.155 1.066 0.000 0.837 0.210 0.847 0.000 1.870 -0.846 0.888 0.000 0.919 0.890 0.994 0.650 0.631 0.739 0.773 +1 0.343 -1.060 0.319 1.061 1.425 1.490 0.296 -0.142 0.000 1.381 0.285 1.174 2.215 1.227 0.056 -0.827 2.548 1.213 -2.235 -1.618 0.000 4.769 2.750 0.986 1.814 1.356 1.971 1.810 +1 1.317 0.666 1.713 0.112 -1.100 0.585 0.129 0.369 0.000 0.922 -0.322 -0.521 2.215 0.643 1.231 -1.313 0.000 0.376 -0.843 0.047 3.102 1.315 1.088 0.995 0.636 0.319 0.655 0.678 +1 1.184 0.012 1.149 1.011 -0.052 0.701 -0.466 0.874 0.000 1.035 -0.754 -1.443 2.215 1.154 -1.394 -0.804 2.548 0.602 -0.573 -0.183 0.000 0.817 0.986 1.338 1.230 0.776 0.952 0.838 +0 0.418 -0.272 -1.486 1.111 0.666 0.470 -0.045 -0.039 0.000 0.671 0.506 -1.731 2.215 0.781 0.403 -0.510 2.548 0.577 -1.111 1.622 0.000 0.965 0.876 0.989 0.855 0.686 0.722 0.655 +1 0.825 0.305 -0.935 1.268 -0.176 0.918 -0.708 1.433 1.087 0.401 -1.018 -0.819 0.000 0.580 1.124 1.479 0.000 1.051 0.107 0.607 3.102 0.771 1.134 0.991 0.750 0.838 0.958 0.809 +1 0.424 1.210 0.173 1.267 -1.654 0.956 -0.936 0.158 2.173 1.005 -0.694 -0.903 2.215 0.889 -0.764 1.483 0.000 0.981 0.095 0.940 0.000 0.703 0.968 1.013 1.692 1.190 1.285 1.065 +1 0.812 0.440 0.750 0.874 -1.270 1.054 -1.921 0.081 0.000 1.339 -0.841 -1.560 1.107 0.724 -0.100 -1.350 0.000 0.926 -0.247 0.084 1.551 0.931 0.943 1.131 1.082 1.042 1.059 1.155 +1 2.141 -1.231 -1.289 0.967 0.891 0.556 -1.623 0.161 0.000 0.879 -1.023 0.764 1.107 1.004 -0.364 0.131 2.548 1.275 -1.546 1.616 0.000 0.628 0.892 1.840 1.183 0.633 0.927 0.778 +0 0.822 -0.076 -1.314 1.813 -1.063 0.930 0.771 0.770 0.000 1.106 1.292 0.587 0.000 0.604 0.066 0.378 2.548 0.920 1.916 -0.792 0.000 0.698 0.657 0.984 0.873 0.456 0.590 0.849 +1 1.491 -1.054 -0.510 0.413 -1.371 2.830 1.459 0.211 0.000 2.099 -0.418 0.887 0.000 4.630 -0.136 -1.329 2.548 2.131 -1.218 -1.252 0.000 0.741 2.407 0.994 1.644 1.345 2.791 2.593 +0 1.158 0.766 0.727 4.707 0.447 3.421 -0.590 -1.387 0.000 1.118 0.581 0.126 2.215 0.545 2.616 -0.460 0.000 0.845 -1.238 -1.073 3.102 6.985 3.830 0.975 0.742 1.337 2.501 2.489 +1 0.988 0.732 -1.311 0.840 -0.120 0.802 1.058 -0.919 0.000 1.325 0.984 1.080 2.215 0.618 2.140 -0.518 0.000 0.761 -0.435 0.282 3.102 0.977 1.162 1.109 1.034 0.971 1.013 0.866 +0 0.402 0.619 -1.270 1.127 -0.503 1.307 -0.150 1.047 0.000 1.550 0.124 -1.158 1.107 1.006 0.331 0.800 2.548 2.300 0.050 -0.124 0.000 1.533 1.069 
0.988 0.836 1.312 0.958 0.859 +0 0.622 -1.138 0.533 0.103 -0.119 0.747 -0.373 1.595 0.000 0.749 0.916 0.936 2.215 0.877 1.318 -0.602 0.000 1.048 -0.654 -1.112 0.000 0.915 0.959 0.983 0.760 0.705 0.802 0.709 +0 0.448 0.139 -0.803 2.074 -1.706 2.206 0.160 0.179 1.087 1.598 -1.102 1.473 2.215 1.999 2.394 -0.685 0.000 2.166 1.355 -1.004 0.000 0.810 1.162 0.990 1.902 3.183 1.725 1.331 +0 0.774 1.613 -1.342 0.556 1.363 1.102 0.020 -0.461 2.173 2.044 0.286 1.692 2.215 1.644 -0.129 0.112 0.000 0.762 -0.557 0.951 0.000 0.909 1.015 0.977 0.849 2.082 1.255 1.082 +1 1.618 0.054 -1.427 0.413 -0.858 0.621 -1.896 0.151 0.000 0.441 -0.133 1.657 0.000 0.834 0.237 0.975 1.274 0.622 -1.269 0.541 0.000 0.771 0.629 0.990 0.622 0.614 0.585 0.599 +1 0.598 -2.307 1.234 0.302 -0.511 0.632 -0.150 0.204 1.087 0.626 -0.571 1.140 0.000 0.696 0.807 -1.224 2.548 0.547 -1.632 -1.610 0.000 0.735 0.947 0.990 0.894 0.911 0.841 0.742 +1 0.674 1.085 1.262 0.264 1.034 0.778 0.596 -0.377 2.173 1.533 1.063 1.030 0.000 1.280 0.658 1.462 0.000 1.278 1.304 -1.298 0.000 0.909 1.486 0.978 0.864 0.957 1.244 0.995 +0 0.711 0.376 -1.292 0.325 0.082 1.291 0.500 1.690 2.173 0.937 0.358 -0.451 2.215 0.767 1.798 0.283 0.000 1.271 0.416 0.301 0.000 0.879 0.920 0.991 0.934 1.518 1.069 0.877 +1 0.871 0.973 -1.083 0.349 1.286 0.987 0.967 1.418 2.173 0.577 -0.401 -0.167 0.000 0.971 -0.312 -0.497 0.000 1.313 -0.454 0.439 3.102 0.339 0.567 0.995 0.847 1.373 0.970 0.828 +1 0.734 -0.537 0.833 0.257 -0.129 0.625 -0.770 -1.167 0.000 0.501 -1.973 -0.716 0.000 0.730 1.016 0.698 2.548 0.617 -0.944 1.394 3.102 0.922 0.682 0.993 1.109 0.768 0.895 0.813 +1 0.851 -0.040 0.135 0.330 -0.916 0.627 1.824 -1.496 0.000 0.998 0.923 0.387 1.107 0.483 -0.102 -1.165 2.548 0.787 0.658 -1.445 0.000 0.593 1.098 0.985 0.586 0.835 0.738 0.886 +0 1.059 -0.943 -1.225 1.735 1.167 1.697 -0.192 -0.421 2.173 0.707 -1.367 0.410 0.000 1.906 -0.023 1.420 2.548 0.449 -0.205 -1.381 0.000 0.861 1.075 1.565 1.822 2.240 1.448 1.154 +1 0.377 -0.365 -1.472 1.242 -0.681 0.635 1.217 -1.527 1.087 0.859 1.554 1.012 2.215 1.287 -0.173 0.175 0.000 0.664 1.655 0.410 0.000 0.742 1.097 0.984 2.190 0.843 1.649 1.279 +1 0.328 -1.948 -0.733 1.707 -1.225 0.773 2.657 1.732 0.000 0.746 0.632 0.215 2.215 2.115 -0.687 0.631 2.548 0.504 -2.227 0.716 0.000 6.951 3.743 0.992 1.321 1.129 2.677 2.224 +0 0.642 0.760 1.289 0.632 -1.458 0.789 0.723 0.311 1.087 0.704 -0.792 -0.759 2.215 1.045 -0.281 1.483 0.000 0.525 0.400 -0.343 0.000 0.879 0.963 0.989 1.379 1.297 1.078 0.918 +0 1.240 -1.275 0.946 0.340 1.308 0.345 -0.769 -1.089 1.087 0.687 0.217 -0.276 2.215 0.296 2.258 -0.969 0.000 0.482 -1.238 -1.309 0.000 1.492 0.999 0.973 0.724 0.607 0.691 0.790 +1 0.875 -0.360 0.872 0.819 -1.389 0.631 -0.729 0.355 0.000 0.959 -0.188 -0.184 0.000 1.445 0.366 1.554 2.548 0.927 0.938 -0.716 3.102 0.895 1.020 1.048 0.738 0.852 0.933 0.821 +0 0.755 -0.250 -0.273 3.584 -0.737 1.257 0.128 1.271 2.173 0.616 0.664 0.690 0.000 1.725 -0.605 0.992 0.000 0.638 0.596 -1.233 0.000 0.824 0.930 0.988 0.856 0.875 1.231 0.977 +1 0.529 1.162 -1.186 0.281 -0.927 1.010 0.724 1.580 0.000 1.313 0.339 0.340 1.107 0.956 0.019 -0.223 0.000 0.573 -0.528 -0.864 3.102 1.876 1.135 0.991 0.948 0.800 0.925 0.802 +1 0.777 -2.102 -0.247 0.774 0.624 1.268 0.130 -1.248 2.173 0.667 -0.770 1.198 2.215 0.387 -1.490 -0.494 0.000 0.831 0.035 0.362 0.000 0.798 0.975 0.985 0.762 1.268 1.127 0.883 +1 0.594 -0.188 1.651 0.420 0.147 0.965 0.249 0.791 0.000 0.466 -0.116 0.054 0.000 1.532 0.930 -1.227 1.274 0.515 -0.511 -1.221 0.000 0.918 0.872 0.983 1.270 0.532 0.866 
0.872 +0 0.910 -0.398 1.689 0.375 -1.181 1.002 -0.126 -0.236 0.000 1.247 -1.085 1.338 1.107 0.709 -0.907 0.049 0.000 0.612 -0.572 -1.590 1.551 0.781 0.775 0.992 1.044 0.417 0.890 0.854 +0 0.946 -0.272 -1.557 0.238 0.026 1.451 0.505 0.756 0.000 1.405 0.488 -1.012 2.215 1.483 1.364 -0.255 2.548 0.538 2.417 1.605 0.000 0.817 0.883 0.982 0.751 1.253 0.854 0.797 +1 0.880 -0.100 -0.730 0.704 -0.077 0.942 1.062 0.730 2.173 0.766 -0.228 -1.390 1.107 0.699 -1.526 -0.854 0.000 0.581 -0.896 1.111 0.000 0.722 0.718 0.997 1.490 1.468 1.116 0.996 +1 0.871 -0.714 -1.039 0.566 0.450 1.114 -0.992 0.611 1.087 1.405 -1.290 -1.352 2.215 1.114 0.193 0.651 0.000 1.405 0.874 -1.366 0.000 0.818 0.934 0.988 0.806 1.829 1.127 0.915 +0 1.237 0.613 -0.408 1.270 0.156 0.896 -1.076 1.540 0.000 1.052 1.226 0.319 2.215 0.863 -1.972 1.611 0.000 1.462 -0.302 -1.517 3.102 0.902 0.906 0.987 0.856 1.506 1.605 1.438 +1 2.128 -0.042 1.686 1.284 -1.273 1.151 0.414 0.191 2.173 0.632 -0.423 0.848 2.215 0.745 0.614 -0.711 0.000 0.473 0.035 0.494 0.000 0.616 0.695 1.049 0.932 0.891 1.120 0.881 +1 0.829 -0.657 -1.270 1.127 0.825 3.386 1.441 1.408 0.000 3.409 -0.309 -0.254 1.107 2.389 0.287 -0.205 0.000 0.930 0.637 -0.364 3.102 2.023 1.193 1.272 1.573 0.927 1.073 1.057 +0 0.975 0.825 1.687 0.343 -0.682 0.375 -0.872 -1.065 2.173 0.549 -0.938 0.117 2.215 0.264 0.759 0.248 0.000 0.819 -1.047 0.817 0.000 0.688 0.651 0.983 1.165 0.585 0.894 0.775 +0 0.604 -0.512 0.195 0.317 -0.767 0.600 -1.102 -1.565 2.173 0.395 -2.850 0.159 0.000 1.309 -0.557 1.172 2.548 0.508 -0.917 -1.095 0.000 0.809 1.050 0.989 0.772 0.744 0.744 0.682 +1 2.344 -1.597 -0.415 0.899 0.074 0.347 -1.683 0.886 0.000 0.593 -0.451 0.596 1.107 1.233 -1.702 -1.705 0.000 1.511 0.101 1.491 3.102 0.896 1.073 0.993 0.947 0.667 1.039 0.944 +1 1.185 0.016 1.614 0.672 -1.226 0.667 -0.123 -1.633 2.173 1.695 -1.275 0.143 0.000 1.352 -0.693 -0.336 0.000 1.409 1.022 1.104 3.102 0.737 0.928 0.995 1.042 0.984 0.935 0.825 +1 0.693 -0.672 0.285 0.213 0.178 0.653 -0.153 0.035 2.173 0.792 -0.328 -1.498 0.000 1.493 0.656 -1.492 0.000 0.881 0.420 -0.276 0.000 0.924 0.726 0.981 0.609 0.578 0.776 0.714 +0 1.017 -0.526 -1.057 5.700 -1.371 3.733 -1.007 0.421 2.173 1.432 0.185 -1.571 2.215 0.800 -1.297 -0.200 0.000 1.142 -1.579 0.438 0.000 0.626 1.153 0.993 1.211 3.968 2.896 2.177 +0 0.833 -1.005 -0.875 0.647 -0.531 0.758 0.232 0.688 0.000 1.031 1.290 0.792 2.215 1.532 -0.027 -1.431 2.548 0.657 -0.354 -0.171 0.000 0.836 0.938 0.976 0.770 1.558 1.035 0.916 +0 1.848 0.713 -0.654 3.006 -0.820 1.255 -0.800 0.878 0.000 1.180 0.097 1.010 0.000 1.408 -0.014 1.297 2.548 1.158 1.274 0.810 0.000 1.077 0.926 0.982 0.724 1.214 1.172 1.531 +1 0.521 0.502 -1.284 0.682 0.471 0.519 -1.008 -0.325 0.000 0.477 1.051 0.728 2.215 1.031 0.384 1.606 1.274 0.933 0.582 -0.274 0.000 0.917 0.720 0.988 0.559 0.586 0.521 0.510 +0 0.836 1.675 -0.549 0.750 -1.631 0.610 1.345 0.805 2.173 0.712 0.273 -1.684 0.000 0.982 0.820 0.318 2.548 0.873 1.056 -0.904 0.000 0.848 0.950 0.984 0.877 0.466 0.698 0.730 +0 1.286 -0.032 1.465 0.269 -0.833 0.643 -0.725 0.464 2.173 0.790 0.984 -1.301 2.215 0.397 0.474 0.583 0.000 0.841 -0.334 -0.268 0.000 0.537 0.764 0.986 0.819 1.474 0.860 0.699 +1 0.969 -0.407 -0.911 0.265 1.281 0.855 -1.414 -0.371 0.000 1.334 -0.159 1.093 0.000 0.710 -1.056 1.368 2.548 0.524 1.001 -0.090 0.000 0.810 0.852 0.993 0.490 0.357 0.544 0.619 +0 0.459 0.263 0.758 0.625 -0.288 0.931 -1.251 0.366 2.173 1.621 0.716 -1.344 0.000 0.497 0.768 -0.970 1.274 0.438 -2.359 1.077 0.000 3.302 1.816 0.982 1.767 1.329 1.538 1.422 +1 0.874 
0.654 -1.102 2.128 -0.335 1.107 -0.047 1.233 0.000 0.907 -0.072 -0.226 2.215 0.720 1.473 1.656 0.000 0.962 -0.595 -1.158 0.000 1.420 0.992 1.205 0.790 0.844 0.858 0.965 +1 1.293 -1.676 -0.608 0.248 -1.008 0.998 -0.158 1.461 0.000 0.803 -0.565 -0.729 0.000 0.732 -0.817 0.604 1.274 1.026 -0.130 0.681 3.102 1.151 0.885 0.976 0.854 0.255 0.621 0.745 +0 1.170 1.729 -1.559 0.957 -0.607 0.421 -1.208 1.141 0.000 0.535 0.304 0.533 2.215 0.608 -0.644 -0.457 2.548 0.446 -1.902 -0.618 0.000 0.757 0.851 1.110 1.220 0.572 0.902 1.183 +0 0.895 -1.934 0.879 1.047 -0.273 0.510 -0.575 0.194 0.000 1.195 -0.948 1.556 2.215 1.031 -1.287 -0.867 1.274 0.691 0.370 -0.956 0.000 0.919 0.885 1.155 1.142 1.000 0.858 0.871 +0 0.422 -1.331 1.415 0.978 -1.122 1.086 -2.026 1.159 0.000 0.866 -0.216 0.307 0.000 0.747 -0.374 -0.699 0.000 1.695 0.791 -0.767 3.102 0.892 0.952 0.987 1.489 0.806 1.586 1.248 +1 1.107 -0.050 1.570 0.268 1.357 1.485 -0.803 0.872 1.087 1.285 -2.397 -0.449 0.000 0.831 -1.152 1.461 0.000 1.398 0.428 -0.449 0.000 0.801 0.942 0.987 0.932 2.011 1.193 1.060 +0 0.603 -1.436 1.464 0.826 0.544 0.902 -0.697 0.907 1.087 1.118 0.180 -1.334 2.215 0.530 -1.617 -1.630 0.000 0.919 1.592 -0.501 0.000 0.878 1.068 0.981 1.693 1.494 1.261 1.064 +0 1.167 -0.572 1.323 0.273 -0.272 0.814 -0.018 -0.493 0.000 0.747 -0.451 0.771 2.215 0.461 -1.622 -0.327 0.000 0.894 0.550 -1.432 3.102 1.114 0.970 0.987 0.612 0.804 0.768 0.706 +0 1.361 0.050 -0.308 0.047 0.824 0.726 0.953 0.640 0.000 1.220 0.138 -0.893 2.215 1.015 -0.804 1.672 0.000 1.002 0.661 1.510 1.551 1.176 0.814 0.821 0.696 0.890 0.841 0.770 +1 1.247 1.334 -0.085 1.039 0.219 2.487 1.050 0.934 0.000 1.273 -0.276 -0.865 0.000 2.975 -1.880 -1.351 0.000 1.578 0.363 -0.353 3.102 1.530 0.971 0.991 0.993 0.580 0.679 0.961 +1 1.385 0.466 0.474 0.799 0.724 0.936 0.622 -0.669 2.173 0.568 1.058 -1.732 2.215 0.287 0.636 -1.038 0.000 0.375 1.535 1.573 0.000 0.472 0.742 1.001 0.819 0.913 0.867 0.679 +1 1.595 0.289 -0.478 1.425 -1.099 0.932 0.668 0.465 1.087 0.958 0.711 -1.681 0.000 0.815 1.048 1.413 2.548 1.206 0.754 -0.054 0.000 0.892 0.888 1.107 1.291 0.859 0.971 0.832 +0 0.713 0.664 0.821 0.747 0.014 0.574 -0.469 -0.223 2.173 0.584 -1.687 -1.379 0.000 0.957 -0.808 1.645 0.000 1.137 -0.789 -0.592 3.102 0.827 1.194 0.988 0.752 0.346 0.897 0.780 +1 0.681 0.595 -0.971 0.848 0.441 0.676 -0.335 1.523 0.000 0.792 0.756 0.297 2.215 1.115 -0.083 -0.492 2.548 0.891 -1.141 -1.464 0.000 0.821 1.010 1.006 0.624 0.791 0.886 0.789 +1 1.059 0.074 1.231 0.696 0.047 1.458 0.921 -1.516 0.000 1.866 1.773 -0.528 0.000 1.352 -0.534 -0.066 1.274 1.713 1.402 1.015 0.000 0.806 0.472 1.041 0.825 0.821 0.951 0.819 +0 1.100 1.703 -0.578 0.682 0.101 0.750 1.468 0.153 0.000 0.886 -0.135 -1.739 1.107 0.551 0.776 1.584 2.548 0.740 2.056 0.796 0.000 0.853 0.803 0.979 1.622 0.398 1.041 0.955 +1 0.800 0.355 0.423 0.113 -0.151 0.626 -2.030 -1.070 0.000 0.742 -0.749 1.560 2.215 0.627 -0.347 -0.017 2.548 0.681 1.363 1.468 0.000 0.840 1.158 0.993 0.527 0.731 0.761 0.702 +0 0.833 -0.626 -0.286 1.520 -0.716 0.829 -1.838 1.639 0.000 0.621 -1.434 -0.072 0.000 1.070 -0.524 0.866 2.548 0.624 -0.593 -1.720 3.102 1.551 0.928 0.997 1.036 0.454 0.728 0.965 +1 1.744 0.759 0.988 0.542 0.325 1.105 0.264 -0.896 2.173 0.604 -0.602 0.459 2.215 0.331 0.498 1.736 0.000 0.399 -0.958 1.693 0.000 0.383 0.671 0.993 0.938 1.257 1.066 0.820 +1 1.543 -0.299 -1.358 0.792 -1.229 0.767 0.036 0.436 2.173 0.376 -1.499 0.181 0.000 0.765 -0.647 -1.600 0.000 0.770 -0.820 1.140 1.551 0.894 0.964 0.985 0.701 0.648 0.846 0.727 +1 0.796 1.813 1.170 0.609 
-1.121 0.751 0.367 0.223 2.173 0.864 0.060 -0.996 2.215 0.957 1.161 0.446 0.000 0.867 0.036 -1.509 0.000 0.696 1.039 0.984 1.289 1.071 1.111 0.906 +1 1.018 0.391 0.618 1.363 -1.491 0.514 0.738 -1.185 0.000 0.371 -0.433 0.697 2.215 0.709 0.729 -0.125 0.000 0.986 -0.993 0.347 1.551 0.887 1.062 1.544 0.839 0.269 0.718 0.728 +1 0.745 0.432 0.523 1.164 1.223 1.871 1.400 -0.503 2.173 1.198 -1.655 1.100 0.000 1.353 0.944 -1.732 2.548 0.953 1.253 -1.196 0.000 0.639 0.733 0.989 0.986 1.812 1.466 1.101 +0 0.872 -0.695 -0.014 0.701 -0.579 0.858 -0.860 0.840 2.173 0.808 -1.560 -1.532 2.215 0.499 -2.007 0.941 0.000 0.864 -1.355 -0.890 0.000 0.752 0.884 0.984 1.123 1.132 0.954 0.838 +0 0.587 -0.621 1.007 1.651 -1.306 0.647 -0.756 -0.798 2.173 0.847 -0.109 0.867 2.215 0.676 1.191 0.901 0.000 0.537 -1.385 0.195 0.000 1.447 0.940 1.189 0.790 1.145 0.852 0.832 +0 1.119 -0.452 0.458 0.750 0.374 1.002 1.001 -1.561 0.000 0.568 1.142 1.091 0.000 1.056 1.120 -0.224 2.548 1.486 -0.703 -0.972 3.102 1.100 1.139 0.996 0.938 1.324 1.103 1.291 +1 0.677 0.195 -0.300 0.639 -1.232 1.061 -0.791 -1.246 2.173 0.289 2.112 0.299 0.000 0.797 -0.333 0.215 0.000 0.717 0.504 1.475 3.102 1.209 0.808 0.989 1.281 0.915 0.997 0.894 +0 0.760 -1.909 1.008 0.428 -0.981 0.894 -1.311 -0.647 1.087 0.558 -0.924 0.227 0.000 0.391 1.635 1.441 0.000 0.771 -0.935 1.593 3.102 0.782 0.972 0.995 0.534 0.797 0.644 0.603 +0 0.858 -1.415 0.799 2.026 0.184 1.542 1.646 -1.669 0.000 1.251 -0.466 -1.330 0.000 1.713 -0.543 0.263 1.274 1.866 -0.846 -0.411 3.102 0.813 0.938 0.988 0.861 0.832 0.714 0.691 +0 0.778 -0.136 -0.224 1.067 1.272 1.272 -2.125 -0.789 0.000 0.886 -1.116 1.299 2.215 2.398 -0.133 0.515 2.548 2.225 -0.488 -1.363 0.000 1.276 1.015 1.231 0.922 1.292 1.022 0.877 +1 1.520 -0.901 0.514 0.532 0.964 0.621 0.825 -1.570 2.173 0.765 -0.077 -0.729 0.000 0.399 0.605 -0.403 2.548 0.478 0.868 1.294 0.000 0.894 0.771 0.982 0.885 0.541 1.037 0.895 +1 0.404 -1.089 -1.274 2.084 -1.561 0.633 1.273 0.145 2.173 0.236 1.676 -0.411 0.000 0.711 0.169 0.882 0.000 0.425 -0.052 0.093 0.000 0.786 0.663 0.993 0.795 0.881 0.938 0.781 +1 1.612 0.476 0.066 0.285 -0.744 1.120 0.629 -1.673 1.087 0.613 0.540 -0.420 0.000 0.472 -0.093 1.651 0.000 1.740 0.308 1.062 3.102 0.924 1.119 0.989 1.266 0.945 1.025 0.993 +1 0.971 -0.658 0.081 0.805 0.734 0.982 -0.115 -1.250 2.173 0.502 0.160 -0.085 0.000 0.698 0.181 1.703 2.548 0.549 0.264 0.882 0.000 0.526 0.868 0.995 0.906 0.503 0.912 0.761 +0 0.767 1.529 -0.039 1.189 1.076 0.691 0.383 1.544 1.087 0.531 1.572 -0.523 0.000 0.618 -0.237 -1.240 2.548 0.741 -0.035 -0.088 0.000 0.821 0.993 1.116 0.987 0.550 0.791 0.744 +1 0.301 -1.149 -0.120 1.764 -0.871 0.520 -0.664 -0.983 0.000 0.890 0.621 0.765 2.215 0.567 -2.163 1.034 0.000 1.878 -0.658 0.913 3.102 1.331 1.094 0.983 1.074 0.920 1.025 1.008 +1 0.968 1.215 -1.037 0.483 -0.207 0.535 0.467 -0.105 0.000 0.667 1.409 0.371 0.000 1.338 0.146 1.401 2.548 0.785 -0.353 -1.328 3.102 0.834 0.928 0.987 0.902 0.546 0.776 0.718 +1 0.704 0.048 0.349 0.604 1.299 1.511 -0.145 -0.623 2.173 1.211 -0.434 0.779 0.000 0.438 0.899 -1.229 2.548 0.745 0.012 -1.615 0.000 1.071 0.877 0.988 1.198 0.802 0.971 0.859 +0 0.571 1.326 -1.342 0.465 0.854 0.597 0.708 -0.960 2.173 0.638 -0.862 1.306 2.215 0.681 0.389 -0.545 0.000 0.960 1.538 0.368 0.000 0.949 0.807 0.987 0.769 1.143 0.867 0.736 +1 1.068 1.916 0.911 0.905 0.564 1.374 1.058 -0.888 2.173 0.598 1.145 0.753 0.000 0.368 2.600 -1.536 0.000 0.734 0.931 1.625 3.102 0.928 1.267 0.975 0.620 0.817 0.932 0.814 +1 0.662 -1.046 -1.445 1.390 0.908 1.083 -0.347 0.305 
2.173 1.506 2.686 -1.067 0.000 0.593 -2.265 1.547 0.000 1.176 -0.733 1.075 3.102 0.838 0.812 1.133 1.063 0.828 0.908 0.814 +1 1.696 -0.640 -0.789 1.464 -0.545 1.428 -0.677 0.603 2.173 0.466 -1.002 0.837 2.215 0.739 -1.617 -0.171 0.000 1.564 0.515 1.614 0.000 0.759 1.119 0.972 0.974 0.327 1.105 0.940 +1 1.091 0.472 -1.066 1.289 0.290 0.958 0.628 -1.556 0.000 1.207 0.427 0.642 1.107 0.721 1.285 0.081 0.000 0.700 1.088 -0.864 3.102 1.602 0.930 1.544 1.043 0.891 0.854 0.834 +1 1.447 -0.413 0.012 1.033 -0.945 0.897 -0.645 1.072 2.173 0.460 -0.931 -1.489 1.107 0.659 0.289 -1.513 0.000 0.720 -1.495 0.195 0.000 1.225 1.012 1.285 0.817 0.714 0.826 0.755 +0 0.750 0.588 0.398 2.234 0.575 0.813 -0.607 -0.803 2.173 0.885 -0.225 -1.421 0.000 1.170 -0.294 1.515 2.548 0.855 -1.325 -0.045 0.000 1.357 0.967 0.981 0.975 1.070 1.003 0.945 +0 0.474 0.540 -1.253 2.915 1.324 0.748 1.480 -0.392 0.000 1.125 -1.506 0.191 0.000 0.655 -1.844 -0.506 0.000 1.413 0.702 0.918 3.102 0.836 1.020 1.190 0.735 1.014 1.138 1.346 +0 0.344 -1.296 0.276 1.257 0.597 0.599 -0.066 1.674 2.173 0.716 1.144 -0.651 0.000 0.923 -0.472 -0.786 2.548 0.545 1.227 1.618 0.000 0.734 0.898 0.989 0.857 0.768 0.721 0.752 +1 1.435 -0.897 -0.695 1.221 -0.819 0.964 -0.248 1.445 2.173 0.825 -0.264 -0.413 0.000 1.354 -1.044 0.783 0.000 1.175 -1.565 0.442 0.000 0.908 0.916 0.998 0.694 0.475 0.839 0.872 +0 0.718 -0.014 -0.392 0.919 1.614 1.231 0.054 0.169 2.173 0.872 -1.130 1.693 2.215 1.031 -1.357 0.860 0.000 1.845 -1.180 -1.227 0.000 1.449 0.956 1.094 1.020 1.791 1.254 1.054 +1 1.278 1.183 0.901 0.237 -1.123 0.718 1.142 -0.407 0.000 0.630 1.348 0.280 0.000 0.839 -0.035 -1.624 2.548 1.093 0.423 -1.288 0.000 0.845 0.930 0.990 0.738 0.516 0.759 0.686 +0 0.738 -0.700 1.165 1.122 0.557 0.754 0.905 -0.558 2.173 0.375 0.285 -0.819 0.000 1.073 0.607 -1.532 2.548 0.512 0.565 1.196 0.000 0.565 0.583 0.991 0.902 0.873 0.863 0.668 +1 1.674 -0.675 0.718 1.564 1.139 0.502 -1.122 -0.583 0.000 1.063 -0.242 -1.110 2.215 1.062 0.870 -0.442 2.548 0.597 0.069 1.691 0.000 0.918 1.021 0.979 1.356 0.965 1.251 1.019 +0 1.360 0.986 1.067 0.376 -0.677 0.888 0.437 -0.861 2.173 1.347 1.525 0.111 2.215 1.326 0.351 1.610 0.000 1.224 0.346 -1.459 0.000 0.840 1.042 0.990 0.918 1.564 1.044 0.882 +1 0.403 -1.843 -0.391 0.840 -0.591 0.956 -1.662 0.906 0.000 1.180 -0.595 0.111 2.215 1.346 -1.809 -1.191 0.000 0.921 -0.232 -1.697 3.102 0.989 0.896 0.984 0.743 0.953 0.862 0.834 +0 0.474 -1.836 1.716 1.584 -1.732 1.074 -0.878 0.533 0.000 1.450 -0.802 -0.841 0.000 1.335 -0.694 -0.047 0.000 1.548 -0.554 1.296 3.102 0.872 0.678 0.977 0.806 0.808 0.618 0.601 +0 0.659 0.090 -0.285 1.295 0.752 0.678 0.685 -0.808 0.000 1.042 -0.114 -1.042 0.000 0.819 0.020 -1.659 1.274 0.996 -2.012 0.909 0.000 0.831 0.745 1.030 0.595 0.904 0.853 0.802 +0 0.793 -0.007 -1.605 1.278 0.044 1.074 1.432 0.130 1.087 1.913 -0.763 -1.285 1.107 0.977 2.610 1.514 0.000 1.603 -0.688 0.575 0.000 0.896 1.519 1.389 1.313 3.511 1.741 1.370 +0 1.829 0.021 1.555 2.846 -1.551 1.839 -0.557 0.085 2.173 0.861 -0.075 -0.480 0.000 0.862 2.436 -1.290 0.000 1.185 0.134 0.247 0.000 0.817 0.991 1.065 2.470 1.503 1.609 1.315 +0 0.779 0.270 -1.020 0.773 1.725 0.300 0.556 -0.073 0.000 0.790 -1.039 0.528 2.215 0.784 -0.249 1.584 2.548 0.557 0.640 -1.228 0.000 0.542 0.850 0.990 0.722 0.762 0.879 0.712 +1 2.224 0.801 0.972 0.917 -1.486 0.880 1.219 -0.242 2.173 1.085 0.151 -0.987 2.215 0.812 1.041 1.365 0.000 0.584 1.689 -0.342 0.000 0.835 1.016 1.584 1.385 1.208 1.140 0.931 +1 1.003 -1.132 1.153 1.325 1.322 0.727 -0.783 -0.284 1.087 0.391 -1.431 
-0.199 2.215 0.575 -1.816 -1.077 0.000 0.704 -0.603 0.220 0.000 0.803 0.692 0.989 0.787 0.279 0.854 0.712 +1 1.340 -0.225 0.253 1.498 0.771 1.259 -0.343 -1.201 1.087 0.430 -0.642 -0.471 0.000 0.762 -1.597 1.000 0.000 0.529 -0.042 1.167 3.102 0.990 1.231 0.980 0.535 0.741 0.973 0.903 +0 0.449 1.190 -1.245 0.534 1.027 1.551 0.334 -0.557 2.173 1.287 -0.401 1.067 0.000 1.277 0.451 0.914 0.000 0.612 0.050 -0.974 3.102 0.962 0.877 0.988 1.063 0.405 1.162 0.933 +1 0.504 -1.356 0.964 1.089 0.401 0.551 0.889 1.357 1.087 1.251 0.266 -1.105 2.215 0.373 -1.322 -1.625 0.000 0.598 -1.334 0.420 0.000 0.504 1.002 0.985 2.223 1.047 1.860 1.337 +1 0.371 -0.753 1.642 1.145 0.949 0.849 0.913 -1.129 2.173 0.929 1.244 -0.539 0.000 1.283 0.677 0.577 2.548 1.078 0.498 1.395 0.000 0.857 0.850 0.998 1.007 1.305 0.821 0.753 +1 0.718 1.295 -0.166 0.548 -1.369 0.538 1.438 -1.523 0.000 0.674 -0.940 0.146 2.215 0.599 1.589 0.875 0.000 0.651 0.556 1.096 3.102 0.860 0.587 0.988 0.921 0.708 0.881 0.760 +0 0.731 0.197 1.122 0.999 0.205 0.607 -0.271 -0.595 0.000 1.349 -0.302 1.693 2.215 1.114 -0.478 -0.052 2.548 0.611 0.506 -1.610 0.000 0.850 0.970 0.982 0.654 1.310 0.804 0.742 +1 0.469 -1.268 1.090 1.136 0.012 0.974 -0.539 0.820 2.173 1.046 -0.271 -1.049 0.000 1.117 0.659 -1.378 2.548 0.452 2.279 -0.329 0.000 1.999 1.220 0.987 0.753 1.481 1.238 1.079 +0 0.592 0.410 -1.529 1.128 -0.815 1.011 1.054 -0.189 2.173 1.502 0.514 1.044 2.215 0.904 0.431 0.365 0.000 1.622 -1.923 -1.509 0.000 2.846 2.541 0.981 0.899 1.697 2.076 1.785 +1 0.996 -0.710 0.155 0.892 1.351 1.438 -0.756 1.580 2.173 0.879 -1.360 -0.192 0.000 0.414 -1.924 0.316 0.000 1.450 -0.301 -0.287 3.102 0.530 0.663 1.150 1.045 1.549 1.035 0.886 +1 0.753 0.827 0.749 0.402 -0.597 0.902 0.002 -0.110 0.000 1.199 0.328 -1.603 0.000 1.122 0.099 0.516 2.548 0.551 0.112 -0.527 3.102 2.185 1.166 0.984 0.815 0.486 0.825 0.823 +1 1.018 1.080 0.393 0.716 0.980 0.873 0.149 0.910 0.000 0.767 0.873 -1.196 0.000 1.795 0.995 -0.645 1.274 0.554 -0.194 -1.293 3.102 1.149 0.786 0.984 1.095 0.687 0.833 0.852 +1 2.620 -0.787 -0.453 0.539 -0.867 0.926 -0.026 0.834 2.173 0.729 -2.047 -1.186 0.000 1.674 0.568 1.236 0.000 0.706 -0.617 -1.040 0.000 0.803 0.884 0.978 0.556 0.771 0.902 0.765 +1 2.149 0.844 0.295 0.389 -1.740 0.879 -0.103 1.675 0.000 1.261 0.705 -0.822 1.107 0.500 1.281 -1.586 0.000 0.484 -0.626 -1.514 3.102 1.017 1.169 1.224 0.862 0.703 0.815 0.859 +0 0.731 -0.363 -0.142 1.813 0.676 0.459 1.213 -0.532 0.000 0.513 2.070 0.477 0.000 1.956 -0.543 -1.459 2.548 1.015 0.169 -1.603 3.102 0.945 0.958 1.073 1.308 0.464 1.143 1.135 +0 0.417 -1.590 0.521 1.357 -1.110 0.624 0.461 -0.769 0.000 0.784 -0.448 1.557 2.215 0.966 0.288 0.701 0.000 0.573 -0.059 0.135 3.102 1.358 1.108 1.037 0.737 0.592 0.669 0.857 +0 2.342 1.059 1.722 0.424 1.623 0.906 0.886 -0.445 2.173 1.128 1.518 0.277 0.000 0.500 1.241 0.674 2.548 0.373 1.601 -0.385 0.000 0.830 0.948 0.981 0.700 0.736 0.877 0.824 +1 1.601 0.998 1.435 1.008 -1.328 0.584 1.886 -0.453 0.000 0.524 -0.216 1.558 2.215 0.802 1.009 -0.224 2.548 0.752 1.143 -1.417 0.000 0.819 1.077 1.068 0.902 0.847 0.705 0.735 +0 0.392 1.378 1.567 0.171 1.657 0.864 0.888 1.480 2.173 1.201 0.949 -0.301 2.215 1.277 0.403 0.507 0.000 0.506 -0.554 -0.706 0.000 0.942 0.945 0.984 0.780 1.499 0.917 0.803 +1 1.283 -0.376 1.243 1.343 0.732 0.810 -0.356 -1.151 2.173 0.695 2.033 -0.577 0.000 1.039 -0.166 0.395 0.000 1.247 0.797 -0.559 3.102 0.909 0.994 0.987 1.189 0.927 1.047 0.876 +1 1.168 -0.405 1.234 0.981 1.270 0.961 1.449 -0.624 0.000 0.817 0.830 1.583 2.215 2.374 -0.999 -0.755 
2.548 0.489 -0.386 0.520 0.000 1.253 0.985 0.975 1.400 2.112 1.587 1.482 +1 0.805 -1.106 -0.142 0.461 -0.063 0.550 -2.857 -0.241 0.000 0.332 2.482 0.587 0.000 0.979 -0.514 0.890 2.548 2.005 -0.310 -1.515 3.102 0.934 1.098 0.985 0.734 0.891 0.875 0.854 +0 0.668 -0.759 -0.651 1.006 0.633 1.151 -0.233 -0.701 2.173 1.397 0.741 1.411 1.107 0.528 0.042 1.402 0.000 0.648 0.211 -0.363 0.000 1.107 1.103 1.040 1.295 2.012 1.299 1.076 +0 0.569 -1.719 -0.297 0.940 -0.971 1.159 1.841 1.116 0.000 1.190 -0.052 1.528 2.215 1.437 -0.257 0.073 0.000 1.379 -2.480 -1.250 0.000 0.928 0.733 0.995 1.050 1.417 0.936 0.828 +1 0.581 0.390 -1.349 0.917 0.055 0.513 -0.528 1.547 0.000 0.965 -1.286 0.096 2.215 0.772 0.850 1.657 0.000 0.778 -1.116 -1.395 3.102 0.913 0.807 0.986 0.931 0.762 0.883 0.791 +1 1.238 -0.305 1.241 0.539 -0.440 0.839 -2.620 -1.243 0.000 0.510 0.901 -0.070 0.000 1.298 -0.612 -1.525 2.548 2.198 -0.038 0.129 0.000 0.740 0.843 1.129 0.779 0.923 0.874 0.774 +0 2.246 0.114 -0.374 0.575 -0.633 1.481 0.635 -1.590 0.000 1.319 0.993 0.432 2.215 1.371 -0.638 0.147 2.548 2.085 -0.381 1.557 0.000 0.611 1.000 0.994 1.354 1.435 1.087 1.045 +0 0.385 -1.293 1.299 1.246 1.243 0.509 0.022 -0.274 0.000 0.381 1.422 -1.048 0.000 0.585 -0.046 0.206 2.548 0.750 -0.573 0.851 3.102 0.948 0.838 0.972 0.656 0.323 0.530 0.608 +1 0.759 0.450 -0.263 1.068 0.767 0.356 0.536 -0.498 2.173 0.711 -1.272 1.471 2.215 0.937 -1.563 -0.940 0.000 0.833 1.007 1.413 0.000 0.697 0.894 0.998 1.079 1.071 0.793 0.824 +0 3.409 -0.390 0.291 1.448 0.591 1.895 -0.905 -1.437 1.087 0.379 0.392 -0.469 2.215 0.766 1.237 -1.177 0.000 0.658 -0.812 -0.843 0.000 0.757 1.027 0.986 0.884 1.300 1.594 1.191 +1 1.703 -0.370 -1.020 0.819 -1.061 0.637 0.590 0.093 2.173 1.171 0.081 0.789 0.000 0.760 0.355 1.075 2.548 0.829 0.163 -1.733 0.000 0.982 0.929 0.976 0.900 0.677 0.805 0.803 +0 6.067 -0.106 0.526 1.353 -0.793 2.974 -0.253 -0.876 2.173 1.201 -1.684 1.612 0.000 1.581 -0.873 -1.261 0.000 0.753 -0.254 0.927 3.102 1.390 1.065 3.682 3.566 1.582 2.186 2.097 +1 0.643 -1.963 0.790 0.862 -1.660 0.762 0.236 -0.785 2.173 0.378 -1.384 1.607 0.000 1.146 -0.073 0.562 2.548 0.462 -2.013 0.229 0.000 0.587 0.840 0.984 1.962 1.107 1.468 1.101 +1 0.625 0.939 0.327 1.252 -0.750 1.544 -0.923 -1.731 0.000 0.737 -0.239 -0.549 0.000 1.742 -0.626 0.659 2.548 1.876 -0.851 0.109 3.102 0.936 0.986 1.011 1.279 0.697 1.086 0.897 +1 2.660 0.241 -1.365 0.623 0.512 1.043 0.742 1.058 0.000 0.918 -1.016 0.120 1.107 1.930 -1.525 -0.380 0.000 0.949 -0.525 1.488 3.102 0.628 0.722 1.770 1.516 0.814 1.004 0.991 +1 0.686 1.039 1.628 1.186 0.767 0.679 0.481 -0.771 0.000 0.907 -0.063 -0.320 2.215 1.308 -0.233 -1.415 2.548 0.545 0.385 1.043 0.000 0.927 0.818 0.987 0.963 0.972 0.827 0.732 +1 1.086 0.966 -0.067 0.992 1.197 0.911 0.217 -0.697 2.173 0.652 1.786 -1.730 0.000 1.170 0.714 1.601 0.000 1.331 0.414 0.357 3.102 0.763 1.008 1.307 1.124 0.962 0.941 0.864 +0 0.759 1.417 1.312 1.406 -1.574 0.833 1.324 -0.237 2.173 0.407 2.524 -1.313 0.000 0.628 1.438 0.412 2.548 0.730 1.594 0.091 0.000 0.958 0.885 0.991 0.771 0.509 0.818 0.704 +0 1.809 -0.096 1.147 0.382 -1.229 0.685 1.531 0.362 0.000 0.666 0.931 -1.224 2.215 0.767 0.164 -0.362 0.000 0.749 -0.776 -0.852 3.102 1.245 1.039 0.986 0.737 0.727 0.822 0.858 +1 0.965 -1.023 1.664 0.447 1.214 1.124 -0.504 0.021 2.173 0.830 -0.544 1.350 0.000 0.658 -1.076 -0.990 2.548 0.674 -1.400 -0.704 0.000 1.099 0.748 1.000 1.144 0.923 0.817 0.761 +1 0.513 0.943 1.607 1.385 -1.648 0.556 1.589 0.566 2.173 0.570 -0.014 -1.454 0.000 1.605 0.060 -0.138 0.000 0.603 0.449 
0.257 1.551 1.362 0.801 0.977 0.908 0.381 0.764 1.002 +0 1.125 0.391 -1.106 0.286 -0.160 0.783 0.796 0.550 2.173 0.691 0.117 0.648 2.215 0.422 1.726 -0.894 0.000 0.781 0.946 1.600 0.000 0.812 0.925 0.980 0.795 0.389 0.728 0.679 +1 0.836 -0.253 -0.008 1.122 -1.322 0.537 -1.391 -0.035 0.000 1.045 0.046 1.432 1.107 0.756 1.400 0.595 0.000 0.586 0.869 -0.921 3.102 0.763 0.841 1.242 0.935 0.705 0.785 0.726 +1 0.305 0.267 -0.111 0.678 0.813 1.114 -1.429 -1.728 0.000 1.401 -0.736 -0.603 0.000 0.515 -1.662 1.131 0.000 1.051 -0.828 0.720 1.551 0.773 0.918 0.997 0.541 0.378 0.636 0.604 +1 0.537 -0.973 -0.549 1.827 1.549 0.965 -1.384 0.461 0.000 1.491 -1.086 -1.328 2.215 1.065 1.502 -0.239 0.000 0.952 -0.937 1.169 3.102 0.484 1.362 1.302 0.683 0.836 0.840 0.799 +0 0.467 0.585 -1.522 2.223 1.341 2.053 0.858 1.129 0.000 3.993 -0.465 -0.346 0.000 1.053 -1.054 -1.441 2.548 1.037 1.484 -1.062 0.000 2.304 1.400 0.997 1.770 0.919 1.417 1.254 +0 0.886 1.185 1.065 1.818 0.495 1.086 -0.379 -0.803 2.173 0.409 0.327 1.451 0.000 0.775 0.556 0.115 1.274 1.163 1.320 -1.626 0.000 1.003 0.765 0.984 2.125 1.029 1.342 1.083 +1 1.226 -0.925 -1.634 1.048 0.239 0.872 -0.574 0.053 2.173 0.849 -0.658 -1.257 0.000 1.116 -0.064 0.806 2.548 0.816 -1.385 1.576 0.000 0.839 0.920 1.559 0.994 0.833 0.823 0.731 +0 0.645 -0.635 0.250 1.541 -0.836 0.607 -1.067 1.393 0.000 0.727 -0.678 0.672 1.107 0.715 -0.139 -0.664 2.548 0.412 0.189 -1.465 0.000 0.942 0.844 1.146 0.882 0.746 0.629 0.682 +1 1.227 0.305 -0.007 0.053 1.055 1.924 -1.222 1.077 0.000 2.580 1.001 -0.383 2.215 1.558 1.097 -1.411 0.000 1.262 0.261 -1.116 3.102 0.558 0.593 0.937 1.211 1.163 0.913 0.918 +0 3.000 0.595 -1.143 0.286 0.726 0.985 -1.055 0.158 0.000 0.528 -0.814 1.258 2.215 0.335 2.681 -0.565 0.000 0.695 0.369 -1.694 0.000 0.966 1.209 1.274 1.492 0.440 1.057 1.042 +1 0.825 0.634 0.124 0.545 -0.789 1.098 -0.240 -1.390 1.087 1.113 0.281 1.102 0.000 1.077 0.234 0.530 1.274 1.329 0.087 -0.431 0.000 1.563 0.994 0.980 0.982 1.380 0.974 0.839 +0 2.047 -0.013 -0.207 0.522 -0.897 0.932 -1.085 1.225 1.087 1.249 -0.972 1.545 0.000 0.986 -0.543 -1.440 2.548 1.034 0.003 0.069 0.000 1.380 1.118 0.983 1.512 0.858 1.056 1.018 +0 2.845 -1.917 -0.008 0.431 -0.350 1.209 -0.040 1.537 2.173 0.425 -0.941 -1.648 2.215 0.609 -0.593 -0.113 0.000 0.740 0.327 -1.468 0.000 0.805 0.940 0.976 0.968 0.580 1.455 1.154 +1 1.950 0.475 0.793 0.076 -1.070 0.575 -2.178 -1.684 0.000 0.962 -0.113 -0.747 1.107 0.336 1.134 -0.055 0.000 0.874 -0.009 0.266 3.102 0.795 0.989 0.987 1.119 0.657 0.843 1.076 +0 1.075 -0.872 1.082 0.660 0.189 1.434 -1.452 -1.460 2.173 1.472 -0.490 0.051 2.215 0.457 -0.861 -0.822 0.000 0.888 -1.148 1.308 0.000 0.678 0.926 0.986 1.198 2.348 1.226 0.930 +1 0.333 1.779 0.070 1.431 -0.077 1.082 -0.065 -0.269 0.000 2.209 0.935 -1.606 0.000 2.272 0.104 0.976 2.548 1.346 0.864 0.484 3.102 1.210 1.360 0.975 1.129 0.859 1.166 1.101 +1 0.455 -1.187 0.661 2.285 0.138 1.165 -0.943 1.411 0.000 2.011 0.426 -0.801 2.215 0.727 0.666 1.662 2.548 0.908 0.857 0.642 0.000 2.082 1.331 0.987 1.551 1.041 1.370 1.281 +0 1.336 0.635 -0.449 0.724 0.378 1.004 -0.661 1.712 0.000 0.408 0.208 -1.299 0.000 0.925 -0.108 0.537 2.548 0.685 -0.817 -1.265 3.102 0.836 0.990 0.987 0.784 0.665 0.616 0.762 +0 0.699 -0.868 0.562 1.028 1.283 0.716 -0.144 -0.818 0.000 0.894 -0.264 1.654 2.215 1.019 0.554 -0.310 1.274 0.612 0.166 0.478 0.000 0.946 0.910 0.986 1.364 1.095 1.016 0.912 +0 0.899 -1.004 -0.112 0.621 0.868 0.861 -1.034 -1.707 1.087 0.942 -0.486 0.195 2.215 0.580 0.883 -0.144 0.000 1.289 -0.169 -1.310 0.000 1.017 
0.975 0.989 0.973 1.362 0.922 0.891 +0 1.803 -0.892 0.053 0.736 -0.340 1.116 -1.420 -0.361 0.000 2.177 -0.331 1.409 2.215 1.370 -1.347 -1.695 2.548 0.415 -1.849 -1.699 0.000 1.058 1.147 0.986 1.804 1.264 1.314 1.219 +1 0.518 -0.616 0.407 0.686 -1.274 1.295 0.813 0.575 0.000 1.403 -1.609 1.461 0.000 3.306 -1.150 -0.372 1.274 2.111 -0.349 -1.233 1.551 0.579 1.071 0.986 1.336 1.664 1.263 1.105 +0 0.530 -0.330 -0.209 0.772 -1.498 0.330 -0.054 -1.228 0.000 1.242 0.192 -0.376 2.215 1.449 -0.409 1.246 1.274 0.428 2.266 1.434 0.000 1.145 1.146 0.986 0.862 1.493 1.021 0.836 +0 0.973 -1.433 0.925 0.681 -0.881 0.930 0.844 -1.415 2.173 0.756 0.122 -0.451 0.000 0.936 0.524 0.684 2.548 0.452 -0.253 1.380 0.000 0.927 1.026 1.126 1.097 1.115 1.199 1.077 +0 1.442 0.728 0.963 0.074 -1.734 0.840 0.090 -1.242 2.173 0.534 0.448 -0.247 0.000 1.250 0.016 0.201 2.548 0.647 -2.187 -0.948 0.000 0.853 0.904 0.984 0.760 1.231 0.826 0.708 +1 0.951 2.086 0.781 0.506 -0.813 1.241 0.934 -0.073 2.173 0.898 -2.554 1.363 0.000 0.746 0.347 -0.784 0.000 1.348 1.558 -1.684 0.000 1.207 0.766 0.988 1.017 0.945 0.841 0.782 +0 0.407 -0.196 -0.381 1.596 -1.649 0.908 1.446 0.231 0.000 0.731 -1.539 1.512 0.000 0.495 -0.789 0.203 0.000 0.677 2.424 -0.132 0.000 0.917 0.852 1.015 0.896 0.597 0.907 0.792 +0 1.620 -0.612 -0.866 0.129 1.166 0.573 -0.460 0.499 2.173 0.801 0.472 -1.462 0.000 0.378 0.110 -0.326 0.000 1.540 0.642 1.048 3.102 0.737 0.903 0.992 1.208 0.806 0.892 0.789 +0 1.027 2.419 -0.386 2.278 0.987 0.735 1.310 0.744 1.087 0.569 1.418 1.345 0.000 1.273 0.556 1.563 2.548 0.471 -0.377 0.464 0.000 0.889 0.719 2.004 1.749 0.921 1.221 1.040 +0 0.630 -0.699 0.366 0.684 -0.442 0.802 0.057 0.522 0.000 1.350 -1.436 -1.481 2.215 0.529 -0.602 1.606 2.548 0.548 -0.404 -1.031 0.000 1.036 0.752 0.994 1.357 0.491 0.885 0.848 +0 0.287 -0.084 0.945 1.606 1.096 2.767 -1.438 1.397 2.173 3.320 -0.742 -0.536 0.000 1.811 -0.781 -0.066 2.548 1.377 -1.342 -0.521 0.000 1.218 1.073 0.984 0.939 2.829 2.267 1.790 +0 1.830 -1.026 -1.290 0.601 -1.187 1.151 -0.762 0.224 2.173 1.188 -0.612 1.176 2.215 0.404 -0.274 -0.567 0.000 0.408 -1.537 0.183 0.000 0.474 0.729 0.980 1.413 1.307 1.116 0.841 +1 1.284 -0.304 -1.009 1.383 1.674 1.039 -1.248 -0.509 0.000 0.718 0.078 -0.588 0.000 1.188 -0.231 0.254 2.548 2.875 -0.040 1.029 3.102 0.824 1.069 1.222 1.094 0.920 0.953 0.835 +0 1.873 -0.405 -0.734 5.037 -0.783 2.091 -1.926 0.908 0.000 0.794 1.952 0.919 0.000 1.490 0.587 1.362 2.548 1.089 0.420 -1.185 3.102 0.896 0.935 1.017 0.781 0.732 1.226 1.551 +1 0.941 0.719 0.277 2.166 0.508 1.637 -1.743 -1.687 0.000 1.249 -0.130 -0.279 0.000 0.685 -0.511 1.234 2.548 1.235 0.005 -1.189 3.102 0.991 0.926 0.996 1.069 0.609 0.765 0.767 +0 2.112 0.773 -1.230 0.451 0.127 0.948 -0.661 0.476 1.087 0.323 0.727 1.659 0.000 0.480 -0.260 1.415 2.548 0.562 -1.530 -0.449 0.000 1.072 0.998 1.272 0.795 0.650 1.017 0.884 +1 1.164 1.204 -0.343 1.028 0.109 1.750 1.018 -0.532 0.000 1.141 0.499 1.572 0.000 1.365 0.763 1.015 2.548 0.898 1.016 1.642 0.000 0.482 0.642 0.983 0.975 0.352 0.624 0.717 +0 0.623 0.531 0.124 1.550 1.079 0.898 0.180 1.689 1.087 1.272 -0.578 -0.446 2.215 0.696 -0.136 0.290 0.000 0.711 0.746 -1.622 0.000 0.877 0.958 1.032 0.908 1.602 1.069 0.859 +1 0.322 1.112 -0.512 1.270 0.863 0.491 -1.520 -0.488 0.000 1.224 -0.968 -1.178 0.000 1.152 -0.406 1.026 1.274 1.006 0.803 0.041 3.102 1.058 1.245 0.984 0.696 0.896 1.036 1.532 +0 0.824 0.028 1.210 1.445 0.720 1.552 1.340 -0.965 2.173 2.269 0.800 0.531 1.107 1.594 0.769 -1.359 0.000 0.810 -1.336 -1.075 0.000 0.816 0.873 0.999 0.809 
2.792 1.487 1.222 +1 0.612 1.032 -0.800 0.748 1.451 0.852 0.642 0.980 0.000 1.496 0.254 -0.794 2.215 1.131 0.540 0.007 2.548 0.859 1.372 0.467 0.000 0.897 0.874 0.989 0.830 0.942 0.977 0.831 +1 0.727 -0.875 -1.518 0.727 -0.390 0.900 0.075 0.537 2.173 0.674 0.996 -0.795 1.107 0.408 0.889 1.239 0.000 0.462 -0.494 -0.011 0.000 0.600 0.620 0.990 0.945 1.208 0.813 0.645 +1 1.042 -1.986 -1.397 0.952 -0.878 0.899 -1.170 0.376 2.173 0.501 -0.424 0.671 0.000 0.738 2.352 -1.695 0.000 0.811 -1.434 -0.021 0.000 0.854 0.807 0.976 0.792 0.641 0.816 0.698 +0 0.999 1.453 -1.555 0.359 0.825 0.825 1.195 0.438 2.173 1.129 0.549 1.244 2.215 1.372 1.919 -0.614 0.000 1.122 0.726 -0.320 0.000 0.970 1.099 0.991 0.713 1.050 1.035 0.885 +0 3.618 -0.465 0.565 4.835 0.336 4.140 0.036 -1.378 0.000 0.352 0.531 1.739 0.000 1.077 -0.133 -0.872 2.548 0.950 -0.229 -0.157 3.102 1.066 1.048 1.185 0.881 0.467 1.096 2.152 +0 0.332 1.310 -1.286 1.285 -0.030 2.277 0.658 -0.628 2.173 2.259 1.037 1.217 2.215 0.781 1.856 1.392 0.000 0.820 0.134 1.215 0.000 0.929 0.859 0.988 1.746 3.391 1.844 1.422 +0 0.571 1.152 0.449 0.791 1.593 0.531 0.073 0.775 2.173 0.722 1.570 -1.247 0.000 1.517 0.635 -0.640 2.548 0.605 -1.621 0.705 0.000 2.624 1.605 0.989 0.881 1.127 1.140 0.934 +0 0.478 1.680 1.198 2.412 1.349 1.775 -1.052 -0.650 2.173 1.017 0.053 0.900 2.215 1.296 -0.732 -0.072 0.000 0.446 -1.087 -1.197 0.000 0.743 0.988 0.990 6.583 2.268 4.134 3.190 +0 1.328 0.701 0.777 0.836 -0.270 1.105 0.059 -0.799 2.173 0.241 -1.497 0.845 0.000 0.315 -0.604 1.663 0.000 0.841 0.625 1.336 3.102 0.365 0.912 1.181 0.691 1.022 0.844 0.787 +1 0.895 0.999 0.482 0.778 -1.540 1.073 0.445 0.691 0.000 1.331 0.729 -1.598 1.107 0.720 0.881 0.049 0.000 1.744 0.309 -0.704 1.551 0.960 1.157 1.120 0.858 1.026 1.018 0.865 +1 1.148 -0.766 1.489 1.176 0.647 1.364 0.217 -0.663 2.173 0.945 -0.117 1.154 0.000 0.761 0.970 0.377 2.548 0.737 -0.528 -1.308 0.000 0.912 0.914 1.107 1.598 1.163 1.165 0.989 +0 0.664 -0.190 -0.437 1.647 -1.126 0.681 0.062 0.669 0.000 1.138 0.301 -0.483 0.000 2.077 -0.143 1.180 2.548 0.495 0.566 0.213 3.102 1.629 0.875 0.986 1.287 0.680 0.864 0.886 +0 1.102 1.059 0.257 0.428 1.388 0.595 0.527 -0.508 1.087 0.577 0.528 0.054 0.000 1.268 0.743 1.722 2.548 0.635 1.360 -1.128 0.000 0.824 0.829 0.991 0.742 0.991 0.700 0.619 +1 0.517 -0.862 0.411 0.951 0.500 1.804 -0.179 -0.641 0.000 1.324 1.172 1.297 2.215 1.105 0.230 0.732 0.000 2.076 0.800 -1.547 3.102 0.968 1.281 0.990 1.046 0.839 1.003 0.904 +1 0.881 -0.557 0.838 2.421 1.349 3.062 -0.162 -0.253 0.000 1.425 -0.508 1.668 0.000 1.334 -0.248 -1.198 2.548 1.484 0.373 1.282 3.102 4.452 2.613 0.996 0.627 0.935 1.712 1.557 +1 1.176 -0.152 0.820 0.838 -0.125 0.646 1.004 0.932 2.173 0.892 -1.545 -0.046 0.000 1.731 1.129 -0.870 0.000 1.762 0.580 1.607 3.102 1.003 1.005 1.034 0.866 0.669 0.794 0.898 +1 0.917 0.023 -1.116 1.062 1.622 0.773 -0.634 1.183 0.000 1.307 0.055 0.939 0.000 1.712 0.154 -0.453 0.000 1.092 -0.298 -0.752 3.102 0.926 0.980 0.991 0.845 0.492 0.680 0.707 +1 0.285 1.180 -1.395 0.874 -0.100 1.047 -2.411 -1.456 0.000 0.906 -0.403 -1.228 0.000 1.665 -1.129 0.549 1.274 1.266 -0.444 1.465 0.000 0.918 0.992 0.995 0.943 0.597 0.856 0.766 +0 0.615 0.321 -1.440 0.940 0.117 0.659 -1.756 -0.873 0.000 0.574 -1.221 0.205 2.215 0.429 -0.208 1.479 0.000 1.329 1.066 1.215 3.102 1.152 0.881 1.039 0.785 1.434 1.158 0.983 +0 0.849 0.025 0.907 0.311 -0.883 0.814 0.406 0.112 0.000 0.390 -2.292 0.965 0.000 1.352 0.548 -1.257 2.548 0.413 1.892 -0.914 0.000 0.885 0.996 0.992 0.926 1.063 0.845 0.751 +0 0.294 -0.629 0.163 
2.151 1.221 1.490 -0.007 -0.838 1.087 0.850 0.225 0.611 1.107 0.867 -0.469 -0.329 0.000 0.511 0.436 -1.707 0.000 0.801 0.809 0.982 1.064 1.610 1.363 1.054 +1 0.620 0.702 0.930 0.622 -0.923 1.023 0.645 -1.191 2.173 0.534 1.050 0.506 0.000 1.407 -0.002 0.496 0.000 1.170 0.452 -0.045 0.000 0.769 0.991 0.985 0.839 0.797 0.928 0.835 +0 0.548 -0.322 0.010 1.802 -1.035 0.598 1.508 0.557 1.087 0.970 0.880 1.381 2.215 0.521 -0.267 -0.805 0.000 0.396 2.116 -1.478 0.000 1.013 0.936 1.113 1.221 0.835 1.103 0.923 +0 2.205 -0.455 -0.427 0.647 -0.381 0.949 -1.460 -1.681 2.173 1.238 -1.046 1.401 0.000 1.317 -1.515 1.001 0.000 1.864 -0.953 -0.049 3.102 0.905 0.914 0.991 0.661 1.420 1.017 1.096 +0 0.721 0.826 -1.392 1.266 1.213 1.075 1.297 0.154 0.000 0.837 -0.933 -1.317 0.000 1.256 -1.335 -0.773 1.274 1.878 -0.874 -0.271 3.102 0.722 2.096 0.990 1.424 0.569 1.793 1.475 +1 0.721 0.562 -0.322 1.328 -1.392 0.858 0.966 0.977 2.173 0.994 1.559 -0.548 0.000 1.513 -0.135 0.304 2.548 0.837 -0.252 1.456 0.000 0.800 0.881 1.113 1.063 1.166 0.946 0.776 +1 0.790 -0.268 -1.410 0.680 -0.373 0.989 -0.245 0.137 0.000 1.361 0.623 -1.661 2.215 0.775 0.082 0.790 2.548 0.575 0.437 -0.362 0.000 0.677 0.640 0.981 1.120 0.927 0.903 0.852 +0 0.618 -1.474 0.951 0.628 -0.559 0.960 -1.308 1.498 2.173 0.710 0.138 1.406 0.000 1.552 0.321 -0.597 2.548 1.267 -0.188 -0.431 0.000 0.879 0.803 0.990 0.913 2.030 1.269 1.085 +1 0.792 0.723 -0.290 0.843 0.910 0.680 1.042 1.454 0.000 1.246 1.393 -0.379 0.000 1.099 0.319 -1.216 1.274 0.720 0.775 0.322 3.102 1.983 1.134 1.000 0.800 0.698 0.839 0.744 +0 1.860 1.421 1.642 1.251 -0.864 0.983 -0.763 0.147 1.087 0.814 -0.613 0.684 0.000 0.306 0.007 1.657 0.000 0.616 -0.032 -1.125 3.102 0.639 0.739 1.635 0.941 0.811 1.467 1.209 +0 2.804 0.565 -1.336 1.919 -0.972 1.694 0.491 0.428 2.173 1.404 0.029 0.801 2.215 0.502 -0.445 -0.925 0.000 0.517 -0.515 -0.082 0.000 0.389 1.024 1.037 1.822 0.918 1.705 1.265 +0 2.910 1.083 1.245 1.379 1.296 2.551 -0.269 -0.662 0.000 0.688 1.028 0.558 2.215 0.565 -0.904 0.096 2.548 0.665 -1.428 0.953 0.000 2.519 1.474 0.982 0.847 0.848 1.272 1.889 +1 0.861 -0.359 0.845 0.846 0.155 0.820 -1.640 -0.177 0.000 1.087 -1.049 -1.540 0.000 1.652 -1.230 1.515 0.000 0.968 0.549 0.551 3.102 0.826 0.651 0.985 0.783 0.586 0.812 0.790 +0 0.956 -0.152 -1.150 0.555 -1.418 1.297 -1.710 -0.251 0.000 1.170 -0.117 0.858 0.000 1.392 0.198 1.643 2.548 0.711 -0.598 -1.183 1.551 1.068 0.921 0.990 0.882 0.563 1.169 0.985 +0 0.596 -1.391 1.520 1.163 -0.743 1.394 0.759 0.781 0.000 0.900 -0.589 -0.682 1.107 1.520 -1.228 0.126 2.548 0.853 0.264 1.446 0.000 1.012 1.611 1.029 0.872 0.953 1.396 1.300 +1 1.526 -0.977 0.157 0.228 0.633 1.495 0.267 -0.743 0.000 1.683 -1.606 1.627 0.000 0.707 -2.103 1.176 0.000 0.945 -1.581 0.180 0.000 0.866 0.844 0.988 1.050 0.639 1.056 0.977 +0 0.593 0.032 0.394 0.441 0.554 0.380 -0.361 1.396 0.000 0.892 0.491 -1.609 0.000 1.050 0.866 0.114 0.000 1.215 0.780 -1.108 3.102 0.958 0.669 0.991 0.555 0.175 0.495 0.555 +0 0.986 -0.946 -1.270 0.806 0.198 0.852 0.076 -0.508 2.173 0.750 0.074 -1.498 0.000 0.867 0.000 0.397 2.548 0.497 -1.956 0.656 0.000 1.426 1.057 1.198 0.956 0.780 0.856 0.786 +1 0.921 0.034 0.288 0.574 1.675 0.503 0.319 -0.303 0.000 0.805 -0.121 1.683 2.215 1.029 0.927 1.167 2.548 0.885 -0.516 -0.630 0.000 0.579 0.994 0.988 0.695 0.727 0.741 0.666 +1 0.611 1.623 -0.025 0.763 1.499 0.712 0.973 0.684 0.000 0.909 1.810 -1.112 0.000 1.402 0.022 -1.252 2.548 0.952 0.029 0.121 3.102 0.857 1.113 0.990 0.700 0.834 0.712 0.682 +0 1.207 -0.395 -1.344 0.876 -0.429 0.853 1.022 
0.542 2.173 0.410 -1.478 1.344 0.000 0.531 -0.216 -0.400 2.548 0.454 -0.115 -1.573 0.000 0.503 1.235 1.047 0.558 0.848 0.869 0.784 +1 0.561 0.541 -0.475 0.494 0.499 1.439 0.775 0.797 0.000 1.218 0.362 -1.458 0.000 1.705 -0.470 -0.501 2.548 1.245 -0.982 -1.260 3.102 2.583 2.069 0.982 0.708 0.797 1.515 1.187 +1 1.959 0.669 1.468 0.814 0.414 0.773 0.094 -0.952 2.173 1.370 -0.065 0.060 2.215 0.785 -0.271 1.280 0.000 0.432 2.020 -1.011 0.000 1.300 1.092 1.422 1.336 1.204 1.091 0.966 +0 0.543 0.367 -1.562 0.658 1.292 1.034 0.882 0.056 2.173 1.147 -1.689 -1.376 0.000 0.511 -2.201 -1.110 0.000 0.450 -0.744 0.245 0.000 1.016 0.687 0.999 1.074 0.870 1.320 1.362 +0 1.132 0.770 -1.601 2.048 1.131 0.575 -0.119 -0.270 0.000 0.856 0.511 -0.576 2.215 1.056 -1.033 -0.761 0.000 1.242 0.393 0.474 3.102 0.955 1.007 1.326 1.208 0.755 0.866 1.028 +1 0.992 0.323 1.360 1.309 1.312 0.792 -0.236 -0.180 2.173 0.714 1.669 0.099 0.000 0.366 0.148 0.004 2.548 0.494 -0.639 -1.375 0.000 1.269 1.181 0.991 0.682 0.175 0.761 0.865 +0 1.263 0.591 1.721 0.541 -1.539 0.917 -0.626 0.873 2.173 0.972 -0.950 0.014 0.000 0.949 0.442 -0.122 0.000 1.157 -0.602 -1.547 3.102 0.886 0.838 0.975 1.029 0.894 0.808 0.780 +1 0.613 0.834 1.542 1.257 0.423 1.187 0.826 -1.292 0.000 1.176 0.306 0.291 1.107 0.461 0.964 -0.492 0.000 1.263 -0.080 1.586 3.102 0.891 0.908 1.029 0.735 1.037 0.941 0.848 +0 1.766 -0.540 1.634 0.886 1.251 0.822 0.443 -0.761 0.000 0.932 0.486 0.327 2.215 0.277 0.057 0.010 1.274 0.411 1.708 -0.231 0.000 0.890 0.952 0.994 0.688 0.193 0.840 0.962 +1 0.380 0.822 -0.459 1.136 1.030 0.731 0.463 -1.290 0.000 0.926 -0.227 0.281 2.215 0.993 0.536 1.547 0.000 1.876 -0.024 -0.391 3.102 0.939 0.956 0.984 1.115 0.688 0.908 0.803 +1 1.031 -0.232 1.388 0.919 1.384 2.153 0.966 -0.186 0.000 1.244 1.030 -1.638 1.107 1.028 -0.378 -1.479 2.548 1.241 0.707 1.205 0.000 1.265 1.002 0.991 1.534 0.985 1.020 0.949 +1 0.766 -0.842 0.669 1.097 -0.613 0.959 -0.425 -0.203 2.173 0.470 1.606 1.172 0.000 1.423 0.735 -1.590 0.000 0.806 -1.824 1.458 0.000 0.937 0.637 1.161 0.781 0.691 0.959 0.967 +0 3.148 -1.131 0.928 3.036 0.855 2.728 0.630 -0.797 1.087 0.728 -1.028 1.383 2.215 0.600 1.368 -1.094 0.000 1.221 0.215 -0.431 0.000 0.821 0.873 0.988 0.803 2.750 3.214 2.472 +0 0.652 2.075 0.301 1.056 -0.176 0.761 -2.139 -0.095 0.000 1.745 0.164 -1.541 2.215 0.490 1.889 -1.521 0.000 1.363 1.005 -1.522 1.551 4.618 3.088 0.983 1.495 0.755 2.050 1.846 +0 1.365 -0.329 -1.385 0.978 -0.489 0.604 0.335 0.728 0.000 1.087 0.735 1.613 2.215 1.603 0.065 -0.537 0.000 1.375 1.136 0.658 1.551 1.625 1.251 1.157 1.104 0.903 0.996 0.988 +0 0.414 1.082 -0.073 0.779 -1.015 0.867 0.837 0.944 0.000 0.639 0.331 1.341 2.215 1.114 0.017 -0.787 2.548 0.488 -1.642 0.905 0.000 0.500 0.760 0.982 0.565 0.856 0.632 0.595 +1 1.443 0.081 1.518 0.492 -0.696 0.874 -0.659 0.523 2.173 0.970 1.618 -0.069 0.000 1.579 0.947 -1.335 2.548 0.537 0.431 -0.922 0.000 0.867 1.013 1.064 1.032 2.022 1.258 1.066 +1 0.959 0.631 0.626 1.297 -1.127 1.776 0.034 1.244 2.173 1.614 -1.275 -0.638 0.000 1.121 0.037 -0.239 0.000 1.118 -0.052 0.511 0.000 0.829 0.933 1.546 1.436 0.946 0.996 1.042 +1 1.024 0.823 1.306 0.614 -0.710 1.011 0.085 -0.113 0.000 1.355 0.340 -1.386 2.215 1.332 1.536 0.826 2.548 0.582 -0.634 -0.690 0.000 0.870 0.993 1.066 0.817 1.658 0.907 0.767 +0 0.398 1.393 0.134 0.795 0.231 0.499 0.779 -0.254 0.000 0.733 0.096 1.676 2.215 0.772 1.431 1.270 1.274 0.701 0.409 -1.421 0.000 0.956 1.070 0.999 0.896 0.704 0.728 0.759 +1 1.459 -0.547 0.110 0.413 -1.093 1.113 -0.163 -0.666 2.173 0.903 -0.139 0.892 
0.000 0.985 2.192 -1.390 0.000 1.015 -1.096 1.263 0.000 0.892 0.826 0.986 0.859 0.867 0.884 0.806 +1 0.493 -0.955 -1.647 1.599 0.260 0.941 -0.965 1.318 0.000 1.555 0.627 -0.940 0.000 1.586 -0.640 -0.041 1.274 0.783 -1.401 -1.542 0.000 0.845 0.706 1.216 0.773 0.697 0.796 0.744 +1 0.953 -0.214 -1.640 0.630 -0.143 0.994 0.531 -1.572 0.000 1.183 -1.841 -0.164 0.000 2.359 0.271 0.646 2.548 1.014 0.662 -0.970 3.102 4.168 2.462 1.047 1.032 1.211 1.833 1.381 +1 0.690 0.073 -0.358 0.712 -1.421 0.696 -0.193 1.278 0.000 0.911 1.353 0.614 1.107 0.596 0.921 -0.608 2.548 0.379 1.303 -0.896 0.000 0.862 1.016 0.980 0.702 0.714 0.835 0.717 +1 0.837 -0.660 -0.878 0.797 -1.566 1.424 -0.747 -0.627 2.173 0.912 -0.623 1.347 0.000 0.739 0.024 0.358 2.548 1.156 -1.230 1.050 0.000 0.675 0.796 0.988 1.001 1.116 1.018 0.911 +1 0.455 -2.335 -1.625 1.149 0.431 1.018 -0.877 -1.405 2.173 0.617 -1.589 0.333 0.000 0.833 -1.966 -0.548 0.000 0.724 -0.047 1.135 1.551 0.837 0.913 0.989 1.183 0.786 0.856 0.810 +0 0.278 1.453 -1.511 2.099 -0.632 1.225 -0.131 1.121 2.173 0.943 -0.911 -1.204 0.000 0.865 -0.764 0.191 0.000 0.616 0.104 1.663 3.102 1.320 0.859 0.987 1.505 0.447 0.941 0.970 +0 2.265 -1.757 -0.714 0.806 -0.618 1.390 -0.433 1.142 2.173 0.940 -1.120 -0.305 1.107 0.472 2.270 1.601 0.000 0.436 -1.739 0.499 0.000 2.481 2.014 0.985 0.861 1.735 1.580 1.963 +0 1.511 -0.571 -0.841 3.200 -0.505 2.003 -2.480 1.124 0.000 1.232 -0.015 -0.219 0.000 1.349 1.177 1.361 1.274 1.762 0.235 1.220 3.102 1.428 1.279 0.996 2.343 0.639 1.671 1.415 +0 1.460 -0.679 -1.205 0.550 1.183 0.963 -1.503 -0.133 2.173 0.904 -0.649 0.959 2.215 0.449 1.168 1.454 0.000 0.431 -1.594 1.726 0.000 0.718 0.811 1.038 0.846 1.291 0.925 0.739 +0 0.707 -0.365 -0.849 0.528 -0.872 0.202 -1.131 0.550 0.000 0.551 -0.005 0.506 2.215 0.370 -1.459 1.690 2.548 0.408 -1.730 0.927 0.000 0.478 0.529 0.978 0.547 0.598 0.592 0.491 +1 1.283 0.246 -0.730 0.757 -0.500 1.092 -0.297 1.398 2.173 0.971 -1.796 -1.065 0.000 1.445 -0.090 0.664 1.274 0.988 -0.787 0.154 0.000 1.283 1.453 0.989 1.476 0.975 1.157 1.303 +0 1.075 1.250 -0.088 0.981 1.190 0.766 0.624 1.260 1.087 1.473 0.879 -0.849 0.000 0.483 -0.602 0.590 0.000 0.374 0.018 0.398 3.102 1.674 0.950 1.299 0.930 0.435 0.816 0.811 +0 0.574 -1.110 1.061 1.313 -0.371 0.664 0.733 -1.635 2.173 0.639 -2.206 -0.650 0.000 1.122 -0.043 1.164 2.548 0.597 -0.105 0.312 0.000 1.205 1.256 1.155 1.338 0.759 1.132 1.002 +1 0.812 0.883 -1.651 0.479 -0.442 0.824 0.736 -0.537 0.000 1.140 0.584 0.911 0.000 0.649 -1.313 0.870 2.548 1.115 0.573 -1.425 3.102 1.521 1.071 0.989 1.328 1.008 0.916 0.946 +1 1.084 1.831 -1.312 0.792 -0.704 0.550 0.742 0.246 0.000 0.561 0.238 -0.470 1.107 0.552 0.139 1.347 2.548 1.189 -0.563 1.011 0.000 1.056 0.857 0.989 0.777 0.591 0.619 0.706 +1 0.596 -1.860 1.307 0.964 -1.402 0.667 0.614 0.534 1.087 0.431 0.099 0.916 0.000 0.633 1.008 -1.317 2.548 0.925 0.805 -0.609 0.000 0.886 0.711 0.979 1.004 0.830 0.950 0.816 +0 1.242 0.717 -0.164 0.667 -0.984 0.662 0.844 -1.482 2.173 0.750 -0.283 1.444 2.215 1.191 -0.707 0.155 0.000 0.374 -0.665 1.508 0.000 0.691 1.166 0.989 0.936 0.802 0.781 0.773 +0 1.830 0.236 0.039 1.430 -0.142 1.040 -0.831 1.678 0.000 0.868 -0.133 -1.180 2.215 1.221 -0.241 1.342 0.000 0.554 -0.946 0.875 3.102 0.845 0.959 0.980 0.745 0.684 0.772 0.978 +0 1.269 0.041 -1.239 1.690 -0.722 1.347 0.915 -1.022 0.000 1.165 -1.096 0.618 2.215 1.505 -0.247 0.368 0.000 2.723 -0.177 0.979 3.102 0.759 0.822 0.984 1.613 0.919 1.287 1.027 +0 0.802 -0.011 -0.135 1.578 0.973 0.704 -0.933 1.249 2.173 0.754 0.409 -0.745 0.000 0.781 
1.651 -0.811 0.000 1.126 1.201 -1.361 3.102 0.890 0.670 1.311 0.960 1.559 1.115 1.038 +1 1.021 -0.482 -0.027 1.195 -1.223 0.555 -1.247 0.522 0.000 0.794 0.484 1.425 2.215 0.641 -1.858 -0.334 0.000 1.284 -0.773 -1.525 3.102 0.863 0.906 1.348 1.049 0.824 0.915 0.860 +1 0.381 -0.514 0.996 1.168 1.562 0.569 -2.456 -0.025 0.000 1.022 -0.725 -0.457 2.215 1.221 0.630 0.502 0.000 2.809 0.878 1.735 0.000 0.792 0.763 0.980 1.255 1.109 0.945 0.862 +1 0.640 -0.490 -0.596 0.723 1.635 1.690 0.027 0.142 0.000 1.304 -0.819 -1.177 0.000 1.765 0.224 1.474 2.548 1.075 -1.213 -0.636 0.000 0.868 0.876 0.984 0.760 0.406 0.905 0.769 +1 0.295 1.251 0.264 2.782 1.122 0.417 -0.247 -0.206 2.173 0.822 1.009 -1.059 2.215 0.469 1.438 -1.375 0.000 1.461 1.324 -0.751 0.000 0.908 0.996 0.990 1.221 0.844 1.160 0.955 +1 1.495 -0.338 -1.049 1.067 -0.902 0.345 1.336 1.473 2.173 1.312 0.626 0.330 2.215 0.510 2.368 0.532 0.000 0.791 1.938 1.293 0.000 0.454 1.081 0.986 0.923 0.921 0.970 1.015 +0 0.659 0.936 -1.531 0.878 -0.677 0.714 -1.147 0.217 2.173 0.325 -1.954 -1.574 0.000 0.423 -0.376 1.603 2.548 0.546 0.206 0.947 0.000 0.868 0.826 0.990 0.830 0.700 1.236 1.091 +1 1.086 -0.797 -0.167 0.570 -1.222 0.990 -1.778 1.459 0.000 0.358 2.543 0.116 0.000 0.716 -0.101 0.458 2.548 0.705 -2.197 -1.679 0.000 0.678 0.963 0.988 0.715 0.576 0.834 0.772 +1 0.935 1.386 0.314 0.735 -1.534 1.892 0.725 0.813 1.087 1.135 -0.049 -1.257 0.000 0.497 0.862 1.026 2.548 2.018 0.889 -0.022 0.000 0.849 1.029 1.144 1.167 0.264 1.433 1.233 +0 1.013 0.642 0.595 0.476 1.088 0.822 2.012 1.408 0.000 1.645 0.881 -0.386 2.215 0.593 1.630 -0.867 0.000 1.310 0.438 -1.521 3.102 1.124 1.010 0.985 1.207 1.159 1.050 1.057 +0 0.492 -1.491 -0.259 1.513 0.715 0.761 0.454 -0.750 0.000 0.830 -1.158 1.700 1.107 1.047 -0.294 0.060 2.548 1.044 0.441 -1.605 0.000 0.948 0.981 0.985 0.853 1.082 0.909 0.925 +1 1.189 1.158 -1.077 0.895 1.303 0.864 0.852 -1.384 0.000 1.442 1.201 0.230 2.215 1.511 0.514 0.997 2.548 1.444 0.809 -0.732 0.000 0.951 1.266 1.200 1.159 1.136 1.084 0.948 +1 0.785 0.175 0.246 1.043 1.381 1.160 -2.034 -0.698 0.000 1.622 -0.842 -1.298 0.000 3.071 -1.208 0.647 2.548 1.542 0.219 -1.643 3.102 0.993 0.764 1.070 1.429 2.072 1.254 1.051 +1 0.520 0.637 1.495 1.219 -1.409 1.301 -0.523 0.278 1.087 0.746 -0.121 0.968 0.000 2.130 0.774 -1.221 0.000 1.967 0.661 0.207 3.102 0.906 1.188 0.978 1.316 1.206 1.313 1.168 +0 0.598 -1.858 -0.120 0.249 -1.365 0.331 -2.332 -1.239 0.000 0.923 0.373 1.524 2.215 0.522 -1.171 0.652 2.548 0.976 -0.581 -0.056 0.000 1.106 0.728 0.986 0.976 0.865 0.863 0.742 +0 0.645 1.182 -1.216 1.166 0.587 0.544 0.695 0.444 2.173 0.338 -2.558 -0.931 0.000 0.582 -0.128 -0.874 0.000 0.401 1.258 1.659 0.000 1.084 0.695 1.200 0.719 0.807 0.850 1.021 +0 0.552 1.397 0.414 0.884 -1.056 0.426 1.775 -0.323 0.000 0.921 -1.982 1.169 0.000 1.629 -0.163 -1.031 2.548 0.846 0.545 0.381 0.000 0.773 0.754 0.986 1.267 1.025 0.899 0.807 +0 1.453 2.074 1.410 0.411 -1.723 0.994 1.816 -0.922 0.000 0.765 1.453 0.732 2.215 0.880 0.306 -0.638 0.000 1.424 -0.200 0.687 1.551 0.849 0.982 0.995 0.726 0.926 0.873 0.851 +1 1.563 -0.440 -0.525 0.291 -1.027 0.937 0.746 1.284 0.000 0.487 -0.326 -0.300 0.000 0.857 -0.483 0.819 2.548 1.091 1.375 1.299 0.000 0.913 0.876 0.984 0.562 0.486 0.573 0.627 +0 0.616 -1.337 0.361 1.222 0.980 1.020 -0.433 -0.552 2.173 0.567 2.504 -1.572 0.000 1.157 0.088 1.524 2.548 0.711 0.658 0.351 0.000 1.173 1.226 0.984 1.613 1.344 1.336 2.007 +1 0.698 1.866 -1.198 0.694 0.469 0.418 0.794 -0.729 0.000 0.937 0.245 1.196 2.215 0.502 1.118 1.245 0.000 0.796 -1.329 
-0.627 1.551 0.824 1.063 0.988 0.938 1.136 1.046 0.857 +1 0.486 0.996 -0.332 0.500 0.087 1.116 0.145 0.181 2.173 0.752 -0.105 -1.482 0.000 1.271 0.831 1.606 0.000 0.800 -1.031 1.243 0.000 0.879 1.305 0.987 0.537 0.734 0.791 0.689 +0 0.555 0.308 -1.614 1.483 -0.530 1.186 0.670 1.559 1.087 1.550 0.791 0.397 0.000 0.569 1.139 -0.434 0.000 0.594 -0.199 -0.976 3.102 1.031 0.886 1.043 1.158 0.792 0.945 0.881 +1 0.853 0.725 -0.242 1.191 0.202 1.176 1.081 -0.358 0.000 0.952 0.627 0.952 1.107 0.707 1.639 1.459 0.000 1.298 0.015 -1.662 3.102 1.010 0.977 0.978 1.120 0.779 0.868 0.808 +0 0.713 2.002 1.506 0.589 0.324 0.964 -0.887 -1.201 0.000 0.739 1.144 -0.181 2.215 1.182 0.567 0.992 0.000 1.161 0.799 -1.313 3.102 0.910 0.859 0.989 0.684 0.718 0.634 0.618 +1 0.955 1.692 -0.461 0.558 -0.006 0.977 0.592 -1.369 0.000 1.003 1.527 0.991 2.215 1.252 0.947 0.159 2.548 0.423 -1.318 1.251 0.000 1.086 0.993 0.977 0.951 0.873 0.836 0.775 +0 0.417 1.279 0.590 1.059 -0.408 1.202 -0.306 1.116 1.087 1.499 0.935 -0.514 2.215 1.081 0.351 1.150 0.000 0.524 1.908 1.201 0.000 0.917 1.156 0.989 0.775 2.379 1.265 1.041 +1 0.498 1.290 -1.594 0.869 0.886 1.033 0.239 -0.595 0.000 1.067 0.687 1.450 2.215 0.974 0.933 -0.867 0.000 1.473 0.011 0.478 3.102 0.847 1.111 0.986 0.635 0.957 0.972 0.838 +1 0.995 -0.144 -1.352 0.625 1.117 0.783 0.840 -0.587 0.000 0.647 -0.298 -0.415 2.215 1.401 0.195 1.113 1.274 0.928 -1.547 0.842 0.000 2.662 1.510 0.990 0.701 1.028 1.112 0.937 +0 0.588 0.490 -0.137 1.607 0.439 0.457 -0.449 -1.272 0.000 1.017 -1.317 -0.695 1.107 1.084 -1.229 1.550 0.000 1.156 -0.334 1.638 3.102 0.923 0.967 0.996 1.155 0.974 1.414 1.296 +1 0.361 1.296 0.253 2.219 -0.519 1.460 0.541 1.522 2.173 0.748 0.440 -1.186 2.215 1.310 -0.781 -0.054 0.000 1.239 -0.517 0.422 0.000 1.034 1.124 0.989 2.119 0.992 1.440 1.647 +0 0.522 0.673 1.375 0.810 -1.038 0.827 -1.139 0.225 2.173 0.536 -0.735 -0.198 2.215 1.062 -0.609 1.450 0.000 1.218 0.313 -1.486 0.000 0.908 0.911 0.987 1.033 0.413 0.836 0.752 +0 0.673 -0.093 -0.641 0.895 0.400 0.580 -0.460 -1.628 0.000 0.442 -0.782 -0.766 1.107 1.048 0.022 0.823 2.548 1.249 0.747 -0.067 0.000 0.999 0.876 0.988 0.718 0.780 0.671 0.687 +0 0.323 -0.612 -0.674 1.572 -1.658 1.085 -1.065 0.500 0.000 1.298 -1.658 -0.932 0.000 1.056 -0.243 0.949 2.548 1.301 -1.402 0.851 3.102 0.788 1.140 0.982 1.247 0.694 1.206 1.022 +0 0.795 0.140 -0.749 1.015 -1.068 0.493 -1.159 0.144 2.173 0.667 -0.358 -0.624 0.000 0.699 0.239 1.413 0.000 1.435 -0.861 1.190 3.102 1.065 0.924 0.975 1.341 0.722 1.098 0.908 +1 0.975 2.058 0.686 1.015 0.081 0.831 0.705 -0.999 2.173 0.593 1.054 1.663 0.000 0.716 -2.126 -1.049 0.000 1.030 1.131 0.573 0.000 0.858 0.975 0.992 1.016 0.986 0.945 0.801 +0 0.845 1.026 -0.231 1.208 0.641 1.112 2.071 -1.213 0.000 0.741 -0.818 -0.355 2.215 0.913 -0.531 1.544 0.000 1.169 0.895 1.272 0.000 0.836 0.994 0.991 0.616 0.868 0.785 0.693 +0 0.352 0.838 0.984 2.691 1.306 1.038 1.023 -0.630 2.173 0.880 0.241 -0.729 0.000 1.183 -0.001 0.413 2.548 0.868 0.122 -1.661 0.000 0.849 0.940 0.985 1.826 1.341 1.268 1.051 +0 1.195 0.272 -1.599 0.357 -1.185 0.477 -1.237 1.368 2.173 0.810 0.329 -0.163 0.000 1.148 -0.846 0.348 2.548 0.474 0.662 1.223 0.000 0.788 1.027 0.999 1.264 0.748 1.001 0.861 +0 2.342 -1.590 1.625 0.750 -1.145 1.513 0.440 -0.019 2.173 0.600 -0.055 0.146 0.000 1.147 -0.900 -1.612 2.548 0.371 -0.439 -1.550 0.000 0.632 0.760 1.106 2.743 2.069 1.794 1.322 +0 0.500 -0.872 1.264 0.767 -1.713 1.014 -0.311 0.633 2.173 1.145 -0.142 -1.509 0.000 1.050 0.233 -0.153 1.274 1.684 -1.317 -0.525 0.000 1.022 1.028 0.990 
1.393 0.915 1.165 1.148 +0 0.281 0.263 1.035 2.106 -1.301 0.952 -0.352 -0.688 2.173 0.771 -0.351 -0.023 0.000 1.716 -0.995 1.204 2.548 1.554 0.096 0.505 0.000 0.778 0.944 0.986 0.849 1.688 0.992 0.889 +0 1.464 1.744 -0.451 1.253 0.104 1.117 1.434 -1.687 0.000 1.552 0.783 0.142 1.107 1.420 0.664 1.364 2.548 1.304 0.290 -1.564 0.000 1.086 0.876 0.981 1.117 1.409 1.154 1.230 +1 1.150 -1.556 -1.051 0.368 -0.464 0.686 -0.252 -0.469 2.173 0.880 -1.415 0.361 0.000 0.705 -2.230 0.958 0.000 1.166 -0.699 -1.460 3.102 0.891 0.825 1.001 0.751 0.788 0.874 0.780 +1 0.829 0.175 -1.635 1.634 -1.267 0.485 2.156 -0.234 0.000 0.536 -0.679 0.732 0.000 0.626 1.387 0.337 0.000 1.066 -1.199 1.292 3.102 1.243 0.801 0.976 0.819 0.691 0.740 0.814 +0 0.840 -0.872 0.152 0.473 1.329 1.468 -1.238 -0.137 2.173 1.043 0.327 1.603 0.000 1.352 -0.170 -1.531 1.274 0.587 -0.644 1.243 0.000 0.679 0.587 0.990 1.051 1.934 1.262 1.010 +0 0.374 -0.832 0.956 0.923 -0.913 1.463 0.317 0.483 2.173 0.855 -0.982 -1.439 2.215 1.010 0.498 -0.739 0.000 1.367 0.239 -1.502 0.000 0.839 0.962 0.988 1.846 2.006 1.352 1.212 +1 0.875 0.761 -1.431 0.296 -0.013 0.953 0.639 0.586 0.000 1.389 1.333 -0.975 2.215 0.827 1.584 0.685 0.000 0.816 0.992 1.372 3.102 0.929 0.709 0.982 0.928 0.827 0.939 0.852 +1 1.004 2.119 1.087 0.962 -0.192 1.337 1.017 -1.592 2.173 0.932 -0.968 0.263 0.000 0.820 -1.595 0.130 0.000 0.585 1.275 -0.779 1.551 0.949 0.980 1.244 1.435 0.670 1.157 1.290 +0 0.852 0.020 0.730 0.506 -0.891 1.080 0.026 -0.156 2.173 0.883 1.895 -1.649 0.000 1.700 -1.733 -1.540 0.000 1.210 0.614 0.424 0.000 1.553 1.299 0.984 0.816 0.861 1.145 0.916 +0 0.835 -0.911 0.622 0.436 -0.916 1.437 -0.935 -0.856 0.000 1.295 -0.086 1.105 2.215 0.875 0.678 1.310 2.548 0.780 0.642 0.578 0.000 2.205 1.714 0.988 0.787 0.528 1.260 0.999 +1 1.273 0.578 -1.504 0.366 1.536 2.400 0.606 -0.424 0.000 1.108 0.053 0.323 0.000 1.946 0.526 1.486 0.000 2.766 0.983 1.287 3.102 0.937 0.908 0.981 0.792 0.770 0.678 0.641 +1 0.788 -1.615 -0.225 1.188 -0.385 2.395 -1.534 1.187 0.000 2.656 -0.660 -0.647 0.000 1.113 -0.674 0.983 2.548 0.812 0.456 -0.910 3.102 1.158 0.876 0.987 1.323 0.877 1.222 1.265 +1 0.966 0.666 0.055 1.455 0.828 0.355 2.038 0.708 0.000 0.691 -0.369 -1.654 2.215 0.862 0.788 -1.095 2.548 0.713 1.280 -1.219 0.000 0.784 1.113 1.055 0.909 0.674 0.795 0.768 +1 0.404 -1.174 -0.508 1.436 1.090 0.701 0.343 1.637 0.000 0.899 1.893 -1.050 0.000 1.101 -0.514 0.251 2.548 1.519 0.943 -0.096 3.102 0.827 1.134 1.046 1.444 0.991 0.987 0.979 +1 0.706 -0.230 1.538 0.754 1.107 0.891 -0.082 0.774 2.173 1.396 -0.678 -1.064 0.000 1.086 -1.149 -0.332 2.548 1.056 -1.679 -0.950 0.000 0.816 1.237 0.984 0.881 1.283 0.870 0.786 +1 0.996 0.662 0.845 0.471 -0.146 0.621 1.557 -1.517 0.000 1.330 -0.083 0.244 2.215 1.157 -1.036 1.411 0.000 1.063 0.064 -0.977 3.102 0.780 0.767 0.984 0.691 0.961 0.983 0.856 +1 0.290 -0.999 1.421 0.347 0.151 0.555 -0.649 -0.479 2.173 0.756 1.104 1.371 0.000 0.757 0.844 0.486 0.000 1.026 0.756 -1.168 1.551 0.839 0.757 0.983 0.812 0.831 0.805 0.689 +1 0.642 2.091 0.036 1.298 1.111 1.525 2.086 -0.899 0.000 1.078 1.173 1.374 2.215 0.831 0.308 0.465 2.548 1.121 -1.012 1.543 0.000 5.320 2.996 1.042 0.859 0.862 1.872 1.586 +0 0.668 -0.119 0.887 0.428 0.775 0.802 0.245 -1.497 0.000 0.602 0.118 -0.051 2.215 0.885 1.320 -0.178 0.000 1.605 2.452 1.455 0.000 1.255 1.023 0.979 0.516 0.353 0.718 0.828 +1 1.408 1.125 -0.476 0.578 -0.702 2.153 -0.830 0.871 0.000 2.059 0.651 -1.087 2.215 0.966 -0.058 -1.050 2.548 0.990 1.746 0.495 0.000 1.046 1.005 0.985 0.899 0.565 0.861 0.774 +1 0.897 
1.228 -0.801 0.264 0.572 0.471 -0.912 -0.001 2.173 0.278 -0.996 1.029 0.000 0.830 0.153 -1.384 2.548 1.308 1.011 -1.550 0.000 1.217 1.157 0.982 0.629 0.863 0.746 0.691 +1 0.887 0.011 1.354 0.823 -1.716 1.108 -0.253 -0.734 0.000 1.137 -0.437 0.405 2.215 0.749 0.785 0.507 0.000 0.449 -1.066 -1.243 3.102 1.761 1.064 0.990 1.172 0.700 0.865 0.918 +0 0.624 0.900 -0.234 2.429 0.436 0.979 1.221 -1.717 2.173 0.638 0.876 -0.794 0.000 0.584 1.727 -1.434 0.000 0.397 -0.132 -1.021 1.551 0.928 0.879 0.989 0.698 0.630 0.922 0.807 +1 0.474 0.890 1.571 0.753 -0.275 0.790 0.082 -1.345 0.000 0.779 0.992 -1.193 0.000 0.667 1.842 0.395 0.000 1.522 0.857 0.746 3.102 0.905 0.863 0.988 0.660 0.309 0.537 0.568 +1 1.151 0.982 1.237 0.765 1.066 1.529 1.164 -0.245 0.000 0.495 -2.483 1.638 0.000 1.267 0.962 -1.540 2.548 0.612 0.551 -0.719 0.000 0.723 1.226 1.000 0.485 0.515 0.734 0.836 +0 0.438 -0.097 -1.222 1.760 0.533 1.386 0.266 -0.953 0.000 1.041 -1.272 1.042 2.215 1.764 0.294 0.465 0.000 0.962 0.695 -0.856 0.000 0.717 1.097 1.217 1.025 1.364 1.001 0.880 +1 1.499 0.005 -0.254 1.205 0.613 0.593 -0.132 1.740 0.000 0.593 -0.122 -0.742 0.000 1.467 0.442 1.566 2.548 0.378 1.176 -0.001 1.551 0.989 0.857 1.311 0.695 0.625 0.777 0.758 +1 0.567 -0.666 -1.415 1.213 0.197 0.799 1.065 -0.010 2.173 0.644 2.845 1.415 0.000 0.950 -0.027 -1.320 0.000 0.392 0.750 -0.840 3.102 0.430 1.212 1.141 0.672 0.406 0.766 1.221 +1 1.073 -1.687 -0.875 1.510 -1.101 1.968 -1.202 0.589 1.087 1.587 -0.716 -0.764 1.107 1.570 -0.107 -1.205 0.000 2.795 0.091 1.105 0.000 2.033 1.855 1.001 2.214 2.521 1.845 1.960 +1 0.638 -1.434 1.738 2.542 1.402 2.976 -0.052 -0.193 0.000 1.138 0.046 -1.440 2.215 1.392 -0.780 1.732 0.000 0.852 -0.084 -0.916 0.000 0.945 0.748 0.993 1.537 1.421 1.164 0.999 +0 0.746 -1.983 0.262 0.211 -0.771 1.027 -2.452 -1.185 0.000 0.413 -1.113 -0.857 0.000 1.133 -1.067 0.053 2.548 3.278 -0.419 1.122 3.102 1.068 1.254 0.977 1.035 1.309 1.406 1.104 +1 1.440 -0.205 -0.386 1.177 -0.009 0.785 2.121 -1.136 0.000 1.739 0.231 1.151 2.215 0.755 -0.148 -1.263 2.548 0.687 -0.996 0.893 0.000 3.069 1.831 0.985 1.595 1.032 1.426 1.485 +1 0.878 0.321 1.706 1.395 0.884 0.773 -0.377 -0.788 1.087 1.270 -0.078 -0.125 2.215 0.928 -0.250 1.059 0.000 0.415 -1.460 -0.935 0.000 0.866 0.987 1.034 1.148 0.850 0.958 0.837 +1 0.778 0.921 -1.002 0.566 0.163 0.825 0.010 1.277 0.000 0.579 -0.708 0.780 0.000 1.415 0.312 -0.736 2.548 0.983 1.105 -1.663 3.102 0.956 0.776 0.989 0.629 0.812 0.778 0.699 +1 0.712 -0.705 0.465 1.298 -1.028 0.642 -0.294 -0.910 0.000 0.756 0.213 1.325 1.107 1.037 -0.471 -0.328 2.548 0.615 2.148 0.963 0.000 2.132 1.404 1.298 0.752 1.003 1.027 0.989 +1 0.964 1.066 0.887 0.490 -0.425 1.223 0.168 -0.097 2.173 0.850 0.358 -1.503 1.107 0.535 -2.569 1.450 0.000 0.893 0.908 1.607 0.000 0.502 0.959 0.987 0.887 1.439 0.962 0.761 +1 0.948 -0.370 0.162 0.995 1.310 0.961 -0.575 -0.146 0.000 1.047 -0.217 1.288 2.215 1.032 0.388 -1.416 2.548 0.930 -0.956 -0.978 0.000 1.068 1.169 1.157 0.752 0.803 0.945 0.839 +1 0.607 0.032 0.377 1.100 0.707 2.024 2.319 -1.078 0.000 1.445 0.319 -0.405 2.215 2.327 1.214 1.023 2.548 1.651 0.514 0.781 0.000 1.157 1.286 0.977 1.941 2.131 1.557 1.249 +0 1.980 -0.593 -1.501 1.676 -1.065 1.343 -0.054 0.357 0.000 0.857 -0.250 -0.102 0.000 1.001 1.020 0.286 2.548 1.124 -0.439 1.096 3.102 0.945 0.970 0.986 0.925 0.921 1.112 1.180 +1 0.494 0.866 0.434 0.534 -1.488 1.251 1.123 0.195 0.000 0.583 0.287 -1.264 2.215 0.865 -0.268 1.394 2.548 0.402 1.372 0.792 0.000 1.182 1.109 0.991 0.602 0.560 0.876 0.768 +1 0.740 -0.267 0.981 1.515 
0.088 0.475 1.067 1.192 0.000 0.602 1.909 -1.589 0.000 0.761 0.401 0.283 0.000 1.297 -0.557 -1.317 3.102 0.908 0.771 1.057 0.780 0.872 0.730 0.636 +1 0.418 -1.341 -1.195 0.993 0.620 0.653 -0.192 1.007 2.173 0.878 -0.415 -0.016 2.215 0.466 -2.093 -1.325 0.000 0.398 -0.352 -1.013 0.000 0.531 0.866 0.988 0.977 0.898 0.869 0.738 +0 1.466 -1.441 1.026 1.494 0.805 0.783 1.658 -0.686 0.000 0.631 -0.129 0.439 2.215 1.826 0.418 -0.916 0.000 1.771 -1.033 1.640 3.102 0.433 0.934 1.006 0.943 1.009 0.975 1.426 +1 0.779 -0.316 1.595 0.344 -1.433 1.283 0.948 0.873 0.000 0.787 2.707 -1.023 0.000 0.956 0.137 -0.212 0.000 0.931 -1.955 -0.728 0.000 0.531 0.941 0.986 0.548 0.471 0.600 0.595 +0 1.097 0.508 1.591 1.625 -1.387 0.619 0.752 -0.070 2.173 0.671 0.273 -0.408 2.215 0.771 0.922 1.020 0.000 1.280 1.800 0.904 0.000 0.674 0.939 0.983 0.949 0.365 0.817 0.847 +1 0.941 1.258 -0.703 0.478 0.833 0.905 1.467 0.876 2.173 0.780 2.348 -1.281 0.000 0.454 0.211 -0.214 1.274 0.530 2.477 -0.550 0.000 0.565 0.894 0.988 0.879 0.851 0.831 0.749 +1 0.495 1.051 -0.732 0.939 1.523 0.880 0.234 -0.857 2.173 0.776 2.259 1.686 0.000 0.951 0.020 1.588 2.548 1.479 0.290 0.551 0.000 2.007 1.441 0.986 0.786 0.927 1.161 0.939 +1 0.645 1.438 0.116 1.023 -1.228 1.050 0.312 1.008 0.000 0.713 -0.451 -1.144 2.215 1.051 0.915 -0.063 0.000 1.297 1.072 -1.542 3.102 0.846 0.923 1.052 1.049 0.913 0.927 0.806 +1 1.819 0.386 0.099 0.568 1.696 1.056 -1.300 -0.140 0.000 1.465 0.224 1.503 0.000 0.805 -0.480 0.574 0.000 2.158 0.365 -1.295 3.102 1.197 1.388 1.396 1.111 0.400 1.173 1.078 +1 0.893 0.540 1.113 1.274 0.383 1.452 0.231 -0.396 2.173 1.120 0.231 -1.027 2.215 2.629 -1.920 1.427 0.000 0.613 1.443 1.333 0.000 4.675 3.190 0.991 1.316 1.010 2.430 2.107 +0 1.034 -0.326 -1.629 1.085 0.544 0.566 2.165 1.739 0.000 0.804 0.242 -0.395 2.215 0.371 0.048 0.020 2.548 0.368 -1.210 -0.755 0.000 1.039 0.718 1.358 0.696 0.220 0.614 0.602 +0 1.629 -1.807 0.711 0.336 -1.223 0.668 -0.779 0.984 2.173 1.133 1.442 -1.302 1.107 1.712 2.119 -0.563 0.000 0.421 -1.723 -1.678 0.000 4.241 2.552 1.010 0.777 2.115 2.018 2.377 +1 0.298 1.146 -0.522 1.039 0.127 0.796 0.352 -1.369 2.173 0.565 -0.528 -0.300 0.000 1.141 -0.494 1.251 2.548 0.885 -1.876 -0.097 0.000 0.902 1.036 0.989 0.999 1.000 0.995 0.869 +1 2.461 -0.146 -0.536 0.933 -0.950 0.608 2.724 1.339 0.000 0.550 0.385 0.750 1.107 1.179 -0.735 0.773 1.274 0.508 -0.189 -1.394 0.000 1.863 1.343 0.996 1.282 0.547 1.396 1.438 +0 1.007 -0.208 1.699 0.385 0.259 1.108 0.938 -1.401 2.173 0.929 -1.030 0.158 0.000 0.682 -0.843 0.940 0.000 1.016 0.329 -0.155 3.102 0.795 0.813 0.985 0.892 1.060 1.188 0.955 +0 0.607 -1.016 -0.654 1.997 -1.594 0.729 -0.657 1.229 2.173 0.818 0.157 0.043 2.215 0.852 0.087 0.600 0.000 0.852 0.649 -0.674 0.000 0.917 0.980 1.141 1.229 1.107 0.946 0.887 +0 0.593 0.627 -0.314 1.696 -1.175 0.863 -1.563 1.272 0.000 0.715 0.231 1.258 2.215 0.924 -0.358 -0.121 2.548 0.960 -1.478 0.320 0.000 1.062 1.117 0.987 0.871 0.863 0.906 1.079 +1 0.354 1.548 -1.558 1.560 -0.314 0.760 1.134 0.025 0.000 2.273 -0.074 -1.737 2.215 1.561 0.597 0.667 2.548 1.780 1.083 -0.582 0.000 0.935 1.131 0.986 1.604 1.819 1.481 1.219 +1 0.732 0.451 0.106 1.338 -0.226 1.057 0.191 -0.067 0.000 1.008 -0.448 1.632 2.215 0.932 0.773 0.876 0.000 0.891 0.896 1.554 0.000 0.767 1.106 0.989 1.526 0.428 1.206 1.110 +1 1.328 -0.711 0.349 0.674 1.086 1.126 -1.116 -1.325 2.173 0.625 -1.471 0.091 0.000 0.654 -0.233 1.032 0.000 0.553 -0.737 -0.313 3.102 0.988 1.216 0.983 0.577 0.668 0.831 0.754 +1 1.383 0.264 0.959 0.529 -0.163 0.477 2.741 1.362 0.000 
1.543 0.606 -0.769 1.107 0.824 1.165 -0.040 2.548 0.594 -0.253 0.858 0.000 0.783 0.976 1.004 0.743 0.834 0.815 0.713 +1 2.025 0.145 0.064 0.499 -0.305 0.436 1.215 -1.554 0.000 1.252 0.598 1.198 2.215 0.944 0.154 -0.966 0.000 0.886 0.437 0.520 0.000 0.963 0.799 0.995 0.892 0.544 0.896 0.797 +0 0.396 -0.918 0.496 0.672 -1.344 1.037 0.122 0.279 2.173 1.309 -1.081 -0.374 0.000 1.549 -0.890 0.268 0.000 1.874 -1.722 -1.226 0.000 0.928 1.097 0.990 0.729 1.707 1.153 1.016 +0 1.511 -0.077 -0.626 0.982 -0.780 0.587 -1.109 0.956 0.000 0.946 -0.810 0.376 2.215 0.918 -0.374 1.435 0.000 0.949 0.208 -1.616 1.551 0.729 0.797 0.983 0.725 0.965 0.871 0.912 +1 0.676 0.287 -1.097 0.527 1.254 1.203 0.431 0.524 2.173 0.949 0.257 0.025 0.000 1.786 0.264 -1.421 2.548 0.958 -0.922 -1.312 0.000 1.480 1.301 0.982 1.030 1.801 1.152 0.985 +1 0.752 -0.269 -1.204 0.882 0.513 0.607 0.137 0.207 2.173 0.992 0.892 -0.429 2.215 1.258 0.420 1.432 0.000 0.911 1.241 1.647 0.000 0.674 1.020 1.128 0.957 0.770 0.833 0.793 +0 1.300 0.079 -0.980 0.480 1.038 1.511 -1.462 -1.504 0.000 0.987 -0.498 0.515 0.000 0.947 0.055 -0.173 0.000 1.528 0.539 0.954 3.102 0.963 0.950 1.061 0.839 0.615 0.772 0.770 +0 0.566 0.628 -0.331 1.025 -0.687 1.647 -1.046 -0.800 2.173 1.488 -1.590 1.142 0.000 1.365 0.594 0.251 0.000 2.429 0.279 1.140 3.102 0.656 1.496 0.991 0.905 2.622 1.586 1.306 +0 1.744 0.293 -0.501 1.144 -0.967 0.801 1.246 0.922 1.087 1.027 0.370 0.307 0.000 1.556 1.001 1.588 1.274 0.485 -0.372 -1.700 0.000 0.975 0.979 0.993 1.214 0.793 1.067 0.955 +1 1.636 0.355 0.033 0.537 -0.373 2.874 -0.931 1.150 0.000 1.774 -1.398 -0.680 1.107 1.537 -0.201 -0.764 2.548 0.598 -1.045 -1.054 0.000 0.871 0.708 0.992 1.455 1.144 1.033 0.902 +0 1.394 -0.688 1.419 0.428 -1.458 0.627 -2.480 -0.795 0.000 1.055 0.207 0.508 1.107 0.609 1.782 -1.461 0.000 1.314 1.667 0.032 0.000 0.962 0.911 0.989 0.951 0.743 0.841 0.942 +1 2.178 1.001 1.288 0.613 -1.633 1.476 2.505 0.087 0.000 1.890 0.439 -1.052 2.215 0.586 -0.707 0.514 2.548 0.399 -2.246 -0.173 0.000 7.960 4.417 0.983 1.362 1.324 2.875 2.237 +1 1.165 -0.681 0.410 0.326 -0.928 1.231 -0.423 0.099 0.000 1.372 -0.162 -1.586 0.000 1.591 -0.332 -0.959 2.548 1.437 -0.057 1.102 3.102 0.918 1.032 0.994 0.880 1.122 0.914 0.768 +1 0.657 0.125 -0.506 0.259 -0.471 0.698 -0.749 -0.190 2.173 0.999 -1.674 1.528 0.000 0.997 -0.425 0.475 0.000 1.778 -0.347 -1.740 3.102 1.613 1.194 0.987 0.614 1.178 0.961 0.829 +1 0.581 -1.114 0.022 0.488 1.513 1.093 -1.188 0.520 2.173 1.281 -0.653 -1.029 0.000 0.826 -0.825 1.493 2.548 0.552 -1.555 -1.332 0.000 0.755 0.751 0.987 0.887 0.926 0.915 0.789 +0 0.878 0.769 -1.108 0.292 0.224 0.591 -0.751 -0.003 2.173 0.753 -1.743 1.546 0.000 0.810 -0.828 1.041 2.548 0.574 0.627 -0.866 0.000 1.576 0.988 0.988 0.802 0.700 0.764 0.764 +0 1.810 -0.784 -1.512 0.388 1.489 0.456 0.607 -0.996 2.173 0.699 -0.644 0.548 0.000 0.543 -0.419 0.182 1.274 0.779 0.632 0.207 0.000 0.798 0.893 0.980 0.762 0.644 0.646 0.692 +0 1.273 0.617 0.148 0.395 1.155 0.807 0.802 0.645 1.087 0.848 0.857 -1.008 2.215 0.895 1.969 -1.208 0.000 0.462 1.382 -1.728 0.000 0.629 0.854 0.989 0.885 1.213 0.753 0.663 +1 0.713 -1.241 -1.592 1.148 0.674 0.836 -0.711 -1.384 1.087 0.633 -1.450 -0.412 0.000 1.139 -2.346 0.018 0.000 0.718 -1.416 1.015 3.102 0.890 0.744 1.117 0.947 0.800 0.895 0.813 +1 0.934 0.768 1.314 0.395 0.217 0.945 0.540 -1.464 2.173 1.020 -0.419 0.522 2.215 0.416 -0.531 -0.347 0.000 0.845 0.359 -0.124 0.000 0.370 0.863 0.991 1.100 1.588 1.000 0.816 +1 1.049 -0.619 1.501 0.127 -1.021 1.048 -1.099 1.100 0.000 1.186 0.038 -1.022 
2.215 1.320 -0.382 0.226 2.548 1.896 -0.103 -0.363 0.000 2.360 1.482 0.982 0.817 1.238 1.186 0.989 +0 1.056 0.171 1.094 0.575 -1.594 1.652 1.936 1.598 0.000 1.352 0.234 0.028 2.215 0.798 -0.013 -0.878 0.000 1.457 -0.311 -0.357 1.551 0.833 0.971 0.979 0.851 0.585 0.788 0.728 +0 1.183 1.136 -1.018 1.394 -0.232 0.707 0.753 1.281 2.173 0.570 0.119 -0.310 0.000 0.452 2.703 -1.515 0.000 0.668 1.460 0.379 0.000 0.965 0.747 1.158 0.765 0.323 0.759 0.679 +0 1.160 -0.248 -0.572 0.633 0.332 0.850 -0.849 -1.077 2.173 1.132 -0.816 0.782 0.000 0.933 -1.308 0.288 0.000 1.249 -0.815 1.742 1.551 0.830 0.912 0.991 0.824 0.620 0.868 0.797 +1 1.114 1.168 1.580 1.282 -0.966 1.040 -0.600 0.056 2.173 0.469 -1.764 1.296 0.000 0.513 -1.080 0.700 0.000 0.802 0.308 -0.793 3.102 0.454 0.921 1.242 0.714 0.829 1.128 1.142 +1 0.632 0.830 -0.929 0.630 -0.487 1.347 -0.019 -0.315 1.087 1.026 -1.208 1.417 0.000 2.047 -0.079 1.015 0.000 1.061 0.000 1.551 3.102 1.570 0.961 0.988 1.520 1.257 1.272 1.495 +0 2.021 0.557 -0.801 0.303 0.156 1.628 1.339 0.979 0.000 2.136 0.843 -1.130 2.215 2.051 -0.069 0.188 0.000 1.661 0.292 1.200 3.102 1.561 1.396 0.990 0.889 1.534 1.519 1.228 +1 0.807 1.393 1.361 0.858 0.326 0.723 1.972 0.026 0.000 0.446 -0.827 -0.041 1.107 1.608 0.265 -1.280 2.548 1.349 -0.389 -1.523 0.000 0.840 0.959 0.986 0.912 0.974 0.841 0.725 +1 0.954 0.667 -1.664 1.061 -0.499 1.025 0.579 0.917 0.000 1.470 0.576 -0.741 1.107 0.502 0.138 0.531 2.548 0.955 1.398 0.888 0.000 0.868 0.597 1.210 0.802 0.856 0.937 0.852 +0 0.283 -0.694 -1.511 1.221 0.586 0.623 -0.662 -0.846 0.000 0.872 -0.529 1.497 2.215 0.486 1.096 -0.440 0.000 0.721 1.291 0.395 3.102 0.805 0.912 0.986 1.527 1.077 1.086 0.982 +0 0.799 -1.774 0.943 1.203 -0.704 0.636 -1.062 -0.047 2.173 0.581 -0.067 1.128 2.215 0.499 -2.052 1.472 0.000 1.106 -0.572 -1.563 0.000 0.886 0.915 1.353 1.093 0.909 0.823 0.798 +1 0.642 1.009 1.411 1.663 -0.376 0.552 0.698 -0.574 2.173 0.764 -0.215 1.252 0.000 1.262 0.268 1.527 0.000 0.708 -0.299 0.254 3.102 0.539 1.063 1.430 0.869 0.582 0.692 0.791 +1 0.480 -1.077 -0.547 0.640 -1.646 0.772 0.024 0.285 0.000 1.193 -0.651 0.543 0.000 2.547 -0.044 -1.158 2.548 1.289 0.800 1.327 3.102 0.860 1.201 0.992 1.602 1.308 1.413 1.370 +1 1.029 -0.317 -1.317 0.819 -0.308 0.905 -0.063 0.898 0.000 0.552 -0.648 0.423 0.000 0.668 0.527 1.537 2.548 2.087 0.160 -0.862 3.102 0.817 1.123 1.005 0.720 0.770 0.721 0.689 +1 1.005 0.449 -1.316 0.636 0.040 0.622 1.018 -0.615 2.173 0.779 -0.606 1.380 0.000 0.909 0.954 0.907 2.548 0.646 -1.102 0.359 0.000 0.805 0.991 1.042 0.680 0.918 0.918 0.794 +1 0.795 1.450 0.435 0.702 -1.369 0.907 0.644 -1.069 0.000 0.755 1.000 -0.687 0.000 1.786 -2.033 1.245 0.000 1.869 0.142 -0.163 3.102 0.682 0.947 1.033 0.995 0.929 0.946 0.876 +1 0.925 0.674 1.630 0.644 -0.917 0.733 0.263 1.162 0.000 1.059 0.492 0.260 0.000 0.822 -0.881 -0.503 2.548 0.528 0.618 -1.197 0.000 0.949 0.942 0.990 0.614 0.149 0.611 0.632 +1 0.818 1.860 0.766 0.462 1.071 0.735 -0.352 -1.232 2.173 0.343 1.304 -0.802 0.000 0.379 1.013 -1.309 0.000 0.775 0.116 -0.268 3.102 0.252 0.663 1.002 0.708 0.642 0.808 0.646 +1 0.309 -0.578 0.052 1.897 0.034 1.197 0.639 -1.522 2.173 0.434 -0.138 -0.028 0.000 0.581 0.680 1.269 0.000 0.413 0.589 -0.007 0.000 0.961 1.039 0.982 1.657 0.812 1.919 1.573 +1 2.452 -0.781 0.204 0.652 0.594 0.444 -0.446 1.300 0.000 0.572 -1.248 -1.144 2.215 0.788 -2.123 -1.248 0.000 1.095 -0.230 -1.413 3.102 1.402 0.979 0.978 1.020 0.413 0.812 0.872 +1 1.815 -1.781 0.031 0.317 1.597 0.720 -0.263 -1.685 2.173 0.661 -2.122 1.219 0.000 1.083 -0.673 -1.189 
1.274 0.424 -0.938 0.139 0.000 0.686 1.037 1.038 0.995 0.544 0.948 0.822 +0 1.002 0.257 0.346 0.609 0.424 0.438 -0.647 -1.047 0.000 0.652 -0.127 1.209 2.215 1.243 0.210 -0.523 2.548 0.794 0.742 -1.643 0.000 0.893 0.775 0.990 0.829 0.972 0.750 0.745 +1 0.663 1.282 0.372 0.981 -1.618 0.464 2.050 -1.356 0.000 0.444 0.894 -1.040 2.215 0.450 1.970 -0.214 0.000 0.811 -0.709 1.114 3.102 0.705 1.304 1.090 0.628 0.743 0.788 0.705 +0 1.500 -0.052 -0.710 0.447 -0.970 1.012 0.312 0.632 0.000 1.630 0.414 -1.367 2.215 0.988 1.159 0.693 0.000 0.398 0.482 -1.682 3.102 0.900 0.679 1.002 1.066 0.211 0.951 1.009 +1 0.768 0.027 -0.585 1.095 1.499 0.819 -1.588 0.878 2.173 0.618 -0.358 -1.033 0.000 0.530 -0.470 0.219 2.548 0.588 -1.725 -0.739 0.000 0.780 1.091 1.211 0.714 0.657 0.835 0.774 +0 1.011 1.403 1.410 0.525 0.414 0.695 0.147 0.149 2.173 0.771 -1.548 -0.591 0.000 0.818 0.230 1.474 0.000 1.524 0.723 -0.875 3.102 1.007 0.908 0.985 1.145 0.956 0.892 0.806 +1 0.622 0.798 -0.993 0.353 1.468 0.568 1.235 -0.134 0.000 0.509 0.766 0.466 2.215 1.612 0.824 1.365 2.548 1.804 1.415 -0.647 0.000 0.934 1.088 0.995 0.711 0.700 0.701 0.752 +1 0.617 -0.880 -1.739 1.248 0.669 1.116 0.165 -0.675 1.087 0.815 -0.045 1.301 0.000 0.704 0.018 0.427 0.000 0.458 0.852 -0.708 0.000 0.893 1.091 1.004 0.567 0.425 0.806 0.740 +1 0.976 -0.338 -0.840 0.266 0.011 1.287 0.008 0.746 1.087 0.905 -0.822 -1.277 0.000 0.670 -1.304 -0.510 0.000 0.467 -1.145 1.678 1.551 0.841 0.542 0.986 1.122 0.865 0.931 0.856 +1 1.380 0.468 -0.083 2.257 -0.477 2.122 0.291 -1.013 2.173 1.557 -2.280 0.812 0.000 3.721 -0.845 1.211 0.000 0.916 0.233 0.415 0.000 0.826 0.558 0.992 1.483 1.052 1.097 1.030 +1 0.564 0.065 0.700 0.949 1.742 0.800 0.111 0.145 2.173 0.649 -1.409 -1.368 0.000 0.730 -1.122 -0.364 0.000 0.478 0.843 1.446 3.102 0.839 0.899 0.984 0.930 0.676 0.812 0.842 +0 0.774 -1.904 0.443 1.010 -1.563 0.675 0.459 -0.973 0.000 0.941 0.914 -0.402 2.215 1.028 -0.577 1.687 2.548 0.502 0.056 1.180 0.000 0.847 0.801 1.191 1.985 1.349 1.330 1.131 +1 0.795 0.308 1.304 1.933 0.721 1.018 -0.089 -0.620 2.173 0.628 0.246 -1.405 0.000 0.496 2.611 -1.724 0.000 0.774 0.709 0.007 0.000 0.911 0.859 0.991 0.738 0.759 0.910 0.796 +1 0.908 0.872 -0.430 0.679 0.658 0.418 -2.499 -1.145 0.000 0.371 1.920 -0.982 0.000 0.855 0.376 0.616 2.548 0.755 -0.919 0.975 3.102 0.218 0.939 0.985 0.656 0.548 0.676 0.637 +0 0.293 -1.776 -0.717 1.418 0.858 1.179 -1.578 -0.258 0.000 1.017 -0.472 1.519 2.215 1.014 0.034 -1.400 2.548 0.694 1.898 -0.447 0.000 1.177 1.388 0.992 0.771 0.600 1.056 0.908 +1 0.457 -1.194 -1.139 1.075 -1.722 0.752 -2.528 -1.705 0.000 1.787 0.795 0.412 2.215 1.193 1.079 0.305 2.548 2.026 0.583 -0.628 0.000 0.684 3.049 0.977 1.395 0.323 2.556 1.914 +1 1.825 0.002 -1.336 0.527 1.003 0.620 -0.688 -0.161 2.173 0.475 -1.117 0.068 2.215 1.310 -0.175 0.806 0.000 0.568 -0.791 -1.118 0.000 1.009 0.875 1.167 0.924 0.245 0.757 0.710 +1 1.785 0.701 -1.416 0.053 -1.156 0.897 0.429 -0.283 0.000 1.235 0.491 1.163 2.215 0.568 -0.484 0.163 0.000 0.466 0.998 0.057 0.000 0.817 1.217 1.001 0.939 1.324 1.044 0.965 +1 0.865 -0.648 1.073 2.164 1.625 0.844 -0.216 -0.033 2.173 0.761 -0.293 -1.221 0.000 0.723 0.345 -1.048 2.548 1.089 0.759 0.372 0.000 0.965 0.952 0.988 0.843 0.826 0.928 0.820 +1 0.688 -0.688 -1.275 0.140 -0.418 1.549 -0.583 -0.142 2.173 0.985 -0.287 -1.730 0.000 0.822 1.819 1.080 0.000 1.078 -0.363 -1.080 3.102 0.897 0.737 0.992 1.132 1.028 1.004 0.851 +0 1.738 0.359 1.689 0.741 1.375 0.673 0.258 -1.264 2.173 0.780 -0.461 0.151 0.000 0.649 -0.183 0.539 0.000 0.853 -1.944 -0.817 
0.000 1.304 1.313 0.984 1.366 1.290 1.252 1.140 +0 0.576 0.343 1.317 1.394 0.070 1.741 -0.614 -1.353 2.173 1.566 -0.199 0.407 2.215 0.749 -0.813 1.647 0.000 0.688 -0.578 -0.198 0.000 0.793 0.952 1.120 1.601 2.482 1.372 1.061 +1 1.244 -1.721 -0.998 0.505 -0.936 0.874 -1.178 0.721 2.173 0.764 -1.168 0.287 0.000 0.478 -1.487 1.527 0.000 0.500 0.302 -1.336 0.000 0.857 0.698 0.990 1.135 0.698 0.803 0.701 +1 1.525 0.442 1.474 0.419 0.150 0.761 -0.618 -1.002 2.173 0.724 -0.719 0.101 1.107 0.890 0.346 -0.543 0.000 1.087 -0.085 0.530 0.000 0.931 0.953 1.030 0.946 0.918 0.853 0.757 +1 0.313 -0.852 -0.562 0.433 0.533 0.762 0.471 -0.145 2.173 1.144 0.030 -1.449 2.215 0.907 -1.451 1.595 0.000 0.496 -0.354 1.229 0.000 0.517 0.861 0.977 0.663 1.305 0.949 0.847 +0 0.322 -1.405 -0.196 0.868 1.689 0.565 -0.334 0.132 0.000 1.011 -1.308 0.047 1.107 0.828 1.058 -0.940 2.548 0.488 0.055 1.424 0.000 0.912 0.705 0.989 0.954 1.738 1.306 1.053 +0 1.459 -0.899 -1.256 0.285 0.707 0.943 0.720 0.691 2.173 0.500 -1.382 -1.367 0.000 1.204 0.396 -0.543 0.000 0.693 0.098 1.188 0.000 0.865 1.263 0.986 0.687 0.377 0.941 0.817 +0 0.620 0.199 -0.401 0.635 1.359 1.354 0.445 -0.923 2.173 1.631 -1.158 0.437 2.215 1.196 0.065 1.491 0.000 0.762 -2.213 1.149 0.000 1.952 1.622 0.990 0.912 2.871 1.777 1.453 +1 1.961 0.451 1.730 0.609 -1.442 0.798 0.394 -0.097 2.173 0.330 0.117 0.442 0.000 0.596 -0.153 0.753 2.548 0.659 -0.320 -0.722 0.000 0.549 0.519 0.983 0.741 0.644 0.837 0.669 +1 0.691 1.177 0.070 1.096 -0.845 1.005 0.354 1.204 2.173 1.196 -0.020 1.730 0.000 1.209 0.327 -0.670 0.000 1.616 0.431 -0.028 0.000 0.852 0.808 0.986 1.120 0.843 0.871 0.800 +1 0.527 1.443 0.909 0.867 -0.265 0.444 1.925 -0.243 0.000 0.910 0.449 1.309 1.107 0.863 2.241 -1.198 0.000 1.305 -0.240 1.530 3.102 0.893 1.329 0.988 0.832 0.425 1.072 0.904 +1 0.493 -0.642 0.359 0.247 -0.793 0.884 0.643 -1.413 0.000 1.033 0.436 0.617 2.215 1.269 0.929 -0.321 0.000 1.611 -0.050 1.742 1.551 1.626 1.240 0.990 0.693 1.032 0.995 0.825 +1 0.625 -1.036 0.379 0.728 1.580 1.636 0.336 -1.537 2.173 0.719 -0.020 0.561 0.000 1.682 -0.064 -0.115 1.274 0.371 0.151 -0.518 0.000 0.560 1.212 0.990 0.868 2.026 1.098 0.885 +1 0.479 0.731 0.745 2.542 1.423 0.781 0.081 -0.563 2.173 0.319 2.145 -0.077 0.000 0.271 0.549 -0.949 2.548 0.520 1.439 0.292 0.000 0.317 0.757 0.994 0.648 0.247 0.925 0.756 +1 0.787 0.347 -0.441 0.846 0.716 0.596 -0.626 -1.361 2.173 0.680 1.006 0.247 0.000 0.471 2.102 1.325 0.000 1.024 0.648 -1.269 3.102 0.929 0.807 0.988 0.872 0.636 0.874 0.777 +1 0.603 -0.206 1.061 0.813 -0.166 1.174 0.743 1.193 2.173 1.281 0.196 -0.303 0.000 0.905 0.830 -1.384 1.274 0.978 0.123 -0.989 0.000 0.846 0.878 0.987 1.272 0.943 0.996 0.954 +1 0.729 -1.717 -0.464 0.917 -1.510 2.182 -1.620 -0.910 0.000 2.733 -1.271 0.737 2.215 0.994 -0.325 1.068 2.548 0.749 -1.669 1.316 0.000 0.736 0.860 0.987 1.081 1.007 1.122 0.867 +1 0.313 0.134 -1.634 1.235 -0.795 0.955 0.316 1.159 2.173 0.761 -0.944 0.024 2.215 0.344 0.057 -1.025 0.000 0.383 -0.618 -1.676 0.000 0.276 0.602 0.985 0.743 1.378 1.010 0.740 +0 1.242 -0.467 1.335 0.830 -0.557 0.591 0.432 -1.201 2.173 0.769 1.174 0.488 0.000 0.307 0.099 -1.501 1.274 0.586 -2.141 1.225 0.000 2.881 1.527 1.394 0.938 0.166 1.058 0.933 +0 0.867 -0.305 0.857 1.095 -0.599 0.524 0.725 -0.741 0.000 0.773 -0.984 0.094 2.215 0.713 -0.562 -1.381 2.548 0.547 -1.369 1.134 0.000 1.463 0.920 1.305 0.808 0.782 0.738 0.697 +1 1.217 -0.038 0.862 0.793 -1.092 0.745 1.435 1.377 0.000 0.709 -0.380 0.104 0.000 1.246 0.942 -0.445 2.548 1.403 -0.124 -0.834 3.102 0.782 0.820 1.337 
0.854 0.722 0.767 0.669 +0 0.696 0.741 1.294 1.161 -0.657 0.798 -0.459 1.189 1.087 0.762 -0.542 -0.597 2.215 0.568 -0.267 0.697 0.000 0.572 1.332 0.245 0.000 0.996 0.914 1.224 0.912 1.148 0.901 0.871 +0 0.749 0.968 0.010 0.992 1.232 0.770 1.412 0.251 1.087 0.554 1.392 -1.208 0.000 1.163 0.183 -1.281 2.548 1.040 -0.613 1.223 0.000 0.864 0.980 1.065 0.748 1.389 1.190 1.030 +1 0.803 1.840 1.150 0.977 1.288 1.547 1.265 0.087 0.000 0.898 0.378 -1.426 0.000 1.745 -0.739 -1.297 2.548 0.799 -0.249 -1.409 3.102 1.204 1.051 0.973 0.791 0.251 1.008 0.877 +1 0.533 -0.703 0.400 1.730 0.180 0.887 0.057 -1.199 2.173 0.793 -1.095 -0.321 0.000 1.593 0.900 1.655 2.548 1.054 -0.279 1.207 0.000 1.266 1.199 0.984 2.469 1.071 1.786 1.409 +1 0.774 -1.057 0.736 0.657 -0.227 0.968 -0.605 -1.141 0.000 1.371 -1.474 1.140 0.000 0.621 -1.151 0.042 2.548 0.744 -0.616 -0.347 1.551 2.450 1.435 0.987 0.524 0.225 0.899 0.810 +0 1.735 -0.522 0.285 0.122 0.516 1.273 0.062 1.657 2.173 0.403 0.051 1.257 0.000 0.736 -2.530 -0.739 0.000 1.688 0.970 1.654 3.102 1.021 0.979 0.993 1.314 0.898 1.064 0.914 +1 0.793 0.503 -0.920 1.837 -0.247 1.180 1.558 0.630 0.000 1.787 1.462 -1.419 2.215 0.914 1.180 1.272 2.548 0.597 0.553 1.730 0.000 1.230 0.816 0.990 1.566 0.903 1.130 1.135 +1 0.793 -0.557 -0.341 0.871 -1.317 1.137 0.152 -1.143 2.173 1.562 0.794 0.960 0.000 2.009 0.559 0.181 2.548 0.410 0.527 -1.601 0.000 0.779 1.065 0.984 0.961 1.805 1.163 1.156 +0 0.826 0.508 -1.254 0.892 -0.563 1.564 -1.330 0.874 0.000 1.390 -0.063 -1.131 2.215 1.442 -0.655 0.304 2.548 1.049 -2.275 -0.879 0.000 0.865 0.976 0.987 0.641 1.532 1.267 1.132 +1 0.827 -1.069 -1.510 1.764 1.710 0.938 0.240 1.347 1.087 0.686 -1.915 -0.125 0.000 1.592 2.013 0.512 0.000 1.298 0.577 -0.182 0.000 0.685 0.767 1.000 0.839 1.273 1.129 1.054 +0 0.401 -0.731 0.449 0.365 0.795 0.764 -0.010 -1.187 2.173 0.844 0.429 1.097 0.000 0.996 0.378 0.070 2.548 0.699 -1.952 -0.297 0.000 2.116 1.445 0.995 0.910 1.012 1.062 0.930 +0 0.336 1.414 -0.213 1.781 1.715 0.706 0.713 -1.307 0.000 0.894 1.808 0.578 0.000 1.047 -0.382 -1.198 0.000 0.982 -0.914 -0.221 0.000 0.942 0.840 1.057 1.479 0.390 0.948 0.941 +1 0.669 -0.352 0.358 0.544 -1.347 0.849 1.402 0.426 0.000 1.410 0.475 -0.315 0.000 2.084 0.167 -1.637 2.548 1.029 0.433 1.173 3.102 0.987 0.794 0.987 1.020 0.668 0.902 0.949 +0 3.480 -0.174 -1.289 0.637 -1.577 1.741 -1.803 0.434 0.000 0.819 -0.354 -0.649 1.107 0.906 -0.814 0.514 2.548 0.911 -1.688 1.211 0.000 1.254 0.910 0.978 0.875 0.830 1.035 1.555 +0 1.373 -2.041 0.824 0.092 0.709 0.858 0.292 -0.222 0.000 0.688 -1.219 1.451 2.215 0.924 -0.274 -1.475 2.548 0.543 -2.269 -1.456 0.000 0.684 0.995 0.989 0.657 0.587 0.950 1.097 +0 0.562 0.972 1.152 0.420 1.356 0.710 -0.650 0.291 2.173 1.035 -0.765 -0.758 1.107 1.535 0.518 -1.509 0.000 0.401 -1.142 0.690 0.000 1.259 1.172 0.992 0.816 1.026 0.946 0.825 +1 0.336 -1.189 -1.503 0.911 -0.072 1.164 0.578 -0.743 2.173 1.233 -0.006 1.452 0.000 1.033 -0.342 0.837 0.000 0.392 -0.351 0.402 3.102 0.966 0.614 0.993 0.876 0.719 0.957 0.813 +1 0.458 0.628 -0.427 0.409 -0.463 0.685 -0.305 -1.486 0.000 1.109 -1.441 0.488 2.215 0.689 -0.845 1.569 0.000 0.609 0.931 1.432 0.000 0.903 0.805 0.986 0.898 0.617 0.907 0.781 +0 0.379 0.037 -0.744 1.226 1.027 0.546 -2.800 0.928 0.000 0.892 0.724 -1.217 2.215 0.702 -0.355 -0.303 1.274 1.372 1.011 -0.723 0.000 0.955 0.899 0.988 0.804 0.796 0.657 0.656 +1 1.727 -0.498 -1.120 0.731 1.664 1.132 -0.573 0.390 2.173 0.658 0.027 -0.251 0.000 0.381 -1.458 0.621 0.000 0.657 0.243 1.679 3.102 0.884 0.781 0.989 0.556 0.932 0.889 0.770 
+0 0.628 -1.046 -0.454 0.896 0.196 1.142 0.554 1.387 0.000 2.028 -0.329 -0.851 2.215 0.570 -0.358 1.015 0.000 1.018 0.074 0.501 3.102 0.862 0.768 0.989 1.005 1.249 1.142 0.982 +1 0.574 0.600 0.940 0.846 -0.593 1.459 -0.399 -0.477 2.173 1.322 -0.926 0.987 0.000 1.068 1.908 1.300 0.000 1.023 -0.858 -1.248 3.102 1.190 0.908 0.988 0.922 0.925 1.014 0.882 +0 0.540 1.383 1.538 0.611 -0.367 1.084 0.822 1.311 2.173 1.041 -0.598 -0.529 2.215 0.868 -1.656 0.416 0.000 0.808 -0.847 -1.249 0.000 0.997 0.932 0.986 1.115 1.985 1.388 1.560 +0 1.941 0.336 -0.060 0.369 0.758 1.039 -0.226 -1.656 2.173 0.505 -0.450 1.456 0.000 0.592 -1.327 -0.648 0.000 1.205 -0.858 0.161 3.102 0.906 0.874 0.980 0.870 1.278 1.030 0.897 +1 0.841 -0.136 -1.431 1.450 0.052 0.738 -0.381 -0.146 0.000 1.347 0.766 1.697 2.215 1.095 0.616 0.263 2.548 0.686 -1.326 0.829 0.000 1.089 1.016 1.488 1.312 1.243 1.107 0.984 +1 1.647 -0.522 0.343 0.513 0.082 0.417 0.383 -1.170 1.087 1.214 1.590 -1.348 0.000 0.659 0.371 0.427 2.548 0.533 1.081 0.990 0.000 0.918 0.942 0.984 1.039 0.648 0.733 1.119 +1 0.420 -1.688 -1.293 0.450 -0.670 1.235 -0.211 0.750 0.000 0.857 0.061 -0.893 2.215 1.005 -1.233 -1.687 2.548 0.810 -0.575 0.338 0.000 0.751 0.758 0.981 0.722 0.997 0.648 0.588 +1 0.851 -1.194 -0.806 0.930 0.067 0.610 -0.822 -0.048 0.000 0.422 -0.448 -0.251 0.000 0.619 1.253 -1.673 0.000 0.421 -2.354 -1.614 0.000 0.963 0.976 0.986 0.879 0.372 0.715 0.699 +1 0.424 1.480 -1.248 0.361 -0.496 0.973 -0.054 1.611 0.000 1.458 -0.650 -0.229 2.215 0.596 0.222 0.608 2.548 0.710 -0.578 0.483 0.000 1.156 0.797 0.991 0.933 0.824 0.892 0.785 +0 0.305 -0.054 1.522 0.948 -0.956 0.563 -0.221 -0.141 0.000 0.977 0.415 0.961 2.215 0.714 -2.367 0.901 0.000 1.427 0.344 -1.133 3.102 0.864 1.019 0.988 0.650 1.013 0.738 0.730 +0 0.861 0.407 0.623 2.301 1.114 0.943 1.078 -0.827 0.000 0.569 2.089 -1.018 0.000 1.373 0.271 -1.122 2.548 2.117 -0.388 0.398 3.102 0.904 0.964 0.987 1.201 1.375 1.247 1.246 +1 1.908 -0.359 0.491 1.126 1.018 0.894 -1.042 -0.945 1.087 0.573 -0.377 -0.488 0.000 0.769 -0.626 -1.623 1.274 0.397 0.100 -1.191 0.000 0.400 0.524 0.988 0.868 0.622 0.954 0.765 +0 0.409 -1.936 1.310 1.316 0.147 0.685 0.575 -1.016 2.173 0.737 0.751 0.423 0.000 1.178 -0.024 -1.495 2.548 0.653 -0.977 1.226 0.000 1.187 1.031 0.990 2.364 0.581 1.696 1.495 +1 1.577 0.788 1.222 0.762 0.461 2.274 0.441 -0.055 0.000 1.816 0.993 -1.307 0.000 0.718 0.101 1.606 2.548 0.626 1.497 -0.735 0.000 0.879 0.858 0.986 0.622 0.460 0.539 0.700 +0 1.440 0.466 0.995 0.082 -1.377 0.680 0.374 -0.093 2.173 1.227 -0.834 -1.212 0.000 1.033 -1.169 -0.756 0.000 1.034 -1.883 0.574 0.000 0.783 0.861 0.984 0.861 0.867 0.892 0.881 +1 0.900 -0.534 0.823 0.896 0.038 0.824 1.035 1.216 1.087 1.112 0.683 -1.437 2.215 0.706 -0.547 0.224 0.000 0.872 1.988 -0.706 0.000 0.853 0.922 0.993 1.100 0.989 0.914 0.785 +1 1.722 1.003 0.098 0.698 0.134 0.351 -1.591 0.470 0.000 0.653 0.606 -0.733 1.107 0.872 -0.497 -1.650 2.548 0.753 0.974 -0.734 0.000 0.775 0.740 0.986 1.418 0.771 0.973 0.877 +0 1.564 1.728 1.652 1.153 0.960 0.671 -0.041 -0.412 0.000 0.450 -0.670 1.662 0.000 0.637 1.267 -0.230 2.548 0.475 -0.035 0.331 3.102 1.181 0.973 1.086 0.853 0.386 0.669 0.939 +0 0.519 1.588 -0.921 1.333 1.568 0.617 0.904 0.956 2.173 0.613 -0.101 -0.590 0.000 1.036 1.217 -0.257 2.548 1.038 0.054 0.395 0.000 0.811 0.904 0.986 0.851 0.910 0.701 0.735 +0 0.609 -0.595 1.181 0.710 -0.059 0.585 0.045 -0.519 0.000 0.768 -0.779 -1.297 2.215 0.733 1.140 0.778 2.548 0.925 -0.392 0.380 0.000 0.865 0.896 0.985 0.762 1.234 0.883 0.786 +0 2.584 0.731 -1.123 
0.620 0.831 1.953 -0.881 0.541 0.000 1.262 1.422 -1.009 1.107 0.666 -0.483 0.204 0.000 0.564 -0.459 -0.660 0.000 0.717 1.399 1.722 1.075 0.896 1.824 1.658 +1 0.766 -0.440 0.946 0.500 -1.116 1.247 -0.274 1.499 2.173 1.012 -0.504 -0.395 2.215 0.520 0.117 -1.114 0.000 1.899 -0.364 0.178 0.000 1.054 0.771 0.987 0.799 1.650 0.980 0.816 +0 0.440 -0.129 0.770 1.682 -0.567 1.283 0.719 1.450 0.000 0.851 1.030 -0.576 2.215 0.818 0.524 0.645 2.548 1.634 2.092 0.170 0.000 0.832 0.877 1.112 0.848 0.820 0.861 0.910 +1 1.565 1.356 -0.034 0.116 0.649 0.787 -0.072 1.510 2.173 0.913 0.598 -1.229 2.215 0.521 1.789 -0.819 0.000 0.967 1.090 0.592 0.000 0.793 0.851 0.975 1.204 0.891 0.916 0.810 +0 1.516 0.896 -1.496 0.484 0.453 0.345 0.406 1.613 0.000 0.976 0.125 0.125 1.107 0.770 1.186 0.362 2.548 0.465 -1.146 -1.014 0.000 0.755 0.876 1.166 1.034 0.604 0.739 0.710 +0 0.571 1.042 0.164 1.559 0.010 0.547 -0.074 0.884 2.173 0.595 -0.796 -1.468 2.215 0.737 -0.318 1.591 0.000 1.381 0.648 -1.149 0.000 0.958 0.898 0.991 1.916 0.784 1.406 1.202 +1 0.945 2.182 1.374 0.906 1.208 1.373 1.123 -0.480 2.173 0.563 0.904 1.688 0.000 0.452 1.230 0.690 0.000 0.665 0.285 0.460 3.102 0.627 1.088 0.983 0.703 0.862 0.959 0.779 +1 1.131 0.286 -1.370 0.294 1.044 0.474 -0.275 -0.277 2.173 0.825 -0.613 -0.808 2.215 0.941 1.070 0.586 0.000 0.905 0.477 0.192 0.000 0.471 1.103 0.989 0.826 0.454 0.715 0.723 +1 1.120 -1.164 0.433 0.568 1.577 0.887 -1.363 -1.012 0.000 1.696 -0.944 0.711 2.215 1.162 -2.197 -1.052 0.000 0.959 -0.640 1.616 3.102 0.993 0.964 0.987 0.688 0.846 1.165 0.966 +0 0.903 1.364 1.229 1.289 -0.145 0.881 1.112 -1.707 2.173 0.795 1.656 0.149 0.000 0.693 0.571 -0.762 0.000 0.640 1.244 -1.040 0.000 0.822 1.040 1.413 1.212 1.449 1.043 0.906 +0 0.637 1.299 -1.708 1.014 -0.413 1.056 1.213 0.361 2.173 0.826 0.639 -0.599 0.000 1.386 -0.041 -1.595 2.548 1.509 -0.804 1.233 0.000 0.843 0.762 1.024 0.931 1.798 1.118 1.025 +0 0.500 1.707 0.262 0.432 1.336 1.235 0.838 0.571 2.173 1.594 0.653 -0.756 2.215 1.060 0.757 -1.364 0.000 0.931 1.258 1.442 0.000 0.735 1.004 0.989 0.765 1.929 1.098 0.893 +0 1.571 0.271 0.681 0.093 1.619 1.373 0.667 -0.902 0.000 1.394 -0.074 1.265 2.215 0.959 0.047 -0.760 0.000 0.954 0.310 0.177 3.102 0.704 0.887 0.996 0.861 0.896 1.064 0.968 +0 0.420 -1.611 -0.946 0.618 0.873 0.800 -0.321 -0.701 0.000 1.166 -1.162 1.424 2.215 1.302 -0.434 0.078 2.548 0.373 1.367 0.616 0.000 1.232 0.979 0.995 0.769 1.317 1.021 0.835 +1 0.889 -1.466 0.976 0.467 -0.578 1.016 -0.257 -1.222 0.000 1.841 -1.352 -0.295 2.215 0.391 -0.847 1.178 0.000 0.810 -0.121 1.455 0.000 0.925 0.917 0.985 0.946 0.775 1.046 0.877 +0 1.937 -0.778 0.707 0.901 0.859 1.242 1.138 -1.278 0.000 0.526 1.318 -0.886 2.215 0.603 0.573 0.666 0.000 1.016 -0.389 -0.301 3.102 1.368 0.875 0.987 1.696 0.764 1.119 0.975 +0 1.270 -1.031 0.683 0.939 1.614 0.496 1.005 -1.686 0.000 0.524 -1.149 -0.202 1.107 0.995 0.420 -0.313 2.548 1.106 -0.749 -0.976 0.000 0.757 0.906 1.125 0.784 0.708 0.904 1.054 +0 0.588 1.402 -0.394 0.782 1.406 0.435 -0.927 1.446 1.087 1.060 -1.121 -0.440 0.000 0.927 -0.547 0.993 2.548 0.745 -0.145 -0.724 0.000 0.767 0.889 0.987 0.950 0.342 0.723 0.850 +1 0.398 -0.917 -0.986 0.205 0.010 0.844 0.101 -0.141 0.000 1.157 2.254 1.592 0.000 1.188 0.828 -0.235 0.000 1.240 -1.856 -1.654 0.000 0.882 0.879 0.986 0.655 1.001 0.857 0.981 +1 1.103 1.345 -1.159 1.544 0.566 1.054 1.123 -0.203 0.000 0.407 -1.612 0.593 0.000 0.516 1.450 0.877 2.548 1.865 1.016 -1.724 1.551 0.857 0.728 1.808 1.120 0.555 0.766 0.799 +0 2.092 0.338 -0.879 1.476 -1.655 2.391 -0.403 0.713 
1.087 1.542 0.150 -1.473 2.215 1.845 0.795 -0.577 0.000 2.320 0.172 0.264 0.000 1.750 1.796 1.566 2.450 2.723 1.862 1.677 +1 0.741 -0.218 -0.446 0.681 0.705 0.381 0.061 -1.139 2.173 0.267 2.094 -0.120 0.000 0.737 -1.243 0.724 2.548 0.550 1.229 -1.637 0.000 0.552 1.081 0.984 0.695 0.833 0.717 0.688 +0 5.039 -0.657 1.147 0.310 1.306 1.227 0.461 -0.717 0.000 1.134 0.935 -0.452 0.000 1.854 -0.180 -0.177 2.548 0.617 -0.408 -1.260 0.000 0.909 0.980 1.007 1.788 1.036 1.180 1.303 +1 0.840 -1.240 -0.221 1.017 0.119 1.294 -1.102 0.029 0.000 0.964 -1.306 -1.578 0.000 1.544 -0.662 1.523 1.274 1.082 -0.845 -0.881 3.102 2.370 1.383 0.989 1.100 0.830 1.063 0.961 +0 1.894 -0.111 1.603 1.188 -1.630 1.198 0.947 -0.274 2.173 0.590 -0.829 1.156 1.107 0.542 0.188 0.516 0.000 0.831 0.038 -0.795 0.000 0.688 0.798 0.989 0.831 1.745 1.255 0.962 +1 0.849 -1.358 -0.567 1.136 -1.581 1.503 -1.430 0.551 2.173 0.539 -0.734 -0.928 0.000 1.240 -2.185 -1.653 0.000 1.250 -1.463 -0.086 0.000 0.923 1.211 1.076 0.662 0.725 0.869 0.790 +1 1.278 -0.699 -1.423 0.984 1.325 0.924 -0.334 0.111 1.087 0.735 -1.070 1.051 0.000 0.361 -0.704 -0.266 0.000 1.041 -0.272 -0.732 3.102 0.857 0.976 0.990 0.726 0.715 0.813 0.726 +1 0.431 1.550 -0.705 1.190 0.815 1.356 0.144 1.419 2.173 0.968 0.455 -0.146 0.000 2.486 0.332 -0.901 0.000 1.294 0.473 0.615 3.102 1.502 1.283 0.987 1.172 0.977 1.255 1.092 +1 0.786 -0.167 0.934 0.946 -1.732 0.915 -0.951 -0.256 1.087 0.644 -2.034 1.467 0.000 1.191 -0.534 -1.102 2.548 1.037 0.363 0.004 0.000 0.641 0.788 0.987 1.293 0.931 0.945 0.778 +0 0.812 -1.023 0.164 1.224 0.842 0.687 -0.900 -1.244 2.173 0.720 0.156 -1.629 0.000 0.528 0.776 0.388 0.000 1.028 0.129 -0.732 3.102 0.976 1.005 0.998 0.821 0.634 0.767 0.743 +0 1.330 -0.807 1.360 1.665 0.244 0.932 0.081 1.474 0.000 1.667 0.867 -0.634 0.000 1.351 0.499 -0.328 1.274 0.651 0.362 -0.986 0.000 0.972 0.928 1.741 1.441 0.391 1.047 0.992 +0 0.681 0.222 -0.517 0.977 0.425 1.144 0.271 -1.348 2.173 1.556 -0.269 0.406 2.215 0.922 0.166 0.866 0.000 0.953 -0.720 -0.514 0.000 1.003 0.946 0.990 1.130 2.038 1.121 0.923 +0 1.059 0.052 0.123 0.730 1.344 0.987 -0.051 1.386 2.173 0.718 -0.729 -1.160 2.215 0.901 0.105 -0.492 0.000 1.027 0.747 0.049 0.000 0.654 0.882 1.086 0.866 1.027 0.863 0.769 +0 1.518 1.227 -1.711 0.913 1.616 1.608 0.493 0.322 0.000 1.558 1.440 -1.213 2.215 0.491 0.027 -0.453 2.548 0.843 0.206 -0.054 0.000 0.896 0.747 0.980 0.976 0.942 1.180 1.099 +0 1.918 -0.086 -0.813 3.775 -0.917 4.190 0.138 0.786 1.087 1.531 -0.861 -1.335 0.000 0.859 1.124 -1.095 2.548 0.781 -1.041 -0.382 0.000 0.928 0.942 0.981 3.987 2.712 2.566 1.882 +1 1.261 -0.336 1.479 0.725 -0.786 0.982 -1.012 -1.582 1.087 1.085 -0.492 0.528 0.000 0.709 2.672 0.411 0.000 1.566 -0.728 -0.758 3.102 0.890 0.928 1.182 0.840 0.894 0.897 0.808 +1 1.194 0.165 0.667 0.634 1.614 0.981 0.797 -0.868 0.000 1.130 -0.396 1.020 2.215 0.731 1.368 -1.340 0.000 0.523 0.473 1.096 0.000 0.831 0.950 0.987 0.718 0.646 1.025 0.889 +1 0.960 0.282 1.241 1.069 -1.214 1.038 0.458 0.729 2.173 1.034 -2.815 -0.873 0.000 0.743 0.805 -0.640 0.000 1.516 0.213 -1.333 0.000 0.780 1.230 1.125 1.140 1.185 1.037 0.925 +1 0.673 -0.414 -1.389 1.812 -0.565 0.515 -0.317 0.450 2.173 0.780 1.019 1.298 2.215 0.584 -1.072 -0.962 0.000 0.811 -1.459 0.079 0.000 0.651 0.724 1.036 1.260 0.944 0.931 0.867 +1 0.942 -0.537 0.408 0.540 -0.258 0.847 -0.308 -1.087 0.000 1.405 -0.552 -0.555 2.215 1.465 -0.960 1.474 0.000 3.348 -0.785 0.539 0.000 1.123 0.727 0.986 0.896 0.809 0.958 0.975 +0 1.249 -1.702 0.479 0.740 0.174 1.648 -0.284 -1.184 0.000 1.383 
-0.782 0.301 0.000 1.206 -0.151 1.143 1.274 1.010 -0.967 -0.777 0.000 1.031 0.883 0.993 0.846 0.606 0.732 0.724 +0 1.170 1.016 -0.510 0.573 -1.337 0.673 1.421 0.430 2.173 0.822 -0.102 -1.565 2.215 0.538 -1.736 1.257 0.000 0.384 0.025 -0.250 0.000 0.748 0.764 0.986 0.938 1.413 1.023 0.919 +0 0.534 -1.050 1.221 0.904 -0.219 0.881 -1.299 -0.820 2.173 0.555 2.173 -1.357 0.000 1.954 0.148 0.618 1.274 1.681 0.625 -1.581 0.000 1.054 1.630 0.984 0.840 2.052 1.750 1.376 +0 0.641 0.611 -0.420 0.069 -1.501 1.363 -0.887 1.603 0.000 1.616 1.026 -0.363 1.107 0.911 1.225 -0.021 2.548 0.798 -0.248 -0.187 0.000 0.792 1.837 0.904 0.982 0.434 1.677 1.266 +0 0.876 -0.699 -0.337 1.168 1.427 0.293 1.454 0.424 0.000 0.848 0.980 1.181 1.107 0.293 1.919 -0.382 0.000 0.593 0.986 -1.008 3.102 0.390 0.581 1.401 0.922 0.592 0.844 0.786 +1 0.666 -0.179 0.021 0.839 -1.618 0.727 0.738 -0.316 2.173 0.749 -0.586 0.067 2.215 0.726 -0.343 -1.636 0.000 1.806 0.532 1.458 0.000 0.799 1.092 1.031 0.852 0.870 0.893 0.764 +0 0.467 -1.656 0.367 1.882 1.167 0.903 -1.543 -1.237 0.000 0.882 -1.805 -0.776 0.000 1.434 -0.572 0.461 2.548 0.666 -0.317 -0.578 3.102 0.817 0.746 0.992 0.773 0.609 0.901 0.908 +0 0.543 0.269 -1.743 0.978 0.425 0.918 0.498 -1.502 0.000 1.004 -0.219 -1.563 0.000 1.261 -0.096 -0.053 0.000 0.712 -0.690 -0.079 3.102 0.768 0.981 0.990 0.513 0.442 0.717 0.702 +1 0.331 -0.543 1.403 1.619 -1.716 1.076 -0.451 -0.166 2.173 0.844 0.979 -1.107 0.000 1.408 -0.098 0.595 2.548 0.884 -0.773 1.035 0.000 0.952 0.972 0.988 1.444 1.007 1.297 1.074 +0 0.879 0.034 -1.648 0.513 -0.488 0.604 0.062 -0.019 2.173 0.640 0.600 0.920 2.215 0.383 -0.751 0.782 0.000 0.516 0.356 1.428 0.000 0.424 0.570 0.988 0.700 0.731 0.623 0.522 +0 1.573 0.466 -1.588 0.369 -0.918 0.734 1.324 0.521 0.000 0.724 0.306 0.677 1.107 0.514 1.718 -0.574 0.000 0.662 -0.166 -0.980 1.551 0.971 0.917 0.989 0.876 0.644 0.665 0.767 +1 0.421 -1.576 -0.240 0.774 0.245 0.835 -0.101 1.489 2.173 0.716 -1.025 1.201 0.000 1.243 1.068 -0.274 2.548 1.175 -1.986 -0.188 0.000 0.935 0.759 0.998 0.953 1.536 0.981 0.870 +0 0.881 -1.523 1.146 0.436 -0.732 1.077 -1.301 -0.897 2.173 0.396 -1.149 1.051 0.000 1.610 -0.381 0.709 1.274 0.697 -2.142 -0.841 0.000 0.851 0.898 0.990 0.796 1.797 0.979 0.811 +1 2.210 0.185 -1.499 0.471 -0.443 0.905 -0.123 0.182 0.000 0.759 -0.475 -1.201 0.000 1.015 0.553 0.830 2.548 0.506 0.396 0.064 0.000 0.861 0.932 1.152 0.815 0.320 0.695 0.654 +0 0.830 0.827 0.531 0.880 -1.565 0.923 0.303 -0.603 0.000 0.893 -0.223 -0.888 0.000 1.214 1.214 0.853 0.000 0.827 -0.233 0.952 3.102 0.885 0.725 1.125 0.565 0.510 0.512 0.557 +1 0.936 -0.947 -1.606 0.688 -0.593 0.858 -0.267 -1.444 2.173 0.950 -1.353 0.054 0.000 1.457 -0.542 0.454 0.000 1.124 -0.919 1.097 3.102 0.975 0.831 0.986 0.832 0.900 0.956 0.857 +1 1.994 -0.311 1.146 0.565 -1.536 1.196 -0.666 0.067 2.173 1.313 -0.276 -0.750 2.215 1.147 0.062 1.725 0.000 0.389 -0.705 -1.493 0.000 0.815 0.871 0.989 1.310 1.290 1.123 0.930 +0 1.214 -0.822 0.808 0.553 1.018 1.310 0.553 -1.343 2.173 1.275 0.238 0.106 0.000 0.459 0.319 1.038 0.000 0.813 0.777 -0.575 1.551 0.876 0.699 0.989 1.401 0.727 0.962 0.907 +0 0.793 0.185 0.057 2.109 -0.676 1.061 -0.530 -0.981 2.173 1.052 -0.504 0.852 0.000 1.578 -0.506 1.454 2.548 1.422 0.629 0.821 0.000 0.874 0.942 1.098 0.972 1.308 1.053 0.985 +0 0.866 -0.591 -1.366 0.500 0.339 1.083 -0.511 1.351 2.173 1.474 -1.515 -0.284 2.215 0.725 0.000 1.642 0.000 0.697 0.559 -0.161 0.000 0.827 0.901 0.986 1.081 2.110 1.174 0.945 +0 0.351 1.212 -0.899 1.282 0.761 3.184 0.477 0.074 2.173 3.749 1.111 -1.404 
0.000 1.829 -0.155 1.477 0.000 0.575 -0.274 -1.245 0.000 0.860 0.855 0.986 1.832 0.929 1.165 1.103 +0 2.253 0.120 1.482 0.242 -0.943 0.585 1.054 0.307 0.000 0.799 0.592 -1.124 2.215 0.708 -0.341 -0.830 2.548 0.950 1.365 -0.268 0.000 0.654 0.922 0.984 0.801 0.460 0.693 0.793 +1 2.327 -1.339 -0.998 1.421 -1.426 2.703 0.901 0.842 0.000 1.470 -1.536 -0.502 0.000 0.554 -0.781 0.666 2.548 0.708 -0.482 -0.158 1.551 0.834 0.792 0.985 0.835 0.331 0.706 0.673 +0 0.412 -1.225 1.320 0.458 -0.643 0.659 -0.503 1.056 0.000 0.690 -1.304 -0.225 2.215 0.909 -0.862 -1.009 1.274 0.457 -1.314 -1.519 0.000 0.773 0.867 0.980 0.663 0.572 0.631 0.613 +0 1.107 0.512 -0.151 1.110 0.803 0.820 0.639 -1.279 2.173 0.438 0.355 1.629 0.000 0.708 -0.510 -1.686 1.274 0.673 0.996 -0.597 0.000 0.929 0.956 1.165 0.909 0.696 0.820 0.747 +0 0.439 1.428 -0.750 0.801 -1.326 0.997 0.371 0.416 2.173 1.053 0.369 1.544 0.000 0.532 0.863 -1.723 0.000 1.480 -0.695 -0.361 3.102 0.400 1.116 0.999 0.828 1.170 0.962 0.847 +0 0.557 0.046 0.769 1.813 -1.251 0.815 0.145 1.069 0.000 0.838 0.626 -0.573 0.000 1.290 1.198 0.362 1.274 1.071 1.460 1.460 0.000 0.843 0.938 1.350 1.277 1.642 1.055 0.873 +0 1.760 0.062 1.516 1.945 1.172 1.839 -1.119 -0.276 0.000 0.364 -0.882 -1.563 2.215 0.339 -0.975 -0.889 2.548 0.418 -0.357 -0.386 0.000 0.497 0.851 0.984 0.780 0.215 0.550 1.038 +0 0.861 0.523 1.494 1.122 0.162 0.891 -0.998 -1.272 2.173 1.049 0.415 -0.341 2.215 0.763 -1.500 1.383 0.000 0.562 0.391 0.895 0.000 0.966 0.970 1.269 0.896 1.534 1.075 0.979 +1 0.941 0.719 0.837 0.853 -1.300 0.667 -0.281 0.885 2.173 0.851 0.274 0.297 0.000 2.032 0.616 -0.836 2.548 0.766 0.645 1.646 0.000 1.020 1.127 1.164 0.884 1.621 0.938 0.819 +1 0.984 -1.358 -0.851 0.819 1.444 0.574 -0.362 0.316 1.087 0.741 -1.205 0.753 2.215 1.390 -1.706 -1.391 0.000 1.203 -1.337 -0.370 0.000 0.980 0.772 1.092 0.946 0.569 0.702 0.660 +1 0.634 1.476 1.324 0.764 1.313 0.987 1.062 -0.242 1.087 0.723 0.291 -0.622 0.000 1.074 0.638 -1.622 0.000 0.420 -0.286 0.442 0.000 0.842 0.816 1.001 0.636 1.213 0.859 0.731 +0 1.177 -0.160 0.154 3.206 0.497 1.710 -0.932 -1.071 0.000 1.043 1.379 -1.416 0.000 1.712 -0.541 0.648 2.548 1.982 -0.016 -1.328 0.000 0.645 0.912 0.993 0.849 0.984 0.875 1.227 +1 0.991 -0.259 0.322 0.984 1.517 1.253 -0.143 -0.662 0.000 0.518 -0.297 1.470 0.000 0.613 -1.260 1.037 2.548 1.066 0.379 0.500 3.102 0.627 0.912 1.205 0.710 0.712 0.776 0.755 +1 1.908 -1.067 0.395 0.852 0.775 1.158 -0.621 -1.210 1.087 0.427 -0.087 -0.905 0.000 0.808 -0.649 1.078 0.000 0.445 -1.507 -0.495 0.000 0.924 0.918 0.978 0.604 0.758 1.007 0.849 +1 1.091 -1.196 1.560 0.432 -0.523 0.869 -1.486 -0.951 1.087 0.613 -0.964 0.348 0.000 1.456 -1.443 0.892 0.000 0.881 -0.117 -0.312 3.102 0.811 0.887 0.983 0.801 0.846 0.867 0.759 +1 0.947 -1.305 -0.869 1.161 -1.435 0.930 -0.709 1.045 0.000 1.072 -0.187 0.733 1.107 0.894 1.450 -1.010 0.000 0.456 0.498 0.248 3.102 1.015 1.021 0.985 1.006 0.370 1.024 1.044 +0 1.451 -0.692 0.994 0.361 1.178 1.129 0.937 -0.620 0.000 1.459 1.813 1.634 0.000 0.861 -0.077 -1.014 2.548 1.113 0.592 -0.085 0.000 0.820 0.860 0.980 0.848 0.770 0.664 0.860 +1 1.269 -0.268 1.241 1.617 -1.679 0.866 -0.150 -0.472 0.000 0.449 0.468 1.073 2.215 1.565 0.494 0.323 0.000 0.647 0.955 -0.863 3.102 1.562 1.011 0.986 0.656 0.507 0.707 0.895 +0 1.448 0.965 -0.487 2.697 -0.073 1.300 0.295 1.481 0.000 1.441 -0.689 1.743 2.215 0.552 -0.110 0.455 2.548 0.847 -1.679 0.924 0.000 2.006 1.305 1.002 2.334 0.914 1.460 1.460 +0 0.590 -1.376 1.092 1.360 1.324 1.128 -0.535 -0.045 2.173 1.033 0.701 -1.301 2.215 0.623 
-0.519 -1.679 0.000 0.716 0.679 -0.268 0.000 0.894 0.992 0.986 1.116 1.792 1.157 0.923 +0 0.865 0.487 1.738 0.740 0.934 1.112 -0.136 -0.952 0.000 1.150 -1.535 0.288 0.000 0.709 -1.080 1.369 1.274 1.040 0.703 0.204 3.102 2.868 1.712 0.995 0.699 0.972 1.217 1.259 +1 0.740 0.949 -0.060 2.050 0.755 0.951 0.166 -1.259 2.173 0.808 -0.488 1.338 2.215 0.936 0.024 -0.862 0.000 0.822 -0.818 -0.307 0.000 0.727 0.790 1.145 1.442 1.026 1.109 0.895 +1 0.493 -0.478 0.990 1.512 -1.057 0.646 1.120 1.258 0.000 0.552 0.940 -1.476 0.000 2.510 -0.589 0.120 2.548 0.429 -0.076 -1.434 3.102 0.801 0.560 1.152 1.203 0.813 1.098 0.992 +1 1.553 -0.881 -1.612 0.653 0.592 0.502 0.056 -0.091 0.000 0.864 0.566 0.356 2.215 1.564 1.418 -1.488 0.000 0.892 0.553 -0.529 0.000 1.160 1.194 1.276 0.759 0.523 0.874 1.070 +1 0.516 -0.842 -0.676 1.048 0.772 0.531 0.651 -1.292 2.173 0.859 -0.383 -0.318 2.215 0.675 -1.213 0.982 0.000 0.908 -0.679 0.347 0.000 1.005 0.964 0.987 0.717 0.940 0.733 0.675 +1 1.209 0.610 0.479 0.644 1.319 0.784 -0.549 -0.882 2.173 0.324 0.652 -0.556 0.000 0.916 1.399 1.624 0.000 0.678 -1.082 0.216 0.000 0.857 1.226 0.987 0.495 0.855 0.751 0.741 +0 0.965 -1.175 1.416 1.102 1.108 1.205 -1.353 -0.624 0.000 0.706 -1.077 0.327 1.107 0.478 -1.661 -1.339 0.000 0.734 -0.150 1.159 3.102 0.879 0.967 0.985 0.773 0.542 0.739 0.836 +1 2.073 0.007 -1.585 0.668 -1.542 1.095 0.093 0.030 2.173 0.439 0.488 0.362 0.000 0.820 0.454 -0.954 2.548 0.896 -1.177 0.654 0.000 0.932 0.923 0.985 1.483 0.946 0.978 0.920 +1 0.550 1.012 0.376 0.513 -0.586 1.114 2.050 -1.254 0.000 1.266 0.195 1.378 2.215 1.336 0.492 -0.656 1.274 2.194 -0.078 0.709 0.000 2.350 1.971 0.989 0.943 1.355 1.517 1.748 +1 2.048 0.231 -1.410 0.208 0.580 0.998 -0.615 -0.081 2.173 0.563 0.426 0.170 0.000 0.661 -1.065 1.034 2.548 0.699 0.911 1.559 0.000 0.959 0.828 0.985 0.851 0.899 0.912 0.785 +0 0.618 -0.641 1.163 0.123 -1.245 0.843 2.839 -1.400 0.000 0.601 -0.942 0.284 0.000 0.640 1.194 0.841 1.274 0.601 0.466 -0.616 3.102 0.847 0.891 0.989 0.624 0.493 0.923 0.826 +1 0.506 -2.134 0.773 2.364 0.911 1.042 -1.430 -1.323 2.173 0.914 -1.238 0.133 2.215 0.527 1.629 -1.055 0.000 0.383 -0.067 -1.022 0.000 1.118 1.375 0.975 1.391 1.393 1.265 1.259 +0 1.516 -0.713 -1.531 0.723 1.410 1.088 -0.722 0.471 2.173 0.825 -0.537 -0.788 2.215 0.669 -1.880 -0.585 0.000 0.399 -0.016 0.110 0.000 0.767 0.927 0.979 0.823 1.270 0.963 0.836 +0 2.129 0.105 0.268 0.595 0.415 1.650 0.844 -1.510 1.087 0.980 0.474 -0.425 2.215 0.465 0.979 0.574 0.000 0.586 -0.754 1.300 0.000 0.754 0.824 0.976 2.028 1.590 1.407 1.086 +1 0.531 -1.204 -0.660 0.899 1.201 0.552 -0.760 -0.924 0.000 1.103 0.804 1.565 1.107 1.418 0.296 0.194 2.548 0.930 1.366 1.671 0.000 0.891 0.975 0.987 1.086 1.299 0.967 0.864 +0 0.366 -1.671 1.642 2.169 -1.083 1.025 -0.783 -0.068 2.173 1.336 -0.860 1.690 0.000 1.192 -0.844 0.602 2.548 0.787 -1.541 0.279 0.000 1.092 0.969 0.985 1.755 0.787 1.325 1.253 +0 1.049 0.231 0.630 1.188 0.483 0.698 -1.771 -0.726 0.000 0.838 0.766 1.274 2.215 1.271 0.633 -0.968 2.548 0.665 1.818 -1.367 0.000 3.762 2.305 0.994 0.998 0.989 1.548 1.360 +0 0.536 1.071 1.154 2.406 -1.595 0.433 0.935 -0.529 0.000 0.952 1.247 0.549 2.215 0.617 -1.526 -0.549 0.000 0.814 -0.265 0.189 0.000 0.990 0.833 0.984 0.867 0.590 0.816 0.755 +1 0.367 -0.946 0.783 0.763 0.280 1.083 -0.028 -1.680 2.173 0.963 -0.678 0.216 0.000 1.192 0.365 -0.900 2.548 0.651 -0.140 1.024 0.000 0.740 1.021 0.985 1.063 0.966 0.910 0.824 +1 1.647 -0.275 0.131 1.107 -0.388 0.715 -0.537 1.055 2.173 0.353 0.537 -0.920 0.000 0.547 0.296 -1.611 2.548 1.160 
-0.347 -1.681 0.000 0.915 0.925 0.981 0.811 0.630 0.783 0.739 +1 0.722 -1.195 1.588 1.556 -0.985 1.537 -0.143 0.171 2.173 1.306 -0.145 -1.020 0.000 0.996 -0.411 1.214 2.548 1.526 -2.102 1.173 0.000 3.216 1.935 1.077 1.670 1.267 1.683 1.399 +0 0.696 -0.626 1.022 0.347 -0.622 0.608 -0.146 -0.360 0.000 1.235 -0.288 1.622 2.215 1.095 -0.204 0.373 0.000 0.895 -0.121 -1.149 3.102 0.902 0.772 0.989 0.771 0.575 0.784 0.682 +1 0.734 0.336 -0.446 1.303 0.675 1.052 0.158 -0.917 0.000 0.769 -0.128 1.035 2.215 1.698 -0.299 1.615 0.000 1.385 -0.948 0.299 1.551 1.068 1.220 1.148 0.752 0.754 0.930 0.881 +0 1.506 -0.605 0.967 0.307 -1.503 0.925 0.001 -0.457 0.000 0.523 -1.035 -0.984 2.215 0.569 0.902 1.306 2.548 0.824 -0.361 -1.596 0.000 1.177 0.979 0.987 0.751 0.872 0.691 0.758 +0 1.815 -0.957 -0.074 0.830 0.360 1.510 -1.544 -1.573 0.000 1.295 0.248 1.260 1.107 1.726 -0.469 -0.487 1.274 0.495 1.119 0.585 0.000 2.932 2.116 0.976 1.363 1.708 1.603 1.407 +1 0.891 2.240 -0.237 1.022 -1.255 0.728 -0.032 -0.109 2.173 1.405 -0.485 -1.735 0.000 0.513 2.107 1.130 0.000 0.707 -1.554 -0.763 0.000 0.547 0.661 1.048 1.567 0.934 1.106 1.029 +0 2.514 -0.234 -0.993 0.469 1.493 0.989 -0.682 0.267 2.173 0.476 -0.818 0.792 0.000 0.855 0.129 1.248 2.548 0.589 -0.406 -1.572 0.000 0.598 0.719 1.180 0.922 1.012 0.989 0.784 +0 0.653 -0.978 -0.182 1.204 -1.692 0.750 -0.853 1.134 0.000 1.888 -0.614 -0.567 1.107 1.080 0.349 1.497 2.548 1.061 -0.471 0.413 0.000 0.850 0.905 1.201 1.080 1.661 1.097 0.950 +0 0.348 -0.881 -1.509 2.117 -1.564 0.736 -0.542 0.479 1.087 1.016 0.979 -0.199 1.107 0.541 0.646 0.764 0.000 0.612 0.836 -1.221 0.000 0.627 0.827 0.982 1.214 1.314 1.118 0.861 +1 0.487 0.570 0.401 1.204 -1.029 0.686 1.424 -1.044 2.173 1.223 0.046 0.500 0.000 0.995 0.896 1.587 2.548 0.527 0.571 0.097 0.000 0.503 0.916 1.019 0.720 0.754 0.875 0.766 +1 0.335 1.116 0.228 0.826 1.666 1.327 1.339 -0.764 1.087 1.331 0.246 1.042 0.000 0.339 0.805 -0.619 0.000 0.569 0.098 -0.293 0.000 1.061 0.712 0.987 1.308 0.894 1.025 0.901 +1 1.800 0.443 -0.776 0.216 1.320 0.591 0.624 0.740 2.173 0.703 -0.231 -1.212 0.000 0.528 -0.442 0.675 2.548 0.668 0.132 1.261 0.000 0.729 0.839 0.987 0.781 0.411 0.680 0.633 +0 1.061 -1.299 1.631 2.761 1.591 1.543 1.823 0.423 0.000 2.550 1.417 -0.431 2.215 1.589 0.062 1.685 1.274 1.741 -0.543 -1.182 0.000 1.145 1.615 0.998 1.813 2.583 3.883 3.894 +0 0.722 0.080 -1.689 1.179 -0.974 0.619 0.129 0.516 0.000 0.811 1.050 0.085 0.000 1.223 1.005 -1.232 2.548 1.086 -0.468 1.094 3.102 0.915 1.067 0.986 0.745 1.120 0.799 0.849 +0 1.345 0.738 0.518 0.362 0.762 0.507 0.836 -0.793 2.173 1.029 0.212 -1.697 2.215 0.673 -0.775 -0.985 0.000 0.516 0.972 0.076 0.000 0.938 0.858 0.986 0.890 0.844 0.790 0.712 +0 1.410 -2.134 0.812 0.583 0.177 0.664 -1.013 1.144 1.087 1.198 -0.294 -0.733 0.000 1.360 -0.793 -1.649 2.548 1.650 -0.867 -0.681 0.000 0.752 0.958 0.989 0.773 0.697 0.802 0.910 +0 1.009 0.735 0.973 1.358 0.389 1.978 -0.707 -0.996 2.173 0.747 -1.136 0.431 2.215 0.742 -0.291 1.133 0.000 0.697 -1.073 1.374 0.000 0.434 1.303 0.997 1.460 1.765 1.846 1.406 +1 0.610 0.291 1.128 1.010 -0.127 0.800 1.272 -1.370 1.087 1.209 0.425 0.373 1.107 0.535 2.208 -1.078 0.000 0.907 0.652 1.703 0.000 0.840 1.182 0.987 0.986 1.580 0.903 0.813 +0 0.416 0.559 1.691 0.526 -1.409 0.613 0.907 1.534 2.173 0.917 1.523 -0.365 2.215 0.806 1.822 -1.442 0.000 2.042 0.334 -0.188 0.000 1.794 1.173 0.988 0.967 1.151 1.076 1.076 +0 0.643 -1.666 -0.532 1.340 1.096 1.030 1.136 -0.410 0.000 1.673 -0.891 -0.839 2.215 2.478 -0.293 1.094 0.000 1.215 -0.914 1.362 0.000 0.888 
0.837 1.279 1.278 0.948 1.110 1.035 +1 1.121 -0.096 -1.619 1.256 0.935 0.581 -0.862 0.468 2.173 0.941 -1.434 -0.521 0.000 0.742 1.483 -1.237 0.000 1.099 -0.301 0.257 0.000 1.154 0.996 1.224 0.923 0.758 0.702 0.796 +1 1.920 0.327 0.134 0.415 -0.144 0.664 0.531 1.403 2.173 0.549 0.135 1.602 2.215 0.619 1.275 -1.284 0.000 0.461 -0.630 -0.889 0.000 0.794 0.751 0.998 0.910 0.237 0.774 0.697 +1 1.061 -0.613 1.342 0.824 1.192 1.812 1.051 -0.517 0.000 1.040 0.408 0.559 0.000 1.215 0.599 -1.686 2.548 2.248 -0.380 1.690 3.102 0.930 1.063 0.981 0.709 0.752 0.841 0.733 +1 0.722 -0.535 1.040 0.498 -0.360 0.874 0.351 -0.937 0.000 0.753 -0.396 -1.188 0.000 1.532 0.093 0.650 2.548 1.270 0.575 1.561 0.000 0.774 1.024 0.980 0.871 0.633 0.919 0.841 +0 0.910 0.021 1.541 0.421 0.206 0.663 0.597 -0.809 0.000 0.505 1.601 -1.614 0.000 0.903 0.884 0.473 2.548 0.814 -0.249 -0.040 3.102 1.057 0.964 0.990 0.609 0.532 0.683 0.724 +1 1.668 0.993 0.367 0.657 -1.029 2.060 1.715 -1.160 0.000 1.055 0.709 1.387 0.000 1.609 0.484 -0.159 2.548 1.290 -0.098 1.638 0.000 0.746 1.315 1.380 0.657 0.380 0.750 0.770 +1 0.775 -0.824 -1.436 0.873 0.704 0.967 -0.055 1.398 0.000 1.404 -0.757 -0.603 0.000 0.994 -0.696 -0.096 0.000 1.377 0.528 1.060 1.551 0.800 0.621 1.067 0.862 0.700 0.826 0.777 +0 0.907 0.770 0.647 0.126 1.345 0.748 -0.424 -1.040 0.000 0.818 0.804 -0.144 2.215 0.471 -0.124 1.613 0.000 1.021 1.339 1.344 0.000 0.880 1.067 0.979 0.620 0.734 0.695 0.667 +1 1.088 -1.507 0.353 0.544 1.670 1.976 0.784 -1.416 0.000 1.874 -1.169 0.750 0.000 2.659 -0.925 -0.825 2.548 1.683 -0.295 0.487 0.000 0.989 0.883 0.988 1.140 0.738 1.069 0.897 +0 3.344 0.008 -0.364 0.606 -0.566 0.973 0.289 1.363 2.173 0.776 -0.914 1.410 2.215 0.370 1.136 0.338 0.000 0.594 -0.999 -1.692 0.000 0.919 0.873 0.992 1.363 0.840 1.268 1.011 +0 0.842 -0.228 0.625 0.690 -0.464 0.457 -0.745 1.293 0.000 0.648 0.006 -1.536 2.215 1.283 -0.763 -1.443 2.548 1.325 -0.271 -0.150 0.000 1.174 0.977 0.988 0.745 0.429 0.678 0.679 +1 0.933 0.232 0.800 0.365 -0.091 0.770 -0.631 0.940 0.000 0.511 -0.074 0.407 0.000 1.841 -0.267 -1.192 2.548 0.982 1.058 -1.398 0.000 0.724 0.935 0.984 1.001 0.744 0.873 0.761 +1 0.788 -0.858 1.049 0.621 0.426 0.594 0.209 -1.582 0.000 1.148 0.508 0.749 2.215 0.786 -1.002 -1.409 0.000 0.993 0.618 -0.375 0.000 0.886 0.811 0.986 1.410 0.744 1.131 1.022 +1 0.675 0.406 0.824 0.478 1.650 0.772 -0.831 -0.619 2.173 0.711 0.059 0.190 0.000 1.328 -0.398 -1.466 0.000 0.711 -2.262 -1.402 0.000 0.753 1.274 0.983 0.623 0.579 0.900 0.823 +1 0.560 -1.418 -1.552 0.692 -0.663 0.786 0.919 0.431 2.173 0.585 1.478 1.361 2.215 0.640 1.586 -1.054 0.000 0.606 0.037 -0.288 0.000 0.993 1.020 0.989 1.077 0.800 0.939 0.830 +0 0.901 -0.165 -0.759 0.902 1.631 1.112 -1.641 0.223 0.000 1.395 -1.017 -1.421 2.215 1.295 0.872 0.245 0.000 1.088 1.943 0.820 0.000 1.169 1.226 1.042 0.841 0.556 1.583 1.247 +0 0.579 2.041 1.214 0.366 0.264 0.549 1.181 0.083 2.173 0.849 0.136 -1.333 2.215 0.520 0.302 1.574 0.000 0.547 -1.417 0.703 0.000 0.820 1.064 0.977 0.857 1.109 0.791 0.751 +1 1.836 0.054 0.056 1.316 0.152 1.516 -0.374 1.503 0.000 0.906 -0.030 -1.046 0.000 1.403 0.542 -1.280 2.548 1.054 -0.335 -0.485 0.000 0.867 0.750 0.968 0.742 0.847 0.938 0.779 +0 0.925 0.309 -0.652 1.670 -1.509 1.285 -0.494 0.584 2.173 0.714 -0.339 1.248 0.000 1.246 1.204 -1.009 2.548 0.592 1.714 0.044 0.000 1.487 1.238 1.200 1.586 2.266 1.319 1.148 +1 0.359 1.266 1.109 0.561 -0.989 1.047 0.044 -1.335 2.173 1.283 -0.260 1.405 0.000 1.075 0.352 -0.482 2.548 1.809 -0.021 -0.059 0.000 0.903 1.278 0.978 0.733 0.947 1.171 
0.950 +0 0.643 0.100 0.608 0.688 -1.504 0.689 -0.457 1.320 2.173 0.712 0.467 -0.720 1.107 1.384 0.208 -0.015 0.000 0.807 -2.393 1.149 0.000 0.874 1.026 0.984 0.760 1.114 1.012 0.837 +0 1.118 1.031 1.539 0.506 0.420 1.159 1.199 -1.089 2.173 0.933 0.534 0.348 1.107 0.915 -0.809 0.541 0.000 0.621 -1.097 -1.167 0.000 0.850 0.988 0.990 0.997 1.560 1.235 1.094 +0 0.356 -0.139 -0.042 1.344 -0.933 0.859 1.091 1.249 2.173 0.441 0.533 -0.476 0.000 0.692 0.703 0.128 0.000 0.537 1.963 -0.039 0.000 0.698 0.941 0.983 0.865 0.807 1.183 1.099 +1 0.963 1.779 -0.894 1.085 -1.076 1.093 0.117 0.958 2.173 0.596 0.785 -0.595 0.000 0.479 -2.333 1.698 0.000 0.687 -0.513 -0.505 3.102 2.254 1.230 0.994 1.487 0.953 1.090 1.233 +0 0.829 0.569 0.657 0.211 -1.478 0.459 -1.329 -1.074 0.000 1.018 0.198 0.290 2.215 0.909 -0.042 -0.700 0.000 1.444 0.064 1.605 3.102 0.893 0.929 0.992 0.816 1.017 0.846 0.891 +1 1.841 0.666 0.930 0.686 -0.355 1.382 0.329 -0.473 2.173 0.465 -0.425 -1.124 0.000 0.599 0.680 -1.615 2.548 0.435 -1.177 -1.203 0.000 0.481 0.811 1.425 0.821 1.000 0.940 0.761 +1 0.683 -2.110 -0.451 0.511 -0.834 0.499 -0.782 1.652 1.087 0.655 0.968 -1.358 2.215 0.515 -1.688 0.574 0.000 2.438 -0.145 0.381 0.000 1.184 1.076 0.980 1.216 0.924 0.983 0.933 +0 0.499 1.128 0.946 0.444 1.220 1.105 0.529 -1.205 2.173 1.158 0.780 0.247 2.215 0.689 0.762 1.407 0.000 0.664 -0.022 -0.276 0.000 0.816 0.851 1.002 0.890 1.623 0.970 0.780 +1 2.721 1.091 0.725 0.523 0.944 0.950 2.432 -0.909 0.000 0.978 1.136 -0.273 1.107 1.025 1.044 1.626 0.000 0.366 0.473 -0.581 0.000 1.017 1.019 0.992 1.188 0.801 0.953 1.056 +0 0.713 -0.752 -0.208 0.853 0.758 0.865 -0.902 -0.886 2.173 0.883 0.348 -1.457 2.215 1.042 -1.191 1.355 0.000 0.651 1.479 -0.264 0.000 0.802 1.060 0.991 0.901 1.077 0.939 0.863 +0 0.794 0.465 0.881 0.667 -0.885 1.590 -0.302 0.394 2.173 0.806 -0.049 1.636 0.000 2.130 0.321 -1.207 2.548 0.603 1.021 0.382 0.000 0.950 0.915 1.008 1.076 2.400 1.179 0.941 +0 0.326 -1.446 0.350 0.375 1.518 1.199 0.057 1.637 2.173 1.089 -0.655 -0.399 2.215 1.077 -0.311 0.562 0.000 0.988 -2.048 -0.599 0.000 0.644 1.152 0.983 0.837 1.741 0.974 0.802 +1 0.916 -1.352 -0.720 0.320 -1.328 0.579 0.012 1.281 2.173 0.452 -1.914 1.714 0.000 0.979 -1.788 1.122 0.000 1.242 -1.082 -0.140 0.000 1.027 1.112 0.984 0.542 0.676 0.700 0.657 +1 0.965 -0.629 1.720 1.446 -0.529 0.981 -1.063 0.751 0.000 1.020 0.041 -0.431 0.000 1.980 -0.631 1.363 1.274 1.695 -0.993 -0.602 3.102 0.844 0.834 1.470 1.190 1.417 0.941 0.860 +1 1.191 0.296 0.117 0.739 -0.683 1.997 1.225 -1.134 0.000 1.033 -0.331 -0.776 0.000 1.862 0.275 1.168 2.548 2.322 0.114 0.630 3.102 0.902 0.877 0.990 0.877 0.753 0.827 0.776 +0 2.178 -0.851 -0.902 0.262 0.985 0.931 -1.386 0.343 2.173 0.861 -1.596 1.596 0.000 1.291 -1.635 -0.001 0.000 0.655 -0.240 0.969 3.102 1.607 1.055 1.037 1.170 0.654 0.803 0.858 +1 0.441 2.150 1.061 1.092 0.345 0.766 1.479 -1.504 0.000 1.028 0.064 -0.042 2.215 0.792 0.416 1.619 0.000 0.818 1.016 -0.900 3.102 0.882 0.652 0.983 0.881 0.768 0.844 0.798 +0 0.305 -0.863 -0.353 2.308 -1.499 1.387 -0.425 0.353 2.173 0.418 -1.168 -1.682 2.215 0.374 0.697 -0.938 0.000 0.516 0.015 -0.457 0.000 0.680 0.985 0.998 0.498 1.169 1.033 0.817 +1 0.821 1.143 -0.065 2.526 0.449 0.959 0.420 -1.343 2.173 1.207 -0.644 1.365 0.000 2.575 1.138 -0.695 0.000 1.398 1.033 1.171 3.102 3.966 2.364 0.990 0.876 1.072 1.516 1.444 +0 0.623 1.018 1.167 1.208 0.277 0.948 -0.727 -1.524 0.000 1.147 0.600 -0.313 2.215 0.829 -0.282 1.579 0.000 0.702 -0.189 0.345 3.102 0.636 0.788 0.993 0.831 0.574 0.921 0.875 +1 0.743 0.543 -0.294 
1.523 0.663 0.835 0.027 -0.667 0.000 0.881 -0.160 -1.179 2.215 0.894 0.778 1.239 0.000 1.947 -0.224 1.501 3.102 0.913 0.921 1.119 1.049 0.788 0.878 0.756 +1 2.089 -0.100 -0.859 1.001 0.721 0.707 0.168 1.377 2.173 0.540 -1.098 0.983 2.215 0.403 0.763 0.172 0.000 1.013 -0.923 -0.478 0.000 0.890 0.983 1.982 1.196 0.708 0.930 0.811 +0 0.681 -0.662 1.072 1.462 -1.702 0.786 1.635 0.410 0.000 0.709 1.318 1.059 0.000 1.326 -0.529 -0.724 0.000 1.135 0.593 -1.278 3.102 0.904 1.041 0.992 1.620 0.736 1.140 1.366 +1 0.351 1.397 1.177 2.619 0.702 0.797 -0.949 -1.074 0.000 0.757 -0.930 -0.433 0.000 0.816 0.897 0.538 1.274 1.395 0.589 -1.060 3.102 0.902 1.106 0.987 0.904 0.816 1.042 2.029 +1 1.100 0.399 -0.221 0.474 -1.012 0.803 0.870 -1.327 1.087 0.559 0.625 1.020 2.215 0.613 -1.017 -0.214 0.000 0.974 -0.517 0.665 0.000 0.644 0.753 0.983 0.943 0.851 0.832 0.755 +1 0.759 -0.198 -1.478 0.650 0.587 0.848 2.268 -1.723 0.000 1.543 0.714 -0.523 0.000 2.222 0.819 0.427 2.548 0.956 -1.180 -1.418 0.000 0.753 0.915 0.988 0.965 0.869 0.958 0.798 +0 1.040 -1.543 1.658 0.336 1.525 0.499 -1.047 -0.175 2.173 0.431 -1.398 0.469 0.000 0.344 -2.100 1.392 0.000 0.950 -0.165 -0.496 3.102 0.511 0.689 0.988 0.839 0.383 0.631 0.574 +0 1.038 -1.032 -0.346 0.469 -0.217 1.000 -0.937 0.433 2.173 1.065 -1.583 1.560 0.000 1.364 -0.349 -1.378 2.548 0.841 -0.364 1.624 0.000 0.783 0.864 0.991 0.959 1.510 0.996 0.947 +0 3.587 -0.434 -0.315 2.020 -0.352 0.987 1.635 1.020 0.000 1.659 -0.023 -1.612 0.000 1.312 -0.573 -1.634 0.000 2.954 0.900 1.538 0.000 0.698 0.984 0.957 0.857 0.176 0.912 1.206 +1 0.630 0.889 1.165 1.084 -1.366 1.784 0.840 0.193 0.000 0.418 1.614 1.136 0.000 1.204 -0.695 -1.101 2.548 0.828 1.025 -0.874 3.102 0.903 0.888 0.985 1.314 0.907 0.870 0.840 +0 1.130 -1.702 -0.326 0.521 0.615 0.600 2.505 -0.952 0.000 0.719 -1.466 1.258 2.215 0.856 1.007 1.572 0.000 1.374 0.180 0.778 3.102 0.792 0.829 0.988 0.790 0.951 1.004 1.070 +1 0.621 0.523 -0.117 0.654 -1.100 0.700 0.018 0.522 0.000 1.314 -0.695 -1.074 2.215 0.358 -1.429 0.805 0.000 0.723 -0.271 1.484 3.102 0.965 0.730 0.991 0.746 0.676 0.889 0.763 +0 0.719 -0.560 1.136 2.187 0.795 1.225 0.529 -0.860 0.000 0.845 0.491 1.676 2.215 1.189 -0.646 0.029 2.548 0.463 -0.129 -0.555 0.000 0.511 0.918 0.984 0.939 1.265 1.072 1.241 +0 0.828 -0.387 -0.018 1.212 1.501 0.460 -0.152 -1.027 0.000 1.095 0.395 -1.699 2.215 1.352 1.071 0.319 2.548 0.562 -1.669 0.228 0.000 1.085 1.147 1.359 1.225 1.353 1.083 0.963 +1 0.470 0.733 -0.287 0.872 0.939 0.669 -1.721 -0.688 0.000 0.793 0.828 -0.989 0.000 1.842 0.501 1.062 2.548 1.375 0.313 0.326 3.102 0.805 1.040 0.987 0.881 0.757 0.878 0.790 +1 0.881 1.237 -1.179 1.781 1.428 0.291 1.805 -1.470 0.000 0.543 1.105 -0.166 2.215 1.171 -0.012 0.501 2.548 0.376 -1.829 0.823 0.000 1.904 1.271 1.235 1.220 0.704 0.880 0.966 +0 1.579 -0.834 1.583 0.343 0.590 1.461 -0.754 -1.514 2.173 1.666 -1.289 0.240 1.107 0.765 -1.516 -0.823 0.000 1.180 -2.175 -0.118 0.000 0.807 1.053 0.988 0.881 2.387 1.349 1.125 +1 0.762 0.005 1.172 0.647 -1.651 0.493 1.057 -0.003 2.173 0.489 -1.030 1.174 2.215 0.531 -1.808 -1.126 0.000 0.664 -1.345 0.131 0.000 0.605 1.335 0.994 0.849 1.117 0.869 0.901 +0 0.292 1.288 -0.783 1.748 -0.468 0.478 -2.605 1.314 0.000 0.521 0.449 1.508 2.215 0.990 -1.193 0.496 1.274 0.717 -1.327 -1.437 0.000 0.732 0.824 1.001 0.868 0.978 0.888 1.027 +0 2.183 1.530 0.851 0.237 -0.622 1.283 0.174 -1.023 2.173 1.206 -1.242 1.518 0.000 1.409 -1.634 0.115 0.000 2.140 1.057 -0.884 3.102 0.817 2.119 0.986 1.148 1.030 1.869 1.981 +0 0.894 1.460 -0.680 0.444 0.771 1.434 
0.335 -1.416 0.000 1.494 1.168 0.368 0.000 1.830 0.763 1.733 1.274 1.818 1.785 0.338 0.000 0.845 1.026 0.982 1.018 1.377 1.011 0.886 +0 1.700 0.521 -1.286 3.820 -1.622 2.414 -0.217 0.206 0.000 1.502 -0.694 0.387 2.215 1.652 0.225 1.725 2.548 0.926 -0.531 -0.193 0.000 0.939 0.835 1.049 0.674 1.773 1.574 1.810 +0 2.008 -0.041 0.394 0.865 -0.297 0.855 -1.227 1.652 0.000 0.830 0.925 1.460 0.000 0.962 -1.267 -0.797 2.548 1.126 -0.488 -1.338 0.000 0.816 1.078 1.064 1.107 0.549 0.751 0.923 +0 0.787 -0.228 1.161 0.754 0.328 0.848 -1.115 -0.920 2.173 0.614 -0.738 0.420 0.000 0.566 -1.606 1.533 0.000 0.559 -1.535 0.305 0.000 0.897 0.988 0.988 0.844 0.632 0.889 0.822 +0 1.462 -0.502 0.378 0.783 -1.730 1.478 -1.935 -0.907 0.000 2.290 -0.780 0.956 2.215 0.697 -1.074 -0.619 0.000 1.115 0.450 -1.222 1.551 0.863 1.690 1.403 1.074 1.692 1.700 1.404 +0 1.107 1.087 -0.917 0.558 0.209 0.682 -0.115 -0.549 0.000 1.431 -0.392 0.719 2.215 1.156 -0.007 -1.590 2.548 0.707 -0.465 1.107 0.000 1.083 0.892 0.986 1.244 1.223 0.954 0.851 +0 0.351 -1.149 -0.034 0.680 -1.177 1.457 1.061 -1.117 0.000 1.027 -0.449 0.880 0.000 1.882 0.061 0.329 2.548 0.904 -0.157 1.190 3.102 3.347 1.896 0.997 0.887 0.709 1.384 1.089 +1 0.525 0.435 0.544 0.497 -1.068 1.844 -0.943 1.084 2.173 1.646 0.387 -0.039 0.000 1.789 -0.380 -0.927 0.000 1.466 0.124 1.537 3.102 1.005 1.072 0.991 1.097 1.221 1.231 0.991 +0 1.021 0.245 0.585 0.615 -1.644 0.702 0.564 -1.165 2.173 0.805 2.192 -0.097 0.000 0.747 0.813 -0.735 2.548 0.439 0.316 1.619 0.000 1.141 0.817 0.995 0.831 0.371 0.707 0.744 +0 1.270 0.349 1.181 0.911 1.057 0.762 1.532 0.415 0.000 0.717 -0.728 -1.150 2.215 1.564 0.636 -0.620 2.548 0.526 0.430 -0.950 0.000 0.897 1.064 1.000 1.252 1.028 1.057 1.015 +1 1.142 -1.185 0.664 0.773 -0.214 1.147 -0.046 -1.380 0.000 0.707 -0.501 -0.566 0.000 0.788 -0.203 1.156 0.000 1.148 0.119 0.459 1.551 1.157 0.814 0.986 0.518 0.119 0.478 0.532 +0 1.117 -1.139 -0.119 0.637 -1.085 0.830 -0.845 -1.426 2.173 1.237 -0.809 0.900 1.107 0.494 -1.591 -0.385 0.000 0.599 -1.335 0.665 0.000 0.488 0.788 0.990 0.982 1.289 0.865 0.707 +1 0.453 0.662 -0.419 0.878 1.440 0.868 -0.245 -0.142 2.173 1.313 -0.759 -0.749 2.215 1.126 -0.590 1.143 0.000 1.026 0.143 1.231 0.000 0.959 1.091 0.988 0.861 0.921 0.862 0.775 +0 0.442 -0.460 -0.865 1.286 0.697 0.681 2.319 -0.588 0.000 0.679 0.346 -1.534 1.107 0.748 -1.062 1.411 0.000 0.626 0.718 1.625 0.000 0.915 0.959 1.030 0.820 0.880 0.678 0.654 +1 0.353 1.429 0.165 2.087 -1.037 0.816 1.046 0.632 0.000 1.207 -0.203 1.347 2.215 0.942 -0.117 -0.142 2.548 1.164 -2.019 -1.380 0.000 0.729 0.797 1.050 1.557 1.105 1.148 1.016 +0 0.703 -2.264 -1.551 1.113 -0.651 0.537 -0.513 1.143 1.087 0.546 -2.426 0.276 0.000 0.619 0.015 -0.627 2.548 0.511 1.274 0.515 0.000 0.779 0.993 0.984 1.385 0.744 1.107 0.911 +0 0.803 0.056 1.001 0.489 -0.415 0.846 -0.022 -1.109 2.173 1.129 0.870 0.474 0.000 0.697 0.828 1.611 2.548 1.061 1.246 -0.234 0.000 0.953 0.878 0.986 0.856 0.764 0.909 0.843 +1 1.317 -0.640 1.167 0.833 0.914 1.395 0.277 -0.988 2.173 0.596 0.389 0.938 0.000 0.605 0.453 0.086 0.000 0.796 -0.488 -0.235 3.102 0.641 1.191 0.986 0.759 0.860 0.994 0.839 +0 1.259 1.036 -1.153 0.301 0.058 0.975 0.948 1.567 2.173 0.945 2.372 -0.312 0.000 1.113 0.767 -0.163 1.274 1.636 0.832 0.638 0.000 0.902 0.940 0.984 0.706 1.300 0.821 0.775 +0 0.436 1.839 1.296 1.263 -1.498 0.552 1.532 -0.190 0.000 1.187 0.457 -0.452 1.107 1.359 -0.175 1.260 1.274 0.596 -1.420 1.725 0.000 2.310 1.508 0.978 0.872 1.424 1.144 1.014 +0 0.958 -0.691 -0.016 0.249 -1.485 1.483 -0.226 -1.365 0.000 
1.265 -1.146 0.613 2.215 2.291 -0.095 0.886 0.000 1.288 -0.894 -0.627 0.000 1.028 0.947 0.988 0.767 1.892 1.030 0.861 +0 0.600 0.233 -1.674 1.774 -0.814 0.521 -0.416 1.142 2.173 0.353 0.192 0.991 2.215 0.988 -1.526 -0.046 0.000 0.482 -2.272 1.258 0.000 0.818 0.964 1.001 0.722 0.216 0.676 0.898 +1 0.979 0.406 -1.731 0.612 0.038 0.718 0.972 -0.509 2.173 1.038 -0.454 1.118 2.215 0.827 -0.247 -0.977 0.000 0.481 1.805 0.365 0.000 1.256 0.910 1.072 0.840 1.613 0.962 0.808 +0 3.062 0.432 -1.004 0.312 0.142 1.045 0.342 1.206 1.087 0.748 0.596 0.298 2.215 0.512 1.902 0.914 0.000 0.554 1.531 0.302 0.000 0.954 0.989 1.164 1.065 0.966 1.062 0.899 +0 0.790 0.390 0.084 0.566 -1.420 1.241 0.719 -0.842 2.173 0.781 0.920 0.801 0.000 1.301 0.046 0.488 0.000 1.556 -0.504 1.173 3.102 0.835 0.917 0.992 0.821 1.773 1.155 0.934 +1 0.690 -0.174 1.030 2.228 1.726 0.599 -0.795 0.145 0.000 0.963 -1.316 -0.713 2.215 0.433 0.221 0.252 2.548 0.456 -1.459 0.045 0.000 0.400 0.683 1.009 0.745 0.803 0.884 0.809 +0 1.475 -0.243 1.490 0.318 1.128 0.297 0.681 1.366 0.000 0.841 0.559 -0.328 2.215 0.936 -0.380 -0.516 1.274 0.923 1.675 -1.310 0.000 0.779 0.891 0.988 0.911 0.513 0.770 0.775 +0 0.912 0.892 0.121 1.597 -0.811 0.599 -1.777 0.810 0.000 0.913 1.569 0.419 2.215 1.022 0.731 1.550 2.548 1.335 0.131 -0.208 0.000 1.895 1.682 1.244 1.002 0.974 1.509 1.358 +1 0.843 -1.378 1.368 0.634 -0.730 1.086 -0.384 -0.976 0.000 0.671 -2.050 0.390 0.000 1.012 -0.304 -0.495 2.548 2.086 -0.307 0.858 3.102 0.922 1.290 0.987 0.836 1.043 0.826 0.836 +0 0.549 -1.856 -1.549 0.495 -0.030 1.290 -0.594 0.680 0.000 1.222 -0.850 -0.150 2.215 2.027 -0.585 1.644 0.000 2.941 0.059 -1.386 1.551 2.220 1.842 0.990 1.009 1.749 1.473 1.155 +0 0.783 0.564 0.989 0.678 -0.482 0.677 -0.900 -0.354 2.173 0.332 -1.533 -0.691 0.000 0.713 -1.920 0.987 0.000 1.574 -0.379 -1.430 3.102 0.772 0.891 0.988 0.994 0.935 0.804 0.861 +0 2.494 -0.012 1.649 0.298 -1.417 1.246 -0.048 1.142 2.173 1.443 0.206 -0.425 0.000 1.818 -0.385 -0.418 0.000 1.145 0.304 0.484 3.102 0.802 0.964 0.983 0.908 0.755 1.182 1.149 +1 0.664 0.861 -0.011 1.508 0.508 0.735 -0.913 -1.446 0.000 0.507 0.294 1.166 2.215 0.550 -1.928 -0.649 0.000 0.617 -1.208 -0.508 3.102 1.045 1.073 0.995 1.476 0.711 0.997 1.385 +0 1.076 0.153 -1.384 0.376 -0.618 1.171 1.424 0.179 0.000 1.658 -1.096 -1.659 2.215 0.747 -0.331 0.165 2.548 0.463 0.189 0.607 0.000 0.839 0.950 0.990 0.901 1.267 1.687 1.338 +1 1.007 -0.996 -1.567 1.078 0.914 1.508 0.107 -1.473 2.173 0.987 -0.357 -0.897 2.215 1.580 2.371 0.434 0.000 1.593 0.136 0.442 0.000 2.708 2.636 1.136 1.302 0.990 2.085 1.964 +1 0.692 1.009 -1.675 0.757 -0.655 1.020 0.637 0.198 1.087 1.341 0.571 -1.128 1.107 0.681 1.474 0.585 0.000 1.023 2.356 1.170 0.000 0.881 0.857 0.991 0.859 1.601 0.984 0.868 +0 0.308 2.046 -1.009 2.050 0.414 0.830 0.855 -1.346 0.000 0.610 2.188 1.617 0.000 0.780 0.458 0.542 2.548 1.268 -0.202 -1.043 3.102 1.327 1.169 1.055 1.545 0.806 1.046 1.055 +0 0.536 1.957 -1.682 1.257 0.581 0.579 -0.306 -1.316 0.000 0.762 0.724 -0.380 1.107 0.751 -0.843 1.219 1.274 0.863 -0.351 -0.514 0.000 0.718 0.798 1.015 1.496 1.097 1.081 1.040 +1 0.875 0.592 0.706 0.127 0.215 1.417 0.946 0.844 0.000 1.160 -0.134 0.632 2.215 2.252 0.951 -1.051 2.548 1.466 -0.301 -0.560 0.000 0.813 0.884 0.982 1.273 2.025 1.140 0.951 +1 2.207 1.249 -0.892 0.772 0.459 0.917 1.187 0.961 2.173 0.791 1.280 -1.638 0.000 0.677 -0.089 0.420 2.548 0.797 0.557 -0.294 0.000 1.027 1.008 1.697 1.138 0.823 0.993 0.857 +0 1.238 -2.110 0.375 0.528 -1.117 0.572 0.007 -0.914 0.000 0.625 -0.907 1.180 2.215 0.338 
0.017 0.377 0.000 0.828 -0.439 -1.723 3.102 0.726 0.803 1.091 0.863 0.352 0.661 0.755 +1 0.420 2.222 1.714 1.435 0.203 0.815 0.962 1.369 0.000 0.930 0.252 -1.706 2.215 0.885 1.268 -0.314 0.000 0.998 1.099 -0.694 0.000 1.342 0.997 1.052 1.121 0.964 1.083 0.970 +0 1.214 -0.675 0.853 0.833 0.570 0.840 -0.445 -1.412 0.000 0.623 -0.228 1.444 1.107 1.624 -0.247 -0.338 2.548 0.692 0.615 -0.696 0.000 1.015 1.044 0.987 0.666 1.068 0.775 0.813 +0 1.034 -0.919 -1.362 1.716 -0.783 1.869 -0.767 0.672 0.000 0.670 -2.766 -1.286 0.000 0.798 2.690 1.314 0.000 1.042 0.575 -0.800 3.102 0.973 0.938 0.990 0.798 1.146 0.920 0.875 +1 0.345 -1.529 0.763 0.622 1.602 1.043 -0.486 -0.875 2.173 1.002 0.125 -0.197 2.215 1.368 -0.629 0.616 0.000 0.795 1.562 0.837 0.000 0.984 1.041 0.998 0.954 0.985 0.849 0.762 +0 0.907 -1.692 0.629 2.663 1.169 1.134 -2.243 -0.572 0.000 1.188 -0.054 -1.018 0.000 1.028 -0.108 1.637 2.548 0.713 0.999 0.863 0.000 1.101 1.706 1.007 1.778 0.808 1.374 1.393 +0 0.412 1.642 -1.515 1.506 -0.418 0.762 0.037 0.309 2.173 0.745 0.255 1.047 2.215 1.470 0.159 -1.248 0.000 1.546 -0.779 1.194 0.000 0.587 1.026 0.988 0.959 0.695 0.813 0.780 +1 0.571 0.470 0.782 0.507 -0.725 0.649 1.297 1.192 2.173 0.512 2.902 -1.568 0.000 1.245 0.303 0.419 0.000 0.915 -0.310 -0.875 1.551 2.456 1.517 0.990 0.780 1.097 1.141 0.913 +1 1.616 -0.881 -0.445 0.057 -1.499 1.309 -0.560 1.241 2.173 0.423 -0.318 -0.248 0.000 0.728 -0.168 -1.257 2.548 0.546 0.622 1.402 0.000 0.716 0.949 0.985 0.651 0.973 0.893 0.739 +1 1.125 0.794 0.630 0.104 -0.432 1.160 1.038 1.579 2.173 1.167 1.260 -0.294 0.000 0.971 0.352 -0.872 1.274 1.128 -0.082 1.140 0.000 0.805 1.152 0.988 0.781 1.151 0.842 0.761 +0 0.710 -1.557 -0.409 2.510 -1.025 1.189 -1.080 1.220 0.000 0.612 -0.408 1.041 0.000 0.987 -0.658 0.155 2.548 0.628 -0.427 -0.283 3.102 0.685 0.965 0.984 0.726 0.239 0.725 0.976 +1 0.873 0.267 -1.547 1.460 0.584 0.988 -1.047 0.359 2.173 1.576 -1.102 -1.044 0.000 0.501 -1.715 -0.774 0.000 1.294 -0.172 1.184 3.102 0.617 1.142 1.470 1.301 0.964 1.019 1.093 +1 0.788 -0.083 -1.618 0.631 0.518 1.231 0.305 0.370 2.173 0.799 2.394 -0.855 0.000 1.390 -0.010 -1.081 0.000 0.822 0.616 1.364 3.102 0.842 0.764 0.987 0.892 0.862 0.826 0.717 +0 0.352 -2.175 0.962 2.027 0.308 0.656 0.048 -1.538 0.000 0.953 -1.113 -0.983 2.215 0.698 0.030 1.195 0.000 0.423 -0.710 -0.693 3.102 0.763 1.013 0.980 0.597 0.170 0.677 0.812 +1 0.361 -2.057 0.907 0.563 -0.832 1.042 -0.152 0.251 0.000 1.138 -0.436 -0.871 2.215 1.153 -2.410 1.639 0.000 0.798 -0.718 1.490 3.102 0.893 0.916 0.987 0.729 0.752 0.851 0.756 +0 0.671 -0.885 -1.547 0.682 0.185 0.820 0.378 -1.151 2.173 0.644 -0.351 -0.575 0.000 0.479 0.938 0.801 0.000 1.030 1.235 0.290 3.102 1.030 0.894 0.988 0.932 1.092 0.809 0.717 +1 1.040 -0.061 0.060 0.253 -1.110 1.365 -0.222 -1.568 2.173 1.067 0.058 0.383 2.215 1.128 0.118 -0.291 0.000 0.979 0.394 1.144 0.000 1.133 0.845 0.991 1.146 1.763 1.026 0.887 +1 1.363 0.329 -1.226 0.255 -1.001 0.828 -1.143 0.482 2.173 1.469 0.810 -0.794 0.000 1.282 0.524 1.109 2.548 0.900 0.676 0.346 0.000 0.704 0.871 0.981 0.903 1.430 1.174 0.914 +0 0.536 -0.422 0.445 2.634 1.221 1.317 -1.970 -0.911 0.000 0.872 0.328 0.344 0.000 1.066 0.509 0.888 2.548 1.251 -0.205 -0.345 0.000 0.889 0.924 1.061 0.794 0.969 0.878 0.862 +1 0.879 0.437 -1.656 0.147 -0.207 0.666 0.552 0.083 0.000 0.413 1.393 0.742 0.000 1.587 -0.445 -0.737 2.548 1.124 -0.324 1.409 3.102 0.801 0.937 0.995 0.787 0.955 0.886 0.777 +1 0.401 -0.352 0.840 2.332 -0.370 1.307 0.475 1.511 2.173 0.859 0.597 0.771 0.000 1.035 1.043 -1.400 2.548 1.101 
0.482 -0.383 0.000 1.092 0.992 1.188 1.669 0.868 1.219 1.049 +1 0.944 0.599 -0.786 0.972 0.136 1.024 1.151 -1.535 2.173 0.751 1.796 -0.144 0.000 1.477 1.396 1.147 2.548 0.655 1.439 0.429 0.000 0.455 0.832 0.988 1.096 1.054 0.952 0.856 +0 4.126 1.032 -0.120 0.484 -0.605 1.783 0.645 0.120 0.000 5.193 0.059 1.673 2.215 0.613 1.437 1.353 0.000 0.624 0.379 1.723 3.102 1.912 1.239 0.985 3.793 0.330 2.262 2.030 +0 0.346 -1.226 -0.886 1.020 -0.535 0.746 -0.762 -0.785 2.173 0.689 -1.203 1.572 0.000 1.387 -0.077 0.774 2.548 0.844 -2.312 0.104 0.000 0.729 0.979 0.976 0.939 1.326 0.935 0.922 +0 2.247 0.237 -0.648 0.455 0.487 0.642 -0.839 -1.723 1.087 1.400 -0.051 0.978 2.215 0.952 1.521 -0.764 0.000 0.718 -0.629 0.582 0.000 1.639 1.482 1.197 1.177 1.069 1.129 1.063 +0 1.980 -0.614 0.791 0.708 -0.161 1.123 -0.415 0.163 2.173 1.409 -0.920 -1.570 2.215 0.483 0.635 -1.031 0.000 1.238 -0.056 -0.640 0.000 0.697 0.937 1.240 1.292 1.915 1.113 0.911 +1 0.565 0.759 0.197 0.370 -0.298 0.592 1.245 0.483 0.000 1.132 1.303 -0.938 2.215 1.376 1.091 1.738 2.548 1.908 0.507 0.944 0.000 0.845 1.040 0.983 1.153 0.889 0.959 0.954 +1 1.002 0.122 -1.331 0.768 0.872 0.830 1.125 -0.397 1.087 0.570 -1.048 0.303 0.000 0.664 1.816 0.884 0.000 1.915 -0.564 1.482 0.000 1.220 0.876 1.113 1.062 0.870 1.077 0.912 +0 0.700 -1.188 1.138 1.419 1.081 1.411 -0.737 1.570 2.173 2.075 -0.223 -0.626 0.000 1.059 0.025 -0.397 2.548 1.452 2.055 0.014 0.000 1.265 0.766 0.990 1.399 1.608 1.343 1.428 +1 1.591 -0.168 0.294 0.765 1.656 0.294 0.637 0.646 0.000 0.506 0.438 -0.463 0.000 1.026 0.344 -1.213 2.548 0.902 -0.769 -1.652 3.102 0.855 1.047 1.439 1.003 0.586 0.725 0.771 +0 0.973 0.269 -0.492 0.699 -0.831 0.835 -0.331 -1.738 2.173 0.575 0.972 0.474 0.000 1.499 -0.013 0.677 0.000 0.558 1.000 -0.791 3.102 0.798 0.767 0.983 0.957 0.818 0.809 0.825 +1 0.393 -0.576 -1.286 1.527 0.802 2.742 -1.467 -1.003 0.000 0.960 -0.933 -0.088 2.215 1.850 -0.181 0.446 2.548 2.824 -0.567 1.023 0.000 0.741 1.002 1.021 0.844 0.858 0.897 0.759 +0 1.125 -0.104 -0.521 0.849 0.782 0.955 0.055 1.241 2.173 0.555 -0.510 -0.071 0.000 0.884 0.764 -0.582 0.000 1.223 1.071 1.621 3.102 0.923 1.024 1.250 1.020 0.840 0.876 0.819 +1 0.384 -1.197 0.736 0.250 0.678 1.268 0.464 0.316 2.173 0.886 0.162 -1.018 0.000 0.446 1.944 -1.530 0.000 1.349 -0.923 -1.733 0.000 0.947 0.608 0.981 0.798 0.832 0.865 0.731 +1 1.005 -1.101 0.590 0.802 1.058 0.909 0.312 -0.834 2.173 0.857 -0.484 -1.266 2.215 0.647 0.441 0.445 0.000 0.560 -1.336 0.011 0.000 0.865 1.022 0.986 1.119 0.733 1.223 1.004 +0 0.441 0.840 -1.403 0.957 0.783 0.831 -0.990 -1.689 2.173 0.630 -1.125 -0.687 0.000 0.819 -0.179 0.420 0.000 0.399 0.076 -1.377 1.551 1.077 1.088 0.989 0.614 0.388 1.079 1.035 +0 0.520 0.756 -1.121 1.774 -0.091 0.451 0.832 0.664 0.000 0.564 -0.084 1.382 0.000 0.943 0.547 1.566 1.274 1.013 -0.234 -0.859 3.102 0.825 0.827 1.066 0.923 0.699 0.695 0.685 +0 1.791 0.107 -1.054 0.521 -0.554 0.903 0.763 0.597 0.000 0.466 1.282 1.284 0.000 0.818 -0.429 -0.207 2.548 0.669 -0.604 -1.710 3.102 0.886 0.937 0.994 0.772 0.557 0.748 0.810 +1 1.020 -1.592 -0.635 0.144 -0.862 1.925 0.059 1.613 0.000 1.403 -0.704 -0.250 2.215 1.732 0.086 0.164 2.548 1.106 -1.231 0.789 0.000 0.598 1.016 0.981 1.023 0.922 0.867 0.786 +0 0.436 -0.535 0.091 1.171 1.617 0.963 -0.938 -0.493 2.173 0.926 -0.655 0.855 0.000 0.993 -0.652 -1.270 2.548 0.565 -1.680 -0.028 0.000 0.959 0.942 0.987 0.959 0.797 0.794 0.722 +0 1.456 0.204 0.175 1.031 0.797 0.785 -0.964 -1.254 2.173 1.364 -0.643 1.658 0.000 1.673 0.560 -0.359 2.548 0.712 -1.095 0.980 0.000 0.852 
0.863 0.984 0.969 1.626 1.145 1.086 +1 0.625 -0.195 0.908 2.331 0.250 1.177 -0.027 -1.330 1.087 0.396 -0.634 -0.655 0.000 0.517 0.523 -1.076 0.000 0.532 0.377 0.402 1.551 0.910 0.955 0.992 0.454 0.861 0.956 0.860 +0 1.384 0.493 0.157 0.734 0.945 0.332 1.101 1.190 0.000 0.320 -0.255 0.019 0.000 0.718 -0.115 -1.037 2.548 1.031 0.343 -1.549 0.000 0.791 0.783 0.988 0.837 0.374 0.727 0.631 +0 0.615 -1.738 -0.412 1.358 0.341 0.531 -1.458 0.885 0.000 0.680 -0.029 -1.570 2.215 0.574 -1.255 -1.507 0.000 1.024 -0.945 -0.855 3.102 0.828 0.876 0.983 0.694 0.629 0.713 0.679 +0 1.047 0.897 0.034 1.458 -0.696 0.790 1.880 1.304 0.000 0.901 0.078 -1.022 2.215 0.980 0.538 0.557 2.548 1.039 0.753 1.351 0.000 0.719 0.886 1.046 0.850 1.021 0.931 0.924 +1 1.147 -0.900 -1.286 1.640 -0.659 1.071 -0.831 0.693 2.173 0.358 0.564 0.098 2.215 0.422 1.479 -1.550 0.000 0.838 0.250 0.740 0.000 0.832 1.242 1.020 0.881 0.844 0.979 0.921 +0 1.336 0.075 1.051 1.103 0.338 1.059 1.423 -1.205 0.000 0.807 1.052 0.427 0.000 0.908 0.589 -1.541 0.000 0.964 -0.219 -0.444 1.551 0.887 1.080 1.007 0.538 0.173 0.679 0.856 +1 0.796 1.024 -1.168 1.188 0.753 1.332 0.351 0.406 2.173 0.602 0.270 -0.743 0.000 0.956 0.219 -1.223 0.000 0.493 -1.134 1.303 0.000 0.943 1.176 1.330 0.675 0.958 0.795 0.800 +0 1.388 1.803 0.595 0.246 1.096 1.173 0.244 -0.971 0.000 1.260 1.009 -0.987 2.215 2.460 -1.408 1.155 0.000 1.332 0.521 0.208 3.102 0.921 0.958 0.987 1.131 1.058 0.829 0.890 +0 0.661 1.740 -0.153 1.012 0.073 0.788 -0.962 -1.415 0.000 1.455 1.098 1.000 2.215 0.464 -2.027 -1.141 0.000 0.859 -0.264 -0.611 3.102 0.776 0.719 0.986 1.041 1.279 1.410 1.287 +1 0.908 -0.466 -0.623 0.763 1.232 0.961 -0.804 1.294 2.173 0.820 -0.018 0.225 0.000 0.784 0.478 -0.573 0.000 0.850 -2.445 -0.897 0.000 0.884 1.153 1.147 0.559 0.520 0.691 0.698 +1 0.914 -0.387 0.049 0.546 1.635 1.172 -0.487 1.024 2.173 1.683 -0.510 -1.071 0.000 1.310 0.901 0.168 0.000 0.917 -0.058 -0.462 0.000 0.923 0.711 0.989 0.845 0.856 0.982 0.828 +0 0.469 1.385 -0.976 0.841 -1.175 0.835 -0.473 1.463 0.000 1.302 1.168 -0.600 2.215 0.949 0.500 1.421 0.000 1.142 0.301 0.422 3.102 0.861 0.903 0.984 0.906 0.997 1.088 0.920 +1 1.350 -2.050 0.497 0.890 -1.703 0.613 0.727 -0.808 1.087 0.521 -0.884 1.096 1.107 0.808 -1.236 0.184 0.000 1.017 -1.047 -1.624 0.000 0.986 1.053 1.392 0.846 1.123 1.326 1.097 +0 0.522 -2.269 -1.317 1.079 1.498 0.950 -0.255 0.015 2.173 0.973 -0.611 0.501 2.215 2.040 -1.305 -1.342 0.000 0.756 0.613 -1.503 0.000 0.699 1.478 0.975 0.966 0.656 1.135 1.264 +0 0.605 -1.998 0.774 0.461 -0.538 0.508 -0.574 1.305 0.000 0.891 -1.615 1.022 0.000 0.933 -0.500 -0.757 2.548 0.878 0.346 -0.822 3.102 0.889 1.072 0.993 0.789 0.349 0.809 0.716 +0 1.956 -1.069 -1.357 0.611 -0.235 1.931 -0.851 -1.715 2.173 1.623 -1.087 0.024 2.215 1.986 -0.269 0.091 0.000 1.362 -0.383 0.680 0.000 0.930 0.956 1.284 1.148 2.627 1.567 1.334 +1 1.442 0.181 1.361 1.048 -1.648 1.291 0.522 -0.096 2.173 0.704 1.255 -1.050 0.000 0.710 1.171 0.541 1.274 0.527 1.563 1.209 0.000 0.747 1.168 0.989 0.980 0.796 1.093 0.974 +1 0.580 -0.264 0.170 0.985 1.340 0.840 0.824 -1.093 0.000 0.790 -0.334 0.773 2.215 1.016 0.622 0.168 2.548 1.332 -0.183 -1.372 0.000 0.975 1.112 0.989 0.616 0.711 0.901 0.781 +0 0.330 0.914 0.964 1.966 -0.627 0.874 0.031 0.586 0.000 0.727 -0.142 -1.194 2.215 0.806 -0.033 1.419 0.000 0.583 -1.757 -1.283 0.000 1.030 1.051 1.104 0.941 0.581 0.724 0.813 +1 0.627 1.152 -1.300 1.295 -1.001 2.262 -1.843 1.337 0.000 1.231 -0.920 0.039 1.107 2.733 0.158 -0.272 2.548 0.478 0.738 1.702 0.000 2.980 2.413 0.991 1.089 1.270 
2.305 1.959 +0 0.477 0.153 -0.336 1.240 -1.604 0.561 -1.229 -1.185 1.087 0.599 -0.729 -0.280 0.000 1.280 -1.158 1.430 1.274 1.074 0.865 -0.424 0.000 1.073 1.132 0.988 0.867 0.746 0.960 0.824 +1 1.638 0.269 1.191 0.948 -1.579 0.968 0.375 -0.395 0.000 0.768 2.728 1.241 0.000 0.866 0.143 -0.922 2.548 0.704 0.819 0.880 1.551 1.054 0.885 1.041 0.833 0.648 0.633 0.820 +0 0.392 2.166 -0.353 0.894 1.589 0.650 0.326 0.409 2.173 0.659 1.779 -1.275 0.000 0.744 0.396 1.514 0.000 0.912 -0.267 0.009 3.102 1.029 1.040 0.982 0.871 0.394 0.792 0.721 +0 0.386 0.345 -1.207 1.151 1.078 1.726 -0.581 -0.410 0.000 1.545 -0.244 1.030 1.107 1.522 0.321 1.556 1.274 1.307 -0.656 -0.951 0.000 1.100 1.806 0.988 1.126 0.896 1.438 1.351 +1 1.256 0.932 0.351 0.428 -1.070 1.968 -0.547 -0.124 0.000 1.659 1.008 1.584 2.215 1.031 1.597 1.148 0.000 1.800 0.036 1.550 3.102 0.934 0.965 0.989 1.049 0.810 0.840 0.753 +0 0.950 -0.660 -1.628 0.493 -0.511 0.517 0.048 0.338 1.087 0.968 0.697 1.623 0.000 1.532 -0.395 -0.294 2.548 0.405 0.316 1.326 0.000 0.257 1.158 0.993 0.892 0.657 0.782 0.809 +1 0.721 0.466 -1.301 0.728 0.733 0.745 -0.496 1.463 0.000 1.001 -0.673 -0.237 0.000 1.033 -0.458 0.917 2.548 0.797 -0.882 1.020 1.551 1.841 1.076 0.988 0.683 0.206 0.707 0.683 +0 0.401 0.685 -0.441 1.583 1.556 0.790 -0.568 0.032 1.087 0.686 -1.108 1.127 0.000 1.030 -0.430 -0.572 0.000 1.067 -1.028 -1.469 3.102 1.361 1.032 1.075 1.000 1.004 0.961 0.907 +1 1.067 1.068 -1.688 0.890 1.706 0.607 1.538 0.147 2.173 0.274 -1.542 -1.394 0.000 0.451 0.896 0.153 2.548 0.643 -0.119 -0.020 0.000 0.672 1.194 0.986 0.723 0.188 0.755 0.766 +0 0.661 1.474 -1.575 1.284 -1.045 0.644 0.572 1.524 2.173 1.239 0.658 -0.003 1.107 0.937 -0.822 1.448 0.000 0.577 -0.354 0.757 0.000 0.512 1.175 0.981 0.753 1.291 0.871 0.863 +1 0.489 -0.206 -0.509 1.307 1.678 0.915 0.091 -0.121 0.000 1.197 0.385 1.637 2.215 0.923 -0.777 0.175 0.000 0.438 -1.599 0.362 0.000 0.920 0.968 1.020 0.714 0.397 0.937 0.825 +1 0.676 1.380 -0.709 1.109 1.736 0.584 -0.269 -0.326 2.173 0.657 0.404 0.962 2.215 0.651 -0.178 -1.457 0.000 0.633 -0.206 1.173 0.000 0.494 0.683 0.988 0.869 0.896 0.891 0.755 +1 0.484 0.673 -0.910 0.957 0.980 0.814 -0.069 -0.076 0.000 2.188 -0.315 -1.329 1.107 0.912 -0.076 1.434 0.000 2.825 -1.584 0.242 0.000 0.945 0.973 0.987 0.555 1.543 1.035 0.860 +0 0.776 0.751 -0.628 2.167 -1.088 1.057 -0.216 0.860 2.173 0.861 -0.403 -0.028 2.215 0.352 -0.902 1.493 0.000 0.456 -1.644 1.055 0.000 0.283 0.651 0.985 1.925 1.017 1.429 1.219 +0 1.429 1.336 -1.245 0.457 0.230 1.306 0.983 -0.583 1.087 2.069 1.084 1.062 0.000 0.519 1.677 1.162 0.000 0.573 0.045 0.052 3.102 0.606 0.828 1.088 0.899 0.667 1.123 0.961 +0 1.122 -1.277 1.510 1.002 -1.205 0.994 0.647 0.894 0.000 0.766 -1.204 -0.649 2.215 0.840 0.465 0.323 0.000 1.222 2.054 0.127 0.000 0.819 0.852 0.985 0.784 1.009 1.010 1.178 +0 0.461 0.122 -0.279 1.024 1.236 0.834 0.682 -0.221 1.087 0.619 0.241 0.393 0.000 0.625 0.438 -1.253 0.000 0.956 1.200 1.302 1.551 0.957 0.813 0.988 0.592 0.995 0.667 0.616 +1 0.556 -1.299 -1.630 1.338 -1.145 1.144 0.898 0.861 2.173 0.694 0.935 -0.178 0.000 0.553 0.496 -1.537 2.548 0.662 0.316 -0.582 0.000 0.851 1.159 0.984 1.328 0.842 2.049 1.509 +0 0.317 2.149 0.739 1.641 -1.433 0.423 -1.035 -0.504 0.000 1.163 -0.187 -0.263 0.000 0.635 -0.869 0.827 1.274 0.800 1.954 1.093 0.000 0.729 0.810 0.990 1.492 0.215 1.535 1.716 +1 0.409 -0.276 0.736 0.852 1.559 0.865 0.254 -0.310 2.173 0.894 1.051 1.317 0.000 0.591 -1.981 0.378 0.000 0.626 1.623 -0.540 0.000 0.928 0.904 0.989 1.348 0.295 0.897 1.249 +1 0.599 -0.784 0.997 
0.303 -0.814 0.496 -0.771 1.473 0.000 1.182 1.397 -0.043 2.215 0.542 0.096 0.305 0.000 0.592 1.236 -1.121 3.102 0.915 0.858 0.986 1.028 0.623 0.910 0.779 +0 1.848 0.276 0.337 0.516 -0.365 0.843 1.159 1.726 0.000 0.950 0.117 -0.830 1.107 0.972 0.658 1.100 2.548 0.695 0.606 -1.147 0.000 0.665 0.941 0.987 0.824 1.054 0.784 0.844 +1 0.917 0.789 1.371 0.864 1.197 0.416 0.251 1.201 0.000 0.563 -0.322 -0.908 0.000 0.623 1.803 -1.439 0.000 2.020 1.005 -0.082 3.102 1.018 1.137 0.986 0.938 0.682 0.820 0.848 +0 0.555 -0.253 1.152 2.195 0.489 0.741 -0.146 -0.516 2.173 0.810 -0.456 -1.311 0.000 1.743 -1.090 1.665 0.000 1.581 0.652 -0.049 3.102 0.797 0.993 0.992 1.124 0.724 0.949 0.916 +1 0.981 -0.696 1.671 1.532 1.065 0.582 -0.996 -0.808 0.000 0.347 -1.215 0.402 2.215 0.329 -0.338 -0.059 0.000 0.381 -0.418 -0.509 3.102 0.553 0.520 0.986 0.626 0.271 0.493 0.556 +0 0.493 -0.712 -0.901 1.298 1.704 0.786 -0.057 -0.006 2.173 0.200 -0.698 0.361 0.000 0.427 0.615 -1.371 0.000 1.435 -0.596 1.112 3.102 0.562 0.615 0.986 0.764 1.022 0.913 0.737 +0 2.304 0.012 0.524 0.754 0.170 1.234 -0.375 -1.241 1.087 0.316 0.369 -0.577 0.000 0.529 -0.318 -1.633 1.274 0.714 -0.813 1.031 0.000 0.764 0.876 0.993 0.823 0.348 1.035 0.812 +1 2.610 -0.694 -1.074 0.729 -1.250 1.206 1.450 0.524 0.000 1.321 -0.530 0.129 0.000 1.521 -0.669 0.735 2.548 1.287 -0.897 -1.636 3.102 0.525 0.983 0.976 1.383 0.921 0.923 0.924 +0 0.373 2.046 1.675 1.153 0.281 0.867 1.063 -1.290 1.087 1.052 0.743 0.103 2.215 1.291 -2.498 -1.102 0.000 1.069 0.303 0.808 0.000 1.071 0.987 0.985 0.959 1.354 0.905 0.777 +1 1.186 -0.661 -0.822 0.166 -1.686 0.724 0.370 0.867 0.000 0.562 -1.127 -0.458 0.000 0.848 0.204 1.665 2.548 0.831 0.378 0.368 3.102 0.925 0.677 0.987 0.691 0.595 0.560 0.584 +1 0.447 -0.694 0.017 0.862 -1.088 0.761 -1.006 0.972 2.173 0.565 0.572 1.372 2.215 0.490 -1.322 -0.517 0.000 0.554 0.200 -0.332 0.000 0.555 0.815 0.985 1.130 0.925 0.885 0.738 +1 0.652 -1.915 0.285 0.696 -0.015 0.886 -0.240 0.384 2.173 0.710 -1.899 1.647 0.000 1.549 2.103 1.363 0.000 2.459 -0.424 -0.760 1.551 0.683 1.079 1.000 0.756 1.357 1.024 0.881 +0 1.729 -0.186 -0.093 0.462 -1.656 1.188 2.887 -1.378 0.000 0.984 -1.442 0.049 2.215 1.301 -1.180 0.463 0.000 1.523 -0.819 -1.714 3.102 0.882 0.872 1.222 0.985 1.139 0.873 0.792 +1 1.055 -0.247 -0.316 1.754 -1.028 1.198 -0.354 1.109 1.087 0.328 -1.034 -0.719 0.000 0.685 1.354 0.972 0.000 0.862 0.550 0.270 0.000 0.864 1.042 1.127 0.545 0.782 0.922 0.781 +0 0.655 -0.356 0.174 0.918 -1.066 0.308 1.580 1.162 0.000 0.487 -2.458 0.076 0.000 0.723 -0.632 0.475 2.548 1.173 0.742 1.607 3.102 0.814 0.920 0.987 0.875 0.861 0.666 0.689 +1 1.085 0.333 -0.317 0.501 1.572 0.697 0.037 1.500 2.173 0.612 -1.198 0.696 2.215 0.742 1.336 -0.096 0.000 0.441 -0.496 -1.613 0.000 0.989 0.949 1.013 0.887 0.912 0.817 0.732 +0 1.740 0.324 1.688 1.304 1.336 1.865 0.323 -0.065 0.000 0.903 0.135 -1.164 2.215 0.341 0.448 -0.667 0.000 0.884 0.466 0.977 1.551 0.750 0.911 0.975 0.872 0.774 0.836 1.015 +0 1.737 -0.024 1.516 1.275 0.953 0.966 -0.540 -0.262 0.000 0.854 -0.441 0.212 0.000 0.934 -0.068 -0.902 2.548 1.401 0.727 -1.450 3.102 0.805 0.857 1.001 0.902 0.597 0.872 0.974 +1 0.893 -0.581 0.846 0.812 -0.545 0.985 -0.866 1.530 0.000 1.645 -0.624 -0.461 2.215 0.910 0.352 1.385 2.548 0.366 1.994 0.112 0.000 2.424 1.393 1.121 0.891 1.471 1.298 1.056 +0 1.065 -0.997 0.559 0.153 -0.966 1.146 -0.596 -0.978 2.173 1.102 0.460 0.667 2.215 0.783 -1.333 1.472 0.000 0.565 -0.247 -0.151 0.000 0.859 0.966 0.996 0.797 1.892 1.027 0.864 +0 0.636 -1.883 -0.146 1.284 -0.019 2.254 
1.181 1.656 2.173 1.822 -0.160 -0.200 0.000 1.209 -0.561 0.301 1.274 1.276 -0.129 1.702 0.000 1.966 1.285 0.987 3.068 2.873 2.101 1.803 +1 0.786 -1.464 1.067 0.565 0.133 0.694 0.004 -1.141 2.173 0.475 0.292 1.153 0.000 0.533 -0.845 0.859 0.000 1.556 0.721 -0.481 3.102 0.543 0.922 0.984 0.952 0.787 0.860 0.742 +0 0.361 -0.339 0.437 0.654 -0.546 0.731 0.101 0.595 2.173 0.696 1.421 -0.553 0.000 0.826 1.427 1.164 2.548 0.452 0.921 1.599 0.000 0.741 0.964 0.991 0.719 0.909 0.699 0.638 +1 0.345 1.528 0.427 1.419 1.252 0.978 -0.692 -1.234 2.173 0.996 0.831 -0.220 1.107 0.882 0.054 0.192 0.000 1.213 0.131 1.292 0.000 0.956 0.946 0.994 1.240 1.696 1.094 0.920 +0 0.372 -0.195 -1.456 1.011 -1.691 0.932 0.506 -0.805 0.000 1.122 1.854 1.043 0.000 1.598 0.509 0.514 2.548 0.981 0.567 -0.210 0.000 1.042 0.787 0.983 1.641 0.197 1.138 1.070 +1 1.489 2.036 -1.683 0.856 1.334 0.712 1.058 0.162 2.173 0.741 0.509 -0.371 2.215 0.622 1.554 -1.110 0.000 0.479 0.999 1.026 0.000 0.585 0.703 0.997 1.148 0.575 0.938 0.730 +1 1.743 -1.008 -0.589 0.828 -0.764 0.862 -0.725 -1.629 2.173 1.274 -0.737 1.033 2.215 1.154 -2.057 0.765 0.000 0.805 0.393 -0.201 0.000 2.111 1.476 0.983 1.056 1.042 1.108 1.116 +1 2.203 0.650 1.706 0.918 -1.202 1.191 0.550 0.100 2.173 0.420 1.270 1.022 1.107 0.619 -0.353 -1.129 0.000 0.735 0.138 0.538 0.000 0.799 1.058 0.987 0.724 0.867 1.020 0.853 +0 0.742 1.048 -1.047 0.467 -0.103 0.433 -0.422 1.422 0.000 0.813 1.227 1.151 2.215 0.735 -0.206 -0.024 2.548 0.892 2.147 -0.084 0.000 1.074 1.053 0.982 0.874 0.980 0.770 0.726 +1 1.150 -1.362 0.377 1.319 1.627 0.643 -0.113 -0.835 2.173 0.522 0.982 0.440 2.215 0.364 1.204 -0.746 0.000 0.562 -0.931 1.595 0.000 0.883 0.743 1.541 1.274 0.928 1.109 0.913 +0 1.379 0.617 -1.381 0.776 1.400 1.317 0.085 0.220 0.000 0.446 -0.008 -0.937 1.107 0.585 0.660 0.629 0.000 0.442 2.083 1.689 0.000 0.765 0.880 0.986 0.507 0.238 0.634 0.736 +1 0.297 0.724 -1.269 1.171 0.679 1.588 -1.342 -1.295 0.000 2.789 -0.320 0.296 0.000 1.924 0.323 -0.904 2.548 1.378 2.169 -1.634 0.000 0.874 0.814 0.986 1.294 1.028 1.123 1.260 +0 1.408 -0.113 1.082 1.619 1.568 0.622 0.167 -0.907 0.000 0.585 -1.109 0.531 1.107 0.755 2.130 -0.370 0.000 1.207 -0.285 -0.126 0.000 0.923 0.946 0.990 0.795 0.365 0.702 0.776 +0 0.670 -0.590 -0.341 1.301 1.427 1.311 -0.117 0.037 2.173 0.945 0.698 1.614 1.107 1.181 -0.133 0.828 0.000 1.650 0.379 -1.400 0.000 1.215 0.994 1.293 1.216 1.767 1.091 0.928 +0 0.745 1.082 0.093 0.862 -0.011 1.527 1.023 0.351 2.173 1.120 1.059 -0.800 0.000 2.215 0.856 -1.621 2.548 0.500 -1.609 1.126 0.000 0.211 2.084 0.983 1.031 2.245 1.946 1.884 +0 0.415 -0.907 -0.839 2.005 -1.355 1.321 0.471 0.721 0.000 0.954 -0.345 0.520 0.000 1.389 0.404 -0.690 2.548 0.683 -0.865 -1.484 3.102 1.107 1.157 0.996 1.834 0.777 1.126 1.529 +1 1.227 0.815 -0.708 0.335 1.163 0.578 0.851 1.294 0.000 0.674 -0.251 -0.160 2.215 1.387 0.129 -1.269 1.274 1.888 1.167 0.671 0.000 0.957 1.198 0.989 0.786 0.889 0.975 0.847 +0 0.795 0.534 1.104 0.866 -0.139 0.806 0.759 -0.317 2.173 0.592 -1.381 1.566 0.000 0.452 1.040 0.326 0.000 1.178 -0.078 1.735 3.102 0.575 0.660 1.034 0.760 1.092 0.965 0.876 +1 0.642 -0.568 -1.703 0.553 -1.021 1.315 0.048 1.187 0.000 1.425 0.561 -0.116 2.215 0.869 1.417 -0.693 2.548 0.983 -0.299 -0.894 0.000 0.836 1.155 0.989 0.776 0.842 0.932 0.829 +0 0.502 -1.035 1.401 0.729 0.143 1.650 -0.443 0.879 2.173 1.418 -0.075 -1.115 2.215 1.266 1.159 -0.756 0.000 1.375 -1.834 -0.846 0.000 0.959 1.425 0.986 1.320 2.232 1.440 1.217 +1 0.709 0.823 0.063 1.784 0.816 0.445 1.439 0.295 0.000 0.652 1.119 -1.560 
2.215 0.754 1.515 -1.219 0.000 1.406 0.467 -1.062 3.102 1.025 0.861 0.987 0.886 0.456 0.757 0.710 +1 1.118 -0.684 -0.217 1.177 0.243 0.403 -1.348 -0.891 0.000 0.776 0.898 1.479 2.215 0.717 -1.001 1.353 2.548 0.737 -0.216 1.406 0.000 0.875 1.086 0.979 0.885 0.950 0.880 0.805 +1 0.575 0.350 -0.243 0.704 -1.117 0.716 -0.082 -0.387 0.000 0.881 0.353 0.559 1.107 0.793 -0.565 -1.202 0.000 1.309 0.822 1.352 3.102 0.883 0.860 0.983 0.955 0.705 0.768 0.679 +1 0.600 0.338 -0.789 0.593 0.388 0.952 0.420 1.694 0.000 1.041 0.291 -0.225 2.215 1.086 0.482 1.211 0.000 1.124 2.349 -0.661 0.000 0.780 0.847 0.990 0.712 0.853 0.880 0.801 +1 0.340 1.440 0.456 0.832 -1.118 0.635 0.450 1.291 2.173 1.549 0.366 0.139 1.107 0.827 0.177 -0.527 0.000 1.158 -0.757 -1.636 0.000 1.103 1.007 0.989 0.909 1.259 0.942 0.799 +0 0.862 -2.244 -0.375 1.042 -0.309 0.776 0.521 1.532 2.173 0.557 -1.370 -1.230 0.000 1.166 1.242 0.550 0.000 1.266 0.879 -1.254 3.102 1.132 1.048 1.003 1.736 0.678 1.384 1.111 +1 2.773 0.260 -0.638 1.701 -1.050 1.092 0.722 0.751 2.173 0.543 -0.389 -1.650 0.000 0.883 1.254 1.089 0.000 0.485 1.189 0.500 3.102 0.752 0.832 1.089 1.824 0.325 1.162 1.091 +0 0.816 1.420 -0.547 0.711 -0.983 0.652 0.841 -1.592 1.087 1.545 -1.595 0.497 0.000 0.753 -1.152 -0.153 0.000 0.843 -1.168 -1.641 3.102 0.966 0.931 0.983 1.011 1.082 1.337 2.137 +0 1.834 -0.687 0.012 0.957 0.474 1.776 1.074 -1.559 0.000 1.465 -0.097 -0.123 2.215 1.096 -2.023 1.515 0.000 1.209 -1.077 -0.136 1.551 0.830 0.811 0.988 0.901 0.761 0.966 0.860 +0 0.561 -1.760 -1.367 0.645 0.888 1.158 -0.472 -0.313 1.087 1.187 -0.356 1.701 0.000 1.095 -0.723 1.349 0.000 0.955 -1.614 0.056 0.000 1.034 0.862 0.980 1.017 0.783 0.851 0.766 +0 1.046 1.144 -0.362 1.288 -0.562 0.787 -1.168 1.053 1.087 0.687 -1.225 -1.687 0.000 0.760 0.828 1.108 2.548 1.014 -0.449 -0.392 0.000 1.078 0.981 0.995 0.976 1.202 1.739 1.575 +0 0.847 -0.348 -0.207 1.067 -1.039 0.582 -1.844 1.412 0.000 1.251 -1.160 -0.930 2.215 1.071 0.508 0.587 0.000 2.035 -0.488 0.878 3.102 2.372 1.436 0.984 0.918 1.504 1.231 1.099 +1 0.840 0.073 0.640 1.419 0.600 0.345 -0.144 1.611 2.173 1.259 -0.909 -1.073 2.215 0.474 -1.535 -1.588 0.000 1.147 -1.068 -0.142 0.000 0.801 0.781 0.987 0.757 0.752 0.880 0.762 +1 1.074 -0.135 0.069 0.593 0.888 0.989 -0.873 -0.524 2.173 0.599 1.102 0.970 0.000 0.794 1.369 1.713 0.000 1.083 0.394 -1.603 3.102 0.685 0.600 0.986 0.899 1.208 1.125 1.006 +0 0.953 -1.059 -0.864 0.925 -0.357 1.012 -0.556 0.423 2.173 0.796 0.359 1.715 0.000 0.465 -0.881 -1.696 2.548 0.445 -0.174 1.135 0.000 0.709 1.159 0.985 0.661 0.824 0.840 0.910 +0 1.519 0.204 -1.361 1.493 -1.495 1.843 1.109 0.476 2.173 0.903 0.248 -0.915 2.215 1.581 0.730 0.792 0.000 0.992 1.545 -0.194 0.000 0.914 0.863 0.991 2.015 1.992 1.435 1.141 +1 0.439 0.988 0.008 2.500 -0.421 0.509 1.472 0.973 0.000 0.772 0.751 1.632 2.215 0.779 1.283 1.725 2.548 0.508 0.293 -1.286 0.000 0.835 0.632 0.991 0.982 0.276 0.917 0.804 +1 1.362 1.360 0.778 1.545 1.035 1.002 0.787 -0.860 2.173 0.820 0.859 -0.323 0.000 0.508 -0.438 -0.073 0.000 1.133 0.204 1.614 3.102 0.762 0.855 0.990 1.458 0.945 0.993 0.903 +1 0.639 1.330 -0.725 1.630 1.657 2.171 1.906 0.107 0.000 1.605 0.681 1.631 2.215 0.788 1.743 -1.477 0.000 0.743 -0.782 -1.331 0.000 1.661 1.239 1.185 1.403 1.564 1.090 1.011 +1 1.780 0.330 -0.093 0.331 -1.100 1.341 -0.144 1.292 2.173 1.423 1.336 -0.205 0.000 1.350 -0.181 -1.256 1.274 0.966 1.193 1.495 0.000 1.317 1.663 0.991 1.412 1.253 1.535 1.259 +1 0.393 1.365 1.576 0.792 -1.182 1.192 0.668 -0.137 2.173 1.072 0.375 1.395 0.000 0.586 -0.489 0.568 
2.548 0.704 -0.223 1.707 0.000 0.965 0.737 0.998 1.088 0.912 0.823 0.762 +1 0.311 -1.178 0.766 0.660 -1.371 0.764 -0.327 1.188 2.173 1.232 -0.753 -0.274 2.215 0.312 1.222 0.967 0.000 0.798 0.346 -0.818 0.000 0.611 0.925 0.987 0.742 1.420 0.850 0.725 +1 1.192 -1.240 -0.664 1.233 1.285 0.531 -0.545 -0.138 2.173 0.855 -1.865 1.417 0.000 0.971 -1.377 0.035 0.000 0.952 -0.522 -1.542 1.551 1.355 0.991 1.651 1.034 0.718 0.772 0.777 +0 0.435 0.373 -1.099 2.466 -0.458 1.032 0.183 0.983 1.087 0.601 -0.639 1.715 0.000 1.078 1.516 -1.276 0.000 1.663 0.485 0.669 3.102 1.857 1.390 0.989 1.574 0.477 1.100 1.123 +0 0.489 -0.560 -0.287 1.635 -0.867 0.447 0.430 0.168 2.173 1.026 0.881 1.484 0.000 0.699 -0.317 0.185 2.548 0.800 -0.090 1.413 0.000 0.629 0.864 0.987 1.182 0.280 0.809 1.025 +1 0.805 1.554 -0.598 1.345 -1.659 0.490 1.458 -0.087 0.000 0.746 0.212 -1.295 2.215 1.509 0.636 0.379 0.000 1.031 0.314 1.328 1.551 0.879 0.896 1.177 0.900 0.559 0.776 0.827 +0 1.334 -1.125 1.309 0.263 -0.309 0.487 -1.430 0.135 0.000 0.660 0.410 -1.008 2.215 0.430 -0.294 -1.517 0.000 0.384 0.760 0.142 3.102 0.954 0.962 0.984 0.670 0.407 0.625 0.628 +0 1.466 -1.111 1.287 2.576 1.130 1.982 0.371 -0.394 0.000 0.822 -0.059 1.675 2.215 0.875 1.011 -0.483 0.000 0.419 1.416 -1.234 3.102 0.911 0.837 0.979 1.276 0.591 1.261 1.893 +1 0.869 -0.329 1.598 0.669 -0.111 0.816 -1.098 -0.969 0.000 0.745 -0.341 -0.898 0.000 1.503 0.346 0.979 2.548 1.455 0.121 0.019 3.102 1.038 1.001 1.056 0.722 0.871 0.703 0.655 +1 1.333 1.213 1.469 0.725 -1.231 0.800 0.517 0.058 2.173 0.419 -2.324 -0.905 0.000 0.953 0.766 -1.631 0.000 0.778 0.750 -0.763 3.102 2.419 1.525 0.993 1.095 0.585 1.170 1.129 +0 1.927 1.637 0.422 0.302 0.328 1.063 -0.364 -1.678 0.000 1.436 1.680 -0.055 0.000 2.099 0.430 -1.366 1.274 0.597 -0.325 1.477 0.000 0.357 0.944 1.001 1.507 0.930 0.950 1.069 +1 0.650 0.699 -0.098 0.861 -0.605 1.050 -0.275 1.510 0.000 0.973 0.562 1.205 0.000 1.555 0.493 -0.402 2.548 1.143 -0.856 -0.131 3.102 0.982 1.050 0.986 0.802 0.918 0.996 1.174 +1 0.424 -0.694 1.732 1.077 -1.672 0.697 1.163 -0.407 0.000 0.763 0.606 0.867 2.215 0.746 1.843 0.088 0.000 0.834 -0.409 -1.489 3.102 0.798 0.994 0.989 0.720 0.744 1.057 1.630 +0 1.416 -0.247 1.096 0.743 0.092 0.898 0.816 0.945 2.173 1.650 0.240 -1.197 0.000 1.268 -0.115 -0.495 2.548 1.180 -0.074 -1.668 0.000 0.809 0.967 1.116 0.917 1.438 1.058 0.982 +1 0.895 -1.225 -1.193 1.138 -0.531 0.812 -0.412 1.030 2.173 0.771 0.885 0.526 2.215 0.561 -0.913 -1.698 0.000 0.872 -0.199 -0.563 0.000 0.725 0.955 0.993 1.335 0.976 1.333 1.023 +1 0.852 0.515 0.282 0.285 -0.301 0.696 -0.385 -0.909 2.173 0.706 0.717 -0.185 0.000 1.395 -0.472 1.702 2.548 0.906 1.105 0.834 0.000 0.884 1.115 0.990 0.910 0.874 0.927 0.814 +1 0.725 -1.110 -0.840 0.824 1.371 0.477 -0.265 0.721 0.000 1.081 -0.928 -1.331 2.215 0.996 2.482 -0.203 0.000 2.149 0.164 0.240 0.000 0.746 0.612 0.989 0.649 0.761 0.846 0.767 +0 1.464 -0.882 -1.234 1.799 1.631 0.634 -0.446 0.391 0.000 0.789 0.238 1.689 2.215 1.198 -0.880 -0.195 0.000 1.567 2.331 0.760 0.000 0.890 0.919 1.193 0.906 1.225 1.078 1.053 +1 1.383 -0.559 -1.567 2.071 -1.593 1.012 1.868 0.643 0.000 0.542 -0.730 -0.841 0.000 1.768 -0.991 -0.381 2.548 0.854 -0.274 0.621 0.000 0.879 0.797 0.995 1.582 1.411 1.205 0.962 +1 0.685 -2.266 0.131 1.021 -1.211 1.119 -1.195 1.413 2.173 1.132 -1.278 0.217 2.215 0.847 -1.743 -0.462 0.000 1.192 -1.472 -0.940 0.000 0.522 0.995 1.084 0.993 1.463 1.023 0.818 +0 0.690 0.819 0.605 0.214 0.684 0.735 -0.101 -1.670 0.000 1.343 0.341 -0.849 2.215 1.068 -0.258 0.192 2.548 0.756 2.064 
0.693 0.000 0.275 1.230 0.982 0.994 1.106 1.073 0.943 +1 1.682 -0.221 1.179 1.701 0.565 2.008 -0.450 -1.437 0.000 1.325 -0.427 0.268 2.215 1.312 0.222 0.744 0.000 2.042 0.521 -1.248 0.000 0.918 1.018 1.231 0.952 0.832 1.140 1.144 +1 0.323 1.083 -1.132 1.769 -0.368 0.976 -0.403 0.707 1.087 1.034 -0.152 1.513 2.215 0.701 -0.297 -0.541 0.000 0.591 -0.383 -1.178 0.000 0.518 0.819 0.993 1.090 0.998 0.960 0.753 +1 0.437 1.255 0.864 1.663 -1.110 1.156 1.154 0.529 2.173 0.984 0.059 1.704 0.000 0.507 0.738 -0.949 0.000 0.650 0.424 -0.420 3.102 0.848 0.660 1.156 1.240 0.757 0.845 0.831 +0 0.418 0.510 1.657 1.129 0.147 0.747 1.242 -0.132 0.000 1.307 1.336 1.542 2.215 1.004 0.559 -0.694 0.000 1.248 -0.149 -1.172 1.551 0.910 0.986 0.987 0.934 1.234 0.998 0.850 +0 0.825 1.625 1.420 0.976 0.629 0.939 0.213 -0.717 2.173 0.309 0.544 -0.961 2.215 0.593 2.447 0.626 0.000 0.702 -0.144 0.584 0.000 0.809 0.852 0.986 0.660 0.220 0.774 0.675 +1 0.616 0.460 -0.636 0.517 1.071 0.792 0.634 0.672 0.000 1.066 0.274 0.166 0.000 0.908 -0.077 -1.740 2.548 1.324 0.862 -0.994 3.102 0.930 1.098 0.988 0.771 0.719 0.858 0.754 +0 1.706 -0.782 1.019 0.541 0.232 0.697 -0.927 1.636 1.087 0.511 0.559 -1.243 0.000 0.867 1.762 -0.874 0.000 1.260 -1.286 -0.111 0.000 0.820 0.838 0.987 0.794 1.072 1.034 1.129 +0 1.156 0.245 0.911 0.656 -0.040 0.733 1.714 -1.412 0.000 0.918 -1.427 -1.586 0.000 1.412 -1.054 -0.314 2.548 2.424 -0.701 0.651 3.102 0.825 0.909 0.982 0.763 1.104 0.854 0.850 +1 1.053 0.575 -0.351 0.275 1.049 0.869 1.137 -1.556 2.173 0.784 -0.192 1.121 0.000 0.737 1.381 0.257 0.000 0.803 -0.695 0.205 0.000 0.830 0.865 0.985 1.014 0.790 0.875 0.771 +0 1.064 -0.335 -0.335 0.990 1.396 2.076 -0.748 -0.152 1.087 1.820 -0.707 1.583 0.000 1.372 -0.310 1.299 0.000 1.124 -1.059 -1.269 3.102 0.773 0.891 1.422 1.354 1.430 1.512 1.234 +1 0.401 0.609 -0.829 0.718 0.151 0.636 2.561 -0.757 0.000 1.013 0.470 1.396 0.000 1.320 -0.779 -0.747 2.548 1.228 -1.153 1.466 3.102 0.810 1.017 0.988 1.630 0.924 1.427 1.199 +0 0.664 -0.231 0.695 0.483 -1.531 0.798 0.855 -0.095 2.173 0.963 -0.941 1.474 2.215 0.904 -2.099 -1.225 0.000 0.445 -1.466 1.006 0.000 0.662 0.788 0.992 0.823 1.858 1.362 1.151 +0 0.729 -0.868 0.536 0.921 -0.171 1.066 0.649 0.139 2.173 2.081 -0.874 -1.616 2.215 1.005 -2.073 -0.629 0.000 1.344 -1.217 1.327 0.000 0.975 1.272 0.985 0.808 2.888 1.687 1.344 +1 1.248 0.981 0.711 1.717 0.664 0.414 1.352 -0.738 2.173 1.052 1.446 1.714 0.000 0.930 -1.834 -0.531 0.000 0.776 1.133 0.110 0.000 1.170 0.941 0.996 0.979 0.628 0.907 0.829 +0 0.844 -0.803 1.380 0.253 -0.546 1.404 -0.079 -1.584 0.000 1.493 -0.961 0.027 1.107 1.636 -0.029 0.043 2.548 0.735 0.553 1.268 0.000 1.025 1.524 0.992 1.024 0.826 1.322 1.043 +1 0.479 -0.725 0.590 2.003 0.007 0.902 2.292 -0.915 0.000 1.512 1.013 1.357 2.215 0.566 0.358 1.408 0.000 0.880 1.114 0.277 3.102 0.642 0.715 0.984 1.601 0.875 1.831 1.428 +1 0.537 -0.241 0.591 0.270 -0.989 1.145 -1.268 0.819 2.173 0.802 -0.345 -1.482 0.000 0.727 -0.691 -0.805 2.548 0.413 -0.911 -1.612 0.000 0.903 0.721 0.992 1.472 1.170 1.035 0.891 +1 0.612 -1.829 1.678 0.552 -0.283 0.943 -0.557 -0.789 2.173 1.378 -0.989 1.252 2.215 0.836 -2.241 0.557 0.000 0.931 -0.463 0.095 0.000 0.916 0.970 0.991 0.804 1.662 1.026 0.835 +0 2.949 0.811 -0.457 0.658 1.456 0.995 0.879 -1.022 2.173 1.296 0.246 1.119 0.000 1.144 0.656 1.451 2.548 2.096 0.860 0.707 0.000 1.158 0.851 1.907 1.159 1.057 1.042 1.134 +0 0.891 -0.762 0.792 0.829 -1.319 0.535 -0.727 -0.906 2.173 0.407 -2.079 -1.269 0.000 0.856 -1.094 0.412 2.548 0.821 -1.518 0.885 0.000 0.873 0.742 
1.126 0.719 0.807 0.617 0.578 +0 1.248 -0.447 -0.304 0.772 -1.134 1.085 0.649 1.356 0.000 1.004 0.430 0.639 2.215 1.350 0.924 -0.639 0.000 0.595 -0.743 -1.520 3.102 2.156 1.367 0.988 1.003 0.820 0.976 0.944 +1 0.420 2.058 -0.506 1.262 0.833 0.695 0.002 -0.567 2.173 0.373 0.776 -0.834 0.000 0.953 -1.615 0.929 0.000 0.737 -1.222 -1.297 1.551 1.781 1.078 0.989 1.698 0.764 1.604 1.736 +0 1.374 0.140 -0.684 0.030 -1.145 0.430 -0.240 -1.364 0.000 1.293 -0.216 0.566 0.000 1.593 0.428 1.125 2.548 0.683 1.307 -1.154 0.000 0.871 0.962 0.657 0.466 1.018 0.739 0.679 +0 1.773 0.222 -0.089 1.547 -0.695 0.532 -1.432 1.563 0.000 1.905 0.669 1.082 0.000 0.883 0.030 -1.064 2.548 0.748 -0.810 -0.657 0.000 0.905 0.828 1.191 0.748 0.379 0.576 0.773 +0 0.598 -0.766 -1.703 0.239 -1.381 0.277 -0.584 0.222 1.087 0.526 0.597 -0.065 2.215 0.403 2.198 -1.191 0.000 0.674 0.695 0.756 0.000 0.756 0.831 0.974 0.688 0.387 0.546 0.575 +1 1.806 0.111 -0.461 0.862 0.321 1.045 1.139 1.127 2.173 0.375 1.051 -0.670 0.000 0.593 0.224 1.574 1.274 0.460 2.137 -1.490 0.000 0.571 0.888 1.120 0.832 0.598 0.971 0.844 +1 1.190 -0.245 -1.345 1.290 -1.183 1.205 0.602 0.877 2.173 0.339 2.125 0.830 0.000 1.233 0.268 -0.045 2.548 0.626 -0.883 0.082 0.000 0.953 1.060 0.977 1.025 1.148 1.084 0.890 +0 0.535 1.078 -0.160 1.198 0.640 1.029 0.636 1.291 2.173 0.966 1.269 -0.633 2.215 1.299 -0.293 -0.562 0.000 1.166 0.221 -1.674 0.000 1.072 1.021 0.992 1.192 1.529 1.014 0.878 +0 2.409 -0.060 -1.559 1.080 0.099 0.602 2.163 0.643 0.000 0.974 -0.823 0.122 2.215 1.067 -0.799 -1.157 2.548 0.924 -0.568 1.211 0.000 0.818 0.768 2.228 1.479 0.990 1.047 0.892 +1 0.501 0.046 0.612 0.933 -1.190 0.799 0.680 0.679 0.000 0.637 1.285 -1.433 0.000 0.540 1.341 -0.400 0.000 1.290 0.181 0.276 3.102 1.086 0.876 0.988 0.718 0.597 0.587 0.643 +0 0.670 -1.361 1.200 0.293 -0.196 0.519 1.010 -1.116 0.000 0.770 -0.157 1.563 2.215 0.690 0.707 0.000 0.000 1.348 -1.675 0.352 0.000 0.919 0.959 0.985 0.691 0.722 0.807 0.742 +1 1.531 -0.285 -1.141 0.560 0.651 1.803 0.620 0.358 1.087 1.910 0.260 -1.711 0.000 0.619 0.571 -0.380 2.548 0.576 1.330 -0.730 0.000 1.438 1.061 1.282 1.589 0.813 1.254 1.131 +1 0.755 0.479 0.080 1.012 1.160 0.525 -0.201 -1.567 1.087 0.495 1.542 -1.523 0.000 0.815 -1.232 -0.201 2.548 1.126 1.008 -0.029 0.000 0.966 1.011 1.002 0.997 0.914 0.976 0.852 +1 0.744 0.002 -0.444 0.592 0.883 1.899 -0.348 0.423 0.000 0.909 2.561 -1.191 0.000 2.154 0.573 -1.670 2.548 1.717 -0.449 -0.339 0.000 0.872 0.823 0.982 1.118 0.866 0.843 0.765 +1 0.547 -0.885 -1.384 1.065 0.113 0.602 -0.335 0.744 1.087 1.408 -0.149 1.494 0.000 1.212 0.808 -0.582 1.274 0.883 -0.344 -1.045 0.000 1.111 0.976 1.031 1.061 1.209 0.931 0.868 +1 1.454 0.157 0.903 1.814 -0.021 0.765 1.432 -1.321 1.087 0.954 0.535 -0.005 0.000 1.469 -1.334 -0.952 0.000 0.915 0.615 -1.529 3.102 2.556 1.641 1.663 1.618 0.360 1.439 1.342 +0 1.427 0.261 0.237 0.230 -0.665 1.018 0.458 -1.631 2.173 0.683 2.535 0.201 0.000 0.795 0.912 -1.164 2.548 0.508 0.847 0.217 0.000 0.687 0.924 0.990 0.768 0.551 0.787 0.711 +1 0.628 -1.305 0.374 1.051 1.120 0.369 -0.104 1.621 0.000 0.969 -0.148 -1.204 2.215 1.134 0.777 -0.680 0.000 1.242 0.774 0.454 0.000 1.057 1.013 0.984 0.615 0.709 0.899 1.028 +0 0.447 -0.446 -1.740 0.436 -1.175 1.253 0.355 -0.135 2.173 0.985 -2.165 -1.653 0.000 1.154 -1.600 1.051 0.000 1.134 -1.708 -0.985 0.000 0.790 1.485 0.992 1.733 0.572 1.657 1.343 +1 1.326 1.209 1.193 0.864 0.525 0.442 -0.085 -0.105 0.000 0.838 -0.694 1.523 2.215 0.991 -0.546 -0.533 2.548 1.257 -1.497 -0.950 0.000 0.672 0.954 0.989 1.177 0.931 0.976 
0.822 +1 1.126 -0.333 -1.289 0.469 -1.625 1.061 -1.032 0.407 2.173 0.793 -0.931 1.538 2.215 0.416 -0.053 -1.005 0.000 0.628 0.498 0.029 0.000 0.490 0.899 0.981 0.635 1.151 0.860 0.752 +1 1.216 -2.332 1.277 0.179 -1.732 0.591 -1.144 -0.693 0.000 1.010 -0.393 -1.199 2.215 0.496 0.410 1.523 2.548 0.878 -0.578 0.170 0.000 0.904 0.765 0.981 1.140 0.582 0.862 0.776 +1 1.261 -0.304 0.338 1.056 -0.260 2.285 -2.057 0.603 0.000 2.335 -0.612 -0.983 2.215 1.680 -0.733 -1.497 2.548 1.715 -0.059 -1.702 0.000 4.440 2.977 0.993 1.358 0.958 2.343 1.854 +0 0.403 -1.388 0.173 0.641 1.020 0.539 0.510 -1.033 0.000 0.740 -1.074 -1.261 2.215 1.017 0.485 1.201 2.548 0.666 -0.358 1.146 0.000 0.952 0.916 0.996 0.843 1.118 0.905 0.803 +1 0.822 0.391 -1.279 0.727 1.719 0.538 0.858 -0.033 0.000 0.794 -0.250 1.243 1.107 1.194 -0.571 0.242 2.548 0.988 1.036 -0.895 0.000 0.808 1.078 0.990 0.948 0.835 0.950 0.878 +1 1.644 0.072 -0.242 0.865 -0.047 1.376 0.837 -1.131 1.087 0.796 0.724 1.371 2.215 0.977 0.678 0.713 0.000 0.443 0.967 0.446 0.000 0.921 0.875 0.976 1.298 1.195 1.049 0.973 +0 0.846 -0.177 0.966 0.747 -1.374 1.899 -0.695 0.158 0.000 2.111 0.577 -1.351 0.000 0.368 -1.144 -0.027 0.000 1.817 -0.982 1.235 3.102 0.491 0.730 0.989 0.694 0.519 0.800 0.770 +0 0.565 0.888 -1.468 0.909 -0.716 0.725 0.752 1.159 2.173 0.665 -0.011 -1.322 0.000 1.704 0.247 -0.119 2.548 0.686 -0.947 0.826 0.000 0.974 0.991 0.984 0.839 1.311 0.883 0.790 +1 1.101 -0.071 -0.113 0.707 1.039 0.621 -0.828 -0.119 0.000 1.051 0.011 1.564 2.215 0.592 1.143 -1.206 2.548 0.508 -1.249 -1.307 0.000 0.807 1.045 1.054 0.891 0.750 0.830 0.756 +0 0.546 -0.495 0.337 1.251 -1.362 0.963 -0.128 1.330 0.000 1.587 -0.616 -0.327 2.215 1.991 1.908 0.143 0.000 1.093 -1.047 -1.531 3.102 0.766 0.841 1.144 0.990 1.117 1.104 0.914 +0 0.594 0.790 -0.860 1.312 1.255 0.920 -0.057 -0.961 2.173 1.263 0.288 0.690 2.215 0.692 0.693 -0.541 0.000 0.668 1.808 0.875 0.000 0.919 1.015 1.154 1.062 1.606 0.991 0.875 +0 1.799 -1.747 -0.641 0.529 1.738 0.625 -1.521 1.098 0.000 0.530 -0.657 -0.260 2.215 1.192 -0.706 -1.529 1.274 1.121 -2.079 0.597 0.000 0.848 0.966 1.135 0.924 0.770 0.811 0.813 +0 1.904 0.013 0.734 0.936 1.332 0.468 -2.108 -1.264 0.000 1.020 -0.022 -0.804 2.215 0.339 -1.191 0.684 0.000 0.613 -0.739 -0.135 0.000 0.882 1.101 0.986 0.752 0.354 0.799 0.908 +0 0.529 -1.709 1.061 0.860 0.300 0.696 2.226 1.022 0.000 0.944 1.794 -0.681 0.000 0.520 -2.153 -0.440 0.000 0.897 0.001 -1.046 1.551 0.983 0.932 0.996 0.745 0.574 0.633 0.616 +1 1.948 0.490 -1.236 0.703 -0.706 0.718 0.647 0.421 1.087 0.629 1.184 1.540 0.000 0.462 1.617 0.284 0.000 1.269 0.373 1.042 1.551 0.784 0.791 0.999 0.948 0.546 0.857 0.751 +1 0.450 1.186 0.304 1.164 1.652 0.779 -0.545 -1.735 1.087 1.602 0.800 -0.271 2.215 0.757 -0.473 -0.252 0.000 1.073 0.488 1.307 0.000 1.138 0.988 0.985 1.173 2.006 1.272 1.067 +0 2.894 -0.523 1.368 1.396 1.541 0.985 0.182 -0.838 0.000 1.697 0.172 -0.472 0.000 1.682 -0.226 0.784 0.000 1.072 -0.376 -0.161 1.551 0.933 0.688 1.001 0.897 0.217 0.808 0.874 +1 0.736 -0.112 -0.629 0.656 1.114 0.894 0.153 -1.400 0.000 1.739 0.729 0.414 2.215 0.980 0.822 -0.823 0.000 0.886 1.090 -0.520 3.102 0.944 0.885 0.989 0.939 0.893 1.000 0.834 +0 1.233 -1.565 -0.693 0.616 -1.021 0.596 -0.842 1.698 0.000 0.840 -0.009 0.860 2.215 1.340 -0.875 0.463 2.548 0.714 -0.878 -0.786 0.000 0.788 0.934 0.985 1.117 0.683 0.865 0.771 +0 0.342 -1.382 0.717 1.717 -0.962 0.557 2.094 0.791 0.000 0.383 -0.280 1.280 2.215 0.461 0.811 0.101 0.000 0.820 -0.220 -1.581 3.102 0.842 0.665 1.059 0.774 0.270 0.560 0.601 +0 1.008 
0.642 0.945 0.575 -0.294 0.886 0.419 -1.434 2.173 0.812 0.100 0.478 2.215 0.567 0.986 -0.167 0.000 0.389 0.025 -1.192 0.000 0.503 0.663 0.985 0.595 1.250 0.750 0.611 +1 1.656 0.172 -0.791 0.337 0.718 0.584 -1.250 1.354 1.087 0.248 -0.550 1.706 0.000 0.756 -0.462 -0.235 0.000 0.470 0.660 1.166 3.102 0.655 0.722 1.012 0.618 0.681 0.734 0.612 +1 0.587 -0.346 -1.266 0.921 0.391 0.774 -0.484 1.570 0.000 0.799 0.712 -1.272 0.000 0.605 2.459 -0.785 0.000 0.704 -0.364 0.292 3.102 0.836 1.027 1.016 0.568 0.265 0.599 0.723 +1 1.068 -0.668 0.501 0.071 -0.769 0.378 -1.749 -1.357 0.000 0.644 -2.378 1.233 0.000 1.374 -0.612 -0.577 2.548 0.435 -1.289 1.625 0.000 0.840 0.769 0.990 0.834 0.606 0.769 0.804 +0 0.523 -1.018 -1.124 0.510 1.477 0.908 -0.347 0.935 1.087 0.799 -0.304 -0.601 0.000 0.956 1.016 -0.948 2.548 0.932 0.004 0.435 0.000 0.921 1.030 0.983 0.751 1.480 0.889 0.766 +0 0.345 1.548 0.043 1.686 -1.351 1.395 -0.953 1.297 0.000 0.882 0.610 0.186 2.215 1.432 0.071 -0.646 0.000 0.883 -0.240 0.844 0.000 0.891 0.845 1.004 1.046 0.771 0.952 1.328 +1 2.088 1.289 0.858 0.958 0.457 0.832 1.065 -0.915 0.000 0.904 0.807 -1.475 2.215 0.612 1.031 -0.298 2.548 0.380 0.065 0.125 0.000 0.824 0.721 1.001 0.765 0.700 0.800 0.785 +0 2.507 -0.757 -0.963 2.588 -0.874 2.588 0.703 0.872 1.087 0.399 0.679 0.585 0.000 0.513 0.764 -1.120 2.548 0.772 -0.089 0.665 0.000 0.304 0.573 0.983 0.830 1.403 2.159 1.557 +1 0.407 -1.400 0.338 1.440 0.913 1.143 -1.127 -1.181 2.173 0.997 0.861 -0.977 0.000 0.678 -0.857 -0.348 0.000 2.496 0.568 0.819 3.102 0.838 1.151 0.982 1.357 2.546 1.371 1.172 +0 0.628 -1.674 -0.693 0.793 1.374 0.653 -0.211 -0.844 0.000 1.081 0.371 0.898 0.000 0.811 -0.322 0.071 2.548 0.682 -0.001 1.643 1.551 0.793 0.744 0.987 0.798 0.571 0.694 0.686 +1 1.464 0.279 -0.404 0.690 -1.184 1.603 0.269 0.842 2.173 1.519 -1.421 -0.299 0.000 1.235 0.757 -0.221 0.000 3.206 -1.055 -1.630 0.000 0.949 1.021 0.990 1.455 0.852 1.320 1.224 +0 0.759 -0.775 1.341 0.344 -1.499 0.698 -0.226 0.215 2.173 0.617 -1.174 1.444 2.215 1.060 1.074 -0.358 0.000 0.711 0.508 -0.504 0.000 0.304 0.804 0.993 0.699 0.995 0.876 0.789 +0 0.829 1.170 -0.587 0.902 -1.469 1.067 0.519 -1.454 1.087 1.056 1.053 1.045 0.000 1.290 1.516 -0.021 0.000 1.033 0.535 0.481 3.102 1.282 0.819 0.986 0.716 1.096 0.801 0.734 +1 1.283 -0.249 0.742 1.618 0.947 0.486 -1.422 -0.534 0.000 1.031 -0.424 -1.070 0.000 0.535 -1.120 0.696 0.000 0.414 -1.397 -1.653 0.000 0.826 0.592 0.993 0.820 0.243 0.618 0.616 +1 0.492 0.549 0.341 1.355 0.763 2.219 -1.413 -0.549 0.000 2.157 0.611 1.441 2.215 0.788 0.099 -1.421 0.000 1.411 0.353 0.957 3.102 2.621 2.345 0.979 1.323 0.686 2.305 2.299 +1 1.218 0.527 0.882 0.262 -0.059 0.481 1.531 -1.516 0.000 0.338 1.167 0.229 0.000 0.588 -1.582 -1.327 2.548 0.570 0.371 0.122 3.102 0.868 0.627 0.990 0.934 0.724 0.859 0.771 +0 0.981 0.952 0.703 0.816 1.731 0.980 -0.480 1.596 2.173 0.511 -0.823 -0.139 0.000 1.812 0.719 -0.131 2.548 0.722 -0.029 -1.287 0.000 0.756 0.959 0.991 1.108 2.009 1.119 0.945 +1 0.995 1.532 -0.700 0.369 -1.635 1.041 1.730 1.485 0.000 1.488 1.033 0.378 1.107 0.679 -0.078 0.572 0.000 1.365 0.476 -1.010 0.000 1.003 0.907 0.983 0.512 0.831 0.687 0.634 +1 1.149 0.211 0.669 0.173 1.675 2.568 -0.941 -0.965 0.000 2.653 -0.095 1.242 1.107 1.790 -1.989 0.370 0.000 0.947 -0.686 0.260 3.102 3.344 2.009 0.989 0.930 1.229 2.008 1.541 +1 0.368 -0.662 -0.424 0.591 -1.015 0.803 0.723 1.073 2.173 1.028 0.283 0.235 2.215 0.846 -1.172 -0.593 0.000 0.887 -0.107 -1.491 0.000 0.906 1.085 0.985 1.970 0.963 1.474 1.180 +0 0.790 -0.202 0.020 1.301 -1.325 
0.967 0.183 0.786 2.173 0.371 1.794 0.190 0.000 1.071 0.977 -1.039 2.548 0.460 0.011 -1.575 0.000 0.780 0.872 1.315 0.933 1.392 0.950 0.803 +0 1.416 0.944 -0.630 2.342 -1.052 1.215 1.634 0.733 0.000 0.783 0.834 0.494 2.215 1.201 0.663 -1.574 1.274 1.070 1.429 1.226 0.000 0.749 0.736 0.995 0.835 0.989 0.888 1.110 +1 0.796 1.070 0.404 1.290 0.998 0.995 -0.250 -0.249 0.000 0.904 -2.881 -1.447 0.000 0.993 1.766 -0.909 0.000 0.790 -1.002 -1.703 0.000 1.129 1.397 0.995 0.669 0.554 1.288 1.448 +1 0.676 -1.062 -1.710 1.258 0.175 0.790 -0.031 -0.911 0.000 0.296 0.387 1.288 0.000 0.668 1.123 -1.587 0.000 1.435 -0.713 0.888 3.102 0.967 0.927 1.267 0.755 0.442 0.717 0.717 +1 1.412 -1.356 -0.659 0.778 -1.675 1.062 -1.669 0.612 0.000 0.729 -1.263 1.508 2.215 0.269 -1.139 0.246 2.548 0.468 -0.557 -0.825 0.000 1.107 0.894 1.150 0.606 0.427 0.541 0.652 +1 1.847 0.374 1.454 1.345 0.977 0.724 -0.296 -0.401 2.173 0.630 0.358 -0.516 2.215 0.458 0.211 0.708 0.000 1.136 1.224 -0.794 0.000 0.937 0.961 0.993 1.042 0.353 0.979 0.850 +0 1.673 1.448 0.804 0.860 -1.162 0.598 0.251 -1.130 2.173 0.528 1.805 -1.403 0.000 0.603 1.127 0.326 0.000 1.042 0.129 -0.089 3.102 0.907 0.905 1.629 1.075 0.675 0.895 0.771 +1 1.677 0.192 0.934 1.398 0.205 0.940 0.460 -0.784 2.173 0.517 1.215 -1.356 2.215 1.036 -1.322 -0.710 0.000 0.868 -0.274 1.541 0.000 0.963 1.007 1.294 1.079 0.653 0.988 0.855 +0 0.727 -1.847 0.471 0.702 1.324 0.706 -0.526 -1.372 2.173 0.729 -0.646 0.881 1.107 1.158 -0.217 -0.447 0.000 0.751 0.423 -0.072 0.000 0.889 0.888 0.994 0.897 0.948 0.693 0.710 +0 0.701 0.098 1.256 1.258 0.532 0.731 -0.094 -0.565 1.087 0.702 1.544 0.820 0.000 0.833 0.119 -1.428 2.548 0.943 0.991 -1.127 0.000 1.064 0.910 0.995 1.061 0.691 0.815 0.792 +0 2.237 -0.619 -0.890 0.918 -0.631 0.446 0.364 1.217 0.000 0.690 -0.784 1.407 0.000 1.159 -0.366 0.217 2.548 0.548 -0.579 1.099 3.102 0.759 0.899 0.998 0.772 0.443 0.732 0.810 +0 0.297 2.165 -1.474 0.794 -0.278 1.329 0.977 -1.032 2.173 0.900 -0.297 1.494 2.215 2.527 0.610 0.663 0.000 0.535 -2.451 -0.075 0.000 3.883 2.299 0.977 0.830 1.656 2.025 1.558 +0 1.389 0.383 0.957 1.272 1.351 0.955 1.338 0.420 1.087 1.271 0.848 -1.272 0.000 1.451 1.002 -0.790 0.000 1.699 0.142 -0.515 3.102 0.909 0.930 0.986 1.341 1.313 1.107 1.174 +0 0.725 0.315 -0.538 0.329 0.509 0.548 -0.241 0.701 0.000 0.426 -0.221 0.341 0.000 0.701 -0.439 -0.725 0.000 0.880 -0.445 1.384 3.102 1.078 0.758 0.997 0.644 0.215 0.493 0.513 +1 1.614 -0.040 0.156 0.205 1.047 1.127 -0.891 -1.410 2.173 0.653 -0.648 -0.415 2.215 0.786 -0.380 0.523 0.000 0.901 -0.419 1.629 0.000 0.780 0.978 0.981 0.646 0.998 0.881 0.755 +0 0.451 1.747 -1.208 0.963 0.886 1.021 0.527 -0.594 1.087 0.865 -0.320 1.191 0.000 0.459 -0.396 -0.858 2.548 0.929 -1.356 1.138 0.000 0.815 0.729 0.987 1.005 0.473 0.956 0.933 +1 0.693 1.841 1.348 0.851 -0.082 0.579 0.656 -0.564 2.173 0.563 0.911 1.063 0.000 0.725 -0.691 -1.607 2.548 1.068 0.819 -0.027 0.000 0.841 0.958 1.021 0.857 0.903 0.915 0.778 +1 0.428 -1.772 0.043 0.698 0.888 0.751 -0.714 0.365 0.000 1.367 -0.764 -1.361 2.215 1.041 -0.600 1.279 0.000 1.880 -0.518 -0.592 3.102 0.907 0.951 0.992 0.981 0.932 0.782 0.718 +0 1.904 0.156 0.048 5.442 0.090 3.277 0.743 -1.599 0.000 0.545 0.044 -1.580 0.000 0.514 0.655 0.599 2.548 0.728 0.052 1.050 3.102 1.022 1.054 1.004 0.683 0.242 0.825 1.770 +0 0.411 0.894 -1.290 1.617 0.138 1.047 0.129 -0.611 0.000 1.011 -1.367 -1.736 0.000 1.205 -2.364 1.450 0.000 1.401 -0.166 1.466 3.102 1.160 1.305 1.084 0.651 0.634 1.077 1.461 +0 2.078 1.411 -1.461 0.707 -1.535 1.255 0.319 0.637 0.000 
0.956 -1.826 -0.442 0.000 0.560 0.113 -1.089 2.548 0.693 0.844 0.692 3.102 3.656 2.058 0.978 0.817 0.524 1.277 1.835 +1 0.562 -1.089 1.314 0.554 -1.724 2.335 -0.941 0.020 0.000 2.775 -0.744 1.727 0.000 0.928 -1.303 -0.674 2.548 0.689 -2.390 -1.487 0.000 0.846 1.159 0.997 0.510 0.602 0.735 0.714 +1 1.857 0.990 1.110 0.445 0.480 0.342 1.495 -1.319 0.000 0.724 0.324 -0.304 0.000 0.708 0.780 -0.218 2.548 0.564 -0.859 1.675 0.000 1.020 0.722 0.985 1.243 0.728 0.867 0.813 +1 1.212 -0.959 -0.200 0.712 0.808 0.729 -1.404 -1.149 2.173 0.662 -0.656 0.774 0.000 1.520 -1.081 1.560 1.274 0.376 -2.070 -0.697 0.000 0.921 0.924 1.015 0.969 0.857 0.808 0.715 +0 0.842 0.709 -0.515 0.946 1.355 1.221 -0.023 0.457 2.173 0.885 -0.442 -1.133 0.000 1.267 0.069 -1.664 0.000 1.372 -0.787 -0.455 0.000 0.918 0.826 1.229 1.124 0.846 0.855 0.819 +1 1.134 1.405 1.239 0.979 1.709 1.095 0.324 -0.195 2.173 0.794 0.710 0.311 0.000 1.102 -2.046 -1.182 0.000 0.603 -0.111 0.991 3.102 3.328 1.793 0.997 1.364 0.780 1.404 1.425 +1 1.423 -0.755 1.451 1.630 1.387 0.761 -0.631 -0.400 0.000 0.565 -1.532 1.084 2.215 1.918 -1.922 -0.651 0.000 1.575 -0.301 0.206 3.102 1.053 0.992 0.960 1.188 0.817 0.784 0.862 +0 1.298 -0.523 -1.253 0.836 -0.648 0.648 -0.274 0.957 0.000 0.455 0.101 -1.000 2.215 0.478 -1.291 0.568 2.548 0.457 -0.201 0.522 0.000 0.319 0.652 0.986 0.723 0.642 0.545 0.608 +0 0.350 1.113 0.170 1.392 1.472 0.669 0.734 -0.790 2.173 0.448 -0.229 0.413 0.000 1.239 0.037 1.393 2.548 1.440 0.523 -0.327 0.000 0.975 0.793 0.989 1.058 1.122 0.921 0.868 +1 1.092 -0.204 0.106 1.789 0.860 1.372 -1.326 -0.464 0.000 1.666 0.692 1.537 2.215 0.616 0.270 -1.397 0.000 0.543 0.941 -0.303 0.000 0.598 0.788 1.218 0.922 1.059 1.018 0.823 +0 1.875 -0.424 -0.981 0.872 0.531 0.371 1.855 -1.530 0.000 0.484 0.856 0.658 0.000 0.600 1.330 0.596 2.548 1.430 1.671 0.968 0.000 0.951 1.254 1.734 1.244 1.100 0.885 0.947 +1 2.467 -0.070 -0.110 0.273 -0.264 0.556 -0.208 -1.536 2.173 0.673 -1.756 -1.533 0.000 0.512 -1.221 1.526 0.000 0.721 0.368 1.087 3.102 0.399 0.796 1.001 1.071 0.520 0.751 0.892 +1 0.555 -1.600 0.130 1.224 1.262 0.499 0.901 -1.019 2.173 0.386 -0.421 -1.631 0.000 0.805 0.193 0.574 2.548 1.070 -0.761 -0.546 0.000 0.724 0.815 0.988 0.854 0.831 0.958 0.792 +0 1.078 1.109 -0.011 0.395 -1.106 0.665 1.531 1.702 0.000 0.729 0.889 0.662 2.215 0.590 0.727 -1.366 0.000 0.910 -0.410 -0.192 3.102 0.597 1.047 0.986 0.728 0.759 0.756 0.732 +0 1.106 -1.305 -1.238 0.572 -1.465 0.633 0.096 0.255 0.000 0.429 -0.762 1.137 1.107 0.858 0.744 -1.038 2.548 1.213 -0.924 0.274 0.000 0.858 1.106 0.997 0.663 0.827 0.733 0.774 +0 1.533 -0.753 0.693 1.104 1.281 0.767 0.418 -0.236 0.000 1.800 -0.426 -1.471 2.215 1.329 2.273 0.525 0.000 1.653 -0.097 -0.861 3.102 2.517 2.050 0.989 1.286 0.851 1.925 1.759 +0 1.606 0.682 0.108 1.261 0.828 0.785 -1.712 -1.143 0.000 0.487 -1.208 -1.462 2.215 0.640 -0.612 0.723 0.000 0.659 0.842 -1.377 3.102 1.449 0.849 1.192 0.818 0.707 0.880 1.149 +0 0.576 -2.140 -0.756 0.973 1.212 0.815 -0.371 -0.644 2.173 0.665 0.344 0.781 0.000 0.833 0.286 -0.137 0.000 1.346 -0.500 1.236 3.102 0.840 0.975 1.016 0.869 1.107 0.953 1.003 +1 0.787 1.001 -1.558 0.905 -0.247 0.626 1.754 0.470 0.000 0.973 0.758 -0.455 0.000 1.704 0.985 0.197 0.000 2.533 0.523 -1.491 3.102 0.794 0.516 1.082 0.847 0.662 0.920 0.802 +1 1.589 -0.298 0.738 1.615 1.115 1.163 0.425 -0.779 1.087 0.208 1.707 -0.697 0.000 0.698 -0.443 1.705 2.548 1.167 -0.513 0.160 0.000 1.053 1.023 0.997 0.711 1.029 1.175 0.994 +1 1.046 0.225 -0.708 1.166 1.188 0.650 -0.375 -0.458 0.000 1.385 -0.559 -1.235 
0.000 1.218 0.316 1.151 2.548 2.399 -0.150 0.499 1.551 1.313 1.405 1.515 1.077 0.804 1.095 0.985 +1 0.572 1.719 1.115 0.765 -0.332 0.739 -0.689 0.020 2.173 0.311 -0.750 -1.035 0.000 0.963 0.438 -1.247 0.000 1.416 -0.766 1.195 3.102 0.709 0.818 0.989 1.856 0.951 1.571 1.180 +0 1.057 -0.048 0.847 1.397 1.628 1.016 0.382 0.486 0.000 0.666 0.310 -1.461 1.107 2.196 0.923 -0.595 2.548 0.424 -0.954 -0.751 0.000 1.234 1.086 1.090 1.535 1.012 1.025 0.994 +1 0.802 -0.561 0.469 0.777 -0.659 0.830 0.209 -1.117 2.173 2.087 0.186 1.386 0.000 1.296 -1.495 -0.256 0.000 0.977 -0.279 0.184 1.551 0.986 0.874 0.983 0.848 0.917 1.050 0.882 +1 2.366 0.640 -0.621 1.002 0.284 1.441 2.203 0.800 0.000 1.140 0.734 -1.732 1.107 0.596 -0.855 1.421 2.548 0.655 2.241 1.069 0.000 0.497 1.640 1.555 1.234 0.874 1.499 1.492 +0 1.957 -0.818 1.536 0.493 0.038 2.023 -1.044 0.185 0.000 1.888 -0.432 -1.439 2.215 0.877 -2.202 -1.220 0.000 0.986 -1.429 -0.050 0.000 0.965 0.829 1.327 1.073 0.877 0.981 0.925 +0 1.038 -0.167 -1.699 0.698 0.531 0.553 -1.483 1.533 0.000 1.106 -2.332 -1.118 0.000 1.947 -1.100 1.083 0.000 2.520 -0.094 -0.398 3.102 0.787 0.852 1.067 0.983 0.944 1.091 0.938 +1 0.636 0.074 0.832 1.040 -0.443 1.107 0.141 -1.310 0.000 1.301 0.378 0.432 2.215 0.863 0.692 1.741 0.000 0.557 1.066 0.814 0.000 0.868 0.746 1.027 0.764 0.871 0.935 0.815 +0 1.142 -0.844 -1.249 1.270 -0.488 2.854 -1.150 -0.707 0.000 3.864 -1.573 0.925 0.000 1.245 -1.393 1.342 2.548 1.097 -0.667 0.732 3.102 7.203 3.792 1.057 1.050 0.574 2.238 1.779 +0 0.908 -1.856 -1.583 1.557 -1.201 1.564 0.789 0.443 2.173 1.094 -1.165 1.520 2.215 0.961 1.258 -0.123 0.000 1.266 2.196 -0.704 0.000 0.840 0.938 0.989 0.793 2.759 1.903 1.593 +1 2.224 0.287 -1.669 1.127 1.662 2.207 -0.706 -0.573 2.173 1.393 0.231 0.956 2.215 1.768 -0.589 -0.096 0.000 1.590 -0.795 1.173 0.000 1.229 1.612 1.002 2.128 2.835 1.801 1.684 +1 0.764 -1.039 -0.447 1.064 1.691 0.759 -1.213 0.142 0.000 0.979 -0.049 1.422 2.215 0.634 0.845 -0.732 2.548 0.617 2.471 1.097 0.000 4.260 2.326 1.171 0.915 0.889 1.505 1.363 +0 1.652 0.814 1.446 0.376 -1.712 1.119 -0.687 -0.198 0.000 0.725 0.253 -0.647 0.000 0.941 0.049 1.366 2.548 0.854 0.682 0.575 3.102 0.827 0.880 0.997 0.732 0.523 0.751 0.981 +0 0.512 -0.464 -0.754 0.358 0.853 0.545 2.665 -1.546 0.000 0.580 1.154 0.409 2.215 0.657 1.455 0.944 0.000 1.080 -0.114 0.250 1.551 1.043 0.933 0.997 0.743 0.519 0.898 1.443 +1 1.035 -0.466 -0.236 1.760 -0.739 0.844 -1.083 0.909 2.173 0.385 -1.398 1.593 0.000 0.987 -0.121 1.734 1.274 0.676 0.692 1.233 0.000 0.951 0.875 0.984 0.941 0.956 1.023 0.869 +1 0.580 -0.270 0.736 0.402 0.011 0.894 -0.949 -0.967 2.173 0.368 -0.077 -1.696 0.000 0.649 0.604 0.337 0.000 0.657 0.422 -0.586 3.102 0.778 1.046 0.993 0.764 0.704 0.704 0.701 +0 0.429 1.159 -0.135 1.571 1.643 0.828 -0.363 1.232 2.173 1.534 0.228 -0.480 2.215 0.380 -1.813 0.674 0.000 0.382 -2.177 0.111 0.000 0.244 0.854 1.137 1.288 1.734 1.168 1.207 +0 1.058 0.270 -0.292 0.886 -0.511 0.976 -0.567 1.035 2.173 0.690 -0.043 0.464 0.000 0.653 0.494 -0.904 0.000 0.371 2.351 -1.053 0.000 0.938 1.073 0.989 1.551 1.562 1.155 0.985 +1 0.345 0.576 -1.477 0.770 -0.569 3.540 1.146 1.298 0.000 2.513 -0.592 -0.053 0.000 2.216 -0.324 -0.367 2.548 1.431 0.292 -0.957 3.102 2.053 1.301 0.984 0.706 0.847 0.981 0.838 +0 3.106 -1.365 0.473 0.357 1.208 1.713 -2.031 -1.033 0.000 0.538 -0.850 0.858 2.215 0.619 -1.070 1.601 2.548 0.479 -2.337 -1.089 0.000 0.539 0.859 0.986 0.566 0.392 0.828 1.046 +0 0.866 0.094 -0.447 1.019 -1.505 0.745 -0.436 0.201 2.173 0.753 -1.474 -1.454 2.215 1.045 -0.884 
0.796 0.000 0.647 0.019 -1.724 0.000 0.838 0.842 1.060 0.944 1.263 0.878 0.777 +0 1.415 -0.584 1.725 0.608 -1.018 0.861 0.379 -0.074 0.000 0.450 -1.309 0.826 2.215 0.840 0.113 -0.943 0.000 0.513 -0.451 1.210 0.000 0.965 1.069 0.998 0.583 0.173 0.620 0.709 +0 2.119 -0.329 0.960 1.120 -0.820 1.141 -1.046 -0.641 2.173 1.476 0.332 0.582 0.000 1.236 -0.918 -1.388 2.548 0.842 1.190 -1.699 0.000 1.528 1.679 2.133 1.618 0.922 1.456 1.319 +1 0.425 -0.293 -0.671 0.776 0.900 0.996 0.540 -0.646 0.000 1.127 1.178 0.696 1.107 0.485 0.465 -1.373 0.000 1.259 1.036 -1.615 3.102 0.806 0.777 0.991 1.234 0.938 1.161 1.000 +0 0.427 -1.207 -0.972 2.425 -0.391 0.638 -0.612 1.625 2.173 0.445 1.105 0.400 0.000 0.617 -0.431 -0.315 0.000 2.112 1.032 1.160 0.000 0.934 0.862 0.984 0.745 0.328 0.757 0.691 +0 1.641 -0.177 1.676 1.574 1.217 0.829 -0.908 -0.224 1.087 0.666 -1.074 0.361 0.000 0.785 0.048 -0.981 2.548 0.942 0.495 -0.373 0.000 0.790 0.947 0.990 1.495 0.812 1.018 0.996 +1 1.108 -1.314 0.622 0.196 0.812 1.178 -1.138 -1.417 0.000 0.190 -1.010 -1.031 2.215 0.965 -0.738 0.166 0.000 0.449 -0.287 -0.114 3.102 1.925 0.997 0.981 0.518 0.215 0.617 0.649 +0 1.050 -0.933 -0.031 1.909 -1.300 0.570 -0.076 0.059 0.000 0.736 -0.115 1.700 2.215 0.927 0.731 0.208 2.548 1.057 1.401 1.251 0.000 1.576 0.963 1.785 1.105 0.952 1.027 1.099 +1 2.024 -0.276 -1.364 0.495 -0.713 0.367 -1.145 1.240 0.000 0.691 -0.510 -0.662 0.000 0.968 -0.723 0.538 2.548 0.788 -0.038 0.011 0.000 0.877 0.732 0.988 0.990 0.610 0.793 0.697 +0 1.217 -0.018 0.806 1.435 -0.042 0.849 -1.584 1.479 0.000 0.849 0.785 -0.505 2.215 0.757 -1.712 -1.445 0.000 0.795 -0.430 -0.375 3.102 0.726 0.911 1.266 0.983 0.535 1.176 1.133 +0 1.788 0.664 1.540 1.052 -0.936 0.673 1.589 -0.966 0.000 1.241 1.625 -0.567 0.000 1.086 -0.439 -1.309 2.548 0.962 0.490 0.030 3.102 0.686 0.846 1.501 1.009 0.851 1.008 0.988 +0 1.356 0.434 -1.267 0.388 0.175 0.942 -0.767 1.098 2.173 0.297 -0.394 -1.427 0.000 0.873 -0.598 -0.567 2.548 1.162 -0.255 0.227 0.000 0.764 0.814 0.988 0.703 1.129 0.845 0.700 +0 1.043 -0.709 0.227 0.694 1.734 0.903 -0.891 -0.183 1.087 0.737 -0.726 -0.967 0.000 1.364 -0.545 1.498 0.000 0.785 -1.285 1.152 1.551 0.917 0.854 1.153 0.850 0.881 0.792 0.712 +0 2.891 0.816 0.489 1.355 0.903 1.508 0.814 -1.233 0.000 0.805 0.250 -0.535 2.215 0.526 0.706 1.697 2.548 0.488 1.320 -1.163 0.000 0.519 0.865 1.002 0.820 0.651 0.852 1.040 +0 1.416 0.839 1.323 0.330 0.103 0.690 -0.960 -0.354 0.000 0.650 -0.057 0.930 2.215 0.660 -0.226 -0.481 2.548 0.524 -1.518 -1.228 0.000 0.766 0.953 0.988 0.851 0.669 0.647 0.868 +0 0.764 -1.590 -0.880 0.980 1.331 1.045 -0.945 -0.873 2.173 0.752 -0.543 0.168 1.107 0.808 -1.859 0.593 0.000 1.970 0.712 1.229 0.000 0.899 0.882 1.093 0.962 1.084 0.834 0.769 +1 1.742 0.277 0.223 0.695 -0.414 0.697 0.145 -1.195 2.173 0.438 1.788 1.506 0.000 1.507 0.263 1.447 2.548 0.415 0.286 -0.175 0.000 0.718 0.813 0.984 1.121 0.884 0.908 0.800 +0 1.599 0.469 1.600 1.500 1.024 1.245 -0.991 -0.162 0.000 0.376 0.265 1.077 0.000 0.901 0.865 -1.343 0.000 1.473 1.328 -0.698 3.102 0.794 0.803 1.063 0.856 0.421 0.854 0.710 +1 0.479 0.335 -1.445 0.776 -0.200 1.766 -0.752 1.536 2.173 1.854 -1.284 0.046 0.000 0.827 -1.221 -0.516 0.000 0.426 -0.450 -0.337 3.102 0.921 0.556 0.993 1.177 0.917 1.252 1.016 +0 0.556 0.693 -0.262 0.859 -1.039 0.648 -0.099 0.397 1.087 0.768 0.528 -1.519 2.215 0.681 -0.226 1.238 0.000 0.565 1.612 -0.245 0.000 1.123 0.904 0.982 0.734 1.080 0.717 0.699 +1 1.189 0.199 1.027 1.276 -0.073 1.916 -2.051 -1.527 0.000 1.521 -1.546 -0.047 2.215 1.020 -0.127 0.676 0.000 
0.971 -0.388 -0.357 0.000 0.899 1.145 1.427 0.907 0.900 1.097 0.896 +0 1.838 0.222 -1.685 0.875 0.168 1.226 1.229 -1.715 0.000 1.736 0.075 -0.486 1.107 2.821 0.352 0.609 0.000 0.838 -1.375 1.638 0.000 1.458 1.088 1.748 1.380 0.644 1.217 1.144 +0 0.519 -0.573 1.063 0.681 -0.118 1.176 -0.184 0.933 2.173 0.906 0.012 -1.271 0.000 0.772 -1.137 -0.204 2.548 0.674 -1.157 -1.505 0.000 0.860 0.970 0.984 0.774 1.211 0.845 0.730 +0 0.368 1.732 -0.137 0.180 -1.403 1.230 0.608 0.947 2.173 0.764 1.155 -0.401 0.000 0.742 -0.043 0.052 2.548 0.735 -0.190 -1.452 0.000 1.092 0.790 0.987 0.921 0.949 0.842 0.723 +1 0.645 -0.406 1.578 0.704 -0.925 0.334 1.429 1.032 0.000 0.507 0.883 -0.057 1.107 0.357 0.566 -0.846 2.548 0.384 1.708 0.589 0.000 0.272 0.457 0.985 0.659 0.302 0.666 0.746 +1 0.299 1.036 0.844 1.103 1.552 0.670 0.070 0.303 2.173 0.728 -0.431 -0.561 0.000 0.808 0.325 -1.497 2.548 0.776 -1.874 0.511 0.000 0.601 0.825 0.988 1.082 0.925 1.153 1.292 +1 0.893 0.582 0.554 0.810 -0.961 1.367 0.240 0.117 2.173 1.068 0.302 -1.215 0.000 1.075 -0.038 -1.728 0.000 0.589 2.255 -1.507 0.000 1.042 0.724 1.153 0.946 0.984 0.856 0.758 +0 0.886 1.074 -1.738 0.775 0.074 0.748 -0.333 -1.235 0.000 1.091 0.407 0.336 2.215 0.665 -0.549 0.019 0.000 1.192 -0.661 0.646 0.000 1.022 0.825 1.146 0.705 0.944 0.687 0.648 +1 0.731 0.517 0.217 0.579 -0.509 0.623 -0.173 1.437 1.087 0.825 0.953 -0.377 0.000 0.788 0.846 0.797 1.274 0.510 1.669 -1.256 0.000 0.752 1.128 0.986 0.766 0.695 0.743 0.741 +1 0.678 -0.339 -0.972 1.115 0.854 0.782 -1.001 0.187 1.087 0.965 -1.203 1.301 0.000 0.835 -1.442 -0.214 2.548 0.932 -0.377 -0.731 0.000 0.982 0.978 1.201 0.874 0.461 0.685 0.714 +0 1.500 -0.536 -1.566 0.720 -1.487 1.136 -0.465 0.180 2.173 0.950 -0.031 1.507 2.215 1.354 0.483 0.206 0.000 0.592 0.463 -0.838 0.000 0.798 1.005 1.009 1.421 1.462 1.033 0.934 +0 0.902 0.639 1.399 1.332 0.895 0.979 1.377 -0.038 0.000 0.442 1.340 -0.801 0.000 0.964 0.594 -1.364 2.548 0.520 -0.160 -1.325 3.102 0.887 1.005 0.998 0.751 0.236 0.672 0.765 +1 1.124 -0.769 0.169 2.204 0.918 1.460 0.826 -0.652 0.000 0.965 -0.687 1.496 2.215 0.768 0.476 -1.457 2.548 0.484 -1.064 -1.618 0.000 1.896 1.172 1.364 1.005 0.733 1.081 1.271 +0 0.723 -1.497 0.511 0.183 -0.608 0.662 -0.199 1.667 0.000 0.831 0.053 1.010 0.000 0.500 0.625 -0.501 2.548 1.943 -0.812 -0.571 3.102 0.903 0.845 0.995 0.755 0.702 0.842 0.745 +1 1.363 0.637 1.516 1.524 1.080 0.488 0.439 0.413 0.000 1.419 -0.550 -0.315 2.215 0.743 0.091 1.571 0.000 1.103 -0.456 -0.707 0.000 0.944 0.968 0.979 0.839 0.788 1.049 0.870 +1 1.281 0.748 0.223 1.008 -1.311 1.075 2.833 1.072 0.000 1.192 1.035 -0.743 2.215 2.160 1.099 1.640 0.000 1.804 0.184 -0.136 0.000 2.447 1.366 1.547 0.996 0.681 1.002 0.920 +1 0.656 -0.245 -0.850 0.939 0.341 0.849 -0.271 0.141 0.000 0.937 -0.312 -0.239 2.215 2.038 -0.005 -1.600 2.548 1.921 -1.223 1.394 0.000 0.919 0.900 0.987 0.999 1.402 0.877 0.764 +1 1.036 -1.069 0.030 0.269 0.794 1.341 -1.554 -0.982 2.173 0.737 -2.128 -1.204 0.000 0.907 -1.065 0.633 0.000 2.559 1.137 0.805 0.000 1.417 0.970 0.976 1.263 0.542 0.836 0.853 +0 1.263 0.043 1.639 0.826 1.042 1.106 0.908 0.219 0.000 0.888 0.707 -0.806 2.215 0.490 0.549 -1.378 2.548 0.383 0.235 0.685 0.000 0.517 0.940 0.983 0.563 0.348 0.636 0.725 +1 1.628 -0.066 -0.130 0.774 0.215 0.980 -0.840 -1.607 2.173 0.503 -0.532 -1.001 0.000 0.276 0.817 0.819 0.000 1.491 -0.783 0.996 3.102 0.726 0.789 0.992 1.091 0.915 1.120 0.875 +0 1.815 1.433 -0.602 1.080 -0.851 0.710 0.731 1.663 1.087 0.918 1.724 0.764 0.000 1.314 -1.060 -1.620 0.000 1.050 -0.168 0.467 0.000 
0.634 1.042 0.979 1.322 0.832 1.087 0.994 +0 0.297 1.365 -1.078 2.092 0.743 1.495 -0.738 -0.300 2.173 1.860 0.856 1.099 2.215 3.441 0.490 -1.054 0.000 1.859 -0.000 1.648 0.000 1.964 2.179 1.089 2.371 3.225 2.074 1.838 +0 1.863 -0.297 -0.984 0.335 -1.412 1.184 -0.459 -1.686 2.173 0.896 0.782 -0.037 0.000 1.828 0.473 0.506 0.000 1.149 -0.392 0.332 3.102 0.965 0.798 0.986 0.903 1.196 1.176 1.122 +0 1.793 -0.871 0.894 0.473 -0.593 1.182 -0.577 -0.596 0.000 1.032 -1.359 -1.488 2.215 0.499 -0.641 0.324 2.548 0.431 -0.849 -1.711 0.000 0.950 1.067 1.242 0.622 0.808 0.711 0.773 +0 0.417 -1.630 0.356 1.734 0.368 0.681 0.726 -1.658 2.173 0.759 -1.555 -1.078 0.000 0.663 -0.676 1.257 2.548 0.795 -0.185 0.700 0.000 0.841 0.833 0.987 0.693 0.791 0.865 0.753 +1 1.282 1.329 1.090 1.073 -1.209 1.106 -1.065 0.462 0.000 2.159 0.690 -0.692 2.215 2.048 0.698 1.518 0.000 0.711 0.234 -0.280 0.000 1.026 0.662 1.426 1.449 0.625 0.932 0.902 +1 0.998 -1.171 -0.297 1.224 -0.069 0.836 0.684 -1.340 0.000 0.689 0.176 1.036 0.000 0.774 0.528 1.529 1.274 1.118 0.854 0.584 3.102 0.907 0.867 0.983 1.018 0.558 0.821 0.763 +0 1.354 1.946 -0.456 0.269 -0.761 0.632 0.107 1.412 0.000 0.671 0.858 0.565 0.000 0.509 1.237 0.808 2.548 0.715 -0.867 -1.224 3.102 1.104 0.969 0.976 0.667 0.826 0.806 0.823 +1 0.477 0.119 0.976 1.381 -0.164 1.233 -2.489 0.912 0.000 1.092 1.651 -1.529 2.215 1.048 0.513 -1.008 0.000 1.407 1.210 -0.676 3.102 0.739 0.788 0.985 0.789 0.791 0.852 0.710 +0 0.694 -0.613 -0.388 0.897 1.338 0.582 -0.205 0.641 1.087 0.338 -0.699 -0.819 0.000 0.535 0.535 1.512 0.000 0.646 0.391 0.241 0.000 0.725 0.705 1.093 0.790 0.923 0.660 0.572 +1 1.927 0.094 0.287 0.250 1.320 0.693 0.044 -1.167 2.173 0.938 -1.206 -0.698 2.215 0.638 -0.363 -1.326 0.000 0.930 0.096 0.989 0.000 0.922 0.937 0.986 1.227 0.951 0.970 0.850 +0 0.430 -1.025 -0.694 1.900 0.124 1.032 0.317 -1.373 2.173 1.586 0.392 1.470 0.000 1.265 -1.032 -0.047 2.548 0.384 0.812 1.246 0.000 1.267 1.044 0.987 0.646 1.740 1.158 1.084 +1 1.008 0.201 0.104 0.483 1.541 1.482 0.194 -0.518 0.000 0.780 0.702 1.196 0.000 1.413 -0.406 0.979 1.274 1.864 -0.867 0.093 0.000 0.972 0.895 0.985 0.549 0.568 0.577 0.565 +0 1.176 -0.273 -0.299 1.586 0.321 1.168 -1.057 1.220 1.087 0.961 -1.470 1.662 0.000 0.850 -0.057 -1.136 2.548 1.381 0.381 -0.669 0.000 0.962 0.785 1.004 1.408 1.240 1.044 0.996 +1 0.451 -0.403 0.147 1.334 1.275 0.754 -0.982 -1.440 2.173 0.745 -1.105 0.255 2.215 1.107 -0.186 0.515 0.000 1.818 -0.696 -0.705 0.000 0.699 0.768 0.988 0.821 1.105 0.715 0.668 +0 1.375 -1.651 0.499 0.766 1.182 1.126 -0.573 -1.363 0.000 1.089 -0.986 -0.093 2.215 0.819 -0.904 1.478 0.000 1.490 -0.436 -0.723 3.102 1.013 0.916 0.988 1.006 0.681 0.902 1.026 +1 1.006 0.844 -0.845 0.823 1.359 0.989 0.628 -0.235 0.000 1.134 1.412 1.026 2.215 1.093 0.212 1.569 2.548 0.602 1.529 -0.120 0.000 0.762 1.159 1.153 0.934 0.948 0.957 0.842 +1 0.992 1.151 0.070 0.079 1.568 0.392 2.324 0.818 0.000 0.686 2.007 -1.509 0.000 0.950 -0.179 -1.567 2.548 1.051 0.722 -0.501 3.102 0.964 0.932 0.987 0.822 0.757 0.900 0.816 +1 1.600 -1.336 1.445 1.207 0.954 0.717 2.694 -0.623 0.000 0.475 0.562 1.299 0.000 0.530 0.776 -0.501 2.548 0.964 -0.590 -1.086 0.000 0.913 0.639 0.995 0.514 0.523 0.724 0.685 +0 0.906 0.008 0.076 1.392 -0.544 0.669 1.253 1.731 0.000 0.709 0.812 0.746 2.215 0.707 2.044 1.239 0.000 0.389 -0.229 -0.808 1.551 0.826 0.842 0.990 0.507 0.543 0.615 0.778 +0 1.168 -0.133 0.060 0.829 -0.944 2.449 -0.266 -0.413 0.000 1.828 -0.653 1.186 2.215 1.751 -1.390 1.241 0.000 1.678 -0.597 1.621 1.551 0.897 1.059 1.071 0.953 
0.606 0.925 0.940 +1 1.149 -1.038 0.695 0.522 0.480 0.840 -0.661 -0.713 2.173 0.929 -0.614 1.280 0.000 1.398 -0.217 -1.601 2.548 0.815 -1.760 -0.464 0.000 0.861 1.023 0.986 0.971 1.011 0.880 0.765 +0 3.015 -0.123 0.462 0.390 -0.040 1.515 -0.694 -1.217 2.173 1.193 -1.224 -1.132 0.000 1.543 -0.650 0.788 2.548 0.411 -1.188 1.119 0.000 0.822 1.134 0.974 1.957 1.852 1.395 1.224 +1 1.116 -1.186 0.426 0.812 -0.429 1.155 -0.647 -1.615 2.173 0.750 -1.116 -0.021 0.000 0.320 -1.049 -1.258 0.000 0.766 0.906 0.933 1.551 0.674 0.931 0.984 1.294 1.230 1.084 0.885 +0 0.322 -1.074 -0.104 2.046 0.885 0.545 -1.123 -1.026 0.000 0.815 0.836 -1.045 2.215 0.458 -0.126 0.364 0.000 0.601 -0.001 -0.372 3.102 0.973 1.062 0.989 0.902 0.457 1.261 1.018 +1 1.630 0.466 0.639 0.773 0.003 0.638 -0.176 -1.492 2.173 0.623 -0.594 -0.627 2.215 0.387 -1.345 -0.304 0.000 0.779 -0.148 1.350 0.000 0.736 0.671 0.996 1.004 0.683 0.887 0.760 +0 0.742 -0.919 -0.800 1.034 0.453 0.755 0.464 1.678 0.000 0.798 0.806 0.357 2.215 0.882 0.332 -0.548 2.548 0.843 1.322 1.254 0.000 0.850 0.967 1.097 0.824 0.682 0.771 0.898 +0 0.411 -0.878 1.694 0.878 0.560 1.854 -0.116 1.252 2.173 2.836 -0.119 -0.602 1.107 0.377 0.869 -0.560 0.000 0.373 0.677 0.847 0.000 0.396 0.940 0.987 1.317 3.357 1.551 1.127 +0 0.877 0.477 -0.761 0.667 -0.068 0.764 0.591 0.520 2.173 0.787 0.037 1.695 0.000 1.274 -0.420 1.161 0.000 1.022 1.698 -0.650 0.000 0.887 1.148 0.984 0.635 0.974 0.790 0.727 +1 2.099 -0.197 0.759 0.334 -0.532 1.009 0.375 -1.296 2.173 0.550 -0.003 -0.525 0.000 0.600 0.924 -1.613 0.000 0.549 0.295 -0.117 3.102 0.869 0.730 1.065 0.604 0.688 0.824 0.720 +0 1.083 -0.436 -1.120 0.322 -0.084 1.363 0.158 0.397 2.173 1.197 -2.649 -1.200 0.000 0.881 -0.504 0.811 0.000 0.876 0.094 1.524 3.102 0.897 0.941 0.980 0.694 0.982 0.907 0.764 +0 0.561 -0.838 0.270 1.010 1.245 0.708 -0.547 -0.632 2.173 0.908 0.914 -1.446 2.215 0.559 1.755 -0.058 0.000 0.547 0.326 0.716 0.000 0.648 1.086 0.990 0.950 1.246 0.868 0.808 +0 1.070 0.384 -1.126 0.950 1.650 1.099 -0.269 0.000 0.000 1.761 0.152 1.443 2.215 1.164 0.676 -0.056 0.000 2.042 -0.394 0.279 3.102 1.082 0.847 0.989 0.853 1.581 1.216 1.095 +0 0.277 1.125 -0.627 1.124 -1.666 0.597 -0.437 -0.063 0.000 0.556 -1.270 -0.779 2.215 1.308 -0.297 1.011 2.548 0.401 -1.610 -0.392 0.000 0.649 0.877 0.989 0.734 1.016 0.688 0.669 +1 3.325 0.647 -0.479 0.178 -1.475 0.663 -0.132 -1.711 1.087 1.396 1.928 0.600 0.000 1.205 1.419 1.595 0.000 0.627 0.267 0.366 3.102 1.177 0.850 0.989 1.231 0.670 0.979 1.070 +0 1.464 -0.890 -1.704 0.725 -1.664 0.745 0.239 0.415 0.000 0.646 -0.645 -0.238 1.107 0.952 -0.032 1.680 1.274 1.309 2.134 -0.168 0.000 0.783 0.837 0.984 0.975 0.865 0.774 0.850 +1 1.620 1.667 1.668 0.546 1.172 1.100 1.327 -0.813 2.173 0.737 1.029 -0.226 1.107 0.657 1.124 0.583 0.000 0.874 1.791 1.289 0.000 1.038 1.053 0.995 1.078 0.694 0.967 0.843 +1 0.696 -0.023 0.604 0.521 0.918 0.955 0.286 1.673 2.173 1.260 0.550 -0.292 0.000 1.115 -0.486 1.602 0.000 0.563 0.580 -0.681 1.551 0.861 0.516 0.991 0.939 0.679 0.876 0.799 +0 0.543 0.166 1.541 1.835 1.009 1.304 -0.571 -0.881 1.087 0.721 -1.364 0.960 0.000 0.624 -0.879 0.154 0.000 0.548 0.689 -0.940 0.000 0.850 0.912 0.979 0.764 0.271 1.186 0.954 +0 0.705 -0.531 -0.945 0.842 1.367 0.751 0.679 0.457 1.087 1.457 0.084 -1.244 1.107 0.555 -2.395 0.958 0.000 0.918 -0.987 0.380 0.000 0.733 1.641 0.986 0.899 1.608 1.377 1.098 +1 1.067 -1.176 0.600 0.799 0.368 0.940 -0.190 -1.100 2.173 0.343 -1.237 -1.526 2.215 0.607 -1.568 1.223 0.000 0.908 -0.655 -0.513 0.000 0.911 1.002 1.000 0.721 0.568 1.019 0.827 
+1 0.952 0.081 -1.263 0.400 0.930 0.826 -0.254 -0.798 2.173 0.492 0.066 1.025 2.215 0.447 -0.061 0.082 0.000 0.443 -1.901 0.660 0.000 0.896 1.114 0.991 0.622 0.947 0.747 0.667 +0 0.626 0.103 -0.067 1.652 0.180 0.712 0.380 1.588 2.173 1.178 -1.147 -0.610 2.215 1.749 -0.348 1.102 0.000 1.459 -0.092 -1.285 0.000 1.493 1.023 0.984 1.822 1.698 1.389 1.269 +0 0.458 -1.626 -0.020 1.310 -1.232 0.452 -1.032 -0.359 0.000 1.214 -0.914 0.847 0.000 0.524 0.697 -1.458 2.548 0.452 -0.950 1.135 1.551 1.394 1.177 0.987 0.597 0.490 0.760 0.812 +0 2.525 -0.638 -0.603 2.642 -0.361 1.260 0.677 1.301 0.000 1.070 0.096 1.077 2.215 0.582 -1.456 -1.685 0.000 0.807 -1.136 0.884 3.102 1.149 0.957 0.995 1.069 0.686 1.229 1.567 +1 0.738 -0.398 -0.901 0.339 0.768 1.202 0.031 0.123 0.000 1.429 1.983 1.680 0.000 1.323 0.601 1.419 2.548 1.172 -1.709 -0.717 0.000 0.673 0.647 0.989 1.078 1.084 1.082 0.879 +0 0.732 2.051 0.179 1.143 -0.655 1.575 0.685 1.372 2.173 0.926 1.219 -0.547 2.215 0.671 0.654 -0.165 0.000 0.449 2.015 0.884 0.000 0.759 1.170 0.989 0.866 1.826 1.456 1.109 +1 2.046 -0.362 -0.017 0.589 -0.281 0.956 -0.919 1.507 2.173 0.700 -0.297 -1.369 0.000 0.508 -0.849 1.017 2.548 0.461 0.789 -1.025 0.000 0.544 0.874 0.972 0.712 0.372 0.870 0.799 +1 0.406 1.463 0.023 0.905 -1.376 1.043 0.030 -0.602 0.000 1.706 0.222 0.974 2.215 1.119 -0.386 -0.379 0.000 1.171 -1.198 1.581 0.000 0.591 0.886 0.989 1.771 0.241 1.101 1.318 +1 0.680 -0.137 -1.609 1.010 0.605 1.195 0.374 0.782 1.087 2.291 1.407 -1.129 0.000 2.011 0.478 0.368 2.548 1.130 1.897 0.162 0.000 0.906 1.158 1.045 0.810 0.719 0.953 0.905 +1 1.266 -0.645 -0.640 0.819 -0.121 1.370 0.433 1.618 2.173 1.535 -0.321 0.850 0.000 0.942 0.954 -0.201 0.000 1.441 -0.170 -0.967 3.102 1.479 1.242 0.980 1.465 1.182 1.053 1.025 +0 0.363 1.846 1.737 1.889 -0.546 0.396 -0.046 -1.673 0.000 1.208 0.844 1.127 2.215 1.742 0.078 0.722 2.548 0.757 1.632 -1.223 0.000 1.040 0.968 1.014 1.645 0.828 1.236 1.051 +0 0.512 1.422 1.695 0.726 -0.405 0.880 -0.371 1.688 2.173 0.720 0.995 -1.649 2.215 1.423 0.291 0.047 0.000 0.652 1.383 0.387 0.000 0.831 1.004 0.991 1.757 0.902 1.144 1.056 +0 2.565 0.022 -0.718 0.832 1.646 1.067 -1.288 0.988 2.173 0.446 0.136 0.288 0.000 0.329 0.948 -0.436 2.548 0.564 -0.618 0.491 0.000 0.321 0.779 1.715 0.850 1.289 1.228 0.936 +1 0.981 -0.080 0.206 1.335 0.277 1.262 -0.676 1.689 2.173 0.403 1.840 1.396 0.000 1.275 -0.531 0.909 2.548 1.226 0.732 -0.876 0.000 0.947 1.313 0.985 1.447 1.026 1.186 1.199 +1 0.832 0.139 -0.783 0.657 0.048 0.903 -0.363 0.908 2.173 0.986 -0.582 -1.191 0.000 0.794 0.694 1.042 0.000 0.927 -0.027 -0.347 3.102 1.580 1.014 0.995 0.965 0.891 0.845 0.773 +0 0.504 1.073 -1.421 2.067 -1.208 0.758 1.518 0.460 0.000 0.835 2.517 -0.793 0.000 0.740 2.499 0.540 0.000 1.759 1.174 -1.579 3.102 0.914 1.009 0.982 0.723 0.782 0.726 0.883 +0 1.076 0.255 -0.091 1.516 -0.191 0.742 0.085 0.825 0.000 0.873 0.922 1.419 0.000 1.337 0.860 -1.166 2.548 1.228 0.161 -1.259 3.102 1.156 1.018 0.997 1.287 0.386 0.898 1.005 +1 0.640 0.679 -0.351 1.063 -0.729 1.911 0.541 0.735 0.000 1.388 -1.230 1.741 0.000 0.997 -0.206 -0.626 2.548 0.752 -0.199 1.692 0.000 0.716 1.034 0.982 1.277 0.304 0.927 1.267 +0 1.413 -0.483 -0.117 1.367 -0.663 0.881 1.634 -1.587 0.000 0.928 -1.824 0.177 0.000 1.188 0.559 1.394 2.548 0.902 -0.278 1.034 3.102 0.891 0.933 0.987 0.841 0.464 0.842 1.056 +0 0.936 -1.635 -0.006 0.289 -1.504 0.459 0.149 1.122 0.000 1.021 0.237 -1.392 2.215 0.936 0.080 0.074 2.548 0.898 1.384 1.429 0.000 0.856 0.900 0.994 0.744 1.010 0.805 0.871 +0 0.938 0.448 -1.670 1.957 -1.014 
0.831 0.532 0.638 0.000 0.971 0.716 -0.084 2.215 0.547 -0.939 1.282 2.548 0.451 1.489 1.132 0.000 0.741 0.875 1.048 1.065 1.067 0.879 0.873 +0 0.868 1.262 1.383 1.460 -1.298 1.157 0.631 -0.349 0.000 2.060 0.358 1.431 2.215 1.436 0.105 -0.086 0.000 1.564 1.619 0.711 0.000 0.819 0.865 1.036 1.139 1.664 1.346 1.265 +0 0.465 2.136 0.846 1.772 -1.572 0.352 -1.001 -0.121 0.000 0.722 -0.167 0.390 0.000 1.193 1.030 -0.003 2.548 1.279 0.563 1.270 3.102 0.964 1.296 1.033 1.130 0.887 1.170 1.652 +1 1.215 1.156 -0.810 1.979 -0.258 2.467 0.741 1.555 0.000 0.920 1.055 -0.222 2.215 1.158 0.215 0.351 2.548 0.765 0.328 -1.473 0.000 0.919 1.526 1.028 0.565 0.723 1.237 1.269 +1 1.640 0.997 -0.587 0.783 -1.455 0.909 -2.309 0.859 0.000 0.809 0.943 -1.021 2.215 0.769 -0.010 0.137 0.000 1.659 -0.102 1.383 1.551 0.669 0.813 1.105 0.588 1.054 0.795 0.733 +1 0.334 -2.183 1.004 1.495 -0.587 1.260 -0.677 -1.713 2.173 0.806 -0.326 -0.196 0.000 0.836 -1.548 0.137 0.000 1.873 -0.811 1.349 3.102 0.855 0.933 0.989 1.363 0.650 0.996 0.910 +1 0.568 1.234 1.207 0.099 1.684 0.716 1.007 -0.302 0.000 0.515 1.950 -0.749 0.000 1.053 -0.004 -1.641 2.548 1.079 0.178 0.631 3.102 0.894 0.942 0.986 0.584 0.729 0.657 0.605 +1 0.900 -0.113 1.540 1.502 -1.635 0.334 0.057 -0.812 2.173 1.922 -0.399 0.050 2.215 0.748 0.481 0.860 0.000 0.828 -0.250 -1.424 0.000 0.851 1.153 0.979 0.772 0.873 1.051 0.887 +0 0.738 -1.292 0.131 0.252 1.197 1.694 -0.723 -1.620 0.000 2.302 -0.354 0.188 2.215 0.690 0.320 -0.807 2.548 0.574 0.118 -1.718 0.000 0.689 0.932 0.991 0.833 1.155 1.352 1.067 +1 0.578 -0.954 1.579 0.608 1.694 0.517 -1.020 1.138 0.000 0.743 -0.663 0.306 1.107 1.414 -0.117 -0.320 2.548 1.086 0.979 -1.046 0.000 1.070 1.590 0.988 0.863 0.658 1.270 1.083 +1 1.418 -0.267 0.490 1.853 1.035 1.269 -2.623 -0.746 0.000 0.918 -0.958 -1.060 2.215 0.869 0.437 -1.738 2.548 0.548 -0.339 1.478 0.000 0.603 0.890 1.059 0.929 0.934 0.962 0.761 +0 0.782 -0.290 1.366 0.585 -1.551 1.083 -0.906 -0.218 2.173 0.634 -2.411 -1.033 0.000 0.562 -0.521 0.808 0.000 0.991 0.621 1.478 3.102 0.698 0.956 0.980 1.108 1.495 0.957 0.815 +0 2.899 -1.209 -0.507 0.512 -1.417 1.270 1.338 0.653 0.000 0.689 -1.206 0.437 2.215 0.950 -0.958 -1.519 2.548 0.881 -1.489 -1.025 0.000 0.859 0.934 1.233 0.916 0.848 0.784 0.792 +0 0.706 1.759 -1.685 2.058 -0.103 0.582 -0.133 0.260 1.087 0.816 1.162 1.586 2.215 0.761 -0.286 -0.915 0.000 0.539 -0.641 0.848 0.000 0.724 0.936 1.652 1.464 1.189 1.111 1.037 +1 0.455 -0.118 1.525 0.992 -0.173 2.415 -1.635 -1.567 0.000 2.366 0.737 -0.047 2.215 0.962 -0.767 0.920 1.274 1.358 -0.092 0.212 0.000 0.773 0.937 0.984 0.676 1.881 1.068 0.848 +1 0.761 2.184 0.117 0.847 1.177 1.023 0.632 -1.151 2.173 0.417 1.107 0.809 0.000 1.086 1.550 -0.396 0.000 0.501 0.605 1.215 0.000 0.908 1.003 0.989 0.733 0.715 0.842 0.733 +1 0.339 -1.498 -1.719 1.190 -1.072 0.676 0.340 1.365 2.173 1.249 -0.550 -0.205 0.000 0.575 -1.102 1.099 2.548 0.483 -0.844 0.702 0.000 0.876 1.134 0.991 0.733 0.691 0.741 0.715 +1 0.710 -0.820 -1.476 0.226 -0.731 0.994 -0.748 1.439 0.000 1.092 -0.596 -0.179 2.215 1.027 0.141 0.251 0.000 1.124 0.642 -0.783 3.102 0.911 1.152 0.975 0.862 0.911 0.990 0.823 +0 2.123 -0.589 1.587 0.710 0.865 0.537 1.648 0.089 0.000 2.134 0.627 -0.589 2.215 1.338 0.483 1.256 2.548 0.642 0.314 -0.456 0.000 0.840 1.110 1.030 1.969 1.791 1.398 1.430 +0 0.410 -1.787 -1.492 1.128 -0.651 1.846 -0.244 -1.352 2.173 1.511 0.448 0.332 0.000 1.448 0.441 0.745 1.274 1.647 0.524 -0.229 0.000 1.010 0.905 0.980 1.018 2.078 1.502 1.274 +0 1.178 -1.318 0.280 1.194 1.625 0.784 -0.940 -1.459 
2.173 0.861 0.316 0.275 2.215 0.398 -0.401 0.263 0.000 0.392 0.009 -0.909 0.000 0.684 0.887 1.538 1.057 1.462 1.055 0.831 +1 0.808 -0.482 -0.967 1.203 0.680 0.313 0.680 -0.981 2.173 0.477 -1.173 -1.327 0.000 1.749 1.035 0.405 2.548 0.862 0.109 1.156 0.000 0.922 0.939 1.361 0.856 0.899 0.922 0.807 +0 1.338 -0.377 -0.606 1.093 1.637 0.497 -1.627 0.871 0.000 0.772 -0.804 -0.216 0.000 0.757 0.366 1.327 2.548 1.050 -0.864 1.723 1.551 0.871 1.056 1.508 0.843 0.584 0.707 0.742 +0 0.707 -1.349 -0.309 0.760 1.600 0.746 -0.703 0.242 2.173 0.707 -0.558 -1.277 2.215 0.809 -1.193 -0.812 0.000 1.140 -1.060 1.172 0.000 1.035 0.952 1.004 0.683 1.049 0.718 0.648 +0 0.592 1.128 0.099 0.541 -1.414 1.166 0.440 -0.243 1.087 1.274 0.036 -1.414 2.215 1.351 -0.617 1.111 0.000 0.620 1.373 1.153 0.000 0.937 0.989 0.989 0.793 1.603 0.939 0.776 +1 0.563 -1.613 -0.890 0.557 -0.352 0.977 -0.709 1.416 2.173 0.388 -0.750 -0.805 0.000 1.119 -1.150 0.325 0.000 0.512 2.171 1.742 0.000 0.896 1.085 0.984 0.501 0.670 0.654 0.642 +0 0.392 -0.908 0.968 2.635 0.190 1.103 -1.444 -0.312 1.087 1.186 -0.245 -1.499 0.000 1.812 -0.799 -1.680 2.548 1.245 0.283 1.369 0.000 0.961 0.862 0.983 1.174 1.741 1.249 1.228 +1 0.957 -0.405 -1.194 1.028 1.340 0.887 0.299 -0.900 2.173 1.188 -1.157 0.832 2.215 0.985 0.244 0.722 0.000 0.660 1.493 -0.568 0.000 0.802 1.036 1.039 0.984 1.951 1.078 0.944 +0 0.484 -0.636 1.620 1.631 0.726 0.904 0.636 1.573 2.173 0.746 -2.595 -0.569 0.000 1.148 1.456 -0.078 0.000 1.377 0.438 0.309 1.551 0.964 0.967 0.990 1.455 1.075 1.091 1.414 +0 0.900 -0.421 -1.539 0.720 -0.862 1.054 2.103 0.656 0.000 0.563 2.077 -0.844 0.000 0.357 1.488 1.013 2.548 0.960 1.029 -1.329 3.102 1.598 0.879 0.989 1.056 0.394 0.835 1.449 +1 1.086 0.012 0.124 0.992 -1.318 1.535 0.635 0.706 2.173 0.860 0.774 1.088 1.107 0.642 -1.270 0.642 0.000 0.566 -1.991 0.098 0.000 0.979 1.245 1.385 1.328 0.585 1.136 1.024 +1 1.738 -0.787 -0.346 0.945 -0.002 0.683 -0.268 1.347 2.173 0.597 0.444 -0.026 0.000 0.903 0.478 -1.280 2.548 1.535 -2.092 1.493 0.000 0.839 0.855 0.982 1.229 0.792 1.040 0.932 +0 0.383 0.794 -1.539 1.899 -0.420 1.101 -0.158 1.508 0.000 0.606 -0.393 0.397 2.215 0.741 -0.347 -0.416 1.274 0.646 -1.231 1.563 0.000 0.915 0.999 1.000 0.895 0.477 0.717 0.892 +1 1.079 -0.215 -1.190 0.261 -0.099 1.505 0.381 -1.685 2.173 1.596 -1.609 0.314 0.000 1.323 0.139 0.301 2.548 0.673 0.968 -0.347 0.000 1.188 1.498 0.983 1.155 1.728 1.806 1.386 +1 0.461 2.079 0.765 0.840 0.293 1.307 -0.337 -1.111 2.173 0.311 2.747 -1.356 0.000 0.570 -0.326 0.769 0.000 1.037 0.175 1.046 3.102 0.616 1.113 0.974 0.632 1.197 1.015 0.810 +1 1.757 1.046 0.989 0.222 -1.463 1.163 1.104 -1.233 2.173 2.186 2.139 -0.133 0.000 1.404 0.697 1.556 2.548 1.315 1.352 0.566 0.000 0.783 1.053 0.985 0.715 0.976 0.837 0.738 +0 3.054 -0.487 1.606 0.303 0.819 1.005 0.064 -0.103 1.087 0.588 0.292 0.550 2.215 0.793 -1.514 -0.354 0.000 0.494 1.755 0.055 0.000 1.306 1.151 0.989 1.536 0.641 1.042 1.022 +0 0.733 -1.257 1.157 0.876 -0.994 2.305 -0.557 0.032 2.173 3.278 -1.087 -1.639 2.215 1.252 0.275 1.289 0.000 1.186 -0.221 0.611 0.000 0.863 1.689 1.036 0.999 4.194 2.074 1.573 +1 0.395 -0.893 0.025 1.478 -1.672 1.244 0.377 -0.171 0.000 1.481 0.435 1.009 2.215 0.640 0.378 -0.837 0.000 0.756 0.403 -1.465 3.102 0.908 0.824 1.058 1.236 0.756 0.926 0.984 +1 2.147 -0.242 -1.011 0.602 1.242 0.597 -0.886 0.527 0.000 0.446 0.239 0.757 0.000 0.926 -0.499 1.570 0.000 1.046 -0.453 0.008 1.551 0.772 0.721 1.411 0.745 0.305 0.601 0.619 +0 1.392 -0.645 -1.725 0.301 -1.152 0.547 -1.506 -0.507 0.000 0.816 -0.900 -0.032 
1.107 0.846 -0.304 0.616 2.548 0.701 0.145 1.368 0.000 1.300 0.937 0.987 0.926 0.554 0.712 0.701 +0 0.613 -0.687 -1.133 0.798 -1.659 1.495 -0.564 -0.020 0.000 1.370 0.836 1.578 2.215 0.472 0.187 0.081 0.000 1.103 -0.052 1.486 0.000 0.767 0.819 0.994 0.630 1.755 1.323 1.031 +1 0.835 -1.163 0.925 1.230 -0.218 1.763 -1.054 -0.312 1.087 1.918 -2.133 1.640 0.000 1.020 -0.396 1.027 0.000 1.460 -0.882 -1.391 3.102 2.421 1.522 1.203 1.007 1.403 1.643 1.319 +0 0.867 1.098 -1.554 0.456 1.190 2.115 1.008 1.445 2.173 1.933 1.174 -0.506 0.000 1.753 1.589 0.033 0.000 0.995 0.568 -0.035 1.551 0.614 0.587 0.981 0.945 1.515 1.177 1.003 +1 0.555 0.756 -1.112 1.398 0.877 1.097 0.021 1.223 1.087 1.808 -2.346 -0.315 0.000 1.025 -0.373 1.513 0.000 1.541 0.011 -1.006 3.102 0.845 0.804 1.190 0.948 1.246 0.852 0.761 +0 1.122 0.884 0.015 0.375 -0.060 1.155 1.394 0.720 2.173 0.936 0.769 -1.413 0.000 1.650 1.044 -0.764 2.548 1.480 1.635 -1.620 0.000 0.867 0.940 0.985 0.890 1.689 0.952 0.842 +0 1.274 -0.528 -1.570 0.677 -0.341 1.296 0.973 0.824 0.000 1.476 -0.289 -0.897 0.000 0.950 -0.331 -0.129 1.274 1.131 -0.535 0.358 3.102 2.300 1.507 1.152 0.793 0.355 0.979 0.850 +0 0.778 0.133 0.347 1.030 -0.259 1.000 -1.329 -1.680 2.173 0.274 -0.148 1.141 0.000 0.649 -1.433 0.150 2.548 0.381 -1.992 -0.756 0.000 0.711 0.714 0.984 0.614 1.007 0.867 0.694 +1 1.756 0.642 0.282 1.108 0.077 1.110 1.043 1.646 2.173 0.965 0.805 -1.292 0.000 0.404 1.559 0.620 0.000 0.448 0.911 -0.113 1.551 1.043 0.889 0.976 0.516 0.746 0.977 0.884 +0 0.685 -0.181 -0.498 2.083 0.249 0.892 -1.825 1.465 0.000 0.526 0.498 -1.689 2.215 0.484 -0.721 -0.531 2.548 0.559 -1.720 -0.975 0.000 0.880 0.805 1.032 0.958 0.596 0.816 0.973 +1 1.583 -0.427 -1.379 0.255 0.558 0.732 0.715 0.737 2.173 0.654 -0.465 -0.436 2.215 1.041 0.240 0.153 0.000 0.995 2.069 1.568 0.000 1.861 1.263 0.990 0.715 1.099 1.034 1.006 +1 0.921 0.663 -0.987 0.541 1.422 0.859 -0.150 -0.129 0.000 1.011 -0.112 -1.483 1.107 0.626 0.998 1.432 2.548 0.897 -0.160 0.913 0.000 0.927 1.043 0.992 0.567 0.685 0.742 0.731 +1 0.346 0.766 1.130 0.719 -1.001 0.842 -0.252 -0.241 1.087 0.628 -1.295 -0.133 2.215 1.109 1.294 1.690 0.000 1.949 -0.670 1.524 0.000 2.214 1.851 0.985 1.481 0.614 1.366 1.410 +1 0.911 -1.375 0.636 0.662 -0.203 0.741 0.866 1.284 2.173 0.969 -0.435 1.735 2.215 0.450 -1.083 -1.728 0.000 0.560 1.238 1.580 0.000 0.987 0.836 0.990 0.926 1.013 0.951 0.810 +1 0.775 -0.696 -0.158 1.488 0.447 1.236 -0.086 -1.613 2.173 0.752 0.954 0.140 0.000 0.594 -1.402 1.522 0.000 0.743 -0.177 -0.729 3.102 0.766 1.215 0.992 0.656 0.729 0.902 0.808 +1 0.920 -0.744 -1.567 2.355 0.847 1.277 -1.250 -0.488 1.087 0.237 -0.685 0.772 2.215 0.684 -0.866 -1.190 0.000 0.529 0.440 -1.452 0.000 0.559 0.967 1.678 0.751 0.769 1.046 0.867 +0 0.791 -2.041 -0.855 0.859 0.184 0.486 0.226 0.775 0.000 0.472 0.015 1.604 2.215 0.363 -1.713 -0.586 0.000 1.137 -0.801 -1.345 3.102 1.189 0.898 0.985 1.173 0.455 0.831 0.857 +1 0.578 -0.271 1.443 0.983 -1.189 0.888 -0.289 -0.474 2.173 0.930 -0.741 0.209 2.215 0.674 -1.033 -0.823 0.000 0.871 -0.865 1.309 0.000 0.795 0.879 0.996 0.922 0.835 0.801 0.691 +0 1.026 0.621 -0.093 0.134 0.998 1.511 1.427 0.242 2.173 2.045 -0.858 -1.419 0.000 1.119 0.210 -1.697 2.548 1.171 0.816 0.910 0.000 2.774 1.613 0.989 1.209 1.904 2.067 1.557 +0 2.296 0.265 0.258 0.405 0.910 0.913 1.898 -1.350 0.000 1.077 -0.054 0.971 2.215 1.212 -0.770 -0.610 1.274 1.674 -0.488 -1.283 0.000 0.985 1.043 0.981 1.162 1.298 0.934 0.933 +0 2.664 -1.238 0.932 0.893 0.932 1.532 -0.344 -1.079 2.173 1.037 0.571 -0.802 0.000 1.471 -0.587 
0.380 2.548 0.509 1.564 0.976 0.000 1.149 1.402 0.970 2.233 1.832 1.562 1.631 +0 0.343 0.937 -0.988 1.529 0.839 1.088 0.155 0.739 0.000 2.426 -0.795 -0.951 2.215 0.580 0.481 0.237 2.548 0.666 -0.807 1.442 0.000 1.089 0.746 1.001 2.044 1.435 1.323 1.208 +1 0.505 -0.756 0.069 1.239 -1.562 1.317 0.915 1.646 1.087 1.061 0.698 0.283 0.000 1.230 1.661 0.344 0.000 1.784 -0.731 -0.708 3.102 1.023 1.712 1.090 0.751 2.166 1.586 1.421 +1 1.071 0.359 -0.694 0.620 1.471 1.074 0.065 0.747 0.000 1.055 0.460 -1.503 1.107 0.718 0.789 0.346 0.000 1.323 -0.336 -0.620 3.102 0.857 1.100 1.048 0.696 0.902 0.938 0.812 +0 0.296 2.274 0.008 1.767 -0.385 1.472 -0.120 1.196 0.000 1.572 0.949 -0.536 0.000 1.391 -0.754 1.251 2.548 0.671 -1.121 -1.356 1.551 3.729 2.220 0.977 1.567 0.560 1.494 1.390 +1 0.791 -0.505 1.396 0.589 -0.218 1.552 0.409 -1.516 0.000 1.655 1.044 0.327 1.107 1.180 0.716 -0.283 2.548 0.698 0.672 1.249 0.000 1.011 1.287 0.987 1.418 0.805 1.229 1.138 +0 0.554 -1.033 1.216 1.045 -0.324 0.514 -2.368 -0.646 0.000 0.567 -1.952 -1.483 0.000 0.939 -1.377 0.752 0.000 0.547 -0.986 -0.716 0.000 0.815 0.648 1.037 1.074 0.539 1.134 0.928 +0 0.780 -0.486 -0.127 1.335 -1.237 0.705 -2.426 1.632 0.000 1.222 -1.634 0.880 0.000 0.982 0.605 -0.136 2.548 1.178 -0.858 -0.572 3.102 0.911 1.014 1.189 0.913 0.845 0.997 0.920 +0 0.804 -1.187 -0.133 0.717 -1.596 0.680 0.997 0.836 0.000 0.721 0.715 -1.127 2.215 0.439 -2.427 -0.354 0.000 0.497 -0.395 1.732 3.102 3.126 1.648 1.018 1.022 0.452 1.153 1.008 +1 0.804 -1.537 -1.094 0.894 -1.336 1.271 -0.130 0.251 2.173 1.560 -0.479 1.151 1.107 1.027 1.710 -0.990 0.000 1.385 -0.239 -1.098 0.000 1.181 1.091 0.982 1.367 1.548 1.156 0.977 +0 0.571 -1.364 0.908 1.134 -1.599 0.978 -0.356 -0.280 0.000 0.797 0.726 1.449 2.215 0.686 0.304 -0.823 0.000 0.791 -0.879 0.676 3.102 0.866 0.848 0.991 0.916 0.869 0.840 0.815 +1 1.095 0.228 1.138 0.693 0.882 0.866 0.787 0.974 0.000 1.320 -0.296 -0.058 2.215 0.522 0.071 1.736 0.000 1.814 0.944 -1.346 0.000 0.885 1.107 0.980 1.035 0.955 0.968 0.904 +0 1.800 -0.128 0.412 1.130 -0.204 1.293 0.082 -1.152 2.173 0.716 -0.169 1.678 1.107 1.309 -1.095 1.447 0.000 1.752 0.911 0.368 0.000 2.741 1.622 1.040 1.481 0.809 1.287 1.210 +1 1.754 -1.471 -0.990 0.450 -0.065 0.648 -1.830 -1.286 0.000 0.846 -1.294 1.440 0.000 1.019 -1.149 0.374 2.548 1.030 0.087 0.553 3.102 0.910 0.796 0.988 0.852 0.591 0.755 0.700 +1 0.495 -0.259 1.553 1.594 -1.062 0.942 1.332 0.782 0.000 0.709 0.618 0.080 2.215 1.019 2.101 -1.110 0.000 0.963 1.915 -0.054 0.000 0.890 1.048 0.985 1.184 0.353 0.918 1.259 +0 0.366 1.094 0.155 1.786 1.074 0.651 -0.847 0.132 0.000 0.751 -0.843 -0.678 0.000 0.884 -0.030 -1.104 2.548 1.092 0.024 -1.625 3.102 0.991 0.985 0.985 1.268 0.340 0.952 1.286 +0 0.576 -0.147 -1.482 1.026 1.318 1.050 -0.121 -0.394 1.087 1.080 1.126 0.996 2.215 0.517 -0.982 -0.423 0.000 0.482 -0.856 -1.663 0.000 0.893 0.963 0.990 1.481 1.836 1.297 1.015 +1 0.850 -0.303 0.739 0.987 1.683 0.685 0.444 -0.743 2.173 0.746 1.209 0.123 0.000 0.840 1.244 1.722 2.548 0.484 0.722 -0.455 0.000 0.417 0.704 0.989 0.940 0.876 0.754 0.708 +0 1.653 -0.613 0.452 1.092 -0.207 1.325 1.167 -1.041 2.173 0.684 1.605 -1.639 0.000 0.881 -0.793 1.091 1.274 0.594 0.626 0.735 0.000 0.799 0.959 1.040 0.790 2.067 1.502 1.285 +0 0.940 -0.689 -0.975 0.522 -0.994 0.425 -0.501 -1.319 0.000 0.561 -0.593 1.120 2.215 1.121 0.381 0.606 0.000 0.860 -0.456 0.441 3.102 1.361 0.884 0.992 0.773 0.361 0.627 0.745 +1 0.810 -0.512 0.267 0.709 -0.885 1.008 -1.029 1.296 0.000 1.121 -0.239 -0.403 2.215 1.049 -1.868 1.028 0.000 1.445 
-1.261 -1.079 1.551 1.074 1.149 0.984 0.631 1.019 1.141 0.999 +1 0.581 -0.941 -0.052 0.410 -1.488 1.048 -0.450 0.642 2.173 0.670 -0.428 -0.410 1.107 1.470 -1.003 1.200 0.000 0.762 -1.037 -0.718 0.000 0.890 1.050 0.993 0.607 1.002 0.737 0.652 +1 1.018 0.768 -0.770 0.363 0.563 1.141 -0.452 0.186 2.173 0.904 -1.695 -1.573 0.000 1.053 0.768 -1.386 0.000 1.029 -0.266 1.156 1.551 0.708 0.703 0.984 1.302 0.883 0.935 0.849 +1 0.546 -1.655 1.016 0.470 0.746 0.601 0.151 0.846 0.000 0.544 0.281 -0.122 0.000 1.576 -1.311 -0.884 0.000 0.554 -0.892 -1.176 3.102 0.934 0.778 0.981 0.481 0.216 0.469 0.492 +0 1.141 0.248 0.323 1.058 1.262 0.792 1.243 -0.565 0.000 0.600 1.167 -1.736 1.107 0.765 0.869 -1.354 1.274 0.575 2.324 -1.395 0.000 0.924 0.788 1.140 0.850 0.258 0.648 0.684 +0 0.596 0.780 -0.925 0.754 -0.272 0.785 1.160 -1.031 0.000 1.090 0.215 0.867 0.000 0.589 1.118 0.448 0.000 0.439 1.106 -1.215 3.102 0.920 0.665 0.999 0.508 0.254 0.389 0.501 +1 0.760 0.598 0.270 1.110 1.325 0.659 -0.381 0.015 0.000 0.794 0.216 0.595 0.000 1.157 0.165 -0.944 2.548 0.823 0.907 -1.683 3.102 0.900 0.974 1.036 0.895 0.578 0.756 0.720 +1 1.948 -1.594 -1.045 0.209 -0.670 1.400 -1.326 0.921 2.173 0.588 -1.145 -0.013 0.000 0.607 -0.338 0.156 2.548 0.557 1.673 -1.255 0.000 1.883 1.091 0.976 1.475 0.932 1.237 1.207 +0 0.639 1.236 -1.405 0.314 1.641 1.320 0.143 1.517 0.000 1.144 1.126 -0.645 2.215 0.563 1.903 -0.501 0.000 1.367 0.609 0.203 0.000 0.912 0.788 0.978 0.647 0.796 0.665 0.661 +0 0.652 -0.443 1.706 1.111 -0.951 1.586 -0.937 1.544 0.000 2.081 -0.640 -0.664 2.215 2.181 -0.707 0.771 2.548 1.605 -2.342 0.353 0.000 3.382 2.334 0.989 0.923 2.181 1.996 1.531 +1 0.503 -0.808 -1.388 2.097 1.471 1.049 -0.631 -0.399 2.173 0.423 -1.657 -1.068 0.000 0.647 -0.387 0.729 0.000 1.277 -0.008 0.476 1.551 0.971 0.955 0.994 1.168 0.953 1.140 0.916 +1 0.382 -0.107 1.441 1.569 -0.289 1.317 0.532 -0.903 2.173 1.208 0.643 1.056 1.107 0.597 -0.585 0.987 0.000 0.535 -1.374 0.789 0.000 0.346 0.882 1.072 0.996 1.824 1.113 0.960 +0 0.761 0.033 0.694 0.299 0.556 0.486 -0.329 -1.572 2.173 0.828 0.075 -0.830 2.215 0.398 -0.933 0.933 0.000 0.410 -0.797 -0.761 0.000 0.446 0.578 0.991 0.765 0.610 0.689 0.543 +0 0.472 1.413 -0.726 1.475 0.141 0.734 -0.142 0.862 0.000 0.485 -1.395 -1.246 0.000 0.742 0.522 1.550 0.000 1.166 0.443 -0.319 3.102 0.911 0.967 0.994 0.888 0.713 0.795 0.988 +1 0.480 -1.707 -0.662 0.253 -0.908 1.094 -1.223 0.520 0.000 1.100 -1.347 -1.690 0.000 0.900 -0.942 -1.101 1.274 0.575 -0.089 -0.610 3.102 0.861 0.698 0.980 0.590 0.354 0.470 0.501 +0 2.020 0.547 -0.208 1.077 -0.833 1.704 -1.441 1.408 0.000 0.897 -1.399 -1.637 0.000 2.869 -0.744 -0.063 0.000 0.753 0.950 -1.414 3.102 1.014 0.859 1.088 0.766 0.932 1.098 1.548 +1 1.526 -0.919 0.478 0.697 -0.381 2.496 -0.339 -1.004 0.000 1.768 0.519 0.978 1.107 1.328 -0.282 0.838 0.000 0.694 0.030 -1.634 0.000 0.859 0.892 0.998 1.460 0.877 1.088 0.894 +1 1.923 0.374 0.880 0.499 -1.012 0.407 -0.273 0.817 0.000 0.775 0.347 -1.165 2.215 0.938 -1.128 -0.851 2.548 0.438 -0.607 -0.716 0.000 0.650 0.725 1.345 1.235 0.841 0.910 0.735 +0 2.738 0.149 -1.638 0.834 -1.410 1.423 -0.167 0.188 2.173 0.838 0.861 0.086 0.000 0.340 -0.193 1.156 0.000 1.064 0.779 -0.926 3.102 0.972 0.855 1.008 0.757 1.334 1.284 1.084 +1 0.440 -1.808 -1.046 1.605 1.478 0.984 -0.914 1.094 2.173 0.776 -1.930 -0.799 0.000 1.001 1.219 -1.522 0.000 2.339 -1.915 0.129 0.000 0.729 1.132 0.991 0.821 0.846 0.886 0.793 +0 0.936 -1.643 -1.134 0.276 -1.676 0.621 0.664 1.015 0.000 0.791 -1.204 -0.004 2.215 0.375 -0.989 0.815 0.000 1.198 0.879 
-1.221 3.102 0.879 0.935 0.989 0.800 1.470 0.926 0.889 +1 0.429 1.376 -1.513 2.616 -0.793 0.873 0.052 1.170 2.173 1.347 0.789 0.470 2.215 0.604 -0.298 -0.823 0.000 0.689 0.464 1.685 0.000 0.638 0.974 0.989 1.956 1.128 1.505 1.193 +0 0.481 -0.835 0.471 1.247 -0.120 0.699 1.539 -1.634 2.173 0.535 -0.772 -0.794 0.000 0.487 -0.924 0.767 0.000 0.465 -0.523 1.398 0.000 0.995 0.719 0.983 1.249 0.306 0.808 0.764 +1 0.619 1.022 -0.666 1.206 0.995 1.035 0.486 -1.253 0.000 0.563 1.041 1.329 2.215 0.620 1.935 0.430 0.000 1.349 -0.461 0.373 1.551 1.310 0.939 1.194 0.985 0.929 0.784 0.733 +1 0.876 1.565 0.638 0.745 1.647 0.918 1.218 -0.977 2.173 1.029 1.294 1.729 2.215 1.244 0.881 0.133 0.000 1.798 -1.880 0.303 0.000 0.753 0.956 0.984 0.961 0.927 0.758 0.712 +0 0.653 0.272 -1.625 1.052 -0.762 1.289 0.508 1.228 0.000 1.063 0.949 -1.087 0.000 1.129 1.245 -0.511 0.000 1.311 0.705 0.429 3.102 0.897 1.002 0.993 0.691 0.298 0.686 0.638 +1 0.512 -0.945 1.294 1.118 -0.907 1.163 0.711 0.796 2.173 0.973 -0.883 -0.537 0.000 1.072 0.790 -1.027 2.548 0.729 1.972 0.748 0.000 2.898 1.756 0.987 1.684 1.391 1.363 1.361 +1 0.937 0.893 1.654 0.902 -0.462 1.896 0.805 0.606 0.000 1.432 0.199 -0.915 0.000 1.331 1.492 -1.496 0.000 1.237 0.135 -0.171 1.551 1.962 1.123 1.203 0.789 0.700 0.846 0.760 +1 0.611 -0.692 1.699 2.237 -1.210 1.286 -0.624 0.964 2.173 0.687 -1.168 0.055 2.215 0.566 -2.279 -0.151 0.000 0.484 -0.115 0.565 0.000 0.922 1.150 0.986 0.997 1.087 1.105 0.936 +1 0.410 -0.695 -1.683 1.544 -0.202 0.983 0.498 1.264 2.173 1.111 0.104 1.690 0.000 1.258 -0.391 -0.264 2.548 0.616 0.875 -0.242 0.000 1.186 0.945 1.071 0.610 1.516 0.982 0.918 +1 0.620 -1.523 -0.159 1.901 -1.293 1.184 -1.678 0.492 0.000 1.073 -1.200 -0.881 2.215 1.186 -0.535 -1.735 2.548 1.089 -0.673 0.791 0.000 0.918 1.295 1.283 0.749 0.923 1.053 0.994 +1 1.155 -0.319 -0.471 1.473 0.752 0.894 1.531 0.801 0.000 2.013 -1.213 1.539 0.000 1.548 -0.770 1.281 0.000 1.981 -0.559 0.356 0.000 0.830 0.738 1.613 1.549 1.447 1.438 1.311 +0 0.843 0.874 0.215 0.467 1.649 0.684 -0.654 1.738 2.173 0.761 1.156 -0.849 1.107 1.053 -0.378 0.474 0.000 0.688 -1.408 0.706 0.000 1.086 0.978 0.988 0.763 1.364 1.021 0.874 +1 1.200 -0.983 0.244 0.566 0.984 0.784 0.377 -0.659 2.173 0.533 0.142 0.661 0.000 1.093 0.088 -1.712 2.548 0.852 0.776 -0.983 0.000 0.943 0.815 0.986 1.107 0.952 1.060 0.926 +1 1.828 -0.030 -0.244 0.243 -0.428 1.166 1.151 1.424 2.173 0.677 0.591 0.243 1.107 0.570 2.274 -1.322 0.000 0.609 0.887 -0.493 0.000 0.672 0.976 0.980 0.622 1.202 1.016 0.903 +0 0.784 -1.159 0.438 1.004 0.140 1.657 -0.199 0.618 2.173 1.525 -2.254 -1.635 0.000 2.077 -1.026 -1.215 0.000 1.639 -1.153 -0.464 0.000 1.303 1.178 0.995 1.769 1.346 1.430 1.374 +0 1.914 1.142 -1.138 0.769 -0.896 0.697 1.114 0.781 0.000 0.602 -0.770 0.258 1.107 0.748 0.539 -1.622 0.000 0.378 1.380 -0.361 3.102 0.788 0.915 0.973 0.517 0.708 0.976 0.910 +0 0.903 0.281 1.575 0.452 -0.421 1.015 0.538 -0.926 0.000 1.140 0.637 0.490 2.215 0.508 -0.618 0.607 2.548 0.472 -1.363 1.095 0.000 1.731 1.138 0.987 0.887 0.587 0.941 0.797 +0 0.909 1.101 1.568 1.043 -0.444 1.003 0.916 0.086 2.173 0.837 2.528 1.583 0.000 0.600 1.522 1.579 2.548 0.567 0.789 -0.631 0.000 1.165 0.696 1.310 1.000 1.012 0.888 0.816 +1 0.327 0.610 -0.821 1.419 1.498 0.611 -1.214 1.269 0.000 1.421 -0.776 -0.519 2.215 0.725 -1.515 -0.055 0.000 1.252 0.029 1.213 0.000 0.901 0.930 0.991 1.973 0.872 1.219 1.211 +1 0.653 1.786 0.236 1.148 1.143 0.712 -0.170 -0.866 2.173 0.824 1.035 1.653 0.000 0.306 -2.021 1.135 0.000 0.608 0.922 -0.358 3.102 0.865 0.944 0.984 0.610 
0.567 0.797 0.702 +0 1.850 0.215 1.006 0.406 1.010 1.252 -1.367 -0.132 2.173 1.604 -0.989 -0.791 0.000 0.939 0.470 -1.205 2.548 1.000 -0.913 0.772 0.000 1.630 1.328 0.971 0.901 1.852 1.467 1.365 +1 1.337 0.078 1.017 1.061 -0.145 1.375 0.171 0.141 0.000 1.637 0.826 -1.177 0.000 0.860 0.603 1.249 2.548 0.398 0.775 -0.457 0.000 0.727 0.962 1.429 0.802 0.563 0.691 0.687 +1 0.503 2.127 1.070 1.729 0.983 0.666 0.392 -0.767 2.173 0.870 -0.305 -0.461 2.215 0.848 0.841 1.689 0.000 0.851 0.188 1.005 0.000 0.839 0.880 0.973 1.179 0.507 1.027 0.832 +1 1.230 0.709 1.481 1.110 0.936 0.662 -1.646 -0.848 0.000 1.615 0.628 -0.314 2.215 1.037 0.230 1.307 0.000 0.684 0.109 0.682 3.102 1.035 0.910 0.997 1.398 0.781 1.041 1.228 +1 1.033 -1.045 1.342 0.171 -0.054 0.530 -0.415 1.048 0.000 0.867 -0.136 -0.744 2.215 1.493 0.575 0.080 2.548 0.414 0.734 1.009 0.000 0.499 0.865 0.983 1.027 0.947 0.813 0.708 +0 1.739 -0.310 -1.341 1.198 0.964 2.075 0.820 0.726 2.173 2.064 1.293 -1.123 2.215 3.014 -1.795 -0.383 0.000 1.223 -0.212 0.983 0.000 0.934 4.758 1.748 2.015 3.129 3.897 2.900 +0 1.671 -0.286 -1.726 1.346 -0.498 1.060 0.900 -0.254 0.000 0.773 0.460 0.910 1.107 1.460 0.796 1.320 2.548 0.775 1.718 0.220 0.000 0.856 1.002 1.859 1.403 0.467 1.001 1.019 +1 1.232 -0.490 1.050 0.888 1.417 1.657 0.424 -0.602 0.000 0.882 -1.280 -0.798 0.000 1.044 -1.135 1.372 2.548 1.552 0.686 0.177 1.551 1.182 0.916 0.996 1.330 1.472 1.141 1.001 +1 1.207 1.403 0.437 0.980 -1.223 1.451 1.847 1.563 0.000 2.569 0.941 -0.480 2.215 0.697 0.705 -1.410 2.548 0.568 0.289 1.074 0.000 0.551 0.639 1.503 1.334 1.067 0.969 0.966 +1 0.555 -0.117 1.592 0.564 0.609 0.908 -0.846 -1.515 0.000 0.947 -0.928 -0.722 2.215 0.750 0.142 1.118 0.000 2.535 -0.153 0.393 3.102 1.071 1.044 0.988 0.826 1.304 0.963 0.887 +0 0.663 0.901 0.754 0.744 -1.421 1.374 0.144 0.352 2.173 1.316 -0.411 -1.546 2.215 0.727 -0.845 -0.250 0.000 0.856 0.181 -1.245 0.000 0.860 0.864 0.990 1.248 2.041 1.252 1.044 +1 0.333 -1.521 0.744 0.817 -1.524 1.543 0.299 0.699 0.000 0.963 -0.626 -0.627 0.000 1.927 -0.405 -1.286 2.548 0.673 0.358 -0.055 3.102 0.765 0.723 0.986 0.628 0.873 0.602 0.566 +1 0.693 2.048 -1.734 0.509 1.310 0.603 0.754 -0.421 0.000 0.583 2.016 -0.186 0.000 1.335 0.726 1.694 1.274 0.733 -0.194 0.550 3.102 0.926 0.912 0.990 0.610 0.766 0.812 0.743 +1 0.873 0.251 -0.607 0.980 -1.533 0.843 0.560 1.673 2.173 0.670 0.569 0.970 0.000 1.475 -0.385 0.065 1.274 0.655 0.992 0.026 0.000 0.977 0.890 0.984 0.927 1.553 0.891 0.755 +1 0.733 0.267 -1.614 0.575 -0.186 1.003 0.439 1.412 2.173 1.286 0.933 0.193 0.000 1.430 0.415 -0.207 0.000 2.257 1.713 -1.445 0.000 0.904 0.718 0.988 0.883 0.702 0.933 0.839 +1 0.555 -0.663 0.483 0.887 -0.568 0.764 -0.328 1.675 2.173 0.541 -1.720 0.209 0.000 0.652 -2.496 1.200 0.000 1.188 -0.538 -0.583 3.102 0.846 0.974 0.992 1.033 0.915 0.927 0.804 +0 1.000 0.540 -0.168 0.518 -0.860 0.530 2.619 0.512 0.000 0.801 0.373 -1.661 1.107 0.816 0.832 -0.526 0.000 1.155 -0.289 1.167 1.551 0.885 1.224 0.982 0.785 0.584 0.992 0.976 +1 1.444 0.528 1.475 1.394 1.063 0.576 0.966 0.903 0.000 1.118 -0.380 -1.168 0.000 0.931 1.839 -0.028 0.000 1.956 1.147 -0.666 3.102 1.018 1.006 0.995 0.590 0.669 0.901 0.820 +0 0.499 -0.777 -1.083 1.272 1.219 0.875 -0.633 0.496 2.173 1.030 0.344 1.698 0.000 0.928 0.046 -0.488 2.548 1.294 1.878 -0.410 0.000 0.849 0.965 0.986 0.872 0.956 0.998 0.957 +0 0.735 -0.014 0.571 1.776 -0.046 1.565 -0.861 -1.686 0.000 0.471 -1.044 -1.014 0.000 0.951 -1.134 0.334 2.548 0.494 -0.128 0.346 3.102 1.057 0.910 0.991 1.026 0.300 0.795 1.057 +0 3.056 -0.780 
-0.264 1.062 -0.318 2.004 -0.771 1.545 0.000 1.645 0.006 -0.078 2.215 1.145 -1.312 1.124 1.274 0.994 0.468 -1.498 0.000 0.743 0.944 0.967 0.812 1.727 1.116 1.042 +0 1.868 -1.121 1.249 0.873 1.110 1.074 0.603 -0.732 2.173 0.660 0.436 -0.150 0.000 0.441 -0.412 1.578 0.000 0.563 -0.304 -0.544 3.102 0.913 0.886 0.996 0.848 0.439 1.441 1.156 +1 0.487 -0.160 -1.622 1.242 0.298 0.451 0.542 -0.291 0.000 0.675 -0.472 0.757 0.000 1.275 1.342 -1.641 2.548 0.975 -1.688 -1.038 0.000 1.024 0.591 1.064 1.162 0.771 1.024 0.871 +1 1.641 -0.086 0.907 0.353 -0.349 3.066 -0.254 -0.852 0.000 1.624 -0.273 1.510 0.000 2.305 0.993 0.664 2.548 1.197 -0.676 0.934 3.102 4.020 2.510 0.988 0.960 1.431 2.239 1.708 +0 0.715 0.275 -1.482 1.382 -1.448 0.592 -1.139 1.311 2.173 0.891 -0.564 0.018 2.215 0.944 0.583 0.509 0.000 1.086 0.904 -0.558 0.000 0.949 0.967 0.988 0.790 1.029 0.943 0.950 +0 0.914 -0.161 1.365 1.649 -1.404 0.809 -1.091 0.390 0.000 0.584 -1.398 1.138 0.000 1.096 -1.446 -0.422 2.548 1.099 0.019 -0.489 3.102 0.944 0.954 1.025 0.817 0.753 0.859 0.917 +1 0.780 1.603 -0.435 0.723 0.750 0.716 0.411 0.059 1.087 1.000 1.738 1.513 0.000 0.595 1.262 -1.316 0.000 1.017 0.113 -1.376 3.102 0.702 0.809 0.986 0.969 0.878 0.854 0.819 +1 0.662 0.681 -0.308 0.264 0.653 0.939 0.565 1.081 0.000 0.796 1.529 -1.122 2.215 1.123 0.389 -0.866 1.274 0.625 1.239 0.293 0.000 0.943 1.102 0.979 1.024 0.650 0.845 0.807 +1 0.413 -1.246 1.529 1.073 -0.989 0.349 -1.536 -1.084 0.000 1.003 -1.620 1.414 0.000 0.533 -0.542 0.284 0.000 0.591 -0.997 0.045 3.102 0.979 0.736 0.986 0.805 0.685 0.897 0.803 +0 1.141 0.072 0.334 1.007 1.193 1.943 1.346 -1.120 0.000 1.450 -2.735 -0.014 0.000 1.536 -0.060 -0.047 0.000 2.812 -0.996 -1.681 3.102 0.800 1.866 1.039 1.325 1.220 1.754 1.376 +1 0.884 0.715 1.001 1.227 0.150 0.895 0.641 1.308 2.173 1.270 0.687 -0.538 0.000 1.122 0.291 -1.024 0.000 0.589 0.684 -1.668 0.000 0.961 0.772 1.000 0.873 0.678 0.774 0.735 +1 0.480 1.358 0.070 0.889 1.143 0.503 1.256 -1.103 0.000 0.422 -1.406 0.465 2.215 0.403 0.549 -0.638 2.548 0.369 0.911 1.008 0.000 0.624 1.118 0.991 0.581 0.647 0.665 0.630 +1 0.804 0.664 0.744 1.333 -0.505 0.556 1.999 -1.656 0.000 0.398 -0.419 0.629 2.215 0.630 0.363 -1.083 2.548 0.717 1.270 1.200 0.000 0.571 1.008 1.294 0.734 0.579 0.680 0.716 +0 1.752 0.212 -1.001 0.695 -1.237 0.782 -0.985 0.711 2.173 1.304 0.507 0.622 0.000 1.372 -0.538 -1.068 2.548 1.571 -0.325 0.431 0.000 0.937 1.041 0.997 0.574 1.316 1.054 1.074 +0 1.234 0.157 0.881 0.475 -0.377 1.699 0.551 0.261 0.000 2.461 -1.606 -1.295 0.000 0.490 -0.774 -1.263 2.548 0.992 -1.071 1.303 3.102 7.235 3.684 0.989 0.805 0.410 2.128 1.665 +1 1.129 0.872 -1.256 2.698 0.838 1.651 -2.427 0.613 0.000 2.573 1.181 -0.458 0.000 2.391 0.495 1.101 1.274 0.981 1.468 -1.117 0.000 1.291 0.868 2.297 1.334 0.825 1.275 1.287 +1 1.114 0.343 0.921 0.909 -1.488 1.196 0.589 -1.430 0.000 0.886 1.239 -0.027 1.107 1.530 0.798 0.468 2.548 1.179 -0.338 -0.467 0.000 0.912 1.284 1.151 0.926 0.589 1.026 0.922 +1 0.304 -0.386 -0.965 0.824 -0.295 1.784 0.849 -1.361 0.000 0.896 1.599 0.134 0.000 1.809 0.690 0.100 0.000 1.920 -0.351 1.198 1.551 0.898 0.688 0.983 1.145 0.877 1.065 1.508 +0 2.631 -0.619 0.392 0.673 0.324 1.849 0.761 -1.260 2.173 0.369 0.942 0.259 2.215 0.679 1.090 1.446 0.000 0.370 -0.942 -1.466 0.000 0.845 1.044 0.982 0.939 1.196 1.744 1.334 +1 0.525 1.541 -0.231 0.619 0.919 0.696 -0.459 -1.532 2.173 0.371 -0.009 0.230 0.000 0.528 -1.049 0.694 2.548 1.299 1.305 -0.855 0.000 1.088 1.091 0.988 0.947 0.734 0.868 0.769 +0 0.469 -0.085 0.815 1.358 -1.195 1.340 0.937 
1.479 2.173 1.714 -1.179 -0.492 0.000 1.600 -0.664 0.435 1.274 0.462 1.024 -0.578 0.000 1.029 0.864 1.074 1.160 2.262 1.215 1.009 +1 2.277 0.728 -1.009 0.928 -1.618 0.652 -0.415 0.048 0.000 0.934 0.735 0.202 1.107 1.004 0.687 1.012 1.274 0.580 -0.775 0.613 0.000 0.828 0.874 1.048 1.025 0.687 0.905 0.801 +1 0.648 -0.060 -0.206 1.690 0.533 0.697 -1.041 -0.849 2.173 0.638 -0.389 1.506 2.215 0.929 0.051 -1.309 0.000 0.656 -0.993 1.517 0.000 0.859 0.901 0.987 0.887 0.897 0.930 0.768 +0 1.031 0.424 1.175 1.310 -1.679 1.639 1.104 -0.567 0.000 0.901 -0.131 -1.579 2.215 1.547 0.912 0.801 0.000 1.320 0.121 0.082 3.102 0.975 0.958 0.985 0.757 0.991 0.966 0.970 +1 0.835 0.469 -0.615 0.863 0.592 1.419 -0.218 1.116 2.173 1.561 -1.101 -1.532 0.000 1.568 -0.291 -0.306 2.548 1.212 -0.302 -1.051 0.000 1.034 1.343 1.041 1.143 1.784 1.294 1.146 +1 0.292 1.297 0.874 0.780 -1.429 1.078 -0.969 -0.784 1.087 1.403 -0.932 0.616 1.107 0.880 -0.470 -1.558 0.000 0.485 0.569 0.545 0.000 0.823 1.003 0.982 1.026 1.724 1.019 0.851 +1 0.593 0.319 -0.866 0.916 -0.096 0.847 -0.358 -1.579 0.000 0.858 -0.748 -0.238 2.215 1.382 -0.352 1.432 0.000 1.119 0.380 0.684 3.102 0.806 0.962 0.979 0.567 0.871 0.879 0.804 +0 0.964 1.773 -0.733 0.945 -0.956 0.712 0.013 -1.361 0.000 1.264 -0.546 1.027 0.000 1.191 1.221 0.023 2.548 0.570 -0.116 0.478 1.551 1.781 1.029 0.974 0.801 0.568 1.026 1.036 +0 0.279 1.027 -0.611 2.222 -0.826 0.789 0.896 1.144 0.000 0.872 0.068 0.135 2.215 1.031 0.656 0.566 2.548 1.228 1.175 -1.653 0.000 0.954 0.853 0.987 1.853 0.506 1.342 1.212 +1 0.964 0.153 0.614 0.744 -0.684 1.052 0.220 1.496 2.173 0.789 -2.224 -0.400 0.000 0.675 1.339 -0.043 0.000 0.518 0.673 0.475 3.102 3.601 2.010 1.080 0.987 0.664 1.533 1.213 +1 1.270 -0.618 1.024 0.227 -1.674 1.019 0.097 -0.599 1.087 0.972 -1.060 -0.259 2.215 0.659 -0.419 0.708 0.000 1.472 -1.266 1.486 0.000 0.924 1.028 0.992 1.122 1.023 0.998 0.910 +1 1.204 -1.807 -0.729 1.023 -0.604 0.717 -1.520 1.032 2.173 0.647 -2.406 1.121 0.000 1.311 -0.368 0.335 2.548 1.458 -0.717 -1.328 0.000 1.526 1.079 0.998 1.053 1.011 0.952 0.962 +0 0.781 -0.168 -0.989 0.736 0.790 0.896 0.747 0.191 2.173 0.713 0.647 1.528 0.000 0.858 1.118 -1.396 0.000 0.486 -0.867 -0.723 3.102 0.674 1.150 1.050 0.566 0.880 0.797 0.729 +0 0.498 -0.383 -1.115 1.368 1.644 0.654 0.839 -1.635 2.173 1.582 1.342 -0.230 0.000 0.900 1.308 0.812 0.000 1.399 0.554 0.478 1.551 1.478 1.023 0.987 1.295 0.961 1.088 1.430 +0 1.254 0.400 1.364 1.811 1.457 0.608 -1.143 -0.698 0.000 0.687 0.027 -0.532 0.000 1.123 -1.298 0.157 2.548 0.380 2.245 0.271 0.000 0.883 0.932 1.000 0.702 0.612 0.878 0.918 +0 0.928 0.346 -0.669 2.447 -0.930 1.418 -1.319 0.951 2.173 0.770 -1.771 0.412 0.000 0.469 -2.518 1.240 0.000 1.244 -0.746 -0.324 3.102 0.826 0.788 0.990 0.749 1.320 1.375 1.136 +1 0.926 0.177 -0.941 1.615 -0.265 1.308 -2.348 1.301 0.000 2.782 -1.449 -0.746 0.000 2.745 -0.620 0.887 2.548 0.704 0.646 1.410 0.000 3.055 2.192 0.987 1.673 0.981 1.751 1.560 +1 0.407 1.971 1.404 0.714 -1.104 0.984 0.699 0.142 0.000 1.091 0.534 -1.455 2.215 1.041 1.131 1.260 1.274 0.921 1.281 0.056 0.000 0.633 0.970 0.992 0.642 0.827 0.913 0.785 +0 0.561 0.827 -1.057 0.768 1.181 0.883 0.634 0.734 1.087 1.318 0.538 -0.857 0.000 1.227 -1.325 1.376 0.000 2.571 -0.178 -0.312 3.102 1.022 1.025 0.989 0.813 1.469 1.027 0.869 +0 0.770 -0.209 -0.338 1.020 -1.568 0.880 -0.650 1.347 2.173 1.037 -0.905 0.376 0.000 1.801 0.276 0.344 0.000 2.045 1.071 -1.196 1.551 0.772 1.232 1.098 0.912 1.926 1.165 0.985 +0 1.031 -0.663 1.254 0.699 1.738 1.240 0.234 0.841 2.173 1.269 -1.182 
-0.366 2.215 1.449 -0.627 -1.065 0.000 1.036 0.190 -0.510 0.000 0.912 0.983 0.977 0.883 2.194 1.271 1.094 +1 0.531 0.612 -0.072 1.121 1.070 0.836 -0.149 -0.649 0.000 1.238 1.139 1.535 0.000 1.295 -1.452 -0.141 2.548 0.927 -1.193 -1.381 3.102 0.887 1.130 0.987 0.870 0.756 0.856 0.811 +0 0.426 -1.745 -0.400 0.549 0.884 0.462 0.004 -1.455 0.000 0.556 -0.054 1.298 0.000 0.706 0.940 -0.531 2.548 1.205 0.125 0.090 3.102 0.660 0.788 0.995 0.830 0.493 0.596 0.611 +0 1.816 -0.146 -1.418 0.712 -0.766 0.910 -0.372 -0.515 2.173 1.237 -0.862 1.255 0.000 1.928 -0.709 0.447 1.274 0.704 -1.346 0.808 0.000 0.658 0.901 0.978 0.871 1.302 1.022 1.000 +1 0.948 0.387 -1.154 0.546 0.478 0.630 0.432 1.368 0.000 0.809 -0.100 -0.184 2.215 0.661 1.166 0.284 2.548 1.635 0.095 -1.530 0.000 0.822 0.935 0.992 0.693 0.663 0.782 0.678 +1 0.934 -1.487 -0.052 0.878 -0.797 1.030 -0.054 1.656 2.173 0.470 -0.525 -0.644 0.000 1.035 -0.144 0.870 2.548 0.626 1.279 -0.240 0.000 0.909 0.878 0.985 1.263 0.841 0.930 0.867 +0 2.965 -0.962 0.278 0.883 -0.931 0.837 -1.377 -1.111 2.173 0.684 -1.555 1.738 0.000 0.935 -0.599 0.854 1.274 1.147 0.725 -1.316 0.000 0.718 0.763 1.986 1.109 1.156 1.053 0.919 +1 1.180 0.275 1.429 0.339 -1.397 0.464 0.709 -1.072 2.173 1.130 1.237 -0.152 2.215 0.549 0.048 -1.299 0.000 0.516 -1.240 -0.655 0.000 0.593 1.162 0.988 0.762 0.841 0.900 0.784 +1 0.603 -0.709 -0.320 0.387 -0.822 0.556 0.112 -0.984 2.173 0.597 0.450 0.856 0.000 0.598 -0.827 0.353 0.000 0.932 1.027 1.374 3.102 0.789 0.899 0.982 0.743 0.789 0.669 0.629 +1 0.445 0.693 -1.156 0.599 -1.446 0.502 -1.833 0.411 0.000 1.565 -0.131 1.660 2.215 0.869 -1.072 -0.380 2.548 1.430 0.660 0.175 0.000 0.853 0.869 0.995 1.628 1.373 1.479 1.217 +0 0.869 -2.209 -0.723 3.064 -0.744 1.429 -0.145 0.688 0.000 1.255 -1.626 -1.343 0.000 1.274 0.603 0.628 2.548 2.117 0.073 1.277 3.102 3.641 2.283 1.013 2.293 0.783 1.787 1.835 +0 0.439 0.442 -0.875 0.514 -0.326 1.398 -1.025 -1.473 0.000 2.047 1.602 1.504 0.000 2.425 0.680 -0.205 1.274 2.631 0.612 0.221 3.102 1.740 1.790 0.989 1.242 0.723 1.455 1.497 +0 0.902 -0.043 -0.626 0.788 1.089 0.479 -0.134 -1.490 2.173 0.857 0.694 -0.201 0.000 0.482 0.674 -1.162 0.000 1.086 -0.254 1.090 3.102 0.751 0.852 1.168 0.711 0.559 0.630 0.607 +1 1.006 -1.638 1.026 1.900 1.666 0.743 -0.869 0.092 0.000 0.358 -0.889 -0.171 2.215 0.812 -0.519 -1.012 0.000 0.604 0.552 -0.765 1.551 1.191 0.841 1.045 0.851 0.425 0.834 0.849 +0 0.833 0.846 -0.541 1.245 -0.609 0.822 0.252 1.571 0.000 0.834 -0.623 1.433 0.000 1.390 0.256 0.748 2.548 0.927 0.136 -0.171 1.551 0.843 0.977 1.003 1.069 0.641 0.742 0.852 +1 0.764 1.178 -1.511 1.760 -0.028 1.777 1.214 1.445 0.000 2.387 1.648 -0.548 0.000 1.495 0.957 0.401 1.274 0.614 1.648 -1.568 0.000 0.925 0.931 1.563 0.783 0.723 0.676 0.739 +1 1.205 0.662 -0.192 2.442 -1.077 2.719 0.009 1.135 1.087 1.857 -1.207 -0.368 0.000 0.843 0.397 -1.188 0.000 1.075 1.006 -0.552 0.000 0.705 0.686 1.698 2.599 1.324 1.656 1.328 +1 0.671 -1.052 -0.795 0.238 1.685 0.871 -0.164 0.099 2.173 1.185 -0.629 -1.447 0.000 1.447 0.003 0.821 2.548 0.369 0.931 -1.444 0.000 0.864 1.105 0.979 0.823 0.855 0.923 0.784 +0 1.532 -0.927 1.398 1.346 -0.250 0.725 0.157 1.133 2.173 0.775 -0.191 -0.424 2.215 0.421 0.275 -1.248 0.000 0.632 -1.302 -0.761 0.000 0.658 0.872 1.982 1.179 1.105 1.003 0.810 +0 0.907 0.905 0.079 1.399 0.604 0.503 -2.003 1.714 0.000 0.546 -0.724 -0.687 2.215 0.710 2.563 -1.572 0.000 1.034 1.227 -0.849 1.551 0.663 0.767 0.999 0.813 0.922 1.067 1.426 +1 0.439 -0.706 -1.292 0.098 -1.362 1.956 0.460 -0.814 0.000 2.094 1.347 0.899 2.215 
2.130 1.072 0.304 2.548 1.106 -1.228 -1.148 0.000 0.569 1.141 0.901 0.981 1.170 0.996 0.971 +1 1.095 -1.338 -0.560 1.322 -1.486 1.453 -0.933 0.854 1.087 0.563 -0.946 -0.349 2.215 0.350 -1.087 -0.015 0.000 1.055 -0.877 -1.016 0.000 0.735 1.134 1.234 0.750 1.177 1.023 0.849 +0 1.929 0.669 0.189 0.768 -0.395 0.597 0.752 1.593 0.000 1.467 0.548 -1.049 2.215 0.871 0.421 1.230 2.548 0.698 1.151 0.946 0.000 0.918 0.974 0.984 0.877 1.065 0.917 0.810 +1 1.072 -1.381 0.138 1.003 -0.844 0.646 -0.060 1.366 1.087 1.514 -1.236 1.103 0.000 2.117 -0.990 -0.589 2.548 0.472 -0.997 1.614 0.000 0.491 0.787 1.111 0.753 1.630 1.067 0.970 +1 2.189 0.374 -0.344 0.344 -1.726 0.845 0.621 1.737 2.173 1.011 0.732 0.863 2.215 0.717 1.452 -1.732 0.000 0.704 2.039 -0.133 0.000 0.850 0.988 1.138 1.070 0.969 0.939 0.892 +1 2.755 -0.144 0.809 0.632 0.459 2.459 0.048 -1.186 0.000 1.781 -0.731 0.201 2.215 0.769 -0.675 -0.261 2.548 1.052 -1.146 -1.649 0.000 1.569 1.099 0.982 1.012 0.503 0.973 0.938 +0 0.933 -0.110 -1.470 0.555 -0.132 0.518 1.182 0.892 0.000 1.289 -0.559 1.592 2.215 1.208 0.278 -0.260 2.548 1.481 1.266 0.062 0.000 0.937 0.963 0.986 0.791 1.451 1.183 1.001 +1 1.334 -0.980 1.401 0.743 -1.034 0.616 -1.027 0.881 0.000 0.705 0.804 -0.182 0.000 0.916 -0.979 -0.640 2.548 1.057 -0.859 0.270 0.000 0.906 0.720 1.120 0.617 0.130 0.515 0.534 +0 0.334 -1.396 -1.231 2.396 -0.101 0.779 2.891 1.433 0.000 0.417 0.732 -0.609 2.215 0.524 0.395 1.301 2.548 0.385 2.002 -1.348 0.000 0.542 0.986 1.055 1.135 0.498 0.900 2.277 +0 0.975 -0.839 -1.163 1.959 -0.336 0.868 -0.028 0.518 0.000 0.904 -0.751 1.579 1.107 1.460 0.341 1.713 0.000 1.586 -0.095 -0.222 3.102 1.035 1.093 1.299 0.819 1.139 0.912 1.043 +1 1.153 0.312 -1.227 0.988 1.587 0.546 -0.244 1.473 0.000 1.342 -0.752 0.192 2.215 0.372 -1.064 -0.051 2.548 0.476 1.772 -0.701 0.000 0.924 1.049 0.986 0.845 0.221 0.980 0.825 +1 0.450 0.546 1.041 1.672 -0.685 0.432 2.149 -1.709 0.000 0.399 2.816 0.429 0.000 0.613 0.868 -0.097 2.548 1.282 -0.149 1.283 3.102 0.890 0.825 1.201 0.932 0.759 0.895 0.885 +1 0.802 -2.184 -0.199 2.655 1.638 0.740 1.454 1.414 0.000 1.023 -2.068 -1.644 0.000 0.782 -0.883 -1.631 0.000 0.725 0.431 0.030 3.102 0.840 0.836 2.014 1.830 0.672 1.173 1.008 +1 0.663 -0.287 1.299 0.949 1.428 1.580 -1.219 0.958 0.000 2.823 -0.258 -0.716 2.215 0.518 0.344 1.710 0.000 1.014 -1.717 0.172 0.000 1.465 0.888 0.988 1.786 0.474 1.113 1.029 +1 0.929 0.930 -0.772 0.667 -1.682 1.089 0.468 0.852 0.000 1.042 0.466 -1.122 2.215 0.726 0.840 0.379 0.000 0.996 -0.383 -0.228 3.102 0.756 0.929 0.980 0.581 0.800 0.890 0.797 +0 0.366 -1.676 -0.593 0.283 0.147 0.546 0.535 -0.734 2.173 1.104 -0.147 1.323 0.000 0.831 2.600 -0.014 0.000 0.786 -0.623 1.574 0.000 0.833 0.976 0.999 0.664 0.535 0.693 0.655 +0 0.734 0.101 1.259 1.608 0.570 0.302 -0.692 1.648 0.000 0.478 -1.110 -0.429 0.000 1.224 0.401 -1.434 2.548 1.288 -0.491 -0.836 3.102 0.934 0.754 0.989 0.935 0.714 0.828 0.709 +1 1.853 -0.819 -0.688 1.015 -0.823 2.329 -0.247 -1.041 2.173 3.533 -1.198 0.683 0.000 1.025 0.123 -0.198 0.000 2.463 -0.234 1.064 0.000 0.939 1.588 0.974 1.374 1.867 2.165 1.798 +0 0.654 0.590 0.003 0.733 -1.674 1.044 -0.047 0.645 2.173 1.137 -1.657 -0.831 0.000 0.912 1.157 -1.617 0.000 1.375 0.321 1.046 3.102 0.725 0.815 0.987 0.958 0.526 0.841 0.731 +0 0.927 0.991 1.247 1.380 0.977 0.526 -2.561 0.946 0.000 0.872 2.470 -1.165 0.000 1.120 0.660 0.004 0.000 0.733 -0.401 -0.724 1.551 0.807 0.893 0.996 0.962 0.661 1.333 1.315 +0 0.432 0.696 0.121 0.676 -1.199 0.719 0.391 1.003 2.173 0.709 1.137 -1.338 2.215 0.959 0.176 0.051 
0.000 0.434 1.554 -0.775 0.000 0.823 0.869 0.985 0.828 0.993 0.745 0.693 +1 1.403 0.054 0.108 1.591 0.424 1.279 -0.415 1.583 2.173 0.691 1.318 -1.341 0.000 0.914 0.632 -0.393 0.000 0.637 -0.022 -0.541 1.551 1.007 0.667 0.991 1.529 0.918 0.995 1.073 +0 0.796 -0.454 -0.932 0.797 0.833 0.992 -0.317 1.311 2.173 1.287 0.177 -0.032 2.215 0.999 0.816 -0.894 0.000 0.378 -1.445 -1.057 0.000 1.175 1.071 1.104 0.842 1.612 1.023 0.863 +1 0.433 -1.220 -0.991 0.919 0.327 0.611 -0.372 1.070 0.000 0.578 -0.495 -0.490 2.215 1.039 0.343 -1.114 2.548 0.621 0.199 -1.453 0.000 0.833 0.783 0.984 0.580 0.579 0.558 0.541 +0 1.521 -0.271 -1.704 0.830 -1.177 0.945 -2.048 1.033 0.000 0.752 0.249 -0.550 0.000 1.019 -0.361 -0.012 0.000 2.160 0.647 0.021 3.102 0.769 1.021 0.999 1.448 1.104 1.063 0.941 +1 0.592 -0.208 1.647 1.415 -0.546 2.399 -1.259 1.624 0.000 1.023 -0.608 0.060 2.215 1.257 0.254 0.619 0.000 3.246 -0.299 -0.451 3.102 1.037 1.055 1.166 0.814 0.763 0.916 0.838 +0 0.875 0.652 -0.157 0.932 -0.619 0.655 0.855 1.209 0.000 0.748 -0.041 -0.541 2.215 0.823 0.159 -1.644 0.000 0.561 -0.798 1.047 3.102 0.850 1.030 0.987 0.700 0.643 0.676 0.702 +1 0.421 -1.178 0.588 0.950 -1.209 0.833 0.151 -0.251 0.000 0.722 -0.146 0.712 2.215 1.495 -0.290 1.549 2.548 0.968 -0.895 -0.203 0.000 0.898 0.894 0.989 0.720 0.761 0.859 0.749 +0 0.884 1.594 1.703 0.210 0.293 0.920 -1.007 -0.870 0.000 0.370 -2.584 1.203 0.000 1.344 0.946 0.599 2.548 1.087 -0.447 -0.263 3.102 1.136 0.854 0.995 0.755 1.033 1.171 1.103 +0 1.720 -0.230 0.661 2.222 0.416 1.258 -2.050 -1.143 0.000 0.808 -1.139 -1.203 0.000 0.736 -0.804 -0.796 2.548 1.240 0.259 1.226 3.102 0.999 0.798 0.999 0.800 0.845 1.079 1.549 +1 0.667 -0.133 -0.360 1.796 0.465 1.566 0.075 -1.351 2.173 0.824 -0.145 0.223 0.000 1.019 0.231 1.314 2.548 0.451 0.522 -0.226 0.000 0.445 0.725 1.027 1.575 1.069 1.090 0.904 +0 1.100 -0.252 0.581 0.324 0.455 1.065 0.769 -0.247 0.000 1.332 -0.848 1.448 2.215 0.830 0.805 -1.087 0.000 0.669 0.862 1.592 3.102 1.166 0.892 0.983 1.146 0.949 1.198 1.011 +0 0.721 0.481 -0.224 0.743 -0.978 0.507 1.080 0.597 0.000 0.570 1.605 1.085 0.000 0.534 -0.338 -1.023 2.548 0.669 0.951 -1.694 3.102 0.584 0.903 0.995 0.720 0.460 0.568 0.689 +0 0.893 -0.235 1.081 0.987 0.573 1.201 -0.774 -0.696 0.000 0.868 -0.491 -1.391 2.215 0.265 -0.813 -1.559 0.000 0.504 -0.784 0.821 3.102 0.714 0.731 0.981 0.447 0.560 0.594 0.702 +1 0.621 -1.404 0.422 0.993 -0.118 1.447 -0.471 1.679 2.173 0.793 -1.242 -1.336 0.000 0.555 -1.027 -0.043 0.000 0.716 2.058 0.472 0.000 0.938 1.146 0.993 0.548 1.006 0.888 0.816 +1 0.769 0.960 1.263 0.792 -0.409 0.684 0.454 -0.197 2.173 0.909 0.169 -0.985 2.215 0.843 0.358 1.028 0.000 1.452 1.483 1.140 0.000 0.924 1.147 1.079 0.815 0.775 0.913 0.778 +0 0.734 0.746 0.560 1.819 0.747 1.348 0.043 -0.789 1.087 1.107 2.557 1.622 0.000 1.525 0.491 1.297 2.548 2.213 -0.378 -0.484 0.000 1.111 0.875 0.994 1.027 1.759 1.472 1.340 +1 0.663 -0.285 0.814 0.754 -0.480 0.794 -1.620 0.915 0.000 0.999 -0.255 -1.007 2.215 0.863 -0.588 0.146 1.274 0.744 -1.053 -1.295 0.000 1.093 0.912 0.987 0.784 0.871 0.863 0.737 +1 0.777 -1.335 -1.331 1.254 0.999 0.814 -0.179 -0.425 2.173 0.549 1.103 1.109 0.000 0.503 0.611 -1.416 2.548 0.922 -0.019 0.450 0.000 0.768 1.027 1.180 0.943 0.713 0.891 0.887 +0 1.647 0.047 -1.042 1.589 -0.529 1.686 0.250 1.281 2.173 0.689 -1.782 -0.554 0.000 0.911 -0.500 0.925 0.000 1.889 1.021 -0.689 0.000 1.446 0.981 1.002 1.815 1.095 1.228 1.243 +0 0.671 -0.371 1.346 1.062 -1.399 0.676 -1.032 -1.086 2.173 0.540 -1.101 0.434 0.000 0.598 -0.735 -0.153 0.000 1.673 
0.050 0.459 3.102 0.824 0.849 0.983 0.897 1.284 0.865 0.748 +1 1.103 -0.972 -1.158 1.158 1.654 0.884 -1.121 0.809 0.000 1.148 -0.992 -1.561 0.000 1.122 -0.598 -0.116 0.000 2.865 -0.647 0.354 3.102 0.899 0.883 0.993 1.274 0.933 0.868 0.805 +0 0.552 0.551 1.364 1.034 0.373 0.850 -1.025 1.594 2.173 1.235 -1.270 -0.189 0.000 0.546 -0.243 0.365 0.000 1.473 -0.005 -1.537 3.102 0.913 1.197 0.986 0.965 0.736 0.955 0.862 +0 0.852 0.225 -0.290 1.351 -1.086 0.792 1.341 0.978 0.000 1.542 1.399 -0.775 1.107 1.620 1.353 0.404 2.548 1.460 2.220 1.109 0.000 0.888 0.938 0.989 0.933 1.468 1.040 1.045 +1 1.136 -0.102 -0.831 2.029 -1.549 0.999 0.659 0.363 2.173 0.858 -0.467 0.601 1.107 0.439 0.679 -1.592 0.000 0.604 1.325 0.262 0.000 0.735 1.000 1.266 1.213 0.874 1.159 0.907 +1 1.449 1.598 1.170 0.818 -0.989 0.835 1.346 0.023 2.173 1.005 0.743 0.672 2.215 0.848 1.362 1.635 0.000 1.398 -0.011 -0.777 0.000 0.859 1.172 1.405 1.041 0.847 0.933 0.868 +1 0.649 -1.287 -0.975 2.096 0.024 1.129 0.862 -1.533 0.000 0.395 -0.162 0.574 0.000 0.493 1.415 0.758 0.000 0.904 0.766 1.235 1.551 0.667 0.685 1.267 1.377 0.528 1.109 0.979 +0 1.160 -0.085 -1.221 0.967 -0.519 1.060 0.459 -1.630 0.000 0.971 -0.657 0.329 2.215 1.609 0.082 0.906 1.274 1.679 -0.009 -0.221 0.000 2.007 1.524 0.989 1.061 0.841 1.116 1.001 +1 1.990 -0.417 -1.137 0.438 -0.833 1.040 -0.868 -0.117 2.173 0.796 -0.836 -1.680 0.000 1.471 -2.412 1.255 0.000 1.035 -1.133 0.930 3.102 0.955 0.703 0.987 1.214 0.929 0.959 1.200 +1 0.682 0.285 -0.112 1.035 1.414 0.429 -0.407 0.847 0.000 1.048 -1.060 -1.194 0.000 1.512 -0.279 -1.737 2.548 1.815 -0.354 0.127 1.551 0.714 0.964 1.142 0.861 1.261 0.795 0.725 +1 2.565 -0.182 -0.534 1.503 -1.626 1.361 0.545 0.620 0.000 1.674 -1.387 1.435 2.215 1.073 -1.764 -0.327 0.000 0.684 -1.015 1.122 3.102 3.825 2.081 2.264 2.077 0.287 1.675 1.642 +0 1.031 0.303 -1.613 1.500 -0.926 0.400 -1.149 0.176 0.000 0.430 -1.492 -1.725 2.215 0.762 -0.258 0.560 2.548 0.816 0.517 0.482 0.000 0.914 0.863 1.001 0.911 0.674 0.764 0.753 +1 1.371 -0.064 1.142 1.761 1.740 1.344 -0.863 -0.114 2.173 0.604 -0.050 -1.071 2.215 0.411 -0.749 -0.999 0.000 0.423 -1.365 1.401 0.000 0.428 0.783 1.106 0.834 1.151 1.195 0.895 +0 1.662 -0.327 0.290 0.880 0.277 1.109 0.738 -0.678 2.173 0.856 -0.187 1.294 0.000 0.849 0.879 0.530 2.548 0.384 1.456 -1.001 0.000 1.069 0.834 0.982 1.657 1.080 1.176 1.045 +0 0.447 -0.303 -0.141 1.109 -1.233 1.248 2.055 0.145 0.000 1.436 -0.035 1.579 2.215 0.619 -1.114 0.733 0.000 0.545 -1.962 1.650 0.000 0.830 1.245 0.985 1.067 0.782 1.039 0.898 +0 1.499 -0.134 -1.461 0.397 -0.008 0.734 1.056 -1.737 2.173 0.758 -0.407 0.283 0.000 0.847 -1.801 0.403 0.000 0.875 -0.095 -1.221 0.000 0.823 0.761 1.033 0.593 0.797 0.641 0.571 +1 0.558 -2.016 0.570 0.533 -0.264 1.156 -1.449 -1.640 1.087 0.892 -1.300 0.435 0.000 0.736 0.200 -0.312 0.000 1.063 -0.049 -1.557 3.102 0.543 0.856 0.987 1.056 0.897 0.846 0.759 +1 0.793 -0.204 -0.140 0.177 0.291 1.078 0.876 0.657 1.087 1.139 -0.345 -1.062 0.000 0.585 0.509 1.285 0.000 0.750 0.664 -1.572 1.551 1.224 0.769 0.985 0.838 0.863 0.927 0.803 +0 0.848 -0.070 -1.657 1.096 -1.122 0.477 -0.497 0.378 1.087 0.709 -1.141 1.337 2.215 0.484 0.983 -0.023 0.000 0.788 -0.897 -0.407 0.000 0.915 0.981 0.977 0.886 0.714 0.683 0.696 +1 0.719 0.747 1.486 1.079 -0.216 0.896 0.633 -0.256 0.000 0.708 0.942 -0.854 0.000 1.439 0.231 1.630 1.274 0.719 0.304 0.930 3.102 0.913 0.840 1.219 0.926 0.461 0.802 0.729 +1 2.564 -0.232 -0.071 0.669 0.122 0.661 -2.363 -1.401 0.000 0.542 0.693 -1.693 2.215 0.734 -0.935 1.303 2.548 0.745 -0.384 1.023 
0.000 0.747 0.763 0.985 1.060 0.719 0.940 1.180 +0 0.604 -0.210 0.962 0.977 -0.163 0.969 0.659 -1.230 2.173 0.516 0.604 0.440 0.000 0.943 1.456 -0.080 2.548 1.062 1.125 1.499 0.000 0.865 1.005 0.988 1.168 1.176 1.049 0.912 +1 0.572 -1.787 1.359 0.925 -1.172 2.476 -1.142 -0.138 0.000 2.412 -0.644 -1.660 0.000 1.799 -0.843 1.291 2.548 1.409 -0.976 0.668 0.000 1.894 1.150 0.984 1.221 0.899 1.213 1.276 +0 0.861 0.127 -0.743 0.616 -0.044 0.920 1.146 -0.826 1.087 1.562 -0.794 1.211 2.215 0.507 0.364 1.002 0.000 0.660 -1.300 0.126 0.000 0.860 0.851 0.985 1.181 2.655 1.363 1.085 +1 0.355 1.404 -0.759 1.588 -0.931 0.461 -0.314 0.133 0.000 0.511 0.245 1.229 0.000 0.824 -0.916 0.617 2.548 1.047 -0.911 1.389 3.102 0.910 0.760 1.001 1.000 0.456 0.777 0.710 +0 2.973 2.252 0.375 0.143 -1.633 1.660 0.641 -1.127 1.087 0.762 1.340 0.945 1.107 0.543 0.041 -0.640 0.000 0.947 1.180 1.530 0.000 0.939 0.969 0.987 0.830 1.700 1.599 1.252 +1 1.696 0.572 0.185 1.072 0.975 0.472 -0.443 -0.569 0.000 0.717 -0.221 -1.234 2.215 0.959 1.138 -1.551 2.548 0.772 -0.178 1.488 0.000 0.891 0.941 1.222 1.115 0.754 0.884 0.802 +0 1.320 0.542 0.808 0.443 -1.039 0.491 0.502 -1.231 2.173 0.498 -0.780 0.705 0.000 0.675 -0.993 1.566 2.548 0.553 -0.109 -0.748 0.000 0.701 0.738 1.055 0.835 0.760 0.667 0.602 +0 0.494 1.846 0.751 0.757 -0.958 0.585 0.240 1.503 0.000 0.672 1.049 -0.148 2.215 0.881 0.076 0.969 2.548 0.693 -0.708 -0.812 0.000 1.013 1.019 0.990 0.762 0.811 0.691 0.673 +1 1.681 1.053 0.940 1.210 0.635 1.834 2.026 -0.576 0.000 1.653 0.662 -1.310 2.215 1.584 1.206 1.233 2.548 1.362 0.632 0.605 0.000 2.687 2.117 0.990 1.609 1.409 1.635 1.455 +0 1.250 1.515 1.504 0.613 -0.503 0.713 0.629 0.619 1.087 0.647 -0.348 -1.645 0.000 0.842 -0.241 -0.149 0.000 0.376 -0.202 -0.524 3.102 1.106 1.052 1.179 0.723 0.531 0.669 0.775 +1 0.685 -0.096 0.379 0.306 -0.763 1.091 -0.923 1.362 2.173 1.239 0.855 0.027 0.000 1.448 -0.193 -1.605 0.000 1.481 -0.841 -0.285 3.102 1.353 1.119 0.985 0.919 1.341 0.940 0.816 +1 2.504 0.375 0.868 1.007 -1.213 1.136 1.734 -0.605 0.000 0.425 0.057 -1.067 0.000 1.262 0.453 -0.849 2.548 0.674 0.158 0.399 3.102 1.525 1.061 2.100 1.004 0.645 0.874 1.049 +1 0.656 0.693 0.866 1.562 0.152 0.904 -0.277 -1.690 2.173 0.752 0.675 -1.099 2.215 0.593 -0.353 -0.339 0.000 0.378 -0.608 1.446 0.000 0.530 0.678 0.983 0.960 0.872 0.918 0.721 +0 0.699 1.203 -1.586 0.406 0.865 0.531 -0.773 0.288 2.173 0.813 -0.021 1.574 1.107 0.842 -0.229 -0.317 0.000 0.822 -1.347 -1.158 0.000 0.925 0.951 0.977 0.872 0.962 0.707 0.712 +1 0.998 0.012 -0.781 0.747 1.377 0.528 1.688 0.902 0.000 0.756 -0.218 -0.284 0.000 0.833 -0.054 0.606 0.000 0.671 0.672 -1.256 3.102 0.881 0.896 1.113 0.593 0.248 0.587 0.585 +1 0.829 -0.101 -1.396 0.229 0.335 0.759 0.541 0.615 2.173 0.809 0.449 -1.257 0.000 1.027 0.897 -0.600 0.000 0.911 -0.949 0.671 3.102 0.864 1.155 0.986 0.638 0.834 0.905 0.839 +1 0.804 -0.909 -0.162 0.325 0.733 0.849 0.259 -1.151 0.000 1.447 1.199 0.813 2.215 0.757 0.415 -0.468 0.000 0.679 0.563 -1.676 3.102 0.845 0.607 0.985 2.242 0.744 1.424 1.246 +1 1.244 0.334 -1.315 0.564 0.710 0.692 -0.623 0.903 0.000 1.139 -1.202 0.263 0.000 0.648 1.377 -0.891 0.000 1.341 0.319 1.160 0.000 0.846 1.113 1.123 0.863 0.838 0.881 0.799 +0 1.365 0.707 0.068 0.979 0.372 0.481 0.238 -0.934 0.000 0.421 -0.926 -1.024 1.107 0.281 -1.176 -1.609 0.000 0.624 0.926 -1.456 0.000 0.801 0.584 1.001 0.893 0.275 0.843 0.737 +1 1.279 -1.124 -1.104 0.498 -0.430 1.396 0.150 1.085 1.087 0.885 -1.931 -0.997 0.000 1.215 -0.208 -0.035 0.000 0.859 -0.643 0.374 3.102 0.813 1.131 0.994 
0.754 0.889 1.187 0.972 +0 1.473 -0.386 1.581 0.799 -0.974 0.714 1.275 -0.017 2.173 0.782 -0.030 0.628 0.000 0.776 -0.860 -0.355 2.548 0.471 0.425 -1.335 0.000 0.805 0.909 1.119 0.828 1.295 1.037 0.851 +0 0.919 -0.731 -1.431 0.882 0.153 0.612 -1.158 1.667 2.173 1.472 -1.180 -0.539 0.000 1.059 1.787 0.800 0.000 1.403 -0.503 1.206 1.551 0.661 1.072 1.235 0.787 0.486 0.835 0.758 +0 0.509 0.962 1.148 1.353 0.014 0.384 0.139 1.651 0.000 0.602 0.290 -0.658 1.107 0.868 -0.981 0.368 0.000 1.084 0.775 -1.300 3.102 0.999 0.947 0.988 0.735 0.463 0.633 0.713 +1 0.758 -0.346 0.767 1.894 -0.099 0.782 1.033 -1.625 2.173 1.097 1.244 -0.943 2.215 1.066 1.174 1.287 0.000 1.139 1.726 -1.649 0.000 1.057 1.059 1.167 1.490 0.803 1.232 1.039 +1 0.714 0.928 0.306 0.947 0.477 1.116 0.251 -1.695 0.000 1.101 2.261 0.083 0.000 1.112 0.934 -1.084 2.548 0.378 0.773 0.999 1.551 3.737 1.908 0.988 0.999 0.473 1.197 1.087 +0 0.457 1.637 1.439 0.668 -0.571 0.662 2.685 0.637 0.000 0.908 0.852 1.396 2.215 1.213 -0.859 -1.194 0.000 0.801 -1.888 0.543 0.000 0.933 0.574 0.984 0.724 0.772 0.915 0.864 +1 0.934 1.631 0.385 1.771 0.055 0.469 1.214 -1.011 0.000 1.305 0.448 -1.497 2.215 0.394 1.345 0.863 0.000 1.110 0.259 1.048 3.102 0.773 0.806 1.000 1.292 0.819 1.382 1.059 +1 2.389 -0.970 -1.607 0.638 -0.415 0.992 -0.899 0.156 2.173 0.588 -1.401 -1.246 0.000 0.638 -0.987 1.203 2.548 0.813 -1.348 -0.006 0.000 0.814 0.917 1.505 0.836 0.807 0.929 0.784 +0 0.902 0.999 0.163 0.806 1.157 0.931 0.632 -0.968 0.000 1.241 -2.606 0.160 0.000 0.626 1.650 -0.802 0.000 1.245 0.637 1.468 3.102 0.878 0.925 0.985 0.827 0.489 0.739 0.745 +1 0.702 -1.839 -0.259 1.542 0.461 0.472 -2.136 -0.808 0.000 0.534 0.014 -0.911 2.215 1.068 -0.686 -1.461 1.274 0.583 -1.714 1.187 0.000 0.782 0.959 0.989 1.021 0.496 0.810 0.757 +0 0.449 0.090 -0.438 1.032 1.427 0.483 1.126 0.305 0.000 1.138 1.237 -0.726 2.215 0.553 -1.115 0.640 2.548 0.406 1.667 1.464 0.000 0.989 1.104 0.989 0.785 1.566 0.907 0.771 +0 0.688 -2.172 -0.599 0.156 0.688 0.969 2.204 0.975 0.000 1.114 -0.889 -0.422 1.107 1.157 -1.108 -1.355 2.548 0.809 -1.279 0.939 0.000 4.333 3.414 0.995 0.691 0.917 2.367 1.979 +0 1.470 1.012 -1.075 0.594 0.056 1.305 -0.542 -1.145 0.000 1.452 -0.508 0.595 2.215 0.275 2.382 1.550 0.000 0.870 0.281 0.585 0.000 0.862 1.260 1.104 0.657 0.252 0.929 0.827 +1 0.453 -1.567 -0.140 1.007 -0.618 0.787 -0.715 1.199 1.087 0.821 -1.107 -1.706 0.000 0.884 0.727 0.507 0.000 1.336 -0.374 -0.400 3.102 1.895 1.271 0.981 0.517 1.087 0.921 0.843 +1 0.351 -1.095 -0.055 1.109 -1.010 1.555 -0.263 0.602 2.173 0.748 0.241 -1.630 0.000 0.897 0.681 -0.796 2.548 0.688 -0.318 -0.900 0.000 0.736 0.801 0.977 2.011 1.602 1.612 1.311 +1 0.988 0.400 -1.711 0.532 0.251 0.934 0.138 1.147 0.000 0.589 2.666 0.024 0.000 1.594 0.911 -0.794 2.548 0.792 1.180 1.523 0.000 0.997 1.047 0.987 0.838 0.757 0.927 0.771 +1 0.857 -0.393 -0.032 0.526 -1.456 1.741 -0.837 0.410 1.087 0.543 -1.486 1.254 0.000 1.246 -0.401 -1.658 1.274 1.979 -0.570 -1.118 0.000 0.913 0.955 0.991 0.997 1.796 1.162 0.947 +0 0.650 -0.142 -1.074 0.378 0.414 0.769 0.554 0.463 0.000 1.385 0.331 -1.561 1.107 0.898 1.494 0.301 0.000 0.833 0.452 -0.803 3.102 0.876 0.838 0.988 0.805 0.620 0.926 0.772 +0 1.491 0.386 0.097 1.124 0.672 0.691 1.179 -1.463 0.000 0.624 0.457 -0.571 2.215 0.981 -0.361 -1.320 0.000 0.894 -0.506 1.205 3.102 1.317 0.959 0.979 0.829 0.774 0.733 0.850 +1 1.940 -0.109 0.535 0.604 -0.162 1.045 0.084 -1.608 2.173 0.699 0.055 -0.712 2.215 0.670 0.301 -1.183 0.000 0.781 0.906 0.449 0.000 0.852 0.893 0.987 0.868 0.909 0.951 0.807 +1 
1.251 -0.453 0.410 0.154 0.433 0.706 -0.089 1.270 1.087 0.606 -0.888 -0.389 0.000 0.339 0.225 -0.502 0.000 1.053 -1.226 -1.270 0.000 0.798 1.075 0.983 0.782 0.693 0.682 0.722 +0 0.557 -0.522 0.956 0.978 -1.175 0.567 2.527 1.658 0.000 0.716 0.086 -0.620 2.215 1.231 -0.566 0.377 2.548 0.956 -1.669 0.242 0.000 0.928 0.934 0.987 0.813 0.860 0.677 0.659 +1 0.606 -0.905 1.691 0.938 0.324 1.095 -0.286 -0.213 2.173 1.076 0.295 1.146 0.000 1.455 0.413 -1.445 2.548 0.720 -0.286 -1.101 0.000 1.097 0.910 0.987 0.902 1.530 0.993 0.887 +1 0.612 -0.987 -1.538 1.312 -0.575 1.071 -0.547 0.651 2.173 0.475 -1.135 -0.687 0.000 0.760 -0.656 1.560 2.548 0.764 0.831 -1.395 0.000 1.127 0.818 0.986 1.220 0.826 0.851 0.832 +0 0.883 0.387 0.738 0.488 1.716 0.892 -0.089 0.149 0.000 0.990 0.457 -1.504 1.107 0.617 -0.058 -0.914 0.000 0.472 -1.009 0.844 0.000 0.825 1.220 0.986 0.542 0.428 0.704 0.702 +1 1.312 0.833 -0.411 1.082 -0.330 0.641 0.643 0.825 2.173 0.373 -0.739 0.726 0.000 0.728 1.587 -1.550 0.000 0.959 0.418 -1.156 1.551 1.300 0.876 1.005 1.094 0.813 0.799 0.801 +1 0.974 0.319 -1.504 0.371 -0.227 0.606 -0.136 -1.114 0.000 0.591 0.664 0.111 0.000 1.618 1.344 0.995 2.548 0.591 1.311 -0.095 3.102 1.255 0.849 0.982 1.178 0.623 0.855 0.806 +1 0.720 -0.736 -1.428 1.399 -0.594 1.457 -0.710 0.496 2.173 0.864 -0.727 1.158 0.000 0.969 0.250 1.627 1.274 1.446 0.842 -1.421 0.000 0.971 0.713 0.989 1.347 1.474 1.086 1.041 +0 0.430 -1.101 1.154 0.999 -0.724 1.260 -1.456 -1.190 2.173 1.762 0.179 0.475 0.000 1.499 0.763 1.107 0.000 1.462 0.071 -1.010 3.102 0.985 1.015 0.989 0.794 1.255 1.346 1.192 +1 0.601 0.201 0.130 0.817 -0.528 1.226 0.610 1.514 0.000 1.379 0.449 -0.060 2.215 0.691 1.392 -1.622 0.000 0.658 0.074 -1.465 3.102 0.935 0.641 0.988 0.734 0.836 0.961 0.870 +1 0.595 0.755 1.308 1.187 -0.270 0.986 -0.738 -0.209 2.173 1.076 0.059 1.528 0.000 0.792 1.087 1.473 0.000 0.556 0.444 0.905 3.102 0.849 0.547 1.151 1.168 0.851 0.979 0.891 +0 1.882 -0.367 0.321 1.090 0.740 0.690 0.338 -1.466 2.173 0.705 -0.723 -0.963 0.000 1.149 -0.613 -1.578 1.274 0.804 -2.347 -0.684 0.000 1.235 1.037 1.000 1.341 0.598 1.006 1.084 +1 1.031 0.338 -0.140 0.135 -1.406 1.030 -0.625 0.869 1.087 1.133 0.262 -1.009 2.215 0.621 -0.015 1.137 0.000 1.362 -0.940 -1.009 0.000 1.119 1.005 0.993 0.952 1.742 0.987 0.846 +1 0.937 -0.606 0.581 1.334 1.018 0.525 0.746 -0.622 2.173 0.695 -1.290 -1.696 0.000 0.645 -1.055 -0.294 2.548 0.665 -1.697 -0.782 0.000 0.724 1.301 0.988 0.745 0.831 0.959 0.880 +0 1.476 -0.564 -1.178 2.767 -0.921 1.256 -0.044 0.578 0.000 1.324 -0.444 1.294 2.215 0.387 0.779 0.568 0.000 0.589 -0.497 -0.175 0.000 0.903 1.102 0.989 1.082 0.561 1.073 1.111 +1 0.414 -1.340 -0.405 3.093 0.084 0.666 -0.456 -1.370 2.173 0.834 1.130 -1.656 0.000 0.960 0.462 1.037 2.548 0.733 -1.302 -1.217 0.000 1.901 1.218 0.975 1.147 0.963 1.027 1.117 +1 0.954 -0.228 1.727 0.344 -0.275 0.983 -0.501 -1.376 0.000 1.246 -0.499 -0.122 1.107 1.142 -0.135 0.357 0.000 1.388 -0.970 1.188 3.102 0.798 1.007 0.989 0.957 1.166 0.821 0.761 +1 1.417 0.391 -0.436 1.364 0.081 1.253 0.451 1.473 1.087 0.458 0.654 1.635 2.215 0.697 0.835 0.401 0.000 0.811 1.455 -1.087 0.000 0.882 1.121 0.994 0.864 0.201 0.995 0.845 +1 1.182 0.284 -1.610 1.247 -1.680 0.781 0.736 0.592 2.173 0.784 0.279 -0.184 0.000 0.384 0.608 -0.542 2.548 0.467 1.036 -0.683 0.000 0.520 0.744 0.985 0.647 0.583 0.748 0.699 +0 1.089 0.390 1.676 0.844 1.092 1.529 -1.237 0.072 1.087 0.505 -1.907 -0.689 0.000 1.010 -1.248 -1.264 2.548 1.938 -0.209 1.657 0.000 0.991 0.986 0.988 1.389 1.449 1.647 1.367 +1 1.010 0.013 0.356 
0.253 -0.560 0.617 -0.255 -1.079 0.000 0.995 -1.063 0.321 1.107 0.986 -1.215 1.359 1.274 1.154 0.406 -1.547 0.000 0.719 1.010 0.988 1.018 0.857 0.913 0.899 +0 0.880 1.159 -1.225 0.973 -0.635 0.485 2.474 0.752 0.000 0.695 -0.033 0.053 2.215 0.384 0.578 -1.666 0.000 0.831 0.000 1.493 3.102 1.047 0.968 0.992 1.150 0.661 0.868 0.852 +0 0.867 0.410 -1.278 2.469 1.686 1.847 0.077 0.010 0.000 0.765 1.488 1.656 0.000 0.914 -0.512 0.950 2.548 1.049 -1.060 -0.298 3.102 3.214 2.019 0.993 1.201 0.727 1.361 1.337 +0 1.033 -1.845 1.135 0.887 1.202 0.705 -1.071 -1.063 0.000 1.080 -1.711 -0.419 0.000 1.351 -1.051 -1.575 0.000 1.172 -0.807 0.076 3.102 0.902 0.878 1.004 0.796 0.360 0.593 0.696 +0 0.299 -1.400 -0.350 0.623 1.437 0.764 -0.938 -0.703 2.173 1.121 0.803 1.001 0.000 0.486 0.494 -0.621 2.548 1.131 -0.234 0.816 0.000 0.893 0.869 0.993 0.580 0.622 0.606 0.594 +0 0.689 -0.615 1.012 0.424 -0.617 0.669 -1.359 -0.433 0.000 0.991 -0.389 -0.941 2.215 1.228 -1.252 1.191 1.274 0.706 -1.721 0.410 0.000 0.822 0.896 0.988 0.840 1.251 0.791 0.688 +1 0.573 -0.082 1.354 0.744 0.310 0.685 1.439 0.756 0.000 1.094 0.763 -1.065 1.107 1.073 1.346 -0.402 2.548 1.105 2.323 -0.995 0.000 1.067 0.914 0.989 1.229 0.765 1.066 1.085 +0 0.642 -0.672 -1.224 1.428 0.954 0.585 0.027 -0.732 2.173 0.516 -1.207 1.505 0.000 0.481 -0.494 0.335 1.274 0.588 -1.534 -0.535 0.000 0.726 0.876 1.225 0.654 0.575 0.655 0.617 +0 0.402 -1.052 0.474 2.160 -1.116 0.886 0.839 0.441 0.000 0.482 0.410 1.153 1.107 0.577 1.743 0.843 0.000 1.291 0.802 -0.964 3.102 0.851 1.011 1.278 1.062 0.700 0.952 1.191 +0 0.364 2.045 -0.758 1.858 -1.715 0.727 0.010 -0.526 2.173 1.099 0.244 1.110 0.000 0.811 2.030 -0.024 0.000 0.527 -1.275 -0.946 3.102 2.083 1.678 0.983 1.264 0.607 1.170 1.127 +0 0.338 0.915 -0.926 1.250 1.138 0.725 -0.259 -0.720 2.173 0.274 1.279 1.401 1.107 0.370 1.777 -0.025 0.000 1.091 1.076 0.436 0.000 0.368 1.063 0.984 0.478 0.844 0.878 0.739 +1 0.674 0.985 -1.543 1.054 0.937 0.768 0.877 -0.174 0.000 1.031 0.863 -0.642 1.107 0.991 1.314 1.362 0.000 1.199 -0.371 1.368 1.551 1.603 1.158 0.988 0.665 1.212 0.962 0.839 +1 0.565 -0.595 0.086 1.378 0.012 1.136 0.179 -0.848 0.000 2.397 1.029 1.414 2.215 0.955 -0.294 -0.425 0.000 1.593 0.033 0.533 3.102 0.845 1.160 0.978 1.647 1.570 1.462 1.238 +0 0.696 -0.219 1.653 0.292 -0.733 0.551 0.230 -1.347 2.173 1.445 0.739 0.885 1.107 0.402 -2.129 1.671 0.000 0.519 -0.616 -0.881 0.000 1.214 0.920 0.996 0.878 1.238 0.855 0.733 +1 1.967 -1.011 -0.126 0.612 -0.448 0.896 -0.369 1.201 2.173 0.634 -0.674 -1.200 0.000 0.919 0.277 -0.361 0.000 0.771 1.292 1.264 0.000 1.007 1.170 0.989 1.208 0.844 1.098 0.965 +1 0.688 -0.580 0.343 0.164 -1.239 1.304 -0.540 1.450 2.173 0.754 -0.951 -0.098 0.000 0.465 2.499 1.057 0.000 0.657 -1.353 -0.440 0.000 0.702 1.131 0.995 0.558 0.938 0.782 0.736 +1 0.593 0.082 -0.844 0.444 1.380 0.534 -1.562 0.800 0.000 0.850 -0.984 -0.313 2.215 0.706 -0.262 -1.308 2.548 0.565 -2.159 1.192 0.000 0.531 0.888 0.988 0.560 0.713 0.706 0.647 +0 0.726 0.670 1.161 0.280 -1.589 1.050 -1.423 -1.482 0.000 1.379 0.428 -0.344 2.215 1.930 0.566 0.675 2.548 0.429 -0.808 1.550 0.000 0.474 1.841 0.986 0.877 1.387 1.543 1.195 +1 0.701 1.407 0.913 1.086 -1.579 0.481 0.559 0.433 1.087 1.130 1.164 -1.170 2.215 1.253 -2.459 -0.376 0.000 2.310 0.025 1.062 0.000 1.374 0.855 0.985 0.810 1.131 0.916 0.884 +1 0.507 0.281 1.717 3.300 1.488 1.883 -1.679 0.007 0.000 0.537 0.254 -1.432 2.215 0.607 -0.933 -0.073 1.274 0.543 -1.300 -0.633 0.000 0.849 0.555 0.993 0.776 0.707 0.950 1.276 +1 0.396 -1.013 -1.223 0.146 -0.381 1.367 
0.838 -0.174 0.000 1.297 -0.045 -1.130 0.000 3.360 -0.739 1.322 0.000 1.205 0.171 0.447 3.102 0.785 0.892 0.978 0.661 0.447 0.637 0.630 +1 1.184 0.721 -1.023 0.570 -1.394 0.984 1.088 0.008 1.087 0.571 0.007 1.385 0.000 0.723 1.289 1.165 2.548 0.429 1.116 0.502 0.000 0.652 0.941 0.980 0.734 0.922 0.790 0.718 +0 0.500 0.660 0.944 1.270 -1.246 1.477 -0.555 -0.350 2.173 1.942 -0.320 1.460 2.215 0.844 -1.053 -0.032 0.000 0.565 -0.352 0.720 0.000 0.557 1.133 1.017 1.410 2.504 1.384 1.106 +0 1.645 -0.684 -0.131 0.050 0.657 0.542 -0.008 1.500 2.173 0.494 -1.524 0.337 0.000 1.353 -0.644 -1.258 2.548 0.855 -0.366 1.166 0.000 0.764 0.900 0.980 0.919 0.753 0.772 0.713 +1 0.514 -1.314 1.231 1.251 -0.478 1.778 -0.356 -1.630 2.173 1.266 -0.825 0.430 2.215 1.684 -2.074 0.289 0.000 0.687 -0.384 -0.778 0.000 0.737 1.125 1.110 1.424 2.187 1.692 1.321 +1 0.319 -1.741 -1.316 1.523 0.152 0.884 -0.322 1.357 0.000 1.514 -1.436 -0.858 2.215 0.709 -2.338 1.168 0.000 1.601 -0.113 0.687 3.102 1.561 1.070 0.987 0.969 1.716 1.290 1.108 +1 0.954 0.356 0.303 1.442 -0.269 0.917 -0.456 1.568 2.173 0.642 0.689 -1.081 2.215 0.626 -2.117 -0.915 0.000 1.003 -0.574 0.361 0.000 1.128 1.222 0.980 0.800 1.040 1.027 1.156 +0 1.160 -0.281 0.445 1.828 1.004 1.646 -0.959 -1.347 2.173 0.751 -0.412 -0.608 0.000 0.903 0.078 -0.117 2.548 1.202 -0.842 0.857 0.000 1.254 0.872 0.989 1.763 1.598 1.262 1.061 +1 1.017 1.586 -0.767 1.555 -0.346 1.072 0.469 1.295 2.173 0.295 1.613 -1.301 0.000 0.273 0.309 1.596 1.274 0.758 0.343 0.333 0.000 0.738 0.838 0.983 0.654 0.186 0.910 0.728 +1 0.380 1.700 1.080 2.886 1.315 0.594 -2.663 0.007 0.000 1.392 -0.342 -1.446 0.000 1.290 0.106 -0.740 2.548 1.578 0.958 0.026 3.102 0.800 1.677 0.977 1.562 0.911 1.757 4.073 +1 0.296 2.169 -0.415 0.982 0.429 1.498 -0.367 -1.313 0.000 2.287 0.142 0.241 2.215 0.858 1.295 -1.268 0.000 1.294 0.233 0.928 3.102 0.833 0.769 0.982 0.880 0.906 0.917 0.781 +1 0.730 -0.269 0.029 0.738 -1.016 1.058 -1.417 1.323 2.173 0.932 -1.623 -0.274 0.000 0.495 -0.589 -0.824 0.000 0.437 -0.115 1.549 3.102 0.746 1.268 0.986 0.550 0.519 0.880 0.869 +0 0.849 0.602 -0.231 1.561 -0.170 0.920 0.900 0.810 2.173 0.860 -0.411 1.480 2.215 0.872 2.443 -1.406 0.000 1.120 1.922 0.952 0.000 0.949 1.301 0.989 1.118 1.196 1.313 1.426 +1 0.927 -0.944 -1.174 0.627 1.488 0.862 -0.239 -0.207 0.000 1.066 0.930 0.291 0.000 0.744 0.023 1.088 2.548 1.800 0.834 -0.718 3.102 1.014 0.906 0.989 0.658 0.991 0.759 0.833 +0 0.931 1.431 -0.446 1.248 -0.101 0.614 1.441 1.072 2.173 0.642 0.232 1.409 0.000 0.901 0.663 -1.492 2.548 0.378 -0.645 1.140 0.000 0.637 0.703 0.992 0.871 0.765 0.802 0.693 +1 0.693 -0.999 -0.300 0.495 -1.128 1.481 -1.455 -1.563 2.173 1.104 1.887 0.437 0.000 1.386 0.007 0.648 2.548 2.096 -1.931 -0.058 0.000 0.982 0.982 0.987 1.303 2.174 1.194 0.966 +1 1.542 -0.429 -1.444 0.799 -0.573 1.378 -0.586 0.552 2.173 0.542 -1.485 1.689 0.000 0.552 -0.292 -0.783 0.000 0.566 -0.656 -0.349 3.102 0.855 1.212 1.088 0.603 0.685 0.893 0.785 +1 0.643 -0.694 0.966 1.074 -1.351 0.649 -0.073 1.197 2.173 1.322 0.730 -0.278 0.000 0.800 -0.329 -1.170 2.548 0.581 0.877 0.652 0.000 0.866 0.950 1.001 0.721 0.770 0.820 0.813 +0 0.665 -0.350 -0.985 0.374 1.605 0.706 0.703 0.348 2.173 0.817 0.003 0.096 2.215 0.787 0.536 -1.512 0.000 1.123 2.178 -1.649 0.000 1.246 1.323 0.980 0.788 0.476 1.029 0.877 +0 0.730 0.827 0.174 0.491 1.453 1.076 0.390 1.426 2.173 1.159 -0.315 -0.863 2.215 1.264 -0.404 0.478 0.000 1.268 -0.237 -1.432 0.000 0.940 0.965 0.985 1.244 1.568 1.072 0.887 +1 0.703 0.124 -0.096 0.516 -1.083 0.927 -0.668 1.345 0.000 
0.728 -0.553 0.800 0.000 1.549 -0.978 -0.385 2.548 1.075 0.219 -1.058 3.102 0.830 1.022 0.994 1.213 0.905 0.945 0.968 +1 1.164 1.075 0.793 1.683 0.199 1.278 0.552 -1.326 2.173 0.703 0.561 -0.328 0.000 0.456 -0.406 1.697 0.000 0.792 0.020 0.591 0.000 0.960 1.004 0.988 0.635 0.865 1.023 0.886 +0 0.448 -1.906 -0.585 0.665 1.106 0.523 -1.055 0.462 0.000 1.108 -0.872 -0.788 1.107 0.590 -0.990 1.320 0.000 1.084 0.406 -1.662 3.102 0.697 0.979 0.981 0.793 1.020 0.786 0.688 +1 1.116 -1.284 1.466 2.313 -0.139 0.741 0.106 -0.089 2.173 1.622 -0.749 -1.682 0.000 1.155 -2.464 -1.415 0.000 1.617 -1.042 -0.298 0.000 0.885 0.876 2.208 1.526 0.777 1.132 0.914 +1 1.035 0.821 -1.549 0.653 -0.921 0.306 1.056 0.419 0.000 0.749 -0.590 0.545 1.107 0.680 0.267 1.219 0.000 1.222 0.152 -0.772 3.102 1.018 1.087 0.979 0.759 0.874 0.906 0.804 +0 0.847 0.235 1.575 0.565 -1.447 0.605 0.986 1.261 0.000 0.731 0.950 0.326 2.215 0.586 0.222 -0.268 2.548 0.994 -1.044 -0.664 0.000 0.842 0.790 0.991 0.724 0.440 0.723 0.692 +1 0.480 -0.357 -1.123 2.404 0.471 1.780 0.346 -1.293 0.000 1.384 -0.800 0.835 0.000 1.584 0.426 -0.394 1.274 0.418 2.051 -1.300 0.000 1.666 1.475 1.475 0.722 0.790 1.112 1.200 +0 2.774 -0.067 -1.332 0.840 -1.302 1.603 -1.229 0.556 0.000 1.461 -0.999 0.149 0.000 1.964 0.397 -1.619 2.548 1.298 -1.285 -0.203 1.551 1.218 0.984 1.008 0.693 1.834 1.646 1.709 +1 0.839 0.427 1.306 0.962 0.475 1.242 0.348 -0.423 2.173 1.311 -0.334 1.054 0.000 1.243 -1.395 -1.117 0.000 0.858 -0.532 -1.691 0.000 0.880 0.680 0.985 1.159 0.916 0.985 0.843 +0 0.846 0.908 -0.961 0.370 1.563 0.670 -0.130 0.286 1.087 0.489 -1.099 0.931 0.000 1.126 -0.031 -1.067 2.548 0.596 -1.820 1.241 0.000 0.434 0.957 0.983 0.853 1.017 0.772 0.755 +0 1.577 0.332 -0.220 0.232 1.369 0.834 -0.246 1.559 0.000 0.767 -0.726 -0.998 1.107 0.539 -1.302 1.187 0.000 1.092 1.161 0.235 3.102 0.854 0.905 0.990 0.636 1.295 0.963 0.901 +0 0.479 -2.150 0.327 0.616 -1.174 0.835 -1.005 1.624 2.173 0.494 -1.227 -0.116 0.000 0.652 -1.666 -1.368 0.000 1.836 -0.765 0.352 3.102 0.825 0.874 0.995 0.742 1.197 0.750 0.660 +0 2.209 -0.247 0.513 0.520 -0.248 0.896 2.815 0.301 0.000 1.802 0.735 -1.709 1.107 1.683 0.385 -1.041 0.000 2.338 0.825 -1.247 0.000 0.746 1.012 0.988 0.800 2.345 1.456 1.369 +0 1.063 1.338 0.762 0.321 -0.428 0.786 0.516 -1.534 0.000 0.569 2.577 1.355 0.000 1.147 -0.876 -0.056 2.548 0.948 0.490 0.059 3.102 0.687 0.810 0.983 1.633 0.687 1.032 0.973 +1 0.731 -1.408 0.973 0.703 0.585 0.903 -0.132 -1.468 0.000 0.751 -1.508 0.469 2.215 0.899 -2.600 -0.515 0.000 1.391 0.444 -0.762 0.000 0.976 1.005 0.974 0.685 0.890 0.858 0.763 +0 1.140 0.158 -0.861 0.843 -0.246 0.354 -1.243 0.775 0.000 0.583 -0.397 0.604 2.215 0.775 -0.109 -1.692 2.548 0.825 -2.088 1.626 0.000 0.794 0.883 0.986 0.776 0.636 0.645 0.755 +1 0.562 -0.792 -1.528 2.153 1.321 1.526 -0.101 0.499 0.000 2.259 0.528 -1.186 2.215 1.110 -0.122 0.087 0.000 2.230 -0.202 -0.577 3.102 0.850 1.303 0.984 2.321 1.335 1.648 1.631 +0 0.560 0.287 0.280 1.995 1.005 0.882 -0.947 -0.246 0.000 0.974 -0.975 -1.178 2.215 1.127 0.944 1.654 2.548 0.386 -2.207 -0.660 0.000 0.909 0.931 0.982 0.792 1.480 1.200 1.293 +0 0.884 1.674 0.582 0.561 1.640 0.765 2.687 -0.469 0.000 0.369 -0.863 0.969 0.000 1.299 0.436 1.628 2.548 1.666 -0.712 -0.628 1.551 0.387 1.043 0.986 1.967 1.290 1.366 1.403 +0 0.582 -2.138 -0.872 0.511 -1.476 0.791 -1.059 0.363 2.173 0.844 0.056 0.799 0.000 0.943 0.536 1.711 0.000 1.528 0.413 -0.747 3.102 1.067 1.034 0.993 0.913 1.404 0.986 0.940 +1 0.677 -0.289 -1.397 2.952 -0.154 1.131 -0.364 0.877 0.000 1.087 1.098 
-1.305 2.215 0.333 0.849 1.457 0.000 0.475 0.278 1.104 1.551 0.946 0.522 1.764 1.686 0.598 1.066 1.061 +1 1.006 -0.665 -0.120 1.248 -0.424 1.184 0.872 1.701 2.173 0.698 0.097 -0.769 0.000 0.465 -0.698 1.068 2.548 0.797 0.642 0.134 0.000 0.859 0.961 0.976 0.743 0.987 1.037 0.847 +1 0.779 0.693 -0.840 1.179 -0.104 0.604 0.016 0.989 0.000 0.943 1.065 1.675 1.107 1.201 -0.077 1.375 2.548 0.675 -2.350 -0.025 0.000 0.992 0.923 0.989 0.970 0.769 0.852 0.799 +1 0.906 0.172 0.708 0.768 1.580 0.623 -0.013 1.139 0.000 0.683 -1.353 -0.375 1.107 0.681 -0.385 -1.035 0.000 1.265 -1.462 -1.065 0.000 0.734 0.681 0.987 1.212 0.483 0.792 0.818 +1 1.520 1.104 0.293 0.710 1.053 1.063 2.829 -0.804 0.000 0.841 0.929 1.306 0.000 1.207 0.411 -1.557 2.548 0.423 1.767 -0.106 0.000 0.884 0.807 0.990 0.696 0.679 0.710 0.658 +1 2.354 -1.849 -0.282 0.520 -1.174 0.422 -1.869 0.999 0.000 0.573 -1.395 -1.385 0.000 1.235 -0.871 0.692 2.548 1.387 -0.989 1.625 3.102 0.906 0.839 1.102 1.073 0.754 0.909 0.795 +0 1.186 -1.354 1.684 0.339 -0.553 1.196 -0.736 1.386 2.173 1.582 -1.168 0.124 0.000 0.974 1.423 -1.243 0.000 1.114 -1.212 -0.601 3.102 3.982 2.367 0.989 0.959 1.272 1.728 1.467 +1 0.733 -1.729 0.379 0.932 1.131 2.048 -0.649 1.731 2.173 2.796 -0.797 -0.432 0.000 0.604 -1.872 -0.352 0.000 1.644 -1.475 0.979 0.000 1.036 0.725 0.988 1.290 0.864 1.027 0.891 +1 0.880 1.540 0.212 1.336 -1.667 1.930 0.098 -1.633 0.000 0.741 0.438 -0.271 0.000 1.178 -0.399 0.355 1.274 1.632 0.483 1.370 3.102 2.434 1.520 1.491 1.477 1.011 1.178 1.241 +0 2.679 -0.294 0.333 0.823 -0.135 1.140 -0.234 -0.972 2.173 0.594 -0.345 1.522 2.215 0.672 0.194 1.059 0.000 1.295 0.191 -1.450 0.000 0.794 0.958 0.987 0.988 0.946 1.059 0.925 +1 0.916 0.561 0.980 0.569 0.091 0.462 0.222 -1.301 1.087 0.640 2.170 0.876 0.000 0.345 -0.002 1.319 0.000 1.118 1.159 -0.524 0.000 0.856 0.682 0.990 0.620 0.405 0.572 0.530 +0 0.716 -0.356 1.344 1.289 -1.659 0.801 -0.817 0.244 0.000 0.929 -0.671 -1.021 2.215 0.972 -0.341 0.660 2.548 0.913 0.052 -0.431 0.000 0.957 1.010 0.994 0.864 1.021 0.815 0.857 +0 0.661 -0.586 0.075 1.154 0.984 0.705 -1.089 -1.487 2.173 0.419 -1.041 0.653 0.000 0.734 -1.145 -1.021 2.548 0.493 2.240 -0.439 0.000 0.706 0.814 0.985 0.857 0.373 0.747 0.698 +0 2.172 -0.877 -0.751 0.162 -0.709 0.598 -0.084 0.535 2.173 0.702 0.068 1.571 2.215 0.368 -1.920 0.902 0.000 0.457 -0.363 0.374 0.000 0.472 0.678 0.998 1.039 0.770 0.839 0.706 +0 0.697 -0.264 -0.446 1.803 -1.472 0.694 -0.519 0.490 0.000 1.068 1.063 0.631 2.215 1.377 0.353 -0.888 2.548 0.403 0.116 1.729 0.000 0.775 0.983 1.238 0.811 1.345 1.033 0.921 +1 0.599 0.255 0.425 1.440 -0.646 2.299 1.635 0.800 0.000 1.358 -0.028 -1.177 2.215 0.777 -0.130 1.697 0.000 1.897 -0.929 -0.950 3.102 0.901 0.967 1.057 0.887 0.878 0.805 0.736 +0 1.329 0.083 -1.451 1.941 1.412 0.904 -1.995 -0.068 0.000 0.721 -1.358 -0.384 2.215 1.074 -1.152 0.615 2.548 0.553 1.011 -1.252 0.000 0.285 0.988 1.185 1.276 0.735 1.097 0.935 +1 1.105 0.217 -1.316 0.276 1.097 0.618 0.808 -0.190 0.000 0.651 1.743 0.627 0.000 1.615 -0.529 -0.961 1.274 1.143 -1.187 0.839 3.102 0.858 1.219 0.995 0.829 1.131 0.990 0.847 +0 0.656 1.252 1.684 1.163 -1.618 0.920 1.130 0.127 0.000 0.785 0.639 -1.468 2.215 0.355 -1.993 0.323 0.000 0.898 1.599 0.905 0.000 1.052 1.185 0.988 0.755 0.500 0.697 0.826 +1 0.861 0.603 -0.470 1.125 0.783 0.644 1.325 0.001 0.000 0.886 0.988 1.622 1.107 0.754 -0.791 1.407 2.548 0.647 1.115 -1.046 0.000 0.798 0.940 1.233 0.942 0.960 0.878 0.799 +1 1.299 -0.948 0.998 0.186 1.023 0.657 -0.546 0.272 1.087 1.047 -0.417 -0.330 2.215 1.237 -0.241 
-1.571 0.000 0.743 -0.642 -1.406 0.000 0.313 0.964 0.980 0.965 0.636 0.745 0.727 +1 2.234 -0.063 0.720 0.848 0.328 0.387 -0.236 1.738 0.000 0.876 0.241 -0.687 0.000 1.233 0.549 -1.468 0.000 0.880 -0.671 -1.002 3.102 1.055 0.688 0.996 0.609 0.220 0.639 0.701 +0 1.408 -1.646 1.288 0.608 0.023 0.425 -0.921 1.310 1.087 0.654 -1.810 -0.734 0.000 1.160 -0.043 -0.572 2.548 0.422 -0.928 -0.428 0.000 0.322 0.734 1.163 0.681 0.955 0.841 0.722 +1 0.794 0.403 0.028 1.134 -1.051 1.744 -0.228 1.081 2.173 1.247 0.425 -0.623 0.000 1.566 -2.382 -0.975 0.000 1.836 -0.937 0.632 1.551 0.897 0.871 1.086 1.473 1.151 1.155 1.002 +1 0.915 -0.852 1.300 0.450 -1.252 0.677 0.215 -1.174 2.173 2.013 -0.493 -0.826 2.215 3.341 -1.751 0.515 0.000 0.705 0.861 1.456 0.000 0.555 2.349 0.977 1.221 0.829 1.875 1.444 +1 0.527 1.056 1.362 1.859 -0.782 0.870 -0.754 1.149 2.173 0.855 -0.012 0.083 0.000 0.289 -1.301 1.077 0.000 0.480 1.925 -1.419 0.000 0.837 0.919 1.283 1.204 0.810 1.210 1.004 +1 2.151 0.217 -0.811 0.482 -0.913 1.069 -0.260 0.480 2.173 1.096 0.085 1.670 2.215 0.347 -1.043 -0.569 0.000 1.079 0.093 1.116 0.000 0.836 0.828 0.971 1.456 1.426 1.151 0.955 +1 1.205 -0.288 -0.995 1.697 -1.442 1.093 0.139 0.153 2.173 1.429 0.537 0.884 2.215 0.526 0.766 -1.601 0.000 0.558 0.184 -0.666 0.000 0.484 0.842 0.985 1.647 1.188 1.366 1.030 +1 0.425 -2.314 -0.753 1.825 0.557 0.674 -0.267 -1.657 2.173 0.754 -0.511 -0.900 0.000 0.582 -1.296 0.173 0.000 1.164 -0.049 0.821 3.102 0.960 0.926 1.129 1.318 0.745 1.214 1.033 +0 0.366 -1.579 -0.317 0.605 1.616 1.058 -1.021 -0.752 2.173 0.602 1.003 -1.361 0.000 0.590 1.589 1.006 0.000 2.120 0.652 0.665 1.551 0.842 0.885 0.989 0.878 2.236 1.379 1.094 +1 0.495 -1.149 -1.630 1.694 1.001 0.653 0.769 -0.095 1.087 0.397 -1.499 -0.781 0.000 1.106 0.144 -1.239 2.548 0.602 1.033 1.163 0.000 1.355 0.947 0.986 1.219 0.968 0.936 0.853 +0 0.402 0.087 -0.223 0.608 1.650 0.380 0.012 1.321 2.173 0.429 2.868 -1.680 0.000 0.398 -1.297 -0.938 0.000 0.693 1.351 -0.087 0.000 0.868 1.098 0.992 0.551 0.517 0.889 1.051 +0 0.711 -0.785 0.042 1.553 -0.873 1.102 1.521 0.972 2.173 0.707 0.975 0.567 0.000 1.249 -0.765 -0.954 2.548 1.070 0.526 -1.739 0.000 1.012 0.909 1.068 0.616 2.610 1.652 1.329 +1 0.986 -1.067 1.663 0.366 0.481 0.575 0.031 1.132 2.173 0.547 -2.349 -0.415 0.000 0.636 0.856 -1.234 0.000 0.517 0.500 -0.719 0.000 1.501 0.887 0.983 0.899 0.185 0.779 0.727 +0 0.431 1.121 1.177 0.348 0.254 0.592 1.514 -0.160 0.000 0.689 -0.418 -1.441 2.215 0.672 -0.271 0.939 1.274 0.955 -0.632 1.643 0.000 1.984 1.250 0.994 0.701 0.609 0.882 0.755 +0 1.044 1.391 1.531 0.374 0.906 1.852 0.426 1.417 2.173 3.674 -0.917 -0.132 1.107 2.025 0.181 -1.387 0.000 1.060 -1.691 0.014 0.000 1.753 1.698 0.990 2.500 4.733 2.711 2.076 +1 1.003 0.162 -0.705 1.304 0.769 0.678 -2.607 0.930 0.000 1.210 -1.250 -0.751 2.215 1.307 0.608 1.662 2.548 1.154 -0.541 0.242 0.000 0.982 1.261 1.538 1.046 1.880 1.555 1.447 +1 2.018 0.352 -0.465 0.540 0.032 1.174 0.485 1.329 2.173 0.687 0.228 -1.174 0.000 0.700 -0.286 1.170 0.000 0.867 -0.015 0.232 3.102 0.958 0.934 0.988 0.586 0.934 0.955 0.831 +1 0.617 -0.348 1.157 0.869 -0.484 0.661 -1.390 1.644 1.087 0.885 -0.478 1.703 0.000 0.980 -0.801 -0.269 2.548 0.557 -1.711 0.595 0.000 0.896 0.829 1.010 0.870 1.026 0.693 0.636 +1 0.738 -0.389 0.702 0.966 -0.752 1.045 -0.371 1.340 2.173 0.447 1.903 -0.874 0.000 0.646 -0.655 -0.022 2.548 0.707 0.437 -0.323 0.000 0.670 0.874 1.130 0.987 0.981 0.958 0.849 +1 0.851 0.658 0.398 0.365 -0.536 1.012 0.533 1.573 0.000 0.949 1.322 0.146 2.215 1.031 1.601 0.760 0.000 1.313 0.058 
-1.476 3.102 1.063 0.854 0.990 0.899 1.220 0.949 0.790 +0 0.463 0.526 -0.130 1.051 1.410 0.524 0.586 0.484 0.000 0.746 -0.062 0.711 2.215 1.823 0.247 -0.916 2.548 0.754 1.245 -0.890 0.000 1.013 1.049 0.986 0.625 1.250 0.816 0.726 +1 0.694 -1.192 -1.266 2.247 -1.438 0.817 -1.375 0.364 0.000 0.776 0.086 0.195 2.215 0.532 -0.751 -0.710 1.274 0.762 0.111 0.728 0.000 0.973 0.876 1.010 0.643 0.593 0.771 0.790 +1 0.363 -0.092 -1.626 0.178 1.557 0.657 -1.220 1.193 0.000 0.946 0.219 0.035 0.000 1.246 -1.096 0.019 2.548 1.688 -0.323 -1.068 0.000 0.921 1.008 0.795 0.384 1.087 0.888 1.027 +0 0.346 0.170 1.101 2.501 -1.555 0.936 -1.165 0.522 1.087 0.557 1.141 -0.030 0.000 0.607 0.173 -0.848 1.274 0.832 -0.654 -0.256 0.000 1.032 0.682 0.983 1.391 1.126 0.969 0.961 +1 0.671 -0.148 -0.808 0.591 -1.700 1.054 -0.658 0.475 0.000 1.076 0.468 -0.491 0.000 1.623 -0.210 -1.671 2.548 1.344 -0.999 1.162 3.102 2.207 1.608 0.996 0.762 0.847 1.199 1.006 +0 1.742 1.881 1.391 0.290 -0.918 0.674 1.506 -0.478 0.000 0.904 1.095 -1.446 1.107 1.120 0.477 -0.159 0.000 1.344 0.896 0.794 3.102 0.924 0.965 0.992 0.817 0.897 0.805 0.869 +1 0.742 -1.044 1.447 0.513 -0.787 0.448 -0.380 -1.293 2.173 0.462 -2.690 -0.649 0.000 0.739 -0.247 0.681 2.548 1.098 -1.704 0.622 0.000 0.912 1.046 0.985 0.722 0.702 0.829 0.720 +0 1.107 1.615 0.786 1.182 0.934 0.884 0.783 0.232 2.173 2.251 -0.058 -1.170 0.000 0.895 1.169 -0.046 0.000 1.082 -0.652 -1.147 1.551 1.043 1.029 0.992 0.832 1.332 0.977 0.826 +1 0.713 -1.450 0.817 0.364 -1.184 0.464 -0.875 1.604 1.087 0.794 2.701 -1.259 0.000 0.664 2.301 0.997 0.000 2.949 -0.517 0.041 3.102 1.007 2.403 0.994 0.867 1.233 2.225 1.830 +1 0.372 -1.721 1.294 0.749 -1.649 0.674 0.133 -1.081 0.000 0.802 0.827 -0.248 0.000 1.470 -1.117 0.521 2.548 1.103 -0.902 -1.106 0.000 0.852 0.936 0.988 0.937 0.475 0.865 0.758 +1 0.832 -1.514 0.993 1.754 0.182 0.629 -1.983 -1.035 0.000 1.338 0.815 -1.665 2.215 0.491 0.437 -1.243 0.000 1.091 -0.591 0.612 0.000 0.951 1.083 1.115 1.018 1.103 1.494 1.152 +1 0.371 -0.122 0.822 1.548 -0.158 0.857 0.628 -1.162 2.173 0.809 -0.156 1.531 0.000 0.508 0.219 -0.683 0.000 1.211 0.537 0.561 3.102 0.918 0.846 0.987 0.924 1.078 1.009 0.862 +1 0.533 0.150 -0.653 0.370 0.791 1.087 -0.747 -1.189 0.000 1.091 -0.434 0.593 2.215 0.727 -0.803 -0.589 0.000 0.700 0.024 1.686 3.102 0.912 0.938 0.991 0.630 0.684 0.713 0.647 +0 1.188 -0.158 -1.590 0.839 -0.584 0.372 -1.353 1.731 0.000 0.750 -0.420 0.950 0.000 1.073 -0.302 -0.355 2.548 1.043 -0.023 0.176 3.102 0.907 0.974 1.090 0.767 0.392 0.652 0.673 +1 1.012 1.947 0.915 0.906 -1.731 0.777 -0.325 -0.094 0.000 0.615 0.406 0.926 0.000 0.872 0.120 -1.384 2.548 0.900 0.632 -1.078 0.000 0.957 0.771 0.985 1.179 0.438 1.108 0.914 +1 0.668 0.057 1.735 2.306 -0.608 1.030 0.344 1.227 0.000 1.222 0.865 0.676 2.215 0.593 -0.201 -0.609 1.274 0.375 0.387 -1.161 0.000 0.795 0.868 1.475 0.696 0.983 0.938 0.893 +1 2.024 -0.778 -1.446 1.060 -0.240 0.606 -1.286 0.151 2.173 0.377 -1.198 1.644 0.000 0.553 -1.836 0.454 0.000 1.384 -0.103 0.675 3.102 0.814 0.738 1.797 1.202 0.746 0.933 0.767 +1 1.278 1.864 -0.153 1.162 -0.151 1.045 0.578 1.419 2.173 0.506 0.075 0.405 0.000 1.129 0.683 -1.420 2.548 0.490 1.424 -0.967 0.000 0.851 0.930 0.990 1.057 0.752 1.070 0.866 +1 0.693 0.234 1.419 0.768 -1.209 0.635 0.287 0.770 2.173 0.920 -2.213 -0.078 0.000 0.432 -1.082 -0.537 0.000 0.814 1.064 -1.317 3.102 0.939 1.717 0.993 0.769 0.823 1.400 1.128 +0 2.060 -0.587 0.380 2.553 1.076 0.877 0.841 -0.321 0.000 1.431 2.537 -1.189 0.000 1.042 0.885 -1.413 1.274 1.006 -1.137 -0.421 3.102 2.912 
1.801 1.864 1.267 1.291 1.904 2.378 +1 1.018 1.554 0.979 1.489 -0.657 0.479 0.501 -1.092 1.087 0.625 0.562 0.079 0.000 0.996 -0.748 -0.760 2.548 0.989 0.463 1.327 0.000 0.920 0.851 1.698 1.678 0.665 1.103 0.944 +1 0.779 0.003 -1.265 0.445 0.015 1.259 0.914 -1.537 2.173 1.050 0.093 0.676 0.000 1.477 0.520 0.114 0.000 0.517 -0.117 -0.617 3.102 1.033 0.745 0.986 1.238 0.789 1.037 0.942 +0 0.647 2.022 -1.498 0.823 -0.678 0.791 1.230 0.310 2.173 0.347 0.428 -1.707 0.000 0.484 -1.214 1.385 0.000 0.741 -0.454 0.075 3.102 0.671 1.209 0.991 0.865 0.842 0.765 0.803 +1 0.405 -1.193 -0.681 0.795 1.145 1.365 0.655 0.358 0.000 2.257 -0.772 -1.523 2.215 0.917 0.697 -0.290 0.000 0.542 0.172 -0.281 3.102 1.115 0.672 0.993 1.008 1.038 1.523 1.173 +0 0.595 1.815 1.601 0.456 -1.364 0.636 0.185 0.609 0.000 0.601 -1.790 1.151 0.000 0.675 1.599 0.119 0.000 2.257 0.466 -0.924 3.102 0.892 1.013 0.997 0.544 0.615 0.614 0.598 +1 0.909 -0.257 -0.572 1.501 -1.423 1.137 -1.803 -0.227 0.000 1.445 -0.367 1.241 2.215 0.598 -0.761 -0.088 0.000 1.538 -0.577 1.480 3.102 0.915 1.018 1.121 1.141 0.358 0.792 0.835 +1 0.554 -0.603 0.571 0.430 -1.657 1.108 0.888 -0.505 0.000 1.208 -0.539 1.449 0.000 1.466 0.390 -1.434 2.548 1.055 0.411 -0.084 0.000 0.698 1.100 0.991 0.748 0.880 0.886 0.771 +1 2.221 1.381 0.384 0.319 -1.660 1.808 0.175 -0.991 0.000 1.001 0.216 1.320 2.215 0.692 0.645 0.549 0.000 0.753 -0.198 -0.203 3.102 0.761 0.619 1.123 0.874 0.789 0.839 0.711 +1 0.504 0.625 -0.848 1.359 1.598 2.850 -0.941 1.549 0.000 2.634 1.093 -0.168 1.107 1.602 -2.049 0.065 0.000 1.444 1.120 -0.787 0.000 1.347 1.289 0.987 1.641 0.993 1.108 1.106 +1 1.042 0.122 -1.545 0.824 -1.079 1.171 -0.493 0.188 2.173 0.768 -0.800 0.989 2.215 0.276 -0.549 0.577 0.000 0.805 1.355 -1.544 0.000 0.861 0.971 0.988 1.506 0.950 1.125 0.949 +0 0.664 -0.025 1.616 1.605 -1.083 0.805 -2.581 -0.510 0.000 0.710 -2.405 -1.567 0.000 1.025 -0.747 0.996 0.000 1.687 0.574 0.052 1.551 1.039 1.290 0.982 1.167 0.851 1.121 1.040 +1 0.828 1.095 1.563 0.413 -1.513 0.383 -1.034 0.185 1.087 0.597 0.837 0.697 0.000 0.690 0.521 -1.145 1.274 0.394 2.378 0.398 0.000 0.758 0.776 0.981 0.890 0.829 0.808 0.743 +1 1.125 -0.595 1.383 1.259 1.311 1.069 -0.625 0.548 2.173 1.073 -0.706 -0.068 2.215 2.577 -0.690 -1.170 0.000 0.538 1.090 -1.119 0.000 0.637 0.947 0.996 1.209 0.835 0.968 0.908 +0 0.799 1.486 -0.681 0.653 -0.326 1.030 0.871 0.393 0.000 1.748 0.342 -1.550 2.215 0.693 0.659 1.067 0.000 0.605 -0.702 -0.217 1.551 0.921 1.036 0.991 1.100 1.046 1.148 0.975 +0 1.397 1.504 -1.592 0.707 1.161 0.933 1.702 0.055 0.000 0.639 0.342 -0.887 2.215 0.671 2.130 0.661 0.000 0.859 1.045 -1.197 3.102 0.864 0.886 0.994 0.832 0.362 0.778 0.793 +0 1.208 1.782 0.655 0.994 1.267 0.441 -1.476 0.113 0.000 1.051 -0.151 -1.297 2.215 0.432 1.073 -0.657 2.548 0.692 1.547 -0.797 0.000 2.256 1.320 0.996 1.881 0.645 1.178 1.408 +0 4.114 -1.136 0.450 2.004 -1.726 1.308 -0.879 0.148 2.173 1.928 -0.140 -1.105 0.000 3.007 -0.566 -1.483 2.548 0.593 0.534 0.163 0.000 1.386 1.326 3.680 2.102 2.480 1.967 1.801 +1 1.399 0.606 0.867 1.374 1.353 0.733 1.666 -0.996 0.000 0.746 1.539 -0.368 0.000 0.455 0.557 -1.640 0.000 0.765 0.642 -0.210 1.551 0.931 0.587 0.988 0.594 0.152 0.528 0.632 +1 1.264 1.592 1.627 0.641 -0.991 0.620 -0.262 -0.512 2.173 0.599 1.987 0.752 0.000 1.310 -0.283 -0.005 0.000 0.628 1.085 -1.595 3.102 0.684 0.534 0.991 1.422 0.791 0.891 0.833 +0 1.966 0.808 -0.016 0.658 1.676 0.869 0.184 -0.743 2.173 0.894 1.435 0.678 0.000 1.127 0.708 -1.472 2.548 1.110 -1.667 1.421 0.000 0.401 1.375 1.574 1.115 0.839 1.085 
1.171 +1 0.668 -0.818 0.959 0.964 1.486 0.434 -0.861 0.515 1.087 1.381 -0.350 -0.362 2.215 0.754 0.192 1.710 0.000 0.601 1.186 -1.508 0.000 0.510 0.874 0.991 1.403 0.861 0.941 0.984 +0 0.950 -0.329 -1.336 0.631 0.380 0.792 -0.509 0.365 1.087 0.270 -0.188 -0.822 0.000 0.704 -0.269 -1.692 0.000 0.389 1.039 -1.396 3.102 0.473 0.798 1.072 0.614 0.830 0.621 0.542 +0 0.457 -0.153 -1.248 2.186 -0.492 1.606 -0.810 1.150 1.087 1.493 0.732 -0.908 0.000 1.704 -0.727 0.748 1.274 0.514 -0.278 -0.373 0.000 0.841 1.682 0.988 2.009 0.732 1.501 1.380 +1 1.601 0.400 -0.393 0.310 -0.852 0.615 0.275 0.933 1.087 0.558 1.776 -0.774 0.000 0.771 1.275 1.400 2.548 0.891 1.698 1.049 0.000 0.921 1.071 0.988 1.000 0.616 0.803 0.844 +1 0.921 -1.862 0.970 1.384 1.476 0.471 -0.625 1.482 0.000 1.057 0.213 -0.073 2.215 1.496 0.037 -0.841 0.000 0.950 -1.101 0.494 3.102 1.405 1.110 0.994 2.272 0.886 1.417 1.380 +0 0.609 0.134 0.525 0.958 -1.037 0.947 -0.031 -0.127 2.173 0.795 2.651 0.915 0.000 1.442 -0.879 1.616 2.548 0.516 -1.276 -0.280 0.000 3.540 2.550 1.044 0.936 1.618 2.028 1.519 +1 0.467 0.148 1.668 1.245 -0.452 0.916 -1.138 -1.444 1.087 0.834 -1.150 0.448 2.215 0.660 1.114 0.506 0.000 0.493 -1.294 0.267 0.000 1.191 1.029 0.997 1.079 1.275 1.014 0.906 +0 1.084 -0.446 -0.619 0.392 0.302 1.103 -2.219 1.275 0.000 1.380 -0.586 -1.614 2.215 2.583 -1.392 -0.098 0.000 1.768 -2.130 -0.471 0.000 1.486 1.634 0.994 0.993 0.327 1.425 1.124 +1 0.516 -0.346 -0.670 0.711 -1.096 0.662 1.828 0.474 0.000 1.329 1.070 -1.259 2.215 0.849 0.822 1.317 2.548 0.525 -0.438 -0.055 0.000 0.727 0.722 0.978 0.691 0.833 0.840 0.775 +1 0.529 0.466 -0.520 1.165 0.379 0.905 -2.117 -0.369 0.000 0.757 0.476 0.949 0.000 1.933 0.297 1.579 1.274 0.790 1.301 1.562 0.000 1.033 0.941 0.981 0.762 0.710 0.793 0.734 +1 0.820 -1.966 -1.108 0.529 1.227 0.727 0.361 0.863 2.173 0.628 0.478 -0.408 2.215 0.506 -1.657 0.296 0.000 0.450 -0.396 -1.589 0.000 0.647 0.903 0.983 1.047 0.908 0.964 0.783 +0 0.914 0.142 1.209 0.550 1.024 0.897 2.419 0.917 0.000 0.957 1.602 -0.538 2.215 1.718 0.970 -0.906 0.000 1.013 0.157 -0.401 3.102 2.777 1.741 0.990 0.803 0.717 1.181 1.063 +0 1.488 -1.054 1.496 2.561 1.415 1.283 0.340 -0.236 0.000 0.718 -1.456 -0.103 1.107 1.731 -0.431 -1.520 2.548 1.088 -0.685 -0.145 0.000 0.860 0.986 1.011 1.247 1.299 1.105 1.086 +1 0.699 -0.625 -1.286 1.429 -0.411 0.499 -0.014 1.183 0.000 0.806 0.849 1.647 2.215 0.532 -0.354 0.658 2.548 0.539 0.284 -0.063 0.000 0.729 0.690 0.987 0.684 0.716 0.767 0.664 +0 0.841 -1.892 0.086 1.365 0.973 0.711 -0.163 -1.426 2.173 0.830 -0.213 0.104 0.000 1.188 -0.263 -0.644 0.000 0.975 -0.354 1.614 3.102 0.951 1.050 1.064 0.922 0.363 0.965 0.991 +0 0.595 -1.338 -0.997 0.576 -0.474 0.722 -0.540 0.353 0.000 1.074 -1.348 -1.236 2.215 0.893 -0.052 -0.721 0.000 1.102 2.214 0.971 0.000 1.042 1.249 0.979 0.826 0.120 1.590 1.273 +1 0.496 -1.100 1.616 0.091 1.526 0.678 -0.243 -1.641 2.173 0.992 0.663 0.467 0.000 0.743 -0.377 -0.473 2.548 0.548 2.144 -1.410 0.000 0.977 0.960 0.977 0.630 0.772 0.865 0.746 +0 0.513 -0.406 0.299 1.193 -0.665 0.966 1.443 0.629 0.000 1.057 1.082 1.029 2.215 1.962 -0.561 -1.183 2.548 0.562 0.943 -1.051 0.000 1.138 0.834 0.990 0.980 2.046 1.390 1.143 +1 2.090 -0.309 -1.316 1.446 -0.931 0.558 2.211 0.413 0.000 0.923 -1.032 -0.168 2.215 1.064 1.948 1.488 0.000 1.217 -0.207 0.102 0.000 0.735 0.940 0.999 1.205 1.625 1.142 1.039 +0 0.698 2.076 -0.820 0.984 -0.834 0.825 0.417 -1.448 2.173 1.525 0.786 1.103 2.215 1.077 1.786 0.507 0.000 1.264 -0.240 -0.418 0.000 1.029 0.940 1.005 0.876 1.273 0.986 0.918 +1 0.647 
-0.037 0.256 1.037 -1.223 0.847 -0.524 0.693 0.000 0.666 0.284 -1.031 2.215 0.502 -1.533 1.225 0.000 0.941 0.132 -0.080 0.000 0.897 1.143 1.103 0.655 0.531 0.726 0.691 +0 0.959 0.989 -0.703 1.886 -1.037 1.833 0.870 -0.484 2.173 1.565 -1.105 1.454 0.000 0.602 -1.598 0.861 0.000 2.223 -0.225 0.953 1.551 0.889 0.998 0.997 1.032 2.430 2.039 1.715 +1 2.171 -0.507 0.843 0.521 -0.981 1.178 -0.703 -0.462 2.173 0.695 -0.649 -1.216 1.107 0.584 0.948 1.070 0.000 0.656 -1.362 -1.407 0.000 1.327 0.927 1.469 1.357 0.837 0.969 0.903 +0 1.174 -0.070 -1.173 1.393 -1.021 0.891 -0.608 0.808 2.173 0.752 0.214 1.371 0.000 1.095 0.919 0.140 2.548 0.370 0.780 0.493 0.000 0.550 0.727 0.982 1.310 1.312 1.186 0.957 +1 1.038 0.306 1.079 0.815 -1.532 0.433 -0.648 1.425 2.173 0.996 -1.295 -0.319 2.215 0.652 0.426 -1.156 0.000 0.412 -0.615 -0.164 0.000 0.577 0.808 0.989 0.570 1.023 0.824 0.686 +0 0.428 -0.635 -0.364 2.359 0.468 0.509 -0.051 0.529 0.000 1.242 0.176 1.464 2.215 1.931 1.810 -1.059 0.000 1.200 -0.454 -1.020 3.102 2.782 1.996 0.990 1.391 0.957 1.418 1.659 +1 0.653 -0.948 -0.506 1.566 -1.326 2.561 1.832 -0.196 0.000 2.335 -0.486 1.281 1.107 1.135 -1.079 1.099 0.000 2.133 -0.511 -1.688 3.102 1.060 3.405 0.989 1.344 0.912 3.093 2.527 +1 0.595 0.316 1.569 1.497 0.466 0.542 -1.361 -0.291 2.173 0.800 0.816 0.997 0.000 0.868 -0.255 -1.253 2.548 0.903 1.006 -0.939 0.000 1.109 0.954 1.096 1.133 0.816 0.975 0.885 +0 0.951 -0.856 0.223 1.308 1.218 2.035 -0.539 -0.913 2.173 2.561 -0.533 0.793 2.215 0.730 -0.796 -0.526 0.000 0.696 0.364 -1.013 0.000 0.890 0.967 1.207 0.911 3.357 1.653 1.268 +0 6.695 -1.608 -0.184 0.835 -1.599 3.701 -1.419 1.443 2.173 1.586 -1.140 1.131 2.215 3.735 0.336 -1.072 0.000 1.133 -1.375 0.203 0.000 3.048 3.870 3.133 4.193 1.096 4.272 4.316 +1 0.824 -0.100 0.108 1.057 0.922 1.720 0.565 0.786 0.000 4.604 -0.181 -0.861 2.215 1.645 0.812 1.254 0.000 2.053 -0.485 0.424 3.102 0.988 1.551 0.982 1.982 2.603 1.947 1.556 +1 0.883 -0.700 -1.093 0.509 1.376 0.785 -1.312 -1.480 0.000 1.399 -1.234 0.394 2.215 0.826 -1.797 0.987 0.000 2.217 -0.949 -0.384 3.102 0.719 1.090 0.988 1.165 1.031 0.953 0.892 +1 0.994 0.404 1.591 0.916 0.679 0.825 1.875 -1.461 0.000 0.560 -0.332 0.850 2.215 1.631 -1.171 -0.153 2.548 1.579 -0.208 -0.388 0.000 2.668 1.813 0.986 1.460 0.941 1.740 1.382 +1 0.787 -0.358 -0.844 0.666 -1.729 1.051 -0.234 -1.547 2.173 1.301 -0.010 0.269 0.000 0.624 -1.774 -0.570 0.000 1.051 -0.292 -0.358 0.000 0.856 1.093 0.987 0.731 1.117 1.038 0.901 +1 0.572 -0.671 0.740 0.721 1.534 0.783 0.553 0.035 2.173 0.648 -0.117 -1.529 1.107 0.439 -1.403 -0.625 0.000 0.484 0.319 -1.401 0.000 0.656 0.894 0.988 0.862 1.096 1.053 0.828 +1 1.323 -0.271 0.767 0.836 -0.650 0.733 0.414 0.672 0.000 0.863 -0.900 -0.784 0.000 0.840 0.087 -1.253 2.548 0.748 -0.709 1.324 1.551 0.700 0.785 1.394 0.758 0.534 0.615 0.599 +1 1.004 0.891 1.219 1.196 -0.309 0.412 1.035 -0.145 0.000 1.127 0.184 1.252 2.215 1.440 0.336 -0.675 0.000 0.686 -0.465 0.313 3.102 0.796 0.735 1.489 1.086 0.667 0.793 0.789 +1 2.658 -1.671 -0.341 0.201 -0.145 1.061 -1.303 1.148 1.087 0.470 -1.076 0.451 0.000 0.859 -1.230 -1.446 2.548 0.618 -1.160 1.513 0.000 0.582 0.581 0.996 1.453 0.855 1.014 0.792 +1 0.525 0.670 -0.169 0.988 0.545 1.234 0.411 -0.375 0.000 0.681 -0.416 0.038 0.000 2.111 -0.879 1.556 2.548 1.295 0.084 -1.164 3.102 0.895 1.032 0.984 0.814 1.070 0.876 0.752 +0 0.850 -0.117 -1.529 1.068 -0.572 0.338 1.628 -1.643 0.000 0.719 1.360 -0.590 2.215 0.906 0.273 1.003 2.548 0.925 -1.042 0.450 0.000 1.107 0.772 1.003 0.882 0.984 0.787 0.758 +1 0.662 2.000 
-0.216 1.771 -0.932 1.471 1.444 0.966 1.087 0.669 0.786 -0.664 2.215 0.598 1.548 -0.021 0.000 0.984 1.234 1.653 0.000 0.849 0.928 0.992 0.960 1.530 1.267 0.982 +0 0.453 -1.992 -1.022 1.046 0.951 0.992 -1.053 0.083 2.173 1.115 0.164 -1.565 2.215 0.514 -0.214 -0.554 0.000 0.700 -1.374 1.463 0.000 0.818 0.874 0.986 1.167 1.845 1.400 1.067 +1 1.530 -0.776 1.711 0.090 0.952 1.037 -0.468 -0.173 2.173 0.742 0.865 0.725 2.215 0.929 1.422 -1.279 0.000 0.427 -1.051 1.042 0.000 1.469 1.072 0.980 1.163 1.332 1.050 0.983 +1 1.064 -2.158 -0.937 0.100 1.302 0.553 -2.114 0.481 0.000 1.692 -0.962 -1.236 1.107 1.688 -1.003 0.702 0.000 0.412 -0.374 0.257 3.102 0.998 0.639 0.979 0.830 0.763 1.018 0.878 +1 1.623 0.496 -0.838 0.500 -0.332 0.987 -0.189 0.780 0.000 1.350 0.097 -1.491 2.215 0.791 -0.852 0.303 0.000 0.577 -0.653 -0.383 3.102 0.898 0.710 0.991 1.022 0.763 0.882 0.973 +0 1.831 1.342 1.419 1.523 0.906 1.197 -0.273 -0.334 0.000 0.621 0.689 -0.005 0.000 0.306 0.251 -0.605 0.000 1.026 0.866 -1.159 3.102 0.858 0.758 1.031 1.384 0.645 0.956 0.864 +1 1.025 -0.581 -1.061 0.166 0.973 0.883 -0.590 0.235 2.173 1.278 -0.376 1.411 0.000 1.169 0.009 -0.462 1.274 0.537 -0.896 1.051 0.000 0.512 1.064 0.983 0.805 0.840 0.858 0.797 +0 0.460 -1.792 -0.436 0.542 0.701 1.044 -0.278 -0.669 2.173 0.531 -1.229 -1.169 0.000 0.929 0.849 1.513 0.000 1.518 0.073 0.343 1.551 0.929 1.395 0.979 0.868 1.082 1.128 0.912 +0 0.621 -0.508 -1.679 2.017 0.693 2.477 -0.998 1.368 0.000 2.410 0.297 -0.309 0.000 2.525 1.967 -0.854 0.000 1.123 0.833 -0.365 3.102 1.177 1.903 1.307 1.147 0.444 1.511 1.235 +0 0.712 1.145 -1.266 2.816 1.635 0.984 0.443 -0.169 2.173 0.961 1.071 0.072 2.215 0.467 -0.459 -0.455 0.000 0.955 1.754 1.023 0.000 1.447 1.032 0.987 1.618 0.568 1.205 1.044 +0 0.884 1.111 -0.624 0.325 0.382 0.467 -0.472 1.127 2.173 0.706 -0.099 -0.887 2.215 0.893 -0.957 0.642 0.000 0.639 0.588 1.366 0.000 0.973 0.911 0.986 1.203 0.835 0.925 0.913 +0 1.354 -0.864 0.207 0.362 0.970 0.453 -0.688 -0.610 2.173 1.137 -2.424 -1.248 0.000 0.699 -0.887 1.193 0.000 1.103 0.245 1.496 3.102 1.590 1.264 0.990 0.964 0.807 1.076 0.963 +0 0.448 0.593 -1.605 1.847 -1.423 0.982 0.619 -0.073 2.173 0.428 0.176 0.785 0.000 0.939 0.513 -0.769 2.548 0.494 -1.713 1.194 0.000 0.847 0.961 0.985 1.492 0.704 1.007 0.919 +1 0.506 -0.144 0.562 1.011 -1.057 0.740 -1.048 0.785 0.000 0.569 -0.551 1.457 0.000 1.198 -0.518 -0.044 2.548 1.169 0.767 -1.411 3.102 0.855 0.946 0.987 0.708 1.127 0.899 0.774 +1 0.910 -0.679 0.815 1.748 -0.054 0.680 -0.066 -0.845 0.000 1.251 0.107 1.668 0.000 0.818 -1.345 -1.439 2.548 0.554 0.691 0.183 0.000 0.946 0.939 1.231 0.820 0.224 0.700 0.762 +1 2.991 -0.038 1.261 0.502 0.192 2.866 1.837 -0.359 0.000 1.506 -0.869 -1.554 2.215 0.964 -0.683 1.306 1.274 0.517 -1.369 -1.277 0.000 5.363 3.906 1.392 1.355 0.690 3.164 2.575 +1 0.908 -1.732 -1.690 0.103 0.007 1.106 -0.572 -1.402 2.173 1.804 -1.367 -1.315 2.215 0.874 -0.495 -0.456 0.000 3.539 0.667 0.412 0.000 0.834 1.554 0.988 0.787 0.902 1.206 0.952 +1 1.563 -1.103 0.259 1.207 0.722 1.666 0.205 -1.059 0.000 0.630 0.108 1.219 0.000 1.388 -0.443 -1.381 1.274 1.429 -0.262 0.363 0.000 0.901 1.034 0.985 0.577 0.686 0.786 0.713 +1 0.901 0.728 0.150 0.262 -1.654 0.953 0.263 1.110 0.000 1.582 0.709 -0.907 2.215 0.736 0.930 1.434 1.274 0.846 -0.110 -0.037 0.000 1.211 0.843 0.981 0.954 0.998 0.955 0.845 +1 1.035 -0.281 -1.450 0.468 1.676 0.851 -0.759 -0.963 2.173 0.779 -1.508 -0.406 0.000 0.913 -0.275 1.168 2.548 1.878 1.075 0.320 0.000 1.086 0.999 0.979 0.734 1.063 0.832 0.750 +0 1.211 1.032 -0.717 0.630 
1.017 1.913 1.043 -0.157 0.000 1.888 2.323 -1.738 0.000 1.369 -0.538 1.578 1.274 2.015 0.464 0.564 3.102 0.625 1.782 1.210 1.206 1.265 1.753 1.377 +1 0.778 0.192 1.361 0.628 0.559 0.852 -0.649 -0.050 0.000 0.705 1.031 1.642 2.215 1.160 0.101 -0.991 2.548 0.533 -0.464 1.009 0.000 0.841 0.951 0.996 0.908 0.819 0.870 0.792 +1 0.999 1.000 1.506 1.084 -1.032 0.700 0.185 0.554 0.000 0.630 0.848 1.209 1.107 1.588 1.736 -1.388 0.000 2.356 1.041 -0.244 3.102 1.035 1.064 1.089 0.707 1.083 0.786 0.804 +0 0.511 0.703 -0.614 1.148 -0.535 1.136 -0.598 1.424 2.173 0.686 -1.403 1.201 0.000 1.283 -1.543 -1.691 0.000 2.524 0.359 -0.282 0.000 0.755 1.015 0.997 1.249 1.124 0.945 0.938 +1 0.980 -0.305 -1.177 0.647 0.212 0.980 -0.843 -0.499 0.000 1.687 -1.095 0.978 2.215 0.742 -0.591 -1.494 2.548 0.554 0.056 0.219 0.000 0.864 0.812 1.047 1.135 0.984 0.949 0.828 +1 1.148 1.428 0.726 0.795 1.648 0.851 0.334 -1.157 2.173 0.547 0.554 -0.025 0.000 0.722 -0.322 -0.239 2.548 0.917 0.712 1.111 0.000 0.799 0.946 0.988 0.988 0.798 0.883 0.748 +1 0.923 -1.337 -1.720 0.720 -0.078 1.008 0.634 0.693 1.087 0.863 0.411 -0.822 0.000 1.064 -0.003 -1.420 0.000 1.178 2.076 0.353 0.000 0.817 1.375 1.125 0.999 0.980 1.082 0.992 +0 0.683 0.934 -0.071 2.487 0.408 0.807 0.622 -1.480 0.000 1.067 0.155 1.512 1.107 1.751 0.311 -0.749 2.548 0.701 -1.002 0.937 0.000 1.530 1.009 0.988 1.488 1.304 1.308 1.259 +0 1.283 -0.676 -1.231 0.516 -0.752 0.846 0.582 0.433 2.173 0.933 2.126 -1.547 0.000 1.013 -0.693 0.017 2.548 0.520 -1.510 1.008 0.000 3.365 2.190 0.981 0.825 0.940 1.527 1.548 +1 0.617 0.118 1.076 0.933 -1.679 0.976 -0.352 -0.392 2.173 1.066 -0.317 0.225 0.000 1.673 0.419 1.544 0.000 0.704 0.847 1.542 0.000 1.173 0.964 0.986 0.711 0.683 0.757 0.788 +0 0.438 -0.780 -0.328 0.136 1.133 0.935 -0.005 -0.238 1.087 1.769 0.838 1.466 1.107 0.974 0.638 -0.831 0.000 1.110 0.764 0.444 0.000 1.054 0.928 0.977 0.978 2.072 1.101 0.884 +1 0.636 -1.047 -0.807 0.804 0.298 0.901 -0.050 1.145 1.087 1.102 -0.549 -1.613 2.215 1.008 -0.439 -0.172 0.000 0.452 0.129 -0.445 0.000 0.301 0.939 0.986 1.029 0.972 0.980 0.840 +0 1.406 -1.022 1.699 0.183 0.979 0.779 -0.181 0.632 0.000 0.615 -1.101 -0.518 2.215 0.753 -2.681 -1.142 0.000 0.724 0.358 -0.078 3.102 2.915 1.698 0.976 0.767 0.570 1.117 0.981 +1 1.830 0.146 -0.872 0.716 -0.258 0.852 -0.428 0.871 2.173 0.556 0.857 1.557 2.215 0.587 -0.013 -0.004 0.000 0.512 1.550 -0.471 0.000 0.699 1.012 0.986 0.918 0.924 0.928 0.796 +1 1.591 0.464 0.523 1.143 0.779 2.247 0.279 0.974 2.173 4.049 0.231 -1.017 0.000 1.372 0.940 0.122 0.000 1.583 2.290 -0.885 0.000 0.992 1.274 0.991 1.090 0.896 1.949 1.570 +1 1.225 -0.285 0.866 0.852 0.577 1.015 0.207 -1.630 2.173 1.175 -0.251 -0.357 0.000 1.070 -0.493 -1.017 2.548 0.931 0.577 0.519 0.000 1.178 0.984 0.978 1.305 0.845 0.975 0.971 +0 0.842 -0.397 -0.624 0.829 -0.928 0.505 0.427 0.250 0.000 0.728 0.135 1.599 2.215 1.260 -0.809 1.189 1.274 0.770 -0.791 0.626 0.000 0.779 0.856 0.996 1.010 0.658 0.819 0.801 +1 0.898 0.536 -1.164 0.828 -0.418 0.798 0.440 0.307 2.173 0.875 0.059 1.506 0.000 0.449 -0.851 1.557 2.548 0.408 -0.289 0.768 0.000 0.506 0.855 0.993 0.884 0.873 0.790 0.724 +1 1.520 0.393 -0.208 1.042 -0.858 0.906 1.480 -0.707 0.000 1.341 0.592 1.221 2.215 1.192 -0.458 1.274 1.274 1.158 1.988 0.810 0.000 1.694 1.717 0.987 1.184 0.789 1.401 1.249 +1 0.593 0.325 0.843 0.534 1.163 0.794 -0.817 1.728 0.000 1.289 -0.935 -0.523 2.215 0.932 0.607 -0.497 2.548 0.372 -0.540 0.744 0.000 0.648 1.039 0.992 1.018 1.058 0.877 0.808 +1 0.891 -1.507 1.261 0.780 -0.591 0.692 -0.493 -0.114 2.173 
1.154 -0.311 0.692 2.215 1.054 -0.440 -1.535 0.000 1.255 -1.370 -1.152 0.000 0.890 1.093 1.149 1.046 0.881 0.929 0.851 +1 0.335 2.040 -0.850 2.260 -0.386 0.506 0.050 -1.673 2.173 1.142 0.496 0.673 2.215 0.779 1.843 1.205 0.000 0.588 1.106 1.737 0.000 0.435 0.851 1.000 1.057 0.991 0.951 0.861 +0 1.889 -0.138 1.150 1.012 1.711 0.836 -0.163 -0.226 1.087 0.719 -0.914 -0.871 0.000 0.571 -1.758 0.111 0.000 0.526 0.487 -0.789 3.102 0.914 0.948 0.990 0.744 0.434 0.838 0.851 +0 0.642 0.476 1.149 0.954 -0.913 1.620 0.137 -1.562 2.173 1.707 -0.328 0.262 0.000 0.582 -0.829 0.117 0.000 0.864 -0.783 -1.143 3.102 0.787 0.976 1.040 0.910 0.847 1.282 1.061 +0 4.682 1.002 -1.371 1.893 -1.244 1.743 0.674 0.110 0.000 2.114 1.093 0.599 0.000 0.760 0.134 -1.208 2.548 1.396 0.367 1.136 3.102 1.963 1.484 0.995 0.826 0.684 1.136 1.698 +1 1.579 -0.703 -1.104 0.579 -1.000 1.557 0.677 1.071 2.173 0.983 1.087 0.002 1.107 0.531 0.215 -0.254 0.000 0.820 -0.410 -0.843 0.000 0.457 0.736 0.989 1.712 1.548 1.351 1.045 +1 3.715 0.702 1.626 0.515 0.860 1.897 2.346 -0.378 0.000 0.675 0.865 0.915 0.000 0.849 -0.254 0.684 2.548 0.798 0.839 -0.647 3.102 0.989 0.872 1.219 1.077 0.729 1.295 1.531 +0 1.767 0.515 -0.613 1.365 -0.317 3.272 -0.745 0.899 0.000 1.620 0.341 -1.032 1.107 1.635 -2.659 -0.919 0.000 0.664 0.044 1.676 3.102 6.848 3.664 0.982 0.910 0.620 2.934 2.543 +1 0.629 -1.419 1.419 0.595 -1.345 1.223 -2.453 0.211 0.000 0.682 -1.972 -1.519 0.000 1.253 -0.517 -1.510 2.548 1.270 -0.324 0.461 3.102 0.803 1.294 0.989 0.585 0.948 1.186 1.066 +1 0.872 1.353 0.808 0.844 0.543 0.334 -1.017 0.308 0.000 1.209 0.229 -0.934 2.215 0.952 -0.150 1.731 2.548 0.454 -1.473 -0.532 0.000 0.467 0.968 0.988 1.422 0.802 1.281 1.308 +0 0.825 0.030 -1.071 0.533 -0.432 0.662 0.209 1.556 0.000 1.127 0.646 0.696 2.215 0.550 0.019 -0.291 0.000 0.914 0.941 -0.698 3.102 1.084 0.981 0.994 0.517 0.898 0.712 0.687 +1 0.510 1.140 -1.013 0.928 -0.318 0.588 -0.153 -1.261 0.000 0.815 -0.626 0.611 2.215 0.581 -0.467 -0.128 0.000 1.024 -1.231 1.473 1.551 0.916 0.917 0.978 2.151 0.677 1.625 1.317 +1 0.628 -0.155 -0.774 1.385 0.836 0.803 -1.028 -0.234 2.173 1.048 0.441 1.389 0.000 1.431 -0.196 -0.002 0.000 1.266 -0.988 -1.650 3.102 0.916 1.109 1.282 1.038 1.024 1.043 0.905 +1 0.709 0.275 1.536 0.093 -0.809 0.706 0.724 -1.149 1.087 0.765 1.682 0.177 0.000 0.848 1.039 0.991 0.000 0.940 -0.107 -0.504 3.102 0.906 0.975 0.935 0.862 0.609 0.796 0.832 +1 0.495 0.914 1.361 1.013 0.052 1.353 -0.039 0.188 2.173 1.157 -0.651 1.515 0.000 1.108 -0.748 -1.236 0.000 1.671 2.292 -1.266 0.000 0.822 1.436 0.990 0.863 0.968 1.126 0.906 +1 3.884 -0.229 1.101 0.210 0.569 1.021 1.231 -0.752 0.000 0.546 -0.552 -0.278 2.215 2.028 0.574 -0.620 0.000 0.760 -0.235 -0.925 0.000 0.862 1.109 0.992 0.484 0.625 0.952 1.319 +0 0.943 1.422 -1.220 0.743 1.134 0.611 0.457 0.878 1.087 0.900 0.757 -1.008 2.215 0.671 2.603 0.379 0.000 0.743 0.842 0.439 0.000 0.828 0.998 0.988 0.744 1.096 0.873 0.768 +0 1.642 -1.163 -0.703 0.295 -1.548 1.217 0.259 1.111 2.173 0.792 -0.088 -0.547 2.215 0.636 -0.102 1.578 0.000 0.845 0.189 0.085 0.000 0.800 0.799 0.986 0.672 1.463 1.086 0.862 +0 1.078 -0.581 1.257 1.322 -1.424 0.632 -0.451 0.600 0.000 1.354 0.112 -0.376 0.000 0.655 0.497 -0.134 2.548 1.195 -0.256 1.728 3.102 1.617 0.936 1.098 0.558 0.735 0.768 0.837 +1 1.392 0.525 -0.445 1.232 -0.989 2.957 -0.287 1.538 0.000 1.594 0.292 0.427 2.215 0.385 -1.839 -0.156 0.000 0.959 1.219 -0.313 3.102 0.297 1.150 0.985 0.729 0.973 0.967 0.956 +0 0.469 0.137 -1.226 0.718 -0.009 0.867 0.936 1.357 1.087 1.321 -0.164 -0.137 
2.215 1.149 1.601 -1.497 0.000 0.930 0.879 0.636 0.000 0.967 0.912 0.989 0.759 1.792 1.113 1.088 +1 0.504 -0.674 -0.015 0.663 -0.738 1.001 -0.318 0.660 2.173 1.266 0.614 -1.485 1.107 0.690 1.435 -0.189 0.000 0.483 1.151 -1.502 0.000 0.729 1.067 0.983 1.733 1.752 1.378 1.228 +0 2.450 1.863 0.223 0.080 1.329 1.098 0.937 1.419 0.000 0.736 0.754 -1.513 0.000 0.920 0.829 -0.340 0.000 0.981 -0.897 -1.542 1.551 0.925 1.174 0.994 1.813 0.806 1.244 1.193 +0 0.281 -1.028 1.735 0.685 -1.409 0.500 0.246 -0.617 1.087 0.595 0.856 1.036 0.000 0.645 -0.639 0.187 2.548 0.372 -1.289 1.374 0.000 0.920 0.769 0.993 0.800 0.585 0.604 0.579 +0 0.872 -1.795 1.334 1.056 0.136 0.577 0.578 -0.804 0.000 0.737 1.506 1.057 2.215 0.523 -0.628 -0.618 2.548 0.756 0.851 0.443 0.000 0.936 0.924 1.172 0.766 1.118 1.424 1.248 +1 0.974 -1.169 1.065 0.678 -0.116 1.190 -0.241 0.913 0.000 1.131 -0.156 -1.139 2.215 2.624 -1.122 -0.793 2.548 1.437 -1.642 0.677 0.000 1.995 1.909 0.987 1.096 1.172 1.563 1.227 +1 0.899 1.367 0.973 0.815 0.054 1.133 -1.063 -1.019 0.000 0.734 1.501 -1.093 2.215 1.236 1.101 0.453 1.274 1.649 0.271 -1.709 0.000 1.210 1.030 0.986 0.850 1.011 0.832 0.734 +1 0.865 0.067 -0.502 1.459 0.067 0.946 1.222 1.339 2.173 0.606 0.021 0.617 2.215 0.451 1.096 0.805 0.000 0.483 2.167 -0.651 0.000 0.632 0.769 0.985 1.293 0.986 0.904 0.769 +1 0.781 -0.174 -0.170 0.435 0.657 0.845 0.380 -1.723 0.000 0.605 1.355 0.397 2.215 1.549 1.883 -0.775 0.000 1.388 -0.598 0.959 1.551 0.680 0.929 0.983 0.626 1.121 0.698 0.632 +0 2.163 -0.110 1.211 0.310 -0.041 0.699 1.147 -1.402 0.000 0.941 1.974 -0.467 0.000 1.139 -0.769 0.260 2.548 1.227 -1.056 -1.649 3.102 1.035 1.752 1.025 0.851 0.914 1.413 1.226 +1 0.347 -0.411 1.356 0.739 0.051 1.719 -0.337 -1.466 0.000 0.827 0.456 0.147 2.215 1.159 -1.175 0.577 0.000 1.707 -0.648 -0.007 0.000 0.878 1.029 0.979 1.105 0.792 0.936 0.866 +1 2.388 -0.558 -0.717 0.983 0.208 1.005 -1.236 0.890 0.000 1.158 -0.992 1.733 2.215 0.457 -1.124 -1.513 0.000 0.525 -1.346 0.465 3.102 1.009 0.931 1.573 0.890 0.675 0.904 0.902 +1 0.375 0.611 -1.260 0.401 1.111 1.119 0.185 -0.568 2.173 0.777 0.274 1.654 0.000 0.911 0.171 0.096 2.548 1.306 0.270 0.672 0.000 0.821 0.717 0.984 1.016 0.708 0.799 0.747 +0 2.982 -1.084 -0.989 1.200 -1.739 1.283 -0.458 0.709 0.000 1.061 -0.195 -1.234 2.215 1.526 -0.950 0.533 2.548 1.466 0.193 0.316 0.000 1.038 0.867 1.637 1.053 1.474 1.155 1.313 +0 1.025 -0.811 0.303 1.781 0.763 1.096 -0.753 1.031 2.173 1.218 -1.031 -1.112 1.107 1.785 -0.146 -1.256 0.000 2.085 -0.711 -0.510 0.000 1.524 1.060 0.986 0.833 1.611 1.189 1.199 +0 0.626 -1.363 -0.986 1.620 1.542 0.341 -1.759 -0.783 0.000 0.896 -0.626 0.356 2.215 0.704 0.045 0.364 2.548 0.473 0.139 -1.436 0.000 0.773 0.837 1.061 0.981 0.298 0.802 0.705 +1 0.456 -2.149 0.139 1.536 1.017 0.873 0.276 -0.745 2.173 0.445 -0.660 0.470 0.000 0.873 -0.358 1.733 2.548 0.478 -1.888 -1.596 0.000 0.782 1.148 0.988 0.842 0.935 1.072 0.868 +1 1.219 -1.349 -0.283 1.135 0.238 0.963 0.477 1.562 2.173 0.589 -0.713 -1.057 0.000 0.447 0.106 0.617 1.274 0.487 -2.252 -1.420 0.000 0.835 0.796 0.988 1.617 0.633 1.012 0.957 +0 0.593 -0.770 1.033 1.078 -0.210 1.794 0.159 -1.256 2.173 0.940 0.142 0.571 2.215 1.094 -1.372 1.077 0.000 0.779 1.546 -0.418 0.000 0.819 1.467 0.996 1.421 1.905 1.753 1.355 +1 0.845 -0.685 -1.325 0.722 0.995 0.737 -0.516 -0.030 2.173 0.608 1.254 1.252 0.000 0.455 1.310 -0.306 0.000 0.720 -0.890 1.730 3.102 0.798 0.949 0.987 0.861 0.800 0.844 0.765 +1 0.595 -0.962 -0.632 0.412 -1.535 0.644 -0.072 1.670 2.173 0.584 -0.069 0.512 2.215 0.806 -2.006 
1.172 0.000 0.749 -0.895 -0.940 0.000 0.957 0.973 0.989 0.698 0.781 0.790 0.745 +1 0.383 -0.470 0.104 1.169 -0.941 1.033 2.355 1.454 0.000 1.428 1.051 -0.056 2.215 1.389 2.502 1.068 0.000 3.052 0.957 -1.709 3.102 0.991 1.499 0.983 1.031 1.879 1.312 1.188 +0 0.469 1.239 -0.446 0.975 0.558 0.681 -0.235 -1.068 0.000 0.602 -0.766 0.514 0.000 0.974 0.502 -0.429 1.274 1.649 1.403 1.438 3.102 0.741 0.781 0.992 0.975 1.123 0.899 0.809 +0 1.584 0.929 1.170 0.716 0.249 1.641 -0.316 -1.166 0.000 2.496 0.970 0.580 2.215 1.352 -0.544 -0.598 2.548 0.987 -0.984 -1.151 0.000 0.865 0.863 1.088 0.863 2.429 1.928 1.608 +0 0.694 -2.236 -0.003 1.110 0.550 0.708 -1.367 -1.344 0.000 0.848 -1.149 1.132 2.215 0.796 -0.320 -1.683 0.000 1.407 -0.107 -0.305 3.102 0.830 1.032 0.996 0.759 1.094 0.803 0.832 +1 1.580 -0.382 0.353 1.484 -0.194 0.999 -2.563 -1.574 0.000 1.224 0.399 1.371 2.215 1.079 -0.741 -0.192 0.000 0.480 0.352 -1.165 0.000 0.803 1.150 1.002 0.844 0.878 0.964 0.818 +0 1.294 0.637 1.016 1.545 1.425 1.018 0.374 -0.741 0.000 1.230 -0.296 0.546 1.107 0.927 -1.527 -0.641 0.000 1.038 0.956 -0.583 1.551 1.493 0.897 0.991 1.315 1.185 1.023 1.063 +1 0.304 -1.851 -1.119 1.287 -0.544 0.916 -0.426 -0.858 2.173 1.069 -0.867 1.010 2.215 1.159 0.318 1.135 0.000 0.732 -2.125 0.964 0.000 1.083 0.825 0.988 0.612 1.486 0.925 0.848 +0 0.762 -1.328 1.639 1.575 -1.320 1.004 2.830 1.582 0.000 2.089 0.464 0.142 0.000 1.067 0.230 0.697 0.000 1.491 0.105 -0.199 3.102 1.128 0.837 0.983 1.395 0.621 1.201 1.566 +0 0.406 -0.819 0.591 0.865 -0.401 0.855 0.977 1.244 2.173 0.897 -0.615 -1.517 0.000 0.934 -0.845 -0.134 2.548 0.556 -0.432 -1.077 0.000 0.952 0.950 0.987 0.984 1.625 0.901 0.770 +0 1.867 0.678 0.605 0.232 -1.037 0.826 -1.507 -0.800 2.173 0.665 -0.752 0.617 2.215 0.548 -2.062 -1.026 0.000 1.281 -1.854 1.454 0.000 0.728 0.918 0.991 1.700 1.125 1.142 1.190 +0 0.808 1.560 -1.694 0.832 0.318 0.974 0.835 0.083 2.173 1.204 -1.009 -1.391 2.215 0.383 -0.269 0.163 0.000 0.784 -0.470 0.998 0.000 0.421 0.789 1.103 1.881 2.321 1.477 1.113 +1 0.434 -0.154 1.046 1.016 -0.089 1.199 -1.760 0.690 0.000 1.408 -0.136 -1.345 2.215 1.617 -0.315 -0.598 0.000 0.882 -0.465 1.410 3.102 3.012 1.737 0.982 1.099 0.651 1.408 1.313 +0 0.338 -1.998 1.649 1.187 -0.265 0.795 -0.499 0.825 1.087 0.598 2.274 -0.931 0.000 0.601 1.950 1.233 0.000 1.054 0.518 -1.298 3.102 0.859 0.822 0.992 0.919 1.078 1.180 1.354 +0 0.604 0.235 -0.684 0.856 0.393 1.291 -0.864 -1.145 0.000 0.767 -1.966 1.022 0.000 1.269 -1.229 1.426 2.548 2.153 -0.633 0.144 1.551 2.343 1.481 0.985 0.646 1.216 1.180 1.013 +0 1.376 0.523 -0.122 1.959 0.300 0.878 0.803 0.706 2.173 1.125 1.185 1.703 0.000 2.663 0.940 -1.083 2.548 1.109 0.450 -1.530 0.000 0.603 0.933 0.984 0.911 1.915 1.324 1.203 +0 1.517 -0.242 -0.451 0.531 -0.391 0.596 2.512 1.646 0.000 0.864 1.331 1.111 2.215 0.799 -0.557 0.604 2.548 0.513 2.476 -0.517 0.000 0.806 0.897 0.987 0.764 1.097 1.158 1.530 +1 1.235 -0.130 0.821 0.571 -0.243 0.595 0.553 -0.451 2.173 0.758 1.116 -1.449 2.215 0.510 -1.303 0.290 0.000 0.370 -0.554 1.385 0.000 0.554 1.067 0.986 0.788 0.829 0.779 0.727 +1 0.789 -0.956 -0.995 1.743 -1.171 0.403 -2.172 -1.180 0.000 1.297 -0.565 0.171 2.215 0.600 -1.362 -1.679 0.000 0.481 -1.221 0.225 1.551 0.837 0.861 0.991 0.678 0.327 0.936 0.790 +1 0.519 0.668 -1.175 0.505 0.513 0.857 -1.439 -1.561 0.000 0.703 -1.062 -0.863 0.000 1.390 -0.177 0.166 1.274 1.352 -1.351 0.605 3.102 1.016 1.092 0.986 1.071 0.908 1.205 1.351 +1 1.141 -0.488 -0.226 0.652 0.837 0.890 0.157 -1.478 1.087 0.538 0.036 0.081 0.000 0.822 0.988 1.287 2.548 
0.514 -0.903 -0.261 0.000 0.455 0.930 0.989 0.967 0.822 0.859 0.728 +0 0.537 -1.408 1.207 0.168 -1.435 0.300 -1.745 0.919 0.000 0.498 -1.628 -1.184 0.000 1.150 -0.370 -0.404 2.548 1.118 0.170 0.252 3.102 0.779 0.961 0.978 0.754 0.553 0.703 0.654 +1 0.706 0.814 1.364 1.370 -1.038 0.777 -0.918 0.549 1.087 0.586 -0.715 -0.792 2.215 0.503 0.403 0.216 0.000 0.371 0.831 -1.642 0.000 0.492 0.764 1.129 0.920 0.934 1.028 0.784 +1 1.005 -0.048 -0.263 0.331 -1.726 0.897 0.251 1.337 2.173 1.272 1.652 -1.610 0.000 0.942 -1.063 -0.038 0.000 1.559 0.081 0.530 3.102 0.398 1.209 0.984 0.726 0.839 1.009 1.033 +0 1.084 1.800 -1.726 0.459 0.283 0.732 0.034 1.222 0.000 0.809 -0.678 -1.030 2.215 0.891 1.611 -0.152 0.000 0.980 0.335 0.135 3.102 0.925 0.935 0.987 0.854 0.838 1.056 0.924 +1 0.692 -1.421 0.463 0.484 0.768 1.721 -0.427 -0.832 0.000 0.583 -0.807 0.690 0.000 0.928 -0.588 -1.481 2.548 1.799 -0.130 0.271 0.000 0.672 0.940 0.973 0.649 0.686 0.647 0.593 +1 0.636 0.312 0.525 0.565 -0.245 0.697 -0.009 0.194 0.000 1.349 0.087 -1.292 2.215 0.643 0.910 0.735 0.000 1.417 -0.561 -1.446 0.000 0.848 1.301 0.989 0.559 0.697 0.784 0.731 +1 0.278 0.267 0.024 1.320 1.467 1.074 1.551 -1.343 2.173 1.138 1.901 0.454 0.000 0.832 0.741 -0.571 2.548 0.912 1.258 -0.312 0.000 0.896 0.873 0.990 1.832 0.881 1.228 1.291 +1 0.467 1.329 -1.705 1.220 -0.992 1.168 0.129 0.759 2.173 0.385 0.333 0.021 0.000 0.481 -0.411 -0.244 2.548 0.656 0.771 -1.012 0.000 0.559 0.872 0.994 0.640 0.784 0.818 0.667 +0 0.497 1.030 -1.296 0.338 0.335 0.979 0.759 -0.699 2.173 2.599 1.084 0.898 2.215 1.386 -0.227 -1.024 0.000 1.240 1.002 0.341 0.000 1.303 1.032 0.987 1.303 2.362 1.409 1.123 +1 0.958 -0.207 0.378 0.449 -1.323 1.083 -1.150 -1.619 2.173 1.135 -0.504 -0.183 0.000 0.662 -1.490 -0.166 0.000 0.935 -1.133 0.914 3.102 0.783 0.808 0.989 0.984 0.813 0.909 0.783 +1 0.765 -0.378 0.170 0.382 1.690 1.818 -2.245 -1.552 0.000 2.478 0.860 -0.246 0.000 0.751 1.718 1.483 0.000 1.124 0.896 0.856 0.000 0.688 0.862 0.986 0.589 0.634 0.592 0.585 +1 0.379 -1.546 0.285 0.872 -0.953 2.658 -0.959 1.091 0.000 1.897 -1.108 -0.805 1.107 0.982 -0.212 -0.635 2.548 1.102 -1.497 -0.778 0.000 0.971 1.614 0.983 0.830 0.716 1.557 1.223 +0 0.807 1.114 0.425 0.833 1.034 1.317 1.457 0.249 0.000 0.672 1.472 -0.647 2.215 1.076 0.758 -1.383 1.274 0.802 -2.008 -1.734 0.000 1.193 1.272 0.986 0.923 0.639 1.069 0.949 +0 0.332 -0.251 0.874 1.217 -1.324 0.594 0.733 0.907 2.173 1.437 -1.019 -0.086 2.215 0.908 2.118 -1.704 0.000 0.476 0.492 -1.355 0.000 0.747 0.888 0.986 1.478 1.745 1.555 1.232 +0 0.721 -0.275 -0.484 0.410 1.493 1.018 -1.039 -0.154 2.173 1.153 0.516 0.952 0.000 0.922 -0.229 1.369 0.000 1.624 0.309 -1.486 3.102 0.861 0.921 0.989 1.074 1.646 1.178 0.963 +1 1.291 0.183 0.608 0.121 -1.695 1.066 -1.234 -0.185 0.000 0.926 0.000 -1.639 2.215 1.431 -0.633 1.733 2.548 0.969 0.047 0.897 0.000 1.137 1.009 0.983 1.062 0.452 0.839 0.796 +0 3.018 -0.100 1.015 0.451 -1.266 1.001 0.624 -0.648 2.173 0.357 0.773 1.470 0.000 0.333 -0.129 -0.763 1.274 1.181 1.560 -0.636 0.000 0.933 0.917 1.431 0.833 0.294 1.007 0.960 +1 0.814 -0.217 0.838 1.581 -1.685 1.207 -0.413 -0.729 2.173 1.628 1.497 0.938 0.000 0.873 -0.464 0.169 0.000 1.190 0.733 -0.666 3.102 0.859 1.062 1.201 1.266 0.884 1.377 1.235 +1 0.866 -0.979 0.565 1.015 -0.546 0.961 -0.560 0.154 2.173 1.339 0.312 -1.714 0.000 0.466 2.047 0.735 0.000 0.704 -0.533 -1.381 3.102 1.670 1.122 1.093 0.748 0.856 1.150 1.089 +1 0.453 -1.405 -0.724 2.920 -0.214 0.803 -2.643 -1.549 0.000 0.921 -2.890 1.168 0.000 1.306 -1.162 1.193 1.274 0.812 -2.151 
-1.157 0.000 1.000 0.862 0.989 1.314 0.397 0.815 1.145 +1 0.540 -0.597 -0.998 0.668 0.995 0.583 -0.926 0.781 0.000 1.035 -1.474 -1.372 2.215 1.163 -0.480 -0.672 2.548 0.656 -2.222 0.084 0.000 1.079 1.114 0.987 1.033 0.913 0.867 0.857 +1 0.600 -0.008 0.801 0.348 -1.689 0.617 0.821 1.027 1.087 1.051 0.536 0.268 1.107 2.350 1.051 -1.185 0.000 0.412 -0.024 0.529 0.000 1.280 1.184 0.983 0.758 0.767 0.947 0.789 +0 0.741 -0.091 -0.886 0.306 -1.697 0.722 -2.032 -1.354 0.000 0.718 1.508 1.463 0.000 1.299 0.321 0.506 2.548 1.705 0.511 -0.196 3.102 0.747 0.941 0.988 1.005 0.688 0.824 0.906 +0 1.162 1.105 -1.153 2.117 -0.554 0.802 1.233 1.530 0.000 1.023 1.273 1.027 0.000 1.309 0.494 0.692 2.548 1.635 0.931 0.036 3.102 0.845 0.915 1.120 0.887 0.703 0.900 0.999 +1 1.509 0.778 -1.597 0.496 -1.150 0.951 0.448 0.252 1.087 0.693 1.217 -1.079 0.000 1.042 -0.313 0.218 0.000 1.471 1.357 1.170 3.102 1.664 1.234 0.990 0.956 1.206 0.991 0.956 +1 0.346 -1.470 0.489 0.747 -0.821 0.615 -0.396 1.182 0.000 0.674 0.949 0.736 2.215 1.355 -0.874 -0.917 0.000 0.533 -0.417 1.685 3.102 0.894 0.847 0.996 0.545 0.599 0.576 0.548 +0 0.380 -1.076 -0.065 2.097 -0.635 0.994 1.306 0.439 0.000 0.849 0.927 -1.197 1.107 0.741 1.005 0.864 0.000 2.178 0.669 1.433 3.102 0.877 1.123 0.992 2.424 0.861 2.132 2.366 +0 1.541 0.524 0.392 2.444 0.115 1.003 0.840 -1.635 0.000 1.536 -0.315 -1.629 2.215 0.516 1.046 -0.529 2.548 0.927 -0.927 0.323 0.000 0.669 1.167 0.987 0.794 1.098 1.194 1.175 +1 0.714 1.725 -1.396 0.737 0.739 0.523 0.840 1.612 2.173 0.504 -0.655 -0.298 2.215 0.385 1.562 1.035 0.000 0.643 1.212 -0.217 0.000 0.501 0.784 0.988 0.753 0.978 0.906 0.708 +1 0.606 0.198 1.699 0.964 -0.334 1.242 1.002 0.408 0.000 1.265 1.240 1.669 2.215 1.014 2.128 1.702 0.000 1.116 0.714 -0.714 0.000 1.354 1.042 1.023 0.536 1.064 0.846 0.824 +0 0.647 1.381 0.573 0.873 -0.771 0.756 0.377 1.519 0.000 1.015 0.780 -1.295 2.215 0.646 -1.119 -0.255 0.000 1.323 0.052 -0.385 3.102 0.943 1.036 0.988 0.716 0.862 0.833 0.770 +1 0.640 -1.090 -1.182 1.402 -0.927 0.936 0.051 0.851 0.000 0.619 -0.430 -0.589 2.215 0.748 -1.004 0.498 0.000 1.151 -1.220 1.442 0.000 1.032 1.039 0.986 0.640 0.419 0.670 0.991 +0 1.048 -0.715 1.080 0.741 -1.520 1.127 0.978 -0.173 2.173 0.569 0.058 1.091 2.215 0.817 0.389 -1.381 0.000 0.578 -1.424 1.143 0.000 1.131 0.782 0.988 1.452 1.211 0.990 0.907 +1 0.605 -0.265 -1.416 0.756 -0.384 1.474 0.452 -0.855 0.000 1.622 1.683 1.341 0.000 1.421 1.284 0.652 2.548 0.604 -0.439 1.024 3.102 3.755 2.184 0.987 1.593 0.831 1.465 1.504 +1 1.040 0.702 -0.386 0.369 -1.635 0.572 -0.391 -1.031 2.173 0.839 0.423 0.956 2.215 0.532 0.991 -1.452 0.000 1.298 1.654 0.771 0.000 0.934 0.860 0.987 0.887 1.085 0.901 0.797 +0 1.179 0.565 -0.277 0.678 0.145 0.667 0.225 1.248 2.173 1.217 -0.610 -1.366 2.215 0.287 1.593 -0.392 0.000 0.608 -1.243 0.182 0.000 1.144 0.962 0.991 1.529 1.104 1.134 0.967 +1 0.559 -1.813 0.167 0.511 -0.327 1.033 0.776 -1.449 2.173 0.353 -0.779 1.475 0.000 0.828 -0.380 -0.002 0.000 0.848 0.125 0.853 3.102 0.821 1.198 0.988 0.669 0.924 0.925 0.787 +1 0.965 0.244 0.038 0.745 1.427 0.866 -0.539 0.046 2.173 0.681 1.077 1.484 0.000 1.053 0.008 -1.666 0.000 0.866 -0.201 1.302 3.102 0.934 1.048 1.115 0.635 0.841 0.688 0.666 +1 0.922 -2.005 0.530 0.536 -1.467 0.856 -0.699 0.932 2.173 0.868 -1.003 0.152 2.215 1.152 -1.756 -0.917 0.000 0.878 -1.068 -1.422 0.000 0.845 0.996 0.988 0.870 0.846 0.814 0.786 +0 2.922 -0.164 0.530 1.773 0.496 2.284 0.154 -1.447 2.173 1.570 1.183 -0.076 0.000 0.697 -0.822 1.314 0.000 1.640 -0.821 1.693 0.000 1.251 0.789 0.995 
2.636 1.199 1.706 1.352 +0 0.640 -1.085 0.597 0.546 -0.483 0.658 -1.920 -1.038 0.000 0.883 -0.094 0.646 1.107 1.236 -0.588 1.251 0.000 1.211 -0.281 -1.452 3.102 1.817 1.178 0.988 1.044 0.893 0.973 0.906 +0 0.304 2.175 -1.621 0.785 0.753 1.072 0.546 -0.666 2.173 0.649 0.657 1.672 2.215 0.783 1.293 0.769 0.000 0.440 -0.172 0.879 0.000 0.590 1.051 0.992 0.623 1.058 0.755 0.673 +0 0.783 1.646 -0.924 0.535 1.280 0.546 0.562 -1.682 0.000 0.926 1.242 1.074 1.107 1.342 0.039 -0.351 2.548 1.326 0.311 0.025 0.000 1.305 1.004 0.987 0.829 1.378 1.009 0.931 +1 0.354 -1.404 0.210 4.521 0.906 3.115 -1.455 -0.914 0.000 1.436 0.054 0.925 1.107 1.984 1.274 -0.634 0.000 0.963 -1.123 0.941 0.000 2.637 1.675 1.029 1.605 0.263 1.798 1.798 +0 1.035 1.406 -0.724 0.833 -1.274 0.700 1.249 1.325 2.173 0.439 -0.426 0.919 0.000 1.060 0.485 0.754 2.548 0.870 1.637 -0.253 0.000 0.682 0.720 0.988 0.938 0.663 0.776 0.678 +0 0.487 0.597 -0.925 2.710 -1.659 1.634 1.142 0.261 0.000 0.724 0.948 0.641 0.000 1.223 2.258 -0.772 0.000 0.986 -0.853 1.226 3.102 0.800 0.919 0.988 1.129 0.791 1.029 1.131 +1 1.656 -0.268 1.441 0.393 1.029 1.693 -1.917 0.134 0.000 1.505 1.383 -1.297 1.107 0.953 1.967 -0.298 0.000 0.727 0.805 -0.746 0.000 0.995 1.064 0.988 0.570 0.807 0.880 0.927 +1 0.472 1.150 0.226 1.809 -0.403 1.113 -0.507 1.401 2.173 0.860 0.314 -1.396 2.215 0.315 0.028 1.234 0.000 1.015 -0.828 0.083 0.000 0.632 0.836 0.993 1.341 1.040 1.723 1.403 +0 1.371 -0.969 0.236 1.248 -1.078 0.802 0.788 0.950 0.000 0.701 0.783 -1.016 2.215 0.866 -0.549 -1.187 1.274 0.565 -1.522 0.489 0.000 1.819 1.329 1.678 1.327 0.645 0.954 1.037 +1 0.651 0.388 -1.364 0.688 -0.656 0.534 -0.618 -1.625 2.173 0.624 0.697 0.351 0.000 0.819 -0.955 0.415 2.548 0.449 -0.418 1.225 0.000 0.662 0.820 0.991 0.766 0.813 0.620 0.625 +0 1.790 1.406 1.377 0.770 0.712 0.896 1.340 -0.294 2.173 1.149 0.820 -0.677 0.000 0.352 -0.166 0.666 2.548 0.487 -0.161 -1.055 0.000 0.621 0.751 0.986 0.670 0.796 0.825 0.814 +1 1.001 -1.259 -1.235 1.686 -0.553 0.729 -1.044 0.109 2.173 1.165 2.595 1.599 0.000 1.165 0.305 0.744 2.548 0.794 -0.173 -1.331 0.000 2.567 1.951 1.039 0.931 1.063 2.042 2.267 +1 2.324 0.275 -0.179 0.442 -0.875 0.900 0.560 1.596 2.173 0.890 -0.272 0.741 0.000 0.848 -0.720 -1.703 2.548 1.229 0.287 -0.829 0.000 0.846 0.901 0.991 1.095 0.817 1.005 0.821 +1 1.052 -1.171 0.309 1.157 0.046 0.935 -0.301 -1.087 2.173 0.556 -0.089 0.853 0.000 1.234 -0.022 -0.097 0.000 1.645 -0.441 -1.713 0.000 0.923 0.988 0.993 1.617 0.997 1.338 1.100 +1 0.489 -0.121 0.475 1.099 -1.437 1.553 -0.400 0.247 2.173 0.950 -0.347 -1.209 0.000 0.962 -1.602 1.168 0.000 0.867 1.096 -0.980 0.000 1.134 0.790 1.004 1.169 1.025 1.060 0.886 +0 1.175 -0.294 1.320 0.834 -0.271 0.975 0.889 -0.965 2.173 0.735 1.594 1.580 0.000 1.389 0.655 0.194 2.548 0.936 1.160 0.685 0.000 0.871 0.982 1.358 1.256 1.260 0.999 0.905 +1 0.898 0.140 1.504 0.899 -0.141 0.965 0.478 -0.929 0.000 1.299 0.184 1.093 2.215 0.510 -2.382 0.283 0.000 0.972 1.054 -0.532 0.000 0.788 0.738 1.240 0.890 0.832 0.902 0.801 +0 0.930 0.252 -0.816 0.772 -1.401 0.495 1.306 0.386 2.173 0.856 0.083 0.124 2.215 0.697 -0.732 -1.679 0.000 0.697 0.166 1.242 0.000 0.549 0.951 0.984 0.887 0.668 0.835 0.741 +1 1.476 1.568 -0.078 0.447 0.399 1.883 -0.118 -1.072 0.000 2.140 0.793 0.956 2.215 1.098 0.481 0.643 2.548 0.790 0.254 -0.330 0.000 1.222 1.507 0.980 1.219 0.511 1.519 1.331 +0 0.408 -0.006 -0.604 1.358 1.418 1.644 -0.568 -1.521 2.173 1.737 -0.687 0.404 0.000 0.805 -1.159 -0.656 0.000 2.236 -0.445 -0.017 3.102 1.571 1.022 0.999 0.946 1.983 1.376 1.147 +0 
0.716 1.172 1.551 1.366 1.627 0.755 -0.034 -1.242 0.000 1.030 -0.473 -0.108 2.215 0.398 2.166 -0.520 0.000 0.624 -0.976 0.794 3.102 0.777 0.832 0.999 1.629 0.583 1.573 1.196 +0 0.529 0.569 0.242 0.801 1.425 1.554 1.491 -1.574 0.000 0.848 0.358 0.701 2.215 0.978 0.377 -0.617 2.548 1.912 -0.483 0.240 0.000 1.038 1.078 0.987 0.648 0.898 0.856 0.756 +0 0.767 -0.513 -0.216 1.728 0.502 0.881 -1.602 1.356 2.173 0.732 -0.981 -1.110 2.215 0.435 -1.506 -1.410 0.000 0.574 1.302 -0.898 0.000 1.346 0.972 0.987 1.329 1.007 1.012 0.985 +0 0.548 1.261 -1.678 2.130 1.332 1.263 -0.548 -0.101 2.173 1.020 -0.249 -1.532 0.000 0.960 -0.284 -0.570 1.274 0.615 1.860 -0.325 0.000 1.144 0.899 0.995 1.758 0.587 1.198 1.063 +1 0.633 0.454 -0.153 2.363 1.139 1.706 1.004 -0.364 0.000 1.312 0.039 0.926 2.215 1.940 1.173 -1.251 0.000 0.636 -1.083 -1.067 0.000 0.747 0.881 1.556 0.900 0.873 0.717 0.719 +0 1.036 0.162 1.057 0.980 1.248 0.653 -0.576 1.666 1.087 0.742 -1.088 -0.453 2.215 1.412 -2.447 -0.101 0.000 1.220 0.209 -1.163 0.000 0.487 0.829 0.989 1.010 1.004 1.052 1.611 +1 1.447 -0.404 0.954 1.265 1.289 0.704 -0.658 -1.309 0.000 0.622 1.512 -1.019 0.000 1.879 1.395 -0.124 0.000 1.268 -0.077 1.525 3.102 1.198 1.430 0.980 1.191 1.148 1.454 1.529 +1 1.593 -0.829 -0.117 0.370 0.548 2.089 1.986 1.554 0.000 1.248 -1.053 0.135 1.107 1.225 -0.265 -0.019 0.000 2.031 -0.380 -1.095 3.102 0.721 0.861 0.980 0.654 1.365 0.806 0.666 +1 0.840 0.098 -0.423 0.974 -0.670 1.541 0.126 -0.165 2.173 1.544 0.450 1.301 0.000 1.057 -0.279 1.657 0.000 0.753 -1.256 -0.230 0.000 0.990 0.769 0.989 0.978 0.884 1.166 1.066 +1 0.629 0.733 1.078 0.766 1.733 1.110 0.394 0.927 0.000 1.041 0.310 -1.192 2.215 1.801 0.423 -0.405 2.548 0.876 0.928 0.522 0.000 0.771 1.295 0.983 1.080 0.954 1.038 0.912 +0 0.840 -2.103 1.356 1.345 -1.621 1.604 -1.130 0.555 2.173 1.297 -0.003 -0.846 2.215 0.759 0.432 -0.466 0.000 0.768 -0.654 -0.439 0.000 0.907 0.966 0.983 1.416 2.390 1.497 1.443 +1 0.875 -0.064 1.001 0.366 0.113 1.112 0.240 -1.741 2.173 1.296 0.167 0.279 0.000 0.679 -2.589 -0.639 0.000 1.834 0.121 -0.918 1.551 0.966 1.089 0.989 0.935 1.025 1.050 0.900 +1 0.731 1.426 0.457 0.537 -1.074 0.797 0.371 1.211 0.000 0.945 1.077 -1.326 2.215 1.534 0.887 -0.468 2.548 0.947 0.623 0.631 0.000 0.709 1.026 0.985 0.829 0.897 0.891 0.836 +1 0.875 0.472 -0.990 0.521 0.757 0.374 0.856 0.491 2.173 1.010 0.443 -0.100 2.215 0.668 -1.545 1.447 0.000 1.309 0.536 -1.476 0.000 1.583 1.226 0.987 0.731 0.496 0.942 0.814 +1 2.396 0.717 -0.491 0.480 -1.022 0.738 0.440 1.367 1.087 0.557 -0.834 1.037 0.000 0.374 0.546 1.049 1.274 1.121 1.494 1.006 0.000 1.802 0.947 0.993 1.216 0.192 0.789 0.884 +0 1.776 0.593 -0.124 0.651 -0.310 0.710 0.948 -1.633 0.000 1.127 0.570 1.129 2.215 1.216 0.489 0.630 2.548 0.716 0.612 -0.765 0.000 0.777 0.966 0.988 1.177 0.542 0.830 0.840 +0 0.900 0.091 -1.078 0.772 1.526 1.529 0.685 0.521 0.000 0.615 1.407 0.028 0.000 1.119 0.919 -1.426 2.548 1.044 0.832 -0.981 3.102 0.823 1.030 0.986 0.582 0.323 0.848 0.781 +0 0.447 -0.436 0.519 1.800 1.402 1.370 0.670 0.562 2.173 1.992 0.601 -0.909 0.000 0.812 0.315 -0.290 0.000 1.003 0.466 -1.325 3.102 1.067 0.731 0.992 1.075 1.232 1.132 1.062 +1 0.461 2.236 0.998 0.623 1.720 0.697 0.434 0.636 0.000 0.948 -0.127 -1.199 0.000 1.096 -0.277 -0.038 0.000 1.330 0.411 0.066 3.102 1.079 0.690 0.980 0.636 1.031 0.861 0.795 +0 2.362 1.842 0.820 1.941 0.692 1.687 2.717 -1.244 0.000 1.032 -0.834 -0.731 2.215 0.796 1.187 0.179 1.274 0.612 1.310 -0.613 0.000 0.929 1.203 0.978 0.853 1.440 2.163 1.605 +0 0.940 0.985 -1.660 1.077 1.299 
0.544 0.055 0.593 0.000 0.987 0.528 0.008 0.000 1.261 0.595 -0.866 2.548 0.405 -0.907 -1.450 3.102 0.873 1.038 0.989 0.966 0.604 0.809 0.893 +1 1.198 -1.432 1.002 0.523 0.588 2.532 -0.509 -1.078 0.000 1.107 -0.760 0.656 2.215 2.078 -1.826 0.602 0.000 0.686 1.246 -1.618 0.000 1.035 0.992 1.000 0.591 0.483 0.622 0.621 +0 0.910 -0.301 -1.104 0.698 1.283 1.076 -0.816 -0.953 2.173 1.483 -0.244 0.469 0.000 0.986 1.018 1.053 0.000 0.665 0.030 1.371 0.000 0.958 0.838 0.985 0.897 0.520 0.915 0.807 +1 0.542 0.428 0.866 1.325 -0.417 0.516 0.840 1.287 0.000 1.299 1.411 -0.473 2.215 1.215 0.083 1.630 0.000 0.520 -0.276 1.037 3.102 0.701 0.489 1.074 0.895 1.046 0.901 0.814 +0 0.999 -0.359 -1.350 0.839 0.327 0.612 -0.540 0.782 0.000 1.045 0.005 1.302 1.107 1.060 0.211 0.282 0.000 3.140 0.423 -0.869 3.102 0.855 0.876 1.266 1.097 1.571 1.084 0.934 +1 0.859 1.294 -0.430 0.802 1.612 0.351 -0.218 -0.883 0.000 0.910 2.266 1.057 0.000 0.483 0.569 -1.389 2.548 0.709 0.065 -0.292 1.551 0.923 0.731 1.109 0.697 0.394 0.482 0.672 +0 0.888 -0.256 1.000 1.243 0.203 0.731 -0.179 -1.514 2.173 0.708 0.163 -0.605 0.000 0.426 0.294 -0.950 2.548 0.425 1.382 0.553 0.000 0.849 0.914 0.988 0.692 0.382 0.690 0.677 +0 0.433 0.977 -1.303 1.957 -0.298 0.687 -0.015 1.295 0.000 0.578 0.888 1.109 2.215 1.255 -1.136 -0.329 2.548 1.568 -0.780 1.596 0.000 0.866 0.878 1.004 1.495 1.467 1.120 1.140 +0 1.600 -0.040 -0.314 0.844 0.906 1.197 -2.905 1.189 0.000 1.550 0.644 -1.168 2.215 0.707 1.879 1.653 0.000 1.105 -0.178 0.410 3.102 10.018 5.222 1.435 1.309 1.285 3.675 2.753 +1 0.744 0.660 -0.123 1.376 -1.602 0.654 0.895 0.686 0.000 0.992 1.088 -1.525 2.215 1.150 0.996 -0.631 2.548 1.578 1.634 0.497 0.000 0.898 1.087 1.362 0.823 0.818 0.896 0.856 +1 2.190 0.602 1.577 2.602 -1.692 3.165 -0.914 0.075 0.000 1.212 0.352 -1.655 0.000 0.684 0.466 -0.831 2.548 1.411 0.330 -0.349 3.102 0.771 1.005 0.969 0.920 0.319 0.921 0.833 +1 0.777 -0.015 0.275 1.059 1.347 0.533 0.139 -0.192 1.087 0.712 -0.969 1.401 0.000 1.203 0.048 -1.444 2.548 0.706 1.715 0.019 0.000 0.388 0.935 1.034 0.772 0.903 0.692 0.720 +0 1.222 -0.601 0.475 0.856 -0.182 0.609 -1.256 -1.026 0.000 0.957 -0.447 1.270 2.215 0.727 2.446 -1.397 0.000 0.535 -1.737 -0.164 0.000 0.714 1.036 0.989 0.534 0.173 0.626 0.699 +1 1.239 -0.256 -1.178 0.567 0.319 1.388 1.982 1.389 0.000 1.662 -0.080 -0.243 2.215 1.860 -0.528 0.165 2.548 0.554 0.561 -1.237 0.000 0.622 0.863 1.133 0.930 0.820 0.788 0.683 +1 0.729 -0.603 -1.321 0.620 0.403 2.046 -2.134 0.693 0.000 1.093 -1.442 -1.497 2.215 1.457 -0.777 1.538 0.000 3.858 -0.679 -0.521 3.102 0.906 1.598 0.985 0.910 1.562 1.681 1.293 +0 0.370 -0.694 0.657 0.094 -0.272 1.674 1.120 0.761 2.173 2.037 0.327 -0.879 0.000 0.721 0.617 -1.714 2.548 0.490 0.907 -0.611 0.000 0.590 0.754 0.830 0.864 1.125 1.292 0.990 +1 0.725 -0.266 0.322 2.285 1.034 0.513 0.128 -0.150 0.000 1.324 0.480 -1.444 1.107 0.876 1.170 -0.444 2.548 0.573 0.482 -0.949 0.000 0.578 0.882 1.067 1.261 1.010 1.109 0.906 +1 0.885 -1.423 1.186 0.773 -0.969 0.835 1.075 0.572 2.173 0.691 0.228 -1.461 0.000 0.868 -0.699 -0.474 2.548 0.508 -1.913 -0.597 0.000 1.372 0.914 1.068 1.774 1.419 1.211 1.079 +1 0.688 -1.923 0.359 0.362 0.375 0.990 -0.965 0.936 2.173 0.890 -0.939 -0.935 0.000 0.835 -0.684 -0.088 2.548 0.368 -0.753 -1.377 0.000 0.290 1.052 0.996 0.577 0.912 0.717 0.651 +0 0.601 0.486 -1.037 1.800 -0.090 0.465 -0.109 -0.730 2.173 0.862 -0.329 -1.439 2.215 0.609 -1.453 0.349 0.000 0.929 -0.190 1.008 0.000 0.769 0.882 1.086 0.674 0.565 0.703 0.765 +0 0.465 0.925 1.191 0.736 -0.762 0.998 2.336 -1.640 
0.000 1.773 1.662 -0.083 0.000 1.208 0.884 0.362 2.548 1.674 -0.583 -1.427 3.102 0.825 0.852 0.991 0.797 1.488 1.109 1.314 +1 1.742 0.523 1.733 1.093 1.055 0.748 1.424 0.261 2.173 0.237 0.823 0.177 0.000 1.138 1.723 -0.761 0.000 1.021 0.269 -0.831 3.102 0.738 0.773 1.096 0.849 0.942 0.898 0.838 +0 0.924 -1.270 -1.392 1.934 -1.215 3.614 -0.825 0.670 0.000 3.100 -0.509 -1.070 1.107 1.425 -0.503 0.358 2.548 1.784 -1.705 -0.724 0.000 4.459 2.474 1.005 1.872 2.144 2.535 2.255 +0 1.660 -0.592 -0.034 0.537 -0.909 0.877 -2.540 -1.692 0.000 1.236 -0.830 -1.204 2.215 1.495 -1.411 0.639 1.274 1.908 -1.111 1.143 0.000 1.300 0.864 0.986 0.980 1.527 0.979 0.928 +0 2.144 0.100 -0.768 0.690 -1.415 0.693 -0.377 0.097 0.000 0.570 0.804 0.546 0.000 1.416 0.206 1.288 2.548 1.003 0.578 0.200 3.102 0.930 0.961 0.986 0.854 0.786 0.837 0.761 +0 0.339 2.327 -0.609 0.732 0.577 1.173 0.314 0.577 2.173 1.272 0.830 -0.945 2.215 0.913 1.552 1.604 0.000 0.674 -1.568 -1.198 0.000 0.963 0.939 0.986 0.860 1.829 1.005 0.831 +1 0.809 -0.801 1.018 0.449 -0.819 1.119 0.745 -1.236 2.173 0.910 0.285 0.764 0.000 0.470 1.011 -0.496 2.548 0.703 -0.678 0.170 0.000 0.909 1.120 0.993 0.680 0.582 0.722 0.688 +1 1.857 0.222 -1.552 1.416 1.463 1.049 -0.319 -0.430 1.087 0.949 0.113 0.637 1.107 0.333 0.920 -0.178 0.000 0.390 0.688 0.425 0.000 0.309 0.786 0.994 1.088 1.246 1.158 0.889 +0 0.522 -1.696 -0.140 1.285 0.534 0.707 0.039 -1.123 0.000 1.076 1.622 -0.152 0.000 1.190 0.324 1.728 0.000 1.334 -0.454 -1.720 1.551 0.930 0.826 0.991 0.862 0.518 0.560 0.741 +0 0.546 0.346 0.875 0.982 -0.879 0.556 0.747 1.006 2.173 0.759 0.369 -0.888 0.000 0.731 -0.490 0.188 2.548 0.868 -0.431 1.669 0.000 0.926 0.934 1.015 0.686 0.767 0.680 0.621 +1 1.482 -0.171 0.270 0.861 -0.097 0.509 -0.883 0.082 2.173 1.341 -0.781 -1.414 2.215 1.248 0.822 1.456 0.000 0.702 1.376 -1.063 0.000 0.884 1.348 0.980 1.457 1.187 1.147 1.080 +1 0.430 -0.670 1.499 0.456 0.103 0.867 -1.514 -1.709 0.000 0.628 -1.330 -1.029 2.215 1.189 -1.129 -0.092 1.274 1.342 -2.226 0.014 0.000 1.925 1.212 0.989 0.710 0.689 0.893 0.755 +1 0.755 -0.517 -0.381 3.202 -1.002 0.895 0.037 1.459 2.173 1.332 0.240 0.846 0.000 0.571 -0.622 1.489 0.000 0.949 0.987 0.124 0.000 0.946 1.040 1.141 1.438 1.239 1.004 1.014 +0 1.486 0.284 -1.541 0.718 0.122 0.861 -0.668 -0.106 2.173 0.628 -0.493 1.009 0.000 0.751 -0.708 -1.237 0.000 1.193 -0.376 0.510 3.102 0.958 1.000 1.428 0.927 0.577 0.835 0.765 +1 0.609 -0.643 -1.518 0.919 1.312 1.122 0.280 1.631 1.087 1.284 1.244 -0.720 2.215 1.299 0.321 0.235 0.000 1.342 -0.272 -0.394 0.000 0.930 1.255 0.981 0.634 1.764 1.195 1.010 +1 1.099 0.508 1.145 0.660 -1.630 1.227 0.256 -1.515 0.000 2.024 -1.269 0.041 0.000 1.299 1.409 0.696 1.274 1.543 0.174 -0.878 3.102 4.389 2.541 0.992 1.048 1.323 2.074 1.595 +1 0.382 -1.186 1.277 0.523 0.655 0.990 0.113 -1.456 2.173 0.745 0.766 0.246 0.000 0.877 1.452 -0.412 0.000 0.533 -0.830 0.738 3.102 0.862 0.927 1.000 0.899 0.836 0.921 0.799 +0 0.585 1.053 1.593 0.253 -1.195 1.145 0.245 0.349 0.000 1.958 -0.055 -1.371 2.215 2.408 -0.419 -0.087 0.000 1.189 -1.116 -0.191 0.000 0.865 0.952 1.000 0.806 0.478 1.134 0.952 +1 1.340 0.480 -1.731 0.448 0.930 0.988 -0.727 -0.450 1.087 0.532 -1.555 -1.554 2.215 0.803 -0.850 0.276 0.000 0.564 0.417 0.561 0.000 0.609 0.808 0.994 1.218 1.014 0.923 0.792 +0 0.863 -1.714 -1.669 0.227 0.384 0.398 0.100 -0.268 0.000 0.816 0.196 1.501 2.215 1.047 -0.546 0.567 2.548 1.434 -0.891 -0.666 0.000 0.821 1.066 0.993 0.740 0.837 0.765 0.722 +0 1.350 0.840 1.470 0.266 1.728 0.861 -1.170 -0.853 2.173 0.867 -2.180 -0.134 
0.000 0.901 0.102 0.903 2.548 0.682 -0.811 0.005 0.000 0.691 0.904 0.987 0.802 1.335 1.319 1.521 +1 0.610 -1.273 -0.606 0.273 0.268 1.218 1.717 -1.523 0.000 1.106 -0.556 -0.491 2.215 0.880 1.153 1.079 0.000 1.566 -0.363 0.335 3.102 0.922 0.945 0.993 0.652 0.809 0.896 0.767 +1 0.441 1.438 -1.216 1.243 0.678 0.449 1.144 -0.723 0.000 0.521 0.958 0.504 0.000 0.679 -0.383 0.876 0.000 0.640 0.481 -0.440 3.102 0.922 0.926 1.015 0.640 0.503 0.687 0.631 +0 1.280 -0.474 -0.203 1.654 -0.722 0.899 -0.086 1.315 2.173 0.492 0.303 0.803 1.107 0.769 0.696 0.210 0.000 0.378 0.629 -0.750 0.000 0.452 0.858 0.989 0.891 0.479 0.924 0.754 +1 0.375 0.035 1.562 1.854 -0.327 0.838 -1.238 0.634 2.173 0.539 -0.855 -1.291 0.000 0.716 -0.797 -1.667 1.274 0.555 -1.061 -0.329 0.000 0.664 0.669 1.145 0.873 0.865 0.896 0.707 +0 0.834 0.192 0.717 0.858 -0.192 0.696 -1.246 1.494 0.000 0.883 -1.604 -0.670 0.000 1.004 0.048 -1.694 2.548 0.489 -0.502 0.480 3.102 1.581 0.958 0.982 0.844 0.527 0.780 0.940 +0 1.750 1.370 0.368 0.582 0.879 0.636 1.132 -1.639 0.000 0.477 2.454 -1.366 0.000 0.477 0.477 1.170 1.274 1.037 0.936 -0.467 0.000 0.920 0.742 0.984 0.650 0.402 0.517 0.651 +1 0.890 -0.737 1.551 0.729 -0.638 0.924 0.251 -0.685 1.087 0.313 -1.167 1.234 0.000 0.678 -2.092 1.004 0.000 1.094 -0.657 0.760 1.551 0.952 0.654 1.028 0.933 1.178 0.927 0.794 +1 1.144 0.466 -1.417 1.713 -1.573 1.543 0.032 -1.425 0.000 3.927 -2.156 0.209 0.000 0.975 0.126 -1.688 0.000 1.501 -0.571 -0.211 3.102 0.530 0.940 0.972 1.103 0.901 0.918 0.810 +1 1.995 -0.914 -0.352 0.483 1.417 0.951 -1.037 1.410 2.173 0.416 -1.616 -0.372 0.000 0.835 -0.820 0.764 0.000 0.382 0.129 -0.827 3.102 0.849 0.909 1.359 0.681 0.704 0.793 0.696 +0 2.339 1.351 -1.071 0.376 -1.258 0.705 -1.583 0.396 0.000 0.672 0.481 1.499 2.215 1.342 -0.371 0.815 1.274 0.983 -0.877 -0.276 0.000 0.867 0.811 0.980 1.832 0.750 1.219 1.088 +1 1.279 0.942 0.293 0.327 1.550 0.744 -0.591 -1.094 2.173 0.438 -0.529 -0.389 0.000 0.777 -0.369 1.052 2.548 0.640 -0.598 1.393 0.000 0.692 0.633 0.987 0.858 0.889 0.951 0.787 +1 0.429 -1.118 1.288 2.003 0.341 1.123 -1.946 -1.480 0.000 0.769 -0.771 -0.248 1.107 0.671 -0.832 -1.489 0.000 0.584 -1.239 -0.130 3.102 0.859 0.792 0.986 0.771 0.223 0.753 0.825 +0 0.684 0.255 1.428 1.156 1.251 1.330 -0.256 0.195 2.173 1.474 0.875 -1.484 2.215 0.732 -0.637 -0.303 0.000 1.169 0.421 -0.765 0.000 0.772 0.976 0.983 0.890 2.413 1.393 1.169 +1 0.680 0.346 0.863 1.610 0.085 0.951 0.897 -1.156 2.173 0.454 0.784 -0.643 2.215 0.465 1.640 1.644 0.000 0.444 1.313 1.039 0.000 0.267 0.655 0.989 0.666 0.435 0.771 0.651 +1 0.672 0.258 -0.037 0.602 1.520 1.438 0.525 -0.297 0.000 1.309 0.542 1.158 0.000 1.865 0.857 -1.636 2.548 0.960 -1.030 1.396 0.000 1.526 1.433 0.986 0.939 1.324 1.207 0.999 +1 0.644 0.375 0.727 1.277 -1.665 1.216 0.507 -0.139 0.000 1.005 0.730 1.468 2.215 0.770 0.063 -1.008 2.548 0.729 -0.143 -0.009 0.000 1.015 0.808 1.049 0.629 0.807 0.821 0.762 +1 1.025 -0.156 0.395 0.555 -0.689 1.617 -0.384 -0.805 1.087 1.186 0.085 1.025 0.000 1.271 -0.836 1.411 0.000 0.817 0.798 0.587 3.102 0.793 0.628 0.990 1.033 1.454 1.044 0.889 +1 1.229 -0.987 0.984 1.625 0.459 1.840 -0.156 -0.814 2.173 0.804 -1.306 1.252 0.000 1.340 -1.950 -1.413 0.000 1.006 -2.377 0.904 0.000 1.036 1.027 0.990 2.115 1.129 1.523 1.357 +1 1.224 0.125 1.468 1.754 0.910 1.006 -0.017 -0.838 0.000 1.224 -0.446 0.042 2.215 0.962 0.128 -1.266 0.000 0.380 -2.194 0.640 0.000 0.678 1.213 0.985 0.731 0.673 0.819 0.921 +0 1.026 -0.837 -1.322 0.871 -0.833 0.778 -0.554 0.961 0.000 0.502 -1.477 0.541 0.000 1.895 -1.229 
-0.065 0.000 2.026 -1.270 1.503 3.102 0.825 0.902 0.978 0.569 1.290 0.871 0.872 +1 1.260 0.639 -0.257 1.682 -0.013 1.895 0.822 -0.043 2.173 0.592 2.276 -0.690 0.000 3.260 -0.058 -1.552 2.548 1.826 1.721 1.160 0.000 0.877 1.013 0.992 1.828 3.349 1.716 1.345 +0 0.620 0.314 -0.192 1.328 -0.820 0.422 0.413 1.735 2.173 0.476 0.709 1.095 2.215 0.655 -1.324 0.088 0.000 0.869 -0.361 1.102 0.000 0.795 0.842 0.979 0.788 0.375 0.621 0.796 +1 0.560 -1.264 0.255 0.782 -1.350 1.316 -0.236 0.324 2.173 0.965 -0.090 -1.393 0.000 0.819 2.619 1.632 0.000 1.167 0.052 -0.580 0.000 0.931 0.871 0.985 1.336 0.828 1.067 1.015 +1 0.749 0.204 1.742 0.553 -0.350 1.494 0.653 -1.541 2.173 2.007 0.257 0.570 1.107 2.025 0.668 -0.590 0.000 1.355 -0.515 -0.187 0.000 1.482 1.739 0.990 0.845 2.461 1.593 1.217 +1 0.685 0.424 0.338 0.627 -1.232 0.840 0.925 0.196 0.000 0.901 -0.615 1.411 2.215 0.789 -0.605 -1.231 0.000 0.858 0.527 -1.217 3.102 1.913 1.158 0.988 0.762 0.771 0.938 0.778 +0 1.524 1.163 0.206 0.892 0.930 0.856 2.530 -0.344 0.000 0.875 0.443 -1.453 0.000 0.814 1.232 -1.300 2.548 1.493 0.946 1.337 3.102 0.863 1.083 0.988 0.776 0.591 0.827 0.883 +1 2.726 -0.959 1.515 0.993 0.857 0.689 0.048 -0.499 1.087 0.399 0.377 0.182 0.000 1.044 -0.874 -0.032 0.000 1.665 -0.493 -1.152 3.102 0.864 1.093 1.272 1.508 0.729 1.076 1.039 +1 0.697 -0.862 -1.005 1.485 1.106 1.777 -0.538 1.455 2.173 1.495 -0.146 -1.191 0.000 2.955 -2.213 -0.262 0.000 2.188 0.859 0.280 0.000 0.889 0.654 1.333 1.026 0.824 1.123 1.039 +0 1.584 -0.268 -0.318 2.260 0.082 1.848 0.813 -1.672 2.173 0.909 0.064 1.524 0.000 1.443 0.306 0.379 2.548 0.827 -0.046 -1.237 0.000 0.941 1.015 0.995 2.383 2.019 1.625 1.344 +1 1.610 0.241 0.168 0.129 -0.424 0.883 -1.800 -1.532 0.000 0.933 -1.216 1.120 2.215 0.729 -0.186 -1.606 2.548 0.525 1.093 -0.906 0.000 1.372 0.950 0.980 1.318 0.734 0.920 1.071 +1 0.950 -0.781 -1.158 0.787 -0.639 0.861 0.983 1.330 2.173 0.970 1.000 -0.055 2.215 0.410 1.177 -1.657 0.000 0.481 2.316 0.583 0.000 0.595 0.741 0.994 1.930 1.276 1.557 1.389 +1 1.338 0.583 -1.372 1.228 -0.050 0.819 -0.233 0.465 2.173 0.479 -1.045 -0.913 0.000 0.541 0.606 -1.127 0.000 1.051 0.133 1.188 0.000 0.784 1.007 1.650 0.880 0.575 0.817 0.746 +1 0.753 0.368 -1.717 1.226 0.573 0.918 -0.947 -0.861 2.173 0.525 1.152 -1.702 0.000 0.961 -0.474 0.189 0.000 1.373 0.258 -0.348 3.102 0.834 0.838 1.172 0.829 0.962 0.937 0.795 +0 0.668 -0.091 -0.005 1.274 1.444 0.696 1.454 -0.428 0.000 0.806 0.785 1.439 2.215 0.439 2.187 0.597 0.000 1.025 -1.397 1.628 0.000 0.928 0.968 1.234 0.810 1.103 0.837 0.895 +0 0.898 1.499 -1.176 0.614 -1.661 0.397 0.951 -0.280 2.173 0.591 -0.235 1.231 0.000 0.933 -0.329 0.198 0.000 0.483 0.593 0.886 1.551 0.916 0.859 0.974 0.574 0.406 0.519 0.626 +0 1.626 0.256 1.590 1.225 1.056 0.786 -0.760 -0.263 1.087 1.168 -0.744 -0.741 2.215 1.096 -0.033 -1.398 0.000 1.893 -0.202 0.147 0.000 0.940 1.027 0.992 1.346 0.588 1.103 0.925 +1 0.565 -0.087 1.533 1.256 -0.860 0.984 1.911 0.693 0.000 0.825 0.638 -0.920 0.000 0.367 1.448 1.276 1.274 0.783 0.823 -0.267 3.102 1.876 1.220 0.988 0.682 0.422 0.821 0.720 +0 1.655 0.577 1.033 2.347 1.377 1.478 -0.847 -0.517 0.000 0.503 0.263 -1.367 2.215 0.562 0.668 -0.668 0.000 0.394 -0.173 0.798 1.551 1.460 0.947 0.993 0.815 0.386 0.697 1.105 +1 0.875 -0.071 0.384 0.425 -1.563 1.362 0.424 1.481 1.087 1.129 0.526 -0.258 0.000 1.246 -0.957 -0.650 0.000 0.793 0.611 0.620 0.000 0.884 0.713 0.983 1.067 0.667 0.877 0.797 +1 2.092 -0.951 -1.724 0.926 1.157 0.908 -1.167 -0.445 1.087 0.904 -0.543 0.041 0.000 0.780 -0.269 1.273 0.000 0.429 0.966 
0.098 3.102 1.167 1.108 0.999 0.984 1.009 0.998 0.907 +0 0.322 2.013 1.676 2.849 1.332 1.346 -0.884 -0.195 1.087 0.563 0.988 -1.600 0.000 0.609 0.303 0.423 0.000 0.841 -0.451 -0.699 0.000 0.924 0.662 0.985 2.528 1.052 1.598 1.236 +0 0.752 -0.129 -1.204 0.627 -1.436 0.431 0.717 1.551 2.173 0.539 -0.220 -0.295 0.000 1.098 1.507 0.512 2.548 0.630 0.007 0.311 0.000 0.404 0.867 0.984 0.599 0.805 0.690 0.675 +1 0.691 -1.621 -0.259 1.555 0.158 1.274 -0.914 1.587 2.173 0.335 -2.450 -0.486 0.000 0.796 -0.268 -0.984 0.000 0.485 0.649 -1.525 3.102 0.829 0.823 0.993 1.436 0.842 0.989 0.849 +1 1.876 0.909 -1.176 0.559 1.672 0.722 -0.774 0.419 0.000 1.183 1.444 0.731 2.215 0.612 0.041 -0.285 0.000 0.560 0.550 1.452 3.102 0.873 0.757 0.983 1.264 0.543 0.938 0.983 +1 1.331 -0.092 -0.994 1.308 1.712 1.007 0.822 0.281 0.000 0.859 0.135 -1.635 0.000 1.124 -0.660 0.525 1.274 0.603 -1.931 -0.359 0.000 1.697 0.980 1.181 1.099 0.494 0.739 0.776 +1 0.419 2.187 -1.220 1.085 1.131 1.022 0.362 -0.539 0.000 0.755 1.364 1.547 2.215 0.877 0.941 -0.136 0.000 1.038 -0.168 1.066 3.102 0.837 1.057 0.993 0.563 0.788 0.895 0.835 +1 0.691 2.044 -1.157 0.901 0.396 0.849 1.435 1.142 0.000 1.073 0.789 -0.638 2.215 0.798 -0.091 1.287 2.548 1.099 1.484 -0.395 0.000 1.471 1.207 1.077 0.978 1.075 0.964 0.895 +0 1.158 -0.507 -0.255 0.761 1.359 0.912 1.854 -1.017 0.000 0.292 2.200 -0.192 0.000 0.738 0.857 1.122 2.548 0.942 -0.419 1.363 3.102 0.773 0.940 1.292 0.738 0.520 0.911 1.059 +0 1.587 -0.621 -0.508 1.242 -0.873 1.131 -0.936 0.923 0.000 0.952 -0.051 -1.591 2.215 1.112 -0.473 0.509 0.000 0.737 -0.502 1.430 3.102 0.854 1.291 0.984 0.807 0.373 0.769 0.958 +0 1.535 -0.133 1.283 0.386 -1.361 0.879 -0.178 -0.664 2.173 0.844 -0.463 0.324 1.107 0.964 0.076 0.049 0.000 1.198 -0.759 -0.964 0.000 1.031 1.035 0.985 1.069 1.004 0.844 0.802 +0 1.235 1.100 0.371 0.195 1.652 1.328 0.457 -0.068 0.000 1.284 -2.278 -1.038 0.000 1.674 -1.786 -1.418 0.000 1.538 -0.206 0.959 0.000 0.945 0.837 0.984 0.586 0.813 1.082 1.295 +1 0.907 -1.094 1.692 0.265 -0.079 1.223 -0.780 -1.534 0.000 1.545 1.597 0.086 0.000 0.851 -0.122 0.047 2.548 0.932 0.058 -1.118 1.551 1.514 0.987 0.982 0.881 0.594 0.778 0.760 +1 0.734 1.633 0.916 0.758 -1.131 0.888 -0.235 0.128 2.173 0.433 1.692 -1.438 0.000 0.654 0.427 0.970 0.000 0.730 -0.174 -0.965 3.102 0.873 1.187 0.995 0.787 0.710 0.908 0.795 +1 0.726 -0.462 -1.239 3.277 -0.698 2.209 -0.448 1.021 0.000 0.813 0.613 0.920 2.215 1.079 -0.514 -0.117 2.548 0.756 -1.246 -0.849 0.000 2.238 1.577 1.003 0.819 1.019 1.133 1.342 +1 2.003 0.130 0.243 1.402 -0.472 0.413 1.107 -0.456 2.173 0.870 -0.677 -1.525 0.000 1.314 0.099 1.133 0.000 0.897 0.654 -1.409 3.102 0.926 0.697 1.393 1.007 0.499 0.719 0.655 +1 1.214 0.251 -1.419 0.777 -0.443 0.685 1.359 0.071 2.173 0.669 0.347 1.588 0.000 0.444 1.465 0.648 0.000 1.101 1.230 1.150 3.102 0.838 0.916 1.038 0.890 0.760 0.801 0.711 +0 1.751 0.332 -0.335 2.592 -1.139 0.967 0.260 0.921 1.087 1.492 0.565 0.405 0.000 1.208 -0.033 1.388 2.548 0.931 -0.513 -1.369 0.000 1.820 1.278 1.955 1.433 0.585 1.247 1.180 +1 0.939 0.927 -1.455 0.326 -0.624 0.590 1.211 0.906 1.087 1.171 1.130 1.715 2.215 2.160 2.301 -0.013 0.000 0.700 1.796 -0.840 0.000 0.954 1.230 0.990 0.720 0.815 1.108 0.931 +1 0.981 -0.254 0.941 1.470 -1.699 0.966 1.144 -0.794 0.000 1.527 -0.037 0.404 2.215 1.035 0.751 -1.393 0.000 0.708 0.458 -0.085 3.102 0.970 0.762 1.150 1.174 0.488 1.017 1.041 +0 0.523 1.008 1.355 0.833 -0.562 0.564 0.528 -0.076 0.000 0.705 -0.533 1.532 2.215 0.894 0.743 -1.314 2.548 1.540 -0.012 0.720 0.000 1.015 1.006 
0.985 0.776 0.773 0.774 0.684 +0 0.639 -0.478 -1.467 1.897 1.313 0.291 -0.117 -0.337 2.173 0.429 1.468 -0.930 0.000 0.886 0.872 -0.204 0.000 0.382 -1.038 0.100 1.551 0.702 0.711 0.987 0.837 0.250 0.562 0.768 +0 0.385 -1.086 0.594 0.279 0.292 1.881 0.494 -1.581 2.173 2.330 -0.184 0.320 2.215 1.086 0.456 -1.139 0.000 0.742 0.610 -0.280 0.000 0.702 1.012 0.982 0.880 3.234 1.517 1.173 +0 0.570 0.216 0.671 0.733 -0.971 0.980 -0.654 -0.909 2.173 0.551 0.333 0.954 0.000 0.562 1.042 -1.561 0.000 2.065 0.946 0.443 3.102 0.931 0.975 0.989 1.006 2.091 1.092 0.889 +0 0.330 -1.826 1.621 1.614 -1.733 1.749 0.072 0.221 0.000 1.558 -0.933 -1.128 2.215 0.696 -0.514 0.733 0.000 0.711 1.300 -0.801 0.000 1.092 0.965 0.992 0.923 0.894 1.239 1.115 +0 1.117 -0.543 0.562 0.337 -1.426 0.551 -0.984 -1.050 2.173 0.858 1.271 1.476 2.215 0.609 1.675 0.018 0.000 0.559 -0.095 -0.603 0.000 0.810 0.863 0.988 0.775 1.631 1.034 0.953 +0 1.211 0.751 -1.263 0.819 1.254 1.252 0.622 0.011 2.173 1.275 -0.417 -1.538 2.215 0.805 0.097 0.416 0.000 0.677 0.647 1.158 0.000 0.576 0.984 1.057 1.243 2.100 1.175 0.932 +0 0.642 -0.181 -1.409 0.687 -1.123 0.893 1.362 0.201 0.000 1.003 -0.090 0.652 0.000 1.150 -0.642 1.589 0.000 0.919 0.347 1.454 0.000 0.888 0.994 0.982 1.559 0.497 1.098 1.000 +1 2.163 0.414 -1.161 1.134 -0.615 0.820 -0.852 0.502 2.173 0.666 -0.323 -1.693 0.000 0.766 0.936 0.893 0.000 0.970 0.736 0.250 3.102 1.131 0.856 1.027 1.601 0.955 1.098 0.980 +0 0.543 0.473 0.242 1.231 -0.853 0.700 0.799 1.131 1.087 0.454 1.647 0.350 0.000 0.566 1.963 1.072 0.000 1.578 0.035 -1.547 3.102 0.930 0.943 0.990 0.924 0.859 0.749 0.693 +0 1.111 -0.047 0.415 0.190 0.984 0.449 1.469 0.037 0.000 0.889 1.165 1.382 1.107 0.935 0.902 -1.419 2.548 1.379 1.439 -0.859 0.000 0.880 1.003 0.985 1.038 0.570 0.899 0.959 +0 1.060 -0.651 -1.154 0.427 -1.155 0.586 0.321 0.817 2.173 0.858 -0.083 0.352 2.215 0.778 1.006 -0.724 0.000 0.801 -0.130 -1.327 0.000 0.738 0.900 0.978 1.193 0.476 0.924 0.878 +0 1.039 -0.349 -1.377 1.023 -0.560 0.831 -1.378 0.530 0.000 1.469 0.009 -1.686 1.107 1.089 -0.490 0.349 0.000 0.898 -0.533 -0.677 3.102 0.803 0.849 0.986 0.879 0.887 1.050 0.945 +0 0.344 -1.812 0.167 0.333 -0.778 1.433 -0.548 -1.281 2.173 1.115 -0.761 0.591 2.215 0.837 1.918 0.265 0.000 0.719 -1.674 1.157 0.000 0.839 1.563 0.990 0.952 1.860 1.569 1.213 +0 3.324 0.471 -0.979 0.980 -0.596 1.497 0.292 0.818 2.173 0.641 -0.249 0.424 0.000 1.615 -0.527 1.039 0.000 1.385 1.067 -1.176 1.551 0.861 0.913 0.997 0.697 1.678 1.398 1.351 +1 0.536 -0.242 1.194 1.174 -0.234 0.598 -0.563 -0.689 0.000 0.871 -1.289 1.003 2.215 1.345 -0.048 0.986 2.548 1.015 -1.673 -0.679 0.000 0.933 1.118 1.055 0.803 0.784 0.945 0.837 +1 1.147 -1.031 -1.239 0.426 1.431 1.639 1.635 1.527 0.000 1.115 -0.408 0.007 2.215 1.803 0.503 0.069 2.548 0.682 -0.751 -0.161 0.000 0.609 0.669 0.984 0.975 0.766 0.928 0.733 +1 0.755 -2.203 -0.889 0.666 0.354 0.754 -0.376 0.014 0.000 0.927 -2.655 -1.572 0.000 0.771 -0.054 -0.311 2.548 1.589 -0.171 1.429 3.102 3.143 1.997 0.987 1.044 0.848 1.378 1.117 +1 0.999 -1.843 -0.413 1.144 -0.594 0.794 -0.472 0.977 2.173 0.446 -2.088 0.772 0.000 0.714 0.197 -1.179 2.548 0.530 -0.491 1.640 0.000 0.705 0.857 1.008 1.230 0.934 0.913 0.791 +0 0.971 0.068 -0.483 0.753 0.269 0.696 0.891 -1.638 0.000 0.539 -0.637 -1.343 0.000 1.221 -0.147 0.808 2.548 0.517 0.452 0.041 1.551 1.168 0.823 0.979 0.845 0.444 0.706 0.715 +0 1.011 -1.399 0.504 0.375 1.333 1.412 -1.194 -0.135 2.173 0.951 -1.113 -1.635 0.000 1.828 -0.520 1.504 0.000 0.429 0.079 -0.420 3.102 0.857 0.790 0.993 1.001 0.611 
1.116 0.930 +0 1.798 -0.380 1.201 0.292 -0.363 1.298 0.556 1.519 1.087 2.321 0.781 -0.286 2.215 0.683 0.506 -1.300 0.000 0.561 -1.441 -0.454 0.000 1.072 1.300 0.991 1.694 2.569 1.472 1.226 +1 1.381 1.042 0.792 0.943 -1.682 0.741 2.007 -0.813 0.000 1.353 1.019 -0.145 2.215 0.623 0.896 -1.256 0.000 0.948 1.111 -1.029 1.551 0.818 1.058 1.252 0.812 0.745 0.814 0.801 +1 0.662 -2.291 1.023 0.997 -0.962 0.777 -0.641 -0.630 2.173 0.789 -0.531 0.225 2.215 1.397 -1.186 1.103 0.000 0.782 -1.886 1.742 0.000 0.852 1.025 1.099 1.160 0.804 0.949 0.903 +1 0.745 -0.227 -0.881 0.279 -0.044 0.737 0.507 -1.677 0.000 0.858 -0.464 -0.222 0.000 1.288 -0.537 1.277 2.548 1.624 0.185 0.268 0.000 0.856 1.118 0.988 0.530 0.256 0.664 0.674 +0 0.585 1.509 1.709 1.521 0.616 0.919 -0.517 -1.398 0.000 0.881 0.698 0.136 2.215 0.471 0.250 0.554 2.548 1.016 -0.143 -0.071 0.000 1.397 0.943 1.089 0.844 0.295 0.800 0.969 +1 0.600 -0.148 0.432 0.574 -0.715 1.086 -1.158 0.691 0.000 0.567 -1.347 -1.365 0.000 0.360 -0.871 -0.234 0.000 1.902 -0.289 -1.110 3.102 0.934 0.748 0.986 0.516 0.242 0.537 0.596 +1 1.006 0.090 -0.264 2.000 0.367 1.047 -0.827 -1.487 2.173 0.576 -0.059 1.492 0.000 0.882 0.245 -0.815 0.000 0.572 -0.433 0.832 3.102 0.972 0.961 1.058 0.615 0.722 1.003 0.868 +0 1.356 1.606 -1.658 0.958 0.660 0.526 -2.811 1.407 0.000 0.814 0.816 -0.492 2.215 1.240 0.211 -0.078 2.548 1.254 0.867 0.145 0.000 0.816 0.710 1.372 1.067 0.510 0.920 0.751 +1 0.912 -0.332 1.580 0.317 -1.246 2.927 1.235 -0.135 0.000 2.463 1.021 1.407 0.000 2.055 0.537 -1.557 2.548 1.019 0.660 0.911 0.000 0.932 1.154 0.991 0.860 0.985 1.013 0.865 +0 0.953 -1.152 -0.398 0.699 1.200 0.912 -0.385 1.061 2.173 1.943 -0.983 -0.879 2.215 0.901 -1.101 1.192 0.000 1.052 -1.166 0.464 0.000 0.663 0.717 1.121 0.990 2.025 1.097 0.911 +1 0.830 1.119 1.314 0.918 -1.198 0.641 1.276 -1.065 0.000 0.810 0.948 0.253 0.000 1.145 0.031 0.868 1.274 0.858 1.868 -0.782 0.000 1.140 1.172 0.990 0.606 0.584 0.691 0.689 +1 0.485 1.001 -0.506 1.485 -0.200 1.615 -0.301 -1.710 2.173 1.470 1.471 0.276 0.000 0.860 1.008 1.581 0.000 1.107 0.533 -1.132 3.102 1.631 1.223 0.998 2.858 0.989 1.773 1.573 +1 1.273 -1.399 0.647 1.112 1.260 0.818 -0.549 0.056 0.000 0.611 -0.030 -0.510 2.215 1.634 -0.886 -1.443 2.548 0.983 -0.742 -0.980 0.000 1.125 1.185 0.986 1.186 0.947 0.986 0.941 +1 1.581 0.452 0.538 1.062 -0.067 1.022 -0.940 -1.242 2.173 0.604 -0.481 1.160 2.215 0.484 -1.251 -1.686 0.000 0.686 -0.369 -0.222 0.000 0.687 0.678 0.986 0.823 0.994 1.081 0.854 +1 0.762 0.590 -1.425 0.709 -1.520 0.799 -0.360 -1.533 0.000 1.223 -1.587 0.577 0.000 1.923 0.010 0.079 2.548 1.845 -0.478 -0.506 3.102 2.443 1.691 0.981 1.138 0.841 1.285 1.112 +0 2.481 -0.420 -0.702 0.443 0.261 0.954 -1.506 1.408 0.000 0.771 -1.134 -0.060 2.215 0.826 -0.002 0.978 2.548 0.881 -2.041 0.574 0.000 1.171 1.142 1.108 0.982 0.860 0.881 1.017 +1 0.549 1.171 -1.446 0.446 1.438 0.999 -0.156 0.346 2.173 0.925 -1.075 -0.886 0.000 0.865 -0.039 -1.292 0.000 1.364 0.555 1.112 3.102 0.908 1.215 0.982 0.979 0.945 1.017 0.863 +0 0.839 -0.076 -1.000 1.018 0.212 1.104 -1.153 0.632 0.000 1.291 -1.067 -1.402 2.215 0.647 -1.951 -1.503 0.000 0.539 -1.163 -0.248 1.551 1.623 0.948 1.136 1.109 0.659 0.876 0.907 +1 0.669 -0.427 -1.252 1.465 -0.204 1.268 -1.237 -1.576 0.000 0.862 2.113 0.638 0.000 0.442 -0.467 -1.631 0.000 2.089 0.217 0.292 3.102 0.511 1.583 1.112 0.905 0.650 1.251 1.045 +1 0.348 -1.474 0.773 1.901 1.470 0.905 0.374 -0.524 2.173 0.297 1.349 -0.780 0.000 0.720 0.884 0.592 2.548 0.397 -0.545 0.546 0.000 0.683 0.632 0.983 0.915 0.902 0.973 0.775 
+[... several hundred data rows: each row is a binary class label (0 or 1) followed by 28 space-separated floating-point feature values ...]
1.613 0.000 2.021 -0.856 -0.884 0.000 0.980 0.452 0.320 3.102 1.384 1.154 1.161 0.629 0.615 0.837 0.897 +1 0.865 -1.037 1.186 0.394 -0.583 0.502 1.204 1.336 2.173 0.684 0.496 0.259 2.215 0.278 0.413 0.987 0.000 0.451 -1.085 0.179 0.000 0.957 1.126 0.990 0.935 0.777 0.985 0.829 +1 1.240 1.307 1.299 1.528 0.079 0.944 0.338 0.051 1.087 1.051 1.890 -1.138 0.000 0.910 2.392 1.443 0.000 0.379 1.399 -1.526 0.000 1.210 1.035 1.698 1.250 0.296 1.158 1.066 +1 0.643 -0.182 -0.340 1.487 0.861 0.603 -0.911 -1.638 2.173 0.604 0.230 -1.586 0.000 0.648 0.136 -0.994 2.548 0.366 0.027 0.884 0.000 0.489 0.552 1.196 0.798 0.611 0.705 0.584 +1 0.829 -0.071 0.543 0.553 1.265 0.747 -0.035 0.101 0.000 1.006 -2.127 0.258 0.000 2.299 -0.580 -1.358 2.548 0.841 -0.419 -0.850 0.000 0.959 1.100 0.994 1.334 0.664 1.004 0.947 +0 1.720 -0.296 -0.169 1.160 -0.078 0.851 -0.393 -1.717 1.087 0.350 -0.436 -0.834 0.000 0.716 0.336 1.027 2.548 0.830 1.407 1.257 0.000 0.779 0.840 0.983 1.382 0.713 0.953 0.885 +0 0.672 0.625 -0.526 0.644 1.011 1.386 -0.151 0.156 2.173 1.178 0.875 1.688 2.215 0.802 0.169 -1.621 0.000 0.615 -1.230 -1.088 0.000 0.805 0.996 0.990 0.867 2.117 1.161 0.923 +0 0.373 0.109 0.285 1.698 -1.451 0.381 -0.396 -0.530 0.000 0.551 0.916 1.497 2.215 0.566 -2.507 0.323 0.000 0.719 -0.878 0.240 3.102 1.085 0.664 1.103 0.698 0.842 0.866 0.866 +1 0.672 -0.009 1.347 1.585 -0.722 0.665 1.433 -0.558 2.173 0.780 -0.339 0.651 0.000 0.818 -0.811 1.463 2.548 0.877 0.943 0.905 0.000 0.907 0.853 1.369 1.109 1.606 0.977 0.916 +0 0.825 0.336 1.379 1.440 -1.182 0.664 -0.103 0.606 2.173 0.384 -1.116 -0.635 0.000 0.480 -2.299 0.784 0.000 0.595 1.104 -0.435 3.102 0.799 1.018 1.119 0.708 0.745 0.852 0.910 +0 0.524 1.551 0.146 1.132 0.718 1.108 -0.010 -0.407 2.173 1.169 0.581 1.459 2.215 0.611 1.057 -0.709 0.000 0.766 -0.025 -1.707 0.000 0.759 0.880 0.990 0.874 1.742 1.009 0.829 +0 1.233 -0.207 1.298 0.877 -1.291 0.460 0.408 -0.213 0.000 0.377 -1.409 0.111 0.000 0.441 -0.590 0.364 2.548 0.963 -0.696 -1.611 0.000 0.981 1.026 1.043 0.661 0.654 0.691 0.709 +0 1.106 -2.254 -1.116 1.357 -1.637 0.854 -0.450 -0.322 1.087 0.558 -0.066 0.288 0.000 1.010 -0.905 1.157 0.000 0.654 1.092 0.727 3.102 0.982 1.032 0.985 1.712 1.019 1.329 1.124 +1 0.748 1.891 -1.163 0.187 0.575 0.598 0.381 1.678 0.000 0.582 -2.101 0.373 0.000 0.606 0.757 -0.258 2.548 1.340 1.884 -0.704 0.000 1.871 1.139 0.996 0.727 0.635 0.816 0.701 +1 0.355 0.772 -0.669 0.158 0.164 1.297 -0.633 -1.256 2.173 1.239 -0.477 1.126 0.000 1.004 -0.484 0.571 0.000 1.413 1.011 -0.150 0.000 0.821 0.744 0.988 0.881 0.990 0.966 0.808 +1 0.675 0.590 -1.736 1.093 0.373 0.393 0.561 -0.798 0.000 0.445 -0.475 -0.058 2.215 0.678 2.048 -0.626 0.000 1.079 -0.640 1.513 1.551 0.888 0.965 1.126 0.838 0.624 0.861 0.774 +0 1.614 0.625 -0.385 0.725 -1.054 1.083 0.623 0.844 0.000 1.065 0.623 -1.284 1.107 0.981 -0.443 0.573 2.548 1.159 0.621 1.397 0.000 0.952 0.946 0.986 0.799 1.259 0.913 0.920 +0 5.215 -0.529 -0.590 0.950 -1.016 2.811 -0.064 1.194 2.173 0.355 -2.657 1.024 0.000 1.084 0.081 0.582 0.000 0.597 -0.591 0.253 3.102 0.970 1.257 1.153 0.838 1.123 2.055 1.554 +0 0.329 -0.022 1.457 1.778 -0.820 1.066 -0.844 0.527 1.087 0.329 0.205 0.964 0.000 0.604 -1.070 -1.117 1.274 0.543 -2.355 1.584 0.000 1.233 1.085 0.989 0.781 1.009 1.012 0.978 +0 0.775 0.068 1.509 0.316 -1.264 1.272 -1.673 -1.235 0.000 0.951 -1.276 0.708 2.215 1.863 -0.539 1.219 2.548 1.413 -0.922 -0.364 0.000 0.904 1.159 0.988 0.817 0.817 0.764 0.722 +0 3.963 -1.376 -1.235 0.880 -0.869 1.393 -0.173 0.660 0.000 1.043 -0.323 0.359 0.000 0.891 
-1.368 -1.695 2.548 1.305 0.987 0.400 3.102 0.718 1.051 0.979 0.699 1.668 1.632 1.701 +0 0.992 -0.653 0.341 0.352 -0.721 0.704 -0.348 0.950 0.000 0.655 -2.866 -0.991 0.000 0.928 2.360 1.466 0.000 1.094 1.415 0.681 0.000 0.886 0.850 0.980 1.147 0.223 0.897 1.297 +1 1.554 0.726 -1.122 0.344 -1.194 0.944 1.634 -0.295 0.000 1.908 -0.085 0.964 2.215 0.768 -0.699 -0.675 0.000 1.071 0.390 0.555 0.000 1.111 0.810 0.974 1.665 0.842 1.109 0.981 +0 0.978 -0.864 1.592 0.484 -1.055 0.389 2.279 0.018 0.000 0.494 0.321 -0.083 2.215 0.698 -0.003 1.127 2.548 0.500 0.857 1.398 0.000 0.785 0.868 0.993 0.743 0.563 0.608 0.744 +0 0.598 -0.274 0.944 0.450 1.638 0.505 -1.200 0.496 0.000 0.619 -1.484 1.403 2.215 1.635 -0.882 -0.175 2.548 2.615 1.133 -1.172 0.000 3.416 2.437 0.984 1.260 1.100 1.699 1.363 +1 0.897 -0.100 -0.850 0.931 -1.674 0.684 -0.937 0.423 0.000 0.894 0.137 0.007 0.000 0.823 1.058 -1.600 2.548 0.792 -0.340 -1.740 3.102 1.126 0.945 0.985 0.626 0.542 0.871 0.811 +0 0.891 -0.682 1.531 0.993 1.040 1.086 -0.848 0.122 0.000 1.023 -0.962 -1.575 2.215 0.646 0.195 -0.867 2.548 0.837 -0.897 -0.593 0.000 0.890 0.878 1.000 0.897 0.755 0.838 0.865 +0 0.997 -1.911 -0.309 1.684 0.583 0.604 -0.448 -0.910 1.087 1.524 -0.335 -1.424 0.000 1.157 -0.531 -0.032 0.000 2.897 -0.563 0.560 0.000 0.942 0.885 1.293 1.324 0.980 1.250 1.161 +0 0.377 -2.261 -1.261 1.582 -1.492 0.870 -0.337 -0.204 0.000 0.581 -0.663 0.919 2.215 0.502 -0.212 1.246 2.548 1.082 0.243 0.576 0.000 1.069 0.876 0.990 0.653 0.212 0.577 0.762 +0 1.160 0.945 1.576 1.372 -1.654 1.036 -1.065 -0.700 2.173 1.564 0.551 0.637 0.000 0.744 0.350 0.060 0.000 1.196 1.383 0.149 0.000 0.836 1.341 0.977 2.612 0.719 1.747 1.547 +0 1.610 -0.073 -1.629 0.814 -1.420 2.192 -0.153 -1.307 2.173 3.965 1.303 0.176 1.107 2.119 1.825 1.033 0.000 0.447 1.065 0.413 0.000 0.695 1.726 0.986 0.849 5.516 2.838 2.447 +0 0.959 -0.669 1.719 0.801 0.826 1.088 -0.240 -0.124 0.000 0.584 -0.900 0.473 0.000 1.175 0.170 -1.372 2.548 0.941 -0.666 -1.509 3.102 1.049 1.016 0.987 0.752 0.424 0.842 0.780 +0 0.812 -0.776 0.509 1.106 1.722 0.790 0.735 -1.210 2.173 0.902 0.099 0.543 2.215 0.645 1.288 -0.192 0.000 0.466 0.578 -0.779 0.000 0.739 0.977 1.166 1.217 1.306 0.962 0.963 +1 1.053 2.065 1.531 1.121 0.316 0.493 0.507 -0.865 0.000 0.932 0.351 -0.383 2.215 0.566 0.842 1.712 0.000 0.393 -1.438 1.564 0.000 0.913 0.929 1.337 1.027 0.827 0.987 0.975 +1 1.174 -0.802 -0.337 1.093 0.256 1.047 0.130 -1.520 2.173 0.801 -0.233 1.435 0.000 0.383 -1.093 1.367 0.000 0.601 0.295 0.063 0.000 0.896 0.872 0.988 0.507 0.798 0.837 0.744 +1 1.156 -0.156 -0.606 1.499 0.145 2.677 0.015 -0.843 0.000 1.242 -1.108 1.207 2.215 0.758 -1.393 0.597 2.548 1.185 0.961 1.226 0.000 0.803 0.930 1.142 1.376 0.575 0.953 0.864 +1 0.550 -2.264 1.230 0.503 -0.145 1.215 -0.624 -1.222 2.173 1.072 -0.943 0.632 1.107 0.752 -0.690 -0.021 0.000 1.171 -1.685 0.460 0.000 0.966 1.271 0.988 0.708 1.695 1.178 1.059 +0 0.401 -2.170 0.637 1.628 -1.533 0.400 -0.649 0.903 0.000 0.565 -0.986 -1.469 2.215 0.929 0.369 -0.025 1.274 0.591 -0.580 -0.310 0.000 0.659 0.643 1.038 0.719 0.950 1.074 0.867 +0 1.697 0.760 1.489 0.765 0.524 1.060 0.974 -1.460 2.173 0.774 -0.353 -0.303 2.215 0.637 -0.150 0.456 0.000 2.099 -1.236 0.024 0.000 1.025 0.805 1.205 1.014 1.504 1.383 1.288 +1 1.728 0.082 0.566 0.268 0.951 0.826 -0.327 1.027 0.000 1.018 0.457 -0.752 2.215 1.185 -1.349 -1.349 0.000 1.526 -0.322 0.104 0.000 1.021 1.171 0.979 0.621 0.705 0.748 0.712 +0 0.746 -1.031 -0.754 1.150 1.560 0.592 0.279 0.149 0.000 0.866 0.044 0.974 1.107 0.889 1.420 0.139 0.000 
1.161 0.632 -0.784 3.102 0.892 0.974 1.117 1.020 0.963 0.843 0.962 +0 0.891 -0.593 -0.392 0.485 0.351 1.158 0.271 -1.208 2.173 1.146 -0.844 0.889 1.107 0.688 -1.315 0.328 0.000 0.384 -2.013 0.486 0.000 0.729 1.248 0.991 0.969 1.907 1.056 0.903 +0 0.478 -0.632 -0.146 1.589 -1.345 0.691 0.843 0.462 0.000 0.910 0.245 1.391 2.215 1.121 0.984 -0.146 0.000 0.432 -0.723 -1.314 3.102 0.844 0.883 1.064 0.927 0.496 0.754 0.874 +0 0.592 0.791 -1.145 0.650 0.146 1.251 0.221 -1.593 2.173 1.100 0.831 0.272 1.107 0.856 -1.370 0.227 0.000 0.885 1.491 0.811 0.000 0.802 0.879 0.987 1.208 1.803 1.057 0.899 +0 0.656 -1.537 -1.612 0.096 -1.614 0.918 -0.853 0.673 0.000 0.584 -1.514 0.175 1.107 1.423 -0.912 -0.939 0.000 0.900 -0.716 1.589 3.102 1.072 0.793 0.986 0.765 0.668 0.586 0.599 +1 0.446 1.033 -1.526 2.178 1.015 3.186 1.633 -0.909 0.000 1.849 0.000 1.030 2.215 1.969 -0.300 1.519 2.548 1.671 0.216 0.202 0.000 0.710 0.939 1.027 1.228 0.929 0.969 0.930 +0 1.265 1.128 -1.196 0.559 -0.730 1.291 0.474 0.517 2.173 1.436 0.823 -1.658 1.107 1.061 0.350 -0.286 0.000 0.674 -0.161 1.243 0.000 0.955 0.958 0.977 0.808 1.890 1.124 0.941 +0 0.621 -0.943 -1.698 0.950 0.592 0.901 0.176 -1.422 0.000 1.203 -2.449 0.403 0.000 0.736 1.116 0.183 2.548 1.693 0.217 -0.951 3.102 0.530 0.897 0.989 1.093 0.845 0.971 0.861 +1 1.604 -0.042 0.970 0.463 1.134 0.727 -0.174 -0.451 2.173 1.065 0.404 -1.276 2.215 0.559 1.098 0.018 0.000 0.485 0.432 -1.026 0.000 0.633 0.868 0.991 1.036 0.959 0.925 0.783 +1 0.884 -0.477 -0.295 1.082 -1.223 0.539 0.909 -0.914 0.000 0.683 0.727 -1.479 0.000 0.885 -1.670 1.099 0.000 0.888 0.779 0.507 3.102 0.960 0.817 1.004 0.935 1.211 1.038 0.926 +1 0.574 -1.679 -0.752 0.456 -1.193 1.050 1.109 0.936 0.000 0.588 0.079 -1.199 2.215 0.822 0.009 -0.269 2.548 0.604 2.456 0.865 0.000 1.275 1.359 0.999 0.605 0.549 0.995 1.089 +1 0.628 0.652 -1.154 0.795 0.311 0.974 0.533 1.577 0.000 1.365 0.877 0.648 2.215 1.344 0.660 -0.061 2.548 1.977 -1.075 -0.970 0.000 0.971 1.204 0.989 0.837 0.867 0.950 0.801 +0 2.281 -0.391 -0.926 0.356 1.734 0.571 -1.426 1.185 0.000 0.462 -1.721 0.448 0.000 0.726 0.200 0.247 2.548 0.881 0.483 0.602 3.102 0.698 0.969 0.989 0.879 0.222 0.713 0.826 +1 1.951 0.946 1.742 0.614 -1.618 1.437 1.022 -0.045 2.173 0.811 -0.232 0.555 0.000 0.897 0.277 -1.092 0.000 0.645 0.187 1.012 3.102 0.830 1.149 1.000 0.618 0.932 1.063 0.867 +1 0.348 -0.011 1.007 2.427 -0.467 2.984 -2.227 1.355 0.000 2.892 -0.042 -0.274 1.107 0.680 -0.400 1.681 0.000 0.867 0.222 -0.914 3.102 0.852 0.728 1.236 0.867 0.811 0.886 0.798 +1 0.852 1.071 -1.257 0.713 -0.345 0.951 0.552 0.857 2.173 0.836 -1.309 -1.691 1.107 1.256 0.577 -0.185 0.000 1.225 -0.351 -0.315 0.000 0.773 1.184 0.989 1.872 1.748 1.420 1.214 +1 1.495 0.348 -1.077 0.346 0.257 0.268 0.184 -0.569 0.000 0.676 1.381 0.814 0.000 0.783 1.340 1.267 2.548 1.740 1.229 -0.181 3.102 1.043 0.838 0.988 0.858 0.861 0.769 0.704 +1 0.952 -1.752 1.478 1.641 1.348 1.390 -0.572 0.447 0.000 1.845 -0.858 -1.297 0.000 1.668 1.379 -0.023 0.000 1.178 -0.467 1.505 0.000 1.152 0.684 0.974 1.641 0.591 1.185 1.198 +0 0.918 0.801 0.742 0.588 0.864 1.137 -1.025 -1.319 2.173 1.036 -0.269 -0.071 1.107 0.636 -1.627 0.624 0.000 0.705 -0.782 -1.709 0.000 0.714 0.910 0.996 1.379 1.569 1.081 0.907 +1 1.054 1.332 -0.105 1.000 0.479 1.386 -2.825 -0.913 0.000 1.295 1.411 1.422 2.215 1.326 0.304 1.291 2.548 1.076 0.411 -0.576 0.000 0.876 1.087 0.984 0.960 0.836 0.889 0.784 +1 0.472 -1.140 -0.598 2.016 -1.664 0.877 -0.612 0.644 1.087 0.704 0.802 0.349 2.215 0.955 -2.447 -0.773 0.000 0.868 -0.936 -1.373 0.000 
1.010 1.495 1.108 1.442 0.959 1.363 1.195 +0 0.599 -0.378 0.609 0.830 -1.452 1.162 -0.054 -1.682 2.173 1.327 -0.377 -0.020 2.215 1.110 -1.464 -0.473 0.000 0.550 -1.066 0.371 0.000 0.908 1.021 0.989 0.750 1.848 1.153 0.952 +1 1.020 0.300 0.840 0.753 -1.363 0.783 -1.019 -1.215 2.173 0.668 -0.246 0.607 0.000 1.556 -0.218 -1.718 2.548 1.998 -0.315 -0.132 0.000 0.935 1.237 1.112 1.069 0.820 0.957 0.873 +0 1.453 -0.314 0.218 1.704 -0.313 1.050 -0.412 1.660 2.173 0.763 -0.628 -0.842 0.000 0.828 0.688 1.099 0.000 1.082 0.971 0.741 0.000 1.053 0.912 1.004 0.915 0.508 1.003 0.863 +1 0.768 0.782 -1.544 0.755 0.748 0.995 0.694 0.800 2.173 1.148 0.494 -1.111 0.000 0.908 -0.255 -0.756 0.000 0.580 0.637 0.329 0.000 0.802 0.681 0.991 0.773 0.693 0.890 0.793 +0 1.902 -0.179 0.551 1.064 0.948 1.233 -0.027 -0.830 0.000 0.539 -1.097 -1.700 0.000 0.487 0.165 -1.225 2.548 0.592 1.229 0.428 3.102 1.581 0.869 0.998 0.887 0.498 0.751 0.934 +0 0.647 0.818 -0.818 2.915 -0.204 1.553 -2.329 1.288 0.000 1.578 0.596 0.900 0.000 0.488 0.147 -0.977 0.000 1.822 0.612 -1.459 3.102 1.370 1.081 1.001 1.164 0.859 0.850 0.955 +1 0.812 -0.887 1.734 1.268 0.883 0.952 0.213 -0.949 0.000 0.920 -0.480 0.935 2.215 1.025 -0.932 -0.109 2.548 1.085 -0.664 0.448 0.000 0.893 0.738 0.987 0.853 0.879 0.642 0.593 +1 0.770 0.035 -0.561 0.875 0.583 1.244 0.125 -1.087 2.173 0.744 -0.151 -1.586 0.000 1.123 -0.287 0.742 0.000 0.873 -0.299 -0.479 1.551 0.896 0.914 0.987 1.012 0.635 1.026 0.852 +1 1.204 1.415 -0.119 1.989 0.617 0.959 0.890 -1.356 2.173 1.079 -0.238 -1.013 0.000 1.148 0.271 0.723 0.000 1.294 -0.599 1.203 3.102 1.770 1.202 1.321 1.504 1.376 1.334 1.275 +1 1.834 0.115 0.306 0.416 -0.469 1.443 -0.025 -1.364 2.173 0.643 0.541 1.070 0.000 0.436 -0.683 -0.209 2.548 0.553 -1.892 0.378 0.000 1.570 0.928 0.987 1.452 0.928 0.973 0.940 +0 1.136 0.164 -1.637 0.889 0.688 0.541 1.327 -0.971 0.000 0.851 0.184 -0.359 2.215 0.484 -1.901 1.110 0.000 0.522 1.891 0.862 0.000 0.899 0.964 1.205 0.804 0.484 0.675 0.734 +0 0.729 1.566 0.191 0.968 0.576 0.915 0.610 -1.256 1.087 0.549 -0.350 1.017 2.215 0.446 -0.861 -1.023 0.000 0.552 0.116 -0.156 0.000 0.496 0.720 0.974 0.741 1.066 0.850 0.712 +1 0.583 -1.451 -0.358 0.547 -1.738 0.853 -0.912 1.626 2.173 0.423 1.488 -0.051 0.000 0.606 -0.139 0.098 2.548 0.551 0.647 -0.401 0.000 0.312 1.353 0.984 0.896 0.948 0.836 1.071 +1 0.753 0.139 -0.183 0.779 0.726 0.536 -0.360 -0.554 1.087 0.940 0.487 1.191 2.215 1.086 0.363 -1.215 0.000 0.545 1.121 0.794 0.000 0.923 0.840 0.986 0.865 1.143 0.734 0.715 +1 0.364 -1.982 -0.273 0.440 -0.697 1.024 -0.066 0.831 2.173 1.204 0.173 -1.315 2.215 0.829 0.270 -0.642 0.000 0.490 0.687 0.438 0.000 0.611 0.905 0.983 0.862 1.540 0.925 0.768 +1 1.360 -1.930 0.538 0.163 -1.234 0.885 -0.353 -0.881 2.173 0.546 -1.685 0.311 0.000 0.528 -1.626 -1.579 0.000 0.825 -0.200 1.333 3.102 0.816 1.091 0.980 0.746 0.826 0.854 0.746 +0 1.384 1.017 -1.205 1.897 -1.704 0.786 0.145 -0.035 0.000 1.220 -0.688 0.655 2.215 0.748 -0.914 0.010 0.000 0.596 -1.108 1.513 0.000 0.839 0.939 0.988 0.578 0.821 1.207 1.183 +0 0.327 2.181 -1.356 2.407 -1.392 0.911 0.478 -0.364 0.000 0.796 1.053 1.677 0.000 1.357 -0.548 0.615 1.274 1.239 -1.373 0.343 0.000 1.830 1.193 1.002 1.594 0.346 1.081 1.058 +1 0.609 -0.033 0.509 0.981 1.509 1.266 0.981 -1.244 0.000 1.427 1.113 0.567 2.215 0.414 -0.418 -0.136 0.000 0.378 0.583 -1.636 3.102 1.502 0.797 0.987 0.848 0.625 0.949 0.822 +1 0.980 -1.254 0.007 0.773 -1.212 1.615 -0.722 1.223 2.173 0.946 -0.850 -0.379 0.000 1.071 -0.966 -1.132 2.548 0.853 -1.535 -0.410 0.000 0.815 0.766 
1.073 1.297 1.421 1.003 0.881 +0 1.108 0.045 -0.460 1.435 1.527 0.765 -0.464 1.697 2.173 1.369 0.542 0.243 2.215 0.792 0.027 -0.957 0.000 0.876 -0.879 1.056 0.000 1.034 0.803 1.705 1.287 1.664 1.068 0.906 +1 0.817 0.459 1.471 0.778 -0.129 0.820 0.147 0.339 1.087 0.861 -0.063 -1.314 0.000 1.322 -0.805 -1.587 0.000 2.254 0.391 -0.423 3.102 0.934 1.170 1.095 0.778 0.941 0.939 0.817 +0 1.464 -1.012 -1.334 2.120 1.720 1.255 -0.766 -0.207 0.000 1.186 -1.870 1.135 0.000 1.679 0.315 -0.359 0.000 2.195 -0.984 1.234 3.102 0.752 0.956 0.992 0.923 0.794 0.971 1.058 +0 2.269 -0.404 1.167 0.285 0.009 1.002 0.326 -0.787 0.000 0.726 0.460 -1.479 2.215 0.770 -0.277 0.459 2.548 1.073 -0.167 -0.256 0.000 0.844 0.895 0.986 0.907 0.844 0.699 0.834 +1 2.387 0.556 1.586 0.348 1.673 0.520 0.219 0.398 2.173 0.554 0.663 -0.568 0.000 0.914 -0.421 -0.653 2.548 0.671 1.333 0.270 0.000 0.662 0.757 0.986 1.008 0.760 0.900 0.790 +0 1.259 1.948 -0.756 0.728 -1.416 1.063 0.152 1.585 2.173 1.770 1.575 0.112 2.215 0.527 -0.371 1.015 0.000 0.979 0.224 -1.006 0.000 0.815 0.775 0.989 1.278 2.541 1.628 1.352 +1 0.949 -0.297 -1.008 1.249 -1.150 0.896 -0.684 0.666 1.087 0.545 -0.484 1.298 0.000 0.475 0.171 -0.463 2.548 0.467 -0.004 0.424 0.000 0.494 0.505 0.984 0.554 0.784 0.907 0.733 +1 0.527 1.595 -1.432 1.334 0.709 1.859 -0.529 1.466 2.173 1.124 -1.906 0.076 0.000 1.578 -0.913 -0.157 0.000 1.573 -1.678 -0.627 0.000 0.818 0.941 1.087 2.229 1.115 1.504 1.750 +0 0.327 -1.836 -0.562 1.947 -0.350 0.805 -0.515 0.929 1.087 1.087 0.140 1.266 0.000 0.849 -0.927 1.641 2.548 1.267 0.220 -0.254 0.000 0.759 0.778 0.994 0.944 0.668 0.863 0.841 +1 1.008 0.178 0.694 0.802 -1.168 0.630 1.890 -0.538 0.000 0.481 0.550 -0.438 0.000 0.754 0.344 -1.470 1.274 1.461 -1.362 1.334 0.000 0.852 0.876 1.238 0.733 0.689 0.805 0.767 +1 1.215 0.416 0.546 0.626 -1.677 1.261 -0.650 0.797 2.173 3.439 0.274 -0.576 0.000 3.059 2.110 1.132 0.000 0.943 -0.380 -0.853 0.000 1.060 0.887 1.097 1.037 0.910 1.427 1.209 +0 0.712 -0.662 -0.306 1.765 -0.459 1.566 1.126 1.437 2.173 0.899 0.199 0.296 0.000 0.473 -0.070 -1.130 0.000 0.631 1.122 -1.604 3.102 0.971 0.774 0.984 3.416 0.424 2.154 1.620 +1 0.686 1.003 0.430 0.775 1.716 1.132 -0.735 -0.703 2.173 0.981 -1.004 1.238 2.215 0.941 0.089 -1.350 0.000 1.403 0.751 -0.051 0.000 0.881 0.869 0.988 1.677 1.543 1.388 1.105 +0 0.493 0.427 1.602 1.336 -1.093 0.683 -0.489 1.142 2.173 0.764 -0.886 0.035 0.000 0.417 0.919 -1.252 0.000 1.222 -0.025 0.172 3.102 1.250 1.046 0.987 0.980 0.776 0.951 0.912 +1 0.662 -0.642 0.436 0.751 -1.041 1.345 0.844 1.396 2.173 0.731 0.700 0.088 2.215 0.565 0.846 -0.491 0.000 0.802 0.489 -0.934 0.000 0.315 1.028 0.988 0.744 1.353 0.943 0.776 +0 1.833 2.245 -0.796 1.833 -0.725 1.312 1.418 1.095 0.000 0.529 2.679 0.345 0.000 1.001 0.715 0.821 0.000 0.890 1.190 -1.686 3.102 0.937 0.714 0.995 1.299 0.696 0.901 0.924 +0 1.344 0.701 0.969 1.865 0.841 0.946 1.539 -1.064 0.000 0.774 0.651 -1.092 2.215 2.045 0.696 -0.157 2.548 1.347 0.398 1.636 0.000 1.485 0.929 0.970 1.357 1.000 1.090 1.111 +0 1.478 1.441 1.425 0.896 0.523 1.339 0.407 -0.300 2.173 0.751 0.660 -1.327 2.215 0.615 -0.589 -1.040 0.000 0.402 1.131 -0.743 0.000 0.994 1.328 1.157 1.578 1.195 1.280 1.479 +0 1.430 0.561 -0.506 1.467 -1.097 0.623 0.833 1.461 0.000 1.022 0.693 0.866 2.215 1.105 -0.047 0.476 0.000 0.558 -0.645 -1.198 3.102 1.348 0.938 1.018 1.216 0.857 0.835 0.851 +1 0.472 -1.393 1.435 1.598 -0.689 0.957 0.529 0.236 2.173 1.063 2.096 -1.643 0.000 0.781 -0.592 -0.356 2.548 0.409 -1.248 0.773 0.000 1.009 1.563 1.134 0.688 0.870 1.211 1.611 +0 
0.487 -0.589 -1.605 0.258 -0.404 1.175 1.186 -0.936 2.173 0.861 -1.700 0.861 0.000 1.001 -0.858 1.089 0.000 0.825 0.662 0.510 3.102 0.658 1.095 0.987 0.866 1.027 1.674 1.285 +0 1.660 -0.138 -1.048 0.668 -1.678 0.996 -0.922 0.848 2.173 1.123 -0.463 -0.571 2.215 1.422 0.065 0.862 0.000 0.482 0.127 -0.152 0.000 0.724 1.035 0.990 1.270 1.532 1.014 0.909 +1 2.171 0.518 0.144 0.377 -0.252 1.118 0.954 -1.617 1.087 0.558 1.229 0.880 0.000 0.310 0.832 0.473 2.548 0.535 0.831 -0.413 0.000 0.661 0.886 0.989 0.471 0.698 0.886 0.713 +1 0.579 0.473 0.370 0.504 1.061 1.808 2.217 -0.302 0.000 2.174 0.463 1.063 2.215 0.876 -0.458 -1.258 0.000 0.790 -1.975 -1.292 0.000 0.988 0.889 0.989 1.060 0.852 1.241 1.246 +0 1.617 0.139 1.335 0.484 0.102 0.801 1.232 0.288 2.173 0.707 1.641 -0.519 0.000 1.376 -0.378 -1.202 2.548 0.655 0.880 -1.658 0.000 0.810 0.860 1.098 0.948 1.759 1.021 0.938 +0 1.280 -1.517 0.647 0.508 0.572 1.237 -1.686 0.190 2.173 0.891 -1.339 -1.494 0.000 0.719 -0.980 1.674 0.000 0.956 -0.663 -0.261 1.551 0.394 0.735 1.001 0.894 0.706 0.881 0.811 +0 0.711 -1.732 1.238 1.023 0.297 0.590 -0.159 0.782 0.000 0.683 -0.244 -1.544 1.107 0.710 -0.814 -0.827 0.000 0.715 0.843 -0.931 3.102 0.751 0.806 0.988 0.893 0.539 0.795 0.696 +1 0.722 -0.199 -0.403 1.462 0.562 1.406 -1.478 -1.701 0.000 0.823 -0.788 -0.080 0.000 1.486 -0.899 -1.021 2.548 0.817 -0.056 0.313 0.000 1.015 1.081 1.088 0.677 0.920 0.786 0.723 +1 2.102 -0.947 0.254 0.322 0.742 1.126 -0.466 -1.363 2.173 0.530 -0.882 -0.603 0.000 0.400 0.498 1.122 2.548 0.373 0.085 -1.613 0.000 0.550 0.626 0.978 0.825 0.793 1.006 0.784 +1 0.968 -0.534 1.576 0.475 -0.810 0.820 -1.138 -0.713 0.000 1.156 -1.040 -0.164 1.107 1.696 -0.912 1.067 2.548 1.181 0.418 1.221 0.000 0.933 0.953 0.988 1.023 1.333 0.909 0.811 +1 1.336 -1.104 1.706 0.666 -1.135 1.054 -1.363 0.136 2.173 1.201 -1.169 0.823 2.215 1.438 -1.665 -1.458 0.000 0.422 -0.787 0.357 0.000 0.943 1.051 0.982 1.276 0.970 0.987 0.896 +0 0.656 -1.462 -0.762 0.969 -0.093 0.767 -0.732 0.881 2.173 0.795 -0.150 1.637 2.215 1.218 0.347 -1.140 0.000 0.566 0.399 0.740 0.000 0.910 1.108 0.992 1.404 0.800 1.107 1.142 +0 0.854 0.759 1.508 1.592 -1.559 0.612 1.712 0.641 0.000 0.877 0.833 -0.358 1.107 0.734 2.618 -1.542 0.000 1.559 1.405 -0.055 0.000 0.877 0.984 0.980 1.104 0.591 0.787 0.965 +1 1.857 -0.763 -1.541 1.158 1.363 0.782 1.231 0.132 0.000 1.496 -1.237 -0.028 0.000 1.014 0.672 -1.613 1.274 1.191 -0.144 0.243 1.551 1.146 1.054 1.017 0.993 0.924 0.861 1.185 +1 0.789 -1.094 1.277 0.589 0.096 0.397 -1.307 0.891 0.000 0.762 0.701 -0.496 2.215 0.682 0.389 1.018 2.548 0.485 0.279 -0.650 0.000 0.904 0.985 0.982 0.893 0.759 0.933 0.777 +1 1.576 0.585 0.017 0.424 -0.450 1.348 -0.980 -0.961 0.000 1.031 -0.258 -1.484 0.000 1.681 -0.200 0.771 2.548 1.265 0.124 0.354 0.000 0.949 1.012 0.975 0.992 0.631 0.818 0.800 +1 0.718 -1.455 -1.153 0.823 0.267 1.047 -0.159 0.085 1.087 1.401 -0.631 1.709 0.000 0.430 -0.988 -0.989 0.000 0.607 -0.260 -1.338 3.102 0.819 0.490 1.021 1.057 0.812 0.851 0.804 +1 3.508 -0.092 -0.884 0.834 -0.741 1.219 -0.922 0.715 2.173 1.116 -0.738 1.435 0.000 0.937 -0.704 -1.080 0.000 1.580 -1.512 1.115 0.000 0.914 0.932 0.967 0.629 0.787 1.221 1.085 +1 0.788 0.311 -0.603 2.352 -0.597 1.723 -0.020 0.884 0.000 0.878 0.473 -1.242 0.000 0.997 0.100 0.502 1.274 0.550 0.911 -1.220 3.102 0.961 0.823 1.003 0.807 0.636 0.760 0.764 +0 1.171 0.305 -0.155 0.680 -1.067 0.983 -0.072 0.980 2.173 0.633 0.346 1.515 0.000 1.534 0.555 -1.337 2.548 1.337 -1.287 -0.039 0.000 0.891 0.888 0.989 0.815 1.431 0.939 0.779 +1 0.861 0.184 
-0.681 0.589 0.888 0.701 -0.011 1.684 2.173 0.866 -0.570 -0.046 0.000 0.538 0.890 0.955 2.548 0.728 -1.008 -0.908 0.000 0.796 0.905 0.986 0.763 0.612 0.751 0.673 +0 1.676 1.033 -0.634 0.244 -1.397 0.741 -2.939 -0.472 0.000 1.048 -0.557 1.181 0.000 1.339 0.798 1.482 2.548 0.769 -0.861 0.387 0.000 0.814 1.131 0.985 0.680 0.189 0.664 0.939 +1 0.771 1.496 1.268 0.877 -0.296 1.658 0.706 -0.314 1.087 0.313 0.915 1.024 0.000 1.290 1.179 -1.738 0.000 0.786 -1.238 1.058 0.000 0.954 1.150 1.124 1.152 1.710 1.195 1.018 +1 0.584 -2.056 1.693 0.361 -1.084 0.956 -1.191 -1.630 1.087 1.182 -1.291 0.581 0.000 1.217 -1.012 -0.147 2.548 0.525 -0.195 -0.862 0.000 1.151 0.854 0.986 0.662 1.309 0.891 0.764 +0 1.059 -0.443 0.136 1.203 -1.700 1.351 2.711 -1.740 0.000 0.589 -2.666 -0.758 0.000 0.714 -1.720 0.111 0.000 1.474 0.300 -0.536 3.102 0.882 1.920 1.559 0.867 0.781 1.899 1.888 +0 1.458 0.275 -1.008 0.537 1.526 0.524 0.415 1.118 2.173 0.743 -0.642 0.638 0.000 0.897 -0.304 0.014 2.548 1.012 -0.436 -0.916 0.000 1.116 0.922 0.985 0.801 0.789 0.681 0.682 +1 0.722 1.079 1.303 1.547 0.656 0.546 -0.263 0.040 0.000 0.779 0.299 1.562 0.000 0.808 0.155 -0.951 0.000 0.799 0.697 -0.807 3.102 0.939 0.926 0.986 0.776 0.763 0.872 0.779 +1 0.970 -0.304 0.373 0.337 0.410 1.155 -0.040 -1.169 2.173 1.284 -0.363 -0.065 1.107 1.400 -1.374 1.418 0.000 1.163 -0.609 1.048 0.000 0.863 1.043 0.986 0.768 1.533 1.020 0.893 +0 1.344 0.397 0.949 1.828 1.165 2.156 0.833 -0.635 2.173 0.823 -1.144 1.035 2.215 0.418 1.067 0.611 0.000 0.466 0.611 -1.226 0.000 0.497 0.902 0.995 2.410 3.044 1.856 1.333 +1 0.961 0.261 0.073 0.578 -1.152 0.688 0.181 -1.238 0.000 0.958 -0.286 -0.728 0.000 1.414 1.262 0.471 0.000 1.727 0.421 0.928 3.102 0.871 0.996 0.989 0.779 0.761 0.869 0.785 +1 1.958 0.099 -1.694 0.308 -0.744 0.895 -0.581 0.025 2.173 0.577 -0.447 0.566 0.000 0.435 -1.329 -1.317 0.000 0.580 0.735 0.218 3.102 0.861 0.790 0.988 0.708 0.626 0.849 0.754 +1 0.679 1.220 -0.669 0.765 0.508 1.249 -0.541 1.192 2.173 0.916 -2.121 -0.813 0.000 0.473 -0.603 0.202 0.000 0.687 -0.045 -1.283 0.000 1.073 0.617 0.987 1.876 0.830 1.259 1.530 +1 0.523 -1.556 0.132 2.268 1.671 0.748 0.270 -0.627 2.173 0.827 -0.026 -1.454 0.000 0.964 0.111 0.365 0.000 0.587 -0.392 1.196 0.000 1.050 1.130 1.484 0.974 0.679 1.119 0.953 +1 0.503 -2.304 -0.071 1.606 -1.099 1.179 2.970 1.552 0.000 1.622 -1.088 -0.274 1.107 1.420 -1.442 0.684 0.000 0.755 -1.917 1.255 0.000 0.701 0.887 0.995 1.212 0.792 0.880 0.901 +1 0.904 -1.791 -0.776 0.673 -1.229 2.019 -0.864 -1.733 0.000 1.186 -0.699 0.496 0.000 2.406 -2.106 -0.373 0.000 3.396 -1.848 -0.020 0.000 0.997 1.079 0.979 0.762 0.875 1.001 0.970 +1 1.168 1.256 -0.248 0.520 1.508 1.513 0.105 1.249 2.173 0.773 -0.462 -0.139 2.215 0.765 0.973 -0.530 0.000 1.107 1.389 -1.063 0.000 0.561 1.057 1.079 1.389 1.582 1.179 1.028 +1 0.923 -1.510 -1.655 0.631 -1.476 0.350 -1.638 -0.137 0.000 0.913 0.215 0.197 2.215 0.776 -0.426 0.949 2.548 0.488 0.028 -1.329 0.000 0.815 0.875 0.989 0.702 0.643 0.760 0.682 +0 1.230 0.746 -1.214 0.153 0.758 0.662 -0.108 0.093 0.000 1.035 1.080 1.332 2.215 0.847 -1.160 -0.077 0.000 0.772 -0.057 -1.307 3.102 0.856 0.813 0.990 0.786 0.752 0.985 0.943 +1 1.172 0.238 0.180 0.846 0.172 1.251 -2.722 -0.447 0.000 2.845 0.841 1.357 0.000 1.133 0.456 -0.590 2.548 1.555 -0.781 1.604 3.102 0.976 0.801 0.999 1.022 1.225 0.874 0.881 +1 0.824 -0.827 1.468 0.701 -0.671 0.784 1.125 -0.762 2.173 0.922 0.397 0.766 0.000 0.444 -0.114 0.400 0.000 0.404 0.973 1.091 3.102 0.413 1.104 0.987 0.684 0.593 0.827 0.767 +1 0.525 -0.000 -0.714 0.202 
-1.606 1.157 0.216 1.178 2.173 1.026 -0.387 -1.124 0.000 1.041 -0.814 -1.735 0.000 1.619 0.996 -0.300 0.000 0.920 1.134 0.989 1.061 0.945 0.981 0.958 +1 0.648 -1.535 -1.506 0.945 -0.955 0.482 0.488 0.858 2.173 0.804 -0.442 -0.498 2.215 0.619 -2.584 0.606 0.000 0.610 -0.706 0.973 0.000 0.822 1.084 0.987 0.969 0.971 0.961 0.873 +0 0.780 -1.577 -0.982 0.593 -0.333 0.818 -0.638 1.031 2.173 0.961 -0.454 -1.569 0.000 1.199 -1.030 0.254 2.548 0.669 -0.812 -0.355 0.000 0.966 0.972 0.996 0.767 0.852 0.772 0.732 +1 0.343 1.356 1.125 1.748 0.857 1.831 -1.169 -1.131 0.000 1.796 -0.551 0.169 1.107 1.205 0.683 1.318 2.548 1.001 -0.281 -1.641 0.000 0.950 1.141 0.992 1.304 1.741 2.377 1.794 +0 1.633 -1.022 -0.099 0.629 1.458 1.351 -1.022 -0.635 2.173 1.606 -1.312 1.530 2.215 1.581 -0.873 1.169 0.000 0.452 2.124 -0.090 0.000 2.749 2.403 1.385 1.070 2.042 1.885 1.518 +0 1.001 -0.629 -1.353 0.715 1.007 0.856 0.934 0.469 2.173 1.135 0.136 -0.709 2.215 1.062 0.468 1.469 0.000 1.618 1.888 -1.113 0.000 0.804 1.043 0.995 1.237 1.401 1.087 0.964 +1 1.207 0.579 -0.441 0.771 0.937 1.010 -0.226 -1.077 2.173 1.173 0.882 0.478 2.215 1.241 -1.600 1.033 0.000 0.755 0.594 -1.158 0.000 1.946 1.598 1.265 0.852 1.847 1.447 1.227 +0 1.283 -1.366 0.903 1.769 0.126 1.178 -0.731 -0.676 2.173 1.062 0.016 -0.379 2.215 1.168 -0.791 1.698 0.000 2.336 -0.116 1.543 0.000 0.725 1.374 1.343 1.454 0.770 1.156 1.228 +0 0.543 -0.411 -0.164 1.388 0.699 0.618 -0.992 -0.629 1.087 0.513 0.952 -1.660 0.000 1.554 -0.660 -1.433 2.548 1.214 -0.735 0.610 0.000 1.432 1.196 0.988 0.853 0.826 0.892 0.871 +0 0.758 1.313 -0.571 0.563 -1.134 1.047 -1.160 1.631 2.173 0.667 0.233 -0.026 2.215 0.413 -1.078 0.272 0.000 0.414 -1.838 0.404 0.000 0.712 0.814 0.997 0.671 1.545 1.067 0.859 +1 1.352 0.734 -1.591 0.961 -0.909 1.381 -1.379 -0.253 0.000 1.435 1.137 1.088 2.215 0.672 0.976 -0.302 1.274 1.507 1.850 1.154 0.000 0.831 0.925 0.994 0.701 0.993 0.815 0.714 +1 1.107 -0.919 0.978 1.181 -1.373 0.939 -0.657 -0.145 0.000 1.068 -0.549 0.280 2.215 1.773 -0.369 -1.335 2.548 0.777 -2.495 -1.507 0.000 1.217 0.794 1.353 0.953 1.457 0.949 0.926 +0 1.294 -1.717 0.590 1.636 -1.479 0.698 -1.063 -0.934 0.000 1.874 -0.810 0.589 2.215 1.027 -0.165 -0.841 2.548 1.062 -1.743 -1.358 0.000 0.855 0.864 1.930 1.613 1.498 1.284 1.136 +1 0.607 -1.066 -1.262 1.093 0.558 0.755 0.649 0.855 2.173 0.939 -1.251 -0.870 0.000 0.702 -0.549 -1.637 0.000 0.731 -0.809 -0.125 1.551 0.906 0.676 1.125 1.158 0.937 0.935 0.840 +0 0.521 -1.814 0.525 0.792 -0.408 0.476 -2.736 0.907 0.000 0.455 -2.594 -0.373 0.000 0.576 0.634 -1.376 2.548 0.864 -0.678 -1.677 3.102 0.904 0.962 0.992 0.823 0.470 1.049 0.895 +0 0.619 0.453 -1.257 1.498 1.209 1.178 0.616 -0.136 2.173 0.521 0.038 0.276 0.000 1.165 -0.462 -0.781 0.000 1.862 0.158 1.474 3.102 0.943 0.968 1.060 0.599 1.593 0.971 0.805 +0 1.191 -0.940 0.272 0.635 1.199 0.940 -1.923 -1.028 0.000 0.419 -1.332 -0.424 2.215 0.480 -0.755 1.191 2.548 0.530 -2.339 1.208 0.000 1.086 0.887 0.988 0.623 0.491 0.585 0.652 +0 0.768 0.372 0.409 0.875 1.174 1.264 0.302 -1.530 1.087 1.564 0.744 0.037 2.215 0.675 -0.188 -1.180 0.000 0.483 1.733 0.573 0.000 1.089 1.020 0.984 0.909 2.099 1.143 0.943 +0 2.210 -1.143 -0.318 1.097 0.030 1.688 0.967 1.430 1.087 0.677 -0.533 -1.075 2.215 0.450 -0.819 0.724 0.000 0.444 0.956 -1.083 0.000 0.775 1.133 0.997 0.876 1.794 1.863 1.359 +1 1.238 -0.192 1.263 0.437 -0.043 0.958 -0.181 1.740 2.173 1.545 0.095 -0.125 0.000 0.862 0.043 -1.123 0.000 0.603 -0.672 -0.157 3.102 1.387 0.811 0.990 0.777 0.836 0.893 0.795 +0 1.446 0.988 1.096 0.681 
-0.104 0.784 1.815 -1.354 0.000 0.774 0.082 0.056 1.107 1.132 0.136 1.566 2.548 1.593 1.180 -0.330 0.000 1.409 1.414 1.213 0.876 0.973 1.071 0.948 +0 0.925 -0.618 -0.475 0.529 0.150 0.740 0.137 1.246 0.000 0.323 -0.917 1.669 0.000 0.300 0.153 -1.025 2.548 0.566 0.902 -0.302 3.102 0.704 0.760 0.985 0.607 0.243 0.577 0.672 +1 2.620 -0.353 -0.954 1.730 -1.222 2.205 -0.553 0.231 0.000 2.486 -0.455 1.017 0.000 1.374 -1.349 -1.339 2.548 0.373 -0.769 -1.280 3.102 3.243 1.808 0.984 1.165 0.157 1.513 1.638 +0 0.710 0.407 1.069 0.794 -0.264 0.563 0.103 0.270 0.000 0.425 1.023 -0.556 0.000 1.041 0.496 -1.096 2.548 1.262 -0.250 1.509 3.102 0.870 0.911 0.988 0.728 0.732 0.674 0.613 +1 0.969 -0.018 0.378 0.293 -1.147 1.109 -0.112 -0.140 0.000 1.240 0.001 1.319 1.107 1.398 -0.470 -1.398 2.548 0.707 -0.526 -0.839 0.000 0.872 1.130 0.988 0.860 0.967 0.996 0.836 +1 0.844 -0.616 -0.133 0.584 0.271 1.007 -0.744 -1.248 0.000 1.811 -0.237 1.077 0.000 1.127 -0.432 -0.678 2.548 1.600 0.573 -0.069 0.000 0.489 0.840 0.982 0.674 0.645 0.586 0.567 +0 0.550 0.056 0.770 0.801 -1.468 0.799 0.012 0.190 0.000 1.312 -0.891 1.708 2.215 0.700 -0.814 -0.083 2.548 1.297 -0.372 -0.941 0.000 0.957 0.853 0.988 0.662 1.017 0.651 0.593 +1 0.329 1.696 1.392 1.044 -0.886 0.621 -0.438 -1.047 2.173 0.425 1.169 1.056 0.000 0.393 -0.125 1.270 2.548 0.801 2.371 0.292 0.000 0.848 0.828 0.996 0.731 0.543 0.969 0.822 +0 0.716 -0.730 0.546 0.742 -0.046 0.682 -0.676 1.150 1.087 0.348 -2.190 -0.462 0.000 0.597 -1.470 1.131 0.000 1.373 -1.254 -1.249 3.102 0.729 0.803 0.976 0.818 0.954 0.746 0.638 +1 0.624 1.748 -0.489 0.894 0.278 1.285 -0.404 -1.719 2.173 0.530 -1.039 -0.319 1.107 1.140 -0.373 -0.100 0.000 0.825 0.770 -1.727 0.000 1.312 0.930 0.999 1.533 1.227 1.131 0.980 +1 0.447 0.520 0.302 0.362 0.286 0.835 -0.450 0.745 2.173 1.122 -1.459 -1.190 0.000 0.977 -0.792 -1.711 0.000 0.385 -0.879 -0.485 1.551 0.892 0.590 0.992 0.696 0.568 0.796 0.722 +1 0.663 0.344 0.493 0.851 -1.032 0.847 0.393 1.680 0.000 1.061 0.045 -0.083 0.000 1.351 0.301 1.258 0.000 1.417 -0.166 -0.532 3.102 0.882 0.670 1.020 0.648 0.520 0.475 0.515 +1 1.339 1.387 0.467 0.434 -1.261 0.919 -0.473 -1.144 2.173 0.539 0.589 -1.721 0.000 0.543 -0.780 0.719 0.000 0.986 -0.130 0.033 3.102 0.943 0.953 1.056 0.805 0.894 1.004 0.864 +0 1.085 0.552 1.398 0.903 -1.324 0.692 -0.187 -0.456 2.173 0.580 0.578 -0.186 0.000 1.359 0.379 0.818 2.548 0.465 -0.797 -1.156 0.000 0.778 0.835 0.989 0.956 1.163 0.823 0.707 +1 0.566 1.432 0.936 0.418 1.534 0.582 -0.014 -0.035 1.087 0.512 1.329 -0.757 0.000 0.491 2.138 1.590 0.000 0.862 -0.883 1.581 1.551 0.766 1.065 0.975 0.713 0.851 0.912 0.783 +0 0.743 -1.553 -0.241 0.608 -0.784 0.888 -1.358 0.573 2.173 1.151 0.323 1.684 2.215 0.997 0.786 -0.955 0.000 1.177 -1.475 0.917 0.000 0.936 0.975 0.988 0.955 1.897 1.205 1.017 +0 2.597 -0.003 -0.788 0.897 0.664 0.973 0.604 0.046 0.000 1.023 0.465 -1.450 2.215 0.880 -0.670 1.742 0.000 0.379 -0.254 -0.032 0.000 0.651 0.717 2.043 1.106 0.638 0.875 0.740 +1 0.639 0.882 0.924 1.584 -1.193 1.170 -0.152 -0.360 2.173 1.108 0.528 -1.513 0.000 1.066 0.360 1.422 0.000 1.725 1.344 0.541 3.102 0.928 1.212 1.316 1.337 1.861 1.173 1.021 +1 0.646 -0.809 0.126 0.353 0.143 0.562 -1.886 1.300 0.000 0.814 -0.990 -0.644 2.215 1.047 -1.670 -1.246 0.000 1.227 -0.878 1.271 3.102 0.829 1.103 0.988 0.847 0.892 0.913 0.786 +0 0.449 -1.586 -1.570 1.374 -0.919 1.455 -0.916 0.556 0.000 1.308 -2.904 -1.235 0.000 0.636 -0.240 -0.246 2.548 0.693 -0.811 1.036 0.000 0.643 0.891 0.984 0.631 0.551 0.646 0.794 +0 1.050 1.968 -1.704 0.611 -0.058 
0.529 1.135 0.079 0.000 1.090 0.889 -1.314 2.215 0.957 0.840 0.993 2.548 0.826 1.636 -0.173 0.000 0.599 1.052 1.105 0.813 0.948 0.745 0.714 +0 0.705 -1.903 0.205 2.272 -0.243 0.557 -0.142 1.618 0.000 1.417 0.631 1.144 2.215 1.092 -1.346 -1.006 2.548 0.725 0.558 -1.125 0.000 0.725 0.992 0.992 3.753 2.091 2.386 1.933 +1 1.410 -0.826 1.118 0.142 -1.159 0.485 0.156 -0.440 0.000 0.607 0.703 -1.306 2.215 0.861 1.294 0.080 2.548 0.485 0.812 -0.157 0.000 0.363 0.551 0.994 1.111 0.778 0.831 0.702 +1 2.047 1.124 -1.365 1.138 -0.537 1.896 -2.868 1.353 0.000 0.998 0.741 0.169 0.000 1.396 0.403 -0.483 2.548 1.289 -0.261 0.591 0.000 0.937 0.751 1.437 1.015 0.864 0.904 0.804 +1 0.559 0.070 -0.519 0.213 -1.411 0.850 -0.104 0.648 2.173 0.475 0.516 -1.052 0.000 0.527 -1.161 -0.360 1.274 0.830 0.752 1.368 0.000 0.946 0.894 0.986 0.929 0.836 0.830 0.757 +0 0.637 -0.505 -1.539 2.538 -0.871 0.744 -0.623 1.149 0.000 0.546 -1.052 0.407 0.000 0.471 0.927 0.317 2.548 0.433 1.857 0.895 0.000 0.896 0.879 0.999 0.626 0.278 0.636 0.773 +1 0.563 0.727 -1.148 0.955 0.405 0.949 1.834 -0.976 0.000 0.770 -0.604 1.025 0.000 1.761 0.625 0.565 2.548 1.021 -0.781 -1.289 0.000 1.022 1.007 1.001 0.714 0.737 0.882 0.778 +0 0.691 1.543 0.106 0.995 1.010 1.251 0.610 0.862 2.173 0.622 -0.408 -0.137 0.000 1.934 1.463 -1.113 0.000 2.082 0.393 -1.055 1.551 1.086 1.011 0.986 0.733 1.691 0.976 0.845 +0 0.461 -0.550 -0.218 0.743 1.066 1.464 -0.130 1.578 2.173 0.892 0.222 0.104 0.000 0.920 1.560 -0.522 0.000 1.061 0.140 -0.597 3.102 1.352 0.881 0.985 0.983 1.235 1.195 0.957 +1 0.766 -1.026 -0.231 1.050 1.394 0.518 0.450 0.106 0.000 1.015 1.068 -1.355 2.215 1.111 0.674 1.099 2.548 0.710 -2.336 -0.474 0.000 0.767 0.865 1.236 1.117 0.927 1.089 0.903 +0 4.233 0.579 0.834 1.008 1.313 1.841 1.613 -0.641 0.000 1.221 0.599 -1.416 1.107 0.514 2.295 -1.312 0.000 0.673 0.490 0.051 3.102 1.281 1.022 1.197 1.544 0.793 1.017 1.485 +1 0.917 -0.616 -0.385 0.629 0.799 0.689 0.191 -1.624 1.087 0.693 -0.923 -1.560 1.107 1.113 -0.758 -0.101 0.000 0.604 -2.088 1.025 0.000 1.144 0.965 0.988 0.973 0.616 0.881 0.774 +0 0.667 -1.880 -0.206 1.170 -0.271 0.844 0.693 1.590 0.000 0.586 -0.084 0.365 2.215 0.366 -0.967 -1.721 1.274 0.432 -0.095 -1.638 0.000 0.441 0.809 0.993 0.646 0.531 0.547 0.783 +1 0.841 -1.475 1.131 0.324 -0.386 1.920 2.867 -0.416 0.000 0.802 -0.430 1.268 0.000 2.063 0.648 1.010 2.548 1.046 -0.147 1.705 1.551 0.934 1.275 0.994 0.640 0.836 0.817 0.755 +1 0.996 0.207 -0.734 1.212 0.745 0.367 1.199 -1.205 0.000 0.882 0.982 -0.348 0.000 1.660 0.121 1.105 2.548 0.827 0.173 -1.350 3.102 0.853 1.270 1.479 0.813 0.718 0.784 0.775 +0 0.343 -1.797 -0.557 2.075 0.126 1.257 0.649 -1.612 2.173 0.664 -0.330 0.806 2.215 0.548 1.136 -0.722 0.000 0.662 -0.157 0.145 0.000 1.067 0.935 0.982 4.330 1.304 2.718 2.415 +0 0.620 1.248 -1.074 1.403 -0.839 0.852 -0.071 0.966 0.000 0.565 0.767 0.362 2.215 0.569 -0.455 -1.594 2.548 0.693 -0.946 0.421 0.000 0.858 0.784 0.993 1.381 0.723 1.010 1.258 +1 0.368 0.145 1.520 0.758 -1.027 1.041 0.646 0.680 2.173 1.622 1.383 -0.999 0.000 1.347 -0.303 0.430 2.548 0.697 -0.305 -0.916 0.000 0.915 0.952 0.996 0.924 0.826 1.018 0.898 +1 1.416 0.242 0.637 0.634 -1.608 0.612 1.377 -0.081 1.087 0.934 1.376 -1.080 2.215 0.351 -1.001 1.181 0.000 0.784 -0.316 -1.407 0.000 0.651 1.157 1.181 1.116 0.871 0.927 0.865 +1 0.709 0.650 0.243 0.671 -0.887 0.797 0.743 0.956 0.000 0.557 1.503 1.495 0.000 0.966 1.334 -0.828 2.548 1.658 -0.170 -0.558 3.102 0.876 1.040 0.988 0.612 0.934 0.942 0.824 +1 2.392 0.939 0.195 1.114 0.514 1.552 1.726 -1.143 0.000 0.911 
0.969 0.983 2.215 1.059 0.070 -1.287 2.548 0.401 0.204 1.349 0.000 1.365 1.275 0.998 0.861 1.053 1.039 1.196 +0 0.509 0.785 -0.762 1.793 1.463 1.204 1.012 -0.338 2.173 1.148 1.314 1.240 2.215 0.790 0.088 -0.670 0.000 0.776 1.518 0.780 0.000 1.184 1.012 1.201 0.801 1.735 1.073 0.920 +1 0.343 1.313 -0.568 0.935 1.593 0.937 0.326 1.187 0.000 0.940 0.162 -0.223 2.215 1.305 -0.781 -0.845 2.548 0.574 -0.498 0.536 0.000 0.823 1.073 0.991 0.877 0.888 0.925 0.799 +0 0.461 0.561 1.039 0.132 -0.264 1.203 0.657 -0.542 2.173 0.935 -0.234 1.687 0.000 0.993 -0.658 0.489 2.548 0.840 -2.041 1.445 0.000 0.808 0.790 0.991 1.182 1.495 1.041 0.874 +0 1.072 0.338 0.978 0.782 -1.325 0.763 0.479 -1.471 1.087 0.516 -0.653 -1.014 0.000 1.313 -0.299 0.455 2.548 1.020 1.105 -0.528 0.000 1.174 1.007 1.111 0.746 1.335 1.026 0.924 +0 0.596 -1.673 1.040 0.760 -0.643 0.567 -0.327 1.380 2.173 1.013 -2.015 -1.245 0.000 0.937 -0.135 0.180 0.000 0.626 0.660 -0.019 0.000 0.867 0.909 0.985 1.154 0.464 0.874 0.797 +1 0.885 -1.354 0.225 1.051 1.433 1.114 -0.150 0.312 2.173 0.576 -0.767 1.549 0.000 1.164 -0.953 -1.305 2.548 0.718 0.653 -0.671 0.000 1.055 0.847 1.184 1.193 1.555 0.998 0.895 +0 0.578 0.990 -0.798 0.972 -1.503 0.405 0.521 1.311 2.173 0.516 -1.066 0.700 0.000 0.743 0.580 0.349 2.548 1.494 1.534 -1.302 0.000 0.929 0.869 0.991 0.883 0.522 0.693 0.649 +0 1.223 0.409 1.367 0.740 -1.163 0.937 0.048 -1.049 0.000 1.591 -0.219 0.827 2.215 0.818 0.624 -0.022 0.000 1.137 0.066 -0.503 3.102 1.358 0.803 1.001 1.032 1.146 0.988 0.883 +1 1.635 -0.051 1.565 0.596 -1.497 0.734 1.351 0.287 2.173 0.267 -0.261 1.056 0.000 1.227 -0.004 -0.227 2.548 0.519 -2.114 -0.772 0.000 0.831 0.924 0.977 1.238 1.021 1.107 1.054 +1 0.647 -1.062 1.419 0.091 -0.101 0.771 -0.733 -1.079 2.173 0.977 -2.428 0.863 0.000 0.713 -0.617 0.227 2.548 0.596 1.117 -0.570 0.000 3.454 1.900 0.992 0.804 0.854 1.309 1.078 +1 1.408 1.144 1.329 1.585 0.658 0.755 1.256 -0.208 2.173 0.803 1.334 -0.707 0.000 0.998 0.672 -1.143 1.274 0.660 0.649 -1.733 0.000 0.809 0.751 1.176 1.063 0.858 0.914 0.798 +1 0.684 1.472 -0.848 1.176 -1.545 1.057 0.545 0.603 2.173 0.687 0.872 -0.895 0.000 0.598 0.155 1.290 2.548 0.988 1.332 0.079 0.000 0.910 1.122 0.993 0.989 0.605 1.092 0.929 +1 0.761 0.601 0.391 0.963 -0.493 0.543 0.646 -0.435 2.173 0.733 0.701 1.014 2.215 0.299 1.953 0.659 0.000 0.453 0.683 1.662 0.000 0.431 0.582 0.988 0.785 0.896 0.618 0.518 +0 0.776 0.487 0.887 0.467 -0.878 0.603 1.183 -0.982 1.087 0.548 -1.032 0.813 0.000 0.517 0.966 0.809 0.000 1.501 0.595 -1.592 3.102 1.038 0.992 0.989 0.714 0.580 0.832 0.729 +1 0.416 -0.702 -1.377 1.533 1.337 1.514 0.339 -0.465 2.173 0.808 0.012 1.437 0.000 0.613 -1.023 0.896 0.000 1.111 0.787 0.302 3.102 0.821 0.927 0.986 1.432 0.970 1.034 0.950 +1 0.322 -0.072 -1.528 1.850 -0.617 1.373 -1.529 0.463 2.173 1.881 -1.478 1.501 0.000 1.040 -1.252 -0.798 0.000 0.924 1.076 -1.725 0.000 0.827 0.669 0.988 2.519 0.674 1.524 1.253 +0 0.454 0.236 0.073 0.933 1.034 0.938 0.342 -0.839 2.173 0.804 -0.978 1.380 2.215 0.539 -1.152 0.958 0.000 0.800 -0.716 -1.142 0.000 0.913 0.986 0.986 0.650 1.489 0.918 0.769 +0 0.284 -1.170 0.025 3.605 1.133 1.614 -0.708 -1.181 0.000 1.564 0.698 -0.416 0.000 1.891 -1.126 1.653 2.548 1.311 -1.239 -1.030 0.000 0.931 1.105 1.179 0.949 1.505 1.136 1.232 +1 0.846 -0.236 0.118 0.274 -0.835 1.845 -1.438 -1.235 0.000 1.728 -1.466 0.188 0.000 2.693 0.602 1.011 0.000 1.979 0.894 0.634 0.000 0.954 1.090 0.981 0.677 0.549 0.640 0.724 +0 0.596 -2.088 -0.819 1.921 -1.611 0.733 -0.124 -0.569 1.087 1.343 0.197 0.184 2.215 0.921 -2.326 
0.911 0.000 0.708 0.845 0.923 0.000 2.578 1.918 0.988 2.459 0.948 1.768 1.647 +0 0.599 1.160 0.066 1.875 0.687 0.951 1.303 -1.038 0.000 0.813 -0.126 1.682 2.215 0.389 0.432 1.513 2.548 0.796 1.005 -0.406 0.000 0.721 1.161 0.990 0.740 0.204 0.928 0.926 +0 1.044 0.272 -0.841 1.630 0.141 0.483 0.014 0.588 2.173 0.521 1.074 1.633 0.000 0.724 -0.378 1.560 0.000 0.595 1.288 -0.764 0.000 1.040 0.827 1.399 0.862 0.618 0.660 0.652 +0 0.347 -0.450 1.474 1.609 0.762 0.774 -0.878 -0.815 2.173 0.918 -1.542 -1.307 0.000 0.807 -0.629 0.845 0.000 1.474 -0.638 0.028 1.551 1.374 1.082 0.994 0.839 0.781 0.846 0.828 +1 0.563 -1.318 -0.425 1.549 1.291 0.881 0.141 -1.176 2.173 0.647 -0.540 0.119 0.000 0.985 0.392 1.051 2.548 0.373 0.652 0.763 0.000 0.579 0.934 1.294 1.104 1.065 1.040 0.862 +1 0.776 -0.253 -0.699 1.230 1.174 0.950 -1.146 1.506 2.173 1.159 -0.570 -0.261 2.215 0.569 0.711 -0.674 0.000 0.596 -0.750 0.075 0.000 0.727 1.142 1.344 1.023 1.607 0.981 0.833 +0 1.214 0.234 -1.582 1.360 1.385 1.587 -0.548 0.132 2.173 1.936 2.554 -1.687 0.000 1.127 -0.932 -0.194 0.000 1.344 0.038 -0.519 0.000 0.855 0.916 0.989 1.065 1.688 1.288 1.123 +0 0.614 0.466 1.330 1.278 -1.398 0.446 0.012 -0.574 2.173 0.657 -1.562 0.597 0.000 0.649 -0.740 0.049 2.548 0.560 -1.561 -0.653 0.000 0.722 0.885 0.993 1.080 0.456 0.787 0.998 +1 1.679 0.894 0.260 0.741 1.318 1.172 -0.228 -1.124 2.173 0.815 0.348 -1.591 0.000 1.437 -0.118 1.540 2.548 1.798 0.406 -0.156 0.000 0.824 1.218 1.260 1.555 1.094 1.152 1.030 +1 1.033 0.852 0.754 0.183 -0.580 0.505 -0.870 -0.145 2.173 0.746 -0.788 -1.722 2.215 0.438 0.501 -1.196 0.000 0.417 0.322 1.459 0.000 0.323 0.639 0.992 0.871 0.893 0.734 0.589 +0 0.362 -1.062 -0.439 0.478 -0.943 0.516 -0.628 1.028 2.173 0.683 -0.681 -1.551 2.215 0.687 0.329 -0.573 0.000 0.933 0.976 0.496 0.000 0.813 0.966 0.979 0.833 0.637 0.721 0.665 +0 1.429 -0.043 -1.625 0.263 1.354 0.875 1.046 0.462 0.000 0.395 1.737 -0.108 0.000 0.565 2.089 -0.649 0.000 1.739 0.023 -0.628 3.102 0.772 0.893 0.981 0.883 1.238 0.892 0.995 +1 1.074 0.538 -0.339 1.476 -0.567 0.948 0.139 1.456 0.000 0.630 -0.021 1.008 0.000 1.170 0.660 -0.796 2.548 1.047 0.852 1.331 0.000 0.714 0.852 0.977 0.702 0.688 0.764 0.882 +0 1.125 -0.867 -0.472 0.697 0.281 1.685 -1.719 -0.149 0.000 1.626 -0.340 1.511 0.000 2.250 0.425 1.517 2.548 1.504 -0.791 -1.280 3.102 0.601 0.827 0.984 1.721 1.356 1.193 1.020 +1 0.292 1.219 1.480 0.883 -0.114 1.297 0.883 0.545 0.000 1.121 1.074 -1.199 0.000 1.080 0.391 -1.050 2.548 0.511 -0.839 1.192 3.102 2.576 1.634 0.987 0.714 0.677 1.083 0.916 +0 0.332 1.870 0.124 2.092 0.297 0.771 -0.232 -1.301 2.173 0.433 0.443 -0.948 0.000 1.658 0.103 0.816 1.274 2.187 0.957 -1.345 0.000 0.631 0.837 0.981 0.831 1.352 1.013 1.007 +0 1.188 -0.198 0.165 1.091 -0.009 1.241 0.050 -1.661 1.087 0.544 0.931 -0.735 0.000 1.013 1.230 1.052 2.548 0.436 -1.282 -0.473 0.000 1.026 1.099 0.986 1.587 1.323 1.330 1.094 +1 0.787 0.340 -0.263 0.780 1.703 0.629 0.451 0.005 2.173 0.779 1.171 1.645 0.000 1.510 0.938 -1.226 0.000 2.552 0.814 0.749 3.102 0.887 1.198 1.064 0.938 0.905 0.977 0.841 +1 1.071 0.295 -0.971 1.556 -1.172 1.559 0.359 -0.742 2.173 1.477 0.156 0.977 0.000 0.802 -0.663 0.995 1.274 0.932 -1.166 0.112 0.000 0.638 0.579 0.988 0.973 1.606 1.107 0.983 +0 0.936 -1.135 -0.334 0.634 1.043 0.439 0.298 -1.732 0.000 0.705 -0.040 -1.205 0.000 1.022 -0.437 0.459 2.548 0.811 0.627 -0.199 3.102 0.578 0.921 1.010 0.814 0.599 0.634 0.682 +1 0.676 -1.307 -1.546 0.948 0.414 0.533 -1.489 -0.797 0.000 0.563 1.020 -0.622 0.000 1.297 -0.413 0.938 2.548 1.083 0.582 1.663 
3.102 0.782 0.972 1.088 1.028 0.779 0.823 0.757 +1 0.627 -0.944 0.512 1.643 -0.266 1.310 0.937 -1.139 2.173 1.351 1.202 1.062 0.000 0.691 2.481 0.880 0.000 0.862 0.232 -1.421 0.000 0.903 1.108 0.982 0.626 0.749 1.416 1.294 +1 1.282 -0.606 -0.657 0.271 1.222 0.567 -0.624 1.068 0.000 0.861 -0.495 0.575 0.000 1.130 -0.693 -1.380 1.274 1.403 0.634 -1.304 3.102 0.646 0.977 0.984 0.941 0.808 0.853 0.794 +0 0.498 -0.861 1.680 1.601 -0.163 0.838 1.235 -1.544 0.000 1.141 -0.826 -0.515 2.215 1.102 0.575 0.582 0.000 0.847 -0.265 0.967 3.102 0.863 1.235 1.232 0.764 0.895 0.772 0.765 +1 0.550 -0.294 -1.344 0.797 0.828 0.715 -0.823 0.459 2.173 0.978 0.643 1.445 0.000 1.569 -0.975 -0.775 2.548 0.498 -1.504 -0.824 0.000 1.643 1.347 0.987 0.847 1.194 1.081 0.892 +1 0.597 -0.032 0.672 0.709 -0.327 0.886 0.123 1.118 0.000 0.599 1.001 1.059 0.000 0.942 0.927 -0.478 1.274 0.905 0.825 -1.309 0.000 0.811 0.895 0.989 0.970 0.807 0.693 0.751 +1 0.615 1.102 -0.556 2.133 -1.262 1.255 0.261 0.274 0.000 1.437 0.574 1.469 2.215 1.212 0.699 0.642 2.548 0.528 -1.296 -1.149 0.000 1.197 1.044 0.986 1.223 0.959 1.020 0.978 +0 0.680 -0.718 -1.579 0.522 0.042 0.772 -1.156 1.340 2.173 0.690 1.144 -0.371 2.215 0.700 -0.358 0.128 0.000 0.809 0.348 -0.386 0.000 0.915 0.957 0.985 0.803 1.900 1.073 0.868 +1 1.207 0.279 -1.732 0.917 1.383 1.070 0.038 -0.186 0.000 0.472 -2.192 -0.128 0.000 0.544 -0.775 0.836 2.548 0.930 0.480 -0.891 1.551 2.139 1.279 0.996 0.775 0.688 0.901 0.942 +1 0.359 0.784 1.375 1.216 -0.794 0.594 -1.031 0.711 2.173 0.741 -0.203 -0.722 0.000 0.507 -0.400 1.392 1.274 0.386 0.632 0.719 0.000 0.757 0.891 0.987 0.616 0.445 0.635 0.587 +1 0.578 1.557 0.509 0.392 -1.574 0.927 0.465 -0.005 1.087 0.630 0.676 -1.110 0.000 1.043 0.068 0.783 2.548 1.913 0.679 1.734 0.000 0.787 0.979 0.978 0.795 0.835 0.889 0.746 +0 2.028 1.164 -1.360 0.311 -1.713 0.772 0.345 0.129 0.000 0.845 0.731 0.720 0.000 1.125 1.276 1.296 2.548 1.462 1.078 -0.455 3.102 0.937 0.967 0.998 0.817 0.982 0.812 0.884 +1 0.321 1.373 1.054 2.149 -1.656 0.563 -0.520 0.211 0.000 0.759 -0.994 -0.051 1.107 0.837 -2.008 1.586 0.000 0.685 0.651 0.376 3.102 0.919 0.744 0.992 0.941 0.713 1.696 1.438 +0 0.559 0.557 1.525 0.670 -0.725 0.618 1.959 -0.005 0.000 0.403 0.275 1.056 1.107 0.751 -1.252 1.254 0.000 0.511 1.137 -0.253 3.102 3.352 1.785 0.987 0.602 0.444 1.050 0.907 +1 0.906 0.488 1.492 0.626 0.678 1.072 -0.840 -0.628 2.173 0.510 -1.201 1.137 1.107 0.314 1.993 -0.202 0.000 0.386 -1.119 1.705 0.000 0.988 1.011 0.985 1.189 1.108 0.929 0.843 +0 0.297 -0.904 -0.088 1.908 1.165 1.124 0.974 -0.974 0.000 1.098 0.005 0.400 2.215 0.637 -0.205 0.703 2.548 0.482 -0.615 -1.255 0.000 1.129 1.083 0.990 1.156 0.261 0.922 1.182 +0 1.472 -0.754 -0.812 1.399 -0.203 0.649 0.482 1.134 0.000 0.636 -1.003 0.754 1.107 0.437 0.043 1.700 0.000 0.755 -0.972 -1.622 1.551 0.926 0.948 1.038 0.754 0.528 0.679 0.824 +0 0.783 -1.773 -0.593 0.982 -0.673 0.578 -0.741 0.671 0.000 0.968 -1.483 1.172 0.000 0.525 0.304 1.684 0.000 0.818 -1.215 -1.044 3.102 0.939 0.879 0.979 0.452 0.162 0.565 0.653 +0 0.537 -0.910 0.559 3.811 0.437 1.901 -1.933 -0.856 0.000 1.274 1.841 1.682 0.000 1.240 2.451 1.593 0.000 0.955 0.077 1.319 3.102 0.804 0.932 0.984 0.848 0.562 0.863 1.641 +0 1.896 0.715 1.332 0.633 0.360 2.523 1.018 -0.985 0.000 1.813 0.479 0.693 0.000 1.481 1.168 0.800 2.548 0.576 0.747 0.242 0.000 0.682 0.795 1.165 0.758 0.947 1.109 1.117 +0 0.707 -1.212 1.634 2.007 -1.174 0.434 -0.823 -0.193 2.173 0.711 0.370 1.108 0.000 0.905 0.259 0.037 0.000 1.889 -0.172 0.609 3.102 0.953 1.008 0.990 1.238 0.703 
0.878 0.908 +0 0.736 1.712 1.602 1.198 -1.645 0.951 -0.468 -0.339 2.173 1.081 0.556 0.769 2.215 1.229 0.321 -0.937 0.000 0.483 -0.960 1.723 0.000 0.908 0.928 0.992 0.956 1.496 1.145 0.973 +0 0.714 -0.080 0.761 0.250 0.138 0.625 1.641 -0.085 0.000 1.065 0.270 -1.379 2.215 0.631 2.650 0.404 0.000 0.890 1.097 1.731 3.102 0.896 0.881 0.979 0.903 0.560 0.991 0.860 +1 1.488 0.973 0.943 0.607 0.511 0.649 0.164 -0.965 2.173 0.507 0.360 1.629 2.215 0.807 1.291 -0.856 0.000 0.769 0.601 0.297 0.000 0.811 0.768 0.976 0.766 0.614 0.836 0.727 +0 1.422 0.180 1.035 1.363 1.437 1.143 -0.936 -0.435 2.173 0.570 1.501 0.681 0.000 1.042 -0.215 -0.755 0.000 0.610 0.465 -0.556 3.102 0.805 0.828 0.982 0.761 0.734 1.206 0.969 +1 0.799 -1.018 -0.845 0.239 0.814 0.846 -0.092 -0.968 2.173 1.088 -0.784 1.207 0.000 0.963 0.110 0.931 0.000 1.285 0.214 -0.015 3.102 0.971 0.969 0.988 0.684 0.858 0.853 0.729 +1 0.591 0.026 0.859 1.712 1.497 2.606 1.432 -0.081 0.000 0.978 -0.637 0.368 1.107 1.667 0.250 -1.656 0.000 2.482 1.801 -1.214 0.000 0.766 0.961 0.987 0.920 0.892 0.905 0.799 +1 1.408 -0.310 -0.394 0.470 -0.029 0.969 -1.922 0.094 0.000 1.767 0.089 1.722 2.215 1.363 -0.703 -1.065 0.000 1.779 -0.488 1.034 1.551 0.924 0.924 0.984 1.453 1.082 1.069 0.902 +1 0.559 0.832 0.750 0.904 -0.365 1.723 -0.451 -1.393 2.173 1.426 -1.015 0.591 0.000 0.658 -0.385 0.860 0.000 1.255 -0.897 -0.829 1.551 0.811 0.975 0.990 1.976 0.904 1.413 1.370 +1 1.292 0.504 -0.877 0.684 -0.661 1.155 0.179 0.854 2.173 0.453 1.003 -0.440 0.000 0.777 -0.209 0.403 0.000 1.092 0.659 1.741 3.102 0.878 0.951 0.995 0.794 0.927 0.938 0.807 +0 0.463 1.709 -1.338 0.328 -0.803 0.683 0.665 0.797 2.173 0.439 -1.026 -1.017 0.000 1.128 0.262 1.558 1.274 0.796 0.279 -0.187 0.000 0.780 0.981 0.978 0.681 0.724 0.712 0.659 +1 0.318 1.315 -1.725 1.022 0.699 1.103 0.154 -1.109 0.000 0.723 -0.594 1.005 2.215 0.746 0.664 -0.606 0.000 1.637 0.653 0.314 3.102 0.858 1.180 0.993 0.619 0.938 0.953 0.830 +1 0.699 -0.707 1.685 1.759 -1.572 1.222 -0.358 0.371 0.000 0.895 -0.251 -0.442 2.215 1.021 -0.478 -1.125 0.000 1.109 -0.895 1.062 3.102 1.966 1.257 1.006 1.280 0.956 0.900 1.063 +0 0.524 -2.417 0.264 1.034 1.627 0.654 -0.498 -0.295 2.173 0.497 -1.470 -0.705 0.000 0.451 0.388 -0.927 0.000 0.638 0.879 0.576 0.000 0.832 0.852 0.986 1.129 1.034 0.820 0.764 +1 0.525 -0.807 1.104 0.342 0.305 0.782 0.142 1.024 2.173 0.824 1.698 -0.307 0.000 1.130 0.485 -0.937 2.548 1.027 -1.461 -0.904 0.000 0.894 0.976 0.981 0.642 1.171 0.891 0.780 +1 1.057 0.037 -0.141 1.795 0.513 0.908 -0.855 1.690 2.173 1.248 0.236 -0.953 2.215 0.510 0.565 0.865 0.000 0.706 0.159 1.642 0.000 0.450 0.775 1.062 1.412 1.410 1.181 0.909 +1 0.955 -0.709 0.622 0.880 -0.465 0.644 -1.743 1.532 0.000 0.448 1.841 -0.645 0.000 1.352 0.109 -0.038 1.274 0.751 -0.474 -1.404 0.000 0.838 0.955 1.054 0.740 0.820 0.924 0.817 +1 0.615 0.980 0.296 1.022 -0.971 0.437 -1.195 -0.991 2.173 0.680 -0.345 0.806 2.215 0.343 0.536 -0.072 0.000 0.928 0.674 1.461 0.000 0.615 0.859 0.998 0.911 0.875 0.849 0.697 +1 0.813 -1.079 1.510 0.638 0.281 1.349 -0.442 -1.003 2.173 1.015 -0.220 0.697 0.000 0.997 -0.618 0.068 2.548 0.846 0.660 1.294 0.000 0.891 0.857 0.991 1.235 1.200 1.028 0.963 +0 2.044 1.388 -1.557 0.157 0.224 0.318 -0.465 -0.540 0.000 1.068 -0.058 0.303 0.000 0.627 0.548 1.726 2.548 0.720 0.498 0.109 3.102 0.890 0.864 0.994 0.796 0.510 0.560 0.820 +0 1.150 -1.414 -0.283 0.221 1.447 0.424 1.418 1.143 0.000 0.882 0.060 1.142 1.107 2.048 -0.449 -1.075 2.548 1.166 0.740 0.583 0.000 0.901 0.945 0.984 0.898 1.361 1.082 1.054 +1 0.982 1.267 1.184 
1.007 0.246 1.044 0.161 -1.075 2.173 0.797 -0.859 -0.065 1.107 0.351 -1.548 1.447 0.000 0.365 1.398 0.705 0.000 1.061 0.905 1.031 1.337 1.291 1.178 0.990 +1 1.305 -1.001 1.264 0.344 -0.732 0.459 0.699 -1.109 2.173 0.891 -0.655 -0.587 1.107 0.858 -1.908 1.215 0.000 0.786 -0.049 -0.852 0.000 0.988 1.008 0.987 0.850 0.822 0.750 0.697 +0 2.151 0.191 -1.021 0.140 -0.338 0.746 1.324 -1.358 2.173 1.545 -0.480 0.475 0.000 0.765 1.390 0.899 0.000 0.931 -1.010 1.175 0.000 1.085 1.010 0.994 1.019 0.549 1.199 1.077 +1 1.096 0.244 0.715 0.319 -1.298 0.678 0.768 -0.238 0.000 1.321 0.500 1.625 0.000 0.888 -0.207 -1.208 2.548 0.942 -0.304 0.217 3.102 2.015 1.290 0.994 0.643 0.672 0.868 0.762 +0 0.338 2.163 -1.278 1.560 0.649 0.464 2.030 -1.629 0.000 0.735 1.025 -1.332 2.215 0.994 0.476 -0.378 2.548 0.921 1.310 0.549 0.000 0.953 0.991 0.992 0.974 0.730 0.852 0.748 +1 0.513 -0.168 -0.071 1.061 1.014 1.557 -0.558 -0.732 2.173 0.565 1.249 1.548 0.000 0.807 -0.192 1.141 0.000 0.603 -0.895 0.538 1.551 0.927 0.810 0.993 1.431 0.971 1.057 0.943 +0 1.641 0.717 1.329 0.630 0.732 1.082 1.218 -0.978 2.173 0.770 0.531 -0.528 0.000 1.294 -0.616 1.037 2.548 1.334 1.099 -0.094 0.000 0.725 0.886 0.983 1.112 2.166 1.245 1.099 +1 0.894 -0.474 -1.000 1.526 -1.260 1.156 -0.602 1.284 2.173 1.289 0.168 -0.187 0.000 1.041 -0.062 0.572 2.548 1.320 -0.612 0.147 0.000 0.944 0.791 0.997 1.352 0.899 1.021 1.049 +1 0.364 1.454 -0.411 0.631 -1.593 0.837 -0.839 0.571 2.173 0.544 0.446 0.114 2.215 0.333 0.356 -1.051 0.000 0.422 -0.618 -0.410 0.000 0.410 0.570 0.991 0.655 0.802 0.718 0.555 +0 0.516 -0.211 0.119 0.475 1.592 0.726 0.806 -0.200 0.000 0.850 0.869 -1.252 1.107 1.090 0.757 0.389 2.548 0.938 0.408 -1.735 0.000 1.165 1.036 0.993 0.641 1.019 0.744 0.657 +0 2.374 1.171 1.062 2.077 0.665 1.217 0.848 -0.764 0.000 1.411 0.543 -1.084 0.000 2.557 -0.345 -0.420 2.548 3.163 1.282 1.246 3.102 0.894 1.603 1.076 0.871 3.248 1.949 1.848 +0 0.799 0.368 -0.482 0.467 -0.352 0.316 -0.988 -0.934 0.000 0.657 0.243 0.090 0.000 0.786 1.265 1.706 2.548 0.785 -0.017 -1.676 0.000 0.906 0.696 0.985 1.044 0.428 0.898 0.759 +1 1.235 -1.050 -1.651 2.194 -1.243 1.117 -0.622 0.476 2.173 0.756 -2.133 0.000 0.000 0.332 -0.958 0.323 2.548 0.446 -0.124 -0.911 0.000 1.070 1.173 0.989 0.747 0.194 1.009 0.953 +1 1.127 -1.192 -1.016 1.348 -0.880 2.671 -0.242 0.583 0.000 2.840 -0.907 -1.188 2.215 2.267 -0.721 -1.701 2.548 1.467 -0.686 -0.269 0.000 0.822 1.215 0.986 1.177 1.218 1.093 1.085 +0 2.276 0.991 1.723 0.913 1.206 1.618 2.134 -0.080 0.000 1.048 -0.663 -0.238 1.107 2.018 0.037 1.667 2.548 0.491 -1.077 0.579 0.000 3.775 3.206 0.990 1.691 1.632 2.308 1.900 +0 1.483 -0.213 0.091 0.796 -0.037 0.985 -0.077 -1.649 0.000 0.339 -1.571 -1.605 2.215 0.842 0.679 1.578 0.000 0.653 -1.408 -0.002 3.102 0.790 0.876 0.991 0.859 0.422 0.762 0.908 +0 0.615 1.114 1.079 1.099 1.470 0.639 1.289 -0.238 2.173 0.366 2.484 1.666 0.000 0.428 2.048 -1.092 0.000 0.751 -0.073 1.091 0.000 1.077 0.901 0.987 0.700 0.288 0.695 0.682 +0 0.849 0.276 0.851 0.635 0.040 0.730 -0.870 -1.679 2.173 0.758 -1.595 -1.352 0.000 2.077 -0.270 0.237 2.548 0.789 0.614 1.505 0.000 1.596 1.014 0.989 0.973 1.581 1.086 1.100 +0 1.077 -1.263 -0.858 0.720 -0.059 1.068 -0.075 0.169 0.000 0.501 0.684 -1.698 0.000 1.469 -0.108 1.271 2.548 0.729 0.127 -0.127 3.102 1.013 0.888 0.986 1.308 0.760 0.904 1.371 +1 1.266 -0.335 1.009 0.620 0.061 0.845 -0.400 -0.762 2.173 1.073 0.082 -1.410 2.215 0.728 -0.369 0.625 0.000 0.369 0.606 -1.515 0.000 0.992 0.995 0.987 1.010 0.846 0.852 0.766 +1 0.480 0.801 -0.159 0.422 -1.456 1.339 
0.731 0.988 2.173 0.886 2.709 -0.635 0.000 0.944 1.516 -0.678 0.000 1.237 0.214 1.667 3.102 0.832 1.460 0.981 1.181 0.849 1.396 1.101 +1 0.890 0.494 0.776 0.709 -0.527 0.685 -0.550 0.238 0.000 1.269 0.562 1.722 2.215 0.700 -0.345 -0.875 2.548 0.848 0.881 -0.506 0.000 0.898 0.788 1.014 0.908 0.875 0.909 0.811 +1 2.806 0.620 0.101 0.453 1.103 0.413 0.025 -0.835 0.000 0.917 0.771 -1.359 1.107 0.965 -0.365 1.289 2.548 0.822 1.291 -1.608 0.000 0.939 0.923 1.228 1.223 0.933 0.981 0.860 +1 0.604 -1.043 -0.827 1.384 1.471 0.893 -0.674 0.736 2.173 0.799 -0.186 -0.531 2.215 0.651 0.513 -0.863 0.000 0.705 -1.569 -1.643 0.000 1.148 0.982 1.112 0.943 1.171 0.851 0.760 +0 0.834 -2.062 1.347 0.374 -0.352 1.033 -0.577 -1.516 1.087 1.606 -0.199 -0.028 0.000 0.574 -0.691 0.479 0.000 0.588 0.687 1.728 3.102 0.770 0.888 0.990 0.950 0.654 0.955 0.913 +0 1.232 1.907 0.835 0.193 0.742 0.449 0.029 1.720 0.000 0.791 -0.628 -0.046 2.215 0.579 -1.515 -1.671 0.000 1.152 0.672 -1.012 3.102 0.876 0.967 0.997 0.841 0.948 0.942 1.004 +1 1.229 2.078 -0.216 0.620 0.677 0.988 0.953 1.605 1.087 0.531 1.286 -0.967 2.215 0.688 0.536 0.273 0.000 0.682 0.577 -1.434 0.000 0.756 0.821 0.984 0.704 0.805 0.834 0.716 +0 2.532 -0.217 0.619 0.653 -0.989 1.040 1.135 -0.973 0.000 1.169 0.088 -0.978 1.107 1.378 0.893 0.753 2.548 0.593 0.691 1.384 0.000 1.038 0.947 1.768 1.209 1.484 1.125 1.121 +0 0.627 -1.430 0.114 1.034 0.642 0.748 -0.286 -0.797 0.000 1.539 0.593 -1.224 1.107 1.883 0.088 0.926 2.548 0.705 -1.818 0.458 0.000 0.815 0.902 0.978 1.888 1.753 2.048 1.564 +1 0.283 1.427 0.075 0.425 -0.866 1.130 0.452 1.346 2.173 0.678 -0.155 -0.161 2.215 1.124 1.134 -0.832 0.000 0.876 0.792 0.804 0.000 1.101 0.947 0.992 1.000 1.322 0.913 0.809 +1 0.778 -1.916 0.691 1.311 0.581 0.925 -0.145 -1.053 2.173 0.665 -0.694 1.558 1.107 0.645 -1.017 0.863 0.000 0.637 0.616 0.132 0.000 0.869 1.032 0.999 0.805 0.883 0.966 0.819 +1 0.551 2.247 -0.698 0.750 1.720 0.506 2.941 1.718 0.000 1.288 0.181 0.193 2.215 0.858 0.369 1.264 1.274 0.831 0.975 -1.014 0.000 0.955 0.979 0.979 0.745 0.927 0.847 0.785 +0 1.441 0.200 -1.023 0.467 1.211 0.846 -0.203 0.900 0.000 0.509 0.768 -1.608 0.000 1.307 -0.939 -0.578 2.548 0.829 -0.832 0.119 3.102 1.295 0.988 1.027 0.920 0.468 0.876 0.811 +1 0.773 -0.170 -1.302 0.821 0.541 0.955 0.922 0.635 0.000 1.074 -0.331 -0.732 2.215 0.619 -1.309 1.157 0.000 1.043 1.059 -1.140 3.102 0.616 0.914 1.099 0.814 0.923 0.922 0.814 +0 0.883 -0.428 -0.326 0.947 0.900 0.470 0.075 1.686 1.087 1.412 -1.894 -0.530 0.000 1.331 -1.205 1.495 1.274 1.249 -1.679 0.841 0.000 0.740 1.089 1.132 0.943 0.766 0.731 0.758 +1 0.756 1.551 1.190 1.277 -1.475 0.439 1.712 -0.152 0.000 0.483 0.122 -1.141 2.215 0.465 0.562 0.844 2.548 0.507 -1.055 0.298 0.000 0.501 0.597 0.992 0.614 0.507 0.523 0.718 +1 0.538 1.339 -0.884 0.305 -0.102 1.148 -0.012 1.392 0.000 1.312 -0.787 -0.553 2.215 0.440 2.354 -0.548 0.000 0.584 -0.218 0.755 3.102 2.505 1.400 0.983 0.892 0.762 1.345 1.057 +0 0.381 -1.726 -0.785 1.179 1.206 0.851 -0.432 -1.358 2.173 0.774 -0.610 -0.515 0.000 1.045 -0.941 0.361 0.000 1.235 0.546 0.674 3.102 1.019 1.045 0.987 0.857 1.219 0.913 0.834 +0 1.247 0.511 0.706 0.241 -1.230 1.034 -0.166 -0.018 2.173 1.387 0.527 -1.684 2.215 0.845 -0.583 -0.844 0.000 0.456 0.593 1.242 0.000 0.821 0.888 0.983 0.853 1.870 0.987 0.821 +1 1.318 0.765 -0.151 1.082 -1.420 1.142 0.543 0.703 0.000 0.818 0.695 -1.469 2.215 1.447 -0.088 -0.668 2.548 0.448 0.169 1.130 0.000 0.452 1.001 1.505 0.990 0.904 0.906 0.875 +1 0.563 1.065 1.005 0.844 -0.793 0.517 0.318 1.367 0.000 0.449 0.978 
-0.391 2.215 0.776 1.953 -1.742 0.000 1.355 -0.586 0.105 3.102 0.910 0.760 0.990 0.555 0.744 0.610 0.567 +0 0.833 0.442 -0.225 0.456 0.511 0.813 0.345 0.351 0.000 0.898 0.498 -1.580 2.215 1.242 -1.945 1.324 0.000 1.501 0.022 -0.902 3.102 0.804 1.048 0.995 0.858 0.655 0.747 0.718 +1 1.157 0.183 0.884 1.141 -1.690 2.526 -0.386 -0.286 0.000 1.508 0.198 1.264 2.215 1.547 0.356 -1.201 2.548 1.584 -0.168 1.604 0.000 1.133 1.015 1.166 0.710 1.301 0.894 0.800 +1 2.328 0.425 -1.043 0.560 -1.328 0.831 -1.625 0.213 0.000 1.646 1.078 1.251 2.215 0.407 0.944 0.779 0.000 0.640 0.372 -0.262 3.102 1.926 1.203 0.986 1.552 0.959 1.508 1.381 +0 0.365 -0.841 -1.541 1.557 -0.314 1.042 0.021 1.306 2.173 0.521 -0.311 -0.439 0.000 0.875 -0.316 0.802 2.548 0.525 0.908 0.837 0.000 0.943 0.852 0.989 1.174 0.567 0.822 0.797 +1 0.430 -0.455 -0.570 0.815 0.553 1.132 2.503 -0.304 0.000 1.072 1.114 0.909 0.000 1.280 0.782 1.550 2.548 2.319 -0.075 -1.371 1.551 2.714 2.020 0.994 0.966 0.913 1.773 1.387 +0 0.856 -2.098 1.384 0.484 0.701 1.015 -1.954 -1.572 0.000 1.188 -0.449 -0.108 2.215 0.972 1.463 -0.076 0.000 0.556 0.802 -1.442 3.102 0.895 1.244 0.998 1.075 0.889 1.097 0.952 +0 1.025 0.897 -0.912 0.524 0.696 0.797 -0.436 -0.180 0.000 0.928 1.565 1.715 2.215 1.148 -0.015 0.360 0.000 1.035 -0.719 1.359 3.102 0.884 0.963 1.008 0.789 1.424 1.183 0.983 +0 0.511 -0.434 0.166 0.640 -1.714 0.918 -2.725 0.394 0.000 0.951 0.625 -1.362 2.215 1.180 -0.370 -0.608 0.000 1.095 0.657 1.490 3.102 0.802 0.869 0.989 0.594 0.503 0.638 0.590 +1 0.397 -0.696 -1.173 0.872 0.475 0.849 -2.905 -0.749 0.000 1.295 -1.408 1.239 0.000 1.736 -0.465 0.881 2.548 1.464 -0.166 -0.866 3.102 1.197 1.673 0.987 0.893 1.233 1.492 1.153 +1 0.870 0.361 1.113 0.346 -0.233 0.506 -1.152 1.644 0.000 0.601 1.296 -0.370 2.215 0.421 -0.451 0.859 0.000 0.764 -1.909 -1.335 0.000 0.861 0.690 0.988 0.862 0.746 0.892 0.766 +0 1.682 0.128 0.970 0.416 0.076 0.586 2.046 -0.572 0.000 0.479 0.998 -0.740 2.215 0.723 1.211 1.736 2.548 0.389 -0.334 -1.296 0.000 0.850 0.641 0.987 0.870 0.503 0.699 0.630 +0 0.468 -0.815 -0.828 0.749 1.548 1.598 -0.947 1.533 2.173 1.297 -2.146 -0.058 0.000 1.237 -1.172 0.041 2.548 0.757 -2.101 -0.844 0.000 0.864 0.804 0.989 1.001 1.733 1.341 1.064 +1 2.764 1.050 -0.550 0.151 -0.416 2.144 1.522 0.999 1.087 1.191 1.292 -1.276 2.215 1.279 -0.728 -0.640 0.000 1.179 1.113 0.185 0.000 1.218 1.106 0.975 2.149 2.100 1.562 1.257 +0 2.062 0.014 -0.600 0.400 -0.976 1.204 -0.282 1.243 0.000 0.836 0.717 0.073 2.215 1.183 0.450 -1.570 1.274 0.847 -0.239 0.423 0.000 1.039 1.054 0.980 0.934 1.061 0.911 0.963 +1 1.337 -0.155 1.073 0.332 -0.145 1.713 0.572 0.028 0.000 1.420 0.283 -1.242 1.107 0.844 -0.075 0.641 0.000 2.043 -0.510 -1.462 3.102 0.796 0.968 0.992 0.860 0.780 0.828 0.741 +0 3.188 0.565 -0.604 0.343 1.289 1.171 -0.856 0.975 2.173 1.215 -1.590 1.140 0.000 1.383 0.444 -1.281 2.548 0.776 -1.364 0.321 0.000 0.851 0.825 1.435 0.961 1.829 1.452 1.539 +0 0.394 -0.168 -1.084 1.191 1.338 0.870 -0.620 -0.112 2.173 0.780 1.481 -1.417 0.000 0.766 0.087 0.461 0.000 0.717 0.819 0.753 3.102 1.494 0.887 0.992 1.251 0.951 0.967 0.867 +1 0.658 0.039 -1.411 0.387 -0.695 1.244 1.141 0.649 2.173 0.937 0.882 -1.310 0.000 0.433 0.021 -0.952 0.000 0.507 0.151 -0.117 1.551 0.585 1.250 0.992 0.589 0.683 1.069 0.940 +0 0.436 1.956 -1.347 1.790 0.403 0.511 0.255 0.113 2.173 0.395 1.232 0.113 0.000 0.611 -0.921 -1.693 0.000 1.136 0.056 -0.812 0.000 0.942 0.876 1.224 1.169 0.935 0.943 0.955 +0 0.718 0.965 -1.612 1.311 -0.839 0.350 0.261 0.365 0.000 0.357 1.368 -0.437 2.215 0.916 -0.385 
1.403 0.000 1.261 0.737 0.716 3.102 0.894 0.826 0.985 0.877 0.546 0.607 0.652 +1 0.497 -1.434 -0.793 0.964 1.629 0.830 -0.568 1.511 2.173 0.563 0.620 -0.467 0.000 0.918 1.857 0.159 0.000 1.287 0.613 0.005 0.000 0.855 0.726 0.986 1.114 0.748 1.073 1.666 +0 0.658 -0.495 -1.309 0.750 0.946 0.876 0.225 -0.841 2.173 0.790 -1.090 0.250 2.215 1.828 -1.017 1.133 0.000 0.698 -0.498 -0.612 0.000 1.037 0.810 0.984 0.850 1.352 0.887 0.787 +1 0.356 1.631 -0.610 0.621 0.080 0.564 0.349 -1.296 1.087 0.596 -0.956 0.615 0.000 0.586 1.346 0.885 1.274 0.757 -1.327 -1.509 0.000 0.868 1.016 0.988 0.696 0.786 0.854 0.764 +1 0.427 0.188 1.017 0.791 0.332 1.285 0.661 -0.763 0.000 1.649 0.496 1.248 1.107 1.441 1.003 0.656 2.548 0.698 2.192 -0.972 0.000 0.995 1.392 0.996 1.296 0.967 1.165 1.196 +0 2.552 -0.487 1.658 1.232 -1.677 1.837 -1.508 0.922 2.173 2.644 -0.250 -0.485 2.215 1.559 -0.601 -0.852 0.000 1.947 1.637 -0.085 0.000 0.863 0.909 1.001 2.105 3.794 2.214 1.807 +0 0.840 -1.319 -1.679 0.838 -1.306 1.391 -0.813 0.719 0.000 1.611 -0.955 -0.779 2.215 0.570 -0.575 1.128 0.000 0.846 0.488 0.116 3.102 0.596 0.887 0.993 1.166 1.189 1.121 1.182 +0 0.739 1.087 -1.282 1.109 -1.450 1.483 0.446 0.142 0.000 1.470 -0.108 1.493 1.107 0.846 0.132 -0.686 0.000 0.803 0.230 0.994 3.102 0.515 0.681 0.983 0.859 0.467 0.955 0.928 +1 0.898 -0.664 1.556 0.401 -0.545 0.972 0.123 -0.467 2.173 0.638 1.021 0.354 2.215 0.872 -0.025 -1.681 0.000 0.658 -1.955 -1.604 0.000 0.864 0.958 0.988 0.833 0.959 0.758 0.683 +1 0.864 0.614 0.351 1.013 0.405 1.575 -0.541 -1.472 0.000 1.050 -2.306 0.149 0.000 1.868 0.354 1.285 1.274 3.688 0.712 0.162 0.000 1.247 0.907 1.002 1.174 0.921 0.937 0.869 +0 0.778 -1.619 0.257 0.378 0.972 0.490 -0.673 1.066 2.173 0.916 -1.440 -1.250 2.215 0.633 -0.819 -0.366 0.000 0.524 -2.262 -1.065 0.000 0.758 0.832 0.997 0.899 0.948 0.685 0.639 +0 0.832 0.422 0.321 1.179 1.260 0.630 -0.598 0.019 2.173 1.309 -0.206 -1.431 0.000 0.981 0.225 -0.914 0.000 0.879 -0.129 0.607 3.102 0.878 0.931 1.029 0.925 0.436 0.789 0.796 +1 1.141 0.635 -1.644 1.268 0.673 0.584 0.147 -0.083 0.000 0.361 -0.637 -0.006 0.000 0.951 0.217 -0.911 0.000 0.815 -0.486 -1.181 1.551 0.911 0.764 1.448 0.930 0.380 0.617 0.671 +1 1.026 0.159 1.676 1.249 0.824 0.395 -0.639 0.484 0.000 0.863 0.156 -1.167 1.107 0.897 0.452 -0.108 0.000 1.267 -0.699 1.606 0.000 0.922 0.953 1.089 0.891 1.097 0.804 0.718 +0 1.296 -1.030 -0.858 0.654 0.402 1.186 1.973 1.195 0.000 1.049 -0.218 -0.004 2.215 0.474 1.870 0.840 0.000 1.551 -1.948 -0.987 0.000 0.427 0.955 1.156 0.865 1.005 1.261 1.537 +1 1.078 -0.025 -1.273 0.359 -1.625 0.771 0.132 0.473 0.000 1.017 -1.006 -1.164 2.215 0.964 0.680 1.008 0.000 0.576 -0.394 1.385 0.000 0.865 0.846 0.999 0.621 0.321 0.919 0.831 +0 0.884 1.192 1.472 1.034 -1.087 0.461 -0.508 -0.539 2.173 0.336 2.143 1.462 0.000 0.295 -1.268 0.483 2.548 0.687 -2.103 0.732 0.000 0.317 1.005 0.987 0.968 0.421 0.808 0.733 +1 1.067 0.321 0.776 0.499 -0.763 0.971 0.551 -0.717 0.000 0.991 -1.113 0.787 2.215 0.460 -0.328 -1.228 0.000 1.102 -0.248 -0.243 3.102 0.781 0.672 0.994 0.938 0.860 0.931 0.811 +0 1.888 -0.475 -1.610 0.323 0.913 0.684 0.302 0.071 0.000 0.791 0.507 1.203 2.215 0.559 2.589 -0.027 0.000 0.894 -1.106 -0.344 1.551 1.583 0.978 0.986 0.867 1.099 0.881 0.863 +1 0.490 0.415 -0.537 0.749 1.116 0.534 0.910 -0.279 2.173 0.819 -1.406 -0.643 2.215 0.861 -0.765 0.891 0.000 0.753 -1.167 -1.495 0.000 0.785 0.828 0.986 0.814 1.472 0.929 0.782 +0 0.316 -1.311 -1.613 1.057 -1.597 0.803 -0.175 0.214 0.000 1.136 0.740 -0.709 2.215 1.049 -0.626 0.718 0.000 1.245 
-0.716 1.490 3.102 0.840 0.925 1.001 0.905 1.381 1.014 0.913 +1 1.509 0.583 -1.271 0.372 0.720 1.105 0.828 -0.514 0.000 1.513 1.241 1.663 2.215 1.115 2.273 0.793 0.000 1.204 -0.977 0.672 0.000 0.985 0.816 1.012 0.852 0.854 0.896 0.787 +0 0.857 -1.512 0.859 1.709 1.270 1.112 -1.249 -0.802 2.173 0.426 -1.308 1.644 0.000 0.774 -0.828 -0.413 2.548 0.825 -0.165 0.220 0.000 0.876 0.997 0.988 1.079 0.446 1.089 0.923 +1 1.914 0.512 1.178 0.482 -0.335 0.856 2.169 -0.792 0.000 0.531 1.089 0.409 2.215 0.366 2.585 -0.632 0.000 0.692 1.270 1.553 0.000 1.083 0.964 1.303 0.687 0.185 0.576 0.774 +1 0.593 0.578 0.587 1.288 -1.495 0.890 -0.151 0.006 0.000 0.868 -1.000 0.969 2.215 0.799 1.066 -1.692 0.000 1.150 -0.580 -1.003 3.102 0.850 0.984 1.156 1.115 0.895 0.935 0.837 +1 2.169 0.924 -0.398 1.800 0.691 0.682 2.837 1.251 0.000 1.286 0.438 -1.730 2.215 0.575 -0.385 0.653 1.274 0.534 -2.218 -1.633 0.000 7.300 3.858 2.276 1.706 0.870 2.333 2.017 +1 0.670 -0.159 0.845 1.888 0.349 0.710 -1.093 -0.980 2.173 0.973 -0.508 -1.646 2.215 0.743 -1.494 -0.178 0.000 0.597 -1.360 -1.658 0.000 0.714 0.850 0.981 1.145 0.778 0.948 0.791 +0 0.643 -0.160 0.658 0.928 -0.854 1.666 0.048 -0.297 2.173 1.737 1.364 1.365 2.215 1.240 0.589 1.091 0.000 0.435 -0.050 -1.033 0.000 0.817 0.905 1.047 0.912 3.095 1.496 1.171 +0 1.329 0.368 -1.029 0.887 0.702 0.549 -1.242 -0.139 0.000 0.628 1.698 1.511 0.000 1.272 0.438 1.487 2.548 0.577 -0.290 -0.968 0.000 0.757 0.809 1.504 0.941 0.888 0.799 0.794 +1 0.646 -0.079 0.632 0.908 0.366 1.965 -1.816 -0.858 0.000 1.585 0.914 1.186 0.000 2.287 -0.214 0.381 1.274 0.670 0.470 -1.525 0.000 0.901 0.781 0.988 0.812 0.844 0.949 0.995 +0 0.701 1.397 1.626 1.127 0.368 0.998 1.010 1.035 0.000 0.678 -2.830 -0.874 0.000 1.099 1.285 -0.450 0.000 0.646 0.820 -0.920 3.102 1.870 1.004 1.115 0.668 0.244 0.639 0.612 +1 0.370 -0.049 -0.070 1.259 -1.072 0.369 -2.238 -1.130 0.000 0.646 -1.036 -0.590 0.000 1.338 -0.597 0.622 2.548 0.704 -0.615 1.167 0.000 0.890 0.808 0.993 1.498 0.555 1.123 0.944 +1 1.285 2.083 -0.454 0.720 -0.119 1.384 0.829 1.555 2.173 0.665 1.172 0.535 2.215 0.387 1.807 0.631 0.000 0.854 0.912 -0.705 0.000 0.660 1.008 1.002 0.762 1.153 1.070 0.832 +0 1.794 -0.777 -0.021 0.726 0.703 0.740 -1.722 -1.728 0.000 0.688 -1.157 -1.512 2.215 0.536 -1.177 -1.076 0.000 0.803 -0.716 0.360 3.102 0.679 0.773 0.988 0.978 0.676 0.638 0.724 +1 0.697 1.341 1.042 1.133 -1.384 0.481 0.391 1.698 2.173 0.549 1.089 -0.615 0.000 0.863 -0.714 0.304 1.274 0.534 -2.428 -0.670 0.000 2.609 1.545 1.006 0.665 0.914 1.078 1.208 +0 1.805 -0.439 -1.416 0.964 1.546 0.661 1.121 0.040 0.000 1.099 -0.119 0.511 0.000 1.699 -0.747 1.469 2.548 2.365 -0.120 -0.179 3.102 0.904 0.975 0.976 0.774 1.616 1.118 1.066 +0 2.005 -0.780 1.343 1.628 -1.403 0.899 -0.035 -0.554 2.173 0.744 -0.269 0.379 0.000 1.063 -0.835 0.114 1.274 0.556 0.215 1.083 0.000 0.574 0.724 1.552 1.248 0.885 1.130 0.911 +1 1.792 0.188 1.387 0.805 0.387 1.272 0.332 -0.892 2.173 0.804 -1.737 -0.031 0.000 0.939 0.473 -1.612 0.000 0.587 0.418 0.229 1.551 0.884 1.108 1.304 0.702 0.779 0.908 0.808 +0 1.584 0.397 -0.001 0.252 1.398 0.840 0.213 -0.808 0.000 1.381 -0.530 1.105 2.215 0.490 0.491 -1.086 2.548 0.860 -0.746 1.456 0.000 1.385 0.793 0.986 1.180 0.946 0.852 0.846 +0 0.309 -1.721 1.566 1.169 0.150 1.557 -0.371 1.438 2.173 0.775 -1.204 -0.409 0.000 1.584 -0.062 -0.199 2.548 0.759 -0.579 -1.349 0.000 0.797 0.857 0.990 1.170 1.971 1.127 0.940 +0 0.887 -0.630 -0.941 0.995 -0.273 0.858 -1.629 1.065 0.000 0.905 -0.649 1.520 2.215 1.480 -0.679 -1.267 1.274 3.871 0.355 -0.349 0.000 
1.084 1.419 0.979 0.968 0.727 1.109 1.037 +1 1.758 0.308 1.438 0.345 0.628 1.307 0.344 -0.917 0.000 0.924 0.460 0.473 1.107 0.270 1.208 -0.793 2.548 0.391 2.107 0.136 0.000 1.694 0.887 0.987 0.808 0.536 0.817 0.830 +1 2.097 0.802 -1.582 0.214 -0.718 0.456 0.363 0.713 2.173 0.539 -0.042 -1.229 0.000 0.728 1.051 0.119 0.000 0.941 0.309 0.322 0.000 0.935 0.710 0.992 0.632 0.381 0.590 0.573 +1 0.414 1.530 -0.780 0.994 0.935 0.516 1.260 -0.115 0.000 0.571 -0.540 0.635 2.215 1.095 -0.984 -0.978 2.548 0.869 -1.115 1.713 0.000 2.023 1.271 0.986 2.051 0.864 1.439 1.317 +0 1.402 -1.683 1.204 1.805 0.599 0.769 -0.076 -0.732 2.173 0.595 0.165 0.438 2.215 1.074 -0.628 -1.115 0.000 1.104 -1.570 -0.880 0.000 0.803 0.894 1.142 1.224 0.875 1.254 1.108 +1 0.475 -0.712 -1.352 1.288 -0.334 0.712 1.198 1.027 2.173 0.571 1.816 0.409 0.000 0.654 -2.393 -0.742 0.000 0.867 2.139 1.645 0.000 0.885 0.798 0.989 1.269 0.486 1.287 1.438 +1 0.742 1.852 1.386 0.639 0.232 0.490 0.552 0.033 1.087 0.492 1.611 -1.620 0.000 0.409 -0.026 -1.017 0.000 0.727 -0.617 -1.678 3.102 0.743 0.769 0.989 1.363 0.769 0.995 0.828 +1 1.247 -0.716 0.696 0.583 0.179 0.794 -1.050 -1.493 2.173 0.537 0.040 -0.112 0.000 0.607 -0.864 0.922 0.000 1.242 -0.075 -0.759 3.102 0.848 0.741 0.988 1.049 0.835 0.844 0.732 +1 0.377 0.446 -0.612 2.216 -0.188 1.277 -0.178 1.713 2.173 0.295 -0.052 -0.094 1.107 0.553 -1.251 1.230 0.000 0.612 0.355 0.667 0.000 0.729 0.876 0.995 0.724 0.903 1.343 1.162 +0 1.483 0.433 1.268 0.545 0.255 0.897 0.396 -0.972 2.173 0.570 1.255 0.318 0.000 0.757 0.136 0.540 0.000 0.521 -0.534 -0.327 3.102 0.640 1.089 0.987 0.674 0.560 0.717 0.686 +1 0.528 -1.512 1.715 0.989 0.051 1.072 -0.679 -1.286 0.000 0.485 0.512 0.327 2.215 0.826 -0.771 -0.759 0.000 1.118 -0.743 1.184 3.102 0.787 0.912 0.999 0.909 0.691 0.780 0.770 +0 0.545 -0.423 -0.510 1.760 -0.147 0.555 -1.948 0.966 0.000 0.721 -0.480 0.987 2.215 1.208 -1.157 -1.244 2.548 0.937 0.104 -1.626 0.000 0.868 0.910 0.993 1.042 0.980 1.046 1.148 +0 0.887 0.530 -0.956 0.824 1.335 1.367 1.612 -1.144 2.173 1.163 2.373 -0.251 0.000 1.802 0.177 1.052 2.548 1.108 1.003 -0.451 0.000 1.011 1.326 1.043 0.865 2.354 1.553 1.263 +0 0.527 -0.374 -0.694 1.869 -1.658 1.068 1.084 0.710 2.173 0.981 -0.077 0.079 2.215 0.814 0.055 0.900 0.000 2.625 0.687 -0.914 0.000 1.729 1.333 1.048 1.609 1.240 1.197 1.137 +1 0.860 -0.528 -0.252 1.554 0.298 1.009 -0.986 -1.312 1.087 0.286 -0.434 0.575 0.000 0.450 -0.105 1.179 2.548 0.619 -2.089 1.500 0.000 0.864 1.373 0.984 0.715 0.758 0.861 0.919 +0 0.529 1.705 1.662 0.511 1.182 1.452 1.152 -0.234 0.000 1.243 -1.470 1.059 0.000 2.123 1.197 0.251 0.000 2.588 0.747 -1.293 1.551 1.357 1.865 0.981 0.984 1.395 1.819 1.404 +1 0.392 1.103 0.857 2.116 -0.239 0.851 -0.056 -0.898 2.173 1.349 1.445 0.024 0.000 1.478 2.381 -1.717 0.000 0.926 0.314 1.667 3.102 0.843 1.023 1.053 0.896 0.723 0.858 0.776 +1 0.623 0.360 -1.365 2.851 1.630 2.164 0.858 -0.136 0.000 0.727 0.195 0.221 2.215 0.607 1.407 0.750 0.000 0.941 -0.301 -1.676 3.102 0.833 1.175 0.987 0.762 0.770 0.923 0.933 +0 0.841 0.707 -1.089 0.359 -0.476 0.831 0.144 0.148 0.000 1.344 0.650 1.663 1.107 1.388 0.130 0.688 0.000 0.897 -0.099 -0.782 3.102 0.903 0.881 0.993 0.965 0.894 0.941 0.935 +0 0.553 1.368 1.203 2.258 -1.112 1.361 0.528 0.458 2.173 0.667 0.917 -0.262 0.000 1.262 1.369 -1.497 1.274 1.013 1.434 0.519 0.000 0.813 0.963 1.347 1.733 1.802 1.243 1.021 +0 1.224 0.618 1.165 1.217 -0.165 1.174 -0.306 0.066 0.000 3.201 0.394 1.724 1.107 1.916 0.476 -0.123 2.548 0.635 -0.432 -0.511 0.000 0.669 0.787 1.575 1.661 2.625 1.643 
1.370 +1 1.062 -0.368 -0.273 0.586 0.334 0.794 0.652 1.304 2.173 1.295 -1.553 -0.774 0.000 1.079 -0.608 1.732 2.548 0.918 0.682 -1.670 0.000 0.926 0.839 0.990 1.318 0.929 0.960 0.829 +0 1.890 -1.100 0.880 1.248 1.383 1.303 -0.241 -0.302 0.000 0.830 -0.872 -1.581 2.215 0.931 -0.651 -1.013 2.548 0.555 0.784 -0.541 0.000 0.861 0.847 0.995 0.883 0.466 0.803 1.067 +0 0.588 -0.276 -0.088 0.839 -1.011 0.673 0.712 0.626 2.173 0.501 -2.060 1.521 0.000 0.927 -0.517 -0.966 0.000 0.670 -0.090 1.232 3.102 1.214 0.843 0.981 0.840 0.477 0.939 0.880 +1 0.708 0.755 1.368 0.743 -1.644 1.104 1.171 -0.757 2.173 0.606 -0.025 0.501 1.107 0.546 2.076 0.271 0.000 0.550 1.854 1.346 0.000 0.498 0.956 1.000 1.004 1.336 0.958 0.808 +0 3.514 -0.145 -0.297 2.003 -0.307 2.268 -0.268 1.350 0.000 1.383 -0.430 -1.705 0.000 1.337 0.900 0.265 2.548 0.765 -1.000 -0.929 3.102 1.153 0.908 1.022 1.185 1.231 0.987 1.184 +0 1.357 0.731 1.307 0.664 -0.481 0.774 -0.248 -1.477 2.173 0.794 -0.632 -0.685 0.000 1.414 0.133 0.231 2.548 0.522 -1.026 1.021 0.000 0.874 0.915 1.314 1.006 1.329 0.897 0.842 +1 0.934 0.165 -1.524 0.660 1.114 0.892 -0.409 -1.015 2.173 0.833 1.285 0.759 0.000 1.139 1.147 0.103 0.000 1.196 0.920 -0.743 3.102 0.835 0.863 0.988 0.833 0.949 1.070 0.995 +0 1.368 -1.408 -1.029 0.714 -0.942 0.587 0.520 0.622 0.000 0.761 -0.936 0.100 2.215 1.275 -1.902 1.018 0.000 0.687 1.600 -0.570 0.000 0.773 0.928 0.998 0.488 0.508 0.594 0.736 +0 0.717 -0.650 -0.737 0.321 1.169 0.749 -0.160 0.282 0.000 0.720 -0.875 0.795 0.000 1.187 -0.282 -1.540 1.274 0.887 0.443 -1.080 3.102 0.911 0.991 0.989 0.674 0.464 0.788 0.691 +0 0.542 0.941 -1.728 0.745 0.486 0.511 1.309 0.718 0.000 0.611 0.862 -1.237 2.215 0.795 -0.545 -0.930 1.274 1.044 1.852 0.018 0.000 0.839 0.923 0.989 1.128 0.635 0.892 0.800 +0 0.408 1.841 -0.468 1.123 0.406 0.765 0.233 1.708 0.000 0.651 1.080 -0.635 2.215 0.730 -0.728 0.766 2.548 0.844 0.035 -0.914 0.000 0.869 0.875 0.991 0.846 1.071 0.740 0.742 +0 0.856 1.730 -1.730 1.044 0.652 0.647 1.154 -1.230 0.000 0.537 -0.506 -0.172 2.215 0.685 0.735 0.373 0.000 0.572 0.032 0.174 3.102 0.741 0.916 1.098 0.781 0.209 0.809 0.758 +1 1.398 2.073 -1.214 0.434 1.740 0.714 1.332 -0.240 2.173 0.976 0.850 0.639 2.215 1.148 1.357 1.321 0.000 0.639 1.662 -0.918 0.000 0.885 0.957 0.974 1.097 0.922 0.871 0.766 +1 0.559 -0.851 -1.583 0.328 -0.803 0.910 0.304 0.667 2.173 0.629 -0.249 0.859 0.000 0.463 0.462 -0.101 2.548 0.685 2.314 -1.369 0.000 0.597 0.553 0.986 0.575 0.523 0.609 0.525 +0 1.663 1.538 -0.389 0.305 -1.516 1.009 -0.560 -0.434 2.173 1.197 0.481 -1.532 0.000 1.106 0.311 0.359 2.548 1.572 0.760 1.465 0.000 0.972 0.958 0.990 1.782 1.061 1.229 1.226 +0 1.938 -0.591 1.466 0.822 1.004 0.961 -0.454 -0.354 0.000 0.863 0.568 -0.305 2.215 1.906 -0.804 -1.551 2.548 1.266 -0.793 0.191 0.000 0.898 0.927 0.979 0.924 1.645 1.077 1.084 +0 1.167 1.783 0.030 0.351 1.732 0.416 0.938 1.281 1.087 1.013 -0.385 -1.461 0.000 0.876 1.115 -1.232 2.548 0.589 -0.460 0.944 0.000 0.840 0.685 0.984 0.704 0.587 0.572 0.562 +1 0.990 -0.835 -1.465 0.734 -0.932 1.838 -0.029 0.694 2.173 1.594 -0.916 -1.165 0.000 0.564 0.685 -0.190 0.000 0.487 -0.836 1.678 1.551 1.780 1.003 0.984 1.925 0.926 1.285 1.204 +1 0.485 1.319 -1.041 1.556 1.382 0.999 0.650 -0.249 2.173 1.094 -0.010 -1.419 0.000 1.414 -0.660 0.703 0.000 0.489 -0.435 -0.457 1.551 1.928 1.085 0.987 1.207 0.481 1.006 1.050 +0 0.594 -0.004 1.207 0.869 -0.512 0.926 -1.453 1.233 2.173 0.859 -2.684 -0.654 0.000 1.067 -0.297 -0.096 0.000 0.899 -1.031 -1.290 3.102 0.906 0.770 0.996 1.128 0.745 0.788 0.953 +1 1.291 
-0.469 1.256 0.469 1.557 1.836 -1.349 0.097 0.000 0.685 -0.022 -1.690 0.000 1.020 0.438 -1.310 2.548 0.693 -0.865 1.609 1.551 0.831 0.978 0.987 0.757 0.622 1.132 1.114 +0 0.868 0.627 0.592 0.610 1.451 0.677 -0.728 -0.224 2.173 0.720 -0.419 -1.569 2.215 0.651 0.646 -1.739 0.000 0.935 0.400 -0.212 0.000 0.850 0.924 0.993 0.739 0.975 0.732 0.678 +1 0.703 -1.245 1.163 1.668 -1.532 0.579 -0.761 0.810 0.000 1.964 -0.696 -0.072 2.215 1.074 -1.236 -0.786 2.548 1.698 -0.651 1.516 0.000 0.898 1.075 0.987 1.513 1.054 1.045 0.980 +0 0.716 -0.978 0.200 1.050 1.346 0.849 0.058 1.121 0.000 0.862 -2.592 -0.072 0.000 1.019 -1.172 -1.331 0.000 1.027 -0.264 -0.950 0.000 0.964 1.180 1.032 0.526 0.497 0.702 0.706 +1 0.917 0.842 -0.130 0.501 -1.405 1.104 -0.256 -1.358 0.000 0.809 -0.799 0.742 2.215 1.258 0.413 0.178 2.548 0.781 1.803 1.164 0.000 1.675 1.356 0.990 0.671 0.899 1.081 0.941 +1 1.326 -0.563 -1.171 0.340 -0.340 1.137 1.143 1.175 0.000 1.178 -0.776 0.130 0.000 1.314 -0.077 0.490 0.000 1.184 0.016 -0.874 3.102 0.924 1.026 0.984 0.797 0.765 0.915 0.832 +0 1.934 0.779 1.371 0.737 1.156 0.972 0.190 -1.561 2.173 0.607 -2.804 0.086 0.000 2.011 0.353 -0.281 2.548 1.132 0.865 -0.506 0.000 3.806 2.781 1.007 1.525 1.602 1.954 2.001 +1 1.225 0.256 -0.608 0.881 1.632 0.898 -0.756 0.013 0.000 0.376 -2.484 0.975 0.000 1.842 0.315 1.530 2.548 0.831 -0.587 -0.875 0.000 0.948 0.983 1.297 0.974 0.919 0.997 0.895 +1 0.539 -1.577 -1.265 0.799 1.298 0.989 -1.023 0.331 0.000 1.523 -0.945 1.589 2.215 1.371 -0.996 -0.564 0.000 0.896 2.078 -0.669 0.000 0.692 0.823 0.991 0.682 0.926 0.928 0.799 +0 2.334 1.293 -0.019 1.183 0.293 1.236 -0.786 -1.599 2.173 0.493 0.472 -0.753 0.000 1.005 -1.500 1.686 0.000 0.919 0.239 1.229 3.102 1.636 1.094 0.974 2.542 0.898 1.593 1.485 +1 0.413 1.511 1.706 2.402 0.955 1.013 -1.805 -0.802 0.000 1.075 0.203 -0.972 1.107 0.838 0.168 0.162 0.000 1.127 0.653 0.909 3.102 2.329 1.836 0.994 0.820 1.028 1.363 2.339 +0 1.202 0.037 -1.413 0.803 0.671 0.474 -0.763 -1.189 0.000 0.624 0.572 0.328 2.215 0.609 -0.685 0.131 2.548 0.420 0.124 0.900 0.000 0.725 0.795 1.297 0.794 0.487 0.620 0.584 +1 0.449 -0.118 -1.668 1.308 -0.073 0.689 -0.098 0.390 0.000 0.966 -1.208 -1.169 2.215 1.167 -0.072 -1.208 2.548 1.161 -0.831 0.846 0.000 0.836 1.108 1.052 0.974 0.696 0.903 0.805 +0 1.249 0.091 1.097 1.708 0.650 0.732 2.362 -0.307 0.000 1.063 -0.443 -0.543 1.107 0.825 -1.444 -1.586 0.000 1.767 0.024 -1.304 1.551 5.156 2.949 0.999 1.226 0.840 1.909 1.704 +0 0.537 0.644 1.467 0.587 -1.407 0.716 1.003 0.837 2.173 0.917 -1.062 -0.452 0.000 0.473 -0.603 0.115 2.548 0.628 1.015 -0.906 0.000 1.467 0.859 0.980 1.023 0.810 0.930 0.831 +1 2.472 0.928 1.701 0.584 -0.326 0.481 -0.275 1.691 0.000 1.092 1.350 0.111 2.215 0.608 -0.277 0.255 0.000 1.066 -0.340 -0.865 1.551 0.936 0.709 1.611 1.333 1.250 1.043 0.948 +1 1.000 1.469 0.261 1.867 1.012 0.708 0.019 -0.894 2.173 0.487 2.141 -1.231 0.000 0.507 0.817 -1.691 2.548 0.468 -1.426 -1.022 0.000 2.163 1.195 1.187 1.520 0.596 0.972 1.051 +1 0.850 -0.237 -0.382 0.637 -1.503 1.128 2.147 0.884 0.000 1.200 0.428 0.080 2.215 2.040 0.007 -1.190 1.274 1.093 -0.507 -1.715 0.000 3.412 2.342 0.986 0.795 1.557 1.813 1.559 +1 0.627 1.697 -0.055 1.096 1.695 0.320 -0.088 1.175 2.173 0.319 0.186 -0.473 2.215 0.292 0.539 1.416 0.000 0.386 -2.239 -1.075 0.000 0.964 0.698 1.148 0.762 0.473 0.642 0.889 +0 1.575 -0.050 1.272 0.972 1.511 1.048 -0.777 0.973 2.173 2.090 0.055 -0.800 2.215 0.934 -1.374 -0.001 0.000 0.488 0.378 -0.060 0.000 0.855 1.015 0.967 1.581 2.372 1.408 1.212 +0 0.782 0.063 -0.724 1.321 
0.190 1.207 -0.575 -0.053 0.000 1.095 -1.320 1.469 0.000 1.798 -0.189 1.512 2.548 1.488 -0.252 -1.327 3.102 2.584 1.693 1.034 1.138 0.691 1.219 1.096 +0 0.489 1.233 -0.441 2.737 -0.426 1.564 0.436 1.624 0.000 1.289 -0.223 0.108 2.215 0.849 0.901 1.278 0.000 1.327 -0.532 1.424 3.102 0.861 0.926 0.989 0.917 1.121 1.138 1.232 +0 1.150 -1.923 -1.481 0.482 0.158 1.051 -0.782 1.208 2.173 0.531 -0.362 0.894 0.000 1.475 -0.673 -0.398 2.548 1.018 0.867 -0.867 0.000 1.207 1.119 1.028 1.076 1.540 1.028 1.053 +1 1.157 0.517 0.064 0.803 1.511 0.963 0.605 1.346 0.000 1.280 -0.657 -0.821 0.000 0.389 -1.208 0.563 1.274 0.787 0.492 -0.646 3.102 2.724 1.537 1.289 0.821 0.601 0.973 0.895 +0 1.136 -0.207 -1.172 0.820 1.312 0.642 -0.194 -0.171 2.173 0.405 -1.910 1.253 0.000 0.388 -1.515 -0.831 0.000 1.273 -0.110 1.006 3.102 0.721 0.871 1.050 0.710 0.836 0.690 0.659 +1 0.924 1.972 1.221 1.063 -0.028 1.089 1.253 0.102 2.173 1.126 1.079 1.714 0.000 1.082 0.772 -1.177 0.000 1.216 1.251 -0.689 3.102 0.893 0.838 1.239 0.974 0.810 0.946 0.914 +1 0.899 0.220 0.951 0.376 -0.773 0.926 -0.837 -0.703 0.000 0.798 -0.841 0.663 1.107 0.886 0.089 -0.947 0.000 1.178 -0.544 1.491 0.000 0.872 1.015 0.988 0.879 0.635 0.832 0.806 +1 1.149 0.669 1.100 0.554 -1.500 2.786 1.485 0.108 0.000 2.707 1.144 1.385 0.000 2.373 0.957 -1.160 0.000 1.056 -2.364 -1.308 0.000 2.926 1.712 0.985 0.721 0.265 1.092 0.907 +1 1.249 -0.865 -0.948 0.702 0.526 0.698 -0.765 -1.284 0.000 0.801 -0.938 0.672 2.215 1.142 -1.349 0.997 2.548 0.534 -1.826 -1.401 0.000 1.003 0.872 1.259 0.855 0.396 0.678 0.674 +1 0.916 -1.093 -0.018 1.275 1.686 0.937 -0.548 -1.737 2.173 1.417 -0.327 0.162 0.000 0.722 0.134 -0.574 0.000 0.402 -0.347 0.575 3.102 1.028 0.593 1.496 1.017 0.567 0.825 0.839 +0 0.803 1.453 -1.386 0.608 -0.116 0.477 -0.345 1.064 2.173 0.338 -2.186 -0.366 0.000 0.873 0.715 0.367 2.548 0.656 0.179 0.666 0.000 1.081 1.070 0.988 0.887 0.674 0.712 0.811 +0 0.799 -1.367 1.261 1.395 0.987 0.612 -0.613 -1.039 2.173 0.499 0.597 -0.253 1.107 0.318 0.669 -0.673 0.000 0.637 2.046 0.482 0.000 1.009 0.692 0.995 1.326 0.752 1.250 1.557 +1 0.568 0.436 0.794 0.367 -1.492 0.823 2.219 0.840 0.000 0.822 1.915 0.358 0.000 0.893 0.790 -0.933 2.548 0.749 -1.784 -1.255 0.000 0.774 1.003 0.990 0.829 0.181 0.826 1.052 +0 0.459 -0.592 0.764 1.310 -1.506 0.702 0.644 0.740 0.000 0.803 0.229 -0.354 2.215 0.789 1.334 -0.255 2.548 0.816 2.492 -1.267 0.000 0.694 0.865 0.988 1.004 0.558 0.754 0.726 +1 0.457 -1.456 0.938 0.423 0.662 1.501 -0.469 -0.758 2.173 0.927 0.542 0.736 2.215 0.933 -0.332 1.373 0.000 0.500 0.819 -1.532 0.000 0.664 1.223 0.984 0.686 1.934 1.048 0.877 +0 1.003 -1.368 -1.222 0.939 -0.202 0.711 -0.116 -0.428 1.087 0.821 -0.708 0.018 0.000 0.987 -0.404 1.148 0.000 1.317 -1.126 1.042 3.102 0.858 1.080 1.070 0.836 1.200 0.808 0.754 +0 1.193 -0.149 1.353 0.356 -0.903 2.676 1.070 0.240 1.087 2.697 -0.702 -1.271 0.000 2.205 -0.517 0.917 0.000 1.539 0.373 0.127 0.000 0.789 0.927 0.991 1.038 4.053 2.031 1.595 +0 1.214 -0.297 0.064 0.646 1.040 0.882 -0.044 -0.888 2.173 0.773 0.393 1.242 2.215 1.105 -0.980 1.701 0.000 0.400 1.798 0.414 0.000 0.838 0.932 0.987 0.751 1.175 0.803 0.771 +1 1.118 1.477 1.658 0.585 -0.664 0.915 -1.711 1.079 0.000 1.666 -0.082 0.007 1.107 0.598 0.901 -0.912 0.000 0.631 -0.016 -1.426 3.102 2.744 1.528 0.987 1.435 0.890 1.276 1.379 +1 0.767 -0.951 -0.522 0.550 1.404 0.643 0.956 1.295 0.000 0.876 0.195 0.823 2.215 1.300 0.336 -1.001 0.000 1.706 -0.501 -0.240 3.102 1.526 1.179 0.987 0.672 1.011 0.980 0.848 +1 0.574 -0.288 0.769 0.936 -0.699 0.480 0.033 
-0.445 0.000 1.013 -0.232 1.389 0.000 1.458 0.789 -1.521 1.274 1.918 0.224 0.252 1.551 1.489 1.156 0.987 0.969 1.335 0.952 0.826 +1 0.770 -0.528 0.257 0.403 -1.139 0.808 0.294 -1.449 2.173 1.604 -0.745 0.862 0.000 0.906 -0.685 -0.614 2.548 0.413 -0.614 1.734 0.000 0.750 0.991 0.989 1.068 0.935 0.921 0.818 +0 1.052 0.823 -0.622 0.475 -1.122 0.987 0.023 0.307 2.173 0.806 -1.480 -1.353 0.000 0.874 0.903 1.043 2.548 1.311 0.245 1.035 0.000 0.884 1.290 0.995 0.996 0.920 1.060 0.937 +0 1.245 0.017 0.427 2.898 0.460 2.037 -0.983 -1.201 0.000 0.595 -0.714 1.229 2.215 0.515 -0.852 -1.474 2.548 0.571 -0.249 -0.375 0.000 1.259 1.185 0.988 1.181 0.386 0.927 1.428 +0 0.497 -1.109 -0.813 0.622 0.912 0.885 0.961 -1.543 0.000 0.648 1.177 -0.918 1.107 0.881 1.249 0.237 0.000 1.576 0.357 0.626 1.551 1.619 1.024 0.984 1.282 0.970 1.254 1.382 +0 0.627 -0.806 0.570 0.444 -0.870 1.686 -0.202 0.861 1.087 1.282 0.785 -1.672 1.107 2.086 -1.263 -0.213 0.000 1.596 -0.216 -1.264 0.000 0.873 0.938 0.983 0.965 1.996 1.301 1.037 +1 0.732 0.103 -1.694 0.442 1.073 0.589 1.730 -1.121 0.000 0.997 0.087 0.400 2.215 0.429 2.324 0.085 0.000 0.751 0.194 -1.123 3.102 0.884 0.789 0.986 0.895 0.768 0.873 0.772 +1 2.187 -1.674 -1.242 0.508 0.758 0.887 -1.146 0.623 0.000 0.524 -0.306 0.707 1.107 0.467 -0.787 -0.969 0.000 1.136 -1.898 0.348 0.000 0.898 0.854 1.421 0.920 0.748 0.813 0.726 +1 0.976 -0.383 -0.937 0.627 -0.076 0.783 -0.240 -1.295 0.000 0.699 -2.329 0.987 0.000 0.699 -2.615 1.330 0.000 1.039 0.977 -1.310 0.000 0.801 0.905 0.988 0.718 0.451 0.623 0.637 +1 1.147 0.376 -0.871 0.459 0.569 0.915 -0.247 1.244 0.000 0.672 -0.255 -1.632 0.000 1.238 0.810 -0.252 2.548 0.893 -0.728 -0.057 3.102 0.868 0.934 0.987 0.679 0.817 0.917 0.790 +0 0.863 1.191 1.441 1.987 1.107 1.193 1.860 -0.307 0.000 0.392 1.701 0.184 0.000 0.408 0.545 -0.952 2.548 1.166 1.188 -1.222 1.551 0.628 0.851 0.982 0.842 0.258 0.675 0.839 +1 0.475 0.939 0.181 1.085 -0.446 0.526 1.053 -1.211 1.087 0.775 1.599 0.963 2.215 0.987 0.451 0.658 0.000 0.719 -0.300 -1.329 0.000 0.852 0.810 0.983 1.164 0.913 0.882 0.759 +1 1.593 -0.447 -1.070 0.591 -0.913 2.729 -0.351 0.820 0.000 2.010 -0.187 -0.632 2.215 0.957 0.028 -0.865 0.000 1.553 -0.010 -1.379 3.102 0.731 0.748 0.989 0.722 1.004 0.753 0.638 +0 1.430 -0.756 1.228 0.663 0.051 0.648 -0.352 -0.144 2.173 0.654 -1.387 1.558 0.000 0.748 -0.932 -1.244 2.548 0.603 0.221 -0.823 0.000 1.038 0.974 1.177 0.783 0.784 0.697 0.665 +0 0.553 -0.163 0.480 0.628 -1.325 1.236 1.285 0.651 0.000 1.539 0.386 -1.301 2.215 0.686 0.912 0.149 2.548 0.898 -0.391 -0.573 0.000 1.325 0.785 0.989 1.057 1.106 1.055 1.054 +0 0.903 -1.396 -0.128 1.468 -0.683 1.207 -2.117 1.289 0.000 1.540 2.113 1.186 0.000 2.432 -1.202 -0.396 2.548 0.817 0.205 -1.334 0.000 1.003 1.134 0.988 0.741 0.313 1.481 1.582 +0 0.335 1.491 -1.411 1.135 0.797 0.623 0.564 -0.606 2.173 0.640 -0.924 0.112 2.215 0.772 -0.661 1.669 0.000 0.615 -1.820 -1.618 0.000 0.620 0.770 0.988 1.273 0.958 1.450 1.620 +0 0.370 -0.661 1.430 1.327 -0.485 0.799 0.058 -1.737 0.000 1.192 -0.228 0.401 2.215 0.953 0.455 -1.171 0.000 0.770 0.827 0.609 3.102 0.843 0.852 0.987 0.838 0.587 0.850 0.778 +0 1.105 -0.950 -1.484 1.009 1.146 1.312 2.468 0.069 0.000 0.614 -0.452 -0.536 0.000 1.160 -2.041 -0.189 0.000 2.239 0.318 1.606 3.102 0.918 0.981 1.019 0.759 0.513 0.700 0.712 +0 0.472 -1.690 -0.247 1.678 0.694 1.361 -0.736 0.085 2.173 1.194 -0.443 -0.759 0.000 2.373 -1.255 -1.693 1.274 0.929 -0.027 -1.562 0.000 0.955 1.345 0.984 0.899 2.352 1.332 1.162 +0 0.723 -0.930 -0.912 1.104 0.766 0.558 -0.766 1.291 2.173 
0.568 -1.413 0.080 0.000 0.675 -1.556 -0.837 0.000 0.462 -1.327 -1.309 3.102 0.709 0.868 1.236 0.644 0.446 0.525 0.553 +0 1.122 -0.987 0.586 0.284 1.648 0.756 -1.002 -0.486 2.173 0.642 -1.047 -1.518 1.107 0.787 -0.244 -0.831 0.000 1.092 0.481 0.884 0.000 0.980 0.904 0.989 0.888 0.822 0.765 0.735 +0 0.730 -0.551 -0.826 0.911 -0.290 0.927 -1.017 0.083 2.173 0.830 -2.559 1.259 0.000 1.437 -0.379 -1.618 0.000 1.241 -0.702 1.314 1.551 2.424 1.406 0.985 1.047 1.022 1.184 1.230 +1 1.748 0.058 1.071 0.155 1.152 0.705 -0.459 -0.565 2.173 0.567 -1.951 0.996 0.000 0.364 -1.289 -0.197 2.548 1.560 0.412 -1.215 0.000 2.262 1.274 0.990 1.148 0.374 0.888 0.956 +1 0.669 -0.844 -0.874 1.200 0.448 0.815 -1.631 1.157 0.000 0.718 -0.701 -0.287 2.215 1.909 -0.347 -1.386 2.548 1.321 -1.111 0.548 0.000 0.866 1.060 1.152 1.084 1.063 1.041 0.899 +1 0.858 0.863 1.331 1.113 -0.570 1.019 0.256 -0.535 0.000 1.735 0.420 0.696 2.215 1.020 -0.505 -1.401 0.000 0.600 0.085 -1.313 3.102 1.500 0.837 1.340 1.172 0.907 1.042 0.948 +1 0.788 0.913 0.877 0.180 1.320 1.248 0.392 -0.841 0.000 0.813 0.165 1.126 2.215 0.763 0.541 1.357 2.548 0.498 -0.713 -1.022 0.000 0.828 0.966 0.981 0.569 0.249 0.795 0.726 +0 0.878 -0.547 0.671 0.471 1.136 0.841 -1.023 -1.613 2.173 0.586 -1.612 0.957 0.000 0.854 1.315 -0.722 0.000 2.465 0.047 -0.193 3.102 0.895 0.963 0.992 0.918 1.701 1.233 1.136 +1 0.783 -0.886 0.272 1.072 1.366 1.213 -0.525 1.055 0.000 1.253 -0.233 -1.244 2.215 2.327 2.022 -0.521 0.000 1.926 -1.583 -0.910 0.000 0.792 1.070 1.059 0.643 1.443 0.967 0.859 +0 0.600 -1.022 0.330 0.999 -1.190 1.656 0.394 1.163 0.000 1.197 0.625 -0.014 2.215 0.680 1.552 0.686 0.000 1.670 -0.607 -0.650 0.000 1.558 1.046 1.051 1.218 0.878 0.990 1.121 +1 0.646 -0.498 1.592 2.034 1.557 0.458 1.382 0.142 2.173 0.565 0.574 0.327 0.000 0.617 0.384 -0.203 0.000 1.093 0.229 -0.722 3.102 0.644 0.563 0.976 0.881 0.687 0.828 0.695 +0 1.070 1.278 -1.053 0.458 0.407 0.556 -0.781 -0.737 2.173 0.571 -0.950 0.531 0.000 0.751 -1.035 1.539 0.000 1.434 0.222 1.147 3.102 0.797 0.836 0.986 0.808 1.073 0.819 0.845 +0 0.978 0.036 0.285 0.594 -0.230 0.565 0.889 -1.471 1.087 0.421 0.733 1.463 2.215 0.805 -2.166 -0.126 0.000 0.436 -1.294 -1.568 0.000 0.688 1.208 0.980 1.114 0.346 1.109 0.958 +0 0.281 -0.030 -1.589 1.904 0.830 0.999 0.441 1.460 2.173 1.741 0.194 -0.651 2.215 1.199 0.210 0.632 0.000 2.255 -0.753 -0.755 0.000 2.035 1.586 0.989 1.194 1.849 1.347 1.255 +0 2.386 -0.512 -0.060 1.210 -0.400 0.455 2.552 1.582 0.000 0.835 -0.298 1.441 0.000 0.509 -0.067 1.123 2.548 1.094 -0.817 -1.099 3.102 0.905 0.766 0.987 0.837 0.583 0.708 0.791 +0 0.282 -1.559 0.832 1.498 0.252 1.201 0.585 -1.358 0.000 0.798 1.429 -1.324 0.000 1.738 0.831 0.471 2.548 1.323 1.070 -0.520 3.102 0.947 0.965 0.999 0.883 0.928 1.047 1.085 +1 1.055 -0.653 -0.688 1.296 -0.809 0.371 0.373 -0.524 0.000 0.787 0.061 0.616 0.000 1.358 0.636 -1.721 2.548 1.464 0.855 0.691 0.000 0.999 1.027 0.981 0.719 0.868 1.053 0.982 +1 0.764 -0.039 -1.021 0.735 -0.583 1.349 0.644 0.974 2.173 0.366 0.499 -1.700 0.000 0.328 0.885 -1.018 2.548 0.498 -0.895 -0.876 0.000 0.618 1.026 0.979 0.660 0.818 1.056 0.825 +1 0.674 1.048 -1.549 1.030 -0.231 1.428 0.094 1.159 1.087 0.581 0.258 -0.894 0.000 0.557 1.282 -0.225 0.000 0.777 -0.364 0.440 3.102 0.722 0.698 1.071 1.327 0.738 0.900 0.818 +1 0.865 0.412 1.511 1.103 -0.849 0.940 0.372 0.295 0.000 1.051 1.211 1.603 2.215 1.065 0.944 -0.097 0.000 0.762 -0.000 -1.141 3.102 0.868 0.886 1.150 0.882 0.735 0.895 0.827 +0 0.577 1.174 -0.049 0.676 1.238 0.518 -0.340 -1.677 0.000 0.588 0.596 -0.265 
2.215 0.521 -0.443 0.503 2.548 0.466 1.110 -1.192 0.000 0.773 0.773 0.982 0.575 0.507 0.548 0.536 +1 1.043 0.347 -1.555 1.751 -1.009 0.621 -0.731 0.187 2.173 0.756 0.709 0.739 0.000 0.466 -0.459 1.640 0.000 0.692 0.460 0.434 1.551 0.891 0.916 0.985 0.805 0.503 0.807 0.762 +0 1.167 0.346 1.669 0.806 0.657 1.014 0.861 -0.300 2.173 0.668 -1.183 1.498 0.000 0.836 0.724 0.627 2.548 1.646 1.766 -0.994 0.000 3.654 2.091 1.062 1.144 0.852 1.445 1.187 +0 0.738 1.166 0.550 1.749 1.125 0.518 -2.507 -0.079 0.000 0.570 2.009 -0.984 0.000 1.234 2.294 -1.675 0.000 1.541 0.400 -0.447 3.102 0.805 0.644 0.988 1.211 0.133 0.769 0.838 +0 0.756 0.655 1.156 1.404 -1.559 0.774 -1.715 0.350 0.000 1.094 0.691 -0.786 2.215 1.153 -1.164 1.272 2.548 0.435 -0.812 -0.480 0.000 0.694 0.821 0.990 0.949 1.804 1.233 1.155 +1 0.906 -1.266 -1.600 0.320 0.493 0.449 -0.417 0.235 1.087 0.566 -1.789 -0.858 0.000 1.260 -0.577 -1.597 2.548 1.353 0.593 0.662 0.000 0.808 0.978 0.988 0.704 0.939 0.676 0.642 +1 2.672 0.822 0.926 0.589 0.806 0.830 0.373 -0.601 0.000 0.542 2.024 -0.923 0.000 0.742 0.124 -0.071 0.000 0.947 0.033 -1.593 1.551 1.093 0.762 0.979 0.909 0.684 0.764 0.725 +1 1.448 0.084 1.032 0.540 -0.388 0.478 0.655 0.683 0.000 0.838 0.233 -1.134 2.215 1.386 0.995 -0.519 2.548 0.959 0.283 -1.701 0.000 0.882 0.980 1.173 0.897 0.785 0.812 0.731 +0 1.926 -0.705 1.543 0.467 -0.408 1.291 0.433 -0.489 2.173 2.097 -0.491 0.939 2.215 0.685 0.859 -0.676 0.000 0.781 1.249 -0.895 0.000 0.277 0.572 1.291 1.107 2.609 1.454 1.259 +0 1.087 -0.256 0.022 0.810 1.147 0.819 0.556 -1.158 2.173 0.572 0.927 0.054 0.000 0.831 0.154 1.226 0.000 0.590 0.973 -0.564 3.102 1.015 1.024 1.103 0.730 0.438 0.724 0.688 +0 0.816 -1.556 0.435 1.133 -1.513 0.762 1.570 -0.253 0.000 0.725 0.120 0.753 1.107 0.863 -1.595 -0.495 0.000 1.516 0.658 -1.330 0.000 0.835 1.043 1.310 0.773 0.620 0.763 0.724 +0 0.864 -1.260 0.634 1.216 -0.279 1.181 -0.506 1.451 0.000 0.566 -1.475 -1.592 1.107 1.470 -1.296 -0.598 0.000 0.470 0.106 -0.304 0.000 0.821 0.792 1.041 0.637 0.450 0.571 0.590 +1 0.499 -1.286 0.496 2.036 1.324 2.429 -1.612 -0.519 0.000 2.190 -1.011 1.019 2.215 1.238 -0.684 -0.909 0.000 1.097 -0.444 1.672 3.102 0.836 0.644 0.985 0.827 0.855 0.881 0.859 +1 0.688 -0.530 -0.787 0.254 1.499 0.830 -0.125 -0.096 2.173 1.431 -0.336 1.510 0.000 0.673 -0.701 0.869 1.274 0.463 1.245 -0.738 0.000 1.470 0.981 0.987 0.788 0.773 0.889 0.771 +1 0.841 0.378 -0.212 0.727 -1.375 0.923 0.411 -1.478 0.000 1.128 1.205 0.007 2.215 0.496 -0.741 1.082 0.000 0.677 1.695 -1.014 0.000 0.930 0.905 0.987 0.797 0.737 0.826 0.722 +0 0.584 -1.556 0.625 1.531 0.713 0.577 -1.233 -1.165 1.087 0.474 -1.495 1.552 0.000 1.159 -1.297 -0.514 2.548 0.366 2.416 0.056 0.000 0.270 0.646 0.997 1.071 0.572 0.862 0.716 +0 0.883 -0.139 0.696 1.162 -0.065 0.789 1.363 1.520 2.173 0.872 0.291 -1.461 0.000 1.164 -0.175 -0.550 2.548 0.396 -0.195 0.220 0.000 0.791 0.902 0.988 0.752 1.542 1.111 0.906 +0 1.037 1.273 -1.294 1.058 0.496 1.203 -0.131 0.938 1.087 0.594 0.269 1.259 0.000 0.935 -2.331 -0.562 0.000 2.043 1.311 -0.121 3.102 2.499 2.110 1.450 0.957 2.091 2.122 1.884 +0 1.536 1.372 0.723 0.252 -0.832 1.002 1.644 -0.141 2.173 1.200 1.526 -1.259 2.215 1.571 1.369 -1.722 0.000 2.244 2.348 0.190 0.000 1.121 1.014 0.987 0.899 1.366 1.121 0.978 +1 0.859 -0.860 0.895 1.772 0.283 1.075 -1.054 -1.174 2.173 0.781 -0.546 1.593 0.000 0.535 -0.044 -0.141 2.548 0.371 -1.568 0.328 0.000 0.809 0.891 0.988 0.592 0.908 0.930 0.796 +0 0.600 -1.055 1.710 1.509 -0.510 1.877 -1.129 -1.277 2.173 1.506 -1.297 0.822 0.000 1.310 -1.552 0.258 
0.000 1.778 -0.381 0.591 3.102 1.121 0.962 1.199 1.075 2.035 1.486 1.222 +1 1.438 -0.154 -0.687 0.817 -1.689 1.054 -1.212 0.700 0.000 1.104 0.641 -1.571 0.000 2.269 -0.533 0.404 2.548 0.735 0.482 -0.482 3.102 3.217 1.865 1.178 1.282 0.932 1.330 1.169 +1 0.770 1.247 1.158 1.514 1.604 1.379 -1.216 0.478 2.173 1.520 0.389 -0.619 1.107 1.197 0.098 -1.204 0.000 0.567 0.903 -0.365 0.000 0.858 0.894 0.988 3.779 2.627 2.603 1.929 +1 0.798 -0.208 1.463 0.716 -1.277 0.503 -0.974 -0.465 0.000 0.660 -0.680 0.400 0.000 0.846 0.029 -1.442 2.548 0.872 -1.263 0.590 0.000 0.878 1.088 0.985 0.624 0.363 0.638 0.757 +1 1.436 0.216 -1.109 1.003 -0.698 0.902 0.526 1.449 0.000 1.274 -0.118 0.834 2.215 2.421 1.046 0.129 0.000 1.727 0.062 -1.482 0.000 1.013 0.790 0.997 1.286 0.811 0.828 0.863 +1 0.670 -2.150 0.565 0.605 1.695 0.787 -0.437 0.553 0.000 1.003 -0.345 -1.156 2.215 0.737 0.943 -1.494 2.548 0.994 -1.036 0.125 0.000 0.967 1.045 0.990 0.964 0.741 0.910 0.846 +1 1.032 -0.507 -1.220 1.039 1.153 1.510 0.177 0.232 2.173 1.022 2.516 -1.134 0.000 0.940 0.647 1.359 2.548 0.764 -0.578 -0.997 0.000 2.865 1.785 1.209 1.409 1.323 1.690 1.552 +1 0.643 0.086 -1.641 0.362 -0.030 0.770 2.702 1.529 0.000 1.633 -1.075 -0.288 2.215 1.243 -0.219 -0.335 2.548 3.611 0.022 1.205 0.000 0.817 1.278 0.985 0.913 0.679 1.273 1.012 +1 1.483 -1.157 0.931 0.865 -0.763 1.774 0.917 -0.014 0.000 0.958 -0.218 1.465 0.000 1.495 0.265 -1.541 2.548 2.171 -0.638 -1.288 1.551 3.173 2.185 1.567 1.100 0.824 1.622 1.564 +0 0.533 -0.374 -1.210 0.965 1.034 0.494 -2.378 -0.537 0.000 0.739 0.737 -0.186 0.000 0.679 1.471 -0.815 0.000 1.569 0.610 0.685 3.102 0.755 0.861 0.986 0.973 0.724 0.892 0.907 +0 0.429 -1.663 -0.629 0.495 0.883 0.546 -0.122 -1.022 0.000 0.575 2.019 1.451 0.000 0.851 0.291 -0.590 0.000 1.203 -0.232 0.523 3.102 0.761 0.836 0.979 0.646 0.290 0.566 0.565 +1 1.347 -1.055 0.461 0.372 1.661 1.341 -2.340 -1.606 0.000 1.293 -0.799 -0.795 2.215 1.690 -1.193 -0.172 2.548 1.634 -0.826 1.147 0.000 0.848 1.151 0.988 1.005 0.922 0.956 0.823 +0 1.914 -1.517 0.373 0.144 -0.907 1.880 0.584 -0.992 2.173 0.566 0.833 1.743 0.000 1.093 -1.474 0.779 0.000 0.958 -0.684 0.889 3.102 0.939 1.208 0.983 2.438 1.780 1.587 1.580 +1 0.497 -0.833 1.149 1.709 0.131 0.971 0.756 -1.622 0.000 0.396 0.344 -0.548 0.000 0.408 2.066 0.218 0.000 0.901 0.577 1.156 3.102 0.840 1.001 1.014 0.883 0.655 0.872 0.793 +1 0.754 -0.001 1.460 0.978 0.473 0.530 2.321 -1.417 0.000 0.637 1.522 -0.595 0.000 0.814 0.115 -0.694 2.548 1.186 0.358 0.740 1.551 0.968 1.065 0.992 0.599 0.731 0.846 0.966 +1 0.606 0.238 0.945 1.266 -1.351 0.997 0.316 -0.192 1.087 0.615 0.053 1.523 0.000 0.848 -0.327 -0.248 0.000 1.253 -1.008 1.471 0.000 0.775 0.536 1.067 1.051 0.666 0.821 0.747 +0 0.500 1.415 -1.067 0.200 1.704 0.748 -1.263 1.115 0.000 1.002 -0.583 -0.082 1.107 1.301 -0.112 -0.788 2.548 1.054 -1.600 -1.432 0.000 1.108 1.267 0.995 0.679 0.778 0.993 0.924 +1 1.135 -0.308 0.574 0.275 -0.845 0.382 2.606 -1.453 0.000 0.494 -1.318 -1.453 1.107 0.886 -1.125 -0.441 2.548 1.093 -0.005 0.222 0.000 0.845 0.739 0.988 0.685 0.558 0.562 0.545 +1 0.349 1.255 -0.985 0.646 -0.931 0.801 0.927 -0.820 0.000 1.319 -0.162 0.855 0.000 0.517 -1.108 0.861 0.000 0.786 0.203 0.366 3.102 0.696 0.686 0.987 0.648 0.440 0.465 0.582 +1 1.631 0.190 -0.315 1.076 0.096 0.758 0.498 -1.246 2.173 0.645 1.546 1.183 0.000 1.556 0.483 1.021 1.274 0.788 0.971 -1.275 0.000 0.769 0.836 1.002 1.203 1.204 1.033 0.965 +0 2.055 -0.883 0.806 1.140 0.667 1.059 -1.429 -0.809 2.173 0.482 -1.413 0.191 0.000 0.879 -1.058 1.620 2.548 1.301 -0.448 
-1.242 0.000 0.856 0.873 0.973 0.871 0.993 1.137 0.957 +0 1.559 -1.783 0.402 0.418 -0.779 0.772 0.259 1.646 2.173 0.789 1.013 -1.347 0.000 0.587 -0.185 0.296 2.548 0.424 2.461 -0.731 0.000 0.925 1.005 0.989 1.602 0.811 1.047 1.477 +1 0.293 2.157 -0.273 0.628 -1.059 1.154 -1.415 0.559 0.000 1.966 0.441 -1.233 2.215 0.835 0.453 0.672 0.000 0.649 -0.824 0.245 0.000 0.728 0.749 0.985 0.815 0.934 0.878 0.751 +0 1.387 0.395 -1.230 1.583 -0.159 1.134 -0.359 1.243 0.000 1.060 1.433 0.314 2.215 1.873 1.004 -1.528 2.548 0.877 1.566 -0.351 0.000 2.578 1.912 1.689 1.324 1.517 1.422 1.299 +0 1.404 -1.323 -1.366 1.188 -0.624 0.397 -2.917 -0.844 0.000 0.743 -1.432 1.255 2.215 1.820 -1.778 0.828 0.000 1.068 -1.122 0.228 3.102 1.719 1.118 1.110 0.935 0.644 0.774 0.879 +0 1.434 0.604 1.658 1.044 -0.194 1.498 2.030 0.028 0.000 1.551 0.294 -1.154 1.107 1.656 0.056 1.292 0.000 0.738 -0.768 -1.730 0.000 0.797 0.864 1.687 1.150 0.988 0.855 0.865 +0 1.175 -0.859 1.664 0.437 -0.777 1.091 -0.473 -0.915 2.173 1.137 -1.338 1.101 2.215 0.533 -2.031 0.746 0.000 1.583 1.255 0.410 0.000 3.219 2.342 0.993 0.784 1.763 1.713 1.384 +1 1.932 0.823 1.054 0.749 0.397 0.542 -0.752 -0.503 0.000 0.448 -0.920 0.286 2.215 0.410 0.928 -0.078 0.000 1.611 0.266 -1.226 0.000 0.913 0.686 0.986 0.914 0.526 0.663 0.719 +0 0.786 0.991 -1.721 0.926 -0.158 0.557 0.261 1.297 2.173 0.786 1.550 -0.360 0.000 0.449 -1.112 1.008 2.548 0.722 0.231 -1.056 0.000 0.903 1.032 1.167 0.934 0.532 0.804 0.738 +0 1.107 0.538 -1.297 0.803 0.959 1.191 0.950 -0.308 2.173 1.461 0.697 1.326 2.215 0.535 -0.241 0.550 0.000 0.648 0.322 -0.673 0.000 0.618 0.872 1.169 1.152 1.946 1.063 0.844 +0 0.662 -0.450 1.677 0.496 0.276 0.896 1.570 1.189 0.000 1.231 -0.736 -0.532 2.215 0.789 1.958 -1.401 0.000 0.930 0.939 0.452 0.000 0.919 0.868 0.987 0.956 0.832 1.268 1.000 +1 0.608 -1.260 0.484 0.594 -1.028 1.014 1.378 -1.740 0.000 1.258 0.629 -0.073 2.215 0.611 1.707 -0.218 0.000 0.779 -0.187 1.037 0.000 1.177 0.939 0.987 1.062 0.686 1.176 1.136 +1 0.321 -1.394 1.010 0.690 -0.991 0.462 1.260 -1.023 0.000 0.537 0.456 -0.572 0.000 1.028 -0.680 0.418 1.274 1.492 0.419 1.103 3.102 0.874 0.951 0.988 0.745 0.835 0.877 0.771 +0 0.904 0.816 0.585 0.300 -0.584 0.869 0.916 -0.872 2.173 0.412 -1.497 1.436 0.000 0.541 2.124 -1.282 0.000 0.588 -0.628 0.910 3.102 2.502 1.437 0.985 0.927 1.039 1.082 0.907 +1 0.451 -0.318 1.440 1.212 -1.287 3.069 -1.133 -0.251 0.000 1.661 1.728 1.537 0.000 1.819 -2.332 1.036 0.000 1.707 -0.030 -1.591 3.102 0.873 0.887 0.998 0.833 0.617 0.845 1.348 +0 0.396 -1.290 -0.945 0.868 1.010 0.486 0.698 -1.297 1.087 0.348 -1.152 0.163 0.000 0.664 -0.224 0.595 0.000 1.279 0.797 -0.363 3.102 0.456 0.861 0.986 1.763 0.631 1.368 1.044 +1 0.925 1.663 1.353 0.569 -1.333 0.441 -1.412 1.673 0.000 1.047 0.328 -0.853 0.000 0.904 -0.381 0.772 0.000 1.952 0.582 0.294 1.551 0.922 1.193 0.992 0.649 0.492 0.714 0.801 +1 1.289 0.147 -1.295 1.515 -1.500 1.028 -0.945 1.042 0.000 1.408 0.797 -0.004 1.107 0.929 -0.670 -0.277 2.548 0.565 0.408 -1.587 0.000 1.223 1.092 0.969 1.650 1.085 1.159 1.147 +1 0.384 -2.007 -1.648 0.629 -0.075 0.800 -0.170 -0.212 1.087 0.811 -1.081 1.690 2.215 0.926 -0.686 -1.119 0.000 1.390 0.568 0.971 0.000 1.541 1.166 0.992 0.764 1.307 0.949 0.810 +1 0.937 -0.781 1.214 0.742 0.051 0.558 0.022 0.114 2.173 0.918 -1.582 1.491 0.000 1.185 -0.304 -1.718 1.274 1.067 0.174 -0.889 0.000 0.854 0.866 1.001 0.775 1.026 0.717 0.693 +1 1.304 -1.634 -1.187 0.228 0.938 1.030 -1.150 0.121 1.087 1.676 -0.531 1.393 0.000 1.383 -1.051 -0.379 0.000 0.526 2.376 1.403 0.000 2.443 1.276 
0.987 1.034 0.691 1.041 0.911 +1 1.044 -0.440 0.700 0.313 0.247 0.729 -2.426 -1.354 0.000 0.646 -1.016 0.206 2.215 0.586 0.370 -1.119 0.000 1.043 0.530 1.114 3.102 0.820 1.028 0.984 0.588 0.884 1.092 1.077 +1 2.080 -0.087 -0.289 1.684 -1.713 0.781 0.547 1.462 2.173 0.642 1.252 0.557 2.215 0.472 0.501 -1.018 0.000 1.039 -0.688 0.334 0.000 0.926 0.978 2.486 1.554 0.855 1.163 0.960 +1 0.949 1.439 1.214 1.132 0.723 1.165 1.059 -0.690 2.173 0.901 1.085 1.066 2.215 0.842 0.660 -1.307 0.000 0.646 1.454 -0.198 0.000 0.810 0.833 0.985 1.329 1.508 0.979 0.816 +0 1.062 0.518 -0.468 0.982 0.363 0.849 0.447 -1.435 2.173 0.537 -0.094 -1.231 2.215 0.892 0.303 0.469 0.000 1.242 -0.183 0.973 0.000 0.605 1.062 0.990 0.784 0.331 0.727 0.731 +1 1.709 1.279 1.052 0.163 -0.870 0.889 -0.099 -0.822 1.087 0.641 0.464 -1.623 0.000 0.892 -1.224 0.047 0.000 0.917 -0.003 0.664 0.000 0.915 0.974 0.991 0.596 0.813 0.848 0.740 +1 0.807 0.503 1.346 0.801 -1.045 0.839 -0.392 -1.424 2.173 1.522 -0.022 0.296 0.000 0.756 -0.391 -0.254 0.000 0.725 -0.916 1.003 3.102 0.852 0.789 0.987 0.877 0.735 0.875 0.868 +0 0.491 0.347 -1.264 1.723 -1.184 1.500 0.902 0.215 0.000 1.450 -0.014 -1.689 2.215 1.075 1.346 0.902 2.548 0.852 1.088 -0.276 0.000 0.815 0.940 0.976 0.901 1.447 1.248 1.392 +1 0.599 -0.405 0.601 0.209 -0.102 0.938 0.079 -1.629 2.173 0.783 1.071 0.808 2.215 2.003 0.049 0.556 0.000 2.261 -0.157 -0.626 0.000 1.104 1.098 0.986 0.637 1.223 0.950 0.822 +1 0.894 0.066 -1.633 0.668 0.738 1.339 0.358 -0.323 2.173 0.801 -0.388 1.408 0.000 0.931 -0.349 0.672 0.000 0.580 1.062 -1.351 3.102 0.815 0.822 0.988 1.120 0.866 0.941 0.818 +0 0.494 1.436 -1.058 2.502 -1.318 0.722 0.050 0.362 0.000 0.841 -0.528 0.947 0.000 0.948 -1.570 0.346 0.000 1.543 -0.165 -1.671 3.102 0.971 1.043 0.980 1.459 0.797 1.447 1.668 +1 0.981 -0.917 -0.497 3.311 -0.103 1.579 -0.201 1.577 0.000 0.361 -1.123 -1.244 1.107 0.944 -1.009 1.072 0.000 0.650 0.600 -1.226 3.102 1.412 0.982 0.981 1.228 0.473 0.840 1.222 +1 1.876 0.581 -0.727 0.859 -0.518 0.589 0.890 0.940 2.173 0.846 -0.403 1.613 2.215 0.738 0.874 -0.273 0.000 1.000 -0.337 1.151 0.000 1.146 0.952 0.977 1.109 0.941 1.029 0.893 +1 2.237 -1.294 -1.213 0.199 -1.634 0.849 -0.112 0.654 2.173 0.565 -0.972 0.987 0.000 0.647 -0.244 -0.436 2.548 0.996 -1.192 0.342 0.000 0.577 0.710 0.996 0.745 0.772 0.931 0.809 +1 0.826 0.578 1.616 0.634 0.648 0.932 0.395 0.021 1.087 0.483 0.952 -0.443 0.000 0.703 0.453 -1.442 0.000 1.198 1.320 1.350 3.102 0.734 0.851 0.982 0.558 1.255 0.773 0.691 +0 1.790 -0.021 -0.393 0.500 -1.332 0.656 0.242 0.822 2.173 0.741 1.101 -1.512 2.215 0.620 -0.267 1.138 0.000 0.467 0.895 -0.283 0.000 0.719 0.715 0.987 0.982 1.001 0.849 0.693 +0 0.542 1.145 -1.048 1.179 0.272 0.813 1.206 -1.634 2.173 0.442 0.594 -1.185 0.000 0.272 0.973 1.408 0.000 0.657 0.057 0.691 0.000 0.723 0.744 1.028 1.039 1.529 0.964 0.764 +0 0.466 0.286 1.089 1.319 -0.360 0.609 0.165 0.310 2.173 0.835 0.994 -1.219 2.215 0.700 0.889 0.864 0.000 1.302 2.046 -1.381 0.000 1.275 0.981 1.047 0.664 1.130 0.933 0.867 +0 2.307 0.643 0.837 0.836 1.167 1.330 1.339 -1.080 0.000 0.567 0.532 -0.083 2.215 0.547 1.169 0.702 2.548 0.568 0.806 -0.497 0.000 0.714 0.882 0.981 0.581 0.444 0.656 0.920 +1 0.711 1.297 -1.040 0.813 0.875 1.568 0.960 -1.460 2.173 0.637 -0.367 0.392 0.000 0.930 1.874 -0.736 0.000 2.544 0.971 0.712 3.102 0.917 1.047 1.041 0.984 1.968 1.203 0.972 +0 0.866 -0.016 0.807 0.882 1.110 0.730 0.968 0.121 1.087 0.854 0.119 -1.392 0.000 0.604 -0.523 -0.606 2.548 0.563 -2.466 -0.410 0.000 0.894 1.100 0.987 0.879 0.870 0.760 0.746 +0 
1.080 -1.796 0.564 1.837 -0.031 0.926 -0.098 -1.706 2.173 0.653 -0.948 -1.453 0.000 0.919 0.167 -0.389 0.000 0.511 -0.299 -0.164 1.551 0.931 0.989 0.997 0.684 0.722 1.167 0.984 +0 1.484 -0.046 -1.002 0.657 -1.362 2.276 -0.243 1.192 0.000 1.508 1.026 0.040 2.215 2.877 0.616 -0.740 0.000 1.742 0.559 0.881 1.551 1.990 1.785 0.976 1.609 1.044 1.337 1.245 +0 1.019 -0.195 -1.032 0.725 -0.206 0.599 -0.240 0.356 2.173 0.746 0.911 0.391 2.215 0.699 -1.836 -1.460 0.000 1.194 1.359 -0.225 0.000 0.480 0.963 0.979 1.029 0.615 0.754 0.793 +0 0.380 -0.510 -1.706 1.566 -1.321 1.666 0.654 0.846 0.000 1.116 0.332 0.100 2.215 1.736 -0.132 -0.997 2.548 1.095 -1.983 -0.935 0.000 4.946 2.935 1.000 0.846 1.288 1.931 1.627 +0 0.445 -1.332 -0.835 2.036 -1.278 0.691 -0.843 1.692 2.173 0.653 -1.435 -0.272 0.000 1.340 -0.341 0.606 2.548 1.247 -1.643 0.482 0.000 0.795 0.975 0.996 0.779 1.036 0.868 0.947 +1 0.475 -1.115 -1.001 1.167 -1.629 3.671 -0.743 0.358 0.000 3.563 1.157 -1.488 2.215 1.045 2.429 -0.898 0.000 0.661 0.343 1.117 0.000 1.550 1.008 0.987 4.829 0.881 3.016 2.788 +1 0.699 1.625 -1.430 1.074 1.427 1.004 1.033 0.400 0.000 0.448 0.045 -0.779 0.000 1.269 -0.043 1.718 2.548 1.243 0.240 0.159 0.000 0.755 0.972 0.990 1.555 0.904 1.023 1.113 +0 1.079 0.753 -1.345 0.585 -0.316 0.799 0.679 0.481 2.173 0.562 0.900 1.499 0.000 0.518 -0.408 -0.859 2.548 0.547 0.918 0.976 0.000 0.334 0.627 0.984 0.669 0.890 0.715 0.614 +1 0.784 0.785 -0.352 1.044 1.388 1.318 -0.060 -1.513 0.000 1.585 0.267 0.010 2.215 1.744 0.576 0.558 0.000 0.794 1.312 0.856 0.000 0.722 1.006 1.253 0.836 1.011 0.863 0.813 +1 0.588 0.381 -1.398 0.577 -1.697 0.682 -0.130 1.347 0.000 1.718 1.170 -0.363 0.000 0.770 0.004 0.039 1.274 1.099 0.938 0.110 0.000 0.743 0.782 0.997 1.005 1.021 0.889 1.033 +0 1.328 0.723 -1.549 0.442 0.934 0.695 0.745 -0.142 0.000 1.373 0.967 0.677 2.215 2.287 0.449 -1.104 2.548 0.639 -0.068 0.893 0.000 0.927 0.900 0.987 0.899 1.942 1.049 0.917 +1 1.194 0.175 -0.627 0.865 1.711 0.779 -1.010 -1.636 2.173 1.275 0.203 0.571 0.000 0.882 0.980 -0.214 0.000 0.747 -1.030 0.530 3.102 0.634 0.759 1.210 1.025 0.754 0.820 0.777 +1 0.539 -0.219 -1.499 1.333 0.589 0.672 0.839 1.036 2.173 0.509 1.277 -1.368 0.000 1.462 0.211 -1.059 0.000 1.064 -2.342 -0.420 0.000 0.835 1.041 1.118 0.842 0.749 0.826 0.838 +1 0.393 -0.960 -1.464 0.845 0.155 2.371 0.017 -0.575 0.000 3.276 1.408 1.466 0.000 2.470 -0.519 -0.037 2.548 1.681 1.608 1.013 0.000 1.425 1.447 0.987 1.114 0.890 2.211 2.423 +0 0.475 1.613 -0.811 1.170 1.030 1.208 2.203 -1.023 0.000 0.594 -1.646 1.073 0.000 1.231 1.123 0.749 2.548 0.614 1.455 -0.083 0.000 1.036 1.350 1.029 1.151 1.107 1.333 1.079 +0 0.572 0.671 -0.659 0.233 1.209 0.889 0.716 0.138 2.173 0.641 1.764 -1.474 0.000 0.586 1.468 0.780 0.000 0.935 0.042 -1.623 1.551 0.848 1.087 0.990 0.587 1.020 0.788 0.783 +1 0.427 1.496 -1.304 0.448 -0.090 0.978 1.157 0.663 2.173 1.012 0.240 -1.327 0.000 0.825 -0.659 -1.352 0.000 1.020 0.553 1.456 3.102 0.942 0.967 0.987 0.939 0.743 1.086 0.886 +1 0.416 -1.640 0.468 1.435 -0.778 0.955 -1.340 0.849 2.173 1.182 -1.132 0.172 2.215 1.864 -1.418 -1.251 0.000 0.702 -0.214 -1.436 0.000 0.904 1.335 0.987 1.053 0.909 1.049 0.901 +1 0.963 0.668 1.355 0.980 -0.253 0.620 1.028 -1.223 2.173 0.283 1.696 1.417 0.000 0.397 -0.774 -0.524 2.548 0.788 -0.025 0.279 0.000 0.813 0.784 1.336 0.794 0.767 0.672 0.608 +1 0.551 1.117 0.119 0.616 1.554 0.690 0.442 0.628 0.000 1.442 0.712 -0.888 1.107 0.659 -0.841 0.979 0.000 1.094 -0.183 -1.428 0.000 0.972 0.650 0.988 0.897 0.959 0.911 0.765 +1 0.349 -0.501 1.044 0.972 
+[... several hundred added data rows omitted: each `+`-prefixed row is a binary class label (0/1) followed by 28 space-separated floating-point feature values ...]
1.071 -0.941 -0.749 2.548 0.666 -1.101 -1.519 0.000 1.011 1.002 1.173 1.021 1.065 0.891 0.825 +1 1.389 0.236 1.470 0.467 0.354 1.224 -1.919 1.018 0.000 1.465 -0.752 -0.787 1.107 1.546 -0.206 -0.196 2.548 0.808 -1.149 -1.152 0.000 0.774 1.736 0.985 1.284 0.925 1.354 1.299 +1 1.238 -1.766 -1.391 0.273 1.399 0.548 -1.441 0.571 0.000 0.577 -1.126 -1.076 2.215 1.485 0.438 0.453 0.000 0.932 0.851 -1.114 0.000 0.876 1.298 0.984 0.562 0.251 0.743 0.903 +1 0.786 0.317 0.930 0.535 -0.546 0.674 0.687 1.702 0.000 0.793 1.034 -0.345 2.215 0.904 0.802 1.276 2.548 2.090 0.245 -0.772 0.000 0.791 0.942 0.985 0.803 0.898 0.671 0.648 +1 0.781 0.685 -1.089 1.346 -1.670 0.493 2.484 0.344 0.000 0.927 -0.427 -0.536 2.215 0.917 0.194 -1.356 0.000 1.015 0.442 0.954 1.551 1.305 0.867 0.995 1.322 0.961 0.936 0.845 +0 0.623 1.449 -0.108 0.975 -1.454 0.774 -0.200 1.551 2.173 0.493 -0.761 -0.947 0.000 0.816 0.846 0.243 2.548 0.993 0.115 0.306 0.000 0.932 0.922 1.011 0.712 1.089 0.847 0.812 +1 1.115 -0.073 0.703 1.453 -0.261 0.683 -0.467 -1.071 2.173 0.655 0.088 1.554 2.215 0.480 -0.262 1.122 0.000 0.719 -1.283 -0.338 0.000 0.762 0.736 1.345 0.970 0.744 0.818 0.706 +1 0.765 -0.216 -0.502 1.352 0.606 0.781 -0.233 0.011 2.173 0.747 0.591 1.393 0.000 1.318 0.027 -1.467 0.000 1.114 -1.061 -0.812 3.102 0.928 1.107 1.185 0.709 0.855 0.930 0.852 +0 0.366 -1.177 -1.438 0.465 -1.258 0.410 -1.570 0.101 0.000 0.571 -0.688 0.832 0.000 0.683 -0.334 1.660 2.548 0.891 -0.358 -0.590 3.102 0.783 0.744 0.979 0.603 0.535 0.543 0.613 +1 1.604 0.424 0.948 0.730 -1.122 1.063 -0.320 -1.143 2.173 0.700 -0.905 0.809 1.107 0.623 2.553 0.081 0.000 0.587 -0.242 -0.222 0.000 1.020 1.139 1.435 1.252 1.307 1.000 0.928 +1 1.387 -0.621 0.866 0.549 1.364 0.909 -0.722 -0.449 2.173 0.318 -1.708 -0.420 0.000 0.594 -1.075 1.309 0.000 1.006 -0.403 -1.546 3.102 0.694 0.801 0.985 0.691 0.854 0.817 0.706 +1 0.695 0.197 1.125 0.722 -0.202 0.961 1.248 -1.651 0.000 1.814 1.299 0.185 0.000 1.447 0.037 -1.278 2.548 1.003 0.336 -1.701 0.000 0.666 0.862 0.987 0.544 0.880 0.828 0.792 +1 0.754 0.615 -1.346 0.179 1.705 1.028 0.048 0.836 2.173 0.848 -0.654 -0.570 1.107 1.133 0.352 -0.196 0.000 1.692 0.347 1.597 0.000 1.526 1.212 0.979 0.950 1.406 0.996 0.850 +1 0.379 1.340 0.562 1.061 -1.682 1.405 0.579 0.814 2.173 1.318 0.125 0.359 2.215 1.593 1.967 -1.027 0.000 1.293 0.189 -1.267 0.000 0.914 0.878 0.992 1.530 0.923 1.327 1.104 +0 0.464 -2.161 -0.966 1.196 0.693 0.574 -0.551 0.723 0.000 0.862 -0.843 -0.748 2.215 0.722 0.124 -1.322 2.548 0.591 -1.410 1.456 0.000 0.763 0.928 1.029 1.161 0.604 0.885 0.793 +0 0.314 -0.863 1.325 1.115 0.690 0.337 -2.718 -0.632 0.000 0.552 -0.320 0.567 2.215 1.126 0.508 -0.445 2.548 0.640 1.503 1.551 0.000 0.490 0.919 0.985 1.855 0.766 1.222 1.052 +1 0.658 -1.330 0.029 0.982 0.937 1.461 0.109 -1.368 0.000 0.667 -0.512 -0.441 2.215 0.895 -0.414 0.638 2.548 0.971 0.858 0.745 0.000 1.924 1.401 0.987 0.535 0.679 0.984 0.919 +1 1.868 0.019 0.780 0.638 -1.454 2.141 -1.253 -0.081 0.000 1.518 0.639 -0.500 0.000 2.824 -0.343 1.640 2.548 2.813 -1.023 -1.701 3.102 1.024 1.972 1.367 1.156 0.986 1.718 1.499 +1 1.233 -0.275 0.346 1.311 -0.446 0.709 0.816 -1.461 2.173 1.190 1.228 1.629 2.215 0.696 0.084 -0.210 0.000 0.502 0.891 0.102 0.000 0.378 0.953 1.152 1.252 0.558 1.162 0.900 +1 0.419 1.335 0.926 1.191 -1.131 0.799 1.834 -0.351 0.000 1.044 0.512 0.481 2.215 1.179 0.469 1.462 2.548 0.786 0.611 -0.774 0.000 0.843 1.147 0.989 0.705 0.911 0.933 0.812 +1 2.457 0.526 -0.511 0.491 -1.341 1.153 -0.223 0.796 2.173 0.806 1.127 -1.548 0.000 1.040 0.970 -0.579 
0.000 1.927 0.977 1.292 3.102 1.078 1.021 1.034 1.538 1.375 1.235 1.108 +0 0.526 0.795 1.480 1.650 1.522 0.437 2.017 -1.312 0.000 0.986 0.451 0.169 2.215 0.593 2.521 -0.360 0.000 0.618 0.699 -0.075 1.551 0.772 0.681 0.993 1.150 0.203 0.784 1.085 +1 0.375 -0.719 0.736 0.819 -0.455 1.523 -1.116 1.504 0.000 0.649 0.145 -0.305 0.000 0.441 -0.464 -0.688 1.274 1.233 -0.577 0.359 0.000 0.829 0.767 0.983 0.611 0.347 0.657 0.668 +0 0.535 0.851 -0.880 1.795 -0.047 0.987 -2.791 1.532 0.000 1.020 -1.514 1.359 0.000 0.432 0.230 0.377 2.548 0.852 -0.508 -0.570 3.102 0.911 0.884 0.983 0.603 0.406 0.641 1.213 +1 1.243 0.351 1.288 1.593 -1.708 0.447 1.109 0.108 2.173 0.481 -0.776 0.344 0.000 1.086 -0.392 -0.366 1.274 0.580 0.496 -0.744 0.000 0.773 0.720 0.991 1.231 0.825 0.936 0.804 +0 0.911 1.078 0.184 1.293 -0.268 1.055 -0.240 -1.250 0.000 0.734 0.336 -1.634 0.000 1.495 0.559 0.869 2.548 1.001 -0.269 0.533 3.102 0.844 1.023 0.988 0.939 0.534 0.908 0.923 +1 0.471 -0.708 0.088 0.343 -1.521 1.074 -0.006 1.648 2.173 1.002 0.437 0.246 1.107 1.044 -0.178 -0.518 0.000 0.808 -1.488 0.775 0.000 0.906 1.032 0.997 0.869 1.497 0.937 0.800 +0 0.551 -1.093 -1.685 0.148 -0.479 0.846 -0.773 -0.540 2.173 0.502 0.255 0.727 0.000 0.687 1.620 1.034 0.000 0.382 2.296 -1.585 0.000 1.037 0.865 0.988 0.840 0.575 0.925 0.777 +1 1.356 -1.317 0.178 2.624 -1.196 0.882 -0.984 -1.432 2.173 0.957 0.664 -1.716 0.000 0.401 -1.476 1.644 0.000 0.389 -1.721 0.173 0.000 1.360 1.168 2.470 1.171 0.518 0.893 1.084 +0 0.406 -0.913 -1.043 1.175 1.127 0.619 0.138 -0.003 2.173 1.197 -0.928 0.478 1.107 1.126 1.021 -1.485 0.000 0.420 -1.370 -1.015 0.000 1.452 1.178 0.989 0.861 0.901 1.054 0.887 +1 1.240 1.038 1.180 2.124 0.604 0.511 0.891 -1.216 2.173 0.808 1.635 -0.872 0.000 0.648 1.067 -0.461 0.000 0.934 0.134 -0.439 3.102 0.785 0.724 1.114 1.103 0.547 0.842 0.772 +1 0.444 0.405 -1.127 1.115 0.611 1.338 -0.545 0.637 2.173 0.745 1.130 -1.608 0.000 0.993 -0.837 -1.184 0.000 0.752 -1.527 -0.527 0.000 0.919 1.372 0.987 0.536 0.844 0.819 0.723 +0 0.706 0.490 -1.417 0.927 -1.560 0.618 -0.236 0.624 2.173 0.463 0.724 0.218 1.107 0.928 0.281 -0.589 0.000 0.398 1.569 -1.264 0.000 0.699 0.924 1.004 0.802 0.491 0.856 0.735 +0 1.022 -1.246 -0.177 0.164 -1.328 0.921 -0.838 -1.616 2.173 1.643 -0.547 0.605 0.000 1.008 -0.447 0.076 2.548 0.771 0.088 -0.898 0.000 1.006 0.883 0.999 0.945 1.218 0.755 0.685 +1 1.171 0.252 0.823 1.653 1.150 1.375 0.971 -1.256 2.173 2.392 2.049 -0.306 0.000 1.578 0.399 1.390 0.000 0.566 1.316 -1.536 0.000 0.805 1.065 0.978 0.573 0.840 1.130 0.937 +1 0.667 -1.332 -0.712 0.877 0.899 0.751 -0.433 0.663 0.000 0.736 -0.534 -0.649 2.215 0.728 0.335 0.203 0.000 3.025 -0.648 -1.501 3.102 0.768 0.930 1.052 0.976 0.948 0.972 0.866 +0 0.526 0.938 -0.817 2.830 -0.253 1.106 -1.670 1.372 0.000 0.847 -0.445 1.122 2.215 1.021 -0.483 -0.648 2.548 0.937 -0.748 1.634 0.000 0.725 0.828 0.988 1.435 0.989 1.369 1.957 +1 1.015 0.316 -0.998 0.958 0.173 1.249 -0.331 1.287 0.000 0.693 1.639 -0.988 0.000 1.835 0.222 0.207 2.548 1.215 0.373 -0.666 3.102 0.454 0.667 1.189 0.851 0.816 0.835 0.768 +0 1.019 0.840 0.317 2.432 1.115 0.448 -0.270 -0.251 2.173 0.510 -1.684 -0.617 0.000 1.206 0.392 -1.192 2.548 0.920 -1.482 -1.455 0.000 0.612 1.167 1.437 1.196 0.762 0.989 1.236 +1 0.749 0.170 -1.286 0.730 1.386 0.492 0.634 1.058 0.000 0.762 0.837 0.172 2.215 1.665 1.125 -0.797 2.548 0.849 -0.365 1.087 0.000 0.560 0.745 0.979 1.272 0.945 0.978 0.863 +1 1.676 -0.182 -0.485 0.958 1.099 0.493 0.861 -0.319 0.000 1.314 0.219 1.088 2.215 0.536 0.232 -1.122 0.000 1.341 0.914 
-1.672 3.102 0.999 1.193 1.738 1.256 0.907 0.981 0.943 +0 2.412 0.291 1.412 1.998 1.743 1.097 0.904 0.243 0.000 0.928 0.661 -1.233 0.000 1.426 0.705 -0.549 0.000 1.785 -1.380 0.362 1.551 0.863 0.578 0.972 2.138 0.720 1.388 1.467 +0 1.784 0.008 1.346 0.375 -0.189 0.982 0.676 -0.910 0.000 0.680 -0.188 0.098 0.000 0.674 0.248 0.039 2.548 0.775 0.264 1.694 3.102 0.593 0.593 1.113 0.750 0.551 0.520 0.500 +1 1.376 -0.850 -0.897 0.560 -0.062 0.831 1.151 0.249 0.000 1.130 0.236 -1.596 2.215 0.959 -0.557 1.187 0.000 1.158 0.144 0.758 3.102 2.015 1.145 0.996 0.982 0.881 0.964 0.977 +0 1.139 -0.488 -0.055 1.071 0.467 0.663 -0.732 -1.076 0.000 0.979 -0.905 1.447 2.215 0.761 0.632 -0.897 2.548 0.379 0.523 -1.470 0.000 0.624 0.807 0.994 1.055 1.143 0.919 0.807 +0 1.576 0.206 -0.132 2.022 0.205 1.505 1.250 -1.459 2.173 0.738 0.401 1.219 1.107 0.371 -1.488 0.138 0.000 0.493 0.521 -1.641 0.000 0.806 0.727 0.979 2.331 1.235 1.547 1.223 +1 0.764 1.591 1.305 0.632 -0.078 0.643 -0.061 -1.078 2.173 0.431 -0.960 0.648 2.215 0.674 1.528 0.141 0.000 0.656 0.570 1.585 0.000 0.803 0.940 0.987 1.273 0.859 1.048 0.853 +0 1.511 -0.730 0.934 1.315 -0.907 0.849 0.211 0.984 2.173 2.691 0.527 -0.596 2.215 1.298 -0.996 1.304 0.000 0.785 1.200 -0.167 0.000 1.243 1.047 1.945 2.069 2.231 1.589 1.345 +1 0.657 1.036 0.003 0.551 -1.155 0.365 -0.160 0.364 2.173 0.502 -2.299 0.590 0.000 0.447 -1.503 -1.682 0.000 1.283 -0.867 1.135 0.000 0.893 0.843 0.989 1.276 0.795 0.926 1.324 +0 0.677 -0.636 1.039 0.907 -0.170 0.828 0.114 0.246 2.173 1.146 -0.193 -1.388 2.215 0.726 0.600 -0.607 0.000 0.690 0.760 1.251 0.000 0.783 0.833 0.987 0.677 1.445 0.819 0.727 +1 0.493 0.388 0.925 1.150 0.377 0.914 0.382 -1.323 1.087 0.564 0.633 -0.535 2.215 0.623 -0.108 -1.011 0.000 0.919 1.241 -1.627 0.000 0.903 0.957 0.988 1.249 0.704 0.899 1.151 +0 0.583 -1.298 -0.854 1.265 0.284 0.810 -0.518 -1.452 2.173 0.946 -1.523 -0.244 0.000 1.168 -0.865 1.082 2.548 0.962 -1.733 0.670 0.000 0.967 0.989 1.018 1.057 0.953 0.930 0.816 +1 2.510 0.206 1.077 0.165 -0.323 1.068 -0.954 -0.160 0.000 0.607 0.727 1.643 2.215 1.054 1.011 -0.899 0.000 0.444 0.443 -1.317 3.102 0.849 0.753 0.989 0.661 0.220 0.770 0.857 +1 1.263 0.988 -0.617 0.615 0.309 1.295 1.185 0.630 2.173 0.987 1.045 -0.966 0.000 0.952 -2.625 -1.494 0.000 0.871 1.708 -0.419 0.000 0.837 0.735 0.988 1.041 0.966 0.908 0.795 +0 0.608 -0.054 -0.064 1.794 0.192 0.725 2.000 -0.692 0.000 0.941 1.014 1.082 2.215 1.098 -1.028 -1.288 0.000 0.370 0.642 -0.731 1.551 0.415 0.588 0.983 0.934 0.536 0.909 1.019 +0 1.308 0.667 0.904 0.767 0.140 2.330 0.109 0.682 2.173 3.897 0.335 -1.120 0.000 1.434 0.684 1.240 2.548 2.419 -0.098 -0.519 0.000 1.067 1.762 0.988 0.819 1.324 2.068 1.604 +0 0.594 2.205 -1.599 1.070 1.647 0.866 0.434 1.135 2.173 1.050 0.356 0.382 2.215 1.774 0.455 -0.731 0.000 0.553 0.845 -0.510 0.000 1.028 1.105 0.983 0.883 0.884 0.956 0.967 +0 1.183 0.516 1.575 0.667 -0.826 0.881 -2.022 1.025 0.000 0.681 -1.032 -0.057 2.215 1.134 1.123 -0.700 2.548 0.523 -1.853 -0.074 0.000 0.873 0.919 1.021 0.786 1.418 1.572 1.394 +1 0.760 0.832 1.732 0.826 1.085 0.781 0.988 0.709 2.173 1.464 -1.091 -0.452 0.000 0.608 -0.580 -1.688 0.000 1.582 -0.387 -1.067 3.102 0.771 0.837 0.977 0.848 1.505 1.223 1.062 +1 0.557 0.144 -0.881 1.781 -0.172 0.740 0.959 1.169 2.173 0.500 -0.061 -0.618 0.000 0.975 0.727 -1.536 1.274 0.552 0.388 0.783 0.000 0.823 0.842 0.979 1.105 0.690 0.828 0.780 +0 3.208 -0.165 1.046 2.096 0.836 1.375 0.220 -0.793 0.000 1.625 -0.315 -0.431 2.215 0.685 -0.865 1.631 2.548 1.116 -1.161 -0.912 0.000 0.833 0.982 0.964 
1.951 1.132 1.282 1.308 +0 2.028 1.070 1.307 0.746 -1.000 1.553 0.256 -0.548 2.173 1.253 0.108 1.046 2.215 0.848 0.591 -0.945 0.000 0.958 -0.344 0.431 0.000 0.779 1.093 1.489 1.138 2.040 1.378 1.081 +0 1.359 -0.948 1.077 0.145 -0.854 0.548 -1.557 -1.279 2.173 0.614 0.617 0.206 0.000 0.790 0.740 -1.008 2.548 0.520 -0.725 0.511 0.000 0.640 1.126 0.989 0.924 1.243 0.840 0.775 +0 0.481 -0.396 -0.783 0.879 0.976 1.717 -0.718 0.110 2.173 2.501 -1.304 -0.309 0.000 4.993 -0.355 1.696 2.548 2.238 0.214 1.648 0.000 4.079 2.769 0.984 1.224 3.663 2.564 1.899 +0 0.315 -1.370 0.926 1.848 -0.979 0.976 -0.262 0.017 2.173 0.580 -0.321 0.607 0.000 0.658 -0.470 1.608 0.000 0.674 1.002 1.579 3.102 0.749 0.885 1.045 1.195 1.091 1.049 0.875 +1 0.556 -0.208 -1.669 1.334 0.758 0.639 0.616 -0.222 2.173 0.566 -0.387 -0.870 0.000 0.654 0.341 -1.699 0.000 0.878 0.410 1.245 3.102 0.822 0.706 0.988 0.586 0.770 0.682 0.661 +1 0.989 -1.787 -0.247 0.848 -1.586 0.471 0.121 -1.333 2.173 0.854 0.496 0.237 2.215 0.709 -0.977 0.768 0.000 0.879 0.348 1.663 0.000 0.949 0.910 1.185 1.093 0.940 1.097 0.929 +0 1.283 -0.647 -1.623 0.398 1.279 0.633 0.736 -1.126 1.087 0.760 0.217 -0.300 2.215 0.981 -1.344 0.630 0.000 0.722 1.000 1.341 0.000 0.859 0.902 0.989 0.899 0.742 0.722 0.744 +1 0.507 1.448 -0.020 1.083 -1.092 0.538 -0.297 -0.548 0.000 1.142 0.998 1.096 2.215 0.586 -1.057 0.449 0.000 0.923 1.300 -0.994 3.102 0.909 1.105 0.988 1.114 0.917 0.986 1.102 +0 1.517 -0.472 1.501 1.597 1.333 1.022 0.214 -0.357 0.000 1.096 0.070 1.619 2.215 1.008 -0.750 0.141 0.000 1.603 0.972 -0.070 3.102 0.940 1.265 0.981 1.383 1.379 1.124 1.096 +0 1.365 -0.908 1.734 0.941 1.236 3.074 -0.423 1.359 2.173 4.498 1.345 -0.478 2.215 2.485 1.593 -0.147 0.000 0.871 0.939 0.887 0.000 1.188 1.367 0.987 0.848 7.860 3.740 2.959 +1 0.672 -0.656 -1.592 1.342 0.353 1.020 0.468 1.511 0.000 0.860 -0.666 0.064 2.215 1.153 1.387 -1.044 0.000 0.777 -1.820 1.100 0.000 0.745 0.825 1.294 0.844 0.682 0.763 0.723 +0 0.647 0.910 1.638 0.767 -1.437 0.750 0.874 0.088 2.173 0.876 0.887 0.873 2.215 1.008 0.335 -0.962 0.000 1.089 -0.528 -0.516 0.000 0.755 0.998 0.988 0.897 0.776 0.837 0.811 +1 0.916 1.690 -0.634 0.552 1.147 0.748 1.313 -0.865 2.173 1.027 1.136 -1.462 0.000 1.636 1.648 0.380 0.000 1.195 0.611 1.039 3.102 1.150 0.926 0.987 0.729 1.035 0.738 0.661 +0 0.760 -1.133 -0.067 0.769 1.512 1.124 2.093 0.633 0.000 1.058 1.553 -0.522 0.000 1.400 0.468 -1.261 1.274 1.090 1.439 1.381 3.102 0.850 0.887 1.047 1.411 0.891 1.064 1.098 +0 0.812 1.438 0.576 1.265 -0.323 1.119 -0.448 1.418 0.000 0.661 -1.304 -0.785 0.000 0.882 -1.166 1.008 0.000 2.425 0.281 -0.987 3.102 0.999 1.072 1.017 1.156 0.267 1.008 1.221 +1 1.193 0.713 -1.367 1.371 1.463 1.236 -0.612 -0.820 2.173 1.725 0.458 0.154 2.215 1.215 -1.625 0.692 0.000 0.415 -1.908 0.884 0.000 0.884 1.408 0.987 1.443 2.059 1.474 1.468 +1 1.898 0.517 0.949 1.760 0.203 1.243 0.149 -1.353 2.173 1.327 -0.172 1.636 0.000 1.651 0.433 -0.068 0.000 0.933 0.777 0.583 0.000 0.821 0.588 1.575 1.760 0.922 1.161 1.002 +1 0.814 0.462 -0.230 1.436 -0.244 1.475 -0.026 -1.246 0.000 0.757 -1.239 1.238 0.000 1.514 -1.122 0.799 2.548 1.271 -0.319 0.890 0.000 0.709 0.619 0.977 2.115 0.703 1.460 1.292 +1 0.337 -0.054 0.805 1.145 -0.355 0.738 1.140 1.334 2.173 1.058 1.230 -1.579 0.000 1.619 0.039 -0.005 2.548 0.369 0.869 -0.758 0.000 0.556 1.238 0.988 1.567 1.500 1.157 1.112 +1 0.702 0.186 1.242 1.406 -1.603 1.552 -0.553 -0.571 0.000 2.401 0.629 0.935 2.215 1.615 1.044 0.598 2.548 2.641 0.269 -0.751 0.000 0.839 0.886 0.984 1.436 0.819 1.175 1.081 +1 0.821 
0.710 1.120 0.679 -1.004 0.749 0.279 -1.359 2.173 0.611 -0.901 0.433 0.000 0.818 0.696 0.263 0.000 0.870 1.151 -1.428 3.102 0.927 0.951 0.989 0.717 0.496 0.728 0.667 +1 1.136 0.719 1.366 0.626 -1.006 0.708 0.728 0.127 1.087 0.815 -0.432 1.490 0.000 1.085 1.123 -0.755 2.548 0.370 1.205 -0.094 0.000 1.055 1.040 0.988 0.757 0.826 0.810 0.737 +1 0.997 1.079 -1.146 0.728 0.204 0.483 -1.041 1.024 2.173 0.472 -0.341 -0.341 2.215 0.810 1.125 1.252 0.000 0.749 2.021 -1.219 0.000 0.871 1.065 1.107 1.217 0.708 1.026 0.901 +1 3.071 -1.038 0.676 0.620 0.675 3.127 -0.650 -1.471 0.000 2.361 -0.480 0.096 1.107 0.442 0.150 -1.294 0.000 0.974 -2.292 -0.005 0.000 0.954 1.249 0.979 1.402 1.137 1.738 1.735 +0 0.923 1.275 -1.046 1.126 -0.379 0.635 1.499 0.862 0.000 0.919 1.058 -0.354 2.215 1.165 0.122 1.188 1.274 0.646 0.884 -1.623 0.000 0.799 0.936 0.984 1.280 1.211 0.924 0.829 +0 0.295 -0.063 -1.391 3.780 -0.320 1.040 -0.865 1.657 0.000 1.172 -0.028 1.183 2.215 0.825 -1.221 1.037 0.000 0.745 1.806 -1.241 0.000 0.969 0.988 1.204 0.755 0.743 0.995 1.128 +0 0.998 -0.746 0.068 0.051 1.718 1.133 -1.050 1.403 2.173 1.674 0.547 -0.348 2.215 0.651 0.711 1.528 0.000 0.701 -1.141 -0.082 0.000 0.808 0.923 0.982 0.883 2.731 1.300 1.036 +0 1.267 -0.708 -1.331 1.436 -0.949 1.664 -0.753 0.541 2.173 0.496 -1.117 -0.732 0.000 0.462 -0.584 0.938 0.000 0.831 0.697 -1.460 3.102 0.756 0.991 0.976 0.680 1.638 1.256 0.976 +1 0.708 -0.807 1.189 0.347 0.755 0.659 -0.668 -0.939 0.000 0.864 0.621 1.064 1.107 1.342 -0.438 -0.339 2.548 1.051 0.523 0.288 0.000 0.954 0.970 0.998 0.626 1.283 0.933 0.820 +1 0.357 -1.029 1.040 1.081 -1.183 0.855 0.323 0.924 0.000 0.287 1.509 0.632 0.000 0.518 -1.317 -1.117 2.548 0.726 0.643 -0.323 3.102 0.739 1.119 0.987 0.633 0.704 0.729 0.687 +1 0.912 -1.211 -0.673 2.702 -0.548 1.063 -0.718 0.673 2.173 1.063 0.719 1.420 0.000 0.774 0.524 -1.649 2.548 0.670 1.081 1.350 0.000 0.662 1.252 0.992 1.833 1.257 1.555 1.557 +1 1.836 -0.141 0.592 0.575 -0.104 0.407 -0.466 -0.892 0.000 0.870 -0.865 -1.670 2.215 0.434 -1.775 0.675 0.000 1.655 0.216 -1.088 3.102 0.965 0.932 0.979 1.044 0.853 0.893 0.792 +0 0.924 -0.301 1.626 0.182 -0.953 1.124 -0.460 -1.121 0.000 0.798 -0.529 0.673 1.107 1.850 0.264 -0.152 2.548 0.848 -1.937 1.224 0.000 1.950 1.476 0.981 0.981 1.036 1.104 0.932 +1 0.485 0.618 -1.639 0.948 -0.102 1.249 0.582 -1.363 0.000 2.288 0.407 0.612 0.000 1.936 -0.389 -0.654 2.548 2.623 -0.173 -1.186 3.102 0.900 0.933 0.989 0.783 0.815 0.938 0.840 +0 0.841 1.305 -0.536 1.533 0.646 0.714 0.483 1.479 1.087 0.477 -2.140 0.091 0.000 0.512 0.803 0.019 0.000 0.714 1.462 -1.182 0.000 0.661 0.799 1.377 1.008 0.411 0.821 0.688 +1 0.570 0.721 0.779 0.601 1.658 0.526 1.125 -0.772 0.000 0.631 1.037 0.235 0.000 1.175 -0.105 1.599 2.548 1.121 -0.223 -0.683 0.000 0.897 1.021 0.985 0.571 0.594 0.717 0.667 +1 0.515 -1.143 -1.551 0.608 -0.138 2.469 -0.342 -1.450 0.000 1.104 0.223 -0.036 0.000 1.793 -1.322 0.449 2.548 1.453 -0.611 1.169 3.102 1.415 1.560 0.987 0.851 0.872 1.518 1.475 +1 0.703 -0.760 1.218 1.352 1.486 0.366 0.445 1.161 0.000 0.758 -0.263 -0.380 2.215 0.550 0.624 0.543 0.000 1.569 1.041 -0.471 3.102 0.438 0.770 0.980 0.995 0.827 0.919 0.745 +1 1.053 -1.488 0.202 1.039 -1.233 0.350 -2.444 -1.188 0.000 0.847 0.096 1.426 2.215 0.879 -0.801 0.445 2.548 0.798 -0.847 -0.592 0.000 0.760 0.830 1.393 1.247 0.849 0.873 0.811 +1 0.506 -1.981 -0.869 0.332 1.418 1.312 -0.826 1.664 2.173 0.990 -1.358 0.088 0.000 0.669 -1.862 0.394 0.000 0.788 1.360 0.090 0.000 0.534 0.496 0.986 0.839 0.900 0.920 0.772 +0 0.576 -1.816 -0.981 0.434 
0.782 0.764 -0.078 -0.026 2.173 0.805 -0.749 1.274 0.000 1.067 -0.156 1.739 2.548 0.922 -0.929 -0.219 0.000 0.748 0.963 0.980 0.723 1.126 0.734 0.656 +0 0.779 -0.425 0.558 0.662 -0.622 0.972 -1.031 1.136 2.173 0.784 -0.074 0.181 0.000 1.209 0.626 -1.128 2.548 0.744 -0.532 -1.735 0.000 1.023 0.993 0.988 0.879 1.784 1.003 0.852 +1 0.582 -0.450 -0.398 1.704 0.139 2.506 0.479 -1.057 2.173 3.251 1.373 0.855 0.000 1.588 2.155 -0.417 0.000 1.126 0.038 0.538 0.000 1.007 0.864 0.986 2.596 1.086 1.699 1.364 +1 0.665 0.934 1.095 1.052 -1.646 0.637 1.850 0.035 0.000 0.975 0.258 -0.405 2.215 0.828 -0.153 -1.703 2.548 0.532 1.578 1.279 0.000 0.801 1.078 0.989 0.543 0.904 0.891 0.850 +0 0.829 1.805 -0.112 0.585 1.605 0.319 1.455 0.736 0.000 0.457 -0.395 -0.722 2.215 0.523 0.159 -1.725 0.000 0.448 -0.165 -0.268 0.000 0.776 0.754 0.989 0.735 0.439 0.716 0.626 +1 0.487 0.043 -0.651 0.157 -0.174 0.763 0.428 -1.464 2.173 0.740 0.086 -0.138 2.215 0.892 -1.604 0.528 0.000 1.639 -0.709 1.049 0.000 0.870 1.100 0.851 0.941 1.047 1.058 0.871 +0 1.677 -0.637 -1.491 0.966 -1.148 0.898 -0.832 0.490 2.173 0.640 0.426 0.309 0.000 0.971 0.207 -1.732 0.000 0.930 -0.883 1.157 1.551 0.860 1.098 0.984 1.356 0.557 0.943 1.028 +1 0.729 0.406 -1.293 0.645 1.192 0.751 0.715 1.387 2.173 0.907 0.534 -0.571 2.215 1.363 -0.316 -0.311 0.000 0.861 -0.668 0.766 0.000 1.024 0.937 0.990 0.651 1.197 0.922 0.854 +0 0.741 0.264 0.828 0.930 -1.201 0.476 -0.322 1.091 0.000 1.257 -0.099 0.090 2.215 1.260 -0.016 -1.523 2.548 0.648 -0.498 -0.595 0.000 0.855 0.842 1.112 0.696 1.329 0.800 0.695 +0 0.318 0.096 -0.821 0.529 -1.037 1.175 -1.865 -1.722 0.000 0.836 -0.543 0.460 2.215 1.280 0.961 0.323 2.548 0.803 0.383 -0.583 0.000 2.509 1.785 0.999 1.601 0.996 1.677 1.358 +1 2.360 -0.588 -0.550 0.371 -1.581 1.103 -0.628 1.267 2.173 0.563 -1.421 -0.493 0.000 1.237 0.190 1.466 0.000 1.663 -0.618 0.266 3.102 0.926 1.073 1.038 0.873 1.128 1.006 0.852 +1 1.047 -0.157 0.034 0.452 1.293 0.735 -0.670 -0.557 2.173 0.728 1.142 0.579 0.000 0.791 1.045 1.595 0.000 0.688 0.872 -1.215 3.102 0.923 0.677 0.987 0.831 0.844 0.888 0.765 +0 0.341 1.150 -0.992 0.599 -1.008 0.640 0.458 1.257 2.173 0.894 -0.296 0.045 0.000 0.759 0.571 -1.379 2.548 0.594 0.759 0.500 0.000 0.721 0.894 0.993 0.635 0.606 0.666 0.663 +0 1.458 -0.559 -0.535 0.581 1.639 0.608 -1.230 -1.679 1.087 0.725 -2.228 0.772 0.000 0.755 -0.076 -1.582 2.548 1.352 0.686 0.250 0.000 2.986 1.805 1.180 0.866 0.522 1.192 1.021 +0 0.541 -1.259 1.619 0.384 0.767 0.790 -0.951 0.164 0.000 0.990 -0.278 -0.830 2.215 1.668 0.410 1.697 2.548 1.058 0.112 0.161 0.000 0.802 1.020 0.995 0.775 1.159 1.040 0.865 +0 0.840 2.235 0.046 0.668 -1.252 1.428 0.009 1.485 1.087 1.711 1.268 -0.194 2.215 0.964 -0.503 -1.202 0.000 0.546 1.178 -0.588 0.000 0.992 0.967 0.990 0.882 2.796 1.554 1.301 +0 1.492 -0.796 0.539 0.688 0.323 0.807 -0.794 -1.342 1.087 0.837 -0.473 -0.502 2.215 0.519 -0.229 -0.938 0.000 0.814 -0.902 1.726 0.000 0.573 0.607 0.988 1.202 0.853 0.886 0.722 +0 0.756 1.039 1.335 1.841 -1.470 1.581 0.449 0.232 2.173 1.419 -2.511 -1.624 0.000 1.687 -2.676 -0.559 0.000 1.352 0.069 0.770 0.000 0.646 0.764 0.990 0.441 0.824 1.013 0.819 +0 1.495 0.737 -1.221 0.935 1.017 0.581 0.525 0.065 2.173 0.270 1.377 -1.042 0.000 0.460 0.855 0.345 0.000 0.564 -0.997 0.516 3.102 0.529 0.658 1.477 1.028 0.638 0.796 0.641 +0 1.325 -1.343 0.828 0.525 -0.032 0.440 0.711 -1.146 0.000 0.713 0.467 -0.194 0.000 0.916 -0.675 -1.573 2.548 0.525 1.200 1.394 3.102 0.911 0.960 0.981 0.980 0.740 0.750 0.818 +0 1.082 -0.577 -1.740 0.330 0.107 0.831 -2.592 
-0.137 0.000 1.700 -0.147 0.803 2.215 1.931 -0.869 -1.088 2.548 0.553 0.158 0.579 0.000 2.076 1.795 0.990 0.908 2.067 1.687 1.348 +0 0.779 0.473 1.546 0.902 -0.723 0.858 0.541 0.304 0.000 1.111 0.146 -0.244 2.215 1.835 -0.126 -1.553 2.548 0.989 1.099 0.970 0.000 0.970 1.008 1.034 0.785 1.419 1.069 0.897 +1 0.510 -0.502 1.283 1.428 0.220 2.544 0.099 -1.212 0.000 0.744 -0.489 0.404 0.000 0.903 0.149 -0.113 2.548 3.322 -1.220 0.836 1.551 3.037 1.886 0.987 0.880 1.561 1.929 1.510 +1 0.654 -1.137 0.053 0.690 -1.699 1.923 -1.043 1.300 0.000 1.346 -0.194 -0.178 0.000 2.390 -1.264 -0.493 1.274 1.875 -0.952 -1.139 3.102 0.828 1.168 0.986 0.877 0.911 0.953 0.853 +0 1.597 0.091 -0.844 1.228 -1.200 1.056 -0.199 -0.409 2.173 1.127 -0.818 1.677 0.000 1.160 -0.861 0.571 2.548 2.065 -1.092 1.010 0.000 1.220 1.000 0.998 0.881 1.188 1.136 1.114 +1 0.948 -0.751 -0.939 1.035 1.715 0.963 0.521 0.620 2.173 0.440 -0.758 -0.458 0.000 0.683 0.896 -1.288 2.548 0.534 1.898 1.308 0.000 0.745 0.751 0.991 0.748 1.026 0.902 0.817 +0 0.460 0.494 -0.403 1.473 -0.370 1.159 -2.180 0.951 0.000 1.352 -0.261 -1.132 2.215 0.792 0.071 0.243 0.000 0.830 2.429 0.608 0.000 0.590 2.131 0.980 1.321 1.213 2.017 1.621 +1 2.549 -0.524 1.716 2.156 -1.269 1.851 -2.412 0.412 0.000 1.480 0.195 -0.645 0.000 0.568 0.796 -0.816 2.548 0.809 0.908 0.271 3.102 1.109 0.850 1.419 1.105 0.434 0.996 0.946 +0 0.866 0.122 0.781 1.087 1.529 0.661 0.638 -1.143 0.000 0.886 0.425 -0.236 2.215 0.550 -0.187 -0.458 1.274 0.837 0.900 0.715 0.000 0.985 0.896 0.995 0.755 0.284 0.668 0.659 +0 0.557 0.274 1.131 1.700 -1.611 1.087 -1.086 -1.181 1.087 1.343 0.701 0.347 0.000 1.170 0.465 0.802 2.548 1.406 0.734 -0.160 0.000 0.803 0.772 0.993 1.703 1.859 1.433 1.296 +0 1.450 -1.520 0.347 0.942 -0.428 1.532 -0.545 -1.176 2.173 0.449 0.212 -1.331 0.000 1.018 -2.413 1.023 0.000 1.601 0.476 0.932 0.000 1.005 0.649 1.040 1.597 1.100 1.083 1.098 +1 0.634 2.178 -0.858 0.966 -0.583 2.775 1.462 1.187 0.000 1.010 0.737 -0.567 0.000 0.957 0.092 -1.136 2.548 2.613 -0.708 -0.585 0.000 0.916 0.723 0.989 0.691 0.517 0.588 0.589 +1 1.245 2.112 0.378 0.708 1.646 0.332 -2.529 1.463 0.000 0.776 1.141 -0.440 1.107 0.472 -0.204 -1.565 1.274 0.652 -0.035 0.175 0.000 0.603 0.548 1.183 1.061 0.732 0.817 0.692 +0 1.041 -0.311 1.188 0.728 -0.160 0.384 0.206 0.032 0.000 0.488 -0.541 -0.434 2.215 1.271 0.150 1.722 2.548 0.915 -0.397 -1.437 0.000 0.933 0.804 1.131 0.684 0.838 0.640 0.590 +0 1.005 1.683 0.469 0.278 1.217 1.266 0.937 -1.076 0.000 1.105 0.762 0.857 2.215 0.633 0.135 -0.169 2.548 0.668 1.216 1.717 0.000 0.898 0.897 0.988 0.643 0.766 0.886 0.800 +0 0.392 -0.226 0.907 1.461 -1.627 1.673 -1.974 0.185 0.000 0.564 -1.894 1.587 0.000 1.579 -0.865 -1.590 2.548 0.862 -0.944 0.145 0.000 0.955 0.888 0.988 0.597 0.431 0.573 0.549 +1 0.919 0.194 -0.976 0.735 0.382 0.619 -0.242 -0.367 0.000 1.303 -0.022 1.671 2.215 0.823 0.662 0.927 1.274 0.746 1.643 1.380 0.000 1.742 1.123 1.071 0.927 0.804 0.920 0.805 +1 0.539 -0.549 0.493 1.731 -1.450 0.562 0.148 1.189 1.087 0.698 0.483 -0.384 2.215 0.691 -0.212 -0.043 0.000 0.544 0.980 -1.328 0.000 0.807 0.745 1.317 1.005 0.925 0.790 0.688 +0 1.408 1.020 -0.673 0.693 1.157 0.720 -0.442 1.195 0.000 0.777 0.516 1.439 0.000 0.619 -1.233 -0.543 2.548 0.546 -1.350 -0.211 0.000 0.877 0.880 1.364 1.212 0.644 0.796 0.868 +0 0.936 0.174 -0.733 0.654 1.317 1.084 2.003 1.262 0.000 1.201 -0.749 -1.250 0.000 2.018 -0.592 -0.345 1.274 0.969 0.519 0.195 3.102 1.725 1.476 1.043 0.959 0.883 1.100 0.959 +1 1.504 -0.080 1.626 1.061 -0.971 1.701 1.540 0.941 0.000 1.558 -1.209 
-0.349 2.215 0.954 0.701 -0.229 2.548 1.038 -0.640 1.254 0.000 0.834 1.145 1.257 1.017 1.544 1.158 0.999 +0 0.463 -0.265 1.143 0.445 -0.907 0.744 -0.470 1.684 2.173 0.614 0.926 0.170 0.000 1.077 1.540 -0.324 2.548 0.794 0.216 1.120 0.000 0.758 1.014 0.985 1.693 1.817 1.256 1.037 +1 1.601 0.205 0.104 0.109 -1.544 1.666 -0.458 -1.544 0.000 1.588 0.041 1.066 2.215 1.751 -0.171 -0.327 0.000 2.025 0.564 -0.577 0.000 1.037 0.986 0.987 1.012 0.488 1.061 0.881 +1 0.436 1.226 1.020 0.521 -0.517 1.356 0.539 -1.599 0.000 0.735 -0.010 0.176 0.000 0.883 0.236 1.151 2.548 1.066 2.313 0.267 0.000 0.802 0.794 0.996 0.746 0.972 0.633 0.594 +0 1.448 -0.116 1.070 0.596 0.270 0.818 -0.288 1.665 2.173 1.961 -0.396 -0.277 1.107 0.594 -0.065 -1.308 0.000 1.084 1.934 -1.711 0.000 1.368 1.404 0.988 1.278 1.836 1.488 1.232 +1 0.876 -0.925 -1.729 1.207 -1.274 0.400 -0.663 1.022 0.000 1.077 -0.110 -0.024 2.215 1.045 0.515 -1.287 0.000 1.649 1.061 0.477 3.102 0.723 0.897 1.001 1.326 1.044 1.047 0.849 +1 1.435 0.393 0.264 1.023 -0.161 1.010 -0.169 1.415 2.173 1.092 -0.949 -1.259 2.215 0.520 -1.350 0.455 0.000 0.936 0.069 -1.405 0.000 1.015 0.925 0.998 1.644 1.213 1.323 1.076 +1 1.611 -1.168 1.419 0.335 -1.111 0.684 -0.301 -0.237 2.173 0.465 -1.417 -0.548 0.000 0.577 -0.161 -0.688 0.000 0.655 -1.573 1.516 0.000 0.901 1.026 0.989 1.041 0.846 0.898 0.781 +1 0.903 0.807 -1.008 0.494 -0.413 0.724 -0.108 0.934 2.173 0.708 0.312 -1.544 1.107 0.614 0.583 0.815 0.000 1.206 -0.479 -0.400 0.000 1.043 0.877 0.988 1.253 0.861 0.896 0.836 +1 0.718 -2.143 0.263 0.519 -1.504 0.640 -1.512 1.171 1.087 0.651 -1.488 -1.527 0.000 1.198 -0.662 -0.494 2.548 0.741 -1.181 0.086 0.000 0.900 0.803 0.989 0.685 1.176 0.707 0.629 +1 0.277 -1.792 -1.324 0.678 0.779 0.678 0.015 1.185 0.000 0.491 0.181 -1.581 0.000 1.481 -0.749 -0.264 2.548 1.146 0.234 -0.895 1.551 0.874 1.035 0.980 0.702 0.788 0.730 0.653 +1 0.534 1.390 0.768 0.365 -1.331 0.538 -0.117 1.730 2.173 0.368 -0.597 -0.171 0.000 0.459 -1.697 -0.146 0.000 0.753 -0.838 0.520 0.000 0.465 0.882 0.993 0.662 0.553 0.661 0.638 +1 0.721 0.578 0.266 0.972 -0.725 1.138 -0.255 0.999 2.173 1.135 0.440 -1.085 0.000 0.515 -0.702 -0.144 2.548 0.803 -0.533 -1.113 0.000 0.715 0.718 0.983 1.305 0.855 0.896 0.883 +1 1.097 0.681 1.297 1.372 0.567 0.933 0.037 -0.984 2.173 0.680 0.846 -0.212 2.215 0.743 -0.531 1.707 0.000 0.869 -0.022 -0.375 0.000 0.882 0.834 1.038 1.299 0.905 0.928 0.813 +1 1.035 1.047 1.431 0.462 0.161 0.849 -0.227 -1.550 2.173 0.704 0.324 -0.087 2.215 0.730 0.715 0.756 0.000 0.820 0.078 -0.655 0.000 0.869 0.943 0.986 0.738 1.148 0.767 0.670 +1 0.692 0.540 -0.531 1.278 -1.293 1.535 0.164 -1.470 0.000 1.471 -0.735 0.074 2.215 2.209 -1.331 0.637 2.548 0.455 0.511 0.620 0.000 1.248 1.866 0.985 1.616 1.158 1.610 1.336 +1 0.485 0.212 -0.200 1.908 0.257 0.802 -0.636 1.648 2.173 0.869 0.125 -1.063 2.215 0.730 -0.801 0.742 0.000 0.563 -1.518 -1.227 0.000 0.775 0.925 0.991 1.695 0.926 1.217 1.117 +1 1.157 0.867 -0.518 0.963 -0.392 0.319 0.850 -1.468 0.000 0.895 1.433 1.368 0.000 0.741 1.901 1.567 0.000 1.201 0.348 0.465 3.102 0.721 0.969 0.980 0.754 0.473 0.683 0.774 +1 0.937 -1.100 -1.091 0.739 1.553 1.115 -0.749 -1.658 2.173 1.389 -0.319 0.297 0.000 1.457 0.794 -0.775 0.000 2.499 -0.112 1.023 0.000 0.960 1.164 0.987 0.659 0.650 1.075 0.908 +1 0.599 -0.499 0.117 1.374 -0.984 1.014 -0.507 1.025 2.173 1.122 -0.790 -0.525 2.215 0.919 -0.917 0.196 0.000 0.819 -1.326 1.691 0.000 0.975 0.948 1.052 0.656 1.564 0.925 0.813 +0 0.564 -1.735 0.062 0.185 -1.231 0.728 0.057 0.910 1.087 1.064 -0.011 -0.908 2.215 
1.102 0.774 -1.185 0.000 1.361 1.520 -0.029 0.000 0.968 1.103 0.982 0.803 1.293 0.986 0.952 +1 0.430 1.524 -0.996 0.378 0.658 0.792 -0.227 -0.691 2.173 1.342 -0.792 0.629 0.000 0.457 0.705 -1.642 0.000 0.600 -0.987 1.417 3.102 1.503 0.896 0.994 0.776 0.780 0.832 0.753 +1 0.305 1.173 -0.190 0.257 1.042 1.543 0.999 -1.675 0.000 2.166 0.502 0.171 2.215 1.184 -0.161 -1.684 2.548 0.717 -0.442 0.325 0.000 0.919 0.931 0.987 1.039 1.797 1.231 0.970 +0 1.044 0.869 -0.758 0.706 -1.460 1.206 0.122 0.883 1.087 0.846 -0.110 0.121 0.000 1.604 -1.693 -0.994 0.000 1.269 0.767 0.635 3.102 2.323 2.043 0.987 1.469 0.609 1.539 1.585 +0 0.606 0.522 0.922 1.061 -0.271 0.886 -0.650 1.209 2.173 0.764 0.292 -0.921 2.215 1.407 0.285 0.008 0.000 2.007 -1.438 -1.473 0.000 0.919 0.964 0.988 1.147 1.287 1.034 0.874 +0 0.774 -0.606 -0.130 1.562 0.644 1.005 -1.452 -1.726 2.173 1.106 -1.241 -0.103 2.215 0.780 -0.703 -1.298 0.000 1.100 -0.844 -0.644 0.000 0.909 1.044 0.988 0.832 1.550 1.057 0.947 +0 0.461 -0.009 -1.427 1.320 -0.494 1.618 -0.034 1.169 2.173 0.660 2.458 -0.572 0.000 0.331 -2.591 1.552 0.000 1.307 0.225 -0.360 3.102 0.834 0.995 0.987 1.498 1.529 1.084 1.093 +0 1.651 -0.724 -0.003 0.637 0.604 1.031 0.861 -1.730 2.173 0.465 1.314 -1.063 2.215 0.492 0.012 0.324 0.000 0.464 1.624 0.322 0.000 0.594 0.923 0.989 1.073 0.629 1.106 0.881 +0 1.977 1.600 0.129 0.370 -1.618 0.680 1.261 0.726 0.000 1.168 1.240 -1.281 2.215 0.537 1.682 -1.667 0.000 1.043 0.640 -1.585 3.102 0.959 1.046 1.185 0.922 0.371 0.807 0.755 +0 1.487 1.030 0.198 1.399 0.269 1.832 -1.063 1.564 0.000 1.162 0.535 -0.713 2.215 0.874 1.375 -0.457 0.000 1.005 -0.050 -1.469 3.102 0.510 0.729 0.985 1.268 0.685 1.032 0.790 +0 0.472 1.839 0.474 1.223 1.343 1.371 -0.387 -0.755 0.000 1.335 0.164 0.388 2.215 0.977 1.012 -1.692 0.000 0.435 2.489 -1.348 0.000 0.884 1.307 0.996 0.978 0.675 1.041 1.006 +0 1.267 1.363 -1.494 0.472 -1.248 0.607 0.370 -0.679 0.000 0.969 1.704 1.317 0.000 1.369 -0.885 0.100 2.548 0.843 -0.060 0.258 0.000 0.853 0.925 0.982 0.592 1.038 1.384 1.120 +1 0.408 -1.291 0.096 1.091 -0.296 1.418 0.673 1.507 2.173 0.599 -0.510 0.252 0.000 0.449 0.171 -1.385 0.000 0.400 0.378 -0.516 3.102 0.844 1.203 0.985 0.472 0.777 0.891 0.768 +1 0.813 0.110 1.020 0.796 -0.482 0.547 1.183 -1.261 0.000 0.703 -1.389 1.390 2.215 0.998 1.613 0.525 0.000 1.160 -0.234 0.070 3.102 0.650 0.830 1.088 0.940 0.904 1.047 0.863 +0 1.206 -0.267 0.208 0.594 1.402 0.687 0.551 -1.704 0.000 0.364 0.328 -1.372 2.215 0.783 -1.079 -0.608 2.548 0.405 0.444 0.281 0.000 0.786 0.986 1.032 0.669 0.597 0.594 0.621 +1 0.466 -0.732 -0.382 1.825 0.995 1.036 1.133 -1.170 2.173 0.702 -1.457 0.795 0.000 1.408 -0.127 -0.783 2.548 0.526 0.372 0.947 0.000 0.904 1.126 1.209 1.829 1.154 1.289 1.166 +0 0.308 0.478 -1.365 2.144 -1.700 0.817 -0.306 0.005 0.000 0.654 1.038 0.420 0.000 0.747 0.129 1.410 0.000 0.754 -1.077 -0.157 0.000 0.981 0.656 0.990 0.472 0.240 0.421 0.606 +1 2.065 -0.545 -0.907 0.428 0.719 0.978 -0.843 1.035 2.173 0.897 -1.602 1.690 2.215 0.515 -0.340 0.774 0.000 0.794 0.141 -0.096 0.000 0.994 1.103 1.295 1.084 0.952 0.978 0.887 +1 0.660 -0.712 1.606 0.471 -0.562 1.135 0.341 0.404 0.000 1.730 -1.129 -0.872 2.215 1.478 -1.252 0.958 0.000 1.463 -0.185 -1.369 3.102 2.519 1.792 0.985 1.049 0.936 1.538 1.199 +0 0.662 -1.682 0.254 1.132 -0.909 0.914 -0.743 0.479 0.000 1.280 -0.157 -1.538 0.000 1.070 -0.257 1.129 0.000 1.036 2.429 -1.490 0.000 1.068 0.912 1.039 0.919 0.912 0.983 0.961 +0 0.568 -1.189 -0.872 1.624 1.393 1.026 -0.455 -1.399 0.000 1.313 -1.154 0.613 1.107 0.762 0.205 -0.133 
1.274 0.617 1.868 -0.300 0.000 0.258 0.557 1.186 1.000 1.050 1.162 1.213 +1 1.111 1.560 -0.689 0.660 -0.530 2.317 -0.741 0.779 1.087 1.047 -0.114 -0.517 1.107 2.357 0.695 -1.527 0.000 1.710 0.649 -0.207 0.000 0.964 1.491 0.992 2.492 2.233 2.093 1.754 +1 1.802 0.828 -1.468 1.536 1.439 1.159 1.380 0.151 2.173 0.652 0.227 -0.712 2.215 0.779 0.289 -0.025 0.000 0.674 1.729 0.605 0.000 0.906 0.802 1.148 1.646 1.192 1.167 0.965 +1 1.632 1.196 0.552 0.288 0.171 0.768 1.277 -1.299 2.173 1.137 1.306 1.263 0.000 0.458 0.968 -0.376 0.000 1.484 2.035 -0.692 0.000 0.725 0.827 0.981 0.844 1.291 0.962 0.929 +0 0.315 1.183 -0.662 0.470 0.255 0.335 0.951 0.798 2.173 0.619 1.629 1.596 0.000 0.963 1.123 -0.992 2.548 0.398 0.134 0.438 0.000 0.776 0.688 0.994 0.697 0.714 0.664 0.638 +0 2.002 0.597 0.906 1.390 0.357 0.869 0.347 0.251 2.173 2.277 -0.351 -1.134 0.000 0.925 -0.819 -1.700 0.000 0.972 0.262 -1.009 3.102 1.245 0.827 1.096 0.765 0.882 1.087 1.243 +0 0.342 2.116 0.135 1.931 1.302 0.726 1.235 0.395 2.173 0.928 0.558 -0.234 2.215 1.202 1.278 -0.786 0.000 0.865 1.385 -1.581 0.000 0.752 1.001 0.986 1.445 0.770 1.043 0.928 +1 0.969 0.322 0.500 1.210 -0.313 0.645 -1.398 1.215 2.173 0.649 -0.395 -0.355 0.000 1.271 1.244 -1.693 0.000 1.180 1.073 -0.684 0.000 1.067 0.844 1.003 1.311 1.016 1.241 1.088 +0 0.775 -0.724 -1.033 0.673 -0.156 0.870 0.448 0.142 2.173 0.915 -1.909 -0.805 0.000 1.133 -1.390 1.476 2.548 1.443 0.253 1.188 0.000 0.900 0.896 0.994 0.831 1.840 1.462 1.215 +1 0.806 -0.786 1.701 0.751 1.413 1.216 -1.195 -1.073 0.000 1.739 -0.405 0.756 2.215 0.694 -0.475 0.263 0.000 0.934 -0.982 0.216 3.102 0.880 0.956 0.992 0.746 0.701 0.862 0.736 +1 0.470 -0.240 1.010 0.799 -1.036 0.679 0.048 -1.211 2.173 0.845 -0.505 0.266 0.000 0.954 0.319 0.024 0.000 0.450 0.162 0.401 0.000 0.686 1.056 0.993 0.662 0.731 0.832 0.731 +1 2.191 0.725 0.379 0.715 0.114 0.967 0.582 -1.446 1.087 1.482 -0.554 1.133 2.215 1.411 -2.391 -0.642 0.000 0.588 0.530 -1.152 0.000 1.181 1.839 0.986 1.425 1.677 1.819 1.818 +1 1.156 -0.205 0.784 1.604 0.364 1.566 -0.411 -1.711 2.173 1.754 -0.152 -0.684 0.000 0.648 1.293 -0.456 0.000 0.938 -0.906 0.935 3.102 1.465 1.392 0.997 1.588 0.983 1.294 1.265 +1 0.563 -0.399 -1.400 1.293 -0.076 1.038 -1.101 -0.993 0.000 1.166 -0.222 0.356 0.000 1.003 -0.313 -0.364 0.000 2.819 0.076 1.286 1.551 1.006 1.229 1.098 1.124 0.841 0.997 0.897 +1 1.862 -0.351 0.695 0.952 0.257 1.168 0.014 -1.066 2.173 1.166 0.597 1.478 1.107 0.437 -1.017 -0.364 0.000 1.007 0.316 -0.503 0.000 0.607 1.051 0.994 1.574 1.391 1.284 1.019 +0 0.458 0.953 -0.724 1.262 0.721 0.606 0.490 -1.157 2.173 0.958 0.898 -0.005 0.000 1.330 -0.950 1.586 0.000 0.528 1.709 -0.726 0.000 0.785 0.892 1.016 0.983 0.997 0.883 0.798 +0 1.030 0.716 0.225 1.720 0.895 1.390 -0.051 -0.632 0.000 0.430 -2.800 1.186 0.000 1.586 -1.360 -1.354 0.000 1.222 0.058 1.496 3.102 0.808 0.858 1.049 0.822 0.183 0.723 0.871 +1 1.278 2.224 0.845 0.491 -1.106 0.453 1.028 0.098 1.087 0.701 0.462 -1.077 2.215 0.786 0.692 1.222 0.000 0.862 0.876 -0.566 0.000 0.917 0.716 1.079 0.828 0.760 0.809 0.713 +0 0.318 0.709 -1.278 2.134 -0.401 0.990 -1.377 1.107 1.087 0.444 -1.660 1.711 0.000 0.708 -0.428 -0.913 0.000 0.449 0.684 0.884 3.102 0.829 0.965 0.995 0.673 0.960 1.808 1.512 +1 0.639 -0.651 1.540 1.018 1.589 0.403 0.046 -1.731 0.000 0.735 1.250 -0.300 2.215 1.597 0.246 -0.152 0.000 1.102 1.247 -1.640 0.000 0.849 0.971 0.980 1.064 0.916 1.333 1.029 +1 1.749 -1.253 -1.248 0.213 -0.697 1.072 -1.216 -0.634 0.000 1.700 -0.450 1.186 2.215 1.778 0.166 0.270 2.548 0.468 1.604 1.164 0.000 
2.727 1.941 0.977 1.235 1.488 1.496 1.301 +1 0.711 1.387 -0.550 0.682 1.169 0.840 0.675 -0.416 0.000 0.670 0.423 1.652 2.215 1.162 -1.547 0.720 2.548 1.221 -1.052 -1.394 0.000 0.820 0.941 0.986 2.025 1.390 1.340 1.174 +1 2.221 -0.485 -0.529 1.659 -1.097 1.174 -0.588 1.232 2.173 0.516 0.873 0.734 2.215 0.480 0.239 0.155 0.000 0.727 -0.675 0.738 0.000 0.491 0.729 1.301 1.295 1.061 1.266 0.968 +1 0.646 1.537 0.597 0.585 -0.993 0.415 -0.661 -0.411 2.173 1.071 1.309 -1.148 0.000 1.105 -0.176 1.601 2.548 0.742 1.322 -0.218 0.000 0.874 1.170 0.984 0.830 0.844 0.915 0.784 +0 1.087 -0.371 -1.135 0.986 1.413 1.557 -0.756 0.358 1.087 0.805 -0.740 -0.141 0.000 1.329 0.961 -1.615 0.000 2.037 -0.953 -0.881 1.551 0.841 1.651 1.073 1.399 1.733 1.539 1.251 +0 1.526 0.015 0.377 1.189 0.370 2.183 -0.313 0.248 1.087 1.187 -1.013 -1.736 0.000 2.235 -0.533 -1.236 2.548 0.977 0.086 1.505 0.000 0.903 0.936 0.989 0.698 2.703 1.564 1.326 +0 0.480 1.213 -1.015 0.266 -1.263 1.325 -0.053 0.257 2.173 0.915 0.854 -1.458 2.215 1.408 2.276 -1.334 0.000 0.644 1.589 0.852 0.000 1.018 1.045 1.000 1.052 1.799 1.584 1.294 +1 0.477 1.135 0.031 0.819 1.296 1.230 0.902 -1.424 2.173 1.009 0.063 0.280 2.215 0.398 1.060 0.973 0.000 0.510 -1.712 -0.307 0.000 0.564 0.803 0.987 1.128 1.787 1.139 0.863 +1 0.847 -0.198 0.589 0.753 -1.645 0.926 -0.963 0.268 0.000 1.480 -0.879 -1.251 0.000 0.834 -0.740 0.969 2.548 0.775 -0.250 -0.407 1.551 2.437 1.362 1.000 0.594 0.602 0.897 0.798 +1 1.502 1.235 -0.315 0.909 0.387 0.979 1.103 -0.781 2.173 1.137 1.371 0.526 2.215 1.361 1.278 1.279 0.000 2.196 -1.677 1.656 0.000 0.478 3.022 0.985 0.742 1.453 2.434 2.159 +1 1.409 -1.383 0.839 1.938 1.094 1.174 -2.623 -1.023 0.000 1.580 -0.498 -0.586 2.215 0.838 -0.857 0.256 0.000 0.943 1.605 1.586 0.000 2.146 1.171 0.992 1.680 0.806 1.074 1.228 +1 1.165 -0.383 -0.555 0.299 1.551 0.830 -0.577 0.269 2.173 0.562 -0.921 1.593 2.215 0.724 1.309 -1.327 0.000 0.623 0.373 1.196 0.000 0.682 0.879 0.990 0.848 0.952 0.858 0.758 +1 1.842 0.028 0.346 0.722 -0.478 0.706 2.140 1.680 0.000 0.740 0.565 -1.321 1.107 1.094 0.686 0.842 0.000 0.618 -0.298 -1.207 0.000 1.021 0.830 1.079 0.693 0.373 0.674 0.654 +1 2.836 -0.194 1.465 0.223 0.455 1.387 -1.969 -0.347 0.000 1.212 0.981 0.516 2.215 0.613 0.204 -1.408 0.000 0.791 0.940 -1.302 3.102 2.610 2.268 0.994 1.309 0.883 2.074 1.746 +1 1.802 1.481 -0.873 0.561 -1.042 1.125 1.176 0.878 2.173 0.758 0.715 -1.725 1.107 0.378 1.608 0.596 0.000 1.163 -0.248 -0.123 0.000 0.999 0.927 0.976 1.490 1.019 1.076 1.006 +1 0.365 0.334 1.060 1.061 -0.253 0.770 1.179 -0.724 2.173 0.593 1.424 0.614 0.000 1.072 1.216 1.351 0.000 1.174 1.919 -0.404 0.000 0.865 0.902 0.985 0.852 1.124 0.894 0.770 +0 0.543 0.053 0.195 1.206 1.437 1.140 -1.940 0.242 0.000 1.352 -1.015 -1.244 2.215 1.086 -0.936 1.184 2.548 0.782 -1.885 -0.817 0.000 0.844 1.038 1.009 1.099 1.050 0.925 0.825 +0 0.460 -1.249 1.561 0.825 0.232 0.783 0.029 0.693 0.000 0.972 -0.197 -0.220 2.215 0.819 -0.243 1.455 0.000 1.181 -1.435 -1.644 0.000 0.937 0.928 0.986 0.709 0.747 0.762 0.679 +1 1.110 0.944 -0.582 1.145 0.092 1.085 0.364 1.453 1.087 0.762 -1.966 -0.135 0.000 0.877 -0.946 -1.470 2.548 0.739 -0.754 -0.896 0.000 0.845 0.801 0.986 1.404 1.099 1.213 1.420 +0 0.928 -0.271 -1.611 0.958 1.294 1.261 0.916 -0.415 2.173 1.222 1.148 -0.091 0.000 2.110 1.517 1.609 0.000 1.554 -0.114 0.474 3.102 0.465 1.552 0.986 1.358 1.346 1.321 1.150 +1 1.328 0.741 1.358 1.446 -0.177 0.695 -0.337 -0.899 2.173 0.632 -0.778 0.155 2.215 0.505 1.032 -0.979 0.000 0.468 1.421 -0.519 0.000 0.266 0.849 1.886 1.319 
0.824 1.001 0.810 +0 1.292 -1.242 1.562 0.352 1.736 0.422 0.220 -0.691 2.173 0.764 -0.320 0.758 0.000 1.267 -0.812 0.169 2.548 0.742 -1.429 0.039 0.000 0.944 0.921 0.987 0.959 0.832 0.775 0.718 +0 0.722 0.769 0.402 0.755 -0.706 0.623 0.240 -1.417 2.173 0.492 0.178 1.470 0.000 0.640 -0.381 -0.142 2.548 0.724 1.265 0.620 0.000 0.854 0.786 0.985 0.758 0.765 0.696 0.639 +1 0.615 0.839 -0.767 1.261 0.258 2.247 -0.196 1.439 0.000 3.664 -0.834 -0.517 0.000 1.221 -0.394 0.854 0.000 1.195 0.374 1.203 3.102 0.904 0.634 0.987 0.546 0.108 0.483 0.522 +1 0.414 0.829 -0.924 0.812 -0.354 1.055 0.086 0.211 2.173 1.301 -0.861 1.657 0.000 0.761 -0.024 1.094 2.548 0.581 -1.138 1.554 0.000 0.968 0.819 0.995 1.558 0.800 1.134 1.454 +0 0.556 1.301 0.363 0.855 1.312 0.528 1.230 -0.839 2.173 0.672 2.038 -0.520 0.000 0.500 1.972 0.509 0.000 0.796 0.322 1.557 3.102 0.712 0.844 0.986 0.867 0.646 0.680 0.634 +1 0.951 -2.027 -0.086 0.406 1.271 0.854 -0.939 0.919 0.000 1.204 -1.137 -1.059 0.000 0.441 1.163 1.452 2.548 0.697 -1.798 1.102 0.000 0.982 0.963 0.987 1.150 0.294 0.834 0.784 +1 0.490 0.540 -0.263 0.761 0.846 1.462 -0.835 -1.371 2.173 0.709 -1.214 -0.249 0.000 0.898 -1.101 1.377 1.274 1.007 -1.279 0.585 0.000 0.768 0.783 0.994 2.085 0.919 1.477 1.341 +0 0.749 -0.572 -0.668 1.894 -1.532 1.614 -0.586 0.459 2.173 0.869 -0.400 -1.574 0.000 1.260 0.484 -0.837 2.548 0.665 -1.141 0.698 0.000 1.009 1.073 1.158 1.649 1.944 1.299 1.072 +0 0.821 -1.325 0.287 0.716 -1.569 0.349 -1.035 -1.267 0.000 0.647 -1.459 -0.655 0.000 0.567 -2.189 0.688 0.000 1.204 -0.708 -0.098 0.000 0.866 0.800 1.057 0.594 0.289 0.553 0.552 +1 0.509 1.327 -1.693 0.261 0.069 0.828 -0.338 1.494 0.000 0.961 0.013 0.129 0.000 1.116 -0.056 1.034 1.274 1.278 -1.651 -0.434 0.000 1.818 1.112 0.980 0.651 1.014 0.898 0.755 +0 0.438 1.392 0.296 2.501 0.006 0.946 -0.136 -1.315 2.173 0.829 -0.224 1.425 0.000 0.455 -0.551 -0.014 0.000 1.017 0.473 1.533 3.102 0.924 0.912 0.979 0.950 0.676 1.000 0.878 +0 0.874 0.845 1.136 2.139 -1.655 0.594 -0.141 0.162 2.173 1.010 -1.047 -0.470 2.215 0.570 0.475 -0.735 0.000 1.204 1.149 0.589 0.000 0.940 0.843 1.113 1.803 0.827 1.284 1.084 +0 1.209 -1.003 -1.401 0.251 1.298 1.398 -0.061 0.506 0.000 1.385 -0.492 -1.182 2.215 0.925 0.268 -0.001 0.000 1.727 -0.218 0.953 3.102 0.975 0.864 0.985 0.873 1.321 1.129 1.128 +1 0.964 0.281 0.602 1.107 -1.560 1.464 -0.313 0.539 0.000 0.962 -0.725 -1.175 1.107 1.006 -1.261 -1.132 0.000 1.348 0.285 -0.761 3.102 0.832 1.123 1.331 0.862 0.696 0.758 0.728 +1 0.431 -0.695 0.622 0.647 0.781 0.545 -1.487 -1.200 0.000 0.808 0.113 -0.897 2.215 0.409 -1.359 0.985 0.000 0.929 1.136 1.453 0.000 0.782 0.946 0.992 0.753 0.675 0.691 0.806 +0 1.288 -1.203 -0.728 0.266 -1.432 1.381 -0.259 1.235 2.173 1.336 -2.223 -0.227 0.000 0.576 -0.603 -1.654 0.000 0.698 -1.369 0.126 3.102 1.775 0.987 0.978 1.297 1.169 1.366 1.136 +0 0.533 0.083 -0.154 1.295 0.886 0.924 0.542 -1.511 0.000 0.939 -0.309 0.467 0.000 1.128 -1.054 -0.244 2.548 0.807 0.042 -0.752 0.000 0.898 0.694 0.986 1.080 1.538 0.959 0.891 +1 1.401 0.795 -1.709 1.392 1.551 0.663 0.490 0.272 0.000 0.993 1.206 -0.554 2.215 0.770 0.213 -0.077 2.548 0.840 0.401 1.160 0.000 0.817 0.987 0.997 0.965 0.623 0.927 0.839 +1 0.491 -2.116 0.564 1.785 1.368 0.724 -1.219 -0.406 2.173 0.558 -0.992 -0.975 0.000 0.330 1.140 -0.514 2.548 0.429 -1.989 1.166 0.000 0.761 0.876 0.986 1.403 0.967 1.438 1.093 +1 1.185 -0.447 -1.355 0.318 -0.041 1.166 -0.122 0.094 0.000 0.960 0.011 1.678 2.215 0.491 1.296 1.077 0.000 0.670 0.470 0.721 0.000 0.863 0.983 0.989 0.676 0.631 0.848 0.750 +1 
0.915 0.172 -1.613 1.108 -1.053 0.831 0.679 -0.599 0.000 1.694 -0.129 0.975 2.215 1.026 0.419 0.329 1.274 0.447 -1.170 -0.981 0.000 1.202 0.990 0.979 1.230 0.878 1.006 0.958 +0 1.819 -0.873 1.669 0.093 -0.379 1.068 0.666 -0.428 2.173 0.519 -0.546 1.119 0.000 0.295 -0.108 -0.523 2.548 0.458 -1.402 1.003 0.000 0.381 1.307 0.978 0.591 0.291 0.901 0.792 +1 1.832 -0.448 1.628 1.037 -1.564 2.573 2.052 0.737 0.000 1.335 -0.605 -0.853 2.215 2.406 0.527 -0.645 0.000 1.077 0.223 -0.172 3.102 0.834 0.754 0.984 1.139 0.797 0.987 1.052 +0 1.228 0.097 -1.122 0.861 -0.232 0.760 0.013 1.359 2.173 0.747 -0.389 0.161 0.000 0.882 -0.196 1.009 2.548 0.720 -1.327 -0.615 0.000 0.862 1.101 1.024 0.851 0.338 0.726 0.745 +1 1.362 1.350 0.263 1.986 0.918 2.850 -0.546 -0.965 0.000 0.797 0.727 1.172 1.107 0.665 0.529 0.050 2.548 1.399 1.716 0.689 0.000 1.007 0.829 1.268 0.807 0.659 0.655 0.656 +0 0.880 0.372 -1.197 0.660 0.721 0.436 -0.144 -1.471 0.000 1.185 0.119 1.283 2.215 1.173 1.506 0.045 0.000 1.089 -0.241 -0.037 1.551 1.145 0.913 1.042 0.767 0.975 0.884 0.760 +1 0.425 0.146 1.031 2.097 0.343 1.395 0.643 0.035 0.000 1.223 0.811 -1.114 1.107 0.988 -0.154 1.702 2.548 1.034 -0.938 -1.730 0.000 0.529 0.948 0.995 1.090 0.902 0.998 0.879 +1 0.370 -2.328 0.629 2.233 1.262 0.836 -1.265 -0.280 1.087 0.515 -0.831 -1.678 0.000 1.718 -0.877 -0.933 2.548 0.820 -0.253 0.467 0.000 0.827 0.921 0.991 1.289 0.862 1.062 0.879 +0 1.577 -0.364 0.068 1.957 0.034 1.099 0.561 -1.465 0.000 1.063 0.618 1.579 0.000 1.334 -0.084 1.353 1.274 1.117 -0.842 -0.188 3.102 0.892 0.905 0.971 0.504 1.020 0.973 1.305 +1 0.474 1.571 0.085 0.867 -0.462 0.802 1.437 1.598 0.000 0.650 1.786 0.680 0.000 0.874 0.808 -1.272 0.000 1.209 0.411 1.249 3.102 0.900 0.755 0.982 0.738 0.989 0.976 0.868 +1 0.677 -1.667 1.654 1.042 -0.514 1.427 -1.426 -1.463 2.173 1.021 -1.133 0.884 2.215 1.524 -1.698 0.318 0.000 1.573 -2.043 0.102 0.000 0.959 0.972 1.080 0.929 1.537 1.073 0.899 +0 0.854 0.573 -0.283 1.384 0.507 0.892 -0.370 0.267 2.173 1.442 -0.608 -1.724 2.215 1.311 -0.062 -1.429 0.000 0.605 1.295 -1.278 0.000 0.897 1.009 0.987 0.776 1.641 1.105 1.023 +0 0.448 0.449 1.720 0.832 0.398 2.069 -0.561 -1.310 2.173 2.121 -2.766 0.091 0.000 2.703 -0.060 0.915 2.548 1.121 -0.479 -1.665 0.000 0.892 0.841 0.985 0.809 2.773 1.345 1.087 +1 0.766 0.628 1.011 0.421 -1.630 0.810 2.112 -0.804 0.000 1.212 0.335 1.562 0.000 1.088 0.045 0.357 2.548 1.099 -1.062 -0.199 3.102 1.054 0.978 0.995 0.826 0.718 0.795 0.698 +0 0.591 0.922 1.036 1.039 -0.689 0.704 0.259 1.020 2.173 0.559 0.678 -0.453 2.215 1.613 0.172 -1.292 0.000 0.820 0.587 0.616 0.000 1.297 0.893 1.085 0.886 0.919 0.744 0.695 +1 0.960 0.729 -0.016 0.790 -0.715 1.048 -0.770 1.200 2.173 0.411 -1.410 -1.195 0.000 0.619 0.585 1.522 2.548 1.331 -0.435 0.102 0.000 0.971 1.104 0.995 0.738 0.826 0.888 0.797 +1 1.233 1.278 0.160 1.310 -0.216 0.541 0.748 1.499 1.087 0.471 0.838 -1.040 0.000 0.799 0.266 -1.524 0.000 0.961 -1.564 1.309 0.000 0.482 0.538 0.997 1.156 0.769 0.905 0.814 +1 1.687 -0.032 0.254 0.751 0.044 0.888 0.762 -1.276 2.173 1.019 -0.157 1.560 2.215 0.753 -0.267 0.556 0.000 1.117 1.372 -1.381 0.000 0.868 0.991 0.976 1.275 1.027 1.049 0.883 +1 0.753 -0.278 0.418 0.779 -1.083 1.607 0.057 0.934 0.000 1.101 -0.017 -1.124 2.215 2.497 -0.851 -0.356 0.000 1.357 -1.332 -0.471 0.000 0.874 0.955 1.036 0.754 0.850 0.966 0.843 +1 0.641 0.894 1.076 1.262 -1.306 0.849 0.481 0.724 0.000 0.928 0.418 0.204 0.000 1.250 0.869 -1.227 2.548 0.445 -0.611 -0.535 0.000 0.854 0.900 1.046 0.628 0.663 0.846 0.796 +1 0.939 0.243 0.636 0.797 
-0.596 0.995 0.316 -0.478 2.173 0.844 0.985 -1.236 2.215 1.759 -0.144 1.364 0.000 1.026 -0.197 0.530 0.000 1.014 1.224 1.073 0.790 0.976 1.040 0.877 +0 0.523 0.402 0.055 1.156 -0.851 0.590 0.575 0.939 2.173 0.735 -0.073 -1.619 2.215 0.776 2.705 -0.308 0.000 0.406 -1.968 1.296 0.000 0.618 0.879 0.987 0.959 0.788 0.896 0.814 +0 0.962 -0.803 -0.057 0.509 0.990 1.105 -1.112 -0.401 1.087 0.932 -0.275 1.454 2.215 1.082 -0.801 1.220 0.000 0.999 0.929 -1.400 0.000 1.567 0.993 0.989 0.910 1.620 1.163 0.957 +1 0.431 0.736 0.022 0.675 -0.068 0.711 1.214 0.405 2.173 1.391 1.022 -1.371 1.107 0.550 2.699 -0.015 0.000 0.931 1.963 1.681 0.000 0.825 0.964 0.996 1.167 1.468 0.962 0.851 +1 1.154 0.646 -0.386 0.780 0.370 0.762 -1.064 1.006 0.000 2.105 -0.086 -1.260 2.215 0.636 1.311 0.866 0.000 0.905 1.365 0.034 0.000 0.695 1.188 0.984 0.673 1.076 1.009 0.834 +1 0.784 -1.019 1.146 1.009 0.261 1.210 0.596 -1.057 2.173 0.712 -1.112 0.682 0.000 0.707 -0.204 1.380 0.000 0.669 1.216 0.743 3.102 0.834 0.840 0.986 1.903 1.040 1.350 1.052 +1 0.586 -1.396 0.349 0.313 1.579 2.581 0.616 -1.511 0.000 3.263 -0.619 0.165 2.215 0.852 -0.910 0.781 0.000 1.613 -0.005 0.294 3.102 0.696 0.759 0.990 1.060 0.707 0.788 0.721 +0 0.530 -1.176 1.471 0.725 0.255 0.395 -1.423 -0.125 0.000 0.696 -0.091 0.191 0.000 1.521 -1.182 -1.532 2.548 0.516 0.697 1.417 3.102 0.864 0.811 0.989 0.916 0.924 0.818 0.714 +0 1.729 1.223 1.279 0.698 0.605 0.715 2.818 -0.420 0.000 0.698 0.166 -1.076 2.215 0.953 -1.072 0.501 1.274 1.008 -0.207 1.404 0.000 1.124 1.006 0.987 1.388 1.063 1.043 1.051 +1 1.504 -1.419 -1.247 0.639 0.984 0.845 -0.824 0.557 2.173 0.449 -1.895 -0.400 0.000 0.401 -0.856 1.434 0.000 0.458 -2.364 -1.005 0.000 0.734 0.800 1.229 1.106 0.792 0.878 0.732 +0 1.809 -0.711 -0.993 1.336 -1.384 0.668 1.511 1.026 0.000 1.034 0.392 0.924 0.000 1.199 -0.140 -0.389 2.548 1.444 0.854 0.224 1.551 1.056 0.897 0.975 0.869 0.820 0.971 1.193 +0 0.527 0.226 1.589 1.443 0.747 1.136 0.720 -0.819 2.173 0.989 0.653 -1.296 2.215 1.059 1.683 0.856 0.000 0.850 0.140 -0.312 0.000 0.837 0.912 0.991 1.247 0.651 0.915 0.829 +0 1.483 -0.707 1.307 0.474 1.044 0.759 -1.456 -0.409 0.000 0.835 -0.601 -0.993 2.215 1.169 0.338 1.008 0.000 1.368 -1.634 -0.959 0.000 0.835 0.843 0.976 0.787 0.697 0.721 0.862 +1 0.374 0.785 -0.924 0.676 -0.936 1.064 -0.433 1.499 0.000 1.564 -0.472 0.292 2.215 1.415 -0.171 -0.729 1.274 1.423 -0.505 0.981 0.000 0.863 1.325 0.994 1.031 1.280 1.106 0.946 +1 0.708 0.893 -0.011 0.756 1.357 1.118 2.309 -0.994 0.000 1.163 -0.176 0.835 2.215 0.606 0.411 -0.326 0.000 0.933 -0.299 1.515 3.102 0.483 0.690 0.987 0.756 0.547 0.691 0.610 +0 0.580 -0.226 -1.304 0.701 -1.535 1.785 -1.710 1.491 0.000 1.356 -1.185 0.061 0.000 1.144 -2.057 -0.438 0.000 1.187 -0.881 -0.521 1.551 1.331 0.867 0.977 0.875 0.674 0.793 1.195 +1 0.604 1.936 0.798 1.423 1.163 3.059 -1.635 -0.905 0.000 1.436 0.200 1.149 0.000 1.633 1.355 0.266 2.548 1.285 1.177 0.693 3.102 2.213 1.298 0.978 0.900 0.418 0.964 0.823 +1 1.030 -0.164 1.538 0.990 0.337 0.661 0.549 -0.920 2.173 1.047 -0.100 -1.464 0.000 1.108 -0.591 0.570 2.548 0.783 0.529 0.118 0.000 1.249 0.898 1.235 0.739 1.242 0.826 0.777 +0 0.443 -0.559 -0.051 0.778 0.730 1.317 0.922 -0.553 1.087 0.862 0.112 1.372 0.000 0.983 0.789 0.929 2.548 1.195 0.605 -1.470 0.000 0.834 0.738 0.998 1.040 1.379 0.981 0.846 +1 1.280 0.126 1.440 0.604 -0.592 1.085 0.657 0.015 0.000 0.798 0.709 -0.382 2.215 1.492 1.010 1.235 0.000 1.468 0.537 -0.948 3.102 0.875 1.062 1.177 0.785 0.478 0.798 0.777 +0 1.185 0.703 0.414 0.733 0.877 0.796 -0.357 1.575 2.173 
1.131 0.572 -1.007 0.000 1.365 0.102 -0.495 2.548 0.526 -0.393 0.661 0.000 1.147 1.088 0.989 0.920 1.280 0.882 0.837 +0 1.634 -0.262 1.191 0.357 0.689 1.073 1.440 -0.750 0.000 0.762 -0.357 -1.108 2.215 1.235 0.623 0.596 0.000 2.182 -0.614 0.568 1.551 1.095 1.115 0.997 0.852 1.182 0.966 0.845 +0 0.632 0.571 0.179 1.788 1.099 0.626 0.052 -0.082 0.000 0.926 0.015 -0.945 0.000 0.779 -0.735 -1.454 2.548 0.568 0.820 1.422 3.102 1.135 0.932 1.086 0.555 0.583 0.650 0.768 +0 0.758 0.096 1.217 1.324 -1.089 0.729 -1.443 0.866 0.000 1.081 0.165 -0.561 1.107 0.334 -1.128 -0.639 0.000 0.873 -0.159 0.345 3.102 0.872 0.701 1.214 0.881 0.659 0.811 0.827 +1 0.619 0.775 -0.830 1.351 1.384 0.750 0.793 0.252 1.087 0.709 1.523 0.327 0.000 0.698 -0.904 -0.917 0.000 0.595 0.787 -1.588 3.102 0.925 0.704 1.155 0.555 0.706 0.638 0.600 +0 1.284 1.278 1.390 0.774 -1.665 0.546 0.939 -0.692 2.173 0.545 1.829 -0.243 0.000 0.754 1.805 0.910 0.000 0.696 -1.639 0.883 0.000 0.918 0.823 0.981 0.969 0.648 0.824 0.713 +0 0.949 -0.383 -1.405 1.087 -0.543 0.382 0.446 0.917 1.087 0.652 -0.957 0.696 2.215 0.402 0.779 -0.006 0.000 0.484 -2.257 -0.822 0.000 0.591 1.013 0.987 0.838 0.591 0.694 0.719 +1 0.808 2.013 0.922 0.063 -0.171 1.653 -0.099 -1.181 0.000 1.198 -0.184 0.720 2.215 0.653 0.762 -0.182 0.000 1.656 -0.159 0.073 3.102 0.879 1.185 0.987 1.034 0.700 0.966 1.271 +0 0.412 -1.222 -0.002 0.481 0.737 0.599 -2.505 -1.188 0.000 0.600 -0.024 1.415 0.000 1.222 0.447 -0.086 2.548 1.663 0.179 -1.426 3.102 2.239 1.798 0.983 0.665 1.029 1.433 1.167 +0 1.664 -0.185 1.530 0.582 -1.372 0.535 -0.719 0.248 2.173 0.919 0.686 -0.651 2.215 0.366 2.151 -0.170 0.000 0.471 -0.924 -0.395 0.000 1.262 0.879 0.983 1.013 1.098 0.894 0.847 +1 2.207 -0.601 1.026 1.519 0.635 1.247 -1.247 -1.461 2.173 1.555 -0.706 -0.510 0.000 0.809 0.429 0.199 2.548 0.757 -2.343 -0.943 0.000 1.838 1.602 0.992 1.703 1.747 1.300 1.380 +1 0.640 -1.416 -1.310 0.130 -1.191 0.837 -0.160 0.790 2.173 1.247 -0.978 -0.920 0.000 0.558 -0.867 0.272 2.548 0.714 0.590 -0.115 0.000 1.152 0.834 0.977 0.890 0.516 0.843 0.738 +0 0.999 0.071 0.489 0.356 1.003 1.454 2.324 0.476 0.000 1.706 -0.525 -1.200 2.215 0.909 -0.658 -1.631 0.000 1.170 -1.370 -0.955 0.000 0.848 0.784 0.994 0.825 1.211 0.925 0.799 +0 1.391 -0.717 -0.436 0.578 -1.218 0.375 -0.769 0.347 2.173 0.369 -2.243 -1.240 0.000 1.078 -0.776 -1.637 2.548 0.667 0.510 0.961 0.000 0.847 0.887 0.984 0.693 0.774 0.660 0.754 +1 0.734 0.330 1.378 0.841 0.744 0.516 -1.005 1.306 0.000 0.764 -0.213 -1.606 0.000 1.799 0.100 -0.594 2.548 1.619 -0.703 -0.077 3.102 0.854 1.061 0.992 1.208 0.872 1.043 1.028 +0 1.327 0.496 0.449 1.389 0.374 0.970 1.084 -1.176 0.000 0.943 0.053 -0.115 2.215 1.811 0.894 -1.591 0.000 1.201 0.348 1.353 3.102 0.880 0.909 0.979 0.917 0.948 0.977 1.060 +0 0.496 1.440 -0.982 0.670 -1.459 0.652 0.440 0.833 2.173 0.680 2.504 -0.101 0.000 0.547 -0.859 1.087 0.000 0.965 -0.359 -1.294 0.000 0.827 0.760 0.983 0.457 0.481 0.539 0.542 +1 1.464 -0.523 -0.775 1.184 -1.364 0.938 -0.750 0.403 2.173 0.980 -0.647 1.192 2.215 0.943 -0.250 -1.390 0.000 0.443 2.332 0.519 0.000 0.954 0.901 0.987 1.283 0.922 1.003 0.870 +1 0.784 0.895 0.018 2.781 -0.299 1.149 0.310 1.392 0.000 1.562 1.002 -1.213 2.215 1.376 1.474 0.758 0.000 0.944 1.251 -0.519 0.000 1.150 0.702 0.990 1.481 0.933 1.000 1.010 +0 0.400 1.064 -1.597 0.865 0.295 0.924 0.610 -0.532 2.173 1.465 -0.479 1.143 0.000 0.603 -2.263 -0.459 0.000 1.109 0.343 -1.541 3.102 2.211 1.586 0.986 0.823 0.852 1.413 1.123 +0 1.156 -0.041 -0.575 1.367 -1.061 0.594 -1.300 0.424 0.000 0.706 0.319 0.712 
2.215 0.841 0.563 1.313 2.548 0.574 -1.266 -1.659 0.000 0.855 0.939 0.990 0.897 0.439 0.756 0.903 +1 0.857 -0.640 -1.063 0.658 1.466 0.967 -0.748 -0.140 0.000 0.785 0.235 1.507 2.215 0.811 -1.038 0.697 1.274 0.722 -1.493 -0.287 0.000 0.712 0.740 0.991 0.863 0.850 0.844 0.786 +0 0.671 -0.350 -0.605 1.624 1.560 0.962 -1.645 -0.537 0.000 0.719 1.343 1.004 1.107 1.373 0.370 1.280 2.548 1.003 1.205 -0.970 0.000 0.745 0.831 1.343 1.191 0.596 0.836 0.777 +1 1.444 1.002 -0.881 1.068 -0.535 0.547 1.616 1.605 0.000 0.372 0.793 0.537 0.000 1.499 0.635 1.216 1.274 0.911 1.412 0.195 3.102 0.883 0.717 0.983 0.835 0.849 0.874 0.792 +0 2.015 -0.132 0.491 0.673 0.867 0.875 -0.540 -1.119 2.173 0.613 -1.157 -1.674 0.000 0.508 1.780 0.854 0.000 1.507 -0.672 -0.542 3.102 0.842 0.768 0.986 1.079 0.625 1.016 0.853 +0 0.305 2.263 -1.680 2.012 1.192 1.433 -0.890 -0.279 2.173 0.745 -0.158 -1.295 0.000 0.639 -1.364 1.107 0.000 1.069 -0.170 0.991 3.102 1.161 0.818 0.987 2.445 1.279 1.550 1.311 +0 0.552 0.146 -1.597 1.442 0.270 1.141 1.529 0.445 0.000 0.552 1.310 -0.793 0.000 1.880 -0.881 -1.142 2.548 2.129 0.337 1.488 3.102 1.243 1.119 1.228 1.274 1.556 1.055 0.958 +1 0.519 -2.270 -1.371 0.972 1.709 0.854 -1.342 0.229 0.000 0.898 -0.981 -0.543 1.107 1.500 -1.198 1.652 0.000 0.628 -0.597 1.180 0.000 0.919 0.893 0.992 0.596 0.211 0.561 0.635 +0 0.844 1.450 0.161 0.640 -1.186 0.890 0.781 1.731 2.173 0.398 0.200 0.690 0.000 1.047 0.139 0.185 2.548 0.758 1.315 -0.858 0.000 0.882 0.850 0.987 0.755 1.246 0.787 0.672 +0 1.516 0.080 -0.274 0.688 -0.223 1.002 -0.886 -1.578 2.173 1.157 -0.450 0.085 2.215 1.109 -1.480 1.114 0.000 1.555 -1.178 1.591 0.000 0.623 0.842 0.972 0.624 1.618 1.016 1.036 +0 1.426 -1.334 0.582 0.567 -1.037 0.470 -0.226 -0.062 2.173 0.820 -0.749 -1.443 0.000 0.519 0.469 1.277 0.000 0.666 0.186 -1.210 1.551 0.938 0.896 1.238 0.860 0.527 0.663 0.689 +1 0.327 -1.821 1.187 0.640 -1.144 1.036 0.407 -0.127 0.000 1.164 -0.523 -1.639 0.000 0.831 -0.994 0.886 2.548 0.693 -0.289 0.447 1.551 2.555 1.418 0.994 0.686 0.315 0.969 0.818 +1 0.882 0.082 -0.390 1.029 -1.526 1.350 -0.396 -0.136 2.173 1.812 0.335 1.632 0.000 0.728 -2.378 0.491 0.000 0.660 -1.703 0.758 0.000 0.286 0.616 1.126 1.094 0.924 0.936 0.989 +0 0.510 0.903 -0.820 1.152 1.334 1.228 1.279 0.212 2.173 0.848 1.816 -1.210 0.000 0.628 2.430 1.549 0.000 1.455 0.604 1.697 0.000 0.823 0.716 0.990 1.068 0.174 0.845 0.788 +1 1.438 1.638 -0.800 1.245 -0.589 1.279 1.648 1.260 2.173 1.089 1.895 0.760 0.000 0.705 0.090 1.140 0.000 2.331 0.353 -0.272 0.000 1.371 0.991 0.997 1.602 2.166 1.693 1.497 +0 0.721 -0.194 1.034 0.816 -0.549 0.863 0.888 0.795 2.173 1.653 0.100 -1.482 0.000 1.283 0.339 0.200 2.548 1.677 1.323 -0.341 0.000 0.979 1.018 1.052 0.953 0.756 0.880 0.841 +0 1.233 -1.140 -0.060 0.296 -1.683 0.673 -1.851 1.298 0.000 0.885 -1.206 0.584 2.215 1.123 -0.953 -1.129 2.548 0.861 -0.668 1.671 0.000 0.960 0.935 0.982 0.680 1.064 0.712 0.665 +1 0.945 -1.717 -0.925 0.415 0.363 0.551 0.516 -0.277 2.173 0.503 0.548 1.313 2.215 0.419 -1.147 0.789 0.000 0.410 1.085 0.373 0.000 0.777 0.710 0.987 0.941 0.767 0.814 0.693 +1 1.086 0.952 0.313 0.339 -0.992 1.345 0.805 -0.767 0.000 1.690 0.420 0.584 2.215 1.513 0.315 -1.379 0.000 1.390 1.043 1.242 3.102 0.966 1.010 0.980 0.749 0.969 0.704 0.655 +1 1.263 -0.030 1.347 0.977 -1.504 0.987 1.434 -0.683 2.173 0.549 0.702 -0.176 0.000 0.888 -1.697 0.838 0.000 0.443 0.696 0.799 3.102 0.720 0.764 0.986 0.621 0.714 1.040 0.890 +1 1.048 1.027 1.150 0.176 -1.648 0.830 1.132 0.484 0.000 1.429 0.723 1.669 0.000 1.423 0.627 -0.663 2.548 
0.873 -0.414 -0.542 0.000 0.893 0.965 0.990 0.927 0.837 0.768 0.723 +0 0.865 0.191 0.409 2.226 0.680 0.858 -0.144 -1.088 0.000 0.532 1.085 -1.274 2.215 0.278 -0.648 -0.915 0.000 0.495 0.964 1.638 0.000 0.937 0.687 1.005 0.605 0.427 0.795 0.845 +1 0.891 -0.318 0.746 1.014 1.541 1.920 -0.895 -1.278 0.000 0.895 0.242 0.592 0.000 0.408 -1.421 0.220 0.000 0.439 -0.519 -1.584 3.102 1.005 1.093 0.992 0.509 0.226 0.644 0.609 +0 0.787 -0.598 -1.676 0.618 -0.302 0.414 -1.767 1.693 0.000 0.995 -0.362 -0.136 2.215 0.679 0.259 1.518 2.548 0.628 -1.355 0.192 0.000 0.763 0.986 0.988 0.604 0.918 0.752 0.677 +1 0.831 -1.128 0.391 1.009 0.398 0.759 -0.347 1.662 1.087 0.818 -1.414 -1.203 0.000 0.275 -1.142 0.028 0.000 1.396 -0.568 -1.004 3.102 0.656 0.843 0.996 1.093 0.755 1.042 0.844 +1 1.690 0.137 -0.577 0.918 0.663 0.622 -0.609 -1.541 2.173 0.954 -1.106 1.168 2.215 0.531 -0.590 -0.688 0.000 0.909 0.353 0.511 0.000 0.802 0.907 1.551 1.130 0.789 0.989 0.812 +1 0.634 0.806 -1.685 1.727 0.746 1.481 -0.587 -0.646 2.173 0.752 -0.666 -1.158 2.215 1.896 -0.514 0.737 0.000 0.834 -0.784 -1.607 0.000 1.218 1.088 1.180 1.889 0.695 1.303 1.205 +0 1.231 -1.796 1.718 0.770 0.158 0.591 -0.226 1.478 2.173 0.657 -1.048 -0.151 0.000 0.548 0.357 -0.227 1.274 0.529 -0.125 -1.183 0.000 0.718 0.834 1.330 1.107 0.743 0.891 0.758 +1 2.556 -0.630 -1.508 0.222 1.467 0.588 -0.500 -0.295 2.173 0.927 -1.198 0.556 2.215 0.651 -0.583 0.545 0.000 0.463 0.114 -0.294 0.000 0.481 0.514 0.978 1.037 0.855 0.926 0.745 +0 0.747 -1.255 0.086 0.519 -0.710 0.918 -0.143 -1.143 2.173 1.092 -0.950 1.247 0.000 1.109 0.130 1.350 2.548 0.587 -0.065 -0.228 0.000 1.122 0.854 0.981 1.369 0.997 1.142 0.988 +0 1.276 -1.162 1.668 1.523 1.615 1.978 -1.024 -1.610 2.173 2.155 0.757 0.309 0.000 1.364 1.332 -0.060 2.548 0.773 1.776 -0.576 0.000 1.221 1.008 1.003 0.847 3.843 1.978 1.651 +0 0.932 -0.139 -1.716 0.365 0.206 1.416 0.108 0.650 2.173 0.882 -1.330 -0.637 0.000 0.992 -0.604 -0.924 0.000 1.006 0.468 -1.280 3.102 0.633 0.898 0.988 0.969 1.278 1.177 0.974 +1 0.982 1.102 -0.291 0.418 0.391 0.690 -0.278 -0.732 1.087 0.634 0.148 0.273 1.107 0.902 -0.237 -1.505 0.000 0.868 0.896 -1.624 0.000 0.709 0.850 0.981 0.789 0.795 0.673 0.659 +0 0.770 -0.170 1.456 0.616 -1.556 0.380 -2.023 0.737 0.000 0.514 -0.697 -0.511 2.215 0.764 2.563 -0.793 0.000 0.656 1.127 -0.290 0.000 0.711 0.862 0.980 0.713 0.541 0.909 1.168 +0 0.994 0.363 0.982 0.555 -0.116 2.053 -0.014 -0.940 2.173 1.648 -0.863 1.175 2.215 0.632 0.696 0.351 0.000 0.763 -1.619 0.341 0.000 1.384 1.270 0.991 1.367 2.832 1.551 1.231 +0 0.559 -0.065 -0.580 0.948 0.871 0.981 -0.053 -1.239 1.087 0.693 1.084 0.256 0.000 0.751 1.922 0.249 0.000 1.140 0.563 1.677 0.000 1.139 1.233 0.988 0.682 0.996 0.790 0.744 +1 0.876 0.437 1.186 0.778 -0.270 0.979 0.375 -1.254 1.087 1.213 -0.456 0.749 2.215 0.790 -0.364 -0.714 0.000 0.958 -0.823 0.136 0.000 0.725 1.052 1.105 0.887 1.710 0.959 0.845 +1 0.587 -1.062 1.328 0.401 -0.768 1.534 -1.172 0.523 0.000 1.188 -1.205 -0.436 0.000 1.274 -0.784 -0.944 0.000 1.103 0.299 1.672 3.102 0.908 0.892 0.991 0.600 0.637 0.804 0.726 +0 2.581 -0.894 1.179 0.958 0.766 2.637 -0.904 -0.347 0.000 1.570 -0.262 1.319 2.215 1.804 -1.188 -0.801 2.548 0.669 1.041 1.348 0.000 1.230 0.954 0.984 1.506 1.944 1.237 1.084 +0 0.404 0.447 -1.001 0.165 -0.160 0.494 -0.952 -1.079 0.000 0.674 0.248 0.332 2.215 0.977 0.879 0.980 2.548 0.421 1.543 0.429 0.000 1.500 1.050 0.983 1.009 0.569 0.779 0.764 +0 2.327 -0.841 0.010 1.353 0.114 0.955 -1.533 0.427 2.173 1.900 -0.234 -1.267 1.107 1.884 0.034 1.462 0.000 1.089 
-2.412 -0.870 0.000 0.789 1.160 0.988 1.783 2.419 1.491 1.420 +0 1.898 -1.418 0.112 0.963 -0.251 0.783 0.576 1.584 0.000 0.847 0.518 -1.427 0.000 0.855 0.085 -0.526 2.548 0.859 -0.371 1.180 3.102 0.717 0.921 0.980 0.852 0.679 0.716 1.057 +1 0.866 0.480 0.888 0.755 -1.527 1.104 -0.944 -0.583 0.000 1.023 -0.667 1.277 0.000 0.863 0.059 0.673 0.000 2.828 0.154 -0.773 1.551 0.929 0.675 0.984 1.043 1.137 0.938 0.838 +0 0.831 0.491 1.544 1.572 -1.131 0.965 -0.138 -0.308 1.087 1.114 -0.378 1.292 2.215 0.451 -1.666 -0.625 0.000 0.990 -0.793 0.454 0.000 0.695 0.899 1.059 1.146 1.525 1.031 0.949 +1 0.924 -1.055 0.200 0.279 -0.219 0.807 1.016 -1.446 2.173 0.321 -0.385 1.639 2.215 0.479 0.903 0.019 0.000 0.446 -0.276 0.729 0.000 0.474 0.782 0.993 0.623 0.636 0.797 0.643 +1 1.027 -0.287 -0.757 1.149 0.000 0.855 -0.690 1.268 2.173 0.561 -0.472 -0.177 0.000 0.686 -0.878 -1.608 2.548 0.502 0.619 -1.697 0.000 0.814 0.916 0.987 0.764 0.514 0.774 0.678 +1 0.684 -0.117 1.393 1.871 1.332 1.344 0.339 0.212 2.173 0.509 0.286 -0.876 2.215 0.725 2.343 -1.433 0.000 1.437 0.826 -0.611 0.000 0.980 0.961 0.983 1.803 1.012 1.240 1.483 +0 0.276 -2.108 1.689 1.908 0.589 1.263 1.364 -1.162 0.000 0.570 0.675 -0.769 0.000 1.007 1.524 1.528 0.000 2.206 -0.469 0.276 3.102 0.887 0.706 0.995 0.769 0.596 1.152 1.440 +0 1.653 0.876 1.657 0.545 -0.732 0.875 0.483 0.253 2.173 0.442 -1.485 -0.012 0.000 0.559 -2.070 -0.967 0.000 0.637 -0.634 -0.408 0.000 0.795 1.154 1.099 0.789 0.851 0.820 0.862 +0 1.319 1.010 -1.498 0.022 -0.765 0.331 0.130 -1.672 0.000 0.311 2.369 1.327 0.000 1.603 0.251 -0.775 0.000 0.927 -2.127 0.842 0.000 0.953 0.872 0.888 0.688 0.252 0.600 0.592 +0 0.650 -1.336 -0.668 0.554 1.674 1.244 -0.634 1.459 2.173 1.537 0.093 0.034 0.000 1.509 -0.512 -0.145 2.548 1.196 0.984 -1.334 0.000 0.688 1.151 0.980 1.238 1.695 1.122 1.159 +0 0.445 1.216 -0.759 1.128 1.311 0.678 -0.164 -1.272 0.000 0.719 0.184 1.449 2.215 0.809 1.891 -0.058 0.000 1.506 -0.422 0.086 3.102 0.984 1.050 0.985 1.289 0.945 0.927 0.864 +1 0.662 2.345 -1.062 0.367 -0.008 0.447 -0.114 0.125 2.173 0.452 1.458 1.188 0.000 0.652 1.251 -0.585 0.000 0.751 -0.108 1.419 1.551 0.834 0.846 0.980 0.789 0.563 0.686 0.620 +1 1.226 0.005 0.918 1.842 1.474 1.348 0.175 -0.868 1.087 1.100 0.024 0.131 2.215 0.432 -2.592 0.926 0.000 0.386 2.395 -0.400 0.000 3.945 2.359 1.002 1.564 1.411 1.696 1.493 +1 0.639 -0.021 1.638 1.143 -0.909 0.893 -0.104 -0.161 0.000 0.790 1.891 1.519 0.000 1.819 -0.651 1.266 2.548 0.539 0.432 -1.380 3.102 2.806 1.504 0.989 1.142 0.719 1.344 1.096 +1 0.890 -0.229 -1.301 0.243 0.873 1.570 1.334 -1.696 0.000 1.453 -0.585 0.022 2.215 1.431 -0.776 0.237 0.000 1.853 0.017 0.560 0.000 0.959 0.923 0.990 0.968 0.726 0.669 0.722 +0 0.857 -1.210 1.098 0.632 1.665 1.762 -0.425 0.306 0.000 1.646 -1.407 -1.702 1.107 2.692 -1.539 -0.998 0.000 0.893 -0.378 0.813 0.000 0.846 0.937 0.980 0.801 1.451 1.337 1.217 +0 2.480 -1.516 1.025 0.154 -1.418 0.528 -0.365 -0.686 0.000 0.860 -2.363 -0.748 0.000 1.186 -1.054 -0.166 1.274 0.756 0.205 1.623 1.551 1.096 0.952 0.988 0.871 0.909 0.813 0.803 +1 1.722 1.501 0.186 0.724 1.328 2.403 -1.116 -1.203 2.173 1.845 0.100 0.646 2.215 1.165 0.463 0.272 0.000 0.652 0.615 1.565 0.000 0.890 0.828 1.326 3.880 3.691 2.756 2.029 +0 1.408 -0.358 -0.896 0.411 0.070 0.876 -0.173 -1.564 2.173 0.501 -1.207 0.045 0.000 1.220 0.233 1.024 1.274 0.710 0.347 0.232 0.000 0.732 1.056 0.986 1.011 0.972 0.824 0.759 +0 0.668 0.054 1.590 2.750 -0.462 0.704 -0.261 0.705 2.173 0.940 -0.928 1.627 2.215 0.858 0.327 1.314 0.000 0.584 -1.352 -0.628 0.000 1.190 
0.912 1.805 1.459 0.979 1.116 0.948 +0 1.546 -0.990 0.609 2.915 0.994 2.469 0.197 -0.758 2.173 1.335 -2.855 0.137 0.000 1.791 -0.426 -1.180 0.000 1.369 -0.887 1.519 1.551 1.806 1.179 1.002 3.168 2.168 2.052 1.711 +1 1.218 -0.456 -0.737 1.077 -0.032 0.375 -0.507 1.439 0.000 0.647 0.114 0.529 2.215 0.736 1.074 1.685 0.000 0.380 1.164 -0.900 0.000 0.922 0.806 0.988 0.615 0.253 0.541 0.663 +0 1.164 1.141 -0.828 0.412 0.413 0.588 0.430 1.616 0.000 0.702 -0.009 0.145 2.215 0.487 0.778 -1.122 2.548 0.505 -1.660 0.976 0.000 1.356 0.933 0.990 0.739 0.630 0.711 0.724 +1 0.649 -1.279 -0.166 1.104 0.824 0.492 -1.061 -1.429 0.000 0.735 0.368 -0.016 2.215 0.325 0.756 0.108 0.000 0.742 -0.163 -1.123 0.000 1.061 0.936 0.989 1.062 0.689 0.925 0.824 +0 1.933 -0.233 0.941 1.352 0.322 1.189 0.335 -1.385 2.173 0.827 -0.627 -0.966 0.000 0.443 0.175 0.017 0.000 0.649 -1.224 -0.248 3.102 0.823 0.977 1.185 0.863 1.246 1.126 0.947 +0 0.486 -0.556 -1.281 1.151 0.989 0.456 -1.836 -0.807 0.000 0.951 -0.815 -0.669 1.107 1.447 -0.916 0.236 0.000 0.665 -0.647 1.298 1.551 1.341 0.991 0.988 0.486 0.704 0.681 0.679 +1 0.517 1.424 0.029 1.372 1.459 1.410 -0.184 -1.236 0.000 0.920 0.882 0.499 1.107 0.759 0.938 -0.423 2.548 0.736 -1.223 0.378 0.000 1.879 1.428 1.120 0.820 0.658 1.161 1.149 +1 0.704 0.521 1.507 0.456 0.387 0.529 -0.713 -0.524 0.000 0.855 0.411 -0.527 0.000 1.527 0.533 0.925 2.548 0.934 1.142 -1.407 3.102 0.861 0.993 0.988 0.712 0.869 0.915 0.837 +0 0.626 -0.988 0.356 0.241 -0.292 1.501 1.921 1.408 0.000 1.527 -1.003 -0.640 0.000 1.867 -0.399 0.106 2.548 0.677 -1.320 -1.355 0.000 0.878 0.883 0.993 0.696 0.791 0.816 0.747 +0 0.625 -1.759 1.054 1.130 1.647 0.480 0.533 -1.488 0.000 0.742 0.255 -0.584 0.000 1.280 -1.345 -0.005 0.000 1.653 -0.200 1.612 3.102 0.939 0.884 0.981 0.950 1.138 0.846 0.842 +1 0.962 -0.027 -0.561 0.969 0.974 1.095 1.146 -1.609 2.173 1.239 0.000 0.068 1.107 0.596 2.083 0.939 0.000 0.427 1.124 1.407 0.000 0.611 0.979 1.314 0.876 2.006 1.134 1.008 +1 0.571 -0.230 0.124 0.529 1.583 0.937 -0.056 -0.909 0.000 0.896 -2.173 -0.081 0.000 2.361 0.211 0.875 2.548 0.591 -0.687 1.500 3.102 0.921 0.745 0.988 1.071 0.694 0.960 0.874 +0 1.253 0.502 -0.029 0.513 0.928 0.423 2.152 -0.648 0.000 0.774 -0.592 -1.672 2.215 0.286 -0.133 0.194 1.274 0.989 -1.181 -1.352 0.000 0.878 0.595 0.985 0.493 0.511 0.666 0.670 +1 1.961 -1.125 -0.280 1.254 0.309 1.482 -1.159 1.090 2.173 1.201 -0.451 -1.233 2.215 1.497 0.484 -1.559 0.000 0.899 2.377 0.101 0.000 2.241 2.175 1.102 1.535 1.841 2.342 2.171 +0 0.413 0.596 1.637 1.278 -0.669 0.937 1.609 -1.586 0.000 0.979 0.530 0.276 2.215 0.685 0.592 -0.223 2.548 0.588 0.285 1.253 0.000 0.956 0.824 0.987 0.611 0.381 0.593 0.650 +1 0.761 0.544 0.936 1.476 -0.278 1.242 0.661 1.705 0.000 1.137 -0.087 -0.930 2.215 1.414 -1.298 -0.075 2.548 1.734 -0.685 0.776 0.000 0.906 0.837 1.303 1.409 1.348 1.089 0.931 +0 0.605 -0.872 0.083 0.225 1.670 1.212 -0.341 -1.729 2.173 1.483 0.907 -0.407 1.107 0.962 -0.024 0.738 0.000 0.558 -0.478 0.634 0.000 0.236 0.936 0.982 0.927 2.275 1.156 0.925 +0 0.470 -0.217 -1.674 2.262 1.437 1.223 -0.350 -0.173 0.000 0.480 -0.607 -1.011 2.215 0.720 0.418 -0.526 0.000 0.796 -0.456 1.079 0.000 0.938 0.669 1.004 0.797 0.411 0.708 0.669 +0 0.854 1.291 -1.089 1.650 -0.330 0.595 0.391 1.127 2.173 0.643 1.048 1.597 2.215 0.526 1.699 1.057 0.000 0.684 -0.212 1.072 0.000 0.841 0.608 1.041 1.140 0.490 0.826 0.727 +0 0.727 -0.465 -0.480 2.265 0.034 1.057 0.034 -1.528 0.000 1.258 -0.658 -1.416 0.000 1.508 0.763 0.747 0.000 0.952 -0.927 1.120 3.102 0.923 0.985 0.978 0.615 0.274 
0.699 0.959 +0 1.689 0.952 -1.425 0.654 0.994 0.482 -1.318 0.112 0.000 0.414 -1.055 1.175 0.000 0.771 1.025 -0.191 1.274 0.590 0.099 0.283 3.102 0.785 1.162 1.194 0.764 0.345 0.676 0.883 +0 1.775 0.392 -1.264 1.961 -0.918 1.126 1.666 -1.157 0.000 2.051 0.556 0.702 0.000 0.990 -2.311 -0.138 0.000 3.277 1.298 0.857 0.000 0.894 0.756 0.985 1.097 0.905 0.858 0.863 +1 0.313 -1.366 1.045 1.315 -1.067 0.671 -0.658 0.028 1.087 1.232 1.039 1.395 2.215 0.955 0.154 -0.498 0.000 0.472 -1.247 0.549 0.000 0.916 0.680 0.985 1.246 1.820 1.082 0.912 +1 0.630 0.869 -1.192 0.963 -0.426 2.050 1.436 1.515 0.000 1.989 0.470 0.113 2.215 0.944 0.254 -1.146 0.000 1.372 -0.145 -0.288 3.102 0.731 1.081 0.991 0.592 0.726 0.700 0.661 +1 0.842 -0.139 0.830 0.643 -0.608 1.098 0.151 1.308 2.173 1.197 0.169 -0.596 0.000 0.491 -0.891 -0.228 0.000 0.486 0.641 -1.110 3.102 0.811 0.573 0.987 0.879 0.678 0.843 0.728 +0 0.604 -0.865 -1.217 0.942 1.496 0.749 0.653 0.071 2.173 0.758 0.309 -0.844 0.000 0.911 -0.781 0.360 1.274 0.789 -0.319 1.676 0.000 0.851 0.970 0.984 0.835 0.898 0.805 0.734 +1 0.638 -1.868 -1.625 1.467 1.470 0.668 -0.854 -0.358 1.087 0.631 -0.875 0.911 0.000 0.726 -1.555 0.346 0.000 0.698 0.003 -0.468 0.000 0.913 0.868 0.983 1.060 0.898 1.001 0.830 +0 1.118 -0.456 -0.446 1.629 -0.956 0.838 -1.229 0.959 1.087 0.704 -0.127 1.198 0.000 0.685 -0.621 0.397 0.000 0.671 0.138 -1.152 3.102 0.781 0.721 0.987 0.509 0.964 0.943 0.796 +1 0.929 0.065 0.522 0.725 -1.283 0.634 -0.035 1.494 0.000 1.298 0.504 -0.376 2.215 0.734 -1.217 1.601 1.274 0.819 -0.042 -0.719 0.000 1.002 1.121 1.135 0.812 1.501 0.889 0.772 +0 1.337 1.197 0.421 1.101 1.441 0.630 1.113 1.409 2.173 1.211 -0.178 -0.684 0.000 0.754 1.508 -0.259 0.000 0.383 -0.359 1.571 3.102 1.640 1.003 1.337 0.796 0.452 0.868 0.895 +0 0.387 -0.890 -0.640 0.293 0.184 0.976 -0.763 -0.365 2.173 0.591 -1.133 -0.890 0.000 2.271 -1.304 1.459 2.548 0.461 -0.763 0.488 0.000 0.649 0.929 0.997 0.949 1.953 1.252 0.959 +1 1.115 0.908 0.313 1.618 -0.319 0.698 -2.746 1.432 0.000 0.794 0.273 -1.427 2.215 1.106 1.309 -0.410 0.000 1.908 1.026 1.591 3.102 0.785 0.930 1.003 1.174 0.713 0.927 0.799 +1 0.694 -0.687 0.664 0.726 -0.744 1.008 -1.205 -0.288 2.173 1.082 -0.327 1.309 2.215 1.154 -0.563 -1.605 0.000 0.517 -1.769 -0.368 0.000 1.040 0.949 0.986 0.834 1.676 0.958 0.811 +0 0.908 -0.352 -0.021 1.681 0.383 1.203 -0.624 -1.644 0.000 0.756 -0.471 -0.837 0.000 0.600 0.514 0.821 2.548 0.432 1.011 -1.166 0.000 0.743 0.735 0.995 0.499 0.380 0.540 0.673 +0 1.288 -1.001 1.127 4.239 0.862 2.040 -0.783 -0.808 0.000 0.506 -0.779 1.713 2.215 1.522 -0.056 -0.522 2.548 0.699 -0.774 -1.399 0.000 0.933 0.955 0.982 0.868 0.913 1.178 1.406 +1 0.382 -0.503 -0.564 1.086 0.363 0.846 -0.035 -1.529 0.000 0.537 -0.218 0.594 2.215 1.192 1.002 0.500 0.000 1.015 0.816 -0.979 3.102 2.054 1.257 0.982 0.735 0.784 0.888 1.097 +1 1.374 0.620 1.203 0.506 -1.541 0.930 -0.018 -0.630 2.173 0.682 -0.175 0.986 1.107 0.555 0.109 0.036 0.000 0.569 -1.026 -0.352 0.000 0.499 0.608 0.986 1.240 1.168 0.920 0.782 +1 1.147 1.810 0.844 0.393 0.190 0.864 1.684 -0.702 2.173 1.126 1.137 1.027 0.000 1.211 1.194 -1.026 0.000 1.444 1.117 1.609 0.000 0.842 1.096 0.978 1.015 0.839 0.959 0.835 +0 1.009 -0.051 1.551 0.463 1.690 0.745 -0.504 1.016 0.000 0.663 -2.576 0.231 0.000 0.551 1.782 0.135 0.000 1.148 0.520 -1.144 3.102 1.020 0.734 0.980 0.791 0.400 0.560 0.598 +1 0.303 -1.054 -1.134 1.234 -0.603 0.749 0.254 -0.137 2.173 0.697 0.333 1.354 0.000 0.618 1.806 -0.757 0.000 0.924 1.016 1.412 0.000 0.942 0.890 0.992 1.823 0.555 1.560 1.450 +0 1.167 
1.003 -1.440 0.264 -1.274 0.307 2.156 -1.468 0.000 0.773 0.994 -0.396 0.000 1.698 1.562 0.919 0.000 0.975 0.843 0.546 1.551 1.038 0.812 0.989 0.742 0.661 0.624 0.611 +1 2.120 -0.154 1.328 1.425 1.098 1.473 0.945 -0.736 0.000 0.339 -1.713 -0.042 0.000 0.525 -1.183 1.268 2.548 0.489 2.281 -1.088 0.000 1.377 1.186 0.982 0.566 0.336 1.100 1.339 +0 1.063 1.166 0.270 1.231 0.893 1.272 -0.753 -1.019 2.173 0.823 -0.533 1.637 0.000 0.348 -0.800 -0.055 0.000 1.238 0.122 -0.105 3.102 0.830 0.947 0.993 0.742 1.157 1.227 1.018 +0 0.547 0.599 1.371 3.854 0.864 3.200 0.012 -0.905 0.000 1.316 -1.211 0.966 2.215 1.047 1.099 0.668 2.548 0.384 1.456 -0.008 0.000 0.986 1.992 0.990 2.358 1.973 1.945 2.061 +1 0.570 0.447 0.275 0.103 -0.301 1.132 -0.133 1.729 2.173 0.852 1.034 -0.394 0.000 0.739 -0.721 0.782 2.548 0.672 0.675 -0.760 0.000 0.343 0.975 0.932 0.971 0.941 0.939 0.823 +1 0.888 -1.424 1.050 0.964 -1.704 1.237 0.566 1.637 2.173 2.572 0.081 0.031 0.000 1.173 -0.019 -0.904 0.000 0.702 1.323 -0.505 3.102 1.994 1.350 0.993 1.278 1.061 1.382 1.324 +1 0.369 -0.734 -1.233 0.432 -1.552 0.493 -0.723 -1.700 2.173 0.484 0.298 0.463 2.215 0.558 2.160 0.798 0.000 1.361 0.900 -0.004 0.000 0.921 0.728 0.981 0.783 0.773 0.894 0.777 +0 0.657 0.715 1.037 0.678 -1.669 0.446 -0.653 0.559 0.000 1.109 -0.359 -0.451 2.215 0.691 -1.205 1.632 0.000 0.712 0.003 -0.630 1.551 0.888 1.013 0.984 0.627 0.198 0.630 0.648 +1 0.917 -0.077 -1.717 0.770 0.616 0.901 -1.412 -0.866 2.173 0.757 -1.012 -0.142 2.215 0.666 0.443 0.669 0.000 0.551 -1.437 0.995 0.000 0.920 0.819 1.003 1.170 0.775 0.859 0.789 +1 0.486 -0.517 0.991 1.006 -1.126 0.505 -1.238 0.940 0.000 1.083 -0.382 -0.262 2.215 0.393 -2.238 0.115 0.000 1.050 1.525 1.559 0.000 0.748 1.045 0.991 0.720 0.835 0.828 0.737 +0 1.263 1.327 1.411 0.367 -0.945 0.479 1.259 -1.612 0.000 1.312 1.309 0.291 2.215 0.998 0.465 -0.833 0.000 0.670 1.001 -0.017 3.102 0.938 1.239 0.991 0.638 0.240 0.727 0.698 +0 0.285 1.213 -0.991 1.201 0.723 0.521 1.139 1.520 0.000 0.626 -0.224 0.571 0.000 1.426 0.136 -0.428 2.548 1.779 -0.538 -1.156 3.102 1.289 1.196 0.987 0.945 0.894 0.936 0.815 +1 1.496 0.286 1.390 1.155 0.887 1.071 0.806 -0.515 2.173 0.455 -0.493 -0.205 0.000 0.368 1.077 1.644 2.548 0.410 -0.707 0.572 0.000 0.375 0.847 0.994 0.619 0.742 0.959 0.785 +0 1.984 0.259 -1.545 1.152 1.168 1.226 1.067 0.086 0.000 0.507 0.001 -0.847 2.215 0.563 -0.305 -0.527 2.548 0.809 -1.628 0.939 0.000 0.682 0.742 1.344 0.908 0.188 0.656 0.720 +0 0.485 1.854 -0.409 0.706 -0.910 0.973 1.577 0.778 2.173 0.783 1.220 1.403 0.000 0.553 -0.190 -0.619 0.000 0.709 1.753 -1.323 0.000 1.071 0.951 0.990 1.117 1.495 0.956 0.841 +0 1.265 0.272 1.608 1.288 0.456 1.286 0.404 -0.480 0.000 0.749 -1.348 1.554 2.215 1.215 0.159 -0.037 0.000 1.200 1.555 -1.612 0.000 0.913 1.393 1.523 1.236 0.535 1.196 1.114 +1 0.511 0.288 -1.522 1.224 0.695 0.637 -1.024 1.618 1.087 0.496 -0.170 0.041 0.000 0.276 1.257 0.456 1.274 0.824 0.193 -0.311 0.000 0.733 0.882 0.997 0.524 0.915 0.663 0.659 +0 0.488 -1.409 -1.229 1.234 -0.358 0.607 -1.324 -0.734 2.173 1.511 -0.763 1.245 2.215 0.534 -0.712 0.184 0.000 0.532 -1.874 0.749 0.000 0.552 0.802 0.984 0.683 1.432 1.066 0.835 +1 1.381 -1.134 -1.200 0.979 -0.837 0.747 -0.485 1.315 2.173 0.529 0.023 0.918 0.000 1.336 -0.022 0.142 2.548 1.103 -0.331 -0.826 0.000 0.921 0.950 0.997 1.091 1.123 0.939 0.798 +0 0.424 1.658 1.697 0.588 0.395 0.619 0.496 0.670 0.000 0.419 1.478 -0.247 0.000 0.788 -0.357 -1.247 2.548 0.840 -0.176 1.220 3.102 0.983 1.009 0.994 0.589 0.497 0.658 0.595 +0 1.099 0.267 0.858 0.941 1.270 1.220 
-0.452 -0.775 0.000 1.046 -0.130 0.263 2.215 0.710 0.169 1.326 2.548 0.577 -1.975 -1.655 0.000 1.197 1.036 0.979 0.981 0.764 0.845 0.951 +1 0.327 2.427 -0.049 1.459 0.912 0.471 0.442 0.625 0.000 0.716 0.311 -0.532 0.000 1.072 0.830 -1.502 2.548 0.930 0.379 -1.195 0.000 1.008 0.802 0.985 1.149 0.864 0.863 0.750 +1 1.930 -1.199 -0.882 1.893 -0.406 1.430 -1.442 1.263 0.000 0.861 -0.130 0.561 2.215 0.528 -0.954 -0.087 0.000 0.883 0.454 -0.741 0.000 0.786 0.795 1.104 1.380 0.823 1.033 0.865 +1 0.934 -0.643 0.199 0.397 -1.479 0.955 -0.495 -1.464 0.000 1.444 0.010 0.109 2.215 0.823 0.561 -1.249 0.000 1.027 -0.285 1.222 3.102 0.986 0.829 0.988 0.732 0.947 0.975 0.814 +1 2.113 -0.792 -1.488 1.221 1.410 0.811 -0.157 0.139 1.087 0.281 -0.591 -0.918 2.215 0.821 -0.945 0.080 0.000 0.461 -0.070 -0.292 0.000 0.403 0.419 1.125 0.644 0.594 0.892 0.747 +1 0.384 -0.051 0.008 1.563 1.213 0.808 0.040 -0.696 0.000 0.771 1.146 1.543 2.215 1.558 1.504 -0.558 0.000 1.617 0.680 1.138 3.102 0.868 1.250 0.988 0.983 0.405 0.934 0.880 +1 1.076 -1.608 1.373 0.329 -0.024 0.424 -0.160 0.928 1.087 0.265 -2.045 -0.659 0.000 0.455 0.232 -0.458 0.000 1.012 0.068 -0.943 3.102 0.790 0.750 0.987 1.034 0.694 0.803 0.704 +0 1.442 1.224 -1.654 1.550 -1.278 1.174 0.290 0.453 2.173 0.535 0.274 1.642 0.000 0.691 0.874 -0.520 0.000 0.484 2.393 -0.540 0.000 0.925 1.079 0.978 0.760 0.428 1.024 0.841 +0 0.604 0.921 0.437 1.140 -0.590 0.873 1.927 -1.572 0.000 1.100 1.476 -0.091 2.215 1.365 0.837 1.404 2.548 0.543 -1.737 -0.273 0.000 0.893 0.866 0.992 0.794 1.330 0.913 0.907 +0 1.115 1.355 -1.633 1.025 0.596 0.688 -0.342 -0.800 0.000 1.038 -0.653 -0.154 2.215 0.956 0.380 0.951 2.548 0.877 0.174 -1.688 0.000 0.913 0.949 1.341 1.608 1.075 1.081 0.991 +1 1.316 0.365 1.270 0.187 -0.262 0.668 -0.245 -0.196 2.173 0.860 -0.649 -1.264 2.215 0.988 0.816 0.807 0.000 0.709 -0.083 -0.903 0.000 1.040 0.922 0.985 0.824 0.945 0.780 0.724 +1 1.517 -1.215 -0.639 0.286 -1.136 1.179 -0.458 -0.746 0.000 1.235 -0.560 -0.336 0.000 4.053 -0.702 1.419 2.548 1.567 -0.594 0.755 0.000 0.938 0.912 0.989 1.763 1.626 1.534 1.335 +0 0.405 -0.436 -0.566 2.191 -1.508 0.840 1.022 0.058 0.000 1.145 1.618 0.265 0.000 1.291 0.702 1.463 2.548 1.171 -0.050 1.513 0.000 0.775 0.805 0.988 0.937 0.696 0.842 1.102 +1 1.872 0.275 -1.494 0.445 0.573 1.145 0.875 0.186 2.173 0.587 1.603 -1.738 0.000 0.559 -0.470 -0.965 0.000 0.684 -1.878 -1.457 0.000 0.937 1.021 1.211 0.826 0.842 0.920 0.807 +1 1.311 -0.749 1.343 0.229 -0.769 0.726 0.878 -0.452 1.087 0.663 -0.388 -0.156 0.000 0.485 1.243 0.512 0.000 1.027 0.266 -1.557 3.102 0.911 0.934 0.991 0.620 0.811 0.774 0.704 +1 0.707 0.655 0.215 0.397 1.720 1.596 0.289 1.373 1.087 0.919 -1.346 -0.455 0.000 1.518 -0.128 -0.692 0.000 0.681 -0.861 0.343 3.102 1.262 0.868 0.993 0.984 1.183 1.332 1.047 +0 0.812 -1.686 -0.768 0.866 1.600 0.823 0.714 -0.167 2.173 0.755 -0.390 1.303 0.000 0.941 -0.196 0.938 2.548 0.627 -0.354 -0.585 0.000 0.889 1.092 0.987 0.967 1.063 1.220 0.974 +0 1.284 0.596 -1.017 0.169 -0.280 1.466 0.932 -0.437 1.087 1.417 0.933 1.525 0.000 1.853 0.973 0.777 2.548 1.151 0.216 1.055 0.000 0.906 0.958 0.982 0.985 1.829 1.279 1.110 +0 0.617 -0.564 1.060 1.101 0.253 0.372 -0.053 0.305 0.000 0.863 0.067 -0.967 2.215 0.492 0.755 1.572 1.274 0.571 -0.365 -1.322 0.000 0.713 0.665 0.993 0.940 0.587 0.838 0.682 +0 0.433 1.946 1.148 2.090 -0.837 1.783 0.931 0.415 0.000 1.021 0.896 -1.582 1.107 0.930 0.175 -1.039 2.548 0.830 -0.276 1.717 0.000 2.127 1.599 1.287 1.079 0.621 1.167 1.247 +0 1.848 0.807 1.122 1.029 0.123 0.765 -2.911 -0.528 0.000 
0.803 -1.244 -1.723 0.000 1.060 -1.364 -0.712 2.548 1.121 -0.376 1.168 3.102 2.130 1.336 1.496 0.975 0.944 1.202 1.925 +0 0.516 -0.463 -1.336 0.118 -0.368 1.246 -0.071 0.458 2.173 1.316 -0.104 -1.646 1.107 0.456 1.011 0.613 0.000 0.383 0.811 -0.393 0.000 0.364 0.821 0.978 1.054 1.785 0.995 0.770 +1 1.405 -1.623 -1.482 0.355 -0.434 0.843 -1.337 0.078 2.173 0.539 -1.129 1.170 0.000 0.635 -0.357 0.798 2.548 0.548 -0.826 -1.019 0.000 0.655 0.795 0.985 0.881 0.713 0.799 0.665 +1 1.126 -0.151 -0.153 1.247 -0.770 0.713 -0.489 0.871 2.173 0.437 -0.035 1.127 0.000 0.883 0.637 1.432 2.548 1.055 -0.718 -1.426 0.000 0.759 0.713 0.981 0.938 0.788 0.859 0.752 +1 1.304 -0.921 -0.486 1.420 -0.886 0.688 0.329 1.112 2.173 0.771 -1.362 1.197 1.107 0.459 -2.619 0.306 0.000 0.589 -0.637 -1.176 0.000 0.915 0.762 0.985 1.594 1.053 1.159 0.986 +1 0.387 -1.681 1.141 0.462 0.893 1.539 0.080 -1.099 0.000 1.155 -0.647 0.984 0.000 1.274 0.044 0.602 1.274 0.790 -0.186 -1.698 0.000 0.902 0.883 0.990 0.646 0.408 0.862 0.781 +0 1.813 0.697 -0.105 0.595 0.050 1.313 0.976 -1.570 1.087 0.547 -1.232 1.157 0.000 0.474 0.376 -0.626 0.000 1.119 0.085 0.589 3.102 1.073 0.771 0.993 1.577 1.329 1.085 1.010 +1 1.577 0.103 0.514 0.304 -1.598 1.355 0.796 -1.201 2.173 0.724 -1.358 -0.582 0.000 0.886 -0.264 0.171 0.000 0.884 0.757 1.394 3.102 0.941 1.070 0.988 1.308 0.834 1.207 1.027 +1 2.559 -0.760 -1.062 0.168 -1.673 1.639 -1.164 0.447 1.087 0.583 -1.710 1.537 0.000 0.383 -0.003 1.613 0.000 0.474 -0.093 -0.527 3.102 0.723 1.214 0.978 0.530 0.887 1.112 0.935 +1 0.512 2.003 -1.193 0.580 1.629 0.790 0.940 0.571 0.000 1.074 0.441 1.239 2.215 1.296 -0.135 -1.076 2.548 0.941 -0.203 -0.570 0.000 0.889 0.855 0.987 0.820 1.156 0.842 0.752 +0 1.237 0.096 -0.398 0.749 -1.372 0.473 1.170 1.165 0.000 1.095 1.036 0.697 2.215 1.150 0.337 -1.724 0.000 0.977 -1.004 -0.893 3.102 0.875 0.916 1.025 0.705 1.599 0.974 0.895 +1 0.421 1.651 -0.894 0.500 -0.813 1.527 0.155 0.372 2.173 0.922 0.060 -1.201 2.215 0.955 -0.768 1.652 0.000 1.020 -1.738 -1.722 0.000 0.729 1.045 0.987 1.138 1.727 1.334 1.117 +1 2.447 0.492 1.136 0.786 0.485 0.733 -0.083 -1.068 0.000 0.892 0.364 -0.445 2.215 1.129 0.671 0.398 0.000 1.745 1.071 -1.176 3.102 1.728 1.111 1.062 1.233 0.869 1.006 0.988 +0 3.276 -0.698 0.157 0.350 1.531 2.247 -1.092 -1.584 1.087 0.595 -0.312 1.463 0.000 0.598 -0.878 -1.355 2.548 1.716 -1.236 0.146 0.000 1.169 0.778 1.402 2.281 0.314 1.409 1.154 +0 0.426 -1.842 1.027 1.309 -1.209 0.869 -0.880 0.284 2.173 0.547 -0.305 0.715 2.215 0.547 -0.956 1.345 0.000 0.941 -1.430 -1.194 0.000 0.762 0.978 0.988 1.099 0.484 0.969 0.801 +0 0.711 -0.606 -1.401 0.559 1.559 1.157 -0.637 -0.798 1.087 1.375 -0.382 0.587 2.215 0.694 0.893 1.407 0.000 1.371 0.495 0.340 0.000 0.907 0.986 0.994 1.051 1.776 1.144 0.981 +1 0.575 1.471 1.241 1.147 -1.157 0.885 -1.095 0.506 2.173 0.485 -1.130 1.292 1.107 0.741 -0.551 -0.698 0.000 0.631 -0.592 -1.269 0.000 0.373 0.884 0.988 1.084 0.627 1.172 0.932 +0 0.417 0.017 -0.260 2.364 -1.152 1.086 1.530 0.761 2.173 0.449 1.709 0.187 0.000 0.584 0.352 -0.593 1.274 0.798 0.463 1.298 0.000 0.824 0.726 0.989 0.557 1.113 1.179 0.957 +1 0.485 0.495 1.011 0.929 -1.153 0.599 -0.231 1.219 0.000 0.855 -0.264 -1.407 0.000 1.049 -1.136 -0.319 2.548 0.825 -1.609 -0.212 0.000 1.062 0.944 0.989 1.340 0.556 0.962 0.906 +0 0.383 1.378 -0.309 3.185 0.576 1.240 1.408 -1.196 0.000 1.147 0.630 -0.794 2.215 1.143 0.831 1.090 2.548 0.708 1.028 1.601 0.000 0.843 0.927 1.097 0.823 1.218 1.040 1.078 +0 1.977 0.276 0.875 1.501 0.437 1.080 -1.059 -0.973 0.000 0.432 -0.351 
-0.337 0.000 0.585 0.835 -1.618 1.274 0.688 0.398 -1.077 3.102 0.947 1.098 0.993 0.856 0.252 0.678 1.023 +0 1.488 -0.569 -1.039 0.694 1.479 0.829 0.106 0.304 0.000 0.669 -0.861 0.710 0.000 1.246 0.589 -0.914 2.548 1.266 1.118 0.071 0.000 0.994 0.993 1.078 0.901 1.282 0.969 0.905 +1 0.846 -0.911 0.275 2.294 0.570 0.760 -0.018 -1.738 0.000 0.999 -0.309 -1.184 1.107 1.244 0.447 -1.128 2.548 0.722 1.077 -0.315 0.000 1.346 1.008 1.000 1.915 0.496 1.424 1.343 +1 0.463 -0.458 -1.189 1.113 0.314 1.157 0.018 -1.139 1.087 0.925 -0.549 0.312 1.107 2.565 1.161 1.687 0.000 2.207 0.538 0.119 0.000 1.570 1.086 0.989 0.998 1.537 1.056 0.866 +0 1.575 -0.133 0.690 0.911 1.393 1.700 0.238 -0.678 0.000 1.468 -0.736 1.038 2.215 1.132 0.077 -1.286 1.274 0.494 -0.526 -0.296 0.000 0.788 0.823 0.988 0.695 1.327 1.220 1.108 +1 2.218 -0.819 1.543 0.627 1.024 1.113 -1.041 -0.222 2.173 0.436 -0.638 0.788 0.000 0.910 -1.035 -1.065 2.548 0.449 -0.494 -0.081 0.000 0.407 0.636 0.992 0.874 0.866 1.030 0.786 +0 3.067 -0.053 -1.572 0.692 1.486 1.103 -1.995 0.104 0.000 0.760 0.132 -0.850 2.215 0.695 -1.754 0.704 0.000 1.490 -0.793 0.169 3.102 0.814 0.765 0.979 0.875 0.940 1.040 1.442 +0 2.943 -0.994 1.227 2.299 1.335 3.010 0.280 -0.440 1.087 1.422 -1.647 1.489 0.000 1.146 -0.189 0.038 0.000 0.634 0.990 -0.130 0.000 0.725 1.031 1.011 0.837 0.918 2.221 1.682 +0 2.993 0.024 -0.013 0.531 -0.358 0.810 0.737 1.447 0.000 1.150 1.142 1.680 2.215 1.319 0.129 -0.664 2.548 0.682 -0.802 1.324 0.000 1.099 0.966 0.993 0.794 1.326 1.131 1.095 +1 0.783 -1.438 -1.408 1.690 -1.742 0.815 -1.032 -0.147 2.173 0.892 -0.123 0.578 0.000 0.687 -0.632 0.654 2.548 1.031 -0.209 -0.515 0.000 0.915 0.988 0.975 0.801 0.638 0.850 0.784 +1 0.588 0.577 0.923 1.654 1.678 0.701 1.985 -0.311 0.000 0.642 1.383 0.548 1.107 0.746 0.840 -0.532 1.274 0.796 2.027 -1.418 0.000 0.989 0.876 0.987 0.887 0.636 0.767 0.946 +0 0.555 -1.112 -1.107 0.067 0.634 0.608 0.469 0.617 0.000 0.711 -0.694 0.928 2.215 1.323 0.276 1.555 0.000 1.498 0.452 -0.649 3.102 0.944 0.859 0.998 0.737 1.113 0.694 0.637 +1 1.782 1.678 -0.348 0.701 -0.861 0.411 2.264 -1.468 0.000 0.997 -1.550 1.179 2.215 1.347 0.463 -1.252 1.274 3.225 0.687 0.544 0.000 2.209 1.703 0.983 3.452 1.845 2.222 1.984 +1 1.085 -0.040 -0.512 1.608 -0.953 0.952 -0.662 1.107 2.173 0.568 0.907 1.513 2.215 0.791 -0.338 0.529 0.000 0.578 -2.193 -0.143 0.000 1.120 1.053 0.990 0.890 1.038 1.078 1.112 +1 1.554 -0.281 1.428 0.869 -1.557 0.965 0.685 -0.005 2.173 0.658 0.051 0.640 0.000 0.821 0.624 -1.023 2.548 0.694 -0.719 -0.828 0.000 0.951 0.968 0.989 0.775 0.880 0.933 0.818 +0 0.282 1.135 0.761 0.814 -0.187 0.983 1.931 0.756 0.000 1.244 -0.186 1.685 0.000 1.267 1.297 -0.257 2.548 0.885 -0.349 -0.805 0.000 1.079 0.727 0.978 1.018 0.922 0.919 0.841 +1 2.015 0.074 0.196 0.616 0.850 0.955 0.800 -1.024 2.173 0.329 0.745 0.092 0.000 0.528 -2.331 -1.494 0.000 0.988 0.667 1.568 3.102 0.791 0.918 0.988 0.842 0.741 0.909 0.757 +0 1.381 0.573 0.569 0.751 -0.271 0.683 0.110 1.279 2.173 0.910 -0.546 -1.439 0.000 0.899 0.687 -0.118 2.548 0.822 0.279 -0.967 0.000 0.699 0.923 0.986 0.892 0.983 0.744 0.773 +0 1.675 -0.134 1.429 0.649 -1.330 0.425 -0.200 -0.842 0.000 0.676 0.743 0.559 2.215 1.064 -0.859 -0.133 1.274 0.391 0.491 -0.234 0.000 0.411 0.667 0.989 1.013 1.015 0.858 0.707 +1 0.844 -0.452 1.709 0.991 0.751 1.308 -0.721 -0.545 2.173 0.928 -0.903 1.573 0.000 0.538 0.505 0.257 1.274 0.420 -1.963 0.732 0.000 0.843 0.935 0.989 1.248 0.996 0.933 0.856 +1 1.189 -0.798 -1.716 0.420 0.571 1.276 -0.038 -0.865 2.173 0.731 -0.396 -1.603 2.215 1.377 
-1.194 0.208 0.000 0.938 0.130 0.601 0.000 0.853 1.079 0.985 1.201 0.916 1.070 0.934 +1 0.784 0.187 0.216 0.725 -0.398 0.804 1.435 1.632 0.000 0.619 0.380 -1.060 2.215 0.560 1.374 0.779 0.000 0.832 -0.327 0.520 3.102 0.841 1.010 0.986 0.779 0.692 0.739 0.873 +1 1.668 1.308 0.883 0.558 0.885 0.984 0.666 -1.143 2.173 0.800 2.514 -0.599 0.000 0.633 0.751 0.467 2.548 1.028 1.631 0.800 0.000 1.187 0.946 0.978 1.454 0.979 0.987 0.962 +0 1.563 1.440 -1.146 0.238 0.378 0.852 -0.031 0.332 2.173 0.742 0.675 1.345 2.215 0.735 -1.261 0.940 0.000 0.745 -0.744 -0.083 0.000 0.681 0.999 0.993 1.409 1.022 1.006 1.089 +1 1.714 0.914 -0.905 0.505 1.285 1.448 -0.050 1.444 2.173 2.560 0.028 -0.185 2.215 1.294 -1.127 1.120 0.000 2.151 1.269 1.278 0.000 0.864 0.874 1.186 1.390 2.821 1.556 1.251 +0 0.759 0.290 0.245 1.818 -1.090 0.569 -0.706 1.294 2.173 0.719 -1.317 0.788 0.000 0.862 -0.980 -0.112 0.000 1.285 -0.937 -1.461 3.102 0.891 0.882 1.518 1.153 0.585 0.872 0.898 +1 1.395 0.929 0.919 0.583 -1.520 1.351 1.069 -0.869 0.000 1.194 -0.328 -0.335 2.215 0.782 -1.000 0.794 2.548 0.389 0.168 -1.727 0.000 0.922 1.336 1.013 1.032 0.961 1.141 1.064 +1 0.391 2.002 -0.362 1.186 1.525 0.892 0.331 -0.263 2.173 0.682 1.288 -1.468 0.000 0.518 1.891 0.615 0.000 1.185 0.286 0.938 3.102 0.936 0.838 0.988 1.141 0.960 0.838 0.769 +1 1.108 -1.298 1.717 1.306 0.659 0.950 0.075 -0.881 2.173 1.415 -1.919 -0.270 0.000 0.995 2.042 1.537 0.000 1.258 -1.076 1.127 3.102 0.932 1.468 1.358 0.689 1.412 1.330 1.484 +0 1.755 -1.230 1.250 0.664 -0.171 1.111 -2.133 1.553 0.000 0.867 -0.757 -0.383 0.000 1.012 -0.210 -0.919 2.548 1.086 0.309 -0.196 0.000 0.796 0.836 1.432 1.087 0.980 0.831 0.817 +0 2.186 -0.417 0.810 0.954 0.537 2.336 1.234 -0.971 2.173 1.785 -1.049 0.924 1.107 0.522 1.868 -0.974 0.000 0.551 0.181 -0.158 0.000 0.738 0.904 0.981 1.010 5.260 2.620 1.949 +1 0.877 0.086 0.219 1.018 0.250 1.034 1.231 -1.305 0.000 1.131 1.298 0.639 2.215 1.132 0.867 -1.709 0.000 1.358 0.987 -0.072 1.551 0.886 0.909 0.984 1.475 0.674 1.068 1.156 +0 0.303 -1.653 -1.154 0.863 -1.648 0.478 1.243 -0.902 0.000 0.880 -0.822 0.608 0.000 0.754 -0.333 -0.984 2.548 0.683 -1.134 0.835 0.000 0.334 0.776 0.991 0.767 0.621 0.595 0.632 +1 0.724 -0.782 1.216 1.603 -1.699 0.458 2.936 -1.739 0.000 1.464 -0.479 0.215 2.215 1.319 0.586 -0.303 2.548 0.374 -0.214 -0.219 0.000 1.640 1.438 0.987 1.295 1.107 1.582 1.466 +1 1.472 1.506 0.538 1.702 -0.137 1.362 1.593 -1.572 0.000 0.432 0.222 -1.127 1.107 0.607 0.400 -0.047 0.000 0.794 0.480 0.560 1.551 1.897 1.187 1.252 0.740 0.536 0.810 0.949 +0 0.398 -0.991 -0.681 0.656 -0.492 0.607 0.050 1.651 1.087 0.932 0.307 0.721 0.000 0.704 0.780 -1.288 2.548 0.772 -0.446 -0.194 0.000 0.945 0.917 0.993 0.615 0.511 0.664 0.649 +1 2.553 -1.408 0.026 0.592 -0.596 2.056 -1.070 1.049 0.000 1.208 2.483 1.582 0.000 1.226 -1.033 -0.439 0.000 1.880 -1.003 -1.322 1.551 1.056 0.739 0.985 1.148 0.460 0.841 1.054 +1 1.508 -0.041 0.642 0.926 -0.090 1.080 0.458 -1.451 2.173 0.976 1.671 -0.843 0.000 1.088 0.961 1.176 0.000 1.025 0.680 -0.612 1.551 0.588 0.847 1.003 1.341 0.790 0.923 0.934 +1 0.286 1.733 1.709 1.188 0.277 3.223 -0.279 1.616 0.000 3.382 0.276 -0.094 2.215 2.138 1.739 -1.046 0.000 1.926 0.824 0.283 1.551 2.765 2.764 0.982 1.059 1.130 2.564 1.916 +1 1.376 0.906 -0.810 1.142 -0.269 1.449 1.848 1.601 0.000 0.570 0.901 0.504 1.107 0.306 1.439 1.420 0.000 0.462 0.264 0.507 3.102 1.284 0.986 0.987 0.799 0.144 0.733 0.908 +1 0.403 1.239 0.483 1.209 -0.257 1.969 -0.551 1.230 0.000 1.331 0.531 -0.167 2.215 1.386 -0.077 -1.294 0.000 1.525 -1.022 
-1.148 3.102 1.052 0.775 0.984 1.296 1.634 1.789 1.554 +0 0.681 -1.348 -1.704 2.095 1.236 2.960 0.096 -0.321 2.173 1.069 1.220 -1.719 0.000 1.014 0.537 0.132 0.000 1.746 1.385 1.121 0.000 1.045 0.722 0.988 3.675 1.966 2.413 2.556 +1 0.536 0.762 0.245 0.990 0.032 1.285 0.794 -1.571 2.173 0.737 0.403 -0.663 0.000 1.276 -0.098 1.114 0.000 0.716 -0.203 -0.374 0.000 1.029 1.278 0.996 0.971 1.105 1.085 1.038 +0 1.405 -0.103 -0.714 0.520 -0.875 0.828 0.161 1.030 1.087 1.526 -0.694 -1.394 2.215 1.207 0.306 0.140 0.000 1.335 -0.700 0.422 0.000 0.941 0.975 0.984 0.863 1.545 1.104 1.005 +1 2.404 -1.208 -1.262 0.375 1.151 0.765 -1.140 0.444 2.173 0.410 -1.886 0.143 0.000 1.061 -1.400 1.158 0.000 0.794 -0.010 -0.369 3.102 0.828 0.873 1.083 1.194 0.741 0.858 0.783 +0 1.051 0.688 1.035 1.562 -1.569 0.645 -0.118 0.542 0.000 1.062 -0.865 -0.669 0.000 0.777 0.926 -1.672 2.548 1.159 -1.904 -0.123 0.000 1.289 1.269 1.269 0.642 0.635 1.142 1.245 +1 0.536 -2.161 -0.523 0.782 0.176 1.007 -0.966 -0.371 1.087 0.774 -0.930 1.006 0.000 1.172 -0.155 1.686 2.548 0.607 0.509 0.937 0.000 0.942 1.091 0.992 0.980 1.415 0.871 0.776 +1 0.755 -1.317 0.774 0.848 -0.680 1.553 -0.864 1.529 0.000 0.833 -0.699 0.070 2.215 0.961 0.540 -0.303 0.000 1.455 -0.317 0.506 3.102 0.912 0.913 1.071 0.706 0.417 0.629 0.691 +0 0.836 1.515 0.305 0.708 -0.432 0.670 0.054 1.031 0.000 0.974 1.228 -1.310 2.215 0.909 -0.941 0.230 0.000 1.018 -1.030 -1.200 0.000 1.025 1.040 0.988 0.986 0.214 1.016 1.277 +1 0.793 -0.546 -1.602 1.018 -1.047 0.707 -0.915 -1.001 2.173 1.143 -0.554 0.737 2.215 0.841 -1.581 0.898 0.000 0.557 -2.003 0.261 0.000 0.484 0.953 0.986 1.122 1.343 0.863 0.792 +0 0.712 1.462 -1.368 0.176 -1.675 1.370 0.889 -1.105 2.173 1.956 1.630 0.438 0.000 0.547 0.076 0.383 0.000 0.958 -0.472 -1.509 3.102 0.623 1.050 0.998 0.654 1.061 0.726 0.649 +1 0.726 0.403 -1.133 1.431 0.362 0.780 -2.228 -1.138 0.000 1.424 0.596 -0.508 2.215 1.182 0.328 1.000 0.000 1.278 1.383 1.202 0.000 0.976 0.718 1.377 0.987 0.532 0.850 0.795 +1 0.478 0.458 1.405 1.527 -0.316 0.742 -0.429 -1.424 0.000 1.032 0.113 1.625 0.000 1.484 -0.256 0.599 2.548 1.374 0.664 -0.187 3.102 0.879 1.235 1.184 0.957 0.941 0.977 0.896 +0 1.003 -0.158 -1.601 1.332 -0.997 0.894 0.334 -1.050 1.087 1.504 -0.424 0.566 0.000 1.177 0.108 0.765 0.000 0.983 -0.978 -0.136 1.551 0.681 0.862 0.991 0.584 1.099 1.043 1.005 +0 0.562 0.100 0.720 0.518 -0.365 0.625 -1.732 0.369 0.000 0.890 0.027 -1.510 1.107 0.867 -1.437 -1.547 2.548 0.667 -0.301 -0.162 0.000 0.869 0.900 0.988 0.857 0.834 0.858 0.740 +0 0.838 -1.188 1.059 0.926 -1.380 0.399 0.190 1.717 0.000 1.029 -0.077 0.161 2.215 0.676 0.698 -1.363 2.548 0.458 1.136 -0.192 0.000 0.762 0.844 0.989 0.913 0.951 0.855 0.757 +1 0.891 -0.684 0.886 0.742 -0.267 0.800 -1.376 1.379 2.173 0.970 -1.909 -0.522 0.000 1.011 -0.042 0.239 1.274 1.366 -1.209 -1.214 0.000 0.958 1.192 0.985 0.591 1.256 1.030 0.893 +1 1.344 1.360 -0.688 0.637 -0.182 2.759 -0.138 1.005 0.000 2.252 0.646 -0.479 2.215 1.341 -1.885 -1.248 0.000 1.029 1.310 -1.575 0.000 1.013 2.467 0.991 0.707 0.850 1.932 1.587 +0 1.293 2.129 -0.515 0.631 1.574 0.695 1.435 0.573 2.173 0.323 0.644 0.630 0.000 1.501 0.622 -1.638 0.000 0.671 0.759 -1.304 3.102 0.951 1.023 1.191 0.713 0.744 0.695 0.759 +1 0.895 1.219 -1.735 0.245 -1.280 0.920 0.198 -0.698 0.000 0.921 1.530 0.830 1.107 0.899 0.245 0.431 2.548 1.430 -1.771 -1.739 0.000 1.402 1.079 0.986 0.909 0.758 0.923 0.833 +1 1.218 0.879 0.954 1.150 1.721 0.609 -0.442 -0.915 0.000 0.633 2.455 0.676 0.000 1.078 0.001 -0.167 1.274 1.212 0.204 -1.244 3.102 0.878 
1.124 1.046 1.053 0.729 0.891 0.850 +0 0.278 0.542 -0.238 0.912 0.377 1.701 -1.049 0.808 2.173 0.897 1.528 -0.487 2.215 1.604 -2.647 -1.243 0.000 2.155 2.077 -1.719 0.000 1.065 0.827 0.979 0.910 3.562 1.912 1.533 +1 1.125 0.984 1.054 0.448 0.851 2.358 1.631 0.640 0.000 0.992 0.253 -0.796 1.107 3.351 1.235 -1.003 2.548 0.964 0.560 -1.341 0.000 2.535 2.318 0.984 1.468 1.173 1.884 1.484 +1 1.157 -0.196 -1.152 0.807 0.818 1.395 0.341 1.354 2.173 0.548 0.222 0.567 2.215 0.908 -0.759 -0.178 0.000 2.051 0.204 -0.537 0.000 0.977 0.861 1.311 1.094 0.843 1.066 0.926 +1 0.522 -0.542 1.219 1.440 -0.577 1.053 0.123 1.659 1.087 1.131 -0.649 0.167 0.000 1.092 0.115 0.469 0.000 0.915 -0.565 -0.843 3.102 0.959 0.729 1.200 1.134 0.911 0.834 0.774 +0 0.718 -2.132 1.315 2.040 -1.600 0.732 -1.385 -0.009 2.173 0.705 -1.960 -0.383 0.000 1.103 -0.919 0.486 0.000 0.597 -1.425 0.601 0.000 0.668 0.657 0.990 1.221 0.771 0.802 0.720 +1 0.982 0.610 0.263 0.861 1.364 1.761 1.128 1.404 0.000 1.577 0.660 -0.708 0.000 2.121 -0.678 -0.585 2.548 1.505 -1.030 0.844 0.000 0.974 1.137 1.067 1.368 1.248 1.650 1.302 +1 0.540 0.975 0.523 0.616 -0.129 3.044 0.098 -0.814 2.173 3.624 -0.386 0.425 0.000 2.641 0.796 1.597 0.000 3.132 0.267 1.092 3.102 0.911 1.661 0.984 1.431 3.252 2.331 1.700 +1 1.174 0.696 -1.681 0.560 0.124 1.240 0.971 -1.154 1.087 1.037 0.936 0.729 0.000 1.223 1.191 -0.498 2.548 1.351 -0.995 0.922 0.000 0.639 2.036 1.121 0.906 0.892 1.744 1.370 +1 1.524 0.257 -1.190 0.305 -1.703 0.485 0.862 -1.477 0.000 0.643 1.213 0.704 2.215 0.954 1.234 0.377 0.000 1.315 -0.116 0.160 3.102 1.253 1.040 0.988 0.896 0.736 0.762 0.739 +0 2.042 1.477 0.032 0.224 0.749 0.629 0.153 1.679 2.173 0.690 1.531 -1.692 0.000 0.864 1.288 -1.157 0.000 1.277 0.191 0.026 3.102 0.557 0.995 0.990 1.207 0.946 0.852 0.837 +1 0.852 -0.490 1.494 0.988 0.771 1.084 0.834 -0.587 2.173 0.833 -0.204 0.318 0.000 0.844 -0.733 -1.395 0.000 0.853 0.035 0.793 3.102 1.343 0.837 0.991 1.750 1.051 1.123 0.982 +1 1.554 0.416 1.645 0.750 -0.629 0.996 -0.950 0.696 2.173 0.902 -1.119 -0.325 0.000 0.578 -1.015 -1.500 0.000 0.454 0.737 -0.900 1.551 0.965 1.104 1.329 0.647 1.031 0.951 0.925 +0 0.941 -1.055 -1.462 0.971 0.983 0.975 -0.475 -0.833 1.087 0.986 -1.313 0.475 0.000 0.548 -1.422 -0.477 0.000 1.208 -0.443 0.819 3.102 0.861 1.200 1.067 0.678 1.145 0.835 0.787 +1 0.749 -0.137 1.733 0.557 0.613 0.737 0.926 -1.017 2.173 0.868 -0.348 1.191 0.000 1.137 -0.320 -0.556 2.548 0.765 -1.122 0.469 0.000 0.845 0.975 0.983 1.137 0.910 0.953 0.838 +1 0.764 -0.882 -0.082 1.263 -1.291 0.946 -0.184 0.273 0.000 0.582 -0.344 -1.363 0.000 0.951 -1.591 -0.855 0.000 1.172 0.208 0.853 0.000 0.875 0.846 1.206 0.604 0.150 0.555 0.688 +1 0.803 -0.512 -0.836 0.504 0.497 0.865 -0.159 1.331 0.000 0.615 -0.448 0.463 2.215 1.123 0.683 -0.287 1.274 0.501 -0.316 -0.297 0.000 1.006 1.097 0.988 0.653 0.793 0.730 0.722 +1 0.735 -1.123 1.440 0.679 -0.575 1.272 -0.160 1.407 0.000 1.142 -0.158 0.114 2.215 1.284 -0.031 -0.726 1.274 0.821 2.164 -0.496 0.000 0.820 1.232 0.987 0.999 0.888 0.981 0.914 +1 0.628 -0.892 -0.077 1.314 -0.946 1.313 -0.751 1.277 1.087 0.550 -0.508 -1.522 0.000 0.436 -1.033 -0.525 0.000 0.755 0.621 0.290 0.000 1.013 1.106 0.989 0.690 0.780 0.891 0.843 +1 1.231 0.140 1.388 1.043 -1.195 0.885 0.232 -0.466 2.173 1.700 -2.329 0.530 0.000 1.392 0.744 -1.232 2.548 0.854 0.139 0.751 0.000 2.630 2.794 1.143 0.793 0.971 2.240 1.828 +1 0.546 -0.274 0.313 0.564 1.044 1.113 0.444 1.048 2.173 0.767 1.483 -0.372 0.000 1.101 0.456 -0.518 0.000 1.026 -0.347 -0.799 0.000 0.949 0.668 0.983 0.732 0.528 
0.782 0.683 +1 0.927 -2.132 0.691 1.810 0.550 2.071 0.753 -0.957 0.000 1.980 -0.844 -1.682 0.000 2.015 -0.503 0.548 2.548 2.177 0.770 1.635 1.551 0.714 0.976 1.001 2.251 1.854 1.552 1.284 +0 0.556 1.352 -1.036 1.079 0.420 0.701 0.250 -1.082 0.000 0.614 -0.456 0.818 2.215 1.095 -0.191 -1.663 0.000 1.380 -0.087 -0.162 3.102 0.869 0.967 1.037 0.865 0.660 0.751 0.832 +1 0.648 -0.867 0.110 0.256 1.742 1.128 -1.005 -1.677 0.000 1.031 -0.228 -0.122 2.215 1.440 -2.508 0.929 0.000 2.177 -0.201 0.585 3.102 0.925 1.405 0.994 0.678 0.804 1.082 0.890 +0 0.771 -0.232 0.068 0.993 -1.725 0.843 -0.053 -1.217 2.173 1.066 -0.847 0.438 0.000 0.796 -0.153 1.740 2.548 0.798 0.167 0.479 0.000 0.677 0.856 1.211 0.845 0.471 0.820 0.733 +0 1.284 -0.095 -1.055 0.862 0.078 0.485 -1.734 0.653 0.000 0.879 -1.155 1.380 2.215 0.374 -2.442 -0.714 0.000 0.424 0.267 -0.152 3.102 0.811 0.808 1.243 0.592 0.708 0.711 0.790 +1 1.030 1.361 1.228 0.191 -0.570 0.507 0.163 -1.098 2.173 0.770 1.844 1.193 0.000 1.052 1.391 0.484 0.000 1.811 0.911 -0.289 3.102 0.863 1.030 0.984 0.762 0.833 0.887 0.769 +0 0.328 0.676 0.970 2.030 -1.372 0.790 -0.667 0.740 0.000 1.020 -0.832 0.201 0.000 1.263 0.452 -0.616 2.548 1.848 -0.193 -0.293 3.102 0.909 0.989 0.986 0.845 0.549 0.896 1.119 +1 0.370 2.240 -0.983 0.504 -1.527 0.739 1.574 0.135 0.000 0.865 1.110 1.509 2.215 1.186 0.614 0.487 0.000 1.583 -0.132 -1.213 3.102 0.955 1.123 0.998 0.729 0.997 1.015 0.855 +0 1.610 1.244 -0.963 0.917 -0.865 1.014 0.305 0.824 0.000 0.448 0.563 -0.703 2.215 0.774 0.823 1.269 0.000 0.708 0.909 0.514 3.102 0.786 0.904 1.007 0.811 0.471 0.589 0.891 +1 0.670 0.044 1.193 1.258 0.166 1.219 -0.697 -1.654 2.173 1.372 -1.160 -0.085 0.000 0.727 0.430 1.285 2.548 0.384 -2.021 -1.288 0.000 1.041 1.270 1.016 1.256 0.916 1.091 1.003 +0 0.906 -0.455 0.828 0.616 -1.454 0.593 -0.053 0.031 2.173 0.476 0.879 -1.464 0.000 0.613 -0.939 -0.062 0.000 0.664 -0.345 -0.879 3.102 0.703 1.083 0.991 0.591 0.500 0.714 0.690 +1 0.850 -1.142 1.593 0.884 -0.304 0.577 -2.021 0.813 0.000 0.606 -1.016 1.314 2.215 0.622 0.041 -0.442 0.000 0.991 -0.318 -0.946 1.551 0.948 1.107 1.189 0.701 0.669 0.741 0.791 +1 0.548 0.608 0.617 0.143 0.972 1.222 -0.858 -0.988 0.000 1.008 -0.140 1.019 2.215 0.768 -1.176 -0.362 2.548 0.629 -1.059 0.574 0.000 1.349 0.867 0.992 0.658 1.052 0.904 0.783 +1 0.502 0.184 0.905 0.300 0.853 0.818 0.337 -0.563 2.173 1.053 -1.063 0.299 2.215 1.003 -0.543 -1.080 0.000 0.709 -2.013 1.443 0.000 0.843 0.956 0.989 0.693 1.432 0.971 0.871 +1 0.768 -0.710 -1.372 2.113 -0.830 1.058 -0.076 0.956 2.173 0.305 0.492 0.961 0.000 0.441 0.242 0.037 0.000 0.505 -2.495 -0.133 0.000 1.281 1.386 0.978 1.447 0.790 1.052 1.077 +1 0.280 -0.000 0.180 1.718 -1.344 1.089 1.111 1.473 2.173 1.045 1.546 -0.847 0.000 1.048 -0.754 0.319 0.000 1.119 0.644 0.524 3.102 0.873 1.054 0.987 0.964 0.906 1.019 0.853 +1 0.599 0.673 0.220 0.789 -0.813 0.640 -1.287 -1.735 2.173 0.988 -0.398 -1.231 0.000 1.046 -0.279 0.180 0.000 0.855 -1.171 0.414 0.000 0.684 0.911 0.989 0.823 1.647 0.938 0.767 +1 2.046 0.547 1.262 1.213 1.184 1.292 -1.961 -0.739 0.000 1.364 -0.719 -0.171 2.215 0.655 -0.538 1.735 0.000 0.890 -0.133 0.702 3.102 1.805 1.476 0.982 1.646 0.762 1.101 1.468 +1 0.893 -0.013 0.018 1.731 1.153 2.113 0.500 0.430 0.000 4.139 0.983 -1.132 0.000 1.228 -0.033 -0.428 2.548 1.460 2.321 1.464 0.000 1.148 1.614 1.471 1.075 0.882 1.277 1.220 +0 2.224 -1.541 0.973 0.827 0.589 1.046 -1.133 -0.776 0.000 1.026 -0.509 -0.839 0.000 0.722 -0.810 1.586 2.548 0.380 -0.821 0.117 3.102 0.704 0.915 0.987 0.546 0.389 0.568 0.886 +0 0.650 
2.150 0.122 0.313 -0.968 1.054 -1.727 -0.603 0.000 1.804 1.197 0.715 2.215 1.438 1.245 -1.565 2.548 1.548 1.204 1.107 0.000 1.421 0.933 0.986 0.914 1.519 1.001 0.857 +0 0.358 -0.760 0.247 1.364 -1.066 0.777 0.145 0.548 0.000 0.843 0.538 0.964 2.215 1.636 0.219 -1.148 2.548 0.561 1.296 1.003 0.000 0.869 1.234 0.987 0.921 1.196 0.830 0.792 +1 1.413 0.070 -1.608 0.641 -1.479 2.098 1.907 0.326 0.000 1.160 1.223 -1.443 2.215 1.792 0.391 -1.100 1.274 1.203 0.219 0.343 0.000 0.777 1.099 0.989 1.214 0.803 0.892 0.954 +0 1.452 -0.446 0.091 0.374 -1.082 0.516 0.567 -0.996 2.173 0.855 -0.657 0.575 1.107 1.058 0.413 1.730 0.000 0.752 -0.837 1.565 0.000 0.797 0.934 0.985 0.794 1.164 0.755 0.726 +1 0.459 -0.580 0.109 0.831 -1.377 0.674 -1.678 -0.483 0.000 0.533 -0.085 1.383 0.000 1.211 1.283 1.254 2.548 0.992 0.625 0.376 3.102 1.701 1.392 0.982 1.616 0.659 1.334 1.168 +0 0.827 -1.997 0.287 1.068 1.401 0.842 0.622 -1.245 1.087 0.560 -0.518 -1.499 0.000 1.361 -0.902 0.173 0.000 1.064 0.313 0.540 3.102 1.371 0.999 1.099 2.045 1.009 1.412 1.179 +0 0.703 -1.039 -1.515 1.416 1.047 1.089 -1.443 0.011 2.173 0.231 -1.010 -0.214 2.215 0.274 -1.638 1.584 0.000 1.070 -0.555 -1.132 0.000 0.522 0.937 1.022 0.613 0.215 0.731 0.643 +1 0.952 -1.600 -0.784 0.575 1.351 0.613 -1.327 -0.195 2.173 0.856 -0.070 1.016 2.215 1.360 -0.679 -0.630 0.000 0.837 -1.608 1.331 0.000 0.828 0.956 0.988 0.708 1.190 0.790 0.698 +0 0.365 -0.408 0.146 1.319 -1.675 0.988 0.314 1.361 2.173 1.168 1.005 0.241 2.215 0.881 2.078 -0.689 0.000 1.266 1.432 -0.216 0.000 0.585 0.908 0.987 0.938 1.459 1.130 1.275 +1 2.138 0.432 0.271 0.399 1.190 1.441 0.969 -0.982 0.000 0.893 1.032 -1.737 0.000 2.921 -0.751 -0.380 0.000 3.389 0.204 1.200 3.102 1.020 0.993 0.987 1.065 0.872 0.913 0.867 +0 1.203 1.930 -0.110 1.060 0.559 0.564 -2.826 1.301 0.000 1.686 0.253 -1.219 2.215 0.773 -0.116 -0.531 0.000 0.826 0.543 0.257 3.102 0.851 0.949 0.982 0.604 1.056 1.097 1.035 +0 0.624 0.911 0.788 2.753 0.122 1.299 1.350 -1.320 0.000 0.612 1.397 1.072 2.215 0.602 1.779 1.512 0.000 0.548 -0.438 -1.395 3.102 1.002 0.987 1.024 0.849 0.730 0.751 0.980 +0 0.329 -2.328 -0.489 2.053 -0.903 1.278 -2.321 1.237 0.000 1.013 -0.592 -0.034 2.215 0.470 -1.863 0.568 0.000 0.545 -0.718 -1.541 3.102 0.819 0.804 0.983 0.938 0.661 1.013 1.033 +0 1.752 -0.927 -1.454 0.722 1.305 0.481 -0.012 1.076 0.000 1.005 -0.690 0.002 2.215 0.813 -0.735 -0.380 2.548 0.476 0.568 -0.972 0.000 0.749 0.881 0.988 0.839 0.328 0.788 0.744 +1 0.593 1.487 -1.644 1.839 1.427 0.851 0.437 -0.405 2.173 0.385 1.149 -0.045 0.000 0.869 -0.150 0.996 2.548 0.648 1.720 -0.058 0.000 0.297 0.807 0.972 1.249 1.073 0.901 0.798 +1 0.491 0.770 0.611 1.057 0.617 0.615 -1.133 0.720 2.173 0.604 -0.720 -0.804 2.215 0.859 1.267 -1.004 0.000 0.411 -2.197 -1.509 0.000 2.433 1.433 0.983 0.646 0.898 1.090 0.947 +0 0.829 -1.553 -0.452 0.317 -0.641 0.701 -1.823 0.641 0.000 1.377 -1.058 -1.441 1.107 0.792 -0.827 0.091 2.548 0.855 1.660 0.928 0.000 1.030 1.119 0.982 0.902 1.094 1.144 0.985 +1 1.856 1.283 0.428 0.544 -1.521 1.035 0.807 -1.411 1.087 0.361 -0.884 -0.885 1.107 0.803 1.054 0.885 0.000 1.296 0.025 0.018 0.000 1.037 0.899 1.368 1.274 0.968 1.013 0.905 +1 1.000 -0.512 -1.416 0.584 -0.015 0.931 -0.925 1.117 0.000 0.448 -1.912 0.711 0.000 0.617 -1.375 -0.552 2.548 1.002 0.750 -0.607 3.102 0.937 0.853 1.009 0.744 0.914 0.810 0.708 +0 0.550 0.637 1.143 1.323 -0.815 0.484 -0.741 -0.217 1.087 0.607 -0.289 0.557 2.215 0.711 -1.555 -1.670 0.000 0.869 -0.008 1.569 0.000 0.850 0.867 1.160 0.867 0.543 0.706 0.761 +0 0.428 -0.488 1.082 1.301 
-0.386 0.478 -0.628 -1.453 0.000 0.765 0.232 0.488 2.215 0.575 0.922 -0.325 2.548 0.752 -2.228 1.676 0.000 0.920 0.912 1.002 0.696 0.548 0.659 0.660 +1 1.004 -2.255 0.249 0.635 0.764 1.375 -1.283 -1.009 0.000 0.451 -1.861 1.297 0.000 1.388 0.152 0.532 2.548 0.529 0.278 -1.533 3.102 0.869 0.982 0.983 0.881 0.630 0.866 0.762 +1 0.996 -0.802 0.466 0.788 -0.786 1.626 -0.873 0.186 0.000 2.490 -1.061 -1.495 2.215 0.713 -1.875 1.241 0.000 0.804 -0.559 1.307 3.102 0.765 0.996 1.109 0.670 0.786 0.857 0.738 +0 0.700 -1.020 -0.370 0.707 1.328 1.121 -0.525 -0.200 2.173 1.027 0.151 1.190 0.000 1.493 0.461 -1.556 0.000 0.866 0.694 0.622 3.102 1.221 0.902 0.988 0.853 1.040 1.087 0.930 +1 0.752 0.347 0.920 0.340 -1.173 1.016 0.117 -0.248 1.087 0.964 0.357 0.086 0.000 1.736 0.100 1.379 2.548 2.182 0.382 -1.548 0.000 0.977 0.913 0.983 0.914 1.646 0.889 0.767 +1 0.827 0.848 1.455 1.473 1.543 1.284 0.526 0.076 2.173 0.930 0.902 -0.887 2.215 0.440 0.814 -0.329 0.000 0.427 1.521 1.289 0.000 0.528 0.724 1.000 1.088 1.269 1.150 0.879 +1 0.520 0.381 -1.379 1.028 1.153 0.802 -1.235 -0.748 2.173 0.620 -2.032 0.736 0.000 0.379 0.010 1.467 0.000 0.749 -0.455 -0.531 3.102 0.822 0.719 0.987 0.855 0.334 1.101 0.946 +1 1.468 -0.625 0.233 1.181 -0.277 0.839 2.804 -1.364 0.000 1.400 -0.430 1.185 2.215 0.793 0.726 -1.100 2.548 0.854 0.232 -0.004 0.000 0.585 0.771 0.985 1.166 1.235 1.076 0.827 +1 0.933 1.262 -0.164 0.417 0.814 0.518 -0.250 1.410 2.173 0.466 1.327 -0.893 0.000 0.749 0.670 1.295 0.000 1.146 -0.972 -0.458 3.102 0.883 0.822 0.993 0.967 0.897 0.799 0.754 +1 0.979 -0.755 -0.507 1.468 -1.458 0.870 -1.213 -0.142 0.000 1.129 -0.422 0.123 0.000 1.852 -0.338 1.711 0.000 2.136 -0.587 0.728 1.551 0.989 1.049 1.254 0.875 1.364 1.106 1.031 +0 1.599 0.746 -1.571 1.297 1.247 0.587 -0.515 -0.179 2.173 0.682 0.811 0.105 0.000 0.753 0.773 -0.437 2.548 0.531 1.328 1.563 0.000 0.817 0.931 1.131 0.921 0.641 0.924 0.795 +1 0.829 1.447 1.599 0.221 -1.565 1.473 -2.364 -0.738 0.000 1.360 0.811 1.565 0.000 2.473 0.210 0.222 2.548 1.109 0.859 0.929 0.000 0.881 0.765 0.985 1.161 0.998 0.974 0.834 +1 1.230 -2.115 1.270 0.887 0.717 0.777 -1.067 -0.060 0.000 0.600 -0.492 -0.335 2.215 0.564 -0.915 -1.647 2.548 1.090 -0.098 -1.020 0.000 0.728 0.552 0.977 0.659 0.593 0.690 0.650 +1 0.725 -0.602 -1.143 1.057 0.170 0.956 -0.497 1.723 0.000 0.915 -0.710 -0.704 0.000 1.275 -0.251 0.261 2.548 1.387 -0.646 1.213 3.102 0.790 1.032 1.123 0.797 0.810 0.672 0.677 +0 0.642 0.888 -0.279 0.848 1.307 0.801 1.405 -0.526 2.173 0.476 1.743 0.494 0.000 0.588 2.072 1.671 0.000 1.295 -0.601 1.599 1.551 0.735 0.882 1.012 0.882 1.719 1.078 0.875 +0 0.492 0.150 -1.313 0.819 -0.098 0.709 -0.148 -1.646 0.000 0.905 -0.352 0.720 2.215 0.420 -1.137 -0.997 0.000 0.949 0.652 0.129 3.102 0.785 0.952 0.993 0.561 0.660 0.723 0.746 +0 0.737 -1.336 1.561 0.974 -0.454 0.808 -0.632 0.739 2.173 0.510 0.186 0.444 0.000 1.313 0.094 -1.345 0.000 1.123 0.015 -1.038 3.102 0.762 0.785 1.139 0.961 1.062 0.785 0.685 +0 0.607 -2.180 1.128 0.096 -0.062 1.271 -0.991 1.622 2.173 1.444 -0.295 -0.400 2.215 0.773 0.827 -0.187 0.000 0.469 -1.103 0.834 0.000 0.863 1.045 0.995 0.847 2.056 1.249 1.065 +1 1.130 0.768 0.132 0.347 1.455 1.421 0.130 1.482 2.173 1.036 0.348 -0.405 0.000 0.540 -1.784 -0.248 0.000 0.507 0.986 -0.820 3.102 1.655 1.077 0.988 1.094 0.929 1.155 0.970 +1 0.611 -1.305 -0.121 0.697 0.494 1.343 0.738 -1.289 2.173 0.680 0.729 1.283 2.215 0.894 -0.652 0.095 0.000 1.034 0.233 0.330 0.000 0.845 1.029 0.981 1.413 1.030 1.079 0.968 +0 1.877 1.268 -1.203 0.627 1.454 1.064 0.772 0.527 
2.173 1.503 -1.440 -0.624 0.000 1.365 0.250 1.022 0.000 1.076 1.257 -0.014 3.102 0.541 0.816 1.021 1.340 0.678 0.922 0.826 +1 1.338 0.144 -0.177 0.338 -1.198 0.917 0.141 0.877 1.087 0.845 -0.133 -0.548 0.000 1.335 0.741 1.559 2.548 1.256 1.806 -1.644 0.000 0.840 1.283 0.985 0.946 0.923 1.034 0.908 +1 1.694 -0.752 0.165 0.844 1.005 0.706 0.110 1.408 0.000 0.727 0.294 -1.367 0.000 0.726 -0.010 -0.608 2.548 0.777 0.464 -1.012 3.102 0.920 0.836 1.138 0.925 0.263 0.675 0.770 +0 0.445 1.682 -0.952 0.670 -1.073 0.569 1.101 0.296 0.000 0.703 -0.073 -0.871 0.000 0.766 0.033 1.305 1.274 0.608 0.341 1.580 1.551 1.440 1.034 1.000 0.557 0.161 0.630 0.609 +0 0.879 0.731 -0.161 1.550 -0.734 0.858 1.300 0.741 2.173 0.701 1.133 1.707 2.215 0.519 2.360 1.449 0.000 0.776 0.572 0.371 0.000 0.966 0.763 0.985 0.993 0.875 0.977 0.870 +1 0.465 -0.542 0.995 1.102 -1.410 0.774 1.078 1.721 0.000 0.957 0.039 -0.222 2.215 1.327 0.637 0.369 2.548 0.774 -0.209 0.602 0.000 0.889 1.061 0.987 0.877 0.729 0.850 0.779 +1 0.982 -0.028 -0.451 1.189 0.560 1.085 0.683 1.312 2.173 0.718 0.507 0.562 2.215 1.370 0.449 -0.501 0.000 1.408 0.610 -1.117 0.000 0.829 0.972 1.183 1.209 0.820 0.924 0.883 +0 0.772 -0.035 -0.370 0.665 1.344 1.072 0.343 1.011 0.000 1.143 0.193 -0.789 0.000 0.735 0.994 -0.315 2.548 0.770 -0.404 1.600 3.102 2.354 1.346 0.992 0.684 0.758 0.909 0.764 +1 1.030 -0.752 1.278 0.891 1.737 0.500 0.275 -0.379 2.173 0.289 0.792 0.369 0.000 0.326 -0.812 0.873 2.548 0.878 -0.862 -0.457 0.000 0.828 0.598 0.985 0.514 0.551 0.740 0.682 +1 1.174 1.794 0.459 0.792 1.346 1.255 1.092 -0.776 2.173 0.423 0.084 0.031 0.000 0.478 1.347 -1.710 2.548 0.923 -2.391 1.741 0.000 0.918 1.112 0.987 0.608 0.742 0.878 0.787 +0 1.561 -1.619 1.471 0.318 -1.273 0.656 -0.016 0.185 2.173 0.571 -0.232 1.322 1.107 1.175 -1.711 -0.366 0.000 0.439 -0.742 -1.167 0.000 0.664 0.932 0.986 1.175 0.776 0.806 0.800 +1 2.081 0.738 0.887 0.583 1.512 0.602 -0.270 -0.970 1.087 0.735 -0.822 -1.203 0.000 2.013 -0.264 0.024 2.548 1.059 -0.124 1.721 0.000 0.692 1.209 0.984 1.150 1.070 1.053 0.964 +0 1.168 1.045 -1.410 0.729 -1.101 0.705 0.749 0.373 2.173 0.996 0.809 -0.385 0.000 1.465 0.085 -1.502 0.000 2.173 -0.136 1.100 3.102 0.870 0.939 0.978 1.143 1.012 1.130 1.009 +1 0.837 -1.009 1.163 1.118 -1.660 0.555 -0.238 0.132 2.173 0.698 -2.019 -0.931 0.000 1.366 0.955 0.170 0.000 2.147 -0.229 -1.106 0.000 0.883 0.956 0.987 1.096 0.739 0.924 1.056 +0 2.381 -1.203 0.390 0.738 1.013 1.205 0.018 -1.425 0.000 0.848 -1.173 -0.616 0.000 1.165 -0.776 0.945 2.548 1.615 1.759 -0.650 0.000 2.002 1.043 0.986 0.646 0.360 0.878 1.011 +0 0.630 -1.053 -0.137 0.832 1.206 0.949 -0.516 -0.697 2.173 1.199 -1.140 1.142 0.000 0.463 0.312 0.091 2.548 0.637 -0.171 -1.313 0.000 1.077 0.884 0.990 1.004 0.652 0.835 0.765 +1 0.747 -0.331 0.636 1.303 1.250 1.015 0.049 -0.293 0.000 0.643 0.550 0.987 2.215 0.687 -0.399 -0.905 2.548 0.974 0.977 -1.456 0.000 1.593 0.992 0.990 0.855 0.792 0.778 0.921 +0 0.664 -0.213 -1.731 0.881 -1.082 0.563 0.285 -1.016 0.000 1.087 0.665 0.496 1.107 0.541 0.038 0.114 0.000 1.305 -0.369 1.049 3.102 0.854 0.950 0.985 0.779 0.820 0.937 0.830 +0 0.860 1.835 -0.534 0.723 0.849 1.307 0.502 0.801 2.173 1.341 0.842 -0.913 2.215 0.676 0.888 1.580 0.000 0.481 -1.975 -1.115 0.000 1.670 1.587 1.035 1.245 1.979 1.363 1.294 +1 1.913 -0.467 1.698 0.906 -1.395 0.912 -0.042 -0.348 2.173 0.427 1.181 0.668 2.215 0.858 0.548 -1.659 0.000 1.843 -1.988 0.172 0.000 0.485 0.969 0.998 1.189 0.954 1.082 0.892 +1 1.451 0.077 -0.796 0.620 1.665 0.813 -0.071 1.047 2.173 0.622 -1.494 0.611 2.215 
0.338 1.395 1.164 0.000 0.640 -2.084 -0.433 0.000 1.962 1.291 1.048 1.012 0.929 0.959 0.940 +0 1.067 0.278 0.954 0.488 -0.887 1.573 -0.315 -1.626 2.173 1.747 0.941 -0.121 0.000 0.377 0.592 0.028 2.548 1.158 -0.059 0.651 0.000 1.558 0.805 0.996 1.037 1.072 1.336 1.049 +1 0.811 0.071 -0.966 0.796 1.316 1.005 0.119 -0.423 1.087 0.743 0.832 -0.512 0.000 1.333 0.707 1.358 0.000 1.652 0.058 -1.691 3.102 1.045 0.941 0.986 0.904 1.241 0.964 0.811 +1 0.969 -0.318 0.620 0.354 1.708 0.935 0.123 -0.883 1.087 0.663 -0.764 -1.063 0.000 0.613 1.893 1.252 0.000 0.921 -0.906 0.383 0.000 0.939 0.807 0.996 0.493 0.752 0.711 0.631 +0 1.487 -0.224 -1.573 1.524 1.538 0.526 0.813 0.970 0.000 1.506 -0.882 -0.251 0.000 0.702 -0.152 0.400 2.548 0.452 -0.893 -0.936 3.102 2.500 1.368 0.993 0.907 0.450 0.825 0.950 +0 1.047 -0.816 -0.010 0.827 0.333 1.003 -0.867 1.124 2.173 0.558 -0.049 1.413 1.107 0.928 0.213 -1.155 0.000 1.893 0.500 -0.671 0.000 0.673 0.840 0.989 1.069 0.546 0.985 1.081 +1 1.226 -0.234 -0.511 0.240 -1.164 0.573 -0.625 1.407 0.000 0.880 0.877 0.085 2.215 1.184 0.031 0.727 2.548 0.938 -0.699 -1.120 0.000 0.859 0.891 0.981 1.053 0.767 0.849 0.822 +0 1.847 0.495 -0.590 0.105 -0.422 1.248 0.153 -1.143 0.000 2.363 0.502 1.526 0.000 1.841 0.540 0.243 2.548 1.670 1.102 0.115 3.102 1.056 1.337 0.988 0.950 0.523 1.022 0.885 +1 1.537 -0.330 0.614 0.831 0.185 0.707 -0.560 -1.445 0.000 0.580 -0.555 1.482 0.000 0.921 0.475 -0.742 2.548 0.386 0.280 -1.199 3.102 0.826 0.879 0.979 0.680 0.187 0.690 0.635 +1 1.338 0.809 -1.659 0.176 -0.641 1.262 0.039 -0.301 2.173 0.896 1.196 0.609 0.000 0.689 1.211 -1.305 0.000 0.605 0.287 -1.686 3.102 1.192 0.774 0.985 1.367 0.888 0.904 0.860 +0 1.150 0.764 0.534 2.573 0.700 1.181 -0.827 -1.126 0.000 1.569 0.205 -1.146 2.215 0.576 1.437 1.146 0.000 0.644 -0.911 0.061 0.000 1.181 0.832 1.001 2.006 0.931 1.331 1.550 +0 1.250 0.715 0.441 0.117 -0.880 2.270 0.313 -1.210 0.000 1.246 0.010 0.574 2.215 0.967 2.219 0.008 0.000 0.672 -0.183 1.174 3.102 3.096 1.735 0.998 0.650 0.436 1.373 1.140 +1 0.646 -1.430 1.648 0.300 0.766 1.251 -0.441 -0.891 0.000 1.297 -1.001 0.558 2.215 1.055 -0.439 -1.525 2.548 1.064 -0.057 -0.027 0.000 0.905 0.878 0.979 0.824 1.235 0.737 0.665 +1 0.570 -0.383 -0.135 0.935 1.683 0.572 0.077 1.611 2.173 0.426 -1.591 -0.121 0.000 0.844 -0.521 -0.647 2.548 0.838 -2.192 0.441 0.000 0.552 0.790 1.009 0.644 0.825 0.852 0.753 +0 1.381 -1.826 0.712 0.589 0.939 0.962 -0.478 -1.145 2.173 0.811 -1.026 -0.236 0.000 0.370 0.392 1.558 0.000 0.496 -0.618 1.739 3.102 1.083 0.987 1.001 0.607 0.389 0.836 0.772 +1 0.904 1.680 -1.541 1.132 -1.041 1.572 -0.151 -0.135 0.000 2.819 1.405 0.387 0.000 2.705 -0.257 -1.685 2.548 2.540 0.652 -1.612 0.000 0.847 1.043 0.998 2.629 0.325 1.651 1.687 +0 1.299 -0.694 1.183 0.765 0.181 0.531 -2.772 -0.286 0.000 0.229 -1.205 -0.751 0.000 0.729 -0.893 -1.603 2.548 0.366 0.171 -1.480 3.102 0.705 0.862 1.084 0.605 0.252 0.629 0.723 +1 0.816 -1.119 0.202 0.792 -1.373 0.831 -0.059 -0.635 0.000 1.534 0.971 1.511 0.000 1.396 0.950 -0.170 0.000 0.661 0.323 0.674 3.102 1.373 0.939 1.101 0.810 0.137 0.671 0.823 +0 2.579 1.044 0.380 0.514 -0.477 0.911 0.070 -1.724 2.173 0.907 -0.753 -1.734 0.000 0.689 1.431 -0.881 0.000 0.375 -0.467 -1.062 0.000 0.724 0.873 1.112 1.072 1.060 1.113 0.913 +1 1.344 -1.256 1.333 0.535 0.291 0.850 2.465 -0.917 0.000 0.931 -0.180 -0.170 2.215 1.136 -0.302 0.998 0.000 0.484 0.877 1.350 3.102 3.826 1.978 0.986 1.061 0.715 1.475 1.788 +0 0.898 -0.140 -1.102 0.982 0.024 1.452 -0.587 0.800 0.000 2.042 0.511 -1.421 2.215 1.195 1.249 -0.310 
0.000 1.548 -0.227 -0.015 1.551 0.994 1.070 1.106 1.182 1.666 1.065 0.948 +0 0.415 -1.384 -1.051 0.692 0.473 1.044 -0.323 -1.191 1.087 0.792 1.571 0.322 2.215 1.124 0.476 0.854 0.000 0.394 1.990 -0.651 0.000 1.066 0.797 0.989 0.882 2.002 1.126 0.974 +1 0.947 0.672 0.171 1.010 0.997 1.010 0.416 -1.586 2.173 1.910 0.278 0.290 0.000 1.192 -0.394 1.720 2.548 1.101 1.899 -0.050 0.000 1.247 1.431 0.990 1.073 0.640 1.168 0.983 +0 0.964 0.141 0.787 1.260 -0.184 0.572 -0.173 -1.123 2.173 0.636 0.115 1.470 0.000 0.735 0.856 -0.901 1.274 0.933 -1.133 0.858 0.000 0.967 1.028 1.171 0.932 0.500 0.716 0.748 +0 0.591 1.725 -0.151 1.602 0.229 1.456 -0.315 1.341 0.000 0.618 0.453 -0.874 2.215 0.581 -0.942 1.331 0.000 1.134 -1.688 -0.899 0.000 0.933 1.110 0.985 0.665 0.369 0.954 1.078 +1 1.049 2.093 -1.375 0.922 1.368 0.707 0.737 0.516 1.087 0.728 1.069 -0.613 2.215 0.588 0.616 1.656 0.000 0.853 -0.454 -0.310 0.000 0.918 0.866 0.994 0.857 0.919 0.836 0.814 +0 1.128 -1.356 0.629 0.494 -1.614 0.572 -0.965 -0.153 1.087 0.775 -1.510 -1.382 0.000 0.655 -1.384 -0.698 0.000 0.914 -0.113 1.261 3.102 0.631 0.816 0.985 0.766 0.803 0.653 0.646 +0 0.941 -1.123 -1.040 2.095 0.802 1.836 -0.672 -1.644 1.087 1.276 -0.633 -0.164 0.000 1.282 -0.071 -0.454 0.000 1.198 0.278 0.410 3.102 0.763 0.831 1.938 1.678 1.720 1.321 1.285 +1 0.511 -0.449 1.122 0.284 0.557 1.041 -2.229 0.984 0.000 2.503 -0.361 -0.643 2.215 1.992 -0.106 1.343 0.000 0.756 0.502 0.318 0.000 1.189 0.860 0.989 1.429 0.813 1.123 0.983 +0 1.709 -0.562 -0.247 1.014 0.140 0.631 0.776 -1.329 2.173 0.944 1.234 1.623 0.000 0.323 -1.074 1.320 2.548 0.622 0.791 1.099 0.000 0.482 0.760 0.993 1.465 0.751 0.948 1.059 +0 0.690 2.262 0.632 1.367 -0.377 0.829 1.437 0.563 2.173 0.967 0.913 1.680 0.000 0.956 0.548 -0.780 2.548 0.709 1.449 -0.789 0.000 0.964 1.090 1.061 1.033 1.146 0.873 0.862 +0 0.350 -1.405 0.272 1.234 -1.399 0.990 0.161 -0.262 0.000 0.712 -1.581 1.588 0.000 1.015 -0.704 1.114 0.000 1.683 0.254 0.250 3.102 0.788 1.259 0.991 0.517 0.710 0.765 0.704 +0 1.415 0.037 1.240 0.926 0.690 1.024 0.933 -0.636 0.000 1.092 0.501 -0.982 2.215 0.975 -0.548 0.993 2.548 0.798 0.489 0.035 0.000 0.823 0.742 0.989 0.514 1.252 0.905 0.982 +1 1.085 -0.172 -0.509 0.299 1.402 1.361 -0.408 1.363 0.000 0.784 -1.006 0.008 2.215 1.087 0.292 -0.630 2.548 0.546 -0.095 0.136 0.000 1.194 1.228 0.991 0.668 0.894 0.960 0.817 +1 0.856 -0.263 -0.637 0.927 1.480 0.794 0.272 -1.508 2.173 1.246 -0.871 0.723 0.000 0.830 -0.515 -0.251 2.548 0.621 -0.742 0.191 0.000 0.529 0.672 1.165 0.780 1.016 0.865 0.773 +1 0.773 -0.461 -0.169 0.771 0.663 0.975 -0.060 1.362 0.000 1.377 0.461 -0.877 2.215 0.864 -1.139 0.306 0.000 1.177 -0.569 -1.433 3.102 0.733 0.869 0.987 1.339 0.894 0.931 0.847 +0 0.592 -0.341 -1.697 0.574 0.492 1.049 -0.183 -0.766 2.173 1.029 -0.208 0.636 0.000 1.185 0.145 1.246 2.548 0.760 -0.842 -0.944 0.000 1.234 0.946 0.979 1.019 1.369 0.921 0.821 +1 0.799 -1.181 -0.953 0.058 1.540 1.267 0.052 1.393 0.000 1.605 0.229 -0.192 0.000 1.369 -0.917 -1.305 2.548 1.188 -0.736 -0.103 3.102 0.943 0.763 0.981 0.665 0.863 0.869 0.749 +0 0.486 0.917 -1.428 0.953 0.209 0.477 -0.218 1.431 2.173 0.977 0.292 0.635 0.000 1.078 0.262 -0.502 0.000 0.552 -1.696 -1.136 0.000 1.311 1.029 0.988 0.550 0.556 0.625 0.632 +1 1.416 -0.021 0.339 1.366 0.062 2.933 2.773 1.684 0.000 2.287 0.487 -0.305 1.107 1.477 -0.253 0.013 2.548 1.081 -0.116 -1.267 0.000 5.742 5.296 0.988 0.961 0.956 3.801 3.027 +0 0.879 -0.469 1.725 1.782 1.294 0.822 -0.636 -0.305 0.000 1.469 -0.459 0.256 2.215 0.905 -0.280 -1.149 0.000 1.283 -0.763 
-1.042 3.102 1.100 1.134 0.990 0.984 1.174 1.031 0.982 +0 0.558 0.054 -0.966 1.552 0.063 1.944 -1.411 -1.711 2.173 2.035 0.348 -0.189 2.215 1.361 -1.597 1.012 0.000 0.407 0.691 1.267 0.000 1.394 1.504 1.032 0.726 4.128 2.052 1.661 +1 1.555 -1.215 1.137 1.050 1.113 1.293 -2.137 -0.846 0.000 0.974 -0.521 1.120 0.000 1.654 -0.520 -0.562 1.274 1.343 0.070 0.315 3.102 0.839 1.103 0.977 1.260 0.895 1.167 0.989 +0 1.372 -1.143 1.132 0.217 0.252 1.012 -1.329 0.152 2.173 1.175 -1.867 -1.175 0.000 0.574 0.077 0.748 2.548 0.464 1.776 -1.343 0.000 0.814 0.641 0.977 0.952 0.881 0.974 0.860 +0 1.455 -0.422 0.302 0.582 -1.719 1.187 -0.801 -1.720 2.173 1.139 -1.354 -0.258 2.215 0.772 -0.060 0.998 0.000 0.743 -0.531 -1.029 0.000 0.844 1.008 1.235 1.154 1.733 1.061 0.866 +0 0.555 0.969 0.917 1.102 1.643 0.656 -0.543 -1.088 2.173 1.208 0.587 -0.206 2.215 0.590 -0.101 1.229 0.000 0.483 1.454 1.091 0.000 0.892 0.940 0.980 1.259 1.228 1.236 0.973 +1 2.378 0.123 0.742 0.116 1.479 1.044 -0.611 -1.045 2.173 0.560 -0.612 0.118 1.107 0.686 1.684 1.528 0.000 0.623 0.829 0.963 0.000 0.474 0.985 0.978 1.424 0.975 1.032 0.985 +1 1.042 0.051 0.154 0.870 -0.234 1.077 0.370 -1.568 2.173 0.419 -0.289 1.095 1.107 0.346 -0.246 1.630 0.000 0.443 -1.044 0.189 0.000 0.471 0.765 0.987 0.761 0.748 0.866 0.705 +0 0.613 -1.118 1.415 1.209 1.334 1.133 0.753 -0.738 0.000 1.112 -1.066 0.801 1.107 1.404 -0.312 -0.849 2.548 1.102 0.393 -0.156 0.000 0.889 0.919 0.986 0.935 1.419 1.237 1.616 +0 0.386 0.831 -1.302 1.893 0.274 1.105 0.233 0.397 2.173 1.095 -2.515 -1.581 0.000 1.545 0.424 -1.506 2.548 0.990 1.367 -1.249 0.000 5.555 3.481 1.171 0.828 1.622 2.418 2.069 +1 0.845 -1.609 0.810 1.558 1.451 0.335 -1.312 0.191 0.000 0.554 0.584 -0.726 1.107 0.354 0.910 -1.239 2.548 0.427 0.697 1.375 0.000 0.913 0.721 0.994 1.336 0.230 1.216 0.992 +0 0.388 1.704 1.305 0.284 0.853 0.952 1.375 0.538 0.000 1.331 0.463 -1.178 2.215 0.822 1.208 -0.846 2.548 0.676 0.770 1.394 0.000 0.900 0.934 0.997 0.879 0.587 0.913 0.802 +1 1.442 -0.925 1.259 0.212 -0.909 3.287 -1.614 -0.248 0.000 1.999 -0.292 1.527 2.215 0.926 -1.405 -1.179 0.000 0.380 -1.185 0.925 3.102 0.604 1.032 0.984 0.463 0.618 0.630 0.621 +1 1.187 -0.438 1.363 1.574 -1.497 1.670 -0.946 -1.536 2.173 2.983 -0.570 0.003 0.000 1.277 -0.760 0.631 2.548 0.373 0.043 -0.297 0.000 0.577 0.912 1.015 0.832 1.692 1.510 1.331 +1 0.384 -1.929 -1.431 1.918 0.866 0.788 -1.299 -1.085 2.173 0.428 -0.593 -0.693 0.000 0.336 0.424 0.490 1.274 0.665 -1.755 0.121 0.000 0.736 0.704 1.043 0.953 0.906 0.898 0.755 +0 0.485 1.476 -1.048 1.986 -0.266 1.090 -0.293 0.950 0.000 1.013 0.149 -1.665 2.215 1.553 1.353 0.014 0.000 1.163 -1.037 -0.822 3.102 0.820 0.951 0.984 2.193 1.004 1.606 1.529 +1 0.800 -0.470 0.891 1.003 -0.598 0.661 1.229 0.785 0.000 1.181 -0.482 -1.298 2.215 0.840 -0.409 0.633 1.274 0.472 -0.167 0.004 0.000 0.868 0.780 1.209 0.905 1.044 0.913 0.833 +0 0.785 0.352 -0.465 1.626 1.507 1.154 0.304 0.362 0.000 1.221 0.906 -1.094 2.215 1.395 -0.789 -1.121 0.000 0.558 -1.105 0.432 0.000 0.986 0.741 1.532 1.072 1.004 0.902 0.878 +1 2.118 -1.267 0.630 0.610 -1.467 1.291 -0.730 -0.591 0.000 0.990 -1.643 1.628 0.000 0.911 -0.166 -1.588 0.000 1.620 0.229 1.410 3.102 0.915 0.701 1.496 1.264 0.843 0.928 0.964 +1 1.359 -0.088 -0.511 0.603 1.091 1.219 -0.497 -0.967 2.173 1.148 0.278 0.880 2.215 0.777 -0.136 1.147 0.000 1.278 -0.402 1.737 0.000 0.928 0.744 1.244 0.976 1.871 1.033 0.891 +0 1.021 0.166 1.100 0.922 -1.050 0.688 0.604 -0.003 2.173 0.474 0.346 -0.505 0.000 1.134 -0.526 1.316 0.000 0.967 0.963 -0.925 1.551 1.247 
0.995 1.255 0.951 0.681 0.765 0.727 +1 0.419 1.625 -1.273 0.463 1.674 0.917 1.419 -0.246 0.000 0.915 0.232 -0.165 1.107 1.746 0.267 1.515 2.548 0.731 -2.251 0.989 0.000 4.994 2.768 0.974 0.685 1.342 1.850 1.409 +1 0.953 0.066 1.573 0.384 -0.934 0.594 -0.620 -0.595 2.173 0.327 -1.509 1.617 2.215 0.387 -1.051 0.022 0.000 0.920 0.640 -0.516 0.000 0.796 0.764 0.994 0.755 0.669 0.553 0.568 +0 2.077 -0.626 -0.641 0.502 -0.547 1.123 -0.659 0.790 1.087 0.956 -0.083 -1.168 0.000 0.681 -1.111 0.373 2.548 0.953 -0.643 -1.639 0.000 1.151 0.970 0.986 1.426 0.504 0.949 0.910 +1 0.282 -1.190 -0.474 1.056 1.229 0.628 0.866 -0.013 2.173 1.082 0.182 -1.174 2.215 1.039 -0.194 0.324 0.000 0.932 -0.784 1.711 0.000 1.107 1.081 0.987 2.156 1.132 1.646 1.291 +0 0.958 -0.018 -0.031 1.109 -0.873 1.060 1.405 1.241 2.173 0.769 -0.539 1.697 0.000 1.335 -0.749 -0.514 2.548 0.630 0.246 0.386 0.000 0.928 0.952 0.989 1.466 2.514 1.316 1.085 +0 0.608 -0.582 0.273 1.292 0.192 0.736 -0.183 1.162 2.173 0.650 -0.253 -1.144 0.000 1.173 0.293 -0.879 0.000 0.880 1.246 1.027 0.000 0.909 0.785 0.977 1.195 1.203 1.256 1.708 +0 0.909 0.076 1.658 2.207 -1.278 1.372 -0.332 0.411 2.173 0.360 -1.976 0.681 0.000 0.702 0.406 -1.497 0.000 1.236 0.116 -0.386 3.102 1.413 1.045 0.988 1.772 0.966 1.182 1.071 +0 0.905 0.422 1.021 0.617 -0.916 0.649 1.023 -0.100 0.000 0.731 0.714 -0.785 0.000 1.585 1.220 1.633 1.274 0.714 1.075 0.914 3.102 0.875 0.759 1.020 0.822 0.491 0.757 0.693 +1 1.363 0.719 -0.809 0.356 -0.158 1.253 1.426 0.815 2.173 0.690 1.173 -0.548 0.000 0.589 0.046 1.433 2.548 0.921 0.195 -1.329 0.000 0.853 0.731 0.989 1.264 0.984 0.911 0.826 +1 0.398 1.952 1.666 0.457 -1.278 1.591 0.129 -0.196 0.000 1.003 0.564 -1.111 1.107 1.124 0.503 1.295 2.548 1.835 -0.513 -1.425 0.000 1.009 1.056 0.989 0.654 0.933 0.748 0.671 +0 0.515 0.347 -1.396 1.738 -1.448 0.720 0.807 -0.248 2.173 0.516 -1.173 0.779 0.000 0.865 0.830 1.198 0.000 1.254 0.325 0.306 3.102 0.946 0.947 0.991 1.079 0.527 0.883 0.929 +1 0.554 -0.461 0.015 0.637 -1.568 0.520 -1.282 1.703 0.000 0.584 -1.070 0.012 0.000 0.971 -0.016 1.527 2.548 0.486 1.170 0.409 3.102 1.175 0.963 0.988 0.578 0.601 0.754 0.687 +0 0.356 1.693 0.726 1.208 -0.809 0.436 0.743 1.252 2.173 0.868 0.099 -1.308 2.215 0.589 -0.071 -0.129 0.000 0.393 0.809 0.167 0.000 0.323 0.635 0.990 0.982 0.733 0.992 0.828 +1 3.036 -0.339 -1.591 0.431 -1.148 1.845 0.268 -0.085 0.000 0.952 -0.010 1.685 2.215 1.540 -0.455 0.300 2.548 1.620 -0.145 1.259 0.000 0.746 0.756 0.994 1.405 1.261 0.973 0.811 +1 1.472 0.834 -0.360 1.221 0.076 1.352 0.558 1.724 2.173 0.605 0.892 0.415 0.000 0.362 -0.271 0.589 2.548 0.514 -0.533 -1.592 0.000 0.943 1.076 0.978 0.607 0.839 1.000 0.830 +0 0.529 -1.785 -0.663 0.412 0.155 0.494 -0.538 -1.395 2.173 0.723 -1.027 1.140 0.000 0.502 -1.215 -1.166 2.548 0.754 -1.703 0.158 0.000 0.894 0.881 0.983 0.567 0.283 0.557 0.560 +0 0.382 -0.738 -1.493 1.946 1.048 0.935 0.890 -0.828 2.173 0.784 1.611 -0.011 0.000 0.651 0.465 -1.480 2.548 0.548 2.411 0.940 0.000 0.856 0.921 0.984 1.399 0.571 0.905 1.044 +0 0.387 1.501 -0.468 2.235 0.531 0.812 0.882 1.185 2.173 0.997 0.874 -0.865 0.000 0.772 1.414 -1.628 0.000 0.580 0.021 -1.035 0.000 0.964 1.112 1.009 0.995 0.898 0.833 0.867 +1 0.376 2.073 0.598 1.021 -1.246 0.911 0.941 -0.626 2.173 0.858 0.243 0.499 0.000 1.401 0.739 -1.440 2.548 1.062 -0.426 1.285 0.000 0.943 1.145 0.983 0.773 0.947 0.976 0.857 +1 0.314 1.057 -0.787 0.841 1.269 0.663 -1.155 1.391 0.000 1.266 -0.931 -0.405 2.215 0.970 0.240 -0.126 2.548 0.598 0.289 1.494 0.000 0.804 1.047 0.988 2.557 0.820 1.636 
1.514 +1 0.992 0.438 -0.627 1.443 1.623 0.784 -0.125 1.073 0.000 1.158 0.491 1.364 0.000 1.035 0.872 0.155 2.548 1.034 -0.272 -0.695 3.102 0.833 1.074 1.487 1.034 0.772 0.854 0.847 +0 0.568 0.736 -0.123 1.885 0.807 0.808 2.065 -0.861 0.000 0.487 1.332 1.459 2.215 0.388 2.030 -0.465 0.000 0.858 -0.354 -1.713 3.102 0.359 0.727 1.066 0.866 0.619 0.807 0.872 +1 1.210 -0.124 1.445 0.413 1.641 0.473 -0.929 0.941 0.000 0.550 1.226 -0.848 2.215 0.984 -0.267 -0.197 2.548 0.636 -1.724 -0.962 0.000 0.921 0.886 1.002 0.903 0.799 0.852 0.731 +1 1.016 0.492 -0.322 0.531 1.569 0.730 -0.866 -0.028 0.000 1.075 -0.048 1.436 2.215 2.102 0.459 0.653 2.548 2.269 -0.021 -1.312 0.000 0.489 1.224 1.009 0.895 1.128 1.075 0.936 +0 1.623 -0.061 0.983 0.969 0.476 0.626 0.925 -0.640 0.000 1.009 0.106 -0.512 0.000 1.086 0.428 -1.703 2.548 0.599 0.243 -1.172 0.000 1.039 0.972 0.993 0.880 0.773 0.742 0.722 +0 0.475 -1.581 1.691 1.960 -0.557 1.004 -0.232 1.481 2.173 0.708 -0.396 0.500 0.000 0.566 -1.066 -0.585 2.548 0.381 1.992 1.079 0.000 1.331 1.093 1.201 1.538 1.009 0.986 1.142 +0 0.290 0.429 0.626 0.529 -1.402 0.600 0.672 0.139 2.173 0.519 2.277 1.671 0.000 0.675 0.301 -0.868 2.548 0.997 0.892 1.213 0.000 0.757 0.759 0.989 0.719 0.641 0.731 0.788 +0 0.287 -0.457 0.905 1.937 -1.665 1.984 0.503 -1.193 0.000 1.504 0.153 0.622 2.215 2.698 -0.625 0.520 2.548 1.772 0.294 -0.066 0.000 2.445 2.256 0.992 1.665 0.951 1.857 1.530 +1 0.900 0.531 -0.660 1.408 0.124 1.369 -0.232 -1.486 0.000 0.582 -0.822 -0.197 1.107 1.054 -0.543 0.777 0.000 0.522 0.792 0.535 0.000 0.720 0.712 1.013 0.707 0.676 0.616 0.609 +1 1.370 -0.455 -0.731 0.558 -1.726 1.043 0.124 0.722 2.173 1.292 -0.862 0.068 2.215 0.971 -0.716 1.725 0.000 1.158 0.031 -1.361 0.000 0.960 1.219 0.987 1.157 1.310 1.089 0.947 +1 2.366 -0.719 -1.389 0.746 -0.676 0.680 -0.352 -0.380 0.000 1.615 -0.190 0.703 2.215 0.298 -1.079 1.028 0.000 0.541 -0.525 -0.889 3.102 0.846 1.040 1.105 0.524 0.856 0.981 0.836 +0 1.442 -0.783 -0.616 0.735 -1.374 0.587 -0.044 0.031 0.000 0.860 -1.968 -1.660 0.000 0.882 -0.943 0.775 2.548 0.915 0.305 -1.692 3.102 2.311 1.388 0.985 0.728 0.755 0.962 0.881 +0 0.413 -0.407 0.016 1.470 -0.182 1.656 0.595 -0.703 2.173 2.424 0.866 1.118 1.107 1.107 0.698 1.433 0.000 1.508 1.112 -1.718 0.000 0.819 1.329 0.991 0.965 2.972 1.532 1.289 +0 0.517 1.589 1.444 0.934 -0.764 0.561 -0.916 -1.150 2.173 0.721 0.703 1.601 2.215 0.806 -0.711 0.957 0.000 1.004 -0.652 0.388 0.000 0.869 0.937 0.992 0.897 1.036 1.237 1.632 +1 0.580 -0.841 -1.196 0.523 0.098 0.727 0.047 -0.479 0.000 1.467 0.391 0.919 2.215 0.890 0.828 -0.920 0.000 1.320 0.803 1.631 3.102 0.852 0.980 0.989 1.542 0.832 1.216 1.151 +1 2.135 1.293 0.039 1.032 0.817 0.679 -1.441 -0.703 0.000 0.964 0.788 1.156 0.000 1.121 0.045 1.665 2.548 0.419 0.729 1.554 0.000 0.718 0.731 1.327 1.354 0.755 0.930 0.810 +0 0.561 -0.668 0.907 0.542 -1.451 0.886 -0.495 0.160 2.173 1.210 -0.257 -1.304 2.215 0.703 0.179 1.164 0.000 0.497 -1.239 -0.064 0.000 0.849 0.893 0.984 0.998 1.487 0.958 0.798 +1 0.773 0.715 1.362 1.246 -0.970 0.956 -0.857 -1.139 2.173 2.339 1.960 0.307 0.000 0.806 0.502 1.721 0.000 0.883 0.211 0.050 0.000 0.939 0.851 1.174 1.242 0.781 0.941 0.822 +1 0.595 -0.458 -0.582 1.226 1.728 0.831 -0.954 0.445 0.000 1.064 -1.000 -1.139 2.215 1.169 -0.825 1.103 0.000 0.877 -1.872 0.113 0.000 0.963 0.855 1.032 0.713 0.349 0.730 0.659 +0 1.032 -0.614 0.328 1.380 0.915 0.482 -1.255 -1.401 2.173 0.512 -0.076 -0.661 2.215 0.409 -1.339 -0.122 0.000 1.625 0.052 -1.407 0.000 1.068 0.761 0.988 1.026 0.646 0.763 0.725 +0 1.345 1.439 
1.728 0.773 1.176 0.713 -0.585 -0.405 2.173 0.727 -0.278 -1.314 2.215 1.075 -0.658 1.125 0.000 1.208 -0.294 0.286 0.000 0.893 0.965 0.985 0.946 0.790 1.018 0.970 +0 1.379 -0.393 1.219 1.394 0.463 0.641 0.740 -0.442 0.000 0.537 1.435 0.833 2.215 1.123 0.433 -1.391 0.000 1.332 -0.743 -0.780 3.102 1.170 1.028 1.209 1.048 1.363 0.968 0.984 +1 0.607 -0.033 0.814 1.553 -1.663 0.495 0.845 1.535 0.000 1.410 0.916 -0.026 1.107 0.428 0.727 -0.472 0.000 0.495 -0.512 -0.918 3.102 0.804 0.969 1.063 0.603 0.846 0.878 0.747 +0 1.040 -1.192 1.466 1.968 -1.434 1.343 -0.463 0.245 0.000 1.590 -1.632 -0.685 0.000 0.850 -1.037 -0.021 0.000 1.632 -0.115 1.400 3.102 0.801 1.253 1.002 0.644 0.224 0.708 0.948 +1 0.462 -0.495 -1.067 0.718 0.884 0.924 1.623 -1.573 0.000 1.301 -0.248 -0.370 0.000 0.825 -1.039 -0.451 0.000 2.921 -0.440 1.019 3.102 0.737 0.646 0.988 0.869 0.820 0.909 0.828 +1 0.825 -0.127 -1.227 0.832 1.343 1.099 0.047 1.348 0.000 0.995 0.840 -0.884 2.215 0.902 -2.159 -0.483 0.000 1.750 -0.024 0.128 3.102 1.318 1.172 0.988 1.048 1.091 0.969 0.899 +1 1.194 0.168 0.819 0.174 -0.744 0.388 0.895 0.141 0.000 1.140 -0.106 -1.513 2.215 1.206 0.079 -0.357 2.548 0.473 -1.752 1.258 0.000 1.506 1.062 0.980 0.937 1.082 0.877 0.809 +1 0.404 -0.694 -0.389 0.426 -0.307 0.419 -2.583 1.002 0.000 0.377 0.178 -0.842 2.215 0.302 -1.403 1.655 0.000 0.785 -1.116 0.851 3.102 0.645 1.104 0.984 0.891 0.640 0.683 0.996 +0 1.522 1.345 1.522 1.025 1.269 0.733 1.734 1.246 0.000 0.889 0.612 -1.259 2.215 2.506 -0.758 -0.130 0.000 0.852 -0.496 -0.525 0.000 0.772 0.857 0.999 1.088 0.619 0.951 1.607 +1 1.772 0.622 0.317 0.525 -0.582 0.858 1.422 1.522 2.173 0.554 0.250 -0.761 0.000 1.121 0.452 -1.348 2.548 0.798 0.134 1.717 0.000 0.697 1.193 0.986 0.950 0.876 0.918 0.844 +1 0.514 -1.289 0.752 0.382 0.939 0.911 -0.062 -1.072 0.000 0.941 -0.925 -1.305 2.215 2.229 1.928 -0.237 0.000 2.514 0.109 1.289 0.000 1.096 2.290 0.974 0.890 0.485 2.140 1.687 +1 0.880 -0.823 1.590 1.395 -0.832 0.578 -1.066 -0.072 0.000 0.747 0.413 1.349 2.215 0.906 -1.275 0.717 0.000 1.267 0.792 -0.313 3.102 0.874 1.202 1.258 1.168 0.906 0.983 0.951 +0 0.366 -0.851 1.117 1.262 0.701 0.884 0.445 -0.961 0.000 0.873 0.608 -0.354 0.000 0.522 -1.654 0.537 0.000 1.181 1.400 1.621 3.102 0.986 0.866 0.980 2.692 0.174 1.802 1.684 +0 0.514 -1.180 -1.719 4.347 -1.296 2.763 -0.199 0.429 2.173 0.846 -1.426 -1.091 2.215 0.309 0.527 0.176 0.000 0.923 -0.567 0.758 0.000 0.494 0.871 0.991 3.938 2.680 2.511 1.813 +0 0.705 0.344 -0.601 0.751 -0.177 0.846 2.057 1.042 0.000 0.846 1.341 0.405 2.215 1.689 0.487 -1.085 2.548 0.637 2.295 -1.566 0.000 0.886 0.916 1.000 0.874 1.355 1.088 0.944 +0 0.909 1.050 0.943 0.719 -0.721 1.242 1.158 -0.084 0.000 1.280 0.067 1.036 2.215 1.401 1.246 -0.869 0.000 1.012 -1.241 1.728 0.000 0.358 1.168 1.117 1.427 1.606 1.139 1.122 +1 0.623 -0.155 1.012 1.447 -0.742 2.547 -0.840 1.098 0.000 2.741 0.399 -0.515 0.000 1.212 -0.405 -1.276 2.548 0.584 0.988 -1.156 0.000 1.102 0.843 1.316 0.669 0.593 0.562 0.567 +0 0.334 -1.496 -0.004 0.958 -1.511 1.015 0.587 1.475 1.087 1.105 -0.662 -0.048 2.215 0.385 1.071 0.970 0.000 0.393 0.743 -0.101 0.000 0.727 1.103 0.990 1.321 1.861 1.785 1.349 +0 0.654 -0.166 0.400 0.448 1.535 0.886 0.299 0.772 0.000 0.626 2.377 -0.374 0.000 1.220 0.409 -1.186 2.548 0.801 -1.100 -1.331 1.551 2.451 1.737 0.989 0.860 0.766 1.330 1.045 +0 0.516 -0.268 1.597 0.954 0.845 1.086 -1.932 -0.602 0.000 0.999 1.206 1.284 1.107 0.946 -0.990 -0.215 0.000 0.843 -0.081 0.482 0.000 0.846 0.645 0.993 0.649 0.943 0.902 0.806 +0 1.145 -0.463 -0.498 0.567 1.050 
0.905 1.445 1.471 0.000 1.397 0.389 -0.695 2.215 1.249 -0.663 0.187 2.548 1.475 -0.432 1.259 0.000 2.072 1.898 1.100 0.707 1.305 1.422 1.175 +0 0.309 1.803 1.452 2.374 0.539 1.059 1.085 -0.903 2.173 0.445 0.644 -1.628 0.000 0.391 -0.906 1.227 2.548 0.448 2.496 1.473 0.000 0.865 0.977 0.982 1.380 1.253 1.060 0.913 +1 0.619 0.904 0.362 0.835 1.260 0.644 -0.173 -1.608 2.173 0.624 0.435 0.543 0.000 0.516 -0.665 -0.340 2.548 0.737 1.487 -0.388 0.000 0.914 1.083 0.989 1.036 0.684 0.915 0.804 +0 1.550 1.100 1.585 0.744 -0.921 0.691 0.894 -1.007 0.000 1.059 0.833 0.829 2.215 1.109 -0.017 0.216 2.548 0.846 1.056 -0.464 0.000 0.590 0.952 1.150 0.965 0.798 0.866 0.819 +1 0.468 0.275 1.620 1.108 -0.690 1.480 -2.234 0.616 0.000 1.908 0.620 -0.762 2.215 1.983 -1.956 1.097 0.000 1.327 -0.359 -0.505 3.102 1.303 1.960 0.992 0.747 0.871 2.870 2.490 +0 0.478 1.998 0.034 0.805 -0.718 0.654 -0.970 0.912 0.000 1.167 0.555 -1.637 2.215 1.224 -0.086 -0.141 2.548 0.683 -1.148 -0.285 0.000 0.923 0.926 0.983 0.917 1.312 0.995 0.970 +1 0.791 0.754 -0.208 0.801 -0.751 0.818 -0.097 0.456 0.000 1.365 0.153 1.460 1.107 0.825 0.703 -1.243 1.274 1.185 0.275 -0.131 0.000 0.826 1.001 0.998 1.116 0.812 0.884 0.819 +0 1.452 0.792 -0.916 1.250 -0.155 0.763 -0.843 1.094 1.087 1.690 0.110 -0.688 2.215 2.089 -1.000 0.561 0.000 1.147 1.788 1.710 0.000 0.973 0.800 1.182 0.882 1.867 1.308 1.417 +0 0.355 0.299 -1.341 1.512 0.258 0.618 -0.660 -1.286 0.000 0.769 -0.071 0.655 2.215 0.772 -1.575 -1.021 0.000 0.848 -0.775 1.272 3.102 0.898 1.138 1.006 0.763 0.502 0.702 0.775 +1 0.810 0.667 -1.653 1.012 -1.178 0.332 0.963 -1.077 0.000 0.460 1.498 0.459 0.000 0.957 -0.686 -0.112 2.548 0.881 0.299 0.983 3.102 0.964 0.855 0.995 0.791 0.716 0.930 0.797 +1 0.653 -0.442 -0.217 1.122 -0.934 1.005 0.420 0.256 2.173 0.858 -0.793 -1.734 0.000 0.646 2.484 -0.725 0.000 1.065 0.346 1.210 3.102 0.897 0.901 0.988 1.381 0.829 1.012 1.242 +1 0.684 0.358 0.030 0.585 -1.570 0.884 0.658 0.731 2.173 1.279 0.223 -0.961 0.000 0.550 -0.845 0.970 1.274 0.654 -0.039 -1.733 0.000 0.781 0.861 0.989 0.790 0.789 0.856 0.752 +1 0.794 -1.088 0.258 1.056 -0.807 1.039 -0.007 1.427 1.087 0.814 -1.152 -0.361 0.000 0.785 -1.058 0.855 1.274 0.516 -2.063 -1.127 0.000 0.794 0.777 1.039 1.262 0.883 0.949 0.852 +1 0.854 1.155 0.099 0.023 -0.410 0.684 0.866 -1.231 0.000 1.025 -0.127 0.837 2.215 0.959 1.278 -0.626 0.000 0.646 0.907 1.539 3.102 0.851 0.651 0.779 0.715 0.644 0.834 0.710 +1 0.430 0.011 1.016 0.891 -1.160 1.518 -1.830 0.377 0.000 1.334 -0.540 -1.095 0.000 0.924 1.142 1.332 2.548 1.056 1.913 -0.783 0.000 0.803 0.911 0.989 0.642 0.763 1.494 1.442 +1 0.390 1.113 -0.115 1.235 -1.016 0.751 0.199 -1.598 0.000 0.816 -0.362 -0.098 2.215 2.008 -0.449 0.891 2.548 0.862 0.124 -1.138 0.000 0.497 0.973 0.986 1.185 1.062 0.908 0.827 +0 0.673 -0.537 1.268 0.263 0.949 1.806 -1.038 1.397 2.173 1.559 -0.646 -0.302 1.107 1.771 -1.650 -0.337 0.000 0.732 -1.104 -0.985 0.000 0.758 0.955 0.976 1.337 2.513 1.455 1.336 +0 0.740 -0.545 0.661 0.955 -0.522 0.586 -0.058 -0.225 0.000 0.932 -0.887 -1.740 2.215 0.784 0.276 -1.180 0.000 1.142 -0.616 1.277 3.102 0.950 0.926 1.019 0.874 0.391 0.732 0.688 +0 0.633 1.154 1.016 2.141 0.848 0.940 0.536 -0.154 2.173 1.112 1.522 -1.295 0.000 0.940 0.004 1.573 0.000 0.729 0.257 -0.724 3.102 0.926 1.150 0.996 0.832 0.443 0.823 0.801 +0 1.811 2.088 -0.245 0.239 -1.275 1.315 2.161 1.648 0.000 0.804 2.225 0.660 0.000 0.819 0.969 -0.975 2.548 0.475 1.376 0.953 3.102 0.765 0.827 0.983 0.606 0.491 0.523 0.543 +0 0.294 1.270 1.325 1.633 -0.714 0.922 1.495 -0.616 2.173 
1.486 1.173 1.007 0.000 1.170 2.585 1.226 0.000 1.021 1.221 1.591 0.000 0.975 1.195 0.987 0.637 0.960 1.010 0.876 +0 1.154 1.382 -0.452 0.377 0.483 0.676 1.339 0.692 2.173 1.237 1.262 -1.227 2.215 0.688 2.629 1.617 0.000 0.810 1.704 0.332 0.000 0.835 1.064 0.986 0.807 1.329 0.850 0.769 +1 0.446 1.683 -0.381 1.582 0.402 1.055 -0.768 -1.627 2.173 0.398 0.625 -1.115 0.000 0.677 -0.244 0.139 2.548 0.370 0.868 -0.611 0.000 0.241 0.862 0.980 0.662 1.086 1.116 0.851 +1 0.339 -1.053 -1.249 1.012 -0.330 0.980 0.415 0.655 0.000 0.912 0.959 -1.470 2.215 1.064 -0.396 -0.774 2.548 0.868 1.332 0.568 0.000 0.887 1.203 0.988 1.038 1.016 1.512 1.647 +0 1.419 0.396 1.002 0.443 1.688 0.693 -1.099 0.448 2.173 1.031 -0.404 -0.786 0.000 0.733 0.541 -0.626 0.000 0.566 -0.872 -1.317 0.000 0.771 0.992 0.992 0.480 0.906 0.833 0.763 +1 1.685 0.008 -0.919 0.370 0.504 1.191 -0.664 0.904 1.087 1.072 -1.301 -1.158 1.107 0.593 -1.043 -0.453 0.000 0.785 0.167 0.464 0.000 0.987 0.993 1.049 1.290 1.691 1.106 0.906 +0 1.706 -0.446 -1.112 1.030 -1.699 1.250 -1.446 0.834 1.087 0.717 -0.639 -0.508 2.215 0.650 -1.823 -0.634 0.000 0.683 -0.738 0.988 0.000 0.849 0.925 0.982 0.793 1.424 1.174 0.952 +0 0.887 0.542 0.166 1.002 1.644 1.452 -0.876 0.355 0.000 1.170 -0.545 1.523 1.107 1.629 0.125 -0.843 2.548 1.873 -0.212 -1.522 0.000 1.084 0.943 1.269 0.984 1.347 0.921 0.800 +0 0.692 0.228 -0.092 1.046 -0.932 0.627 0.242 0.412 2.173 1.021 0.254 1.010 0.000 1.121 1.245 -1.117 1.274 0.712 1.731 -1.717 0.000 1.351 1.053 0.990 1.024 1.197 0.878 0.913 +0 0.997 -0.819 0.668 1.295 0.156 0.870 1.141 -1.671 2.173 1.289 2.360 1.557 0.000 1.359 -0.955 -0.501 0.000 0.727 2.118 -0.660 0.000 1.150 0.814 0.987 2.225 0.567 1.392 2.070 +1 0.859 -0.088 0.119 0.486 -0.925 0.485 -0.552 1.280 2.173 1.235 -0.075 -1.038 2.215 1.226 -0.162 0.816 0.000 0.963 0.412 -0.248 0.000 1.064 0.831 0.988 0.833 1.026 0.810 0.736 +0 0.621 -0.297 1.723 0.984 0.918 0.357 -0.457 0.387 2.173 0.448 1.959 -0.750 0.000 0.841 0.707 -0.731 2.548 0.714 0.338 -1.247 0.000 0.733 0.923 0.993 1.089 0.732 0.746 0.865 +0 0.559 -0.091 1.469 1.146 -0.940 0.555 0.034 0.994 2.173 0.863 -0.753 -1.368 0.000 1.044 -0.623 0.524 2.548 0.687 -0.509 0.167 0.000 0.989 0.901 0.988 0.828 0.523 0.666 0.642 +0 1.381 -0.192 0.030 0.137 0.510 0.648 -0.101 1.474 0.000 0.990 -0.256 -1.426 2.215 0.688 -0.328 0.737 0.000 1.181 0.352 -0.608 3.102 0.757 0.893 0.997 1.001 0.740 0.702 0.712 +1 1.108 1.304 -1.228 1.017 0.073 0.640 1.869 -0.415 0.000 1.230 0.442 1.618 2.215 0.814 1.891 0.399 0.000 0.900 1.365 0.941 3.102 0.877 0.727 1.356 1.163 0.807 0.934 0.854 +0 1.145 0.081 1.327 0.797 -1.647 0.946 -0.539 -0.392 2.173 0.457 -2.442 0.534 0.000 0.522 0.194 0.844 0.000 0.766 1.240 -0.702 3.102 1.384 1.300 0.986 0.990 1.109 1.127 1.023 +1 1.500 -0.250 1.285 1.475 0.872 0.984 0.122 -0.981 2.173 1.180 -0.664 -0.203 1.107 0.500 0.735 -1.129 0.000 0.954 -0.147 0.382 0.000 0.923 0.840 0.981 1.380 1.216 1.183 0.990 +1 2.869 -0.817 0.610 1.640 0.326 0.691 -1.833 -1.465 0.000 1.638 -0.569 -0.547 2.215 1.452 1.392 -1.208 0.000 1.027 -0.498 1.445 3.102 0.836 1.051 0.980 1.613 1.141 1.161 1.527 +0 0.638 0.540 0.001 3.379 0.518 1.796 1.780 -0.807 0.000 2.169 1.574 1.612 0.000 0.583 0.766 -0.725 1.274 1.192 1.873 -1.184 0.000 0.900 0.739 0.983 0.708 0.744 1.059 1.459 +0 0.387 1.455 -1.237 0.747 0.654 0.741 0.562 -0.388 2.173 0.935 1.973 -1.633 0.000 0.686 -2.103 -1.635 0.000 1.414 -0.781 0.721 3.102 0.932 0.950 0.990 0.762 1.277 1.232 1.114 +0 1.403 -0.133 0.647 0.965 0.158 0.515 -0.962 1.481 2.173 0.597 1.034 -1.246 0.000 0.788 
-1.263 -0.740 2.548 0.457 -0.087 -1.370 0.000 0.428 0.902 0.994 0.868 0.740 0.739 0.796 +0 0.584 -0.643 1.155 0.808 0.437 0.797 -2.921 -0.367 0.000 1.216 0.345 -0.388 2.215 2.678 -1.126 1.647 0.000 0.790 -0.499 0.405 3.102 3.595 2.129 0.991 1.449 0.727 2.035 1.555 +1 1.176 -0.121 -1.273 0.996 1.606 0.657 1.237 0.077 2.173 0.872 0.430 0.880 0.000 0.658 0.659 -0.303 0.000 0.535 0.304 -1.353 1.551 1.030 0.830 0.982 0.516 0.667 0.880 0.809 +1 0.940 0.677 -0.766 2.741 -1.152 1.446 -1.935 0.785 0.000 1.094 0.625 1.118 2.215 0.489 -0.967 1.518 0.000 1.649 1.111 -0.480 0.000 0.715 1.261 0.982 0.820 0.839 0.958 0.870 +1 0.716 0.262 -0.632 1.034 -1.058 0.578 0.171 1.317 2.173 0.553 1.230 -1.104 0.000 1.243 1.072 0.613 2.548 0.799 1.845 0.637 0.000 0.965 1.012 0.987 1.339 0.836 0.973 0.979 +1 0.686 -0.862 1.601 0.258 0.329 0.875 0.405 -1.639 2.173 0.734 -0.527 0.405 0.000 1.237 0.243 -0.112 0.000 1.931 -0.214 -1.028 3.102 0.895 1.074 0.978 0.742 0.860 0.947 0.796 +1 0.359 1.943 -0.187 0.358 -0.808 1.369 0.249 -1.609 2.173 0.865 -0.186 0.190 0.000 0.831 1.888 0.544 0.000 0.433 0.219 -0.694 3.102 0.967 1.318 0.979 0.471 0.599 0.766 0.690 +1 0.828 0.204 -0.552 0.837 0.855 1.019 0.757 -0.824 0.000 1.502 1.059 1.333 0.000 1.023 1.823 -0.517 0.000 1.689 0.214 0.787 1.551 1.312 1.110 1.101 0.708 0.645 0.980 0.861 +1 0.952 1.691 1.533 1.275 -1.169 0.574 -0.337 -0.211 0.000 0.917 0.997 1.107 2.215 1.085 0.298 0.353 2.548 1.069 1.065 -0.449 0.000 1.106 0.830 0.992 0.897 0.766 0.872 0.945 +0 1.037 -1.134 -0.484 0.816 -1.291 0.478 0.453 -1.333 0.000 0.798 -0.328 0.129 1.107 0.808 -0.005 1.184 0.000 0.636 -0.085 0.758 3.102 0.895 0.944 0.990 0.782 0.353 0.682 0.773 +1 0.723 -0.826 1.360 1.080 -0.727 1.647 -1.210 -0.030 2.173 1.139 -1.311 -1.482 0.000 0.978 -2.146 1.661 0.000 0.638 -0.551 0.577 1.551 0.932 0.771 1.166 1.177 0.643 0.962 0.859 +0 1.129 0.615 -0.014 2.421 -0.600 0.901 -1.331 1.108 0.000 1.449 2.416 1.395 0.000 1.495 1.003 0.149 2.548 0.971 1.255 -1.729 3.102 1.596 0.958 1.154 0.911 0.933 1.009 1.243 +1 1.216 0.012 -1.144 1.141 1.605 0.585 1.275 -0.460 2.173 0.630 -2.366 0.463 0.000 1.216 0.732 -1.658 0.000 1.848 0.667 0.462 3.102 0.988 1.028 1.005 1.073 0.856 0.927 0.834 +1 0.525 -0.779 -0.128 1.109 0.427 0.843 -0.123 -0.953 2.173 1.439 0.124 1.424 1.107 1.604 0.167 -0.436 0.000 0.706 -1.601 1.636 0.000 1.200 0.937 0.978 1.665 1.378 1.337 1.165 +0 0.584 -0.425 1.533 0.639 -0.687 0.739 -1.250 -0.559 0.000 0.972 1.363 1.668 2.215 1.033 -0.029 0.567 2.548 0.888 -0.344 0.151 0.000 0.915 0.960 0.990 1.545 1.219 1.242 1.085 +0 0.339 -0.395 -0.970 1.429 1.667 0.560 -1.462 1.586 1.087 0.536 1.957 -0.178 0.000 0.729 0.830 0.247 1.274 0.466 -0.353 0.215 0.000 1.031 0.639 0.989 0.539 1.406 1.117 1.238 +1 0.540 -0.714 1.278 1.993 0.808 1.149 -1.595 -0.912 0.000 0.471 -1.176 -0.793 2.215 0.975 -1.168 -0.161 0.000 1.317 -1.168 1.553 3.102 1.233 1.130 0.997 0.880 0.613 0.669 0.852 +0 0.936 -1.026 -1.696 0.772 0.939 0.640 -2.815 0.080 0.000 0.640 0.008 1.538 2.215 0.568 1.082 -1.334 0.000 1.524 1.045 -0.675 0.000 0.575 0.846 0.985 0.723 0.667 0.633 0.742 +1 0.368 -1.910 -0.446 1.105 1.065 0.854 -1.920 0.020 0.000 1.026 -1.816 1.484 0.000 0.910 -0.096 -1.508 2.548 0.798 -0.944 -1.322 0.000 0.811 0.919 0.989 1.895 0.571 1.364 1.124 +0 0.407 1.742 -1.394 1.689 -0.344 1.100 0.698 -0.819 1.087 1.026 0.421 1.552 0.000 1.708 0.316 0.689 2.548 1.422 -0.382 1.167 0.000 0.901 0.960 0.988 1.261 1.698 1.322 1.367 +1 0.656 -1.870 -1.284 0.374 1.317 1.010 -0.546 0.421 0.000 0.740 -0.816 -0.135 0.000 0.949 0.070 -1.086 1.274 1.694 
-0.896 1.739 1.551 0.922 1.139 0.976 0.589 0.802 0.927 0.807 +1 2.363 1.058 0.907 1.071 0.330 1.278 0.820 -0.689 2.173 1.701 0.976 -1.556 0.000 0.351 -0.223 1.208 0.000 1.003 -0.170 0.377 3.102 0.802 0.896 1.093 0.852 1.176 1.130 0.883 +1 0.957 0.582 -0.121 0.996 -1.328 1.093 0.466 -1.608 0.000 2.118 0.394 0.607 2.215 0.551 1.062 0.125 2.548 1.057 0.858 -0.973 0.000 1.004 0.939 1.197 1.267 0.659 1.087 0.947 +1 1.178 -0.144 1.424 1.919 0.931 0.969 -0.214 -0.713 2.173 0.420 -0.360 -1.227 0.000 0.878 0.586 -0.288 0.000 0.873 -0.623 1.648 3.102 0.924 0.834 0.987 0.684 0.867 0.956 0.817 +0 1.270 0.146 0.332 0.048 -0.656 0.761 0.486 -1.297 0.000 0.711 1.044 0.939 2.215 0.653 -1.048 -0.368 2.548 0.870 -0.733 -1.309 0.000 0.921 0.911 0.838 0.763 1.189 0.859 0.780 +1 1.467 0.348 -0.666 0.540 1.315 0.473 0.122 0.813 0.000 0.428 0.754 0.237 0.000 0.677 -0.416 0.152 2.548 1.669 -0.869 1.669 3.102 0.571 1.018 1.205 0.757 0.832 0.774 0.714 +1 1.386 0.234 0.230 0.689 -1.693 0.693 -0.265 1.698 1.087 0.342 1.518 0.582 0.000 0.622 0.571 -1.556 2.548 0.666 -1.366 -0.962 0.000 0.885 0.853 1.336 0.781 0.414 0.665 0.662 +0 0.596 0.024 -0.552 0.841 1.461 0.959 0.848 0.867 2.173 0.751 -0.933 -1.607 0.000 1.535 -0.774 -0.380 2.548 0.672 -1.572 -0.840 0.000 0.738 0.886 0.987 0.996 1.992 1.270 1.000 +1 0.701 -0.496 -0.713 0.201 -1.618 1.329 0.720 0.893 2.173 1.316 0.586 -1.218 0.000 1.351 1.189 -0.913 0.000 1.919 0.304 -0.108 3.102 0.920 1.204 0.990 1.100 1.362 1.259 1.009 +1 1.151 -0.356 1.572 0.904 -1.504 3.212 -0.306 0.421 0.000 1.894 1.602 -1.731 0.000 2.083 -0.624 -1.119 2.548 2.679 -0.582 -0.523 3.102 0.306 0.738 0.989 0.847 0.924 0.865 0.735 +0 1.235 0.102 1.562 1.724 -1.559 0.805 2.612 -0.397 0.000 0.571 -1.552 0.863 0.000 0.714 -0.321 1.048 2.548 0.512 2.325 0.077 0.000 0.661 0.765 0.979 0.681 0.490 0.620 0.655 +0 0.749 -0.033 -0.142 0.891 1.739 0.790 1.264 0.164 0.000 1.345 0.721 -1.132 2.215 1.054 1.550 1.181 0.000 0.449 1.598 0.581 0.000 1.349 0.786 1.123 0.909 0.737 0.884 0.860 +1 0.298 -0.735 -1.386 2.426 -0.254 2.516 0.749 1.581 0.000 1.258 1.281 0.099 0.000 1.231 0.420 0.312 2.548 1.373 0.713 -0.372 3.102 1.718 1.174 1.005 1.011 0.607 0.867 1.309 +1 0.589 -1.402 -1.514 1.061 0.004 0.532 0.654 -0.865 0.000 0.543 -0.830 1.214 2.215 1.017 -1.216 -0.305 2.548 1.152 0.691 1.653 0.000 0.921 0.981 1.073 0.620 0.797 0.903 0.896 +0 0.650 0.044 -0.515 0.686 0.676 0.723 -1.105 0.599 2.173 0.845 -0.455 -1.709 2.215 0.594 -1.295 -1.464 0.000 0.522 -1.545 -0.455 0.000 0.503 0.735 0.987 0.778 1.073 0.690 0.616 +0 0.589 -0.031 -1.150 1.122 0.094 0.463 -0.845 1.099 1.087 0.622 -0.089 1.059 2.215 0.908 -1.505 -0.352 0.000 0.883 -0.364 1.561 0.000 1.167 0.947 1.015 0.788 0.309 0.621 0.656 +1 0.457 -1.114 0.142 0.965 1.086 0.912 -0.164 1.218 2.173 0.958 0.662 -0.897 0.000 0.855 0.063 -1.220 2.548 1.155 0.874 -0.226 0.000 0.855 1.150 0.985 1.183 0.900 1.056 1.114 +1 0.372 -0.862 -0.458 2.275 0.211 1.047 0.045 1.531 2.173 1.138 -0.356 -1.244 0.000 0.425 1.615 -0.462 0.000 0.621 0.871 -0.041 0.000 0.841 0.860 0.989 0.908 0.241 1.238 1.024 +1 0.997 0.520 0.208 1.357 0.153 0.889 0.184 -1.648 2.173 0.502 1.023 -1.149 1.107 0.713 -0.855 -1.088 0.000 0.698 0.508 1.409 0.000 0.899 0.743 0.997 1.448 0.613 0.970 0.903 +1 0.704 0.488 -1.193 1.092 -0.405 0.636 0.024 -0.872 2.173 0.736 0.030 1.026 0.000 0.801 1.306 0.758 0.000 0.802 -0.960 1.603 1.551 0.940 0.991 0.993 0.753 0.760 0.824 0.839 +0 1.702 -0.994 -1.217 1.793 -1.162 0.672 -0.966 0.724 0.000 0.583 1.687 1.444 0.000 0.447 2.194 -0.552 0.000 0.947 -0.848 -0.112 1.551 0.809 
1.113 0.976 0.884 0.159 0.948 1.548 +1 0.717 0.047 -1.398 1.491 0.197 0.796 0.465 -0.469 2.173 0.987 0.465 1.340 2.215 1.292 0.850 0.098 0.000 0.571 -1.132 1.686 0.000 1.637 1.170 1.420 1.024 1.302 0.941 0.859 +0 1.272 0.353 0.351 0.752 1.714 0.853 -0.649 0.631 0.000 1.270 -0.698 -1.180 0.000 0.761 -0.043 1.244 2.548 0.441 -1.021 -0.807 1.551 2.208 1.188 1.277 0.709 0.508 0.784 0.809 +0 0.501 -0.358 0.657 0.832 -1.688 0.921 -0.852 1.544 2.173 1.066 -2.468 -0.167 0.000 0.871 -1.475 -0.965 2.548 0.988 -1.944 0.569 0.000 0.835 0.885 0.983 0.667 0.963 1.051 0.887 +1 0.342 -1.776 -1.059 1.905 -0.775 0.949 1.730 0.349 0.000 0.874 0.514 1.279 2.215 1.238 -0.848 1.304 1.274 0.868 -0.479 -0.136 0.000 0.886 1.028 0.979 1.062 0.882 0.962 0.810 +0 0.437 0.079 -1.091 1.127 1.415 2.370 -0.250 -1.716 0.000 2.256 0.743 -0.312 0.000 2.635 -0.835 0.307 0.000 1.244 1.795 1.353 0.000 0.780 0.879 0.985 0.598 1.084 0.865 0.904 +1 0.989 0.263 -0.368 0.472 -0.918 0.461 -1.168 -1.175 2.173 0.956 -1.218 0.640 0.000 1.167 0.640 -1.597 0.000 1.024 -0.172 0.894 0.000 0.764 0.958 0.993 0.635 0.738 0.620 0.664 +0 0.998 0.465 -0.585 0.287 1.674 0.640 0.889 1.272 2.173 0.409 -0.641 -0.562 0.000 1.037 0.433 -0.262 2.548 0.788 1.540 0.970 0.000 0.601 0.775 0.981 0.628 1.020 0.703 0.592 +0 0.903 0.598 -0.689 0.668 0.996 0.963 -0.817 -1.418 0.000 0.882 -0.159 0.696 2.215 0.427 -1.273 0.450 0.000 1.115 -0.359 -0.280 1.551 1.194 0.941 1.075 0.806 0.700 0.783 0.791 +1 1.000 -0.854 -0.659 0.608 -0.095 0.818 -0.640 0.920 0.000 0.616 -0.160 1.526 0.000 0.640 -0.712 1.367 2.548 0.591 2.341 -0.466 0.000 0.862 0.742 0.987 0.766 0.324 0.596 0.699 +0 2.606 1.568 1.167 2.059 0.908 0.504 0.626 -1.029 2.173 1.387 -0.060 -0.874 0.000 1.378 -0.407 -0.169 2.548 0.370 -1.562 -0.735 0.000 0.975 0.876 0.992 1.344 0.935 1.475 1.504 +0 0.477 0.580 0.763 1.469 -1.710 0.549 -0.116 -0.115 2.173 0.308 -0.202 0.390 0.000 0.622 0.538 -1.023 2.548 0.368 -1.732 0.830 0.000 0.502 0.670 0.985 1.035 0.593 0.700 0.707 +0 0.722 0.390 0.861 0.371 -0.438 1.100 -1.069 1.306 0.000 1.844 -0.050 -0.279 2.215 0.813 0.039 -1.238 2.548 0.920 -1.826 -1.439 0.000 1.309 1.203 0.989 1.134 0.991 1.347 1.307 +1 0.938 1.508 1.742 0.833 -0.096 1.016 1.127 0.323 2.173 1.185 0.825 -1.366 0.000 0.950 0.791 0.832 2.548 0.805 1.152 -0.668 0.000 0.824 0.958 1.221 0.966 0.566 0.875 0.793 +0 0.873 1.109 0.317 1.365 0.976 0.850 -0.829 -0.738 2.173 1.824 1.817 -0.914 0.000 1.140 -0.336 1.141 2.548 1.071 -1.269 1.291 0.000 0.904 0.925 0.988 1.217 1.251 1.442 1.372 +0 0.799 -1.630 1.368 1.027 -1.352 0.524 -0.533 1.183 0.000 0.898 0.037 -0.346 2.215 0.729 0.306 0.909 1.274 1.399 -1.357 -0.475 0.000 1.504 1.128 0.981 1.533 0.789 1.184 1.038 +0 1.166 0.661 -1.701 0.679 1.085 0.417 0.316 -0.202 0.000 0.569 1.256 -1.118 0.000 1.353 1.213 0.385 1.274 0.516 -0.145 -1.078 3.102 0.923 0.920 0.984 0.559 0.813 0.713 0.688 +0 1.042 -0.134 -0.324 1.332 0.445 1.496 -0.733 -1.186 1.087 1.112 -1.098 0.539 0.000 1.829 -0.594 1.058 2.548 1.125 0.945 -1.095 0.000 2.490 1.706 1.044 1.482 1.856 1.471 1.259 +1 0.807 1.024 1.355 0.223 -1.074 1.583 -1.823 -1.188 0.000 1.572 -0.524 0.804 2.215 2.125 -0.265 0.172 2.548 1.118 -0.055 -0.920 0.000 2.106 2.376 0.986 1.035 1.075 1.764 1.480 +0 2.082 0.110 -0.753 0.349 1.045 0.984 -0.538 -0.093 2.173 1.222 -0.790 1.517 2.215 0.488 -0.900 1.163 0.000 0.587 0.303 1.024 0.000 0.436 0.816 1.180 1.228 1.617 1.062 0.839 +1 0.385 2.114 -0.222 1.320 -0.943 0.505 -0.032 -0.258 0.000 0.615 0.257 0.907 2.215 1.518 -0.659 1.070 0.000 0.674 -0.031 -1.359 1.551 1.568 0.975 0.993 
0.890 0.525 0.654 0.828 +0 0.624 0.610 -0.407 1.033 -1.363 1.284 1.568 -1.628 0.000 1.067 0.709 -0.006 2.215 1.332 1.619 -0.559 0.000 3.348 -0.055 0.925 1.551 0.919 1.133 0.982 0.904 1.448 0.984 0.841 +0 1.204 1.055 -0.376 0.485 0.107 0.671 -1.247 0.635 0.000 1.015 -0.367 -1.366 0.000 0.937 0.413 -1.146 2.548 1.157 -0.440 1.405 0.000 0.855 0.850 0.989 0.867 0.669 0.654 0.904 +0 1.209 1.787 0.916 0.984 0.267 1.026 0.488 0.415 2.173 1.843 0.445 -1.720 0.000 1.648 0.665 -0.462 0.000 1.846 1.211 -0.946 1.551 2.449 1.626 0.987 0.900 1.552 1.342 1.213 +0 0.643 -0.947 0.111 0.792 1.435 1.113 -1.151 -1.739 1.087 1.319 -0.241 -1.269 0.000 2.120 -0.236 0.150 2.548 0.583 -1.289 -1.189 0.000 0.801 0.932 0.989 0.848 2.096 1.192 0.963 +0 0.447 -0.922 1.561 1.690 0.295 0.590 -0.664 -0.955 2.173 0.640 -1.508 1.422 0.000 0.353 -2.031 0.757 0.000 0.764 0.573 -1.177 3.102 0.482 0.963 1.094 0.929 0.539 0.748 0.726 +1 0.823 -0.210 -0.051 0.418 -1.163 0.634 2.049 -0.784 0.000 0.644 -0.740 -0.344 0.000 1.065 -0.267 1.351 1.274 1.341 -0.681 0.841 0.000 1.061 0.886 0.988 0.982 0.701 0.775 0.731 +0 0.604 -1.149 0.898 1.823 1.197 1.452 0.298 -0.447 2.173 1.134 0.205 -1.338 2.215 0.682 1.002 0.175 0.000 0.543 0.166 0.448 0.000 0.353 0.886 0.988 1.674 1.361 1.245 0.983 +1 1.348 -1.798 0.277 0.431 -0.757 0.458 -1.518 0.618 0.000 0.631 -0.797 -0.928 0.000 0.720 -0.165 1.386 0.000 1.153 -0.825 -1.488 3.102 0.924 0.714 0.993 0.676 0.319 0.576 0.555 +0 0.472 -1.655 0.845 1.386 -0.059 1.278 -1.063 -0.363 2.173 1.992 0.607 1.244 2.215 1.023 -0.758 1.714 0.000 0.667 -1.694 -1.624 0.000 0.818 1.110 0.985 3.233 3.233 2.335 1.735 +1 0.752 1.159 1.100 0.849 -1.364 0.852 0.713 0.619 0.000 1.341 0.176 -1.342 2.215 1.688 0.237 -0.072 0.000 0.492 -1.046 1.260 3.102 1.355 1.093 0.989 0.777 0.777 1.044 0.903 +1 0.567 -0.559 1.176 0.601 -1.014 0.788 -0.428 -0.405 0.000 0.659 1.562 1.528 0.000 0.717 -0.530 0.299 2.548 1.212 0.182 1.268 3.102 0.730 1.112 0.989 0.603 0.619 0.741 0.670 +1 0.277 -2.072 0.073 1.109 -1.685 0.900 0.533 -1.107 2.173 0.830 0.032 0.303 0.000 0.866 -0.814 0.915 0.000 0.592 -0.027 -0.145 3.102 0.924 0.602 0.987 1.097 0.630 0.806 0.789 +1 0.524 0.750 -1.028 1.425 -1.514 3.720 -0.536 0.083 0.000 0.949 -0.860 0.530 1.107 2.287 -0.526 1.304 0.000 6.068 -0.601 -1.430 0.000 0.905 0.959 0.989 0.515 0.534 0.694 0.677 +0 0.604 0.470 -1.256 1.095 1.739 1.181 0.549 -0.149 0.000 0.863 -0.161 0.839 2.215 1.312 -0.160 -1.731 2.548 0.587 -0.912 -0.920 0.000 0.806 1.094 0.992 1.004 0.831 0.958 1.037 +1 0.899 0.177 -0.944 0.933 -1.535 0.500 0.840 -0.325 1.087 1.141 -0.699 0.223 2.215 0.777 0.252 1.594 0.000 0.532 -1.406 -1.697 0.000 0.824 1.003 0.985 1.368 1.098 0.963 0.879 +0 2.358 -1.304 -0.542 0.169 -0.327 1.299 -0.718 1.135 2.173 0.418 -0.869 -1.101 0.000 0.612 -0.593 0.554 0.000 0.474 0.162 -1.159 3.102 0.780 0.883 1.002 0.668 0.830 1.013 0.800 +0 1.097 -1.355 1.713 1.449 0.052 0.716 -0.459 1.129 2.173 0.447 -1.160 1.023 0.000 0.587 -1.532 -1.319 0.000 0.872 -0.868 -0.526 0.000 0.804 0.761 1.742 1.064 0.901 0.889 0.726 +1 0.472 -0.794 -1.435 1.257 0.745 0.704 1.635 -0.910 0.000 1.122 -0.053 1.183 0.000 1.429 -0.916 -0.480 2.548 1.840 0.229 -1.051 3.102 0.862 1.157 0.988 0.893 1.045 0.988 0.875 +0 1.287 0.556 0.852 1.862 0.693 1.452 0.483 -1.368 2.173 1.247 1.060 -0.966 0.000 1.322 1.239 0.187 0.000 0.551 0.093 0.101 1.551 1.718 1.028 1.011 1.760 0.935 1.104 1.210 +0 0.340 0.094 1.040 0.889 -0.434 0.844 1.227 -1.538 2.173 1.647 1.146 0.210 2.215 1.343 1.040 1.396 0.000 1.284 0.635 -0.656 0.000 1.420 1.011 0.987 0.789 1.736 1.060 
0.868 +0 0.738 -0.560 0.650 0.798 -0.170 0.810 -0.864 -1.189 1.087 0.761 -0.816 -0.057 2.215 0.851 -0.155 1.021 0.000 0.817 -1.296 1.550 0.000 0.868 1.020 0.996 0.683 0.985 0.785 0.737 +0 0.366 -1.129 0.943 0.445 0.087 0.548 0.251 -1.447 0.000 0.893 0.511 1.528 2.215 0.607 -0.054 0.352 2.548 0.506 -0.877 -0.378 0.000 0.869 0.782 0.981 0.520 0.721 0.583 0.568 +0 0.999 0.954 -1.736 0.964 0.973 0.530 -1.735 -0.542 0.000 0.702 -0.494 0.778 2.215 0.818 1.118 -0.495 1.274 0.370 -0.154 -0.956 0.000 0.636 0.851 0.991 0.813 1.079 0.902 1.105 +0 0.640 -1.021 0.114 1.554 1.041 0.618 -0.985 -1.564 0.000 0.531 -2.373 0.469 0.000 0.353 0.182 -1.188 0.000 0.713 1.094 -0.941 3.102 0.601 0.827 1.025 0.959 0.332 0.850 0.773 +0 0.728 -0.686 1.522 1.698 -1.386 0.870 0.082 -0.410 0.000 1.020 0.036 0.395 0.000 0.873 0.070 1.276 2.548 0.664 0.821 -0.588 0.000 0.983 0.829 0.993 1.121 0.305 0.815 0.897 +1 0.906 -0.731 1.016 2.359 1.484 1.056 -1.052 -0.702 2.173 0.856 0.085 -0.046 2.215 0.436 -1.820 1.049 0.000 0.839 -0.602 -0.116 0.000 0.737 0.841 0.975 1.385 1.153 1.234 0.963 +0 0.841 0.048 0.915 0.687 -0.786 0.627 -0.686 -1.516 1.087 0.546 0.190 1.601 0.000 1.253 0.133 -0.135 2.548 0.980 -0.862 -0.036 0.000 0.725 0.879 1.053 0.780 1.150 0.707 0.633 +0 0.871 -1.221 1.366 0.826 -0.322 0.740 -0.558 0.125 1.087 0.492 -0.118 -1.691 2.215 0.799 -0.486 1.159 0.000 0.797 -0.151 -1.075 0.000 0.943 0.899 1.173 0.763 0.908 0.691 0.650 +0 0.710 -0.376 -1.609 1.226 -0.982 0.874 0.646 0.560 1.087 0.464 0.200 0.922 0.000 1.105 0.435 -1.278 2.548 1.163 -0.135 -0.093 0.000 0.779 0.846 0.985 1.144 1.224 0.831 0.740 +1 1.144 0.615 0.168 0.741 -0.292 0.571 1.603 -0.017 0.000 1.367 -0.318 1.642 0.000 1.089 -0.339 -1.521 2.548 0.757 -0.760 1.170 3.102 0.955 0.851 0.981 0.956 0.493 0.693 0.677 +1 1.552 -1.195 -0.462 0.730 -0.275 1.225 -1.226 1.309 1.087 0.665 -1.337 0.470 2.215 0.489 -0.937 -1.439 0.000 0.540 -2.067 -0.469 0.000 0.625 0.896 0.991 0.774 0.917 1.001 0.794 +1 0.289 2.404 -0.247 0.783 -0.856 0.584 0.219 -1.284 0.000 0.588 1.304 -0.253 0.000 0.876 0.693 1.134 2.548 1.444 -0.830 0.915 3.102 0.815 1.102 0.981 0.751 0.873 0.840 0.738 +1 0.945 -0.175 -0.909 1.056 -1.316 0.914 0.127 0.594 2.173 0.825 -0.505 1.575 0.000 0.441 -0.560 0.352 0.000 0.732 -1.076 -0.360 3.102 0.826 0.891 0.989 0.612 0.936 0.885 0.762 +0 0.605 -0.810 1.087 0.445 1.585 0.724 0.044 1.236 1.087 1.240 0.354 -0.262 0.000 1.193 0.775 -1.271 2.548 0.485 1.326 0.201 0.000 0.776 0.944 0.992 1.130 1.014 1.193 1.211 +0 1.070 -2.064 -1.551 1.149 -0.660 0.674 -1.434 0.305 0.000 0.771 -0.824 1.376 2.215 0.686 -0.842 -0.452 2.548 0.907 -0.866 0.961 0.000 0.709 0.767 1.105 0.769 0.771 0.756 0.752 +0 1.404 -0.626 0.868 0.964 -0.130 1.060 -0.165 1.631 2.173 1.078 -2.116 -0.607 0.000 1.289 -1.072 -0.195 2.548 1.164 -1.086 -1.324 0.000 0.964 0.878 1.262 1.195 1.642 1.011 0.915 +1 0.542 0.771 -1.067 1.028 0.667 0.983 -1.319 0.368 0.000 1.029 1.162 0.983 0.000 1.227 -1.078 -0.898 0.000 1.818 0.507 -0.653 3.102 0.793 1.018 1.034 0.750 0.671 0.715 0.655 +1 0.633 -0.076 -0.962 0.978 0.041 1.150 -0.247 1.287 2.173 0.853 1.036 -1.433 0.000 0.557 2.059 -0.307 0.000 0.755 -0.038 -0.103 3.102 0.985 0.749 0.987 1.116 0.942 0.819 0.820 +1 1.941 1.136 -1.738 1.119 -1.053 0.591 0.655 -0.053 2.173 0.462 0.061 -0.398 2.215 0.647 0.904 1.294 0.000 0.868 1.063 0.252 0.000 0.677 0.657 1.183 1.142 0.331 0.822 0.712 +1 1.022 -0.930 -0.240 0.656 0.456 0.552 0.537 -1.578 0.000 0.780 -1.063 -1.484 2.215 0.694 -1.731 0.097 0.000 0.950 -0.814 0.102 3.102 0.823 0.836 0.985 0.522 0.771 0.633 0.691 
+0 1.195 -0.791 0.724 1.623 1.446 0.484 -1.318 -0.150 0.000 0.551 -0.934 -1.257 2.215 0.788 -0.109 -0.720 0.000 0.807 1.009 -0.369 1.551 0.899 0.948 1.170 0.843 0.901 0.885 0.842 +1 1.778 0.630 0.709 0.621 1.447 0.953 0.808 -1.552 2.173 0.930 0.680 -0.223 2.215 0.785 -0.309 -0.462 0.000 0.583 0.768 -0.729 0.000 0.533 0.907 0.987 0.940 1.292 0.923 0.805 +0 0.616 -0.057 0.362 0.979 -1.084 0.308 0.681 0.845 0.000 0.668 0.866 -1.300 2.215 0.864 -0.810 0.678 2.548 0.646 -1.162 -0.744 0.000 1.082 0.928 1.038 0.763 1.136 0.721 0.652 +1 0.876 -2.277 -1.225 1.021 -0.260 0.159 0.829 0.144 1.087 0.869 -0.527 1.572 2.215 0.452 -0.895 0.135 0.000 0.642 -1.931 0.637 0.000 0.500 0.793 1.002 1.162 0.666 0.989 0.788 +0 0.960 0.077 -1.037 1.884 -0.413 0.953 -1.034 1.483 2.173 0.486 -0.537 0.867 0.000 0.838 -0.668 -0.044 2.548 0.461 -1.138 1.136 0.000 0.298 0.492 0.995 1.523 1.106 1.038 0.830 +0 0.362 -1.252 -1.699 1.271 1.325 0.647 -0.045 -0.089 0.000 1.158 0.628 -1.167 2.215 0.940 -0.986 0.169 2.548 0.443 0.686 1.271 0.000 0.852 0.980 0.984 1.051 1.497 1.766 1.483 +0 0.988 1.156 -0.804 1.563 -0.898 0.675 1.670 0.875 2.173 0.569 0.234 1.046 2.215 1.132 1.047 0.297 0.000 0.611 1.972 -1.707 0.000 0.845 0.785 0.993 1.364 0.731 0.975 0.839 +0 1.114 0.636 0.369 0.891 1.045 0.615 0.896 -1.478 2.173 0.255 2.861 -1.620 0.000 0.920 0.727 -0.947 1.274 0.438 1.889 1.650 0.000 0.172 0.636 0.993 0.921 0.435 0.717 0.649 +0 0.608 0.001 -1.139 1.407 -0.218 0.751 0.348 0.361 2.173 0.901 2.202 1.446 0.000 1.207 2.709 -0.880 0.000 1.652 0.647 1.352 3.102 0.692 0.777 0.990 0.793 0.952 0.856 0.918 +0 0.319 -2.000 -1.007 1.956 1.396 0.791 0.162 0.194 0.000 0.818 -1.165 -0.106 2.215 1.659 -0.567 -1.506 2.548 0.699 1.101 -0.360 0.000 0.882 1.110 0.987 0.864 1.234 1.047 1.116 +0 1.888 -0.245 -0.586 0.377 1.255 0.460 -2.034 1.632 0.000 0.918 0.508 0.221 2.215 0.732 -0.214 -1.502 0.000 1.030 -0.537 1.203 0.000 0.896 1.058 1.164 0.919 0.392 0.965 0.905 +1 0.898 -1.507 -1.547 0.203 -1.193 0.926 -1.204 0.471 2.173 0.422 -0.929 1.465 0.000 0.940 0.404 -1.419 0.000 1.377 -1.012 -0.338 3.102 0.887 1.003 0.992 1.014 0.796 0.897 0.794 +0 0.372 0.722 1.225 1.699 -0.167 0.536 1.514 1.506 0.000 0.738 1.548 -1.257 0.000 1.387 -0.021 0.481 0.000 0.852 0.478 -0.803 3.102 0.809 0.724 1.047 0.889 0.474 0.599 0.673 +1 2.388 0.090 -0.653 0.571 -0.274 1.073 -0.345 0.862 2.173 0.974 0.617 -1.740 0.000 0.541 0.024 -1.742 2.548 0.522 0.480 -0.067 0.000 0.928 1.147 0.997 0.768 0.700 0.962 0.883 +0 0.606 -1.131 -0.208 0.916 1.695 1.448 -0.138 0.377 2.173 1.134 -0.811 -0.966 1.107 0.983 2.627 -1.353 0.000 1.046 -0.414 1.528 0.000 0.855 0.927 1.022 0.747 1.887 1.064 0.854 +1 0.517 -0.605 1.692 0.838 0.228 1.169 -0.564 0.791 0.000 1.458 -1.222 -1.159 1.107 0.800 -0.885 -0.478 0.000 0.658 -1.044 -0.029 3.102 1.623 0.949 0.986 0.924 0.753 0.986 0.817 +1 0.713 1.926 0.105 1.371 -1.300 1.313 0.357 0.282 2.173 0.828 0.980 -1.317 0.000 0.567 2.040 1.631 0.000 0.471 0.090 1.520 1.551 0.845 0.617 1.307 1.722 0.755 1.104 0.986 +1 1.039 1.036 -1.551 0.804 0.380 0.553 0.051 -0.253 0.000 0.549 -0.923 -0.542 2.215 1.023 -0.451 1.266 1.274 0.527 0.224 1.001 0.000 0.751 0.787 1.248 1.101 0.816 0.864 0.733 +1 0.893 -0.604 -1.067 1.040 0.013 1.538 -1.243 -1.427 0.000 1.124 0.752 0.269 0.000 1.736 -0.488 1.084 2.548 0.995 -1.841 -0.358 0.000 0.962 1.050 1.105 1.039 0.790 0.934 0.896 +0 0.361 -1.235 0.107 0.663 0.752 0.638 -1.422 0.914 0.000 0.790 -0.594 -1.056 2.215 0.277 -0.887 -1.634 0.000 1.193 0.448 -1.077 3.102 0.592 1.067 0.989 0.851 0.533 0.742 0.725 +0 1.852 -0.265 
0.916 0.844 -1.361 0.605 1.741 -0.312 0.000 0.415 1.116 -0.888 0.000 0.673 -0.154 0.332 2.548 1.326 -0.120 -1.435 1.551 0.622 1.053 1.536 0.844 0.722 0.767 0.922 +0 0.476 -1.532 -1.726 0.861 -0.413 0.530 0.052 1.279 0.000 0.550 -0.218 -0.551 2.215 0.800 -0.868 0.064 2.548 0.416 -1.405 0.874 0.000 0.751 0.772 0.989 0.606 0.455 0.561 0.550 +0 0.526 0.968 -0.495 1.542 -1.350 0.557 1.246 0.001 0.000 0.362 1.400 -1.635 0.000 0.851 1.254 0.691 2.548 0.369 0.877 0.530 1.551 0.954 0.686 0.990 0.604 0.093 0.601 0.595 +0 0.824 0.771 1.669 0.945 1.050 1.218 0.604 0.084 0.000 0.406 1.079 0.690 0.000 1.020 2.035 -1.532 0.000 1.077 -0.825 -0.615 3.102 0.863 1.198 0.989 0.559 0.444 0.844 0.871 +1 1.464 -0.452 -0.601 0.579 -1.221 0.897 -0.776 1.256 2.173 1.296 -0.921 0.555 2.215 0.693 2.052 -1.517 0.000 0.654 -1.601 0.170 0.000 0.316 0.851 0.992 1.125 0.947 0.962 0.772 +1 0.688 0.310 -1.606 1.170 -0.765 2.223 0.764 0.626 2.173 1.834 2.557 1.091 0.000 3.284 0.738 -0.780 0.000 1.728 0.786 -1.080 1.551 5.418 3.061 0.985 1.817 2.081 2.528 2.135 +1 1.930 0.748 -0.663 0.374 0.176 0.874 -0.463 1.575 2.173 0.623 0.426 1.541 0.000 0.656 -1.210 0.274 2.548 0.940 0.003 0.326 0.000 0.913 0.852 0.988 1.204 0.965 1.121 0.920 +1 1.419 -1.257 -0.894 0.563 1.174 1.506 0.011 1.015 2.173 1.031 -1.231 -0.269 0.000 0.880 -0.225 0.233 2.548 1.040 -0.855 1.671 0.000 1.338 0.978 1.186 1.580 0.948 1.083 1.026 +1 1.704 0.155 1.731 0.736 -1.646 0.938 -1.133 -0.299 2.173 0.740 -0.422 0.405 2.215 0.616 -0.602 1.027 0.000 0.590 -1.297 -0.676 0.000 0.733 0.766 0.992 1.113 0.852 1.238 0.983 +1 1.794 0.282 0.078 0.419 -0.168 0.594 0.764 -1.648 0.000 0.879 -0.543 1.424 2.215 0.712 -1.105 -0.475 0.000 0.380 -0.256 1.144 3.102 1.701 0.932 0.993 1.217 0.146 0.758 0.838 +1 0.583 -0.581 1.146 0.634 -1.108 1.769 1.267 -0.515 0.000 0.967 -0.322 1.742 0.000 1.044 0.801 -1.583 0.000 2.199 0.416 1.061 1.551 0.881 0.860 0.987 0.762 0.940 0.698 0.635 +0 0.736 1.116 -1.708 1.329 1.621 1.207 -1.634 -0.363 0.000 0.695 -1.798 0.380 0.000 0.695 -0.567 -1.398 2.548 1.646 -0.464 0.841 3.102 1.222 1.159 0.977 1.825 0.738 1.346 2.309 +1 1.231 -1.082 -0.787 0.848 -0.418 1.273 1.535 1.076 0.000 0.624 -1.813 -1.176 0.000 0.742 1.125 -0.129 0.000 2.029 0.046 0.303 3.102 0.678 1.314 0.985 0.650 0.919 0.824 0.763 +1 0.398 -0.805 -1.342 0.958 1.518 0.485 -0.971 -0.350 0.000 0.591 -1.101 0.599 0.000 0.681 0.776 1.372 2.548 0.991 -0.159 -0.718 3.102 0.862 1.053 0.977 0.695 0.689 0.692 0.711 +0 0.886 -0.302 1.325 1.746 -1.665 1.058 0.481 -0.350 0.000 0.804 0.014 0.459 2.215 0.824 -1.342 1.540 0.000 0.831 -0.139 -0.580 3.102 0.895 0.897 0.991 0.817 0.598 0.780 0.975 +0 3.399 0.095 1.730 0.744 -1.024 2.037 -0.308 0.310 0.000 0.857 0.147 -1.204 2.215 0.946 -0.487 -0.438 2.548 0.420 -0.004 0.619 0.000 0.444 0.854 1.351 0.793 0.695 0.928 1.188 +1 0.664 -1.225 0.127 0.095 1.597 0.728 -1.023 0.717 0.000 0.898 -0.711 -0.629 2.215 0.818 -1.679 1.281 0.000 1.421 -0.176 1.716 3.102 0.889 0.985 0.984 0.703 0.914 0.856 0.753 +0 1.267 1.307 -1.730 0.879 -1.497 1.341 0.365 0.630 0.000 1.544 0.831 -0.947 2.215 1.718 0.656 0.206 2.548 1.125 1.254 -1.078 0.000 0.915 0.917 0.986 1.159 1.497 1.190 1.309 +0 0.990 -0.020 -0.828 1.176 0.791 0.667 -0.730 1.690 0.000 0.984 -0.325 0.586 1.107 0.734 0.152 -0.016 2.548 0.545 -1.848 -0.110 0.000 1.184 1.026 1.485 0.914 0.519 0.764 0.774 +0 0.875 -0.123 -0.007 0.826 1.533 0.484 -0.943 1.489 2.173 0.250 -0.037 -0.794 0.000 0.342 1.798 0.825 0.000 0.389 -1.262 -0.533 3.102 0.694 0.905 1.158 0.643 0.464 0.606 0.586 +1 0.998 -0.335 1.612 1.479 -1.110 
0.665 1.011 0.463 2.173 0.403 0.937 -0.989 0.000 0.969 -0.171 -0.083 2.548 0.786 0.486 1.181 0.000 0.746 0.696 1.070 0.915 0.797 0.943 0.758 +1 1.387 0.206 1.244 0.159 -0.538 1.051 -2.343 -1.162 0.000 1.099 1.305 0.538 0.000 0.902 0.246 0.180 0.000 0.731 -0.470 1.453 3.102 0.888 0.921 0.988 0.529 0.220 0.610 0.620 +1 2.735 0.187 -1.566 0.360 -0.812 2.423 1.770 0.056 0.000 0.664 0.203 -1.164 0.000 1.491 0.473 0.631 2.548 1.331 -0.279 1.600 0.000 1.006 1.129 0.992 0.704 0.891 0.864 0.816 +0 0.336 0.941 0.226 0.291 1.219 1.203 -0.054 1.662 2.173 1.053 0.308 -0.318 2.215 1.213 -0.790 -0.281 0.000 0.434 -0.698 1.241 0.000 0.784 0.823 0.990 0.912 1.648 0.985 0.822 +0 0.900 0.318 1.416 0.455 0.801 0.916 0.734 -0.880 0.000 0.895 -0.189 0.525 2.215 0.878 -0.114 -0.627 0.000 0.738 0.990 1.250 3.102 0.807 0.885 0.982 0.690 0.707 0.822 0.798 +0 0.482 1.737 -1.519 0.486 -0.290 0.775 0.848 -0.525 0.000 0.691 0.563 0.012 2.215 1.810 0.613 1.517 2.548 0.711 2.030 1.298 0.000 1.504 1.033 0.983 0.823 1.162 0.947 0.786 +0 0.915 -0.392 -1.621 1.790 -1.036 0.661 0.162 0.284 0.000 0.788 -0.220 0.889 1.107 0.278 -0.542 1.165 0.000 0.596 1.403 0.584 0.000 0.840 0.766 0.980 0.733 0.819 0.756 0.791 +0 1.519 0.837 0.953 1.421 1.366 0.501 -2.876 1.470 0.000 1.043 0.545 -0.494 0.000 0.629 1.050 -0.097 2.548 0.836 -0.864 -0.492 3.102 4.465 2.349 0.995 0.828 0.761 1.648 1.965 +1 0.752 0.595 -0.077 2.233 0.518 1.511 -1.437 -1.427 0.000 1.145 -0.622 0.201 0.000 0.688 -0.854 1.665 1.274 1.026 0.368 -1.093 3.102 3.012 1.654 0.991 0.928 0.619 1.145 1.319 +0 3.405 0.177 -0.495 3.120 -0.598 3.668 -0.671 1.142 0.000 1.029 -0.760 -0.709 2.215 0.674 0.051 1.569 2.548 0.567 -1.222 1.353 0.000 0.984 0.921 0.962 0.878 0.872 1.253 1.931 +0 0.662 -1.343 1.504 1.003 -0.713 0.840 -2.135 0.066 0.000 1.028 -0.400 -1.564 2.215 1.313 -0.597 0.880 2.548 0.409 -0.852 -0.657 0.000 0.762 1.118 1.028 0.823 1.008 1.025 0.869 +1 1.314 0.194 0.837 0.813 -0.268 0.510 -1.054 -0.093 0.000 0.583 -0.897 -1.019 2.215 1.235 -1.004 1.657 2.548 1.278 1.457 -0.950 0.000 0.912 0.870 1.201 0.943 0.607 0.832 0.767 +1 0.411 0.641 1.160 1.004 -0.152 1.167 -0.001 -1.224 1.087 1.019 0.628 -0.430 0.000 2.239 -1.146 1.285 0.000 0.653 -1.928 0.531 0.000 0.860 1.151 0.987 0.522 1.034 0.801 0.726 +1 1.737 -0.307 -0.520 1.292 -1.310 1.885 1.265 0.975 0.000 1.485 0.795 -0.672 2.215 1.476 0.084 -1.228 2.548 0.816 -1.002 1.226 0.000 0.781 1.904 1.354 1.165 0.955 1.542 1.551 +0 1.297 0.338 1.480 1.147 0.897 1.318 1.038 -0.858 0.000 1.061 0.926 0.358 2.215 0.405 1.064 -1.409 0.000 0.526 -0.282 0.058 3.102 0.632 0.788 0.990 1.024 0.500 0.789 0.924 +0 0.818 0.367 -0.688 1.179 0.522 1.658 0.961 -1.725 2.173 1.074 0.676 0.211 0.000 1.268 0.212 -0.210 1.274 0.425 -0.222 1.503 0.000 0.921 0.717 1.207 1.429 1.896 1.134 0.970 +1 1.381 0.980 -1.538 1.033 1.460 0.503 -1.005 0.352 2.173 0.396 1.179 0.762 0.000 0.386 -0.942 -0.352 0.000 0.717 0.187 -0.606 0.000 0.969 0.837 0.981 0.775 0.742 0.874 0.755 +1 0.835 0.224 0.421 1.738 1.099 0.989 -0.050 -0.423 1.087 0.970 -1.097 -0.980 1.107 0.561 0.833 -1.479 0.000 0.623 -0.847 1.116 0.000 0.870 0.994 0.987 1.308 1.071 1.094 0.903 +1 0.289 -1.889 1.685 1.037 1.414 0.696 -0.652 1.083 2.173 0.651 -0.904 0.175 0.000 1.117 0.899 -0.945 0.000 1.924 -0.359 -0.554 3.102 1.829 1.209 0.992 0.632 1.227 1.006 0.892 +0 1.089 1.102 0.742 0.763 0.279 0.892 1.926 -1.022 0.000 0.605 0.113 0.873 0.000 1.059 0.706 -1.600 2.548 0.856 0.354 0.394 0.000 0.736 0.639 0.997 0.764 0.297 0.691 0.623 +0 0.499 2.010 -1.076 0.983 -1.033 1.110 0.789 -0.668 2.173 2.952 
-0.292 0.898 2.215 0.573 -0.475 -0.829 0.000 0.498 -1.998 1.576 0.000 0.803 1.497 0.971 0.699 3.046 1.642 1.419 +1 1.564 -0.959 0.816 1.185 -0.668 0.416 -1.084 -0.424 0.000 1.101 0.202 -1.012 2.215 0.968 2.677 1.552 0.000 1.123 0.002 -0.014 3.102 0.808 0.943 1.834 1.414 0.793 0.964 1.165 +1 1.060 -0.270 1.452 0.472 -0.410 0.773 -0.366 0.482 0.000 0.648 0.454 1.623 2.215 0.464 0.877 -0.859 0.000 1.075 -1.284 -0.335 3.102 0.818 0.955 0.987 0.606 1.157 0.791 0.698 +0 1.876 0.736 1.723 0.744 1.296 0.965 0.891 0.400 2.173 0.675 1.451 -1.019 0.000 1.084 -0.054 -0.970 2.548 1.041 -0.162 0.584 0.000 0.669 0.964 0.983 1.243 1.360 0.997 0.967 +1 1.270 0.291 0.697 1.420 -1.208 0.501 -0.455 -1.257 0.000 0.777 -0.006 1.533 0.000 1.963 0.811 -0.298 2.548 0.655 -2.060 0.956 0.000 0.834 0.754 1.841 1.328 0.822 0.898 0.888 +0 0.630 0.241 -0.805 1.593 1.186 0.944 -0.239 -0.444 2.173 0.864 -0.897 1.203 1.107 0.487 0.423 0.020 0.000 0.749 -0.893 -1.140 0.000 0.806 0.814 1.353 1.198 1.402 0.980 0.798 +0 0.963 2.277 -1.672 0.846 -1.138 0.614 0.839 -0.358 1.087 1.223 0.209 0.622 1.107 0.269 0.097 -1.568 0.000 0.423 1.331 1.392 0.000 0.348 0.623 0.978 0.940 1.067 1.049 0.774 +0 0.800 0.949 0.174 1.257 0.968 0.432 0.320 -0.724 1.087 0.360 -0.926 0.264 0.000 0.788 -0.306 0.973 0.000 0.479 -0.987 1.441 3.102 0.550 0.736 0.984 0.950 0.599 0.755 0.722 +0 1.212 -1.496 1.480 0.614 0.562 1.167 -0.860 -0.715 1.087 1.128 -0.045 1.352 2.215 1.270 -2.696 -0.045 0.000 0.877 -1.125 0.070 0.000 0.793 0.878 0.988 1.191 1.768 1.062 0.843 +0 1.234 -1.096 -0.623 0.897 0.094 0.571 0.532 -1.720 0.000 0.874 0.100 1.168 1.107 0.992 -0.355 -0.933 2.548 0.792 -0.822 0.533 0.000 1.260 0.948 0.992 1.079 0.970 0.789 0.785 +0 1.300 0.825 0.617 1.870 0.057 1.122 -0.787 1.190 0.000 0.991 -1.147 1.536 0.000 2.030 2.350 -0.073 0.000 3.785 -0.034 -0.523 3.102 0.810 0.865 1.043 1.451 1.067 1.306 1.502 +0 2.315 1.083 -1.208 0.454 -0.964 1.399 0.743 -1.519 2.173 1.383 0.380 0.301 0.000 2.614 1.019 0.587 2.548 1.170 0.847 -0.066 0.000 0.753 0.907 0.987 0.908 2.298 1.373 1.305 +1 0.648 -0.362 -1.581 0.181 -0.801 1.511 -2.595 1.587 0.000 1.682 0.491 0.356 0.000 1.618 0.955 -0.790 2.548 1.033 -0.317 0.304 0.000 0.790 0.947 0.989 0.764 0.917 0.953 0.842 +0 0.710 -0.656 -0.163 0.577 1.274 0.603 -1.241 -1.196 0.000 1.109 0.345 1.624 2.215 2.525 -0.258 0.350 2.548 1.468 1.293 -1.192 0.000 0.966 1.012 0.988 0.805 1.722 1.185 0.955 +1 0.839 -1.630 0.660 0.761 -0.788 0.398 1.435 -1.140 2.173 0.283 -2.040 0.277 0.000 0.869 -0.486 1.380 2.548 0.655 0.164 0.561 0.000 0.843 1.005 1.068 1.592 1.020 1.074 1.092 +1 1.118 -0.287 0.737 1.336 0.133 1.924 -0.331 -1.474 2.173 1.520 1.021 0.587 0.000 0.445 1.104 -0.808 0.000 0.963 -0.506 -0.447 3.102 1.204 1.176 0.993 1.770 1.169 1.454 1.262 +1 0.756 0.428 -1.164 0.594 1.552 0.804 -0.922 -1.307 0.000 0.569 -1.601 0.145 0.000 0.756 -0.765 -0.497 2.548 1.691 -0.168 0.399 3.102 0.918 1.032 0.986 0.681 0.684 0.665 0.639 +0 1.973 0.057 -1.278 0.283 -1.287 1.494 0.749 0.736 0.000 2.003 -0.336 -0.829 1.107 1.482 -0.134 0.798 2.548 0.923 1.009 0.268 0.000 0.842 0.918 0.984 0.999 1.830 1.492 1.308 +0 0.487 -0.621 -0.212 1.328 1.615 1.087 -0.444 -0.598 2.173 0.588 0.057 -1.418 0.000 0.916 0.586 1.439 2.548 2.264 -0.145 0.354 0.000 0.776 0.868 1.111 1.032 1.395 0.923 0.844 +1 0.898 1.254 -1.200 0.366 0.031 0.662 -0.372 1.674 1.087 0.341 0.178 -0.334 0.000 0.583 1.195 0.813 0.000 1.579 -0.578 0.602 3.102 0.959 0.914 0.987 1.474 0.907 1.167 1.130 +0 0.356 -0.732 -0.492 1.340 0.052 1.112 0.301 -1.328 2.173 0.852 0.625 1.530 0.000 1.269 
0.046 1.055 2.548 1.305 -1.135 -0.082 0.000 0.935 0.924 0.990 1.461 1.255 1.549 1.325 +1 0.513 -0.782 -1.207 1.613 0.194 1.398 1.928 -1.740 0.000 0.837 0.731 -0.250 2.215 1.164 -1.647 0.104 0.000 1.527 -0.782 1.631 0.000 1.587 0.923 1.201 1.043 0.535 1.004 0.869 +1 0.964 0.223 0.913 0.725 0.326 0.564 -0.462 -0.771 0.000 0.527 -1.180 -0.063 0.000 1.085 -0.212 -1.247 2.548 0.838 1.076 0.872 0.000 0.824 0.724 0.985 0.595 0.165 0.579 0.543 +0 0.968 1.482 0.538 1.529 0.021 1.084 -0.646 -1.345 0.000 0.951 1.131 1.323 2.215 0.592 -1.271 -0.631 0.000 0.730 0.022 0.299 3.102 1.025 0.940 0.982 1.090 0.751 1.073 1.546 +0 0.504 -1.328 0.924 0.770 -1.059 0.726 -0.445 1.436 2.173 1.073 0.095 -0.129 2.215 0.620 -0.711 -0.192 0.000 1.110 0.578 -1.634 0.000 1.149 0.947 0.990 1.022 1.332 1.084 0.979 +1 0.964 0.938 -1.532 0.869 -0.428 0.937 0.867 0.516 2.173 1.073 -0.085 1.250 0.000 1.162 0.422 -1.526 2.548 1.533 0.134 -0.636 0.000 0.658 1.034 1.064 0.698 1.282 0.858 0.773 +1 0.556 1.309 1.023 1.582 -1.642 0.849 0.337 -1.456 2.173 1.516 -1.483 0.226 0.000 0.806 0.760 -0.406 0.000 0.730 1.285 1.600 0.000 0.878 0.927 0.991 0.694 0.932 0.729 0.685 +0 1.115 -0.021 0.076 1.497 -0.153 0.791 -1.228 -0.995 2.173 0.596 -0.165 -1.304 0.000 1.064 -2.332 1.206 0.000 1.189 0.348 1.088 3.102 0.855 1.124 0.981 1.002 1.364 1.147 1.128 +0 1.929 0.412 -1.051 1.666 -0.692 0.875 0.392 1.457 2.173 0.904 0.454 0.779 0.000 0.744 -0.235 0.186 0.000 1.482 0.884 0.105 3.102 0.793 0.950 0.989 1.106 1.203 1.064 0.983 +1 1.393 0.238 -1.492 0.735 -0.652 0.789 0.509 0.871 1.087 0.740 0.327 0.177 2.215 0.433 -0.919 -1.370 0.000 0.817 -0.188 0.697 0.000 0.682 0.754 0.987 0.873 0.665 0.785 0.684 +1 1.163 0.604 1.308 0.456 0.120 1.343 -0.380 -1.554 0.000 0.885 0.210 0.200 0.000 0.779 1.709 0.008 0.000 1.091 0.954 0.243 0.000 1.203 0.730 0.985 0.569 0.259 0.459 0.532 +1 1.335 -1.804 1.014 0.617 -1.607 0.898 0.167 -0.415 2.173 0.480 0.125 0.376 0.000 0.858 0.236 1.408 0.000 0.816 -1.251 -0.760 3.102 0.792 0.969 0.991 0.730 0.893 1.180 1.071 +0 0.758 1.354 1.027 1.626 0.874 0.796 1.262 -0.916 2.173 0.815 0.569 -1.704 1.107 1.179 1.838 -0.625 0.000 0.772 1.072 0.636 0.000 1.033 1.081 0.993 1.294 0.877 0.922 0.914 +0 0.789 1.167 -1.095 0.533 -0.822 0.351 1.551 -0.504 0.000 0.819 1.442 0.458 0.000 0.279 -0.931 1.285 1.274 0.738 -0.009 1.214 1.551 0.869 0.927 0.978 0.656 0.184 0.630 0.655 +0 0.744 2.088 1.037 2.002 1.680 0.950 -0.851 -0.129 0.000 0.371 0.013 -0.708 1.107 0.818 0.480 1.540 1.274 0.941 -1.357 0.505 0.000 0.967 0.789 0.988 1.090 0.547 0.982 1.976 +0 0.692 -0.712 1.636 0.879 0.222 1.467 -0.444 0.380 0.000 0.960 -0.006 -1.307 0.000 0.498 1.256 -1.075 1.274 0.821 1.683 1.515 0.000 1.146 0.973 1.033 0.889 0.686 0.821 0.730 +1 1.003 -1.698 -1.084 0.919 -0.793 0.702 0.125 0.850 2.173 0.458 -1.539 1.632 0.000 0.821 0.232 -1.528 2.548 1.212 -1.080 0.578 0.000 0.858 0.947 0.984 0.844 0.797 0.896 0.769 +0 0.367 -1.548 1.143 1.634 -0.738 0.816 0.678 0.997 2.173 0.301 -0.355 1.292 1.107 0.318 1.078 0.578 0.000 0.419 -0.292 -0.617 0.000 0.491 0.584 1.065 0.739 0.444 1.109 0.862 +0 0.867 -0.113 -0.775 0.235 1.385 1.683 0.038 -1.569 2.173 1.237 -0.927 -0.123 0.000 1.755 0.024 0.517 1.274 1.271 -0.542 0.513 0.000 0.920 0.978 0.990 1.073 2.038 1.406 1.117 +1 0.655 1.488 -0.580 0.602 -0.301 0.512 -0.530 0.768 2.173 0.879 -0.992 -1.541 2.215 0.584 -1.491 1.344 0.000 0.460 0.299 -0.125 0.000 0.869 0.725 0.999 0.901 0.894 0.870 0.755 +1 0.353 1.279 -0.718 0.569 -0.442 0.670 -0.161 1.726 0.000 0.753 -0.412 1.017 0.000 1.097 1.175 0.267 2.548 1.128 1.081 
-1.245 3.102 0.923 1.072 0.985 0.917 0.832 0.925 0.836 +1 1.937 -0.148 1.216 1.822 1.606 1.331 -1.234 0.913 2.173 1.825 1.178 -0.544 2.215 2.258 -0.339 -0.193 0.000 0.963 -0.907 -1.475 0.000 0.969 1.647 0.980 1.514 4.216 2.185 1.792 +0 0.789 0.965 0.502 2.311 0.839 1.527 -0.456 -0.941 0.000 0.611 0.297 -1.154 0.000 0.306 0.834 -0.488 2.548 0.489 -0.409 0.813 3.102 0.894 0.861 0.986 0.684 0.354 0.561 0.946 +0 0.606 -1.354 -1.573 1.494 -0.555 0.911 0.304 -1.586 2.173 1.039 -1.243 0.979 2.215 1.351 -1.269 -0.133 0.000 0.866 -1.702 -0.109 0.000 0.396 0.952 1.047 1.389 1.644 1.237 1.074 +0 1.967 -0.694 -1.601 0.818 1.603 1.037 -0.752 0.300 1.087 0.717 -0.177 -0.764 0.000 0.821 0.333 1.279 2.548 1.912 -1.018 -0.302 0.000 1.035 1.101 0.983 0.706 1.119 0.991 1.003 +1 0.411 0.204 -0.780 0.887 0.824 1.075 0.886 -0.505 2.173 0.862 -0.098 0.811 0.000 0.533 -0.535 -1.706 0.000 1.011 0.961 -1.544 3.102 0.837 0.857 0.989 1.240 0.900 0.915 0.852 +1 0.657 1.445 1.098 1.277 -0.945 0.938 0.757 -0.061 2.173 0.608 2.351 1.580 0.000 0.611 0.556 1.626 0.000 0.396 1.291 0.609 0.000 0.966 0.952 1.223 1.059 0.743 0.887 0.812 +1 3.063 0.272 0.964 2.048 1.233 3.061 1.445 -0.575 0.000 1.071 -0.001 1.490 2.215 0.438 -0.546 0.133 2.548 0.600 -0.758 -1.168 0.000 3.243 2.116 1.006 0.820 0.719 1.721 1.871 +1 1.723 0.612 0.471 0.785 0.990 0.761 0.785 -0.660 0.000 0.663 -0.481 -0.933 2.215 0.758 -0.394 -1.639 2.548 0.742 0.866 -1.045 0.000 0.795 0.692 0.996 1.015 0.449 0.888 0.744 +1 1.256 -1.245 1.041 1.088 -0.685 0.635 -0.940 -0.793 2.173 1.071 -1.157 -0.058 2.215 1.114 -0.769 1.437 0.000 0.380 2.459 1.510 0.000 0.836 0.966 1.619 0.992 0.761 0.783 0.757 +0 2.093 -0.848 -0.264 1.746 0.123 2.167 -0.456 -1.651 0.000 1.288 -1.061 1.637 0.000 2.372 -0.986 0.267 1.274 1.134 2.072 0.010 0.000 1.302 0.994 0.979 0.745 1.532 1.505 1.552 +1 0.734 0.737 1.555 1.508 -1.273 0.980 1.178 0.289 0.000 0.699 1.475 0.850 0.000 1.622 0.814 -0.869 2.548 0.386 -0.182 1.079 3.102 0.899 0.717 0.986 0.787 0.692 0.827 0.842 +1 1.094 -0.729 -0.742 0.727 0.108 1.036 -0.412 -1.246 2.173 0.511 0.570 1.476 1.107 1.209 -0.728 0.711 0.000 1.164 -0.553 -0.015 0.000 0.801 0.907 0.980 0.964 0.880 0.882 0.827 +1 0.513 -0.061 1.474 0.561 -0.268 0.646 -1.420 -1.207 0.000 0.918 -2.151 -0.662 0.000 1.337 -0.586 0.791 2.548 0.941 -1.164 1.435 3.102 1.012 0.884 0.989 0.714 0.575 0.901 0.769 +1 1.050 -1.102 -0.324 1.257 -0.088 0.881 -0.543 1.327 2.173 0.592 -1.146 0.756 0.000 0.858 -2.508 -0.138 0.000 2.901 -0.447 -1.405 3.102 1.236 1.467 0.996 1.539 1.062 1.275 1.182 +0 0.467 -0.884 0.882 2.474 -1.675 1.283 0.612 0.014 2.173 0.253 0.830 0.429 0.000 1.027 -0.653 -0.994 2.548 0.941 1.163 -1.564 0.000 0.644 0.956 1.107 0.818 1.526 1.384 1.157 +0 1.095 -0.492 0.518 1.169 -0.092 0.933 1.531 -1.238 0.000 0.490 0.407 0.589 2.215 0.646 1.153 1.588 0.000 0.550 1.114 1.053 3.102 0.804 0.979 0.988 0.954 0.293 0.667 1.096 +1 0.978 -0.225 1.629 1.119 -1.151 0.582 -1.567 0.461 2.173 0.842 -0.333 0.058 1.107 0.816 -0.070 -1.497 0.000 0.378 0.497 0.443 0.000 0.764 0.870 0.994 0.967 0.773 0.846 0.710 +1 0.277 -0.553 -1.277 1.442 0.502 0.713 1.311 1.685 2.173 1.561 0.387 -1.406 1.107 1.244 -0.143 0.047 0.000 1.353 0.133 0.970 0.000 0.992 1.192 0.991 1.005 0.919 0.938 0.871 +0 1.204 -0.223 -0.856 0.471 0.852 0.666 -0.365 -0.228 0.000 1.382 -0.142 0.975 2.215 1.153 -0.631 -1.474 2.548 0.891 1.858 -0.748 0.000 0.688 0.976 1.043 0.701 1.141 0.799 0.724 +1 1.416 -0.553 -1.506 1.680 -1.279 1.137 -0.304 0.921 0.000 0.656 2.054 -0.234 0.000 0.368 0.867 0.494 2.548 0.718 -1.423 -0.011 0.000 
1.468 0.978 0.999 0.832 0.160 0.677 0.887 +1 0.641 -0.956 1.518 0.402 0.962 1.253 0.420 -0.128 2.173 1.319 -0.350 1.366 0.000 0.617 -0.577 -0.243 0.000 0.997 0.269 -0.940 1.551 0.867 0.904 0.987 1.116 0.793 0.976 0.954 +1 1.186 -0.091 -0.145 0.797 1.523 0.672 1.341 1.351 1.087 0.347 2.089 0.967 0.000 0.755 1.065 -0.553 2.548 1.399 1.169 -1.264 0.000 0.926 0.942 1.344 0.879 0.882 0.845 0.821 +0 1.300 2.183 1.344 1.412 0.793 0.995 1.235 1.051 2.173 1.692 2.285 -1.089 0.000 1.453 0.543 -0.260 0.000 1.873 1.396 -0.768 3.102 1.178 0.946 0.987 0.752 1.475 1.000 1.011 +0 0.641 -0.659 -0.537 0.619 0.808 1.646 -0.454 0.097 2.173 1.024 0.109 1.578 2.215 1.788 -0.616 -1.307 0.000 1.430 -0.852 -1.740 0.000 0.738 0.892 0.981 0.883 1.938 1.326 1.049 +0 0.566 -2.108 -0.472 1.119 -1.237 0.596 -0.318 1.229 0.000 0.725 -0.863 -1.263 2.215 0.579 -1.159 0.158 2.548 0.445 2.261 0.215 0.000 1.819 1.416 0.998 0.584 0.673 1.043 1.067 +1 0.778 -1.146 -0.244 1.537 -1.127 1.069 2.333 0.591 0.000 1.259 0.227 1.491 2.215 0.830 -1.291 0.290 0.000 1.072 -0.629 -1.151 3.102 0.853 0.811 1.081 1.396 0.903 1.051 0.944 +1 2.092 -0.227 -1.335 0.305 -0.052 1.122 0.108 0.402 2.173 0.519 0.408 -0.050 0.000 0.393 -1.188 1.155 0.000 0.752 -0.585 1.491 3.102 0.912 0.803 1.013 0.612 0.904 0.876 0.742 +0 0.409 1.107 1.142 0.696 0.010 1.167 0.217 1.550 0.000 0.683 0.689 -0.871 2.215 0.932 0.222 0.454 0.000 1.070 1.095 -0.316 0.000 0.937 0.796 0.992 0.561 0.200 0.583 0.638 +1 1.582 -0.668 1.466 1.026 -1.531 1.049 -0.172 -0.235 2.173 0.487 -0.007 -1.327 0.000 0.838 0.754 0.083 0.000 0.586 0.174 1.187 3.102 1.026 0.935 0.991 0.643 0.811 0.956 0.891 +0 0.734 -2.036 -0.436 0.074 -1.579 1.048 0.053 1.463 2.173 0.919 0.428 -1.313 2.215 1.462 -0.278 -0.261 0.000 1.998 -0.060 0.304 0.000 0.946 1.266 0.986 1.217 0.906 1.082 1.015 +1 0.627 -0.877 1.299 1.163 0.143 2.519 -0.384 -1.187 0.000 1.407 -0.261 0.525 2.215 1.373 0.590 0.685 1.274 0.853 -0.587 0.905 0.000 0.438 0.511 1.020 0.748 0.733 0.733 0.561 +1 0.903 0.291 -1.699 0.365 -0.039 1.402 0.932 0.683 2.173 0.841 0.950 -1.339 0.000 1.588 -0.216 -0.794 0.000 0.869 0.355 -0.173 3.102 1.014 0.755 0.982 0.990 0.870 1.080 0.901 +1 2.139 -0.406 -0.271 0.629 -0.757 0.906 -0.080 1.461 2.173 0.615 -0.422 0.231 0.000 0.783 -0.600 0.966 0.000 0.459 2.266 -1.577 0.000 0.885 1.061 0.989 0.777 0.464 0.877 0.797 +0 0.560 -2.215 1.475 0.350 -1.638 0.769 -0.346 -1.270 2.173 0.780 0.125 0.690 2.215 0.357 -2.373 0.318 0.000 0.423 0.856 -0.363 0.000 1.307 1.033 0.989 0.802 1.151 0.850 0.775 +1 0.861 -0.598 -0.461 0.844 0.583 0.923 -0.875 -1.358 0.000 0.987 -0.028 1.597 2.215 1.176 0.351 0.279 0.000 1.718 -1.169 -0.179 3.102 0.760 1.101 0.986 0.903 1.464 0.903 0.839 +1 1.157 -0.452 1.219 1.024 -1.381 1.070 -0.037 0.878 0.000 2.073 -0.052 -0.382 1.107 0.612 -1.241 -1.242 0.000 0.961 0.231 -1.103 1.551 1.720 1.187 1.081 1.413 0.798 1.119 1.008 +1 0.953 0.155 0.346 0.571 1.502 1.057 0.408 1.307 2.173 1.333 0.178 -0.325 0.000 1.189 -0.393 -1.554 2.548 0.668 1.165 -0.433 0.000 0.803 1.170 0.987 0.771 0.960 1.048 0.876 +1 0.653 1.237 1.537 1.887 -0.949 1.417 0.379 0.330 1.087 0.677 2.468 1.641 0.000 0.493 -0.718 -0.361 0.000 0.513 0.665 -0.949 0.000 0.939 0.741 1.207 1.635 0.851 1.054 1.010 +0 0.912 1.870 1.192 0.644 1.225 0.848 -0.026 -0.494 2.173 1.045 1.821 1.599 0.000 0.397 0.265 0.404 0.000 0.406 0.480 1.365 1.551 1.228 0.691 0.994 1.284 0.646 0.900 0.823 +0 0.855 -0.682 1.111 1.573 -0.102 0.522 -0.669 -0.405 2.173 0.525 -1.283 -1.651 0.000 1.113 -0.178 1.622 2.548 0.448 -1.790 0.200 0.000 0.764 0.961 1.426 0.814 
0.947 0.772 0.789 +1 0.763 0.313 1.712 1.152 -1.238 0.858 0.549 0.342 2.173 0.455 -0.543 0.914 2.215 0.426 2.238 1.678 0.000 1.174 0.357 -0.737 0.000 0.743 0.822 0.981 0.919 0.703 0.863 0.763 +1 0.791 0.128 0.592 0.643 1.393 0.462 -1.042 -0.468 0.000 0.595 0.717 1.040 2.215 1.078 -0.163 -0.992 0.000 1.421 0.301 1.735 3.102 0.888 1.031 0.986 0.757 0.511 0.778 0.694 +1 0.849 1.071 -1.294 0.579 -0.059 0.499 0.358 -0.341 2.173 0.949 1.211 1.444 0.000 1.155 -0.614 0.582 2.548 0.729 0.600 -1.223 0.000 0.785 0.933 0.982 0.962 0.867 0.877 0.762 +1 0.884 -0.029 -0.007 1.037 -1.297 1.143 -0.421 -1.577 2.173 1.327 -0.283 0.391 0.000 0.753 0.585 0.742 2.548 0.943 2.095 -0.489 0.000 2.971 1.641 1.216 0.989 1.194 1.504 1.219 +1 1.098 0.457 -1.569 1.894 -1.483 1.406 0.019 0.125 2.173 0.592 -2.846 0.911 0.000 0.690 0.294 -0.978 2.548 0.745 1.786 0.291 0.000 5.453 2.993 0.976 1.980 1.046 2.047 2.094 +1 1.082 1.205 -0.401 1.359 -0.834 0.605 -0.565 1.500 1.087 0.467 0.751 0.964 2.215 0.622 0.181 0.116 0.000 0.875 -1.949 1.116 0.000 1.480 1.076 0.983 0.921 0.673 1.161 1.478 +1 1.653 0.394 0.442 0.905 0.344 1.101 0.438 -1.681 2.173 0.590 -0.059 -0.524 0.000 0.647 -0.896 -1.589 0.000 0.572 -0.769 -1.032 3.102 0.899 1.045 0.986 0.956 0.774 1.006 0.922 +0 0.774 -0.272 -0.899 2.040 -1.676 2.358 -0.758 0.050 2.173 0.726 -0.134 0.363 0.000 2.806 -0.368 1.624 1.274 1.769 -1.720 1.650 0.000 0.954 0.926 1.121 0.835 3.220 1.757 1.378 +1 1.008 -0.465 0.520 1.383 0.765 2.309 -1.253 1.461 0.000 1.450 -1.379 -0.419 2.215 2.145 -2.273 -0.193 0.000 1.031 -0.415 -0.495 0.000 0.756 0.769 0.988 0.970 0.929 0.934 0.795 +1 0.538 0.152 -0.995 1.099 0.298 0.560 2.908 -0.471 0.000 1.200 -1.545 1.416 0.000 1.377 -1.799 -0.896 0.000 2.433 -0.743 1.611 1.551 0.875 0.989 0.987 1.147 1.116 0.817 0.859 +0 1.545 0.627 1.545 1.826 -1.433 1.797 -0.206 -0.033 1.087 0.695 0.021 1.242 2.215 0.591 1.253 -0.300 0.000 0.585 -0.834 1.111 0.000 1.148 0.842 1.031 2.152 1.512 1.424 1.138 +1 1.100 -0.890 1.619 0.303 -0.686 0.931 -0.064 -0.935 2.173 1.167 -0.164 0.506 2.215 2.051 0.342 0.916 0.000 1.557 -0.761 -0.544 0.000 0.600 1.097 0.984 0.825 1.479 0.901 0.802 +1 0.869 0.410 0.823 0.869 -0.769 0.611 -1.339 1.129 0.000 0.927 -0.956 -1.736 2.215 1.476 0.342 -0.119 2.548 0.596 0.281 -1.138 0.000 1.198 0.840 1.192 0.787 1.535 0.965 0.890 +0 0.436 1.282 0.764 1.658 -0.173 1.575 -0.227 -1.585 0.000 1.109 -0.372 0.295 1.107 0.569 0.280 -0.249 1.274 0.563 0.443 1.425 0.000 0.818 0.952 0.985 1.634 0.496 1.033 1.310 +1 0.299 -1.278 -0.227 1.038 -1.736 1.601 0.163 0.979 2.173 1.294 -0.142 -0.088 0.000 1.035 0.352 -1.208 0.000 0.762 1.247 -0.945 0.000 0.617 0.545 0.990 1.027 0.889 0.893 0.775 +1 2.021 -0.876 0.247 0.713 0.519 0.950 -0.088 -1.467 2.173 0.568 0.272 0.969 2.215 0.922 -0.249 1.496 0.000 1.667 0.915 -0.654 0.000 0.832 0.745 0.998 1.421 0.899 0.969 0.841 +0 0.359 2.293 0.673 0.490 -0.554 0.700 1.846 -1.085 0.000 0.803 1.221 0.429 0.000 0.827 0.493 -1.616 2.548 0.662 0.209 -0.236 3.102 1.568 0.918 0.977 0.670 0.542 0.693 0.646 +1 0.407 -1.107 1.742 1.758 -0.039 1.561 2.138 -1.414 0.000 1.397 -0.644 0.032 2.215 1.487 -1.243 0.367 2.548 2.333 -0.920 1.484 0.000 1.032 1.046 1.172 0.768 0.714 0.949 0.847 +0 0.516 0.963 -1.547 1.652 0.920 0.386 0.933 0.266 0.000 0.815 0.976 -1.038 2.215 0.942 -0.860 0.284 2.548 0.978 -0.748 1.581 0.000 0.971 0.909 1.017 0.920 1.367 0.979 0.858 +1 0.594 0.899 0.975 0.617 0.334 0.466 1.160 -1.420 0.000 0.621 -0.586 -1.004 0.000 0.448 0.496 1.210 2.548 0.505 -2.133 0.645 0.000 1.107 0.910 0.988 0.477 0.125 0.592 0.597 +1 0.321 
1.752 -1.487 0.733 -1.266 0.837 1.549 1.120 0.000 0.819 -0.829 0.795 2.215 1.285 2.335 -0.822 0.000 0.729 0.591 -0.286 3.102 2.076 1.330 0.993 0.953 0.834 1.575 1.283 +1 1.395 1.611 -0.320 1.482 0.462 0.592 1.975 -0.947 0.000 0.354 -2.337 1.185 0.000 1.308 1.023 -1.608 2.548 0.863 0.104 0.918 3.102 0.795 0.907 1.291 1.225 0.748 0.907 0.795 +0 0.639 -0.283 0.412 1.448 0.813 0.914 0.513 -1.089 1.087 0.606 0.221 -0.010 2.215 1.402 -1.311 1.596 0.000 1.134 1.274 -0.516 0.000 3.196 1.873 0.978 1.640 0.919 1.303 1.298 +1 1.236 -0.877 -0.832 1.648 0.440 1.171 -0.181 -1.393 2.173 0.389 -1.299 -1.074 0.000 1.279 -1.238 1.040 2.548 0.802 -0.754 0.308 0.000 0.878 0.974 1.802 1.156 1.562 1.182 0.941 +1 0.610 0.505 -1.070 0.959 0.733 1.376 -0.030 1.522 2.173 1.753 1.712 -0.276 0.000 0.640 -0.039 -1.336 2.548 0.973 -0.221 0.504 0.000 0.659 1.171 1.058 0.651 0.625 0.744 0.721 +1 0.658 -0.236 -1.561 1.060 1.141 0.768 0.150 -1.098 0.000 1.025 0.491 0.862 2.215 1.746 0.827 0.181 2.548 1.067 1.292 -0.813 0.000 1.105 1.302 0.987 1.006 0.868 1.067 1.110 +0 1.452 0.447 -0.655 0.779 1.571 0.531 0.251 -0.130 0.000 0.629 1.133 1.639 2.215 0.357 1.307 1.225 2.548 0.559 2.229 0.943 0.000 0.775 0.787 1.336 0.745 0.196 0.575 0.573 +1 0.670 -1.632 -1.241 0.149 0.602 0.741 -0.662 -0.916 1.087 0.907 0.393 1.103 0.000 1.128 -0.605 0.311 2.548 0.844 -1.330 -0.454 0.000 1.750 1.154 0.995 0.668 1.019 0.909 0.776 +1 0.772 0.504 1.460 1.306 -1.183 0.974 0.352 0.400 2.173 0.563 0.944 1.643 0.000 0.987 0.868 -0.298 2.548 0.897 -0.214 0.811 0.000 0.874 0.909 0.987 0.813 0.809 0.849 0.756 +0 2.074 1.036 -1.410 2.308 -0.938 1.880 -1.353 0.566 2.173 0.899 -1.993 1.739 0.000 0.437 -1.148 1.286 0.000 1.839 -1.365 0.154 0.000 0.863 0.836 1.253 0.772 2.725 2.680 2.205 +0 1.075 -1.129 0.553 0.754 -0.035 0.529 0.255 -1.424 0.000 0.976 0.292 -0.665 1.107 1.035 0.240 1.432 2.548 0.568 1.430 1.461 0.000 0.794 0.838 0.990 0.912 1.014 0.833 0.809 +0 0.443 1.680 0.666 3.283 1.198 1.754 0.431 -0.322 0.000 0.805 -0.602 -0.609 2.215 0.821 0.595 -0.815 0.000 2.246 0.867 1.629 3.102 0.957 0.982 0.989 1.296 1.572 1.904 1.927 +0 0.913 -2.064 -0.613 1.183 -0.363 1.050 0.660 0.674 2.173 0.741 -0.711 1.311 0.000 0.656 -2.497 -1.247 0.000 0.960 -1.580 -1.686 0.000 0.824 0.684 0.987 1.994 1.465 1.354 1.175 +1 1.570 -0.554 1.279 0.461 -1.541 0.761 -0.344 -0.007 2.173 0.800 -0.252 -0.827 0.000 0.525 -1.435 0.459 0.000 0.521 0.757 -1.275 3.102 1.151 0.893 0.988 0.638 0.753 0.726 0.724 +0 0.356 0.249 0.242 0.692 -1.534 0.458 1.636 -0.829 0.000 0.457 -0.081 1.358 2.215 1.449 1.335 1.000 1.274 1.439 0.585 -0.418 0.000 0.759 0.923 0.987 1.454 0.787 0.931 0.963 +1 0.771 1.091 -0.086 0.909 0.225 1.721 0.354 -0.228 0.000 1.906 0.733 1.259 0.000 2.556 -0.172 -1.486 0.000 1.311 0.310 0.197 3.102 1.242 1.119 0.985 0.510 0.370 0.906 0.889 +0 0.858 0.592 -1.577 0.595 0.822 0.964 0.425 1.079 1.087 1.775 0.817 -0.710 2.215 0.681 -0.053 1.275 0.000 1.006 -1.649 0.016 0.000 0.840 1.405 0.988 1.124 1.964 1.402 1.112 +0 1.190 -1.429 -1.655 0.443 -0.108 0.503 -0.361 -0.088 0.000 0.424 0.693 -0.873 0.000 0.664 0.227 0.582 2.548 1.219 0.404 1.064 3.102 0.845 0.833 0.990 0.866 0.300 0.745 0.733 +0 0.593 -0.108 1.009 0.315 -1.550 0.714 0.761 1.076 0.000 0.800 0.338 -1.453 2.215 1.395 0.365 -0.441 2.548 0.783 0.373 0.155 0.000 0.863 1.011 0.988 0.684 0.888 0.764 0.679 +1 0.557 -0.533 0.220 0.150 0.996 1.686 0.603 -0.903 2.173 1.502 -0.820 0.966 0.000 1.132 0.037 0.555 0.000 1.125 -0.432 -1.336 3.102 0.815 0.921 0.982 1.104 1.028 1.005 0.853 +0 1.185 0.601 0.929 0.752 0.020 0.838 
0.909 -1.534 2.173 0.547 1.628 0.956 0.000 1.582 0.479 -0.727 2.548 1.075 -0.792 -0.239 0.000 1.076 0.939 0.987 0.994 0.996 0.861 0.827 +0 0.906 -0.234 -1.702 0.891 -1.146 1.017 1.583 -0.025 2.173 0.448 0.770 0.702 0.000 1.533 0.720 1.380 2.548 0.898 0.436 -0.827 0.000 0.819 0.899 0.988 0.799 1.618 1.079 0.859 +0 0.660 1.007 1.269 0.596 -0.484 0.966 0.636 1.733 1.087 1.300 0.737 -0.587 2.215 0.911 -0.400 -0.381 0.000 0.852 -0.419 1.295 0.000 0.934 0.936 0.987 0.781 1.433 0.925 0.771 +0 0.790 0.417 1.211 0.383 0.567 1.141 1.773 -1.095 0.000 1.720 0.108 0.569 1.107 0.805 2.065 -1.635 0.000 1.102 0.364 -0.761 3.102 0.906 1.021 0.977 0.802 1.175 1.479 1.326 +1 0.873 -1.555 -1.094 0.611 0.758 0.630 -0.135 -0.970 2.173 0.752 -1.669 1.394 0.000 0.957 -0.997 0.641 0.000 1.425 0.324 0.241 3.102 0.915 1.232 1.007 0.905 0.928 0.967 0.866 +0 0.647 -1.125 0.635 0.244 -0.148 0.821 0.364 -1.277 0.000 0.617 0.804 -0.772 0.000 1.103 0.459 0.886 2.548 0.681 0.463 0.640 3.102 0.752 1.062 0.978 0.537 0.147 0.677 0.669 +1 0.963 -0.376 0.842 1.288 -0.637 0.854 0.341 0.770 0.000 1.064 0.663 -1.226 2.215 0.731 -0.256 -0.510 2.548 0.644 -0.251 -1.236 0.000 1.161 0.907 1.499 1.162 0.733 0.777 0.799 +0 0.465 1.593 1.083 0.111 0.731 0.867 0.147 -1.092 2.173 0.828 -0.849 0.464 2.215 0.633 -0.719 -0.827 0.000 0.881 -0.122 0.774 0.000 0.894 1.006 0.976 0.831 1.397 0.836 0.751 +0 1.770 0.674 1.278 1.333 -1.545 0.956 0.469 0.423 0.000 0.791 0.058 -1.186 2.215 0.757 -0.257 -0.698 2.548 0.409 1.719 -0.875 0.000 1.218 1.016 1.199 0.909 0.378 0.780 0.865 +0 0.813 0.084 0.273 0.847 -0.276 0.781 -1.042 -0.750 2.173 0.693 -0.163 -1.120 0.000 0.870 -0.276 1.498 1.274 1.896 1.576 0.776 0.000 1.488 1.002 0.990 0.804 1.003 0.788 0.788 +0 0.774 0.918 0.674 1.565 0.013 0.850 -0.799 -1.462 2.173 0.643 0.005 1.394 2.215 0.568 0.007 -0.328 0.000 0.390 0.173 1.050 0.000 0.494 0.745 0.990 0.869 0.739 0.987 0.746 +0 1.528 0.589 -0.221 2.069 -0.068 1.517 0.016 -1.721 0.000 1.180 -0.085 1.343 0.000 1.063 0.425 -0.710 2.548 1.068 -0.962 1.472 3.102 0.924 1.027 0.999 1.705 1.051 1.141 1.101 +0 0.479 1.225 -1.623 1.920 -1.209 0.998 -1.466 0.313 2.173 0.477 -1.636 -1.690 0.000 1.197 -1.836 -0.687 0.000 2.006 1.145 0.677 0.000 0.930 1.153 0.980 0.883 0.539 1.209 1.159 +1 2.876 0.394 -1.413 0.380 -1.016 1.438 -0.797 0.169 0.000 1.441 -0.050 0.771 2.215 0.523 -0.281 -1.386 2.548 0.838 0.183 -0.518 0.000 1.323 1.043 0.983 1.472 0.867 0.929 1.135 +1 0.670 1.150 -1.180 0.738 0.148 1.007 -0.495 -1.572 2.173 0.645 0.500 0.214 2.215 0.653 1.461 0.537 0.000 0.816 -0.194 0.598 0.000 0.850 1.351 0.987 0.610 1.341 0.905 0.793 +0 0.704 -0.284 -0.879 0.942 -0.158 0.719 -0.294 1.236 2.173 0.904 -1.440 0.868 2.215 0.659 -0.906 -0.903 0.000 0.473 0.658 -0.849 0.000 0.627 0.979 0.987 1.021 0.840 0.993 0.825 +0 1.065 -1.802 -0.187 0.677 -0.865 1.512 -1.647 -0.575 2.173 1.240 -1.524 0.898 2.215 1.767 -0.792 1.199 0.000 1.361 -1.099 -1.691 0.000 0.957 0.911 0.982 0.833 1.958 1.307 1.163 +0 0.395 2.359 0.175 2.788 -1.684 1.351 0.927 -1.664 0.000 1.107 1.478 0.430 2.215 0.506 2.056 -1.137 0.000 0.842 0.507 -0.402 3.102 1.256 1.032 1.446 1.436 0.723 1.085 1.104 +1 1.650 0.515 -0.188 0.135 -0.695 0.470 -0.066 -1.011 0.000 1.012 0.424 1.230 0.000 0.655 0.577 1.018 2.548 0.803 -0.543 -1.740 1.551 1.024 0.699 0.985 0.731 0.510 0.662 0.655 +1 1.235 -0.734 1.509 0.977 1.384 0.949 -0.137 0.197 1.087 1.345 -0.653 -0.745 2.215 1.094 -1.704 -1.678 0.000 1.412 0.788 -0.302 0.000 1.033 0.946 0.987 1.251 1.327 1.092 0.956 +1 0.986 0.812 0.981 0.411 -1.195 1.009 -0.164 -0.675 2.173 
0.422 1.960 0.975 0.000 0.685 0.228 0.236 2.548 0.366 1.489 -1.386 0.000 0.439 1.281 0.986 0.611 0.788 0.810 0.729 +0 0.281 1.588 -0.927 1.059 1.008 0.737 0.098 -0.055 0.000 0.717 0.869 -1.095 0.000 0.419 0.036 0.169 2.548 0.966 -0.287 1.665 3.102 0.920 0.791 0.980 0.545 0.484 0.479 0.508 +0 0.552 0.952 -1.360 1.402 -0.023 1.022 0.339 -0.861 2.173 0.690 -1.573 0.605 0.000 0.508 -0.077 1.489 0.000 0.564 -1.096 1.195 3.102 0.963 0.548 1.138 0.942 1.072 0.886 0.999 +0 0.673 -1.584 -0.298 0.478 1.329 0.864 0.699 -0.968 0.000 1.220 -0.049 0.079 2.215 2.528 -1.088 1.416 1.274 0.569 -1.734 0.125 0.000 2.214 1.576 0.991 0.932 2.076 1.538 1.191 +0 1.074 0.265 -0.145 0.548 -0.039 0.418 -0.650 0.986 0.000 0.721 -0.492 -1.508 1.107 0.507 -1.489 0.791 0.000 1.153 0.411 -1.229 1.551 0.447 0.897 1.001 0.874 0.475 0.681 0.680 +1 0.935 0.086 1.368 1.399 0.417 1.246 -1.042 -0.623 2.173 0.874 -0.874 0.947 2.215 1.468 -0.016 -1.673 0.000 0.454 -0.234 -0.908 0.000 0.808 0.871 1.198 1.547 1.522 1.132 0.975 +1 0.313 2.008 0.367 1.243 0.862 1.182 0.693 -0.693 0.000 0.873 0.666 -1.401 2.215 0.680 -0.922 0.750 1.274 0.700 1.725 1.627 0.000 0.886 1.013 0.999 0.904 1.094 0.778 0.715 +1 0.328 0.448 -1.494 0.815 0.483 1.276 -0.632 0.818 2.173 1.193 -0.508 -1.210 0.000 0.603 2.367 -0.722 0.000 0.688 -0.274 -0.509 3.102 3.016 1.672 0.983 0.763 0.935 1.588 1.266 +0 0.702 -0.046 -0.140 0.905 -1.532 0.504 -1.643 0.557 0.000 1.001 -1.322 -0.544 1.107 1.093 -1.144 1.486 2.548 0.543 -0.407 0.978 0.000 0.576 0.852 1.049 0.861 1.076 0.802 0.735 +1 2.589 -1.034 -0.139 0.604 -0.494 1.009 -0.204 0.984 2.173 0.521 -1.188 0.253 0.000 0.881 -0.653 0.778 2.548 2.100 1.486 -1.025 0.000 0.874 0.747 0.982 1.431 0.374 0.954 0.871 +1 1.594 -1.303 1.472 0.755 0.662 0.826 -0.344 -1.461 0.000 0.971 -0.424 0.194 2.215 1.174 -0.623 -0.392 0.000 1.729 -1.171 -0.646 3.102 0.871 0.918 1.013 1.029 0.996 0.888 0.754 +0 0.908 -1.333 1.595 0.847 -1.164 0.980 -0.655 0.533 2.173 0.735 -0.639 -0.637 0.000 0.767 -0.312 0.013 0.000 0.824 0.494 -1.681 3.102 0.663 0.908 0.989 1.110 1.075 1.047 0.920 +0 1.049 0.704 -1.448 0.787 -0.727 0.787 0.418 0.541 2.173 0.512 1.460 -0.809 0.000 0.622 1.101 1.272 0.000 0.443 0.393 0.116 1.551 0.835 0.942 0.993 0.577 0.234 0.649 0.639 +0 1.014 -1.148 1.527 1.452 -1.078 0.835 1.645 0.331 0.000 1.003 -0.204 -1.659 0.000 1.037 0.602 -0.179 2.548 1.253 0.239 0.258 1.551 2.825 1.659 1.201 1.361 0.373 1.042 1.378 +0 1.026 0.439 -0.752 1.796 1.443 0.702 -1.065 1.028 0.000 0.639 -0.577 -0.124 1.107 0.756 0.200 0.303 2.548 0.470 -1.285 -1.666 0.000 0.610 0.798 1.727 1.184 0.415 0.821 0.853 +1 1.305 -1.012 0.220 2.617 0.400 0.931 -1.340 -1.614 2.173 0.953 1.003 -1.547 0.000 1.133 -1.098 -0.557 2.548 0.654 -2.426 0.831 0.000 1.023 0.898 0.990 1.082 1.047 1.215 1.046 +0 2.769 0.632 -0.193 1.567 -0.817 0.969 -1.016 1.566 2.173 0.970 -0.794 1.177 0.000 0.706 1.402 1.516 0.000 1.569 1.134 -0.491 3.102 0.603 0.875 1.536 0.866 2.321 1.620 1.771 +0 1.914 1.213 0.736 0.769 0.105 1.447 -0.620 -1.227 2.173 0.268 -1.061 1.371 0.000 0.687 -0.243 -0.583 2.548 0.810 -0.350 0.862 0.000 0.339 0.928 0.983 1.067 0.718 1.561 1.196 +0 0.484 0.152 1.701 0.751 0.079 0.700 1.453 -0.666 0.000 0.761 1.414 0.601 0.000 0.851 1.150 -1.704 1.274 0.534 0.717 1.666 3.102 1.410 1.028 0.988 0.633 0.117 0.624 0.816 +0 0.726 1.854 -0.338 0.624 1.622 0.595 -0.003 1.248 2.173 0.484 0.644 0.108 0.000 0.661 -0.185 -1.301 0.000 0.920 -1.669 0.661 0.000 0.914 0.796 0.991 0.992 0.788 0.949 0.831 +0 0.539 -0.677 -0.536 1.206 0.324 1.560 -1.123 0.196 0.000 2.429 -2.388 -1.260 
0.000 1.265 -0.495 -1.467 1.274 1.839 -0.022 0.960 3.102 4.963 3.114 0.992 0.777 0.999 2.212 1.884 +0 1.097 -1.459 -0.531 1.007 -0.455 0.753 0.812 -1.528 2.173 0.590 0.296 1.211 0.000 0.910 -0.305 0.209 2.548 0.748 0.529 0.615 0.000 0.627 0.794 0.978 2.478 1.208 1.626 1.516 +0 0.760 -0.844 -0.870 0.961 -0.358 0.733 -1.360 0.978 1.087 0.619 -0.898 0.394 0.000 0.589 -1.722 -0.644 0.000 1.105 -1.186 -1.356 0.000 0.954 0.859 0.989 0.675 0.232 0.772 0.686 +1 0.967 0.792 0.884 0.220 -1.367 0.604 1.037 -1.740 0.000 0.486 2.189 1.434 0.000 1.490 0.913 -0.460 2.548 1.720 0.580 0.129 1.551 0.803 1.131 0.991 0.938 0.647 0.869 0.810 +1 0.930 0.080 -0.163 1.073 -1.266 0.856 -0.346 -1.212 0.000 1.676 0.119 1.001 0.000 0.946 -0.351 -1.740 0.000 1.792 0.586 0.440 3.102 1.003 0.926 1.159 0.566 0.640 0.626 0.621 +1 1.121 0.747 -0.549 1.085 0.468 1.943 -2.429 1.209 0.000 0.863 1.271 -1.148 2.215 2.123 0.109 -0.995 2.548 1.356 0.517 0.351 0.000 1.017 0.964 1.212 1.137 0.927 0.878 0.801 +1 0.676 -1.613 -1.684 0.757 -0.463 0.797 -1.183 1.413 1.087 0.708 -0.875 0.584 2.215 1.363 -1.999 -0.339 0.000 0.843 -1.291 -1.081 0.000 0.829 0.993 0.986 0.800 0.770 0.859 0.745 +0 1.705 0.150 -0.771 1.067 1.240 0.691 0.503 1.208 0.000 1.254 -1.046 -0.439 1.107 1.388 -0.701 1.235 1.274 0.824 0.232 0.414 0.000 0.769 0.858 1.814 1.414 1.416 1.142 1.034 +1 0.514 1.000 1.110 0.961 -1.643 1.453 2.434 0.584 0.000 1.286 1.041 0.889 0.000 1.527 -0.388 -0.962 2.548 1.806 0.624 -1.339 3.102 0.989 1.772 0.987 0.856 0.895 1.873 1.559 +0 1.358 -1.770 -0.624 0.535 0.248 0.533 -0.266 1.692 0.000 0.568 -1.578 1.732 0.000 0.913 -1.349 1.044 1.274 1.162 -0.342 -0.148 1.551 0.855 0.944 0.988 0.819 0.821 0.675 0.726 +0 0.647 -0.509 -0.721 0.974 -1.703 0.790 0.474 -0.070 1.087 0.958 0.152 0.951 2.215 0.306 -1.270 -1.410 0.000 0.645 -1.180 -0.208 0.000 0.433 0.877 0.985 1.015 1.040 0.968 0.781 +1 1.299 -0.067 0.303 0.318 -0.513 0.745 -0.468 -1.709 2.173 0.719 -0.737 -1.089 0.000 1.253 0.836 0.745 0.000 1.017 0.448 -0.611 3.102 0.724 1.030 0.981 0.713 0.911 0.743 0.749 +0 1.014 -0.642 1.378 1.609 1.681 0.969 -0.424 -0.030 0.000 0.731 -0.778 -0.915 1.107 0.777 -1.281 0.029 2.548 0.627 -1.645 0.558 0.000 1.181 0.997 0.987 0.933 0.650 0.757 0.858 +1 0.453 1.202 -1.688 1.452 -0.966 0.658 0.229 0.969 0.000 0.620 -0.510 -1.477 2.215 1.336 0.245 0.367 0.000 0.959 -0.768 -0.407 3.102 0.873 0.963 0.981 0.650 0.589 0.753 0.775 +1 1.648 1.455 -1.610 0.988 -0.817 1.066 1.161 0.639 2.173 0.660 1.066 1.151 0.000 0.862 2.248 0.240 0.000 1.950 1.381 -1.030 0.000 1.206 0.963 1.160 0.843 0.693 0.919 0.856 +0 0.652 -0.137 -0.411 1.548 -1.261 0.654 0.519 0.081 0.000 0.745 -0.429 1.167 1.107 0.762 0.754 -1.451 2.548 0.446 1.113 0.059 0.000 0.345 0.868 0.987 0.721 0.784 0.680 0.729 +1 1.888 -0.987 1.687 3.028 -1.740 2.632 0.945 0.088 0.000 1.077 -0.858 -0.238 2.215 0.957 -0.412 1.288 0.000 1.734 -0.263 -1.317 1.551 3.329 2.522 1.003 1.674 1.079 1.785 2.442 +1 0.951 -0.103 0.628 0.777 -1.299 1.867 -0.150 1.624 2.173 2.080 0.072 -0.072 0.000 0.773 -0.310 -1.208 2.548 1.173 -0.773 -0.294 0.000 0.732 0.654 1.175 1.073 0.841 0.894 0.791 +1 0.662 -0.340 -1.026 0.637 -1.456 0.659 -0.218 -0.015 1.087 1.073 0.427 0.569 0.000 1.594 1.173 -1.608 0.000 0.409 0.853 0.666 3.102 2.052 1.083 0.985 0.976 0.482 0.912 1.114 +1 1.863 -0.563 -0.633 0.606 -0.943 1.305 -0.349 0.076 0.000 2.097 0.816 -1.631 0.000 1.191 -1.102 0.254 0.000 1.298 -0.671 1.483 3.102 0.869 0.891 0.999 1.107 0.531 0.829 0.802 +1 1.841 -1.141 -0.668 0.954 -1.159 0.850 -0.705 0.311 2.173 1.036 -0.197 1.212 2.215 0.582 
-0.250 -1.578 0.000 1.211 -0.087 0.580 0.000 0.866 0.812 0.980 1.401 1.063 1.126 0.944 +1 1.801 -0.529 0.511 1.065 1.168 0.571 -0.146 0.143 0.000 2.300 2.695 -1.255 0.000 0.931 0.465 -0.210 2.548 1.257 0.712 1.203 3.102 0.676 0.783 1.072 0.992 0.804 0.808 0.686 +0 1.763 0.366 1.286 0.631 1.567 0.884 0.441 -0.152 1.087 0.960 0.834 -1.174 0.000 1.073 1.314 -0.535 0.000 1.797 0.954 0.858 3.102 0.965 1.059 0.981 0.925 1.156 0.982 1.023 +1 0.498 -0.823 1.189 0.520 -1.534 1.297 0.648 -1.559 0.000 1.436 0.223 0.384 0.000 1.556 0.823 -0.242 2.548 0.484 1.977 -0.366 0.000 1.557 0.861 0.988 0.947 0.576 0.667 0.710 +0 0.628 0.884 1.172 1.073 0.695 1.319 0.866 -1.599 0.000 0.893 -0.178 -0.519 2.215 1.829 -0.442 -0.011 2.548 1.531 -1.202 0.440 0.000 1.284 1.533 0.989 0.955 0.635 1.380 1.167 +0 1.933 0.882 0.964 0.429 -1.481 0.645 0.153 -0.768 2.173 0.567 1.335 -0.521 2.215 0.685 0.861 1.716 0.000 0.679 1.034 0.080 0.000 0.757 0.773 1.019 0.870 0.609 0.801 0.663 +1 0.571 -0.657 0.666 0.666 -1.569 1.017 0.035 0.142 2.173 0.479 -1.108 0.183 0.000 1.449 0.015 -1.371 2.548 0.704 -0.500 -0.474 0.000 0.849 0.925 0.983 0.968 1.480 1.020 0.866 +0 1.361 -1.447 -0.132 0.714 -1.628 0.490 0.052 -0.325 0.000 0.564 0.296 0.590 0.000 1.501 -1.267 -1.241 2.548 1.254 0.906 1.087 3.102 0.832 0.822 1.332 0.909 1.890 1.232 1.076 +0 0.721 0.543 1.158 0.935 -0.943 0.460 -0.761 1.605 0.000 0.498 -2.184 -1.636 0.000 1.078 -1.422 -0.498 2.548 1.569 -0.612 0.453 1.551 0.852 0.987 1.079 1.215 0.862 0.921 0.927 +1 0.370 -1.708 -0.548 1.023 -1.270 0.980 -0.143 0.552 2.173 0.324 0.994 -0.521 0.000 1.077 0.423 1.521 0.000 1.010 0.801 -0.131 3.102 0.910 1.055 0.991 0.853 0.862 0.831 0.773 +1 0.754 -1.504 1.562 0.583 0.535 1.184 2.054 -1.625 0.000 1.237 -0.130 -0.144 2.215 1.943 1.063 -0.812 0.000 2.263 -0.296 0.513 3.102 0.809 1.218 0.990 0.772 0.859 1.088 1.051 +0 0.645 0.838 -1.180 0.197 -0.660 0.942 1.233 -0.463 2.173 1.206 0.431 1.045 0.000 0.812 0.853 0.498 0.000 0.819 0.953 -1.665 3.102 0.812 0.736 0.999 1.032 0.822 0.863 0.826 +0 0.817 0.349 -0.034 0.427 -0.673 0.738 1.252 1.457 0.000 1.543 1.446 -0.199 1.107 1.076 0.460 1.571 0.000 0.724 -0.884 -0.007 0.000 0.802 0.982 0.981 1.368 0.940 0.932 1.016 +1 0.726 0.312 1.062 0.635 -0.028 0.843 -0.342 0.573 0.000 1.411 -0.555 -1.311 2.215 1.017 2.323 0.323 0.000 1.413 -1.110 -0.785 3.102 0.893 0.789 0.992 0.847 0.766 0.773 0.670 +1 1.640 -0.097 -1.259 0.795 -0.366 0.895 0.794 0.413 2.173 0.973 -0.371 0.845 2.215 0.944 0.822 1.444 0.000 0.425 0.082 -0.409 0.000 0.753 0.823 1.141 1.094 1.006 1.012 0.838 +1 0.546 -1.934 1.106 1.127 -1.560 0.915 -0.694 -0.675 0.000 1.544 -2.824 0.222 0.000 1.030 0.342 -1.680 2.548 1.582 -0.779 1.353 3.102 1.189 1.180 0.980 0.898 0.788 0.946 0.836 +0 0.423 1.479 -1.494 0.983 0.163 1.336 -0.295 -0.732 0.000 0.912 0.538 0.430 2.215 1.334 0.730 1.347 2.548 1.901 0.945 -1.617 0.000 0.432 0.960 0.987 0.777 0.875 0.654 0.636 +1 1.118 -0.323 -1.418 0.309 0.934 0.903 -0.377 1.415 0.000 0.815 -2.300 -0.434 0.000 1.237 -1.049 -0.047 2.548 1.005 -1.446 0.535 0.000 0.916 0.876 0.990 0.661 0.726 0.764 0.734 +1 0.997 -0.258 0.109 0.952 1.553 0.623 -0.068 0.737 0.000 1.018 -0.877 -1.039 2.215 0.859 -0.719 -1.646 2.548 0.653 0.936 -0.021 0.000 0.871 0.941 1.301 0.991 0.520 0.825 0.760 +1 1.575 -0.507 -1.086 0.544 -1.424 0.530 -0.810 0.657 1.087 0.763 -0.887 1.195 0.000 1.221 -0.717 -0.214 1.274 0.584 0.059 0.002 0.000 0.885 0.844 0.987 1.017 0.708 0.807 0.732 +0 0.737 -0.746 1.527 0.203 0.362 1.026 -2.822 1.143 0.000 1.038 1.160 -0.857 1.107 1.277 -1.017 -0.388 2.548 
0.897 -0.012 1.272 0.000 1.006 0.907 0.984 0.931 1.821 1.014 0.851 +0 1.285 -0.713 0.693 0.961 1.451 0.980 0.358 -1.143 1.087 0.514 1.056 -0.453 0.000 0.542 -0.710 0.353 0.000 0.524 1.184 0.070 3.102 1.023 1.022 0.987 0.867 0.794 0.920 0.824 +0 0.381 0.818 -1.402 1.045 1.204 0.873 -0.583 -0.279 2.173 0.460 -0.397 0.357 0.000 1.017 0.888 -0.934 2.548 0.511 1.948 1.274 0.000 0.670 1.272 0.991 0.932 1.207 0.888 0.832 +0 1.991 0.236 0.088 2.083 -0.011 1.273 -0.230 -0.853 0.000 1.884 -0.706 1.456 2.215 1.184 0.454 1.370 0.000 1.039 0.377 0.596 3.102 0.698 0.747 0.977 2.340 1.184 1.462 1.268 +1 0.943 -1.469 -0.262 2.380 0.365 0.946 -1.298 -1.641 2.173 0.347 -0.529 1.081 2.215 0.449 0.370 -1.585 0.000 0.764 -0.699 1.583 0.000 0.466 0.635 1.112 0.791 0.629 0.959 0.840 +1 0.951 0.704 0.529 0.773 -0.663 0.538 -0.141 1.468 0.000 0.385 0.838 -0.922 0.000 1.002 1.326 1.224 0.000 1.540 0.606 -0.360 0.000 0.935 0.858 1.044 0.875 0.492 0.746 0.699 +0 0.557 -1.212 0.730 0.887 -0.990 0.920 -0.837 0.370 2.173 0.992 -1.676 1.732 1.107 0.645 0.425 0.241 0.000 0.621 -1.231 -1.000 0.000 1.010 0.890 0.988 0.768 1.469 0.916 0.773 +1 1.112 -1.749 1.455 0.377 -0.582 1.581 0.357 -1.000 0.000 1.063 -0.933 1.197 2.215 1.306 -1.758 -0.309 0.000 1.662 1.166 0.654 0.000 0.886 1.026 0.985 0.841 0.497 0.823 0.762 +1 0.325 -1.722 -0.097 2.580 -0.239 0.968 -0.508 -1.319 2.173 0.574 -1.304 -1.113 0.000 1.509 0.717 0.940 0.000 1.145 -1.184 1.011 1.551 0.903 1.055 0.990 1.252 1.095 1.002 1.061 +1 0.739 -0.843 -0.227 0.654 -1.399 0.933 0.762 -0.798 2.173 1.169 -0.365 1.264 0.000 0.402 -0.788 -1.713 0.000 0.992 0.857 0.793 1.551 0.976 0.943 0.987 1.143 1.017 1.099 0.920 +0 0.457 -1.336 0.185 0.880 -1.398 1.205 -0.049 -0.683 2.173 1.137 -1.101 1.055 0.000 1.685 -0.752 1.604 0.000 1.090 -0.568 0.548 3.102 1.061 0.865 0.987 0.886 1.154 1.174 0.937 +1 0.400 -0.015 0.684 2.768 1.514 0.875 -0.098 -0.048 2.173 0.336 2.011 0.382 0.000 0.857 -0.989 -0.933 2.548 0.785 0.298 -0.736 0.000 0.858 0.954 0.992 1.058 0.951 1.032 0.975 +1 0.942 0.718 1.649 1.613 -1.241 0.915 -0.168 -0.183 2.173 1.130 0.875 0.352 0.000 0.998 2.668 1.524 0.000 0.941 1.271 -0.906 0.000 0.886 1.064 0.991 0.719 0.711 0.836 0.746 +1 0.454 1.719 0.069 1.357 -1.684 0.891 1.239 -0.345 0.000 0.405 1.304 0.445 0.000 0.892 -0.215 1.583 2.548 1.047 0.921 1.360 3.102 0.836 0.884 1.088 1.050 0.554 0.799 0.788 +0 1.373 1.370 -1.718 0.494 -0.836 0.452 0.900 0.803 2.173 0.534 0.287 -1.585 0.000 0.685 0.285 0.519 2.548 1.938 0.074 -0.253 0.000 1.243 0.992 0.988 0.893 0.274 0.667 0.785 +0 1.141 0.176 -0.690 1.698 -0.070 1.000 0.238 -1.128 2.173 1.330 -2.274 0.764 0.000 1.125 0.747 1.210 0.000 1.172 -0.355 -1.709 3.102 4.425 2.469 1.024 1.022 0.692 1.888 1.681 +1 0.659 -0.289 1.108 1.131 0.118 1.022 -1.118 -1.720 2.173 0.879 -0.556 0.332 0.000 0.589 1.251 0.149 0.000 0.438 -1.994 -1.285 0.000 1.165 1.087 0.988 1.254 0.904 0.906 0.874 +1 0.930 -1.632 0.827 1.223 -0.824 0.970 -1.600 -0.958 1.087 1.078 -0.770 0.195 2.215 0.722 -1.708 -1.593 0.000 0.599 -1.252 1.184 0.000 0.815 1.020 1.473 1.072 1.445 0.937 0.845 +0 0.533 -1.483 1.324 0.552 -0.596 0.802 0.071 -0.131 2.173 0.483 -0.454 -1.535 0.000 1.310 -0.179 1.155 2.548 0.394 1.147 -0.594 0.000 0.737 0.809 0.989 0.739 1.183 0.744 0.659 +0 1.379 0.643 1.613 0.635 0.818 0.499 0.824 0.459 2.173 0.851 -0.110 -0.988 1.107 1.195 -1.419 -0.511 0.000 0.834 1.659 1.188 0.000 0.852 1.039 0.989 0.751 1.038 0.743 0.720 +0 0.509 1.660 0.101 0.905 1.166 0.827 0.414 0.497 2.173 0.897 -0.476 -0.531 0.000 1.845 0.247 -1.252 2.548 0.508 -0.715 1.664 0.000 
0.824 0.897 0.991 1.349 1.543 1.362 1.333 +0 0.652 -0.025 -0.488 0.889 0.642 1.352 1.633 1.191 0.000 1.260 0.404 0.065 2.215 1.243 -1.543 -0.261 0.000 1.768 -0.762 -1.568 1.551 1.196 1.007 0.991 0.798 1.652 1.114 0.910 +0 0.744 -1.339 1.108 1.265 -0.117 1.061 0.033 -1.610 2.173 0.904 0.523 1.071 0.000 1.542 1.263 -0.760 1.274 2.149 0.066 0.350 0.000 0.937 1.030 1.200 2.102 1.604 1.573 1.275 +0 0.780 -0.209 -0.396 1.516 0.604 1.264 -1.444 -1.401 0.000 0.711 -0.710 0.040 0.000 0.966 0.808 0.210 2.548 0.982 0.071 -1.631 3.102 0.922 0.931 1.182 0.840 0.802 0.675 0.653 +0 0.515 -0.616 -0.983 1.371 1.470 0.556 -1.431 -1.266 0.000 0.796 -0.882 -0.335 2.215 1.650 0.404 0.910 1.274 1.796 -1.343 -0.171 0.000 1.280 0.881 0.984 0.879 1.411 1.204 1.042 +0 0.916 -0.774 -1.599 1.237 1.654 0.699 -1.346 -0.683 0.000 1.280 -2.726 0.108 0.000 0.919 0.543 -1.191 2.548 2.122 -1.351 0.795 3.102 2.057 1.613 0.984 0.694 1.779 1.698 1.634 +1 0.373 -1.421 -1.256 1.301 1.070 1.247 -0.256 -0.546 2.173 0.515 -1.144 -0.765 0.000 0.597 0.230 1.645 0.000 0.508 1.607 0.475 0.000 0.895 1.281 0.987 1.205 0.949 1.318 1.087 +0 0.934 0.845 0.735 1.438 1.326 1.287 0.649 -0.063 2.173 0.883 0.176 -1.548 0.000 0.700 1.337 -1.069 0.000 0.466 -0.748 -0.792 0.000 0.945 1.065 0.989 0.689 0.425 0.873 0.813 +1 0.925 -0.516 1.553 1.652 -1.320 1.381 -0.674 0.582 2.173 0.788 0.171 -0.914 0.000 0.747 -0.376 -0.320 2.548 0.874 0.493 -0.489 0.000 0.822 1.096 0.987 0.833 0.934 1.017 0.890 +0 2.976 0.647 1.331 1.349 0.977 1.716 1.199 -0.346 1.087 0.551 0.250 -1.300 2.215 0.603 0.406 -0.864 0.000 0.490 0.517 -0.446 0.000 0.227 0.585 0.987 0.948 1.295 1.467 1.083 +0 1.600 0.098 0.989 1.153 -0.323 0.770 0.652 -1.612 2.173 1.032 -0.993 -0.077 2.215 0.820 -1.548 -1.004 0.000 0.638 -1.462 -0.287 0.000 0.897 0.947 1.741 1.236 1.785 1.142 1.090 +1 0.469 -0.520 -0.197 0.784 0.493 0.706 1.172 -0.188 0.000 0.943 0.652 -1.704 2.215 0.339 0.820 -1.278 0.000 0.909 -0.942 1.443 1.551 0.973 1.006 0.983 0.882 0.895 0.847 0.759 +0 1.012 -1.405 -0.993 0.339 1.459 0.950 -0.743 0.933 2.173 0.684 0.085 -0.010 2.215 0.893 -1.307 0.068 0.000 1.705 -1.928 -1.273 0.000 0.956 0.933 0.989 0.963 1.027 0.798 0.733 +1 1.538 -0.911 -1.649 0.908 -1.338 1.176 -0.450 -0.022 0.000 0.686 0.431 1.465 1.107 1.427 -1.358 -0.508 0.000 1.544 -0.812 -1.018 0.000 0.918 0.881 0.980 1.128 0.338 0.805 0.776 +1 1.822 0.449 -0.602 1.204 1.549 1.469 1.241 0.911 2.173 1.035 -2.882 -0.626 0.000 0.778 0.724 -1.323 1.274 0.445 1.300 1.206 0.000 0.339 0.708 1.914 0.959 1.242 1.161 0.879 +0 1.310 -0.374 0.374 0.728 -0.391 0.820 1.373 1.643 2.173 0.502 0.478 -1.600 2.215 0.588 0.264 -0.007 0.000 0.615 0.397 -1.227 0.000 0.649 1.012 0.991 0.892 0.482 1.087 0.849 +1 0.658 -0.398 1.056 1.011 -1.420 0.639 -2.260 -0.564 0.000 0.838 -0.458 0.564 2.215 0.645 -1.067 -1.598 2.548 0.751 -2.263 -0.833 0.000 0.437 1.011 0.990 0.811 0.778 0.741 0.763 +0 1.411 0.549 0.986 1.593 1.617 1.176 1.928 -0.584 0.000 1.099 0.132 0.543 2.215 0.626 0.672 -0.394 0.000 1.125 0.383 -1.359 3.102 1.025 1.079 1.118 0.986 1.007 1.122 1.146 +0 1.715 -0.244 0.174 1.362 1.027 0.765 -0.958 -0.561 0.000 1.403 -0.976 -1.520 2.215 0.473 -0.121 -1.634 2.548 0.544 0.595 0.529 0.000 1.239 0.827 1.473 1.514 0.400 0.959 0.912 +0 0.402 0.329 0.974 4.086 0.837 1.679 -1.142 -0.863 0.000 1.498 -0.269 -1.014 2.215 2.589 -0.021 1.090 2.548 0.996 -0.507 -0.456 0.000 0.893 0.974 0.988 0.900 2.001 1.525 1.599 +0 0.561 -1.451 -0.368 1.163 -0.726 0.330 1.443 0.731 0.000 0.939 -0.073 1.592 2.215 0.493 0.098 0.709 0.000 0.468 -1.351 -1.458 0.000 0.714 
0.641 0.993 0.814 0.450 1.122 0.907 +0 0.701 -1.620 0.066 0.967 -1.049 0.781 -0.442 1.067 2.173 0.496 -0.588 -0.870 0.000 0.508 -1.017 0.551 0.000 1.068 0.262 -1.041 3.102 0.834 0.863 0.987 1.047 0.988 0.956 0.777 +0 0.401 2.426 -0.606 1.490 1.552 1.029 -0.751 -0.573 2.173 0.850 -0.038 -1.131 0.000 1.216 -1.988 1.033 0.000 2.206 1.315 0.532 3.102 0.884 0.940 0.997 1.040 2.675 2.067 1.644 +0 0.831 0.442 -0.560 0.735 0.812 0.867 0.360 -1.449 2.173 0.837 0.873 0.571 2.215 0.767 0.808 -1.130 0.000 0.538 1.355 0.290 0.000 0.730 0.763 1.023 0.676 1.261 0.757 0.654 +1 0.875 1.742 -0.607 1.152 -0.394 1.357 0.734 1.317 2.173 0.925 0.277 -0.263 0.000 1.456 -0.111 0.236 2.548 2.737 0.248 -1.555 0.000 0.911 0.716 0.996 1.476 1.646 1.197 0.995 +1 1.970 -1.563 1.674 0.458 1.198 0.530 -0.770 -1.064 0.000 1.630 -1.088 0.186 2.215 1.104 -0.491 0.859 2.548 0.563 0.079 -0.921 0.000 0.507 1.061 0.997 1.033 0.915 1.077 0.958 +1 0.450 0.935 1.725 1.143 0.293 0.690 -0.337 -1.355 0.000 0.789 -0.795 0.828 2.215 0.809 -1.585 -1.150 0.000 1.681 0.337 0.323 1.551 1.050 1.103 0.989 0.589 0.815 0.972 0.912 +1 0.427 -1.574 -0.395 1.617 0.575 0.723 0.834 -1.380 2.173 0.503 0.861 -0.403 0.000 0.787 0.059 0.618 2.548 0.498 -0.488 -1.535 0.000 0.765 0.705 0.984 1.167 0.988 1.587 1.288 +1 0.828 1.317 -1.179 1.732 0.598 0.919 1.156 0.315 2.173 0.413 1.029 1.732 0.000 0.419 0.559 -1.103 0.000 1.244 -0.241 0.126 3.102 1.028 0.900 1.658 1.006 0.919 1.064 1.143 +0 0.313 1.929 -1.618 0.968 -1.034 1.050 -0.541 1.030 2.173 1.094 -0.941 -0.978 0.000 1.372 0.428 0.457 1.274 0.480 0.493 -1.428 0.000 0.895 1.294 0.992 1.216 1.084 1.036 0.953 +1 0.295 1.743 1.398 0.617 -0.558 1.201 1.148 -1.353 2.173 0.815 0.151 0.607 0.000 1.295 0.920 -0.017 0.000 0.994 0.020 1.345 3.102 1.091 0.916 0.996 0.943 1.023 1.029 0.851 +0 0.656 -1.342 -0.147 0.498 1.556 0.834 -0.052 0.529 0.000 1.124 -0.850 -1.564 0.000 0.457 -0.864 -1.026 2.548 0.964 -0.644 -0.552 0.000 1.074 0.898 0.987 0.534 0.469 0.535 0.522 +1 0.301 -1.398 -0.961 1.635 0.359 0.537 -0.360 0.980 0.000 0.863 -0.250 1.684 2.215 1.350 0.727 -1.105 2.548 0.750 0.585 -0.694 0.000 0.847 0.977 0.989 1.342 0.926 1.515 1.210 +1 0.809 -0.770 -1.438 0.414 -0.017 0.946 -0.015 -0.712 1.087 0.841 -0.102 0.161 2.215 1.645 -0.593 0.972 0.000 0.440 -1.367 1.629 0.000 0.838 0.846 0.983 0.984 0.931 0.893 0.852 +0 0.914 -1.331 0.121 0.891 -0.625 0.363 -0.929 -1.204 2.173 0.690 -1.310 0.843 0.000 0.481 0.398 1.660 2.548 0.373 -1.464 -0.438 0.000 0.681 0.792 0.985 0.727 0.476 0.719 0.664 +0 0.363 1.698 1.031 0.515 -0.515 1.489 0.552 -1.621 1.087 0.965 0.264 -0.365 2.215 1.642 1.183 0.953 0.000 1.235 1.580 0.271 0.000 1.024 1.343 0.992 0.982 1.617 1.300 1.031 +0 0.426 0.962 1.263 1.068 0.715 2.309 0.454 -1.300 0.000 2.321 -0.427 0.388 2.215 0.318 1.250 0.037 2.548 0.543 0.988 -0.961 0.000 0.818 0.900 0.990 0.889 0.987 1.616 1.306 +0 3.174 -0.834 0.584 1.252 0.590 2.229 -0.326 -1.073 0.000 0.854 0.454 1.403 2.215 0.773 -0.557 -0.325 2.548 0.408 0.580 0.001 0.000 0.470 0.481 0.995 1.280 0.992 0.953 0.724 +0 0.744 -1.858 -1.488 0.354 1.529 1.012 -1.097 0.649 1.087 0.902 -1.136 -1.482 2.215 0.897 -0.968 -0.771 0.000 0.704 0.571 -0.403 0.000 0.914 0.910 0.998 0.963 1.321 0.928 0.802 +1 0.427 1.935 -0.740 0.358 1.031 1.412 0.748 1.134 0.000 1.128 2.390 -0.585 0.000 0.619 1.224 1.696 0.000 0.764 -0.889 0.602 3.102 0.958 0.896 0.995 0.791 0.558 0.783 0.704 +0 1.099 0.926 -0.328 2.444 0.807 0.476 0.239 -0.867 0.000 0.679 -0.141 -1.348 2.215 0.875 -0.966 -1.420 2.548 0.731 0.614 0.341 0.000 0.830 0.880 1.938 1.396 0.392 
1.207 0.964 +0 1.800 0.980 -0.200 1.139 -0.498 0.794 1.300 0.718 2.173 0.853 -0.486 -1.335 0.000 1.001 1.087 1.336 0.000 1.184 0.389 -1.270 3.102 0.838 1.329 0.975 0.829 1.096 0.896 0.921 +0 1.384 1.429 -1.160 1.575 -0.165 0.769 0.056 1.470 0.000 1.060 0.712 -0.133 2.215 1.068 -0.929 1.203 0.000 0.796 1.044 0.822 3.102 1.001 0.995 1.599 1.037 0.666 0.979 1.200 +0 0.752 0.023 0.513 1.425 -1.452 0.362 -0.547 -0.141 0.000 0.891 1.166 -1.497 0.000 1.164 -0.167 0.882 2.548 1.160 0.836 -0.651 0.000 0.924 1.213 1.406 0.750 0.341 0.730 0.741 +1 0.609 -0.545 -0.152 0.547 -0.941 1.281 0.395 0.938 2.173 0.745 0.074 -1.054 0.000 0.799 -0.626 0.332 2.548 0.858 1.365 -1.130 0.000 0.902 1.071 0.993 1.669 0.973 1.083 1.115 +1 1.732 -0.716 1.307 0.731 -1.693 1.781 1.419 -0.132 0.000 0.805 0.032 -0.962 2.215 0.834 0.187 0.742 2.548 0.593 -1.198 -0.814 0.000 3.242 1.956 0.992 1.065 0.873 1.301 1.588 +1 1.502 -2.000 1.067 0.893 -1.232 0.639 -0.366 0.566 2.173 0.700 -0.957 0.188 0.000 1.100 -0.502 -0.496 2.548 0.370 -1.676 -1.563 0.000 0.763 0.641 1.408 1.263 0.859 1.051 0.845 +0 1.205 0.393 -0.622 1.410 0.215 1.350 -0.152 0.096 2.173 2.229 -0.706 1.528 2.215 0.718 -1.895 -1.602 0.000 1.458 1.348 -1.107 0.000 0.917 0.969 1.238 0.913 2.563 1.530 1.327 +0 0.890 0.409 -1.370 0.294 -0.524 0.777 0.706 1.532 0.000 0.872 0.068 -0.060 2.215 1.042 0.108 0.413 2.548 0.678 0.598 -0.857 0.000 0.925 0.946 0.981 0.882 0.419 0.738 0.711 +0 1.150 1.215 -0.010 0.780 1.369 0.647 1.927 1.408 0.000 1.635 1.296 -0.668 2.215 1.026 -0.217 0.961 2.548 0.597 -1.350 1.513 0.000 0.772 1.152 1.243 0.986 1.819 1.104 0.941 +1 1.501 -1.538 1.725 0.678 -0.128 0.831 -0.384 0.409 2.173 0.634 0.088 -0.893 0.000 0.602 -1.332 -0.146 2.548 0.677 -1.051 1.463 0.000 0.957 1.002 1.391 0.793 0.652 0.827 0.781 +1 0.556 -1.234 -1.384 0.887 0.099 0.782 -2.655 -1.663 0.000 0.959 -1.007 -0.267 2.215 0.548 -0.658 1.458 2.548 0.519 -2.186 0.135 0.000 0.972 0.928 0.987 0.633 0.779 0.886 0.803 +1 1.273 -0.051 1.263 0.909 -0.898 1.198 0.057 0.474 2.173 0.936 -1.019 1.461 0.000 1.977 0.857 -0.510 2.548 0.889 -0.268 -1.109 0.000 0.979 1.402 1.386 1.271 1.725 1.332 1.127 +1 1.031 0.476 -0.487 1.201 -0.238 0.770 -0.599 1.447 2.173 0.449 -1.479 1.050 0.000 0.535 -0.929 -0.913 2.548 0.552 -1.722 -1.273 0.000 0.590 0.677 0.988 0.633 0.699 0.793 0.745 +1 0.652 1.815 -0.194 0.102 0.885 0.669 -0.181 -1.314 2.173 0.688 1.139 0.712 0.000 0.921 1.435 1.492 0.000 0.988 -0.154 -0.005 3.102 0.828 0.961 0.995 0.912 0.796 0.861 0.755 +0 0.619 -1.019 -0.097 1.186 0.560 0.939 1.433 -0.944 2.173 1.077 0.352 -1.623 0.000 1.423 0.230 1.209 2.548 1.101 2.302 -0.156 0.000 0.950 1.025 0.982 1.468 1.623 1.997 1.561 +0 0.682 -0.056 1.375 0.770 -0.795 0.975 -2.354 0.473 0.000 1.348 -0.297 0.206 0.000 1.796 -0.084 -1.317 2.548 1.304 0.643 1.559 0.000 1.110 0.840 0.987 0.499 0.144 0.482 0.508 +0 0.307 0.677 -1.475 0.890 1.507 0.831 1.435 0.616 0.000 0.733 -1.846 -1.350 0.000 1.236 0.466 -0.098 2.548 0.726 0.219 -0.916 0.000 0.618 0.828 0.987 1.078 0.215 0.852 0.775 +0 0.838 0.951 -1.299 1.376 -1.128 0.675 1.143 0.735 0.000 0.655 0.489 -0.250 2.215 1.055 -0.389 0.522 2.548 0.410 0.132 1.657 0.000 0.724 0.817 0.995 0.985 0.709 1.108 0.935 +0 0.363 -0.449 1.045 1.043 1.241 1.332 0.567 0.584 2.173 0.988 2.469 -1.077 0.000 1.982 0.075 -0.862 2.548 1.660 -0.020 1.080 0.000 3.182 2.456 0.988 1.220 2.016 1.828 1.417 +1 0.418 1.992 -0.009 0.434 -1.725 0.520 1.356 0.105 0.000 0.763 0.790 -0.511 0.000 1.242 -0.498 1.233 0.000 1.172 0.363 -1.128 3.102 0.800 0.786 0.983 0.518 0.236 0.520 0.515 +1 0.938 
0.586 -0.409 0.308 -0.932 1.032 1.194 0.512 0.000 1.728 1.285 -0.960 2.215 1.771 1.465 0.979 0.000 1.029 0.760 1.330 3.102 1.108 0.830 0.992 0.763 1.090 1.169 0.983 +1 1.024 -0.546 -1.488 0.893 0.742 0.309 -1.944 -0.282 0.000 0.772 1.030 -1.205 2.215 0.605 -0.800 1.106 0.000 0.790 -1.433 0.177 0.000 0.860 0.678 1.199 1.076 0.528 0.838 0.771 +1 0.523 0.925 -0.158 0.718 1.169 0.941 0.426 -1.532 2.173 0.790 -0.219 0.180 0.000 0.708 -0.710 -1.583 0.000 0.990 -2.242 0.625 0.000 0.818 0.769 0.987 0.854 0.900 1.148 0.987 +1 1.260 1.007 0.192 0.505 -1.233 0.367 -1.224 -1.310 2.173 0.626 -0.337 1.075 2.215 0.548 0.249 1.167 0.000 0.751 -0.610 -0.981 0.000 0.758 0.602 1.060 0.886 0.673 0.833 0.692 +1 1.282 0.369 -0.090 0.149 -0.570 0.680 1.053 0.898 1.087 0.685 0.002 1.559 0.000 0.701 -0.793 -1.087 2.548 0.829 1.854 -1.072 0.000 0.944 1.090 0.989 0.709 1.281 0.825 0.759 +0 0.331 -2.005 1.520 0.485 0.069 0.711 -0.243 0.230 0.000 0.824 0.867 -0.716 2.215 1.827 0.362 1.604 2.548 0.387 -0.090 -0.773 0.000 0.631 0.844 0.977 0.973 1.177 0.873 0.786 +1 0.679 -0.432 -0.483 0.444 1.661 0.920 0.431 1.568 2.173 0.841 0.148 -0.191 0.000 1.240 -0.521 0.248 1.274 0.845 -0.812 -1.156 0.000 1.063 0.851 0.982 1.139 1.420 0.927 0.845 +0 0.676 0.091 -1.669 0.964 -0.792 1.991 1.877 1.189 0.000 1.520 -2.296 -0.471 0.000 1.190 -0.065 -0.881 2.548 1.850 1.437 0.521 0.000 0.656 1.024 0.989 0.663 0.348 0.906 0.798 +1 0.280 0.811 0.232 1.078 1.348 0.911 -0.233 -0.860 0.000 1.246 0.155 1.177 2.215 0.842 -0.316 0.228 2.548 1.320 -0.882 -0.546 0.000 0.851 0.870 0.984 0.702 0.868 0.965 0.837 +0 0.763 -0.043 -1.486 0.834 0.666 0.779 0.036 0.883 0.000 0.960 0.481 -0.325 2.215 0.952 -0.682 -0.671 2.548 0.976 -0.578 1.617 0.000 0.957 1.037 1.030 0.852 0.742 0.852 0.739 +0 1.628 -0.421 0.916 0.651 0.386 0.709 0.821 -0.917 0.000 1.225 -0.035 -0.886 1.107 0.658 0.547 1.452 1.274 0.674 -0.036 0.520 0.000 1.122 0.889 0.980 0.667 0.876 0.834 0.800 +1 1.197 1.292 0.632 0.115 -0.768 0.932 -0.352 -1.151 2.173 0.737 0.822 0.914 0.000 1.088 0.232 -0.366 2.548 0.676 -1.164 -1.573 0.000 1.117 1.151 0.992 1.230 0.904 1.176 0.980 +1 0.798 0.803 -0.317 1.451 -0.005 0.545 -0.634 -0.467 0.000 1.418 -0.172 1.628 2.215 0.937 -1.180 1.038 2.548 0.790 -0.053 -1.354 0.000 0.776 1.013 0.976 1.101 0.952 1.042 0.880 +1 0.549 0.720 -1.017 0.378 -0.784 0.672 1.032 1.002 0.000 0.969 0.218 -1.742 0.000 1.688 -0.908 -0.359 0.000 1.551 -0.430 0.175 3.102 0.901 0.881 0.984 0.746 0.584 0.850 0.818 +1 1.388 0.814 -0.929 0.752 1.169 0.966 -0.601 1.475 2.173 0.457 -0.532 0.197 0.000 1.272 -0.627 -0.421 2.548 1.003 0.789 0.530 0.000 0.768 0.867 1.344 1.323 1.369 1.105 0.944 +0 1.201 -0.727 0.120 1.121 0.664 0.489 1.227 0.423 2.173 0.789 0.193 -0.930 0.000 1.027 -1.081 -1.394 1.274 0.629 0.829 1.538 0.000 0.820 0.928 0.980 1.396 1.616 1.136 1.021 +1 2.302 0.967 -0.183 1.723 0.243 1.648 0.265 1.687 2.173 0.565 0.924 -1.578 0.000 0.755 0.772 0.361 2.548 1.112 0.327 -0.693 0.000 0.878 1.029 1.036 0.569 1.356 1.397 1.085 +0 0.772 0.975 -0.343 3.733 0.125 1.062 0.826 -1.451 2.173 0.862 1.219 1.657 2.215 0.917 1.118 0.745 0.000 1.025 0.629 1.344 0.000 0.921 1.197 0.993 1.792 0.556 1.316 1.276 +1 1.322 -0.057 -1.155 1.228 -0.795 0.668 0.883 0.198 2.173 0.497 -2.015 0.196 0.000 0.736 0.090 1.692 0.000 0.842 0.973 0.741 0.000 0.819 0.907 0.990 1.318 0.870 1.055 0.962 +1 1.209 -0.271 1.366 1.608 -1.540 0.841 -0.296 -0.142 2.173 0.672 1.054 0.725 1.107 0.781 -0.259 -0.684 0.000 0.973 1.708 -0.707 0.000 0.912 1.170 0.990 1.142 1.137 1.052 1.023 +1 1.679 0.866 -1.547 0.650 1.446 
3.177 -0.994 -0.394 0.000 2.719 0.112 1.184 2.215 0.887 0.390 0.535 0.000 0.676 0.742 1.078 1.551 0.458 0.803 0.985 0.585 0.500 0.924 0.824 +0 0.672 -0.444 -0.749 1.491 0.160 0.503 -0.941 1.711 2.173 0.578 0.193 -1.446 1.107 0.739 0.482 1.240 0.000 0.609 0.990 -0.270 0.000 0.765 0.905 1.014 0.847 0.534 0.711 0.692 +1 0.393 0.298 -0.602 0.761 0.230 0.654 -0.862 -1.403 0.000 1.664 -0.382 -0.068 2.215 1.480 -0.873 1.501 0.000 2.229 0.488 1.183 1.551 0.888 1.376 0.984 0.752 1.802 1.315 1.081 +0 1.965 0.015 -1.078 1.834 -1.513 0.983 -1.160 0.475 0.000 1.370 -0.091 0.653 1.107 1.060 -0.790 -0.699 1.274 0.622 -0.375 0.100 0.000 0.591 0.896 1.002 1.568 1.305 1.136 1.132 +0 1.339 -1.146 1.691 1.009 -0.423 0.553 -1.650 -1.511 0.000 0.754 2.626 0.905 0.000 0.826 -1.249 -0.023 2.548 1.097 -0.354 -0.007 3.102 0.915 1.499 1.522 0.897 0.357 1.496 1.666 +0 0.875 -1.405 1.598 1.502 -1.077 1.243 -0.550 0.708 0.000 0.986 0.597 -0.376 0.000 0.904 -0.325 -0.235 2.548 1.526 -0.561 1.539 3.102 0.795 0.905 1.060 0.957 0.909 0.755 0.890 +0 0.941 -0.130 1.085 1.833 -1.473 0.588 -0.519 -0.682 2.173 0.390 -0.496 0.090 2.215 0.418 1.692 0.810 0.000 0.632 -2.095 -0.876 0.000 2.558 1.411 1.352 1.005 0.452 0.923 0.929 +1 0.410 1.530 -0.047 1.723 1.261 0.834 -0.022 -0.675 0.000 0.941 -0.304 -0.105 0.000 0.886 0.616 -1.442 2.548 1.286 0.535 0.933 3.102 0.965 1.028 1.077 0.689 0.687 0.825 0.985 +0 1.643 1.920 -0.834 0.596 0.819 0.314 0.953 1.240 2.173 0.542 0.136 0.539 0.000 0.657 2.699 -0.002 0.000 0.423 0.761 1.732 1.551 1.819 1.036 1.367 0.871 0.165 0.662 0.724 +1 1.263 -0.324 -1.330 0.879 -1.105 1.129 2.222 0.158 0.000 0.926 0.117 0.672 0.000 0.987 0.075 1.596 2.548 0.752 0.877 -0.111 0.000 0.951 1.655 0.989 0.601 0.866 1.950 1.868 +0 0.458 -0.679 1.010 1.061 0.027 0.459 0.645 -1.276 0.000 0.806 0.345 1.370 2.215 0.600 -0.233 -0.052 0.000 0.995 1.130 -0.823 1.551 0.952 0.829 0.988 1.497 0.854 1.135 0.958 +0 0.929 -1.495 1.261 0.941 0.527 0.755 -0.943 -0.948 2.173 0.302 -2.844 -0.602 0.000 0.651 0.367 -1.636 0.000 0.663 -0.897 0.543 3.102 1.819 1.062 0.991 1.016 0.731 0.770 0.761 +0 0.903 -0.404 0.577 1.613 1.362 0.788 1.083 -0.629 2.173 0.885 2.188 -0.619 0.000 1.406 0.600 1.561 2.548 1.121 1.697 0.121 0.000 0.814 0.865 1.087 0.924 1.240 1.091 1.320 +1 0.430 -0.517 -1.086 1.119 0.367 1.171 -0.970 -1.738 1.087 1.217 -1.147 -0.436 0.000 1.185 -1.075 1.072 2.548 0.724 -2.325 -0.278 0.000 1.109 1.218 0.987 1.173 0.851 1.083 1.027 +1 0.732 -1.361 1.083 1.131 -1.352 0.503 -0.085 -1.569 0.000 0.966 1.076 0.147 0.000 0.826 0.080 -0.802 1.274 0.573 1.918 1.178 0.000 0.991 0.998 1.024 0.679 0.228 0.665 1.072 +0 2.456 -1.429 -1.717 0.197 -1.586 0.648 -1.317 -0.489 0.000 1.775 1.145 0.564 2.215 0.738 -1.293 -0.882 2.548 1.338 0.948 -0.262 0.000 0.815 0.829 0.972 0.712 2.376 1.896 1.550 +1 0.574 -0.903 -0.179 0.218 0.638 0.885 -0.656 -1.050 0.000 1.333 -0.224 0.469 2.215 1.170 -0.151 1.125 0.000 0.396 0.239 -0.684 3.102 0.909 0.635 0.979 0.733 0.590 0.877 0.775 +1 1.100 0.125 1.370 0.797 -1.120 1.204 0.026 0.251 2.173 0.837 0.315 -0.619 0.000 1.156 -0.403 0.940 2.548 0.967 0.745 -1.074 0.000 0.577 1.079 1.016 1.161 0.923 0.901 0.842 +0 0.640 0.554 -0.854 0.517 0.768 0.653 0.651 -1.686 1.087 0.489 -0.892 0.321 0.000 1.065 -0.300 1.280 0.000 1.852 -0.659 -0.284 3.102 0.904 0.939 0.987 0.761 1.441 0.886 0.744 +1 0.898 -0.919 -1.230 1.775 -1.067 3.290 0.745 0.740 2.173 1.155 -1.214 -1.023 0.000 1.936 -0.402 -1.052 2.548 0.686 -1.478 -0.222 0.000 0.826 0.869 0.990 2.817 3.718 2.473 1.997 +0 0.898 -0.579 -0.895 0.481 0.266 1.002 -0.434 
1.542 0.000 1.038 -0.381 -0.427 2.215 0.744 -1.161 0.279 2.548 0.731 -1.116 1.159 0.000 0.755 0.893 0.992 0.634 0.700 0.843 0.757 +1 0.976 0.562 -1.314 0.042 -0.888 0.345 -1.612 1.362 0.000 0.469 0.520 0.347 2.215 0.613 -0.417 -1.085 0.000 0.848 0.368 -0.092 0.000 0.840 0.875 0.823 0.626 0.306 0.600 0.583 +1 0.762 -0.230 0.902 1.662 0.148 0.395 -1.424 0.310 1.087 0.751 -1.582 1.687 2.215 0.685 0.651 -1.587 0.000 1.069 -0.819 -0.659 0.000 0.900 1.822 0.988 1.215 0.762 1.540 1.292 +0 0.526 1.027 0.306 0.760 1.101 1.431 0.558 0.021 2.173 1.661 -0.718 -1.312 0.000 1.528 -2.229 0.914 0.000 1.649 -1.052 -0.965 1.551 1.263 0.926 0.989 1.386 2.118 1.710 1.914 +1 0.966 0.863 -0.461 1.140 -1.530 1.987 1.369 -1.721 0.000 3.129 0.610 0.172 2.215 1.659 0.672 -1.583 0.000 1.784 0.367 0.617 3.102 0.796 0.980 1.193 1.560 0.857 1.176 1.078 +0 0.683 -1.500 0.194 0.300 1.120 0.733 -0.964 0.556 2.173 0.439 -1.639 1.018 0.000 1.123 -1.588 -1.356 0.000 1.811 -0.498 -1.045 3.102 0.907 1.008 0.999 0.817 1.231 0.833 0.743 +1 0.707 -0.267 -1.036 0.661 0.297 0.534 -1.130 -0.146 2.173 0.505 -0.033 0.486 0.000 0.866 0.308 1.710 0.000 0.824 -0.957 -1.393 3.102 0.925 0.986 0.982 0.588 0.633 0.667 0.603 +1 0.361 -2.075 -1.158 0.480 -0.690 0.877 -0.763 1.216 0.000 0.516 -0.280 -1.716 0.000 1.029 -1.319 0.420 0.000 2.681 0.307 -0.701 3.102 0.943 1.152 0.997 0.719 0.853 0.769 0.693 +0 0.287 -2.017 -0.482 0.669 1.712 0.784 -0.242 1.412 2.173 0.488 0.324 0.155 0.000 1.219 -0.538 -0.237 2.548 0.723 -0.701 -0.865 0.000 0.781 0.891 0.993 0.753 1.232 0.719 0.641 +0 2.353 -1.596 -1.284 0.329 -0.025 1.118 -0.854 0.258 2.173 0.809 -1.235 1.348 2.215 0.635 -0.373 -0.721 0.000 0.718 -0.569 0.925 0.000 0.749 0.796 1.105 0.885 1.199 1.055 0.854 +0 1.735 -0.152 0.244 0.577 -1.364 1.115 -0.228 -0.997 1.087 0.956 -0.462 0.673 2.215 1.146 -0.902 -1.348 0.000 1.246 -0.704 1.397 0.000 0.822 0.986 1.375 1.165 1.527 0.958 0.903 +0 1.749 0.203 1.214 0.333 -0.163 1.069 -0.108 -0.940 2.173 0.805 -0.412 0.185 2.215 0.483 0.026 -0.166 0.000 0.521 0.638 1.132 0.000 0.677 0.829 1.000 0.823 1.179 0.901 0.721 +0 0.536 -0.150 0.151 1.955 0.167 1.482 -0.858 -1.701 2.173 0.823 -0.418 -0.397 2.215 0.916 -1.344 -1.165 0.000 0.685 -1.110 0.971 0.000 0.822 0.879 0.985 0.790 1.542 1.198 0.963 +1 0.429 0.128 1.159 1.567 -1.045 1.043 1.148 0.606 0.000 0.744 -0.803 1.284 1.107 0.766 0.790 -0.881 0.000 0.944 -0.682 -0.517 3.102 1.583 1.391 1.040 0.891 0.755 1.093 0.961 +1 1.115 0.166 0.395 0.574 -0.059 0.457 -0.565 0.400 0.000 1.514 0.487 -1.270 2.215 0.978 0.689 -0.434 2.548 0.760 -1.484 1.367 0.000 0.899 1.150 0.993 1.318 0.900 1.048 0.941 +0 0.991 -0.327 -0.317 1.242 -1.314 0.777 -0.382 -1.323 2.173 1.058 0.118 0.965 0.000 0.306 -2.208 0.250 0.000 0.821 -0.428 0.725 3.102 1.531 0.844 1.202 0.742 0.815 0.799 0.790 +1 2.115 0.145 1.612 0.171 0.256 1.080 0.163 -0.565 2.173 1.030 0.344 -1.326 2.215 1.993 0.620 -0.872 0.000 2.771 -0.026 0.075 0.000 0.900 1.128 0.989 0.742 0.992 0.973 0.970 +0 0.720 0.215 -1.644 0.663 -1.244 0.996 -0.214 0.097 1.087 0.499 -0.871 0.820 2.215 0.586 -0.894 -0.483 0.000 0.806 -1.133 1.248 0.000 0.772 0.904 0.988 0.702 0.725 0.774 0.668 +0 0.650 1.213 -1.659 1.007 -0.191 0.565 1.235 -0.030 2.173 0.716 1.003 -1.194 0.000 1.297 0.308 0.750 2.548 1.051 0.154 1.728 0.000 0.739 0.968 1.087 0.664 0.852 0.767 0.708 +0 1.389 0.316 0.779 0.515 0.032 0.963 -0.185 -0.320 0.000 0.910 -0.266 -1.534 2.215 0.597 -0.585 -1.080 0.000 1.187 -0.204 1.166 3.102 0.917 0.988 0.985 0.945 0.611 0.749 0.751 +0 1.477 -1.232 -1.403 2.478 0.739 0.722 -2.632 0.550 
0.000 1.191 -0.846 0.027 2.215 1.041 -1.295 -1.024 2.548 1.085 -0.912 0.903 0.000 1.241 1.229 2.480 1.551 1.016 1.132 1.103 +0 1.743 0.631 0.983 0.360 -1.295 0.761 -0.091 -1.714 2.173 0.982 0.344 0.303 1.107 1.331 -0.273 -0.559 0.000 0.728 1.061 -0.627 0.000 0.948 0.938 0.987 0.822 1.265 0.869 0.829 +1 0.955 0.720 -0.512 0.748 0.585 0.764 0.066 0.125 0.000 1.089 0.266 1.683 1.107 0.967 0.960 1.435 0.000 0.902 0.011 -1.177 3.102 0.987 0.763 0.988 0.665 0.493 0.649 0.616 +0 0.710 1.010 -0.547 1.962 -0.115 0.517 -1.508 1.463 0.000 0.353 0.763 1.118 1.107 0.569 -0.538 -1.186 0.000 1.116 -2.247 1.369 0.000 0.816 0.920 0.992 0.827 0.260 0.719 1.256 +1 0.791 1.625 -0.842 1.190 0.526 1.009 0.953 -0.496 0.000 0.630 1.433 0.253 0.000 2.080 0.518 -1.537 2.548 0.686 0.206 0.502 0.000 0.983 0.932 1.268 1.309 0.899 0.968 0.888 +0 1.053 1.508 -1.693 0.568 0.846 0.397 -0.074 0.312 0.000 0.471 -1.189 -0.713 0.000 0.663 -0.216 1.325 2.548 1.114 0.919 -0.502 3.102 0.920 0.926 0.982 0.966 0.808 0.751 0.937 +0 0.398 0.361 -1.595 1.257 -0.890 0.488 -0.037 0.201 2.173 0.495 0.920 -1.451 0.000 1.420 -0.910 0.913 2.548 0.773 -1.878 1.382 0.000 1.969 1.339 0.983 1.838 0.809 1.238 1.269 +0 1.436 0.754 1.461 0.679 0.937 1.147 0.907 0.728 2.173 1.259 2.236 -0.826 0.000 0.618 2.559 -0.588 0.000 1.200 0.783 -0.651 0.000 0.839 0.891 0.990 0.894 0.912 1.075 1.055 +1 1.971 0.244 1.418 0.300 0.855 1.172 -0.721 -0.027 2.173 1.213 -0.333 1.476 0.000 1.845 -1.755 -0.914 0.000 1.467 -0.459 0.744 3.102 1.927 1.609 0.987 1.600 0.896 1.170 1.393 +0 0.302 2.018 -1.277 0.597 1.259 1.128 0.103 1.665 2.173 1.425 0.883 0.157 0.000 0.998 1.104 -0.389 2.548 0.521 -0.086 -0.064 0.000 0.944 0.747 0.980 0.757 1.486 1.026 0.844 +0 0.576 -1.449 -0.841 1.060 0.461 0.406 -0.230 -0.552 0.000 0.727 0.336 -1.537 2.215 0.654 -0.839 0.407 0.000 0.528 -0.155 0.309 0.000 0.885 0.779 0.998 0.997 0.491 0.851 0.727 +0 0.689 -0.916 0.548 0.582 -0.777 1.086 -0.155 -0.016 2.173 0.534 -1.636 1.683 0.000 1.166 -0.829 -1.333 2.548 0.914 -0.271 1.172 0.000 0.780 0.686 0.986 0.736 1.408 0.917 0.770 +1 1.186 0.864 -1.263 0.128 -0.595 0.624 -0.351 -0.838 0.000 1.119 -0.320 0.439 1.107 1.216 -1.048 1.183 2.548 0.993 -0.835 -0.519 0.000 0.912 1.011 0.985 1.054 0.932 0.923 0.880 +1 0.971 -0.290 -0.067 0.518 -1.355 1.235 -0.582 1.097 2.173 0.454 0.731 -0.682 0.000 0.612 -2.476 -0.510 0.000 1.040 -1.711 -1.530 0.000 0.985 0.732 0.988 1.027 1.111 0.932 0.804 +0 1.612 -1.261 0.767 0.427 -0.792 1.001 0.579 1.631 2.173 0.632 0.206 -0.626 0.000 0.662 1.057 -0.479 0.000 0.536 -1.199 0.278 0.000 0.907 0.937 1.133 0.672 1.522 1.108 0.945 +0 0.977 -0.370 0.268 0.552 -0.557 0.660 0.655 -1.497 2.173 0.487 0.118 0.080 0.000 0.827 0.266 -1.264 2.548 1.086 -0.301 1.101 0.000 0.788 0.911 0.979 0.724 0.257 0.640 0.625 +0 1.102 0.438 0.216 2.674 -0.117 1.806 0.117 0.347 1.087 1.679 -0.558 1.620 0.000 1.705 -0.046 -1.712 0.000 2.180 -0.314 -1.167 3.102 0.774 0.970 1.008 1.229 2.121 1.557 1.657 +1 0.782 -1.167 0.637 1.254 0.318 0.591 -0.677 1.648 2.173 1.543 -0.682 -1.025 2.215 0.443 2.021 0.329 0.000 0.539 -0.013 -1.063 0.000 0.897 1.132 0.992 1.309 0.938 1.026 1.008 +0 0.410 1.046 -0.996 1.391 0.347 0.950 0.196 1.532 2.173 1.142 -0.547 -0.658 2.215 0.509 -1.036 0.232 0.000 0.367 -0.751 1.202 0.000 0.370 0.789 0.986 1.315 1.530 1.143 0.923 +0 0.479 0.381 -0.501 0.937 0.474 0.583 -0.236 -1.522 0.000 0.595 0.260 0.029 2.215 1.088 -0.392 1.074 2.548 0.984 0.463 -1.118 0.000 0.795 0.810 0.987 0.645 0.756 0.726 0.724 +0 0.560 0.745 -1.406 1.016 -0.085 1.266 1.348 0.797 0.000 2.000 -0.325 
-1.488 0.000 1.941 0.705 -0.665 2.548 1.779 -0.105 0.245 0.000 1.021 1.017 0.987 0.704 1.061 1.038 0.952 +1 0.359 0.516 1.009 0.928 -1.661 1.737 -1.449 0.845 0.000 2.331 0.054 -1.149 1.107 0.668 -1.865 -0.914 0.000 0.645 -1.891 1.021 0.000 0.924 0.808 0.991 1.684 0.906 1.283 1.162 +0 0.583 1.670 0.232 1.047 -1.147 0.383 1.250 -0.708 1.087 0.861 0.839 -1.680 2.215 0.922 0.646 0.725 0.000 1.100 -0.605 0.498 0.000 0.897 0.983 1.024 0.790 0.670 0.778 0.800 +1 1.042 0.073 -1.709 0.733 -1.244 0.751 -1.236 0.407 0.000 0.987 -0.109 -1.042 1.107 1.164 0.851 -0.533 2.548 0.412 -0.746 -0.296 0.000 0.525 1.088 0.994 0.820 0.802 0.958 0.957 +0 1.561 0.156 1.161 0.981 0.477 0.919 0.613 -0.100 1.087 0.767 -0.225 -0.788 0.000 1.113 0.213 -1.480 0.000 0.697 0.890 1.516 3.102 0.892 1.152 0.990 0.634 0.863 0.765 0.819 +0 0.380 -1.728 -0.043 1.067 -1.011 0.663 -1.366 1.091 0.000 0.805 -0.549 0.209 2.215 1.101 -1.087 -0.932 2.548 0.528 -0.544 -1.575 0.000 0.695 0.843 0.981 0.755 0.915 0.695 0.658 +1 0.794 -1.267 -1.112 0.105 1.390 2.089 2.397 0.795 0.000 2.684 0.253 -1.161 0.000 1.144 -2.231 0.078 0.000 1.417 -0.103 0.286 3.102 0.684 0.801 0.977 0.765 0.809 0.865 0.769 +1 2.189 -1.398 0.462 0.401 0.213 0.883 -2.216 -1.411 0.000 0.710 -0.133 -1.077 0.000 0.711 -0.443 1.544 2.548 0.556 -0.851 0.821 0.000 0.893 0.687 0.998 0.967 0.720 0.954 0.850 +0 0.378 0.470 -0.426 1.218 1.485 0.638 0.128 1.089 0.000 0.902 -0.260 -0.009 2.215 0.872 1.224 -0.769 2.548 0.467 -0.762 -0.383 0.000 0.928 1.024 0.985 0.849 1.035 0.772 0.712 +0 0.822 -0.857 -1.585 2.376 1.716 1.383 1.061 0.435 2.173 0.729 -0.799 -0.180 0.000 0.823 0.693 -1.267 2.548 0.639 1.594 -0.003 0.000 1.639 1.146 0.987 2.053 1.344 1.349 1.245 +0 1.386 0.269 0.523 0.810 1.034 0.949 2.790 -1.579 0.000 1.040 -0.587 -0.154 2.215 0.908 0.943 -0.851 2.548 0.413 -1.365 0.921 0.000 4.451 2.470 0.990 1.141 1.124 2.102 1.673 +0 0.671 0.041 0.854 0.253 -0.983 0.611 -1.057 1.428 2.173 0.328 1.090 -1.242 0.000 0.560 -0.274 0.226 2.548 1.228 -1.112 0.238 0.000 1.522 0.877 0.988 0.657 0.705 0.721 0.634 +0 0.400 -0.065 0.333 1.495 -1.370 0.814 -0.855 -0.416 2.173 0.980 0.199 0.820 0.000 0.653 -1.121 0.945 0.000 0.456 1.466 1.137 0.000 0.897 1.103 1.071 0.519 0.764 0.697 0.679 +1 1.130 0.874 -0.450 1.158 -1.282 0.844 0.815 1.672 0.000 1.550 0.497 0.023 2.215 0.757 -2.651 -1.588 0.000 0.777 2.167 0.829 0.000 0.612 1.401 1.079 0.644 0.384 0.793 0.791 +1 0.869 1.335 -1.680 1.378 -1.025 1.077 1.231 0.802 1.087 0.902 0.928 -1.072 0.000 1.727 -0.573 0.491 0.000 1.029 0.306 1.705 3.102 0.888 0.710 0.991 1.263 0.953 0.933 0.845 +0 1.130 0.396 1.621 0.779 0.939 0.981 1.259 0.052 0.000 1.422 -0.275 -1.411 2.215 0.928 1.691 0.474 0.000 0.977 0.876 -0.842 3.102 0.815 0.844 0.989 0.883 0.925 1.264 1.130 +0 0.907 0.285 -1.367 0.727 -0.102 0.718 1.421 0.126 2.173 0.738 -1.752 -1.692 0.000 0.487 0.297 1.239 2.548 0.373 2.149 -0.085 0.000 3.096 1.674 1.023 0.926 0.756 1.421 1.116 +1 1.242 -0.631 0.797 0.402 1.067 0.727 1.354 -1.348 0.000 0.768 -1.334 0.655 0.000 1.586 0.488 -0.581 0.000 1.115 -0.354 -0.128 3.102 1.482 1.178 0.998 0.723 0.636 0.866 0.891 +1 0.994 -1.394 1.075 0.724 -0.228 0.775 -1.007 0.141 2.173 1.711 -1.670 -1.147 0.000 0.599 -1.570 0.832 0.000 0.687 -1.874 -1.445 0.000 0.655 0.830 1.084 0.669 0.921 0.623 0.570 +0 1.431 1.705 0.589 0.627 -0.072 1.273 0.069 -1.595 2.173 1.251 1.533 -0.424 0.000 0.685 -0.557 0.764 2.548 1.014 1.388 -0.945 0.000 0.980 1.322 0.980 1.655 1.068 1.196 1.134 +1 1.614 -1.055 -0.165 1.002 1.341 1.906 -0.172 1.364 0.000 1.846 0.055 -0.539 0.000 0.990 
0.852 -1.227 2.548 0.645 0.378 -0.236 3.102 3.968 2.132 1.722 1.514 0.499 1.360 1.358 +1 1.113 1.125 1.698 1.183 0.996 0.923 0.040 0.400 2.173 1.709 0.299 -0.882 0.000 0.509 2.066 0.355 0.000 0.902 -0.714 -1.259 3.102 1.919 1.133 0.991 1.276 1.062 1.057 1.186 +1 0.971 1.015 0.913 0.381 0.209 0.907 0.309 -0.639 1.087 0.620 0.796 -1.188 2.215 0.941 0.448 1.723 0.000 0.526 0.093 -0.243 0.000 0.775 0.810 0.994 0.782 0.596 0.724 0.631 +1 1.596 -0.534 -0.244 0.993 0.482 1.555 -0.248 1.567 0.000 1.366 0.231 -0.813 2.215 0.703 -1.605 0.579 0.000 1.647 -0.339 0.589 0.000 0.868 0.655 1.062 1.123 0.725 0.901 0.815 +1 0.499 0.260 -1.622 0.717 1.398 1.161 -0.942 0.522 0.000 1.357 -1.229 -1.139 2.215 0.555 -1.240 -0.296 1.274 0.976 -1.948 -0.797 0.000 0.895 0.737 0.991 2.063 0.636 1.407 1.345 +1 0.338 -2.151 -1.013 1.110 1.079 0.672 0.569 1.450 2.173 0.972 -0.746 -0.986 0.000 1.012 -1.036 -0.251 0.000 0.892 0.044 0.272 3.102 0.977 0.858 0.987 1.081 0.748 0.902 0.850 +0 1.611 -1.225 -0.889 0.743 0.387 0.491 -0.905 -1.456 0.000 0.773 -2.126 -0.293 0.000 1.706 -0.399 1.026 2.548 0.788 -0.341 0.026 0.000 0.956 0.945 1.383 0.717 0.183 0.773 0.689 +1 1.185 0.360 -0.318 0.298 1.062 0.736 -0.466 -0.747 2.173 0.704 0.706 1.573 0.000 1.029 -0.302 1.039 0.000 1.242 1.958 1.368 0.000 0.943 1.178 0.989 0.711 0.877 0.925 0.792 +0 2.497 0.558 1.015 1.826 0.430 2.579 0.443 -0.915 0.000 0.255 0.725 -1.423 0.000 1.055 -0.227 1.333 2.548 0.695 1.208 -0.201 1.551 0.804 0.923 1.486 1.037 0.897 0.971 1.298 +0 0.698 -2.000 0.894 1.397 -0.337 1.332 -2.786 -0.959 0.000 0.481 0.291 1.501 2.215 1.006 -0.852 0.399 2.548 1.037 -1.562 -1.121 0.000 0.910 1.638 1.225 1.286 0.787 1.454 1.203 +1 0.337 -1.037 -0.039 0.893 0.340 1.412 1.135 0.583 0.000 3.551 0.863 -1.677 2.215 1.216 0.206 -0.585 2.548 2.482 0.665 0.137 0.000 0.926 1.034 0.996 1.696 1.984 1.661 1.336 +1 0.413 0.354 0.086 0.927 -1.095 0.817 0.410 0.710 0.000 0.815 0.191 1.278 0.000 1.081 -0.969 -0.486 2.548 0.940 0.627 -1.415 0.000 0.868 1.367 0.986 0.727 0.598 0.858 0.880 +1 0.773 0.328 -1.375 1.655 1.345 1.339 -0.134 -0.354 1.087 0.652 -0.895 0.946 2.215 0.978 0.154 0.256 0.000 1.306 1.670 -1.580 0.000 1.807 1.629 0.999 1.478 1.383 1.340 1.179 +1 0.665 -2.237 -1.046 0.826 -1.293 1.036 -0.730 0.685 2.173 0.334 -0.638 1.087 0.000 1.186 -0.374 -0.047 1.274 1.231 -0.859 -1.214 0.000 1.015 0.949 0.971 1.209 0.877 0.924 0.953 +0 0.286 0.656 0.666 0.731 0.745 0.713 -0.346 -1.022 0.000 0.625 -1.531 -1.727 2.215 0.778 -0.656 0.506 2.548 0.513 -1.869 -0.027 0.000 1.245 0.949 1.001 0.726 0.745 0.696 0.662 +1 0.882 -0.688 0.443 0.627 -1.538 0.817 0.408 1.045 0.000 1.656 0.301 -0.934 0.000 1.463 -0.310 1.069 2.548 1.254 -0.067 -0.445 0.000 0.882 0.649 1.007 0.696 0.766 0.872 0.788 +1 0.715 -0.257 1.054 0.861 -1.434 1.119 -0.832 -1.436 2.173 1.047 0.168 0.686 0.000 1.808 -0.025 0.028 0.000 0.400 -1.329 -0.973 0.000 1.096 0.589 0.987 0.731 0.615 0.819 0.738 +1 0.332 -0.322 0.340 0.585 -0.965 0.435 0.461 0.895 0.000 0.869 -0.835 0.379 2.215 1.393 -0.669 -1.228 2.548 0.757 1.083 1.704 0.000 0.693 1.041 0.983 1.068 1.163 0.962 0.859 +1 1.375 -0.400 -0.117 0.770 -0.626 0.940 -1.284 1.320 0.000 0.490 -1.242 -0.603 2.215 0.674 -1.940 0.813 0.000 0.396 -0.410 -1.394 3.102 0.883 0.934 0.981 0.575 0.306 0.578 0.840 +0 0.717 -0.219 -1.605 0.272 -0.213 0.711 2.248 0.563 0.000 0.788 1.019 -0.194 2.215 1.028 0.474 -1.402 2.548 0.643 1.488 1.594 0.000 0.864 0.951 0.993 0.590 0.885 0.847 0.782 +1 0.688 -0.868 -0.619 1.368 0.303 0.584 -1.894 -1.503 0.000 0.884 0.257 0.713 2.215 0.507 -0.246 1.642 1.274 
1.206 -0.120 -0.619 0.000 1.580 0.974 0.994 0.903 0.562 0.913 0.838 +0 0.552 0.712 0.466 1.403 1.145 1.561 0.049 1.200 1.087 2.900 -0.894 -0.665 2.215 0.425 1.004 -0.154 0.000 0.556 -1.275 -0.995 0.000 0.999 1.215 0.985 0.708 3.489 1.732 1.328 +0 0.479 -0.536 -0.643 3.147 0.004 1.617 0.764 0.659 0.000 0.987 1.500 -0.739 1.107 2.318 -0.015 -1.730 0.000 3.221 -0.425 -1.623 3.102 1.129 1.480 0.985 1.733 2.273 1.810 1.721 +1 1.234 0.113 -1.238 1.294 -0.638 1.356 -0.155 -0.773 1.087 2.207 -0.615 -1.120 2.215 6.523 0.503 0.868 0.000 1.161 -0.997 1.248 0.000 0.818 1.234 0.992 0.801 0.992 1.110 0.912 +1 0.406 -1.686 -1.060 1.341 0.139 0.772 -1.264 1.619 2.173 1.449 -0.097 0.432 2.215 0.754 -2.615 -1.071 0.000 0.622 -0.822 -1.471 0.000 0.864 0.872 0.987 1.690 1.675 1.308 1.151 +0 0.601 1.261 -1.003 1.282 0.181 1.482 1.487 1.366 1.087 2.116 1.299 -0.495 1.107 1.137 0.870 1.645 0.000 0.895 1.411 0.516 0.000 1.036 0.892 1.066 0.862 2.600 1.296 1.072 +1 0.307 2.039 0.768 1.287 -0.088 2.655 1.139 1.135 0.000 2.068 0.645 -0.945 1.107 1.182 0.779 -0.269 0.000 2.898 0.008 -0.601 3.102 1.168 0.992 0.981 1.127 0.999 0.908 0.825 +1 1.739 -1.042 -0.513 0.496 1.343 1.236 -0.293 0.841 1.087 0.603 -2.822 -1.483 0.000 0.737 -1.904 -1.591 0.000 0.530 0.049 -0.903 3.102 0.439 0.932 1.280 1.350 0.870 1.239 1.083 +0 0.749 1.581 1.427 1.943 -1.168 0.874 1.595 -0.744 2.173 1.114 1.111 0.826 2.215 2.859 -0.080 0.539 0.000 1.621 1.114 -1.286 0.000 0.828 0.938 1.203 0.872 1.475 0.980 0.793 +1 0.988 -0.305 0.006 0.949 -1.014 0.819 -1.139 -1.625 2.173 0.920 0.339 0.741 0.000 0.528 0.575 -0.311 0.000 1.110 -0.927 -0.428 3.102 0.909 1.138 1.066 1.018 0.890 1.038 0.893 +0 1.306 0.494 -0.051 1.547 -0.598 1.329 0.836 -0.430 2.173 3.610 0.378 1.451 0.000 0.489 -0.807 -0.220 0.000 0.861 -0.777 1.103 3.102 2.464 1.519 0.991 0.596 1.595 1.640 1.453 +1 0.952 -1.626 1.588 1.204 -1.249 1.275 -1.147 0.069 2.173 0.416 -1.203 1.241 0.000 0.470 -2.106 -0.802 0.000 0.788 -0.304 0.657 3.102 0.757 0.985 0.990 0.965 0.688 1.045 0.830 +1 0.763 -0.093 1.647 1.519 0.864 0.889 0.751 -0.381 2.173 0.908 0.308 0.832 0.000 2.052 1.146 -1.000 2.548 0.375 1.043 0.737 0.000 0.374 0.951 0.986 1.619 0.987 1.238 1.011 +1 0.883 -1.128 -1.705 1.412 0.049 0.595 -0.553 1.025 0.000 1.737 -0.961 0.007 0.000 0.757 -0.635 1.495 0.000 1.126 -0.488 -0.748 1.551 0.949 1.185 1.546 0.881 0.473 0.807 0.789 +1 0.755 0.612 -0.190 0.410 -1.450 1.358 -0.040 -0.532 2.173 0.890 0.130 1.673 1.107 0.976 -1.832 0.616 0.000 1.683 0.531 1.223 0.000 0.807 1.105 0.987 0.891 1.486 0.971 0.812 +0 0.682 -0.171 -0.685 1.082 0.961 0.681 -0.546 -0.091 0.000 0.670 -0.725 -1.738 1.107 0.917 0.453 0.951 1.274 0.920 0.468 -1.346 0.000 0.881 0.932 1.185 0.717 0.778 0.710 0.658 +1 0.396 -0.764 -0.193 1.550 1.273 1.358 -0.155 1.647 0.000 1.739 1.096 -0.574 2.215 1.783 0.952 1.068 0.000 2.200 2.485 -0.509 0.000 0.893 0.860 1.052 1.885 1.400 1.270 1.040 +1 0.728 -2.210 0.418 0.481 -0.559 0.955 -1.134 -0.484 0.000 0.658 -1.216 -1.138 0.000 1.848 -1.375 1.376 0.000 1.122 -0.633 0.889 3.102 0.939 1.030 0.993 0.481 0.293 0.624 0.593 +0 0.701 0.605 -1.429 0.987 0.499 0.969 1.748 1.576 0.000 1.983 -0.745 0.034 2.215 1.189 -0.875 -1.154 2.548 0.826 -0.663 0.413 0.000 1.056 0.937 1.136 1.361 1.441 1.111 0.950 +1 0.734 -1.285 -0.627 0.787 -1.654 1.046 -0.965 0.156 2.173 0.882 -0.789 -1.494 0.000 1.117 -0.786 0.705 2.548 0.487 1.551 -1.147 0.000 1.536 1.307 0.982 1.089 0.643 1.097 1.059 +1 1.182 0.013 1.063 0.312 1.360 0.863 0.857 1.514 2.173 1.142 0.126 -0.658 0.000 1.063 0.777 -0.572 0.000 1.323 0.335 0.064 
0.000 0.994 0.843 0.990 1.030 0.737 0.872 0.881 +1 0.653 0.439 0.083 0.428 0.052 0.960 -1.125 -1.130 0.000 1.129 -0.052 0.662 2.215 0.658 0.634 -1.529 2.548 0.702 -1.087 1.256 0.000 0.943 0.957 0.979 0.741 0.913 0.911 0.803 +1 2.221 -0.693 1.244 0.570 -1.156 0.899 -0.973 -0.331 2.173 0.356 0.207 0.120 0.000 0.465 0.923 -0.330 2.548 0.579 0.624 -1.335 0.000 0.593 0.855 1.294 1.034 0.944 0.980 0.800 +0 1.442 1.484 0.930 0.637 0.093 0.914 2.178 0.815 0.000 2.322 1.080 -0.996 1.107 0.922 0.957 -0.751 1.274 0.952 -1.445 0.403 0.000 0.846 1.047 0.987 1.490 0.344 1.129 1.017 +0 2.073 0.641 0.075 1.716 -0.228 4.010 -0.595 1.706 2.173 2.140 -0.629 -0.256 0.000 2.537 0.784 0.363 2.548 1.080 -2.189 -1.325 0.000 2.817 3.426 1.003 4.120 4.888 3.219 2.973 +1 0.768 0.407 -0.985 0.890 0.292 0.773 -0.445 -1.107 0.000 0.515 0.314 -0.521 0.000 0.894 1.221 0.926 2.548 1.352 -0.247 0.894 1.551 0.859 1.007 1.046 0.780 0.768 0.873 0.761 +1 1.125 1.229 -0.604 0.330 -1.455 0.987 0.847 0.456 0.000 1.506 1.097 1.573 2.215 0.639 1.191 -1.145 1.274 0.428 0.362 -0.484 0.000 0.773 0.829 0.991 1.065 0.671 0.820 0.803 +0 1.617 -0.783 0.262 0.372 0.937 0.547 -0.974 1.734 1.087 0.377 -1.260 -0.918 2.215 0.594 1.138 1.409 0.000 0.950 0.814 -0.630 0.000 0.807 0.939 0.992 0.899 0.467 0.767 0.872 +0 0.988 0.109 1.692 0.611 0.345 1.186 1.112 -0.674 2.173 1.218 -0.286 0.717 1.107 1.217 0.953 0.663 0.000 1.314 0.890 -1.584 0.000 1.253 1.287 1.008 1.162 2.163 1.243 1.017 +1 0.504 1.216 -0.733 1.923 -0.408 1.108 0.252 0.758 2.173 1.561 -1.496 -1.490 0.000 1.497 0.646 0.129 0.000 1.128 0.940 1.615 3.102 0.741 1.731 0.989 1.300 0.982 1.464 1.389 +1 0.720 1.102 1.415 0.355 0.036 1.241 2.773 1.585 0.000 1.353 -1.029 -0.807 2.215 1.136 -0.392 0.419 2.548 0.974 0.075 0.010 0.000 3.462 3.174 0.990 1.198 1.250 3.072 2.196 +1 1.290 -0.496 1.019 0.618 -0.358 1.181 -0.777 -0.856 0.000 1.017 -1.291 0.793 0.000 0.642 -0.080 0.202 2.548 0.505 -0.388 -1.636 1.551 0.857 0.762 1.170 0.612 0.441 0.506 0.536 +1 0.777 -0.634 1.117 0.893 -0.031 1.135 0.456 1.297 0.000 1.270 0.506 -0.685 2.215 0.991 -0.250 -1.297 2.548 1.181 -1.151 -0.463 0.000 2.574 1.556 0.991 1.083 0.793 1.192 1.022 +1 0.799 -1.564 -1.377 0.395 0.506 1.455 -0.782 0.798 2.173 0.783 -1.049 -1.210 0.000 0.980 -0.211 -0.403 0.000 1.204 1.335 -0.436 0.000 0.894 1.236 0.989 0.994 0.902 1.226 1.029 +1 0.839 0.838 1.258 0.783 -1.188 1.349 1.147 -1.164 0.000 1.859 -1.358 0.615 2.215 0.635 -0.652 0.662 0.000 1.431 -0.624 -0.850 3.102 0.532 0.890 0.988 1.115 1.506 1.590 1.203 +0 0.782 -1.153 -1.374 2.708 -0.599 0.736 0.639 1.362 0.000 1.258 -1.872 -1.618 0.000 2.063 -0.219 0.948 2.548 3.434 -0.880 0.170 0.000 0.767 0.942 1.297 0.669 0.536 1.079 1.095 +0 0.671 -0.423 0.105 0.436 1.464 0.904 0.539 0.102 1.087 1.300 0.066 -1.290 2.215 0.864 -1.040 0.974 0.000 0.419 0.137 0.740 0.000 0.735 1.047 0.992 1.140 1.564 1.053 0.906 +1 0.736 1.192 1.468 0.914 0.260 1.198 -0.367 -0.941 2.173 0.749 0.681 1.400 0.000 0.782 -0.596 0.473 2.548 0.541 0.070 0.746 0.000 0.530 1.208 1.007 0.897 1.167 1.056 0.877 +1 0.419 -2.247 1.488 0.852 0.832 1.935 -1.921 0.078 0.000 2.141 1.598 1.129 0.000 3.370 -0.016 -0.770 2.548 3.197 0.406 -1.536 3.102 13.098 7.392 0.996 1.578 1.721 4.543 3.278 +0 0.984 1.504 0.575 1.586 0.716 1.029 0.355 -0.866 0.000 0.523 -1.136 -0.922 0.000 1.502 -1.753 0.639 0.000 1.150 1.040 -0.702 0.000 1.030 0.862 0.982 1.255 0.506 0.976 1.216 +0 0.507 0.904 1.087 1.248 -0.508 0.881 0.095 1.054 2.173 0.516 0.596 -1.077 0.000 0.686 -0.733 -1.005 0.000 0.794 0.296 0.317 3.102 0.706 1.055 1.092 0.625 
0.558 0.694 0.706 +1 0.770 -0.476 0.768 1.330 0.849 0.837 0.359 -1.081 0.000 0.827 0.098 -0.112 0.000 0.806 -2.542 0.034 0.000 1.521 0.757 -1.585 3.102 1.374 1.098 0.992 0.682 0.310 0.982 1.058 +0 0.802 0.631 0.901 1.141 0.431 0.531 1.633 -1.193 0.000 0.542 -0.211 1.625 2.215 0.485 -0.962 -1.619 0.000 0.542 -2.132 -0.100 0.000 0.724 0.729 0.973 0.870 0.576 0.749 1.024 +0 0.852 -1.002 0.740 0.304 -1.283 1.022 0.476 0.704 2.173 1.255 -1.051 -0.599 2.215 0.653 0.443 -1.406 0.000 1.022 -1.233 1.459 0.000 0.793 0.881 0.987 0.863 2.105 1.272 1.005 +1 0.714 0.081 1.160 0.892 0.012 0.915 -0.948 -1.475 0.000 0.804 -0.498 1.029 2.215 1.104 -1.006 -0.903 2.548 1.514 -0.682 0.047 0.000 1.767 1.106 0.987 0.646 1.032 0.857 0.791 +0 1.162 -0.039 0.307 0.826 -0.604 0.847 0.077 -0.270 0.000 0.916 -0.429 1.413 2.215 0.448 -0.511 -1.125 2.548 0.878 0.767 1.228 0.000 1.407 0.896 0.993 0.945 0.515 0.757 0.705 +1 0.609 2.089 1.548 1.898 -0.290 0.882 -0.102 0.620 0.000 0.690 -2.189 1.352 0.000 2.182 0.617 -0.222 2.548 0.731 0.948 -1.461 3.102 2.374 1.819 1.484 1.426 0.896 1.605 2.117 +1 1.925 0.683 -1.310 0.496 -0.611 0.627 -0.706 -0.532 2.173 0.454 -0.522 1.467 0.000 0.879 2.155 0.859 0.000 0.886 0.075 0.253 0.000 1.046 0.866 0.994 0.910 0.477 0.838 0.888 +0 0.667 -0.068 -1.425 0.427 0.836 0.870 -0.162 1.384 2.173 1.669 -0.115 -0.713 2.215 1.017 -0.715 0.575 0.000 0.512 0.129 0.466 0.000 0.778 0.908 0.995 1.032 1.683 0.961 0.858 +0 0.757 -0.232 -0.098 0.644 0.982 0.797 -0.924 -0.969 0.000 1.186 -0.578 0.456 2.215 0.763 -0.010 -1.511 0.000 1.142 -1.106 1.453 3.102 0.938 0.864 0.989 0.777 0.911 0.871 0.832 +1 0.533 1.404 -0.991 0.591 0.235 1.389 2.920 -1.354 0.000 1.257 0.468 -0.032 0.000 1.590 0.180 0.357 0.000 1.184 1.098 0.791 3.102 0.805 0.899 0.995 0.806 0.872 0.911 0.775 +1 0.618 -1.212 -0.702 0.560 0.389 1.122 -0.587 -1.710 1.087 0.747 0.146 -0.208 0.000 1.323 0.098 -1.490 0.000 1.415 0.123 0.460 3.102 1.088 0.802 0.990 0.983 1.335 1.060 0.893 +1 1.001 -0.516 -0.671 0.490 1.045 1.025 0.553 0.711 2.173 0.438 -1.570 -0.905 0.000 0.788 0.023 -0.496 0.000 0.731 -1.744 1.682 0.000 0.886 0.570 0.988 1.015 0.734 0.834 0.722 +0 0.403 0.667 -1.518 1.124 0.285 0.850 -1.029 -1.303 1.087 0.461 -1.096 0.392 0.000 1.110 0.427 0.452 0.000 0.652 -0.195 -1.016 0.000 0.969 1.233 0.987 0.614 0.968 0.799 0.730 +1 0.816 -0.544 -0.577 1.445 0.434 0.716 -0.382 1.544 2.173 0.381 0.094 -1.664 0.000 0.635 -1.875 -0.040 0.000 0.932 0.039 -0.979 3.102 1.155 1.022 1.189 0.791 0.685 0.744 0.721 +0 1.390 0.266 -0.533 1.271 -0.310 1.329 0.124 1.437 2.173 1.560 -0.585 -0.738 2.215 1.239 0.258 0.851 0.000 0.775 1.081 1.073 0.000 0.611 0.837 1.002 0.741 2.111 1.281 1.171 +1 1.262 0.316 -1.008 1.507 -1.171 2.164 1.238 0.913 0.000 1.485 -1.044 -0.227 2.215 0.633 -1.529 -1.602 0.000 1.104 -0.397 -0.919 3.102 4.601 2.788 1.008 2.010 0.760 2.286 1.947 +1 0.442 -1.143 1.198 0.812 -0.094 0.849 0.441 1.554 0.000 1.928 0.213 0.237 2.215 0.736 1.234 -1.428 2.548 1.047 -1.236 -1.596 0.000 1.672 1.310 0.980 1.828 1.473 1.563 1.447 +1 0.446 -1.308 -1.057 1.617 1.411 2.207 -0.844 -0.913 0.000 1.145 -0.271 0.804 2.215 2.078 -1.081 0.583 2.548 1.616 -0.967 0.108 0.000 0.911 0.883 0.989 1.027 0.836 0.812 0.740 +0 0.558 0.238 -1.479 1.509 0.812 0.752 0.121 0.833 2.173 0.563 0.451 -0.024 0.000 1.762 -0.504 -1.085 2.548 0.710 0.869 -0.637 0.000 0.496 0.963 1.119 0.649 1.500 0.923 0.809 +0 0.879 0.088 -0.737 0.743 -0.181 1.159 -0.258 1.321 0.000 1.509 0.371 0.229 2.215 1.999 -0.737 -1.512 2.548 0.760 -0.205 -1.688 0.000 0.938 0.975 0.982 0.880 2.183 1.243 0.998 
+0 1.853 0.940 -0.872 0.647 1.591 0.955 0.257 0.644 1.087 0.548 -0.943 -0.240 0.000 0.816 0.476 1.296 2.548 0.456 0.478 -1.507 0.000 0.811 0.950 1.208 0.851 0.626 0.896 0.828 +0 0.749 -0.298 -0.212 0.459 1.728 0.790 0.136 -1.725 0.000 0.871 -1.631 1.473 0.000 1.865 0.798 -0.113 2.548 0.734 -1.265 -1.344 1.551 1.306 1.098 0.982 0.805 1.566 1.033 0.836 +0 0.748 -0.403 -0.542 0.774 -0.261 0.448 -1.752 0.466 0.000 0.665 -1.392 0.995 0.000 1.135 -1.160 -1.534 2.548 0.399 -0.942 -1.138 3.102 0.721 0.774 0.982 0.630 0.183 0.746 0.847 +0 0.333 0.384 0.592 1.948 -0.187 0.687 1.500 1.525 0.000 0.579 0.327 1.577 0.000 0.619 -0.220 -1.632 2.548 0.989 0.448 -0.595 3.102 0.861 0.755 0.986 0.806 0.537 0.615 0.621 +0 0.643 -1.776 -1.451 0.658 -1.041 0.694 -0.151 0.142 2.173 0.730 0.578 -0.964 2.215 0.912 0.373 1.172 0.000 0.581 1.708 0.857 0.000 0.769 0.987 0.995 0.855 0.966 0.796 0.868 +0 0.562 -0.537 -0.541 0.715 0.508 0.792 -0.274 -1.062 1.087 0.690 -1.489 0.374 0.000 0.642 0.188 1.457 2.548 1.459 -1.039 1.217 0.000 0.919 0.891 0.994 0.980 0.713 0.849 0.774 +0 0.799 -0.112 -0.026 0.556 1.105 0.728 1.610 1.536 0.000 0.566 0.829 -1.150 1.107 1.027 0.758 0.413 2.548 0.558 1.209 -0.753 0.000 0.862 0.910 0.987 0.703 0.800 0.640 0.644 +1 0.729 -0.072 0.129 0.760 0.674 0.773 0.324 -0.798 2.173 0.648 0.283 0.688 0.000 1.530 0.100 -1.487 2.548 0.553 1.118 -0.267 0.000 0.950 0.852 0.992 0.941 0.802 0.837 0.728 +0 1.111 0.975 -1.220 0.722 -0.156 1.408 0.437 -1.452 0.000 1.606 0.457 0.413 1.107 1.039 1.056 -0.441 2.548 0.881 -0.080 0.084 0.000 1.735 1.322 1.016 1.104 1.070 1.172 0.977 +1 1.372 -0.040 -0.325 0.336 -1.281 0.787 0.472 1.459 2.173 0.503 1.166 0.316 0.000 0.545 0.810 0.980 2.548 1.111 1.875 -0.726 0.000 0.951 0.733 0.985 0.995 0.382 0.734 0.766 +1 1.260 -0.415 -1.468 0.973 1.404 0.824 -0.539 -0.455 2.173 0.739 1.674 0.491 0.000 0.561 -1.421 0.723 0.000 1.077 0.906 -0.326 1.551 0.864 0.702 0.986 1.100 0.913 0.903 0.933 +1 0.726 -0.993 1.628 1.282 -0.936 1.181 0.242 0.924 1.087 0.798 -0.634 -1.144 0.000 0.988 -0.166 0.348 2.548 0.865 0.524 -1.041 0.000 0.891 0.888 0.988 1.444 0.728 1.006 0.921 +0 2.224 0.516 0.846 0.308 0.297 0.998 0.145 -1.248 2.173 0.898 -0.032 -0.134 0.000 0.940 1.017 -0.900 0.000 0.625 -0.646 -1.340 3.102 1.240 0.897 0.984 1.313 0.405 0.876 0.884 +0 1.402 0.815 -0.603 0.891 1.041 0.487 0.272 1.123 2.173 0.707 1.194 -1.526 1.107 0.492 -1.834 1.326 0.000 1.400 -0.249 -0.377 0.000 0.882 0.855 1.543 0.925 0.729 0.794 0.818 +1 0.881 0.421 0.029 0.684 1.615 1.138 0.455 -1.564 0.000 1.225 -0.489 0.139 2.215 0.658 0.119 -1.004 2.548 0.731 0.386 1.204 0.000 0.839 0.663 1.065 0.895 0.874 0.929 0.797 +1 0.851 -0.896 -0.661 0.450 0.932 0.357 1.549 1.027 0.000 0.931 -0.655 -1.499 2.215 0.561 -0.145 0.445 2.548 0.812 -1.033 -0.158 0.000 1.728 0.987 0.989 0.722 0.781 0.836 0.727 +0 1.292 -1.041 0.545 1.577 0.988 1.476 -0.377 0.391 1.087 0.977 0.325 -0.963 2.215 1.392 -1.271 -1.053 0.000 1.854 -0.357 -1.187 0.000 0.917 1.037 0.990 0.866 1.781 1.315 1.228 +0 1.349 -0.693 -0.284 1.086 -1.730 0.957 1.431 -0.865 0.000 0.836 0.023 1.665 1.107 1.368 -1.341 0.603 0.000 0.794 0.417 0.242 0.000 0.908 1.135 1.617 0.920 0.675 0.750 0.758 +0 0.621 1.075 -1.168 1.425 1.024 0.653 -0.049 1.297 2.173 1.368 0.077 -0.451 0.000 0.718 1.041 0.077 2.548 0.961 -0.170 -1.165 0.000 0.921 0.869 1.199 0.878 0.932 0.823 0.857 +1 0.870 0.786 1.607 0.205 -1.338 0.612 -2.319 -1.573 0.000 1.403 0.597 0.386 2.215 1.397 0.745 -0.492 2.548 0.705 0.320 -0.287 0.000 2.050 2.243 0.981 1.021 1.068 1.740 1.362 +0 1.551 0.529 -1.023 1.002 
-1.610 0.806 -0.965 0.617 0.000 1.050 -0.901 0.086 0.000 1.118 -0.242 1.443 2.548 0.820 0.111 -0.401 3.102 0.902 0.849 0.990 0.830 0.744 0.767 0.952 +0 0.745 -1.345 1.399 1.516 0.432 0.833 -0.527 -0.350 2.173 0.890 -2.203 1.617 0.000 0.501 -2.448 1.262 0.000 0.463 0.599 -1.238 0.000 0.388 1.432 1.126 1.098 0.439 1.138 1.006 +1 0.686 0.142 -1.426 0.985 0.120 1.117 -0.531 0.171 0.000 0.981 -0.391 1.636 1.107 0.554 0.857 0.702 2.548 0.701 -1.958 -1.380 0.000 1.934 1.452 1.121 0.878 0.814 1.062 0.923 +1 0.900 -0.445 -0.014 1.414 0.748 0.893 1.252 1.002 0.000 1.715 -1.701 -0.508 0.000 1.062 2.145 -0.723 0.000 3.456 -0.317 -1.399 3.102 1.262 1.254 0.991 1.380 1.162 1.195 1.114 +0 1.143 0.593 0.060 0.792 1.341 0.800 1.178 -1.730 2.173 0.479 0.423 -1.523 0.000 0.962 -0.671 -0.248 1.274 0.518 -0.899 0.520 0.000 0.826 0.934 1.206 0.930 1.621 0.952 0.794 +1 0.406 1.032 -1.285 0.477 0.432 0.894 0.818 -0.251 2.173 1.279 0.633 1.697 0.000 0.689 -0.340 1.413 0.000 0.376 -0.736 0.161 3.102 0.853 0.752 0.981 0.863 0.631 0.866 0.744 +1 1.478 1.156 1.148 1.144 0.412 1.031 0.654 -0.999 2.173 0.614 0.264 1.500 2.215 0.610 1.644 -0.785 0.000 0.747 -0.548 -0.264 0.000 1.216 0.961 1.108 0.813 0.937 0.955 0.880 +1 0.547 -0.326 1.308 0.170 1.534 0.660 1.194 -0.126 0.000 1.052 1.071 1.685 2.215 0.600 0.464 -0.234 0.000 0.850 0.845 -0.551 0.000 0.962 1.064 0.999 0.530 0.531 0.681 0.615 +1 1.062 -0.076 0.594 0.903 -0.298 2.590 -1.528 -1.243 0.000 0.934 -0.878 -0.026 0.000 1.129 -1.416 0.418 0.000 2.018 -0.450 1.205 3.102 0.806 1.121 0.988 0.610 0.854 0.903 0.812 +0 0.860 0.224 -0.608 1.260 -1.455 0.506 0.371 -1.248 0.000 1.251 -0.004 0.534 2.215 1.548 -0.420 0.996 0.000 1.995 -0.679 -0.220 3.102 0.852 0.903 0.996 1.022 1.076 0.943 0.779 +1 1.231 -0.447 -0.724 0.658 -1.395 0.889 -0.337 0.081 2.173 0.314 -1.595 0.647 0.000 0.717 -0.215 -1.517 0.000 1.439 -0.769 1.551 3.102 0.874 0.940 0.990 0.861 1.214 0.847 0.735 +1 1.160 0.229 0.513 0.819 -0.530 1.210 0.110 -0.939 0.000 0.647 -1.320 0.616 2.215 0.615 0.573 0.924 2.548 0.811 0.386 -1.571 0.000 0.857 0.933 1.089 0.934 0.803 0.920 0.838 +1 0.389 -1.290 0.025 0.114 -0.786 0.735 -0.304 0.621 2.173 0.722 1.082 -1.237 0.000 0.596 0.263 1.477 0.000 0.728 -0.275 -0.345 3.102 0.774 1.114 0.987 0.526 0.592 0.716 0.637 +0 1.158 0.807 -0.364 1.093 -0.725 1.006 -0.601 0.594 2.173 0.396 0.157 -1.226 0.000 1.528 0.943 1.711 2.548 0.376 1.235 1.695 0.000 0.433 1.005 0.974 1.155 1.926 1.221 0.923 +0 0.728 -0.882 1.150 0.798 -0.694 0.965 -0.619 0.685 2.173 0.875 1.282 -1.109 0.000 0.909 -0.636 -0.962 2.548 0.723 0.992 0.906 0.000 0.899 0.940 1.051 0.862 1.164 1.047 0.918 +1 1.249 -0.167 -0.846 1.821 -0.240 0.921 -1.055 -1.730 2.173 0.570 -1.218 0.970 0.000 0.771 -0.058 0.913 2.548 0.530 0.713 0.520 0.000 0.946 1.006 1.086 0.940 0.906 1.017 0.885 +0 1.030 1.120 0.018 0.251 -0.492 0.886 1.534 -1.287 0.000 1.162 1.131 1.522 1.107 2.420 1.987 0.432 0.000 0.724 0.334 -0.221 0.000 0.983 0.866 0.990 0.528 0.504 0.661 0.620 +0 0.723 -1.223 0.403 0.461 -1.134 1.047 -0.519 1.136 2.173 0.831 -0.614 -0.524 0.000 0.532 1.003 -1.190 2.548 0.655 0.617 -0.087 0.000 0.802 0.738 0.991 1.092 1.164 1.010 0.929 +1 0.752 1.290 -0.907 0.710 0.893 0.790 0.244 1.034 2.173 1.095 0.933 -0.083 0.000 0.992 0.437 -0.627 0.000 1.266 -0.045 -1.463 3.102 0.847 1.014 1.011 0.881 0.838 0.890 0.796 +1 0.902 -0.291 0.639 1.229 1.317 0.647 -1.570 -0.516 0.000 0.524 0.033 -1.389 0.000 1.051 -0.009 -0.688 2.548 0.832 -0.020 0.844 1.551 0.610 0.614 0.986 0.537 0.702 0.666 0.613 +1 0.381 -0.758 -0.324 1.917 0.675 0.944 0.249 
-0.471 0.000 1.031 0.076 -1.703 2.215 0.773 0.641 1.029 0.000 1.766 0.958 -1.263 1.551 0.933 0.854 0.989 1.861 0.831 1.324 1.096 +1 0.369 -1.145 -1.354 0.457 1.547 0.871 -0.258 0.216 2.173 0.870 -0.077 -1.248 0.000 0.874 0.857 -1.674 0.000 0.825 0.066 0.752 3.102 0.871 0.799 0.995 0.935 0.444 0.814 0.710 +0 0.799 1.230 1.526 0.741 0.595 0.418 1.845 -0.544 0.000 0.611 0.539 -1.294 2.215 0.637 0.798 0.204 0.000 0.471 -0.616 1.603 3.102 0.750 0.833 0.990 0.702 0.416 0.594 0.600 +1 0.605 0.386 1.666 0.609 -0.301 1.026 0.395 1.122 0.000 1.024 -0.581 -1.164 0.000 0.708 0.760 0.839 0.000 1.251 0.911 1.406 0.000 0.800 0.683 0.984 0.686 0.181 0.879 0.827 +0 1.482 -0.245 -0.208 0.538 0.254 1.036 -0.317 1.641 0.000 0.191 1.178 -1.043 0.000 0.317 -0.680 0.390 2.548 0.472 1.176 1.168 1.551 1.012 0.763 0.996 0.483 0.430 0.555 0.710 +1 1.334 0.459 0.112 1.735 -0.377 0.998 -0.007 1.465 0.000 0.803 1.274 -1.050 1.107 0.404 -0.387 -0.032 0.000 0.701 0.149 0.985 3.102 1.140 1.240 0.984 0.768 0.769 0.749 0.879 +0 0.973 -1.017 0.711 1.341 1.397 0.389 0.646 -0.905 0.000 0.977 0.066 0.133 1.107 1.509 -1.023 -1.142 0.000 0.463 -0.460 -1.021 0.000 0.420 0.705 0.987 0.731 0.212 0.781 0.747 +1 0.818 -0.461 -0.763 0.888 1.116 0.564 -0.732 1.623 1.087 1.077 -1.430 0.796 2.215 1.007 -0.612 -0.057 0.000 1.123 0.593 -0.544 0.000 0.978 0.927 1.172 0.953 0.890 0.892 0.778 +0 0.351 -1.774 0.847 2.170 0.511 0.725 1.059 -0.921 0.000 0.710 -0.490 1.682 2.215 0.868 1.167 -0.176 0.000 1.193 0.394 -1.417 3.102 0.901 0.788 1.001 0.923 0.509 0.821 1.047 +1 0.751 -1.273 -0.508 0.387 0.768 0.691 0.168 0.033 2.173 0.572 0.802 -1.273 2.215 0.655 0.220 0.876 0.000 1.172 -0.090 -1.498 0.000 0.832 0.879 0.989 0.821 0.908 0.681 0.643 +0 0.693 0.011 1.682 2.099 -1.084 0.926 -0.222 0.378 1.087 0.762 0.779 1.436 2.215 0.594 0.696 0.894 0.000 0.672 1.075 -0.402 0.000 0.667 0.829 1.011 0.903 1.206 1.007 0.838 +1 1.258 -1.820 0.777 0.822 -0.693 1.025 -0.102 -1.187 2.173 0.519 -1.171 0.231 0.000 0.566 -1.657 -1.288 0.000 0.927 -0.532 1.203 3.102 0.855 1.146 1.366 0.865 0.905 1.060 0.869 +1 0.360 -2.304 -1.175 0.378 0.944 0.488 0.859 0.762 2.173 0.436 -0.938 -0.106 2.215 0.600 -1.738 0.871 0.000 0.550 0.788 -1.391 0.000 1.367 0.876 0.977 0.966 0.859 0.768 0.703 +0 0.690 0.677 0.788 2.548 0.235 1.258 0.833 -1.272 1.087 1.138 -2.556 -0.296 0.000 2.626 0.184 1.602 2.548 0.572 -2.416 0.593 0.000 0.769 3.251 0.990 1.716 1.394 2.852 2.423 +0 0.560 -1.821 1.155 1.775 0.682 0.912 0.877 -0.712 2.173 0.666 -0.019 -0.892 0.000 0.680 -0.423 1.098 2.548 0.696 -0.354 -1.623 0.000 0.659 0.706 0.995 0.556 1.212 1.193 0.931 +0 1.835 0.947 -0.665 0.911 -1.381 0.848 -0.350 1.167 0.000 0.738 -1.441 0.961 0.000 0.734 -0.334 0.038 1.274 0.511 -0.821 1.245 3.102 1.049 0.938 1.076 0.955 0.440 0.758 1.053 +1 0.546 1.060 -0.017 1.378 1.673 1.224 0.542 -0.463 2.173 1.515 0.948 1.223 0.000 0.855 0.850 -1.399 2.548 0.798 0.050 0.293 0.000 1.263 0.988 1.201 1.193 0.984 1.040 0.908 +0 0.736 1.285 -0.513 1.678 -0.041 1.041 1.131 0.343 0.000 1.322 0.460 1.739 0.000 0.836 -0.461 -1.731 2.548 0.620 1.416 -1.357 0.000 0.871 0.793 0.979 1.391 0.113 1.231 1.095 +0 1.078 0.087 1.014 1.521 -1.548 0.636 -1.342 0.640 0.000 1.096 0.519 -0.489 2.215 0.464 0.249 -0.071 0.000 0.701 -1.286 -1.361 3.102 1.061 0.829 1.314 1.191 1.128 0.910 0.928 +0 0.812 0.041 -0.323 0.424 0.592 0.655 0.951 1.666 0.000 0.517 1.095 -1.114 2.215 0.879 0.109 -1.456 2.548 0.477 0.782 0.494 0.000 0.744 0.632 0.983 0.655 0.434 0.552 0.547 +1 0.888 -1.152 -1.011 0.659 1.355 0.484 1.141 0.198 2.173 0.441 0.479 -0.644 
0.000 0.652 -0.309 -0.371 0.000 1.522 0.453 1.251 3.102 0.802 0.720 0.989 1.478 0.789 1.109 0.987 +0 0.526 1.430 -0.336 1.636 -1.386 0.733 0.339 0.297 0.000 1.049 1.336 1.171 0.000 1.212 0.340 -0.626 2.548 0.383 -0.817 1.328 1.551 1.658 1.090 1.042 0.876 0.637 0.873 0.899 +1 1.166 0.158 -0.711 0.606 -0.288 0.925 -0.444 0.664 2.173 1.069 -0.152 0.063 2.215 1.246 -0.179 -1.651 0.000 0.439 0.188 1.470 0.000 0.316 0.974 0.993 1.057 0.784 0.770 0.769 +1 0.903 0.728 1.547 0.458 -0.191 1.460 -2.134 -0.631 0.000 1.943 0.210 0.972 2.215 1.131 0.332 -0.648 0.000 1.487 -0.445 1.394 3.102 0.805 0.978 0.983 1.004 0.820 0.878 0.803 +1 0.405 0.985 -0.698 0.871 0.163 0.890 0.197 -1.579 0.000 0.436 -1.407 0.121 2.215 0.677 -1.085 0.683 0.000 1.104 1.076 1.707 1.551 0.848 1.029 0.994 0.795 1.309 1.240 1.078 +0 1.658 -1.042 0.132 1.080 0.533 0.930 -0.633 1.677 2.173 0.745 -0.272 -0.843 0.000 0.924 2.127 0.965 0.000 1.167 0.106 -0.481 3.102 0.805 1.192 0.990 1.386 1.113 1.460 1.905 +0 0.813 0.038 -1.396 1.185 -0.174 0.402 -1.269 1.534 0.000 0.679 0.800 0.271 0.000 0.801 0.843 1.737 0.000 0.369 -0.708 -1.556 3.102 0.770 0.539 1.213 0.628 0.148 0.466 0.489 +0 0.764 0.937 -0.993 0.867 -1.466 0.992 -0.363 0.954 0.000 0.681 -0.252 -0.136 1.107 0.571 -0.985 -0.113 0.000 0.771 0.195 -1.254 3.102 0.777 0.771 0.980 0.469 0.577 0.528 0.560 +1 1.421 1.077 0.298 0.253 -1.243 0.921 0.753 -1.492 0.000 0.582 0.817 0.956 2.215 1.224 -0.030 -0.327 2.548 1.091 -0.056 -1.669 0.000 1.121 1.074 0.988 0.611 0.915 0.758 0.715 +1 1.650 0.334 1.108 0.981 -1.428 0.508 -0.754 -0.133 0.000 0.452 0.047 1.361 0.000 0.732 -1.083 0.965 2.548 0.794 -0.610 -1.174 1.551 1.079 0.771 1.333 0.850 0.562 0.710 0.695 +0 0.792 -0.771 0.105 0.596 -1.237 0.569 -0.816 1.058 2.173 0.444 -0.335 -1.118 1.107 0.773 1.178 -0.979 0.000 1.512 0.520 0.901 0.000 0.663 1.066 0.985 0.607 0.706 0.689 0.726 +1 0.920 -1.089 0.682 0.129 -1.596 1.151 -0.931 -0.520 0.000 1.150 1.861 1.519 0.000 1.256 -2.119 1.399 0.000 2.006 -0.881 -0.930 0.000 0.849 1.114 0.987 0.459 0.387 0.705 0.728 +0 0.486 1.519 0.443 0.609 0.943 0.406 -0.679 -1.180 2.173 0.657 -0.385 1.420 2.215 0.595 0.767 -0.578 0.000 0.816 -1.699 -0.372 0.000 1.540 0.962 0.978 0.690 0.555 0.713 0.744 +1 4.314 0.390 0.801 1.470 0.748 2.193 -0.363 -1.123 0.000 1.204 0.238 -0.749 1.107 0.790 0.510 -0.224 0.000 0.906 1.321 -0.295 0.000 0.851 0.878 0.985 0.540 0.808 1.121 0.953 +0 1.768 0.606 1.739 0.554 -1.210 0.955 0.442 0.521 2.173 0.727 0.394 -0.031 0.000 1.274 -0.611 -1.166 2.548 1.205 -0.663 0.132 0.000 0.798 1.036 0.975 1.227 1.593 1.105 1.020 +0 0.695 0.554 0.615 1.008 1.378 1.564 0.975 -0.496 2.173 1.539 0.233 1.386 2.215 0.337 1.265 1.275 0.000 0.721 0.315 -0.542 0.000 0.614 0.799 0.988 1.356 2.426 1.294 0.959 +1 0.709 -1.443 0.093 0.644 -1.704 1.134 -0.541 1.388 2.173 1.116 -0.338 -0.110 0.000 1.148 -0.674 -0.594 0.000 0.948 -0.299 -1.056 3.102 0.810 0.679 0.988 1.077 0.891 0.945 0.895 +0 1.899 0.755 0.127 0.809 -1.464 0.684 -1.040 -1.266 2.173 0.720 0.311 1.343 2.215 0.429 -0.117 -1.605 0.000 0.772 0.787 1.571 0.000 0.397 0.775 1.700 1.065 1.059 1.106 0.845 +1 0.694 0.043 -1.538 1.484 -0.058 1.190 0.925 -0.583 2.173 1.096 -0.119 0.818 0.000 1.690 0.557 1.284 1.274 0.874 -0.465 1.652 0.000 0.914 0.853 1.367 1.105 1.779 1.178 1.035 +1 0.584 2.017 0.156 2.029 -0.060 1.297 -0.483 1.701 0.000 0.398 -1.163 0.748 2.215 0.647 0.392 -0.864 1.274 0.527 0.713 1.119 0.000 1.112 0.943 0.978 1.275 0.725 0.872 1.095 +0 0.829 -0.099 -1.673 1.868 1.312 0.837 -1.625 -0.796 0.000 1.136 -1.133 -0.370 0.000 0.816 0.485 0.435 
2.548 0.857 -0.890 0.903 3.102 0.922 0.981 1.000 0.979 0.625 0.955 1.013 +1 2.427 1.329 0.795 0.241 0.633 0.456 1.495 -0.569 2.173 0.987 -0.001 -1.378 0.000 1.055 1.630 0.100 0.000 0.932 0.256 -0.970 3.102 0.726 1.123 0.977 1.048 0.503 0.807 1.021 +0 1.114 -0.469 0.602 1.864 0.224 0.863 0.940 -0.793 0.000 0.736 -0.597 1.589 2.215 1.486 0.721 -1.399 1.274 1.342 -1.072 0.781 0.000 1.134 0.921 0.990 1.023 0.975 1.229 1.491 +0 1.090 0.187 -0.191 1.284 0.515 0.434 0.015 0.575 2.173 0.295 1.378 1.592 0.000 0.927 0.320 -1.299 0.000 1.138 -0.272 -0.777 3.102 0.756 0.852 0.986 0.764 0.710 0.643 0.710 +1 0.583 1.109 -0.748 0.750 -1.632 1.035 0.691 0.472 2.173 0.808 -1.074 1.681 0.000 0.940 -0.294 1.581 0.000 1.407 0.523 -0.309 1.551 0.795 0.895 0.993 1.054 0.828 0.843 0.768 +1 0.489 2.130 0.147 0.337 0.594 0.985 0.543 0.902 1.087 1.478 0.877 -1.247 0.000 0.836 0.981 -0.725 0.000 0.738 0.507 -0.349 3.102 0.788 0.647 1.000 0.789 0.816 0.900 0.786 +1 0.595 -1.501 -0.727 0.688 -1.103 0.686 -0.497 0.814 0.000 2.037 -1.732 0.503 0.000 2.069 0.149 -1.063 2.548 1.034 -1.258 1.399 0.000 0.935 0.929 0.981 0.759 0.939 0.992 0.836 +1 0.996 0.855 1.263 0.860 1.213 1.590 0.046 0.575 2.173 1.027 0.250 -1.522 0.000 1.409 0.617 -0.551 0.000 1.287 1.615 -1.196 0.000 0.794 0.698 0.992 1.642 0.916 1.162 1.040 +1 0.651 -1.376 -1.337 1.135 0.084 1.097 -0.352 1.366 0.000 1.006 -1.606 0.956 0.000 1.028 -2.558 0.322 0.000 1.640 -0.790 -0.298 3.102 1.088 0.952 1.141 0.694 0.452 0.760 0.690 +1 1.295 0.962 -0.633 1.039 -1.417 0.515 0.094 -0.329 2.173 1.100 0.435 0.823 2.215 0.563 -1.429 1.205 0.000 0.715 -0.289 1.548 0.000 0.502 0.872 1.042 0.796 0.975 0.858 0.883 +1 2.256 -1.085 -0.494 0.836 0.817 0.310 -1.738 -1.237 0.000 0.973 -1.386 1.672 2.215 0.768 -0.341 0.264 0.000 2.076 0.320 1.484 3.102 0.849 0.897 1.760 1.276 1.345 1.247 1.025 +1 0.397 0.573 0.095 1.575 -0.081 0.792 0.096 1.580 2.173 0.635 -1.042 -1.284 0.000 0.392 -0.740 -0.007 0.000 0.689 -0.330 1.181 3.102 0.706 0.875 0.980 0.692 0.338 0.785 0.689 +1 0.857 1.579 0.283 0.963 -1.105 0.723 0.182 -1.533 2.173 0.403 -1.070 1.118 2.215 0.978 0.548 0.492 0.000 0.772 0.905 -0.667 0.000 0.861 0.965 1.194 1.270 0.770 0.976 0.834 +0 1.241 0.276 0.156 0.561 1.578 0.746 -0.745 -1.431 1.087 1.131 -0.028 0.857 2.215 1.143 -1.479 -0.830 0.000 0.431 -0.973 -0.427 0.000 0.328 1.246 1.108 1.041 1.292 0.895 0.879 +0 0.482 -0.944 -1.531 1.272 0.510 1.057 -0.565 0.843 2.173 0.390 -1.867 -1.346 0.000 0.789 -0.171 -0.664 2.548 0.750 -0.203 -1.365 0.000 0.867 1.062 1.046 0.768 1.133 0.760 0.689 +1 1.052 1.240 -0.065 0.590 -1.608 0.741 -0.949 0.587 0.000 0.721 -0.516 -1.676 2.215 0.743 0.758 -0.672 2.548 0.539 -0.618 -1.131 0.000 0.971 1.059 1.074 1.058 0.838 0.754 0.871 +1 0.601 1.661 -1.731 1.456 1.002 0.574 0.428 -1.618 0.000 1.079 0.804 -0.350 2.215 1.099 0.464 0.588 0.000 0.707 0.000 -0.279 0.000 0.934 0.918 0.989 1.738 0.959 1.314 1.109 +1 0.535 -1.733 0.749 0.914 0.930 0.968 0.829 -0.754 0.000 0.919 -0.899 1.250 0.000 1.037 -1.620 -0.714 0.000 0.864 -0.882 -1.486 3.102 0.828 0.719 0.993 0.686 0.434 0.515 0.592 +1 2.028 0.788 0.716 0.671 -1.629 0.749 -1.628 -1.024 0.000 0.884 0.458 0.483 0.000 1.351 -0.342 -0.729 1.274 0.704 -0.761 1.550 3.102 2.764 1.526 1.385 1.344 0.690 1.018 1.205 +0 0.655 1.474 1.227 0.755 0.011 1.058 0.477 -1.191 1.087 1.299 -0.137 0.182 2.215 0.499 0.329 0.699 0.000 1.531 0.624 1.732 0.000 0.795 0.883 0.987 1.458 1.719 1.286 1.038 +1 0.499 -0.978 -0.806 1.469 0.596 1.375 0.162 -1.437 0.000 1.343 0.114 -0.085 2.215 0.761 0.885 1.468 0.000 0.697 -0.160 0.875 
3.102 1.194 0.926 1.131 1.044 0.679 1.000 1.027 +1 0.703 -0.292 -1.092 0.565 1.199 1.514 -0.152 -0.834 0.000 0.869 -0.740 0.903 0.000 1.210 0.372 0.993 2.548 1.710 -0.563 0.106 3.102 2.549 1.649 0.980 0.892 1.006 1.191 0.990 +1 0.421 -1.418 -0.657 0.738 -0.046 1.169 0.845 -1.250 1.087 1.037 -0.571 -0.523 0.000 1.819 1.881 0.808 0.000 0.873 0.133 0.754 3.102 0.813 0.733 0.997 1.161 1.106 0.914 0.803 +0 0.324 1.762 -1.683 3.700 1.443 1.385 -1.039 -0.163 2.173 0.604 -2.086 -0.456 0.000 0.393 0.910 -0.690 2.548 0.550 -1.662 0.734 0.000 0.662 0.888 0.995 1.124 1.191 4.075 4.044 +1 0.466 -0.675 1.055 0.589 0.649 1.094 -1.485 -1.269 0.000 1.218 -1.302 0.408 2.215 0.585 -1.523 1.478 0.000 0.554 0.032 -0.103 3.102 0.897 0.931 0.979 0.727 0.643 0.892 0.799 +1 1.134 -0.092 1.631 1.231 1.645 0.379 1.442 1.414 0.000 0.807 0.291 -0.507 2.215 1.290 0.711 0.516 2.548 0.923 0.757 -1.226 0.000 0.671 0.834 0.988 1.021 0.903 0.868 0.741 +1 1.243 0.198 1.111 0.266 -1.708 0.921 0.421 -1.010 2.173 0.751 0.037 -0.536 0.000 1.023 -0.241 1.326 0.000 1.324 -0.830 0.711 3.102 0.910 0.967 0.986 1.066 1.476 0.886 0.785 +0 0.728 0.876 1.365 0.600 -0.332 1.248 2.023 0.872 0.000 1.552 0.331 0.228 0.000 1.380 -0.547 -0.303 2.548 1.554 -0.454 -0.692 0.000 0.859 0.765 0.989 0.790 1.823 1.081 0.896 +1 1.128 -0.987 0.759 0.992 1.737 2.239 -0.375 -1.174 0.000 2.114 -0.444 0.340 1.107 1.549 0.324 0.144 0.000 1.091 0.301 1.299 3.102 1.149 0.899 1.131 1.207 1.191 0.911 0.874 +1 1.399 -1.384 -0.039 0.487 1.387 0.716 -1.093 0.494 0.000 1.575 -1.689 -1.513 0.000 0.790 -0.288 -1.513 2.548 1.040 0.287 0.236 0.000 1.083 1.013 1.098 0.590 0.485 0.631 0.653 +1 1.744 0.643 -1.368 0.874 1.555 1.065 -0.202 0.572 1.087 0.767 0.391 -0.994 2.215 0.555 1.587 -0.234 0.000 0.389 -1.086 0.285 0.000 1.144 0.862 0.986 1.381 1.376 1.001 0.894 +1 0.663 -0.902 0.581 0.602 -1.172 0.487 0.544 1.260 0.000 1.230 1.026 -1.195 2.215 1.008 0.839 0.570 0.000 0.396 1.375 -0.492 3.102 0.771 1.138 0.992 0.962 0.419 1.088 1.004 +0 0.878 -0.807 -0.795 1.466 -0.235 0.734 -1.203 1.449 2.173 0.609 -1.638 0.476 0.000 0.902 -0.079 1.294 2.548 0.876 -1.588 -1.315 0.000 0.957 0.982 0.987 1.224 0.619 0.895 0.858 +1 1.250 0.081 -0.122 1.090 -1.068 0.720 0.570 1.039 2.173 0.499 0.391 0.316 0.000 1.019 -0.487 1.674 2.548 0.481 1.577 -1.144 0.000 0.808 0.925 1.216 1.102 0.850 0.854 0.754 +1 0.889 0.439 0.395 1.363 -0.628 0.621 -1.143 0.793 2.173 0.796 -0.394 1.738 0.000 0.929 1.072 -0.647 0.000 0.831 0.230 1.122 0.000 1.069 0.854 1.214 1.242 0.719 0.853 0.828 +0 0.291 2.093 -0.176 1.875 1.128 0.868 -0.420 -0.879 1.087 0.959 -1.114 -1.022 0.000 1.406 0.287 0.530 2.548 0.727 0.767 -1.097 0.000 1.338 0.872 0.987 1.631 1.414 1.896 1.897 +0 0.911 -0.754 -0.992 0.476 0.530 0.781 -1.140 -0.581 0.000 0.933 1.246 1.098 0.000 0.619 0.889 1.727 2.548 0.819 0.157 1.081 3.102 0.889 0.646 0.984 0.899 0.372 0.636 0.780 +0 0.513 1.389 -1.573 1.377 -0.754 0.809 0.763 0.412 0.000 0.681 0.775 1.062 1.107 1.194 1.024 -1.137 2.548 1.043 2.431 0.404 0.000 0.769 0.990 0.982 0.853 0.892 0.673 0.692 +1 1.456 -1.233 0.352 1.908 -0.563 0.797 -0.550 0.645 0.000 1.924 -0.725 -1.349 2.215 0.821 -1.065 1.264 0.000 1.252 -0.837 -0.410 3.102 0.893 0.941 1.694 1.653 1.065 1.072 1.061 +1 0.859 -0.226 1.471 2.092 0.452 0.975 0.551 -0.916 2.173 0.646 0.195 1.593 0.000 0.641 0.349 0.436 2.548 0.493 -0.735 -0.856 0.000 0.725 0.845 1.475 0.749 0.929 0.975 0.813 +1 1.090 1.435 -1.715 1.789 1.430 1.230 2.009 -0.178 0.000 0.420 1.798 -0.999 0.000 0.538 1.217 -1.282 2.548 0.768 -0.670 0.356 3.102 1.039 0.834 1.006 1.094 
0.800 1.017 1.085 +0 2.377 -0.255 -0.099 0.231 0.111 0.839 -2.473 -1.522 0.000 0.539 -1.224 0.859 2.215 0.583 -1.536 -0.902 0.000 1.523 -0.519 1.567 3.102 0.848 0.953 0.992 1.084 0.554 0.796 1.059 +1 0.358 -1.786 -0.012 1.796 1.139 0.732 0.191 -0.584 2.173 0.746 -0.212 1.715 2.215 0.483 -0.517 -1.010 0.000 0.442 -0.019 0.008 0.000 0.428 0.515 0.987 1.416 0.980 1.013 0.768 +1 0.688 -0.188 0.301 0.462 -0.683 0.886 -0.743 -0.924 0.000 0.408 -2.185 0.626 0.000 1.336 -0.958 0.897 2.548 1.015 0.169 1.699 3.102 1.639 1.236 0.986 0.750 0.838 0.917 0.785 +1 1.242 0.552 -1.537 0.830 -1.463 0.609 -0.039 0.033 2.173 0.933 -0.404 1.183 2.215 0.493 0.840 -0.752 0.000 0.507 1.557 0.412 0.000 0.926 0.889 0.999 1.023 0.977 0.819 0.753 +0 0.337 -0.619 -1.143 1.744 1.464 0.544 0.352 -0.219 0.000 0.869 0.814 0.827 2.215 0.570 1.887 -0.675 0.000 1.234 -0.436 -0.405 3.102 0.860 0.830 0.988 0.946 1.083 0.788 0.712 +0 0.699 -0.398 1.454 0.200 -1.529 0.949 0.788 0.915 2.173 1.224 -0.560 -0.439 1.107 0.840 -0.105 0.307 0.000 2.005 -0.003 -1.420 0.000 1.433 1.192 0.991 0.779 1.898 1.130 0.937 +1 0.581 -0.191 -1.465 0.269 1.169 0.890 -0.949 0.811 2.173 1.329 -0.473 -0.561 0.000 1.084 -1.007 -1.077 1.274 1.204 -0.025 0.368 0.000 0.797 1.236 0.982 0.636 1.217 0.848 0.742 +1 0.839 -0.255 1.497 0.199 1.436 0.656 0.208 1.725 2.173 0.641 -2.198 -0.113 0.000 0.516 0.045 -0.321 2.548 0.749 -1.311 0.318 0.000 0.466 0.750 0.982 0.808 0.700 0.911 0.794 +0 0.977 0.914 0.763 2.001 1.353 1.272 0.161 -0.900 2.173 1.525 0.182 -0.426 0.000 1.577 -0.112 0.774 2.548 0.458 0.872 0.195 0.000 0.752 0.887 0.988 1.011 1.779 1.294 1.161 +1 1.248 -0.435 1.554 1.281 0.872 0.485 -0.446 -1.234 0.000 0.731 -0.753 -0.713 2.215 1.425 -0.363 0.134 2.548 0.390 0.254 0.129 0.000 0.676 0.777 1.009 0.988 0.777 0.822 0.700 +1 1.029 -1.121 1.143 0.618 -0.826 0.717 -1.336 0.040 0.000 0.527 1.081 -1.696 2.215 0.941 -0.589 -1.010 2.548 0.892 -0.633 0.828 0.000 0.875 0.913 1.082 1.067 0.863 0.916 0.817 +1 0.895 0.360 0.185 1.693 -0.449 2.636 -0.732 1.317 0.000 1.383 0.347 -0.522 1.107 0.570 -1.009 -0.012 0.000 0.483 -1.614 -0.664 0.000 0.637 1.043 0.989 0.749 0.806 0.725 0.690 +0 1.240 0.246 -0.444 0.528 -0.670 0.964 0.909 0.843 1.087 0.497 2.192 0.467 0.000 1.350 1.225 -1.182 0.000 0.906 2.135 -1.213 0.000 0.883 0.869 0.987 1.353 0.588 0.920 1.072 +1 0.790 -1.255 -0.364 1.299 0.861 0.819 0.212 0.229 2.173 1.461 -0.883 -1.232 2.215 1.092 -0.177 -1.721 0.000 0.719 -0.462 0.198 0.000 0.981 0.987 1.254 1.192 1.826 1.147 0.953 +1 0.514 -0.878 -1.250 0.660 0.837 1.102 0.811 -0.895 2.173 0.800 0.710 1.141 0.000 0.597 -0.682 0.972 1.274 1.228 0.655 -0.050 0.000 1.136 0.881 0.983 1.017 1.333 0.910 0.792 +1 0.605 0.348 -0.325 1.493 -1.034 0.530 -0.563 0.267 2.173 1.157 0.207 1.023 2.215 0.695 0.288 -0.922 0.000 0.668 0.713 0.604 0.000 0.765 0.808 0.992 1.135 0.860 0.989 0.786 +0 0.338 0.647 0.201 1.775 1.315 1.107 -2.031 -1.308 0.000 0.823 -1.676 0.888 0.000 0.767 -1.404 0.007 0.000 1.569 -0.283 0.005 1.551 0.876 0.940 0.986 0.965 0.793 0.789 0.858 +0 0.629 0.294 1.035 1.534 0.191 0.883 -0.056 -1.298 0.000 0.713 -0.706 1.281 0.000 0.825 -1.063 -1.097 1.274 1.018 -0.400 0.083 3.102 1.357 0.947 0.987 0.543 0.660 0.720 0.782 +0 0.503 1.243 -0.503 1.248 0.411 0.515 0.988 1.673 0.000 0.594 -0.267 -1.114 2.215 0.320 0.022 0.782 0.000 0.659 -0.736 -1.474 3.102 0.884 0.922 0.979 0.793 0.251 0.656 0.679 +0 1.319 0.844 0.704 0.106 -0.559 0.870 -0.618 -0.898 0.000 0.740 -1.422 0.129 0.000 0.834 0.278 -1.569 2.548 0.651 -1.532 1.508 0.000 0.873 1.076 0.981 0.592 0.292 0.631 0.737 
+0 0.761 -1.227 -0.603 3.297 -1.001 1.794 0.074 0.868 1.087 0.991 0.832 0.414 0.000 1.305 0.112 -0.716 2.548 0.523 2.436 0.415 0.000 0.926 1.128 0.988 2.435 1.888 1.640 1.612 +1 1.480 -0.774 0.402 0.765 0.841 0.276 -1.428 0.686 0.000 0.475 0.155 -0.617 2.215 1.989 -0.920 -1.268 2.548 0.677 0.401 1.439 0.000 0.845 1.010 0.986 0.785 0.858 0.937 0.786 +1 0.632 -0.704 -0.259 0.385 0.640 1.176 -1.250 -1.247 0.000 0.598 -1.697 -0.754 0.000 1.445 -1.318 0.803 2.548 0.624 -0.183 -0.347 3.102 0.883 0.805 0.979 0.717 0.784 0.871 0.766 +0 1.710 0.553 1.007 0.943 0.235 1.022 -0.675 -0.241 2.173 1.260 0.662 1.586 2.215 1.554 -0.704 -0.772 0.000 0.432 -1.096 -1.080 0.000 0.549 0.722 1.128 0.989 2.069 1.237 1.110 +0 0.937 -1.652 -0.302 1.216 0.524 0.600 -0.587 -1.675 2.173 0.506 0.758 1.730 2.215 0.799 -0.201 0.136 0.000 0.653 -1.808 1.351 0.000 1.150 0.947 1.002 1.378 0.604 1.015 0.876 +1 0.453 -1.869 1.212 1.513 0.292 0.750 -0.138 -1.448 2.173 0.669 0.474 1.043 2.215 0.643 0.593 -0.310 0.000 0.530 -1.153 -1.242 0.000 0.911 0.831 0.989 1.188 0.879 0.937 0.815 +1 0.892 -0.083 -0.819 0.682 1.029 0.705 -0.895 -0.672 2.173 0.786 0.109 1.161 0.000 1.009 -0.664 0.952 1.274 0.396 0.181 -1.524 0.000 1.034 1.102 1.076 0.743 1.048 0.743 0.689 +1 1.275 1.144 1.160 0.760 0.309 0.643 1.062 -0.762 2.173 0.247 0.942 0.023 0.000 0.452 1.742 0.727 0.000 0.524 -0.579 0.317 0.000 0.938 0.883 0.990 1.010 0.671 0.812 0.716 +1 0.552 -0.755 1.278 0.907 -0.688 0.574 -0.558 0.619 2.173 0.943 -0.374 -0.174 2.215 1.480 -0.800 -1.456 0.000 0.669 1.233 0.641 0.000 0.843 1.000 0.989 0.695 0.717 0.819 0.703 +1 0.427 -0.316 -1.412 2.032 1.559 1.207 1.522 0.239 2.173 0.545 1.243 -1.224 0.000 0.872 0.441 -0.619 2.548 0.416 -1.313 -0.188 0.000 1.199 0.811 0.988 1.572 1.137 1.107 0.982 +1 0.419 -0.195 0.841 1.880 0.141 0.836 0.628 1.421 2.173 0.634 2.623 -1.078 0.000 1.471 1.088 -1.273 2.548 0.901 0.792 0.048 0.000 1.285 1.050 0.991 1.075 0.985 0.966 0.998 +1 0.587 -0.784 1.290 0.148 -1.475 1.800 1.340 0.802 2.173 1.091 1.186 -1.070 2.215 2.017 1.338 -0.701 0.000 0.586 1.992 -0.384 0.000 0.664 0.665 0.979 1.263 2.052 1.291 1.141 +0 1.628 -2.105 0.660 0.399 1.496 0.659 -0.474 0.109 1.087 0.810 -0.595 -1.578 0.000 0.623 -1.486 -1.352 0.000 0.874 -0.073 -0.880 3.102 0.620 1.039 0.985 1.043 0.644 0.859 0.823 +0 1.072 -0.550 -1.394 0.138 -0.867 1.333 -1.024 1.610 2.173 1.381 0.818 -0.125 2.215 1.899 -0.917 0.360 0.000 0.445 -0.721 1.301 0.000 0.838 1.467 0.995 1.097 2.940 1.579 1.266 +1 1.637 0.528 1.738 0.246 0.256 1.435 0.134 -0.198 0.000 1.060 1.143 1.653 0.000 0.864 0.415 1.085 0.000 1.211 -0.597 -0.749 1.551 0.899 1.165 0.984 0.931 0.840 0.942 0.824 +0 2.520 0.563 0.730 0.628 -0.951 0.640 -0.497 1.302 0.000 0.999 -0.872 -0.734 2.215 1.196 0.446 -0.982 0.000 0.539 0.482 -0.556 3.102 1.610 0.946 1.740 1.602 0.545 1.000 0.973 +1 1.133 1.384 -1.226 1.025 -1.735 1.574 1.084 0.280 0.000 0.989 1.995 1.360 0.000 1.749 0.293 -1.220 2.548 0.905 -1.837 0.466 0.000 0.831 0.910 0.975 1.183 0.847 0.901 0.841 +0 1.372 -0.415 -0.373 1.040 -0.660 1.029 -1.197 1.186 2.173 0.402 -2.691 1.410 0.000 0.650 -1.294 0.540 0.000 0.536 0.327 -0.951 3.102 0.794 0.943 0.982 1.625 1.015 1.032 1.041 +0 0.418 1.038 1.293 3.230 -1.493 0.734 -0.423 0.147 2.173 0.663 0.959 -0.282 0.000 1.136 1.356 0.119 2.548 0.476 1.536 0.599 0.000 0.614 0.938 0.989 1.247 1.265 1.398 1.081 +1 0.516 -0.325 -1.148 1.427 0.533 2.421 1.287 -1.574 0.000 1.284 -0.794 0.019 2.215 2.020 -1.579 -0.187 0.000 1.739 -0.372 0.667 1.551 9.420 5.128 1.187 0.847 0.784 3.190 2.329 +1 0.726 0.348 0.200 
1.322 0.989 1.293 -2.300 0.420 0.000 1.657 -0.726 0.378 2.215 5.502 -1.315 -1.158 2.548 1.610 0.121 1.334 0.000 0.760 1.174 0.991 3.316 3.354 2.412 1.831 +1 1.207 -0.169 0.687 2.252 1.136 1.195 0.715 -0.373 2.173 0.753 -0.144 -1.152 0.000 0.618 0.945 -1.268 2.548 0.489 -1.026 -1.624 0.000 0.556 1.166 0.997 0.964 0.791 1.136 0.989 +0 0.519 -1.206 1.470 1.417 -1.608 0.962 0.348 -0.085 0.000 0.944 0.401 0.487 2.215 1.218 0.919 1.560 2.548 1.216 0.932 -0.515 0.000 0.901 0.882 0.990 0.854 0.999 0.872 0.951 +0 1.024 -0.217 1.081 0.407 -0.941 1.281 -0.667 -1.727 2.173 0.884 -0.668 -0.488 2.215 0.576 1.281 -0.220 0.000 0.380 0.365 0.468 0.000 0.397 0.791 0.989 0.925 1.408 0.978 0.827 +1 0.288 -1.722 -1.172 1.034 0.304 0.592 1.169 1.720 1.087 0.655 0.689 0.407 2.215 0.655 1.235 -1.110 0.000 0.473 -0.772 -1.264 0.000 0.868 0.816 0.984 1.120 0.876 0.811 0.732 +1 1.556 -0.361 -0.947 0.857 1.688 0.934 0.078 0.418 1.087 0.514 0.107 1.197 0.000 0.419 -1.034 -0.558 1.274 0.469 -0.854 0.058 0.000 0.668 0.654 1.110 0.627 0.783 0.816 0.675 +0 0.379 0.950 -0.639 1.301 1.224 0.623 0.739 0.637 0.000 0.674 -0.074 -0.731 2.215 0.625 -2.111 -0.123 0.000 1.259 0.570 -1.408 3.102 0.865 0.927 0.989 0.688 0.575 0.671 0.653 +0 1.502 -1.716 0.053 0.998 -0.242 0.961 -1.457 -1.503 0.000 0.432 -0.393 -1.553 0.000 0.822 -1.166 1.162 2.548 0.743 -0.612 0.119 3.102 0.766 0.849 0.979 0.959 0.510 0.684 0.865 +1 1.895 -0.610 0.247 0.915 -0.219 1.240 -1.444 -1.644 2.173 0.503 -0.535 0.933 0.000 0.492 -0.796 -0.279 2.548 0.494 -1.647 -1.083 0.000 0.807 0.874 1.003 0.503 0.962 1.015 0.816 +0 2.285 0.628 -0.293 0.146 -1.137 0.682 0.480 1.685 0.000 0.583 0.136 0.635 0.000 0.841 -1.052 1.312 2.548 0.603 -0.308 -1.132 0.000 0.798 0.703 0.987 0.688 0.156 0.777 0.658 +1 1.184 0.663 0.361 1.183 -0.072 0.870 0.748 -1.226 2.173 0.703 -0.052 -0.820 0.000 0.913 0.522 1.246 2.548 0.992 1.913 1.055 0.000 1.926 1.227 0.998 1.203 0.884 0.898 0.924 +0 1.227 0.391 0.979 0.600 1.185 0.953 0.406 -1.619 2.173 1.089 -0.115 -0.110 1.107 0.813 1.145 0.062 0.000 0.805 -2.292 -0.715 0.000 0.845 1.246 1.001 0.965 1.520 1.235 1.310 +0 0.867 -1.036 0.005 0.575 -1.652 0.886 -0.850 -0.707 0.000 0.555 -0.611 -1.423 0.000 0.716 -0.277 0.969 2.548 1.418 -0.610 0.542 3.102 0.911 0.940 0.987 0.643 0.333 0.720 0.635 +1 0.596 0.239 1.590 2.467 0.959 0.912 -0.121 -0.468 0.000 1.432 0.733 -0.535 2.215 0.994 -1.281 1.264 0.000 0.772 1.204 1.424 0.000 0.734 0.830 0.983 0.875 1.028 1.040 0.820 +0 1.172 -0.194 1.462 0.682 -1.700 0.862 -1.113 -0.218 0.000 0.733 -0.965 0.687 2.215 1.442 -0.558 -1.538 2.548 1.584 0.645 -0.318 0.000 0.718 0.949 0.970 0.612 1.013 0.839 0.800 +1 2.157 -0.070 1.114 1.536 0.728 1.108 0.319 -0.610 2.173 0.541 -0.212 -1.125 0.000 0.637 -0.891 0.369 1.274 0.409 -0.925 -1.334 0.000 0.305 0.667 0.984 0.707 1.089 1.120 0.898 +0 0.754 -1.565 0.905 0.721 -0.372 1.041 -0.024 -0.154 2.173 0.517 -1.020 0.453 0.000 2.381 -0.399 -1.629 2.548 1.421 -1.774 1.503 0.000 0.919 1.229 0.991 1.348 1.948 1.312 1.125 +1 1.020 0.691 -0.298 0.197 0.663 1.419 0.295 -1.538 2.173 0.830 1.458 0.411 0.000 0.696 0.321 0.706 1.274 0.545 -0.440 -0.262 0.000 1.163 0.742 0.982 1.143 1.115 0.977 0.854 +0 0.708 0.328 -0.295 0.962 1.122 1.455 -0.163 -1.287 0.000 1.814 0.104 0.730 2.215 0.786 0.211 1.641 0.000 2.118 -0.832 -0.431 1.551 1.001 1.427 1.095 0.838 1.845 1.401 1.135 +0 0.923 0.125 -0.412 0.665 -1.093 0.740 1.071 -1.644 0.000 0.605 -0.104 0.910 1.107 0.865 0.469 0.075 2.548 0.694 1.310 0.711 0.000 0.970 0.929 0.991 0.851 0.579 0.685 0.685 +0 1.023 1.749 1.349 0.435 -1.271 
0.828 0.965 0.292 2.173 0.848 -1.200 -1.581 1.107 0.717 0.078 -0.127 0.000 0.433 0.539 -1.556 0.000 0.616 0.869 0.976 0.933 2.060 1.266 0.958 +0 0.754 -0.272 0.435 0.955 1.450 0.702 -0.844 1.304 0.000 1.082 -0.608 -0.158 2.215 1.542 -0.370 -1.140 2.548 0.389 -1.329 -0.430 0.000 0.854 1.001 0.991 0.914 1.073 0.808 0.740 +0 1.133 -0.061 1.707 0.870 -0.792 1.336 0.803 -1.037 2.173 2.750 0.904 0.427 0.000 0.502 0.881 1.630 1.274 0.792 -0.645 1.273 0.000 2.274 1.400 1.070 0.973 0.690 1.395 1.204 +0 0.877 2.411 -0.894 0.696 -0.382 0.737 0.941 0.772 2.173 0.454 -0.624 -0.284 1.107 0.332 0.965 1.525 0.000 0.514 -0.086 1.732 0.000 0.293 0.530 0.997 1.134 1.022 0.948 0.742 +1 1.575 1.095 1.361 0.706 -0.377 0.536 0.462 -0.251 1.087 0.872 0.822 0.882 0.000 0.563 -1.025 -1.006 1.274 0.928 1.119 -0.931 0.000 1.206 0.937 1.461 1.193 0.742 0.874 0.814 +0 0.840 -0.246 0.692 0.613 -1.424 0.861 2.932 -1.688 0.000 0.935 2.431 -0.787 0.000 1.008 0.601 0.738 2.548 1.768 0.186 0.367 1.551 0.846 1.199 0.987 0.701 0.404 1.028 0.960 +0 0.353 -2.228 -0.435 1.961 -1.358 0.649 -0.505 0.852 0.000 0.899 0.346 0.309 1.107 0.575 -0.998 -1.591 0.000 0.997 -0.388 -0.252 3.102 0.945 0.971 0.986 0.858 0.549 1.025 0.899 +1 1.494 -0.034 -0.268 0.933 -0.910 1.080 -0.054 -1.469 1.087 1.102 1.338 0.683 2.215 0.428 2.136 1.068 0.000 0.780 1.228 -0.781 0.000 0.871 1.047 0.990 1.468 1.950 1.264 1.017 +1 0.602 2.366 0.556 0.896 -0.459 0.573 0.450 0.684 1.087 0.884 0.988 -1.200 0.000 1.278 0.443 1.408 0.000 1.413 1.252 -0.390 3.102 1.239 1.073 0.990 0.577 0.943 0.848 0.813 +1 1.368 1.271 -1.230 1.484 1.559 0.891 0.622 0.582 2.173 0.804 0.968 -0.943 0.000 1.171 0.872 0.025 0.000 0.646 0.210 -0.216 3.102 1.140 1.134 1.159 0.861 0.551 0.890 0.855 +1 0.639 0.517 1.404 0.908 0.294 2.246 2.203 -0.966 0.000 2.006 0.710 0.501 0.000 1.355 0.314 0.804 0.000 2.227 0.001 -1.711 3.102 0.840 0.642 0.984 0.847 0.858 0.937 0.788 +1 0.498 -0.293 1.398 1.341 -0.240 0.614 -1.077 0.734 2.173 0.657 0.592 0.813 2.215 0.604 -0.792 -0.735 0.000 0.414 1.812 -0.186 0.000 1.232 0.934 1.127 0.853 0.894 0.840 0.759 +0 0.993 1.122 1.419 1.040 0.934 0.697 0.393 -0.238 0.000 1.041 0.975 -0.779 1.107 1.335 0.008 1.438 0.000 0.780 1.732 -0.631 0.000 0.873 0.642 0.977 1.098 0.696 0.715 0.659 +0 1.246 0.479 0.982 0.769 -1.277 0.789 0.542 1.656 0.000 0.892 0.880 -0.492 2.215 0.712 -0.605 0.024 2.548 0.376 -0.284 -0.564 0.000 0.979 0.993 1.213 0.954 0.828 0.759 0.731 +1 0.950 -1.671 1.292 1.318 -0.641 1.461 -0.358 0.027 0.000 1.365 -1.512 -1.608 0.000 0.907 -2.258 -0.816 0.000 1.423 1.096 1.171 3.102 0.755 0.647 1.528 2.132 0.819 1.447 1.301 +0 2.325 -1.227 -0.936 0.493 1.374 1.102 -0.200 0.302 0.000 0.705 0.908 0.992 2.215 0.487 0.583 -0.256 0.000 1.079 -0.772 1.579 3.102 0.855 0.958 1.294 0.815 0.932 1.070 1.068 +0 0.704 -0.182 -1.558 2.905 -1.583 1.816 -0.965 0.199 2.173 0.953 -0.766 0.880 2.215 1.779 0.446 -1.028 0.000 0.708 -1.124 0.199 0.000 1.712 1.478 0.992 2.155 1.129 1.458 1.426 +1 2.573 -1.145 0.361 0.243 -0.714 0.894 -1.310 -1.093 0.000 0.585 -0.500 -1.421 1.107 0.721 -0.246 1.553 2.548 0.501 -1.783 -1.490 0.000 0.559 0.773 0.988 0.998 0.319 0.765 0.784 +1 0.338 -2.226 0.079 0.246 -0.567 0.789 -0.939 1.291 0.000 0.908 0.085 0.369 2.215 1.028 -0.358 -0.669 2.548 1.407 -0.946 -1.598 0.000 0.837 1.053 0.992 0.755 0.864 0.901 0.768 +1 0.469 0.928 1.710 0.463 -1.315 0.923 -0.718 0.246 0.000 1.047 -0.262 -0.965 2.215 1.676 1.617 -1.651 0.000 1.738 0.047 0.234 0.000 0.775 0.940 0.983 0.685 0.897 0.886 0.799 +1 3.112 -0.993 -0.505 1.488 0.976 1.218 -0.103 -1.348 2.173 
1.569 -1.574 1.123 0.000 1.071 -0.204 -0.185 2.548 0.588 -1.095 0.635 0.000 0.567 1.250 2.899 2.070 1.236 1.386 1.359 +1 0.935 -0.311 -0.107 0.151 1.166 0.668 -0.345 -0.978 0.000 0.610 0.893 -1.120 0.000 1.156 0.676 -0.097 1.274 2.065 1.031 0.874 3.102 0.933 0.979 0.988 0.897 0.953 0.964 0.814 +1 1.105 -0.760 -1.537 0.295 -1.075 0.580 -0.919 1.019 0.000 0.443 -1.656 -0.193 0.000 0.978 -1.047 0.273 2.548 0.970 0.296 -0.881 3.102 0.955 0.932 0.996 0.843 0.889 0.734 0.680 +1 1.023 -0.940 0.622 0.471 1.292 1.300 -1.107 -1.070 0.000 1.186 0.057 0.600 1.107 0.625 0.078 -1.119 0.000 0.680 -0.806 -0.596 3.102 1.038 0.638 0.995 0.666 0.836 0.994 0.896 +1 0.716 -1.460 0.782 1.109 1.113 3.671 0.578 -1.698 0.000 2.633 -0.268 -0.142 2.215 3.606 1.040 0.156 0.000 0.606 0.581 0.469 1.551 1.450 1.329 1.003 1.477 0.830 2.172 1.816 +1 0.441 -1.170 0.457 2.301 1.309 1.019 -0.471 -0.205 2.173 0.459 -0.693 1.188 2.215 1.077 -1.339 -1.226 0.000 0.944 -0.569 0.381 0.000 1.192 1.114 0.985 0.471 0.964 0.886 0.825 +0 2.576 0.292 0.224 0.770 1.184 0.632 0.106 -1.176 2.173 1.292 -0.413 -1.737 2.215 0.526 -1.151 -0.523 0.000 0.979 0.997 -0.668 0.000 0.592 0.894 1.486 1.248 0.736 1.107 0.994 +0 1.594 0.209 -1.307 0.786 1.640 0.875 -0.356 0.631 0.000 1.094 0.130 -0.828 2.215 1.130 0.092 -0.127 0.000 1.161 -0.792 1.225 3.102 1.186 0.831 0.984 0.816 1.137 0.922 0.995 +1 0.385 0.138 -0.814 0.454 1.005 0.870 1.156 -1.282 2.173 0.509 1.020 0.558 2.215 0.491 -0.895 -0.576 0.000 0.518 -0.211 0.497 0.000 0.503 1.061 0.989 0.588 0.977 0.748 0.692 +0 1.470 -0.993 -0.651 0.936 1.639 0.930 -0.769 1.612 1.087 0.489 -1.514 -0.100 0.000 1.132 -0.835 0.706 2.548 1.140 -1.302 0.408 0.000 0.942 1.102 1.431 1.028 0.938 0.939 0.941 +1 0.640 1.352 1.370 2.096 0.592 0.917 1.661 -0.389 0.000 1.330 0.952 -1.393 2.215 0.667 1.158 -1.110 2.548 0.752 1.091 0.848 0.000 0.729 0.902 1.035 0.872 0.292 0.881 0.742 +1 0.305 -0.657 -1.369 0.448 0.308 0.949 -0.525 0.593 0.000 2.055 0.334 -0.989 2.215 1.284 0.892 0.638 0.000 1.266 -0.949 0.028 0.000 0.968 0.997 0.997 0.997 0.363 1.182 1.000 +0 0.785 1.286 1.552 1.035 1.643 0.690 1.215 0.065 0.000 0.650 0.534 -1.302 2.215 0.764 0.492 -0.759 0.000 0.869 0.186 0.719 0.000 0.903 0.949 0.977 0.929 0.571 0.770 0.817 +1 0.708 -1.985 -0.401 0.539 1.028 0.905 -0.720 0.048 2.173 1.192 -1.791 -1.481 0.000 0.585 0.206 -1.149 0.000 1.120 -0.532 -1.345 1.551 1.059 0.906 0.983 0.700 1.014 0.682 0.642 +0 0.681 -0.051 0.756 1.667 -0.187 0.564 0.538 1.109 2.173 0.650 1.936 -1.583 0.000 1.393 0.354 -0.661 2.548 0.763 -0.560 1.157 0.000 0.910 0.894 1.108 0.920 1.107 0.788 0.725 +0 0.661 -1.408 -0.363 0.853 -1.078 0.498 0.148 0.208 0.000 0.784 0.411 -0.819 2.215 1.295 -0.120 0.903 1.274 0.644 -1.618 1.477 0.000 1.335 0.960 0.983 1.541 1.112 1.276 1.095 +0 1.428 1.355 1.469 0.740 -0.524 0.360 0.929 0.247 0.000 0.495 0.599 1.685 0.000 0.889 0.261 0.679 2.548 1.863 0.324 -0.677 3.102 0.875 0.830 1.389 0.954 0.926 0.840 0.710 +0 0.703 1.104 0.869 0.851 -1.620 0.868 -0.077 -0.298 1.087 1.073 1.090 1.421 2.215 0.936 1.173 -1.077 0.000 0.754 -0.384 0.864 0.000 1.066 0.920 0.986 1.024 1.683 0.932 0.800 +1 0.691 -0.124 0.394 1.188 0.772 0.545 -1.023 -0.539 0.000 1.120 -1.478 -1.380 0.000 1.208 -0.368 -1.507 2.548 0.649 -0.020 0.010 1.551 1.211 0.985 0.989 0.585 0.675 0.733 1.046 +1 1.696 -0.505 0.798 0.339 -1.273 0.839 0.266 -0.975 2.173 0.590 -1.059 0.302 0.000 1.204 -1.156 -1.420 0.000 1.048 0.935 0.013 3.102 0.788 0.970 1.005 0.922 0.886 0.873 0.757 +1 0.499 -0.672 1.669 0.355 -0.133 1.201 0.166 0.594 2.173 1.938 -1.477 -1.473 
0.000 1.369 0.218 -0.873 0.000 1.106 -0.356 -1.216 0.000 0.899 0.776 0.987 0.624 0.544 0.610 0.611 +1 1.029 -1.002 1.587 0.778 -1.162 0.531 -0.225 1.226 0.000 0.446 0.453 -1.391 0.000 1.184 0.124 -0.388 2.548 2.598 0.914 0.305 3.102 0.816 0.918 0.988 2.181 1.031 1.468 1.170 +1 2.857 -0.526 -0.384 0.111 -1.727 1.248 -0.406 1.522 2.173 0.793 0.392 0.319 2.215 0.963 0.187 -1.682 0.000 0.630 -0.585 0.378 0.000 0.918 0.825 0.987 0.907 1.432 1.152 0.937 +1 0.561 0.546 -0.005 0.544 -1.662 1.513 1.131 1.485 0.000 0.778 -0.757 0.529 2.215 1.928 -0.564 -0.511 2.548 0.960 0.472 -0.550 0.000 1.857 1.868 0.986 1.337 1.054 1.570 1.273 +0 0.456 1.699 1.131 0.593 -0.739 1.314 -0.026 -0.303 2.173 1.334 0.550 1.260 1.107 0.576 1.246 1.533 0.000 0.903 0.787 0.195 0.000 0.762 1.158 0.992 0.820 2.010 1.040 0.856 +0 0.493 1.481 0.711 3.382 0.233 1.099 0.050 0.032 2.173 2.022 -0.511 -1.601 2.215 1.078 0.870 -1.116 0.000 1.728 0.503 1.661 0.000 0.937 1.337 0.995 0.974 2.275 1.753 1.486 +1 0.696 1.069 1.236 0.820 -1.160 0.838 0.103 0.113 0.000 0.996 1.021 1.616 2.215 0.662 0.747 -0.845 2.548 0.629 2.011 0.688 0.000 1.008 0.886 0.985 0.625 0.696 0.607 0.579 +1 1.520 0.812 -1.610 1.616 -1.305 1.475 -0.149 0.501 0.000 0.928 0.079 0.071 2.215 1.127 0.827 -0.805 2.548 0.845 -0.102 -1.740 0.000 1.535 1.054 0.990 0.778 0.900 0.972 1.224 +0 1.147 0.341 -1.310 0.590 -0.148 0.650 -1.048 -1.580 2.173 0.469 -0.854 -0.026 0.000 0.923 -0.250 1.034 0.000 1.400 1.195 0.029 3.102 0.881 0.900 0.988 0.838 1.897 1.031 0.882 +1 1.053 0.253 0.526 1.596 -0.270 0.681 0.964 -1.266 2.173 0.848 -0.194 1.574 2.215 0.378 -1.302 -0.595 0.000 0.730 0.110 -1.711 0.000 0.700 0.854 1.180 1.103 0.928 0.941 0.791 +1 0.775 -0.102 0.184 1.164 1.569 1.916 0.282 -0.182 0.000 2.229 -1.929 1.587 0.000 0.489 0.991 -0.374 0.000 0.996 1.090 1.453 3.102 0.778 0.679 1.248 0.838 0.758 0.800 0.839 +0 0.705 0.104 -0.502 0.984 1.131 0.956 0.705 -0.107 2.173 1.185 -0.558 1.328 2.215 0.515 0.190 -1.385 0.000 0.925 -0.978 -1.505 0.000 0.705 0.943 1.148 0.859 1.853 0.978 0.810 +1 0.501 -1.232 -1.501 0.334 -1.063 0.879 1.023 0.309 0.000 0.722 0.082 1.652 2.215 0.537 -0.606 -0.749 0.000 0.698 0.676 1.171 1.551 0.378 0.788 0.996 0.622 0.359 0.593 0.556 +0 0.424 -1.379 -0.987 1.048 -0.640 0.723 0.165 1.731 0.000 1.075 -0.432 0.476 2.215 0.495 -0.569 -0.944 0.000 1.036 -0.967 0.667 3.102 0.840 0.904 1.000 0.957 0.385 0.743 0.740 +1 0.396 -1.560 0.925 1.587 -0.865 0.541 -0.119 1.456 0.000 0.562 1.441 -1.388 0.000 0.431 1.853 -0.087 0.000 1.343 0.044 0.445 0.000 0.868 0.656 1.098 0.696 0.219 0.661 0.661 +0 1.527 -0.559 -1.131 0.688 -0.239 1.129 -1.113 0.969 2.173 0.949 -0.744 1.740 0.000 1.487 -1.355 -0.482 2.548 1.200 1.896 -0.080 0.000 1.001 2.461 1.024 1.299 1.587 1.947 1.548 +0 1.099 0.339 -0.942 0.882 -0.040 0.492 1.737 -1.146 0.000 0.802 0.971 0.419 2.215 0.810 1.527 1.268 0.000 1.218 0.528 1.307 1.551 0.935 0.950 0.992 0.821 0.661 0.675 0.742 +1 1.330 0.713 -0.182 1.194 -0.991 1.348 0.420 1.030 2.173 0.603 -0.545 -1.287 0.000 0.519 -0.991 0.463 2.548 0.838 2.012 -1.362 0.000 1.946 1.484 1.161 1.478 1.005 1.181 1.105 +1 1.005 -0.134 0.804 0.900 -1.617 0.830 0.264 -1.310 0.000 1.414 -0.882 0.186 2.215 0.543 0.043 1.432 2.548 0.767 0.122 -0.656 0.000 0.683 0.599 1.080 1.093 0.958 0.901 0.810 +1 0.587 -1.723 -1.584 1.163 0.500 1.569 -1.331 -0.211 2.173 0.853 -0.989 1.045 0.000 0.491 -0.935 -0.911 0.000 0.473 -0.208 -1.208 3.102 0.975 1.290 1.091 0.701 0.885 0.797 0.761 +1 0.881 -0.756 0.638 0.825 0.596 0.863 0.120 0.465 2.173 1.191 0.818 1.738 0.000 1.096 -0.436 -0.389 
2.548 1.870 0.667 -1.356 0.000 0.749 1.100 0.991 1.183 0.921 0.917 1.130 +0 0.574 -0.435 -0.208 0.921 0.753 0.507 -1.484 -1.268 0.000 0.766 -0.558 -0.815 2.215 0.702 -0.057 0.470 0.000 0.522 -1.215 1.562 3.102 1.361 0.936 0.983 0.559 0.546 0.586 0.605 +1 0.874 -0.231 1.279 0.469 -1.241 0.988 0.258 0.595 1.087 0.719 -1.052 -1.670 0.000 0.787 -1.047 -0.678 1.274 1.064 -2.121 -0.425 0.000 1.189 0.741 0.991 0.880 1.308 1.097 1.016 +0 1.773 1.594 -1.689 0.671 -0.863 1.122 -0.250 0.522 1.087 0.486 0.257 0.546 2.215 1.104 -0.209 -0.867 0.000 0.605 0.714 -1.381 0.000 0.725 0.692 1.025 1.940 0.284 1.240 1.003 +0 0.627 -0.171 0.273 1.031 -1.163 1.001 0.629 1.511 1.087 0.750 -0.820 0.742 0.000 0.777 0.718 -0.650 0.000 0.562 1.061 -0.234 0.000 0.942 0.589 1.071 1.010 0.326 0.767 0.720 +1 1.714 0.186 0.528 1.289 1.185 0.419 1.385 -0.250 0.000 0.817 1.379 -1.342 0.000 1.067 0.715 -0.607 2.548 0.632 1.038 -1.016 3.102 1.034 0.747 1.149 0.888 0.269 0.785 0.821 +1 0.880 0.268 -0.328 1.763 -1.128 1.348 0.849 1.488 0.000 1.335 -0.857 0.371 1.107 0.918 -2.090 -0.212 0.000 1.058 -1.066 -0.362 1.551 1.118 1.125 1.139 0.968 0.692 1.015 1.036 +0 1.091 -1.011 -0.472 0.587 -1.164 1.120 1.210 -1.544 2.173 1.256 0.181 1.483 0.000 1.440 0.441 0.437 0.000 1.543 0.854 0.271 0.000 0.937 0.691 0.988 2.388 1.576 1.567 1.310 +0 1.090 -1.603 -0.768 0.212 1.488 0.547 -0.992 -0.248 2.173 0.810 -1.672 1.398 0.000 0.568 -1.030 0.383 0.000 0.853 -0.164 1.645 3.102 0.880 0.889 0.981 0.676 0.776 0.643 0.617 +0 0.536 1.003 -0.547 1.158 1.736 1.231 1.996 0.237 0.000 1.488 1.500 1.442 2.215 1.273 -0.337 -1.143 2.548 0.850 -0.167 -0.292 0.000 0.626 0.768 0.987 0.881 1.939 1.036 0.849 +1 1.541 0.811 1.588 1.078 -1.276 0.845 -0.594 0.683 2.173 0.676 -0.490 -0.093 0.000 1.140 0.310 -0.113 2.548 0.771 -1.945 -0.948 0.000 0.667 0.697 0.984 1.380 1.001 1.048 0.932 +1 0.557 0.582 -1.081 1.370 0.459 1.026 0.333 -1.462 0.000 1.080 1.081 1.019 1.107 1.639 2.198 -0.534 0.000 0.601 -0.953 -0.653 0.000 0.833 1.087 1.190 0.730 0.715 0.732 0.688 +1 0.511 -1.812 -0.744 0.683 -0.679 0.837 -0.542 -1.488 0.000 1.183 0.102 0.801 2.215 0.674 1.938 0.955 0.000 1.212 0.594 -0.059 3.102 0.737 1.006 1.002 1.085 0.827 0.847 0.795 +1 0.773 0.120 1.103 1.161 -1.675 1.079 -2.118 -0.376 0.000 0.936 -1.601 1.130 0.000 1.121 -0.166 -1.252 2.548 1.931 -0.073 -0.086 3.102 0.796 1.149 0.983 1.086 0.978 0.949 1.045 +1 0.406 -1.893 0.722 1.207 1.209 1.436 2.323 -0.439 0.000 0.562 -2.204 -1.249 0.000 0.900 -0.975 1.266 2.548 0.973 -0.562 1.056 3.102 9.576 5.150 0.981 0.527 0.196 3.158 2.476 +0 1.250 -0.255 -0.634 0.954 -1.692 0.752 -1.231 -1.417 1.087 0.986 -0.765 0.945 0.000 1.334 -0.552 0.216 2.548 0.712 -0.114 -1.679 0.000 0.918 0.907 1.233 0.907 1.306 0.914 0.827 +0 0.719 -0.677 -1.742 1.184 0.783 0.459 0.459 -0.907 0.000 0.749 -0.658 -0.314 0.000 0.636 0.187 -1.236 2.548 0.494 -0.618 1.266 3.102 0.982 0.732 0.987 0.758 0.393 0.489 0.616 +1 1.326 -1.194 0.190 0.524 0.404 1.523 1.023 1.721 0.000 0.409 0.330 0.421 0.000 0.627 0.595 -0.091 0.000 0.886 -0.433 -1.110 0.000 0.914 0.763 0.992 0.827 0.386 0.632 0.671 +0 0.884 -1.211 0.814 0.539 0.800 1.025 0.215 -0.300 2.173 0.768 -1.103 -1.617 1.107 0.457 0.433 1.624 0.000 0.379 -2.467 1.372 0.000 1.245 0.829 0.977 1.095 1.540 1.018 0.897 +1 0.793 0.578 -1.437 1.190 1.452 1.517 -2.713 0.064 0.000 0.949 0.011 1.361 0.000 1.605 0.700 1.647 2.548 1.572 -0.838 1.631 3.102 1.060 0.861 0.987 0.606 1.226 0.945 0.936 +1 1.217 1.084 0.024 0.394 0.086 2.125 1.627 -1.580 0.000 2.598 0.250 0.139 2.215 0.899 -0.377 -1.339 0.000 0.779 
0.503 1.586 1.551 1.274 0.771 0.995 0.769 1.259 0.935 0.799 +0 0.982 -1.470 1.681 0.499 -1.505 0.552 -0.975 -0.130 0.000 0.942 0.287 -1.059 2.215 0.689 0.648 1.574 0.000 1.465 -0.690 0.479 3.102 0.702 0.955 1.002 0.905 1.217 0.817 0.779 +0 1.270 -0.512 1.322 0.595 0.757 0.453 -2.820 0.081 0.000 1.641 -0.413 -0.654 2.215 0.971 2.571 1.152 0.000 1.293 -0.757 -1.655 3.102 10.180 5.401 0.980 1.295 1.081 3.300 2.413 +0 0.561 -0.002 -1.356 0.975 -0.001 0.691 0.555 0.704 0.000 0.780 2.101 -1.507 0.000 1.668 -0.561 -0.990 2.548 1.177 -0.808 0.438 3.102 1.986 1.896 0.990 0.761 1.046 1.521 1.207 +0 0.898 -0.888 1.496 0.852 0.488 0.786 -1.368 0.343 2.173 1.158 -1.175 -1.254 2.215 0.401 0.119 -1.475 0.000 0.499 -0.862 -0.404 0.000 0.507 0.750 0.988 0.949 1.397 0.856 0.685 +0 0.401 -1.310 1.347 1.524 -0.226 1.071 -0.651 -1.715 2.173 0.712 -2.171 -0.288 0.000 1.153 -0.911 1.069 0.000 1.555 -0.167 0.073 3.102 1.594 1.305 1.070 1.200 1.400 1.123 0.968 +0 0.753 1.034 1.414 0.999 0.420 1.665 0.426 -0.673 0.000 2.501 0.630 0.995 2.215 1.896 -0.072 -0.883 2.548 0.533 0.365 0.584 0.000 1.303 0.973 0.987 0.789 2.453 1.560 1.244 +0 1.460 -1.466 0.411 0.866 -0.507 1.207 -0.818 -1.229 0.000 0.803 -0.383 0.600 2.215 0.281 -0.974 1.283 0.000 1.262 0.329 -1.674 0.000 0.815 0.857 1.146 0.883 0.299 0.741 0.796 +0 1.057 0.392 -1.160 0.794 -0.089 0.859 0.138 0.084 2.173 1.851 -0.622 1.398 0.000 0.793 -1.359 0.919 0.000 0.574 1.664 -0.325 0.000 1.128 1.219 1.044 0.824 0.895 1.108 1.020 +1 0.854 0.541 0.634 1.425 -0.009 1.668 1.234 -1.529 2.173 0.823 2.188 -0.303 0.000 0.539 0.790 -0.559 0.000 1.796 1.467 0.970 0.000 0.800 0.690 0.996 1.605 0.253 0.986 0.923 +1 0.315 1.685 1.697 1.676 0.882 1.172 -0.317 -1.414 2.173 2.171 -0.269 -0.726 0.000 1.801 1.381 0.950 1.274 0.842 -0.339 0.570 0.000 0.839 1.211 0.987 0.781 2.447 1.690 1.416 +0 0.869 0.648 -1.549 0.999 1.491 0.399 0.568 -0.446 0.000 0.500 -0.269 0.387 2.215 0.351 2.252 -0.311 0.000 0.926 1.255 -0.188 3.102 0.743 0.802 0.988 0.783 0.694 0.745 0.675 +0 0.912 1.504 1.203 0.790 0.084 0.846 -1.057 1.581 0.000 1.488 0.111 -0.017 2.215 1.213 -0.412 -0.375 0.000 1.831 1.837 -1.681 0.000 1.875 1.209 0.995 1.179 0.934 1.061 1.188 +1 0.728 -0.063 -0.311 1.206 1.060 0.817 -0.510 0.935 0.000 1.118 -1.131 -0.131 2.215 1.036 0.179 1.304 0.000 1.702 -1.809 -0.738 0.000 0.973 0.938 1.226 1.061 0.930 1.104 0.941 +1 1.040 1.814 0.953 0.785 -0.364 0.974 2.446 -0.476 0.000 0.613 2.292 1.264 0.000 0.945 0.785 1.731 2.548 0.431 1.207 1.453 1.551 1.644 0.982 1.160 0.870 0.189 0.830 0.754 +1 1.215 -0.573 -0.050 0.248 0.289 1.277 -1.133 0.243 0.000 1.883 -1.086 -1.589 0.000 1.283 -0.705 -1.390 2.548 0.489 -1.173 -0.290 0.000 1.000 1.055 0.989 0.932 0.377 0.883 0.775 +0 1.924 -0.015 0.357 0.038 1.009 0.934 0.426 -1.028 1.087 1.252 1.215 -0.350 0.000 2.856 -1.044 1.313 0.000 1.395 1.008 1.049 0.000 0.758 1.239 0.829 1.130 1.309 1.349 1.200 +0 1.267 1.313 0.403 0.729 1.386 0.615 -0.538 0.839 1.087 0.480 0.230 -1.035 2.215 1.548 -0.878 -0.817 0.000 0.922 0.674 -1.456 0.000 1.509 0.876 1.032 1.125 0.856 0.841 1.014 +0 3.593 0.242 0.321 1.261 0.651 1.850 0.154 -1.486 2.173 0.817 2.302 -1.007 0.000 0.866 -0.558 0.698 0.000 1.041 0.931 -1.162 0.000 0.840 0.580 0.975 2.398 0.760 1.517 1.259 +0 0.873 -0.567 -1.218 1.008 1.333 1.187 2.130 -0.527 0.000 1.423 -0.476 1.142 2.215 1.407 0.418 0.258 2.548 1.354 1.086 -0.926 0.000 1.059 1.461 0.987 0.778 1.306 1.757 1.539 +0 0.874 1.474 0.182 1.018 -0.547 1.012 -0.139 1.026 2.173 0.931 0.858 -0.188 2.215 1.054 -0.586 1.578 0.000 0.965 -0.643 -0.908 0.000 0.875 
1.002 0.987 0.548 1.481 0.996 0.992 +1 0.359 -0.913 0.872 1.107 1.432 1.509 -0.149 0.046 0.000 0.636 -1.672 -1.575 0.000 1.227 1.568 -1.287 0.000 1.343 0.713 1.602 0.000 0.915 1.014 0.984 0.803 0.840 0.919 0.806 +0 1.900 0.155 0.948 1.185 0.584 0.645 1.440 -1.265 0.000 0.661 -0.942 0.465 2.215 1.128 -0.421 -0.793 2.548 0.857 -0.803 -1.398 0.000 0.501 0.714 0.992 1.261 0.864 0.940 0.884 +0 3.131 -0.361 -0.598 0.745 1.556 1.909 -0.735 1.204 0.000 1.456 -1.008 -0.380 1.107 0.815 -0.377 1.355 1.274 1.005 -1.625 1.242 0.000 1.361 0.818 1.972 1.253 1.214 1.208 1.331 +0 0.673 -0.700 1.510 1.549 -1.554 0.476 -0.059 0.045 0.000 0.775 -0.708 0.666 0.000 0.830 0.599 -0.653 2.548 0.398 -1.126 0.317 0.000 0.813 0.910 0.985 0.818 0.715 0.705 0.764 +0 0.468 -0.783 -0.745 1.750 1.042 1.198 -1.845 -0.575 0.000 2.480 -0.125 1.375 2.215 1.703 0.085 -0.700 0.000 1.753 -0.175 0.264 3.102 0.925 0.872 1.253 1.112 1.587 1.165 1.029 +0 1.199 0.277 -0.242 0.308 0.591 1.072 -0.169 0.965 2.173 1.379 0.361 -0.795 2.215 0.593 -0.045 0.463 0.000 1.072 -0.074 -1.514 0.000 0.860 0.907 0.981 0.979 1.855 1.001 0.818 +1 2.513 -0.748 1.308 0.061 -1.302 0.892 -1.071 -0.890 2.173 0.806 -1.090 0.380 0.000 0.332 0.892 0.068 2.548 0.697 1.539 -0.204 0.000 0.241 1.604 0.982 0.814 0.977 0.940 1.034 +0 0.630 -1.672 -1.182 2.606 -1.578 0.731 -0.752 0.856 0.000 0.766 -0.702 -0.114 0.000 1.425 -0.196 -0.091 2.548 0.427 0.005 0.530 3.102 1.220 0.955 0.981 1.143 0.323 1.386 1.279 +1 1.101 -2.331 -0.601 0.580 0.611 0.571 -0.150 1.585 2.173 0.980 -1.958 0.169 0.000 0.576 -1.793 0.842 0.000 0.760 -0.723 -1.026 3.102 0.658 0.784 0.987 1.371 0.557 0.896 0.834 +1 1.929 0.444 -0.895 0.308 -0.778 1.688 0.644 0.965 2.173 1.353 2.194 -1.070 0.000 1.111 0.309 0.111 1.274 0.500 0.839 1.472 0.000 1.077 1.459 0.986 1.664 1.219 1.436 1.268 +1 0.846 1.102 0.846 0.465 1.693 0.538 0.672 1.645 0.000 2.005 -0.167 -0.178 2.215 1.088 -0.598 -1.039 2.548 0.719 0.229 1.040 0.000 0.531 0.869 0.987 1.835 1.165 1.405 1.128 +0 2.721 -0.080 0.877 0.396 1.161 0.743 -0.583 -0.843 0.000 0.671 0.159 -0.901 0.000 0.925 -0.990 -0.385 2.548 0.916 -0.248 1.211 3.102 0.628 0.721 1.000 1.096 0.753 0.717 0.776 +0 0.335 1.069 -1.529 0.156 0.552 0.669 1.616 0.856 0.000 1.524 -0.610 -1.003 2.215 0.754 0.358 1.021 2.548 0.867 2.003 0.016 0.000 0.923 0.874 0.978 0.826 1.259 1.472 1.191 +0 1.532 -0.071 -0.894 0.077 -0.853 1.843 0.233 -0.458 2.173 3.116 -1.174 1.737 1.107 2.168 1.211 0.455 0.000 2.108 0.421 0.732 0.000 1.158 2.167 0.998 1.946 4.263 2.984 2.207 +0 0.962 -0.519 0.792 1.389 1.306 1.237 2.711 -1.296 0.000 1.350 0.717 0.558 0.000 2.291 0.250 -0.034 1.274 0.435 -1.540 -1.657 0.000 0.848 0.819 0.992 1.262 0.544 1.587 1.654 +0 0.626 0.301 -1.678 0.230 1.094 1.020 -1.143 0.513 2.173 1.183 0.209 -0.928 2.215 0.635 -1.149 1.490 0.000 0.519 -1.270 -0.335 0.000 0.637 0.979 0.992 1.794 1.963 1.369 1.119 +0 0.589 -1.076 -1.285 1.041 0.270 1.329 -1.797 0.219 0.000 1.519 -1.480 1.702 0.000 0.869 -1.034 -1.048 2.548 1.106 -0.409 1.525 0.000 0.950 0.838 1.070 0.575 0.255 0.490 0.602 +1 0.725 1.494 -0.973 1.366 -0.197 1.281 0.573 1.407 2.173 0.505 0.758 -0.718 0.000 0.475 1.593 0.769 0.000 0.395 -1.058 -0.922 0.000 0.823 1.050 0.988 0.915 0.830 1.118 0.885 +0 0.459 0.027 -1.104 1.304 1.564 1.428 0.314 -0.528 2.173 1.706 0.791 1.011 2.215 0.991 0.540 0.130 0.000 0.668 0.114 -1.404 0.000 0.906 1.027 0.982 1.482 2.330 1.483 1.151 +0 0.665 -0.224 1.578 1.155 -0.342 1.073 0.488 -1.440 0.000 0.730 -0.015 0.094 2.215 0.810 -0.795 0.454 0.000 1.132 0.762 0.973 1.551 0.934 0.793 1.199 0.726 0.705 
0.650 0.598 +0 0.464 -0.327 0.009 1.798 -1.317 1.031 1.264 0.290 0.000 0.802 0.971 1.131 2.215 0.720 1.188 -0.358 0.000 1.143 -1.065 -1.712 3.102 0.857 1.186 1.176 1.129 1.301 0.885 0.849 +1 0.473 1.141 0.371 1.032 -1.253 2.915 0.341 -1.518 0.000 1.908 0.474 0.491 0.000 1.836 -0.395 -0.234 1.274 2.080 -0.587 0.937 3.102 1.206 1.336 0.989 1.125 1.315 1.109 1.023 +0 0.968 -2.219 -1.049 1.000 -1.408 0.852 0.083 0.364 2.173 0.683 0.656 -1.737 0.000 0.546 0.680 -0.427 0.000 0.607 0.641 0.993 0.000 0.867 0.996 0.990 0.693 0.465 0.995 1.014 +0 0.776 -1.556 0.544 0.437 1.725 0.453 1.130 -1.011 1.087 1.080 0.530 1.614 1.107 0.730 -2.313 0.199 0.000 0.504 0.194 -0.626 0.000 1.334 1.753 0.986 1.042 0.785 1.328 1.082 +1 2.332 0.058 1.446 0.780 -1.460 1.884 -1.155 -0.807 0.000 1.186 -0.377 0.440 0.000 1.115 -0.906 0.453 0.000 0.806 -0.284 0.854 3.102 0.545 0.621 0.987 0.667 0.528 0.727 0.818 +1 0.714 -0.154 1.130 0.623 -0.824 0.558 0.569 -1.271 0.000 0.886 0.513 0.167 2.215 1.313 0.225 -0.860 0.000 0.660 2.012 0.703 0.000 0.790 0.900 0.985 0.683 0.642 0.610 0.584 +1 1.550 -0.157 0.227 1.559 1.558 2.453 1.147 1.137 0.000 2.864 -0.195 -0.223 0.000 2.075 0.607 -0.651 1.274 3.513 0.228 -1.307 0.000 3.564 1.964 2.007 1.600 0.946 1.328 1.332 +1 0.360 0.689 -0.639 2.863 -0.031 1.313 0.146 1.230 0.000 0.766 -0.042 -1.128 0.000 1.123 0.427 -1.631 1.274 0.972 1.530 -0.932 0.000 0.974 0.756 0.987 0.768 0.884 0.867 0.771 +1 0.919 -0.701 -1.249 0.960 0.500 0.800 -1.109 0.748 2.173 0.901 -0.355 0.083 2.215 1.237 -0.191 -1.228 0.000 0.730 1.832 -1.525 0.000 1.615 1.469 1.301 0.893 0.855 1.282 1.086 +1 0.789 0.664 1.558 1.660 1.138 1.195 0.883 -0.415 2.173 0.778 0.528 -1.229 0.000 0.544 -0.253 0.022 2.548 0.905 0.953 0.958 0.000 1.061 0.818 0.989 1.492 0.731 1.045 0.897 +1 0.615 0.017 1.704 0.716 -0.399 1.086 -0.430 -0.758 1.087 0.619 1.515 1.698 0.000 1.682 0.134 0.625 0.000 0.603 -0.568 1.230 3.102 1.761 1.087 0.987 0.738 0.842 1.118 0.927 +0 0.556 -1.548 -1.037 0.856 0.212 0.933 -0.905 -1.292 2.173 0.920 2.570 -0.033 0.000 1.296 -1.176 0.440 0.000 1.451 -0.343 1.508 3.102 0.877 0.872 0.985 1.062 0.779 0.898 0.764 +1 0.522 0.561 -0.392 0.367 1.553 0.621 -0.646 -0.865 0.000 1.155 0.931 1.220 2.215 1.188 0.769 0.298 2.548 0.467 0.950 -1.077 0.000 0.853 1.063 0.995 0.828 0.922 0.931 0.854 +1 1.723 1.079 -0.866 0.191 0.790 1.872 0.813 0.741 2.173 0.968 0.703 -1.259 0.000 0.779 0.582 1.517 0.000 0.502 -0.368 -0.832 3.102 0.978 0.606 0.992 1.554 1.227 1.058 0.999 +1 0.348 -0.112 1.501 1.172 0.418 1.730 1.434 -1.573 2.173 2.008 1.420 -0.417 2.215 1.016 0.881 0.895 0.000 0.473 -0.128 -0.415 0.000 0.929 0.968 0.988 2.752 2.367 2.321 1.720 +1 0.329 0.542 0.901 1.701 -0.135 1.082 0.772 -1.682 2.173 0.532 1.597 0.693 0.000 0.677 0.211 -1.084 2.548 0.470 0.270 -0.217 0.000 0.664 0.972 0.990 0.819 0.623 0.887 0.743 +0 0.591 0.231 0.615 1.102 1.663 0.662 -0.145 1.699 1.087 0.759 0.400 0.948 0.000 1.008 0.362 -0.639 0.000 1.500 -0.176 -0.259 3.102 1.111 1.159 0.987 0.834 1.035 0.763 0.740 +0 1.025 -1.817 -1.011 1.285 -1.580 0.775 -0.523 -0.007 2.173 0.773 -1.053 1.045 0.000 0.858 0.359 -0.669 0.000 0.700 1.317 0.708 0.000 0.971 1.044 0.982 1.333 0.876 1.207 1.392 +1 1.991 -1.558 0.725 0.671 0.446 1.110 -1.014 -1.522 2.173 1.018 -0.341 -0.765 2.215 0.853 -0.362 -0.197 0.000 0.393 0.363 1.272 0.000 0.677 1.025 1.007 1.313 1.124 1.161 0.941 +0 1.172 0.155 -0.552 0.611 -1.569 0.671 -0.833 0.405 0.000 1.070 0.594 -1.652 2.215 0.600 0.481 0.773 0.000 1.276 -0.019 0.191 3.102 0.921 1.279 0.987 0.715 1.104 0.831 0.773 +0 0.410 -0.956 0.737 
1.888 0.359 1.378 0.823 -1.663 0.000 0.962 0.625 -0.776 1.107 1.362 -0.845 0.097 2.548 1.040 1.416 -1.534 0.000 0.847 1.096 0.989 0.899 1.363 1.482 2.122 +0 0.317 -0.989 0.432 2.230 -0.761 1.020 0.823 1.739 2.173 0.693 -0.309 1.131 0.000 0.773 -0.682 -0.648 2.548 0.991 -1.804 0.407 0.000 0.883 1.209 1.023 0.504 1.338 1.160 1.289 +1 0.376 1.678 0.710 1.470 -1.277 0.965 1.029 -1.140 2.173 0.986 0.719 0.360 0.000 0.534 -0.036 1.293 2.548 0.705 0.847 -0.260 0.000 0.595 1.128 1.005 0.818 0.885 0.758 0.756 +0 0.506 -0.067 1.275 0.365 -0.463 0.634 -0.824 -0.877 2.173 0.632 -0.171 1.044 0.000 0.409 -1.426 -0.997 0.000 0.910 -0.659 0.873 3.102 0.946 0.952 0.986 0.568 0.804 0.616 0.565 +1 0.894 1.433 -0.254 0.554 1.419 0.746 0.360 -1.308 0.000 1.064 1.248 0.602 2.215 0.490 -1.058 1.443 0.000 1.277 0.536 1.343 3.102 1.131 0.870 0.986 0.730 0.736 0.909 0.857 +0 0.723 -0.942 0.570 2.772 -1.632 0.752 -0.857 -0.366 0.000 1.700 0.281 0.108 2.215 1.312 -0.374 -1.733 1.274 0.731 0.974 0.117 0.000 1.443 1.160 1.797 0.964 1.679 1.391 1.254 +1 1.328 0.802 1.246 1.645 0.636 1.199 0.544 -1.621 0.000 0.422 0.546 -0.296 0.000 1.736 0.602 -0.867 2.548 0.481 0.961 -0.738 3.102 0.937 0.836 1.069 0.728 0.192 0.857 0.757 +1 0.911 -0.888 -0.319 2.009 -1.365 1.312 0.399 0.802 2.173 0.407 0.172 -0.126 0.000 0.300 -0.363 -1.432 0.000 0.552 0.590 -1.104 0.000 0.519 0.829 1.515 0.823 0.498 1.171 0.877 +1 0.470 1.532 -0.364 0.980 1.076 0.668 -0.660 0.864 1.087 0.968 0.331 -0.828 2.215 0.389 2.034 -1.359 0.000 0.455 -0.812 -0.635 0.000 0.977 1.134 0.989 0.874 1.335 0.883 0.786 +1 0.471 1.800 -0.519 0.221 0.342 0.858 1.238 1.570 2.173 0.480 1.323 -0.275 0.000 0.790 1.782 0.863 0.000 1.056 0.245 0.776 3.102 0.857 0.932 0.976 0.627 0.830 0.680 0.656 +0 1.039 -0.887 -0.598 2.530 -0.884 0.738 -1.702 0.992 0.000 1.022 -2.322 0.721 0.000 0.689 -1.284 0.515 2.548 1.090 -1.122 -1.727 3.102 0.774 0.875 0.996 1.074 0.597 0.840 1.191 +0 0.407 1.132 -0.697 1.627 1.534 0.949 -0.443 -1.406 2.173 1.238 0.047 -0.145 0.000 2.054 -0.081 0.526 2.548 0.810 -1.056 -0.412 0.000 0.973 1.073 1.020 1.224 1.742 1.210 1.162 +1 0.574 -0.320 -1.254 0.758 -0.309 1.311 -0.021 -1.688 2.173 1.141 0.145 0.914 0.000 1.425 -0.284 -0.260 0.000 0.893 0.577 -0.567 0.000 0.878 1.340 0.988 0.602 0.678 0.784 0.741 +1 0.755 0.421 -1.067 1.036 1.670 0.690 -0.169 -0.360 1.087 2.072 0.802 0.612 2.215 0.780 0.212 1.553 0.000 0.541 0.173 -1.474 0.000 0.920 1.203 0.989 0.882 1.631 1.147 1.048 +1 1.544 -0.667 1.683 0.460 -0.673 0.901 -0.688 0.382 2.173 0.597 -0.377 -1.029 0.000 0.847 0.262 -0.240 2.548 0.812 0.275 1.323 0.000 0.842 1.027 0.995 0.868 0.803 0.810 0.721 +1 0.703 0.975 -1.061 1.224 1.205 0.560 1.361 -1.363 0.000 1.806 0.808 0.167 2.215 0.868 2.033 1.453 0.000 0.842 0.068 -0.932 3.102 0.903 0.929 1.145 1.182 1.024 1.036 0.901 +1 1.020 -0.172 0.051 0.827 1.679 0.850 0.651 -0.794 0.000 1.380 -0.696 1.451 2.215 1.563 -0.844 0.068 0.000 1.259 0.749 -1.442 0.000 0.891 0.735 1.266 0.985 0.908 0.987 0.886 +0 0.631 0.317 -0.817 0.249 -0.317 0.541 -0.592 1.415 0.000 0.835 -1.181 0.111 2.215 1.423 1.171 -1.178 1.274 1.342 -1.150 0.793 0.000 0.869 0.892 0.976 1.174 2.141 1.345 1.076 +1 0.567 1.648 -0.361 0.958 1.051 0.952 0.926 -0.732 2.173 0.832 0.466 1.667 0.000 1.031 1.290 0.131 2.548 1.429 1.008 1.331 0.000 0.678 1.012 0.988 0.947 0.916 0.891 0.781 +0 1.134 -0.303 -0.806 1.270 -1.358 0.720 0.140 0.842 0.000 0.410 0.097 -0.101 0.000 0.310 1.538 1.343 0.000 0.576 1.156 -1.669 3.102 0.868 0.746 0.984 0.866 0.484 0.709 0.735 +1 0.758 0.682 -0.444 0.938 -1.632 1.179 -0.084 
0.624 0.000 0.880 0.535 0.412 0.000 1.320 0.587 1.483 2.548 2.797 0.726 -1.090 1.551 0.934 0.869 1.024 0.698 1.089 0.735 0.650 +1 1.095 -0.733 0.372 0.939 0.191 1.186 0.014 -1.148 2.173 0.896 -0.208 1.243 0.000 0.647 -2.172 -0.622 0.000 0.612 0.423 1.064 3.102 1.923 1.222 1.002 1.687 0.853 1.124 1.103 +1 0.433 0.852 -1.542 1.366 0.726 1.085 -0.534 -1.507 0.000 0.814 -0.632 -1.026 2.215 1.652 -1.256 -0.104 0.000 1.415 0.489 0.883 3.102 0.897 0.972 0.990 0.604 1.151 0.947 1.158 +0 0.416 -1.352 0.839 0.854 1.269 0.978 -0.442 -1.513 2.173 0.898 2.369 0.484 0.000 0.764 0.045 -0.462 2.548 1.698 -0.947 -0.406 0.000 4.819 2.601 0.983 0.871 0.915 1.898 1.486 +0 0.535 1.359 1.126 1.322 -1.572 1.203 1.087 -1.301 1.087 1.973 -0.763 0.410 2.215 1.053 -0.945 -0.083 0.000 0.593 -2.137 -0.134 0.000 0.789 0.844 0.992 0.815 3.348 1.654 1.385 +0 1.719 0.561 1.517 0.784 -1.006 0.942 0.932 0.153 0.000 0.345 0.832 1.035 0.000 0.795 0.857 -1.473 2.548 0.811 0.179 -0.663 3.102 0.866 0.917 1.229 0.758 0.469 0.615 0.697 +1 1.040 -0.803 1.637 0.264 -0.814 1.819 0.055 0.353 0.000 1.541 -0.934 -0.811 2.215 1.701 -0.112 -1.387 0.000 1.762 -0.267 1.163 3.102 1.301 1.135 0.979 0.914 1.534 1.142 1.018 \ No newline at end of file diff --git a/examples/trials/auto-gbdt/main.py b/examples/trials/auto-gbdt/main.py new file mode 100644 index 0000000000000000000000000000000000000000..ef7b487b1e022276cbee0f9088174e128dacf4c3 --- /dev/null +++ b/examples/trials/auto-gbdt/main.py @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +''' +This project is for automatically tuning parameters for GBDT. 
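+
+The trial works as follows: it pulls a hyperparameter set from the NNI tuner
+via nni.get_next_parameter(), merges it over get_default_parameters(), trains
+a LightGBM model, and reports the test RMSE back to the tuner with
+nni.report_final_result().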
+''' +import logging + +import lightgbm as lgb +import pandas as pd +from sklearn.metrics import mean_squared_error + +import nni + +LOG = logging.getLogger('auto-gbdt') + +# specify your configurations as a dict +def get_default_parameters(): + params = { + 'boosting_type': 'gbdt', + 'objective': 'regression', + 'metric': {'l2', 'auc'}, + 'num_leaves': 31, + 'learning_rate': 0.05, + 'feature_fraction': 0.9, + 'bagging_fraction': 0.8, + 'bagging_freq': 5, + 'verbose': 0 + } + return params + + +def load_data(train_path='./data/regression.train', test_path='./data/regression.test'): + ''' + Load or create dataset + ''' + print('Load data...') + df_train = pd.read_csv(train_path, header=None, sep='\t') + df_test = pd.read_csv(test_path, header=None, sep='\t') + num = len(df_train) + split_num = int(0.9 * num) + + y_train = df_train[0].values + y_test = df_test[0].values + y_eval = y_train[split_num:] + y_train = y_train[:split_num] + + X_train = df_train.drop(0, axis=1).values + X_test = df_test.drop(0, axis=1).values + X_eval = X_train[split_num:, :] + X_train = X_train[:split_num, :] + + # create dataset for lightgbm + lgb_train = lgb.Dataset(X_train, y_train) + lgb_eval = lgb.Dataset(X_eval, y_eval, reference=lgb_train) + + return lgb_train, lgb_eval, X_test, y_test + +def run(lgb_train, lgb_eval, params, X_test, y_test): + print('Start training...') + + params['num_leaves'] = int(params['num_leaves']) + + # train + gbm = lgb.train(params, + lgb_train, + num_boost_round=20, + valid_sets=lgb_eval, + early_stopping_rounds=5) + + print('Start predicting...') + + # predict + y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration) + + # eval + rmse = mean_squared_error(y_test, y_pred) ** 0.5 + print('The rmse of prediction is:', rmse) + + nni.report_final_result(rmse) + +if __name__ == '__main__': + lgb_train, lgb_eval, X_test, y_test = load_data() + + try: + # get parameters from tuner + RECEIVED_PARAMS = nni.get_next_parameter() + LOG.debug(RECEIVED_PARAMS) + PARAMS = get_default_parameters() + PARAMS.update(RECEIVED_PARAMS) + LOG.debug(PARAMS) + + # train + run(lgb_train, lgb_eval, PARAMS, X_test, y_test) + except Exception as exception: + LOG.exception(exception) + raise \ No newline at end of file diff --git a/examples/trials/auto-gbdt/requirments.txt b/examples/trials/auto-gbdt/requirments.txt new file mode 100644 index 0000000000000000000000000000000000000000..182230bed876bf877e7658496dfeb72535248e62 --- /dev/null +++ b/examples/trials/auto-gbdt/requirments.txt @@ -0,0 +1,2 @@ +lightgbm +pandas diff --git a/examples/trials/auto-gbdt/search_space.json b/examples/trials/auto-gbdt/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..e55aaa6b79c812f7fabc5c5605f9dd11361a8158 --- /dev/null +++ b/examples/trials/auto-gbdt/search_space.json @@ -0,0 +1,18 @@ +{ + "num_leaves": { + "_type": "randint", + "_value": [20, 31] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.01, 0.05, 0.1, 0.2] + }, + "bagging_fraction": { + "_type": "uniform", + "_value": [0.7, 1.0] + }, + "bagging_freq": { + "_type": "choice", + "_value": [1, 2, 4, 8, 10] + } +} diff --git a/examples/trials/benchmarking/automlbenchmark/.gitignore b/examples/trials/benchmarking/automlbenchmark/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5184f9196f0ff51dd0e56b615cfc8fed069ce532 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/.gitignore @@ -0,0 +1,13 @@ +# data files +nni/data/ + +# benchmark repository +automlbenchmark/ + +# all 
experiment results +results* + +# intermediate outputs of tuners +smac3-output* +param_config_space.pcs +scenario.txt \ No newline at end of file diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-binary.yaml b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-binary.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a3b94d5af900390f89187b832378db98e1780112 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-binary.yaml @@ -0,0 +1,29 @@ +--- +- name: __defaults__ + folds: 2 + cores: 2 + max_runtime_seconds: 300 + +- name: Australian + openml_task_id: 146818 + +- name: blood-transfusion + openml_task_id: 10101 + +- name: christine + openml_task_id: 168908 + +- name: credit-g + openml_task_id: 31 + +- name: kc1 + openml_task_id: 3917 + +- name: kr-vs-kp + openml_task_id: 3 + +- name: phoneme + openml_task_id: 9952 + +- name: sylvine + openml_task_id: 168912 diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-multiclass.yaml b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-multiclass.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16a252eb3b725ee858084e512c7d7743fbe265dc --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-multiclass.yaml @@ -0,0 +1,29 @@ +--- +- name: __defaults__ + folds: 2 + cores: 2 + max_runtime_seconds: 300 + +- name: car + openml_task_id: 146821 + +- name: cnae-9 + openml_task_id: 9981 + +- name: dilbert + openml_task_id: 168909 + +- name: fabert + openml_task_id: 168910 + +- name: jasmine + openml_task_id: 168911 + +- name: mfeat-factors + openml_task_id: 12 + +- name: segment + openml_task_id: 146822 + +- name: vehicle + openml_task_id: 53 diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-regression.yaml b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-regression.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7382d6b423c17cc3a80111a774a3c21ba04c324c --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall-regression.yaml @@ -0,0 +1,30 @@ +--- +- name: __defaults__ + folds: 2 + cores: 2 + max_runtime_seconds: 300 + +- name: cholesterol + openml_task_id: 2295 + +- name: liver-disorders + openml_task_id: 52948 + +- name: kin8nm + openml_task_id: 2280 + +- name: cpu_small + openml_task_id: 4883 + +- name: titanic_2 + openml_task_id: 211993 + +- name: boston + openml_task_id: 4857 + +- name: stock + openml_task_id: 2311 + +- name: space_ga + openml_task_id: 4835 + \ No newline at end of file diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall.yaml b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b68dc898da6421497ee2ac88c91b411d4adb3c4 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall.yaml @@ -0,0 +1,77 @@ +--- +- name: __defaults__ + folds: 2 + cores: 2 + max_runtime_seconds: 300 + +- name: cholesterol + openml_task_id: 2295 + +- name: liver-disorders + openml_task_id: 52948 + +- name: kin8nm + openml_task_id: 2280 + +- name: cpu_small + openml_task_id: 4883 + +- name: titanic_2 + openml_task_id: 211993 + +- name: boston + openml_task_id: 4857 + +- name: stock + openml_task_id: 2311 + +- name: space_ga + openml_task_id: 4835 + +- name: Australian + openml_task_id: 146818 + +- 
name: blood-transfusion + openml_task_id: 10101 + +- name: car + openml_task_id: 146821 + +- name: christine + openml_task_id: 168908 + +- name: cnae-9 + openml_task_id: 9981 + +- name: credit-g + openml_task_id: 31 + +- name: dilbert + openml_task_id: 168909 + +- name: fabert + openml_task_id: 168910 + +- name: jasmine + openml_task_id: 168911 + +- name: kc1 + openml_task_id: 3917 + +- name: kr-vs-kp + openml_task_id: 3 + +- name: mfeat-factors + openml_task_id: 12 + +- name: phoneme + openml_task_id: 9952 + +- name: segment + openml_task_id: 146822 + +- name: sylvine + openml_task_id: 168912 + +- name: vehicle + openml_task_id: 53 diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall_description.txt b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall_description.txt new file mode 100644 index 0000000000000000000000000000000000000000..d8d0aa0ec40adc284cfaf9bd498cdc4baaf35247 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnismall_description.txt @@ -0,0 +1,152 @@ +nnismall: +This benchmark contains 24 tasks: 8 tasks each for binary classification, multi-class classification, and regression. + +Binary Classification: +- name: Australian + openml_task_id: 146818 + Introduction: Australian Credit Approval dataset, originating from the StatLog project. It concerns credit card applications. + Features: 6 numerical and 8 categorical features, all normalized to [-1,1]. + Number of instances: 690 + +- name: blood-transfusion + openml_task_id: 10101 + Introduction: Data taken from the Blood Transfusion Service Center in Hsin-Chu City, Taiwan. The target attribute is a binary variable representing whether the donor donated blood in March 2007 (2 stands for donating blood; 1 stands for not donating blood). + Features: 4 numerical features. + Number of instances: 748 + +- name: christine + openml_task_id: 168908 + Introduction: An OpenML challenge dataset for classification. The identity of the dataset and the type of data are concealed. + Features: 1599 numerical features and 38 categorical features + Number of instances: 5418 + +- name: credit-g + openml_task_id: 31 + Introduction: This dataset classifies people described by a set of attributes as good or bad credit risks. + Features: 7 numerical features and 13 categorical features + Number of instances: 1000 + +- name: kc1 + openml_task_id: 3917 + Introduction: One of the NASA Metrics Data Program defect data sets. Data from software for storage management for receiving and processing ground data. + Features: 21 numerical features + Number of instances: 2109 + +- name: kr-vs-kp + openml_task_id: 3 + Introduction: Given a board configuration, predict whether White can win. + Features: 37 categorical features + Number of instances: 3196 + +- name: phoneme + openml_task_id: 9952 + Introduction: The aim of this dataset is to distinguish between nasal (class 0) and oral sounds (class 1). + Features: 5 numerical features + Number of instances: 5404 + +- name: sylvine + openml_task_id: 168912 + Introduction: An OpenML challenge dataset for classification. The identity of the dataset and the type of data are concealed. + Features: 20 numerical features + Number of instances: 5124 + + + +Multi-class Classification: +- name: car + openml_task_id: 146821 + Introduction: The model evaluates cars using six intermediate concepts.
+ Features: 6 categorical features + Number of instances: 1728 + +- name: cnae-9 + openml_task_id: 9981 + Introduction: This is a data set containing 1080 documents of free-text business descriptions of Brazilian companies categorized into a subset of 9 categories. + Features: 856 numerical features (word frequency) + Number of instances: 1080 + +- name: dilbert + openml_task_id: 168909 + Introduction: An OpenML challenge dataset for classification. The identity of the dataset and the type of data are concealed. + Features: 2000 numerical features + Number of instances: 10000 + +- name: fabert + openml_task_id: 168910 + Introduction: An OpenML challenge dataset for classification. The identity of the dataset and the type of data are concealed. + Features: 800 numerical features + Number of instances: 8237 + +- name: jasmine + openml_task_id: 168911 + Introduction: An OpenML challenge dataset for classification. The identity of the dataset and the type of data are concealed. + Features: 8 numerical features and 137 categorical features + Number of instances: 2984 + +- name: mfeat-factors + openml_task_id: 12 + Introduction: Hand-written numeral classification. + Features: 216 numerical features (corresponding to binarized images) + Number of instances: 2000 + +- name: segment + openml_task_id: 146822 + Introduction: Segmentation of outdoor images into 7 classes. + Features: 19 numerical features + Number of instances: 2310 (3x3 patches from 7 images) + +- name: vehicle + openml_task_id: 53 + Introduction: Classify a given silhouette as one of four types of vehicle, using a set of features extracted from the silhouette. The vehicle may be viewed from one of many different angles. + Features: 18 numerical features + Number of instances: 846 + + +Regression: +- name: cholesterol + openml_task_id: 2295 + Introduction: Predict the cholesterol level of patients. + Features: 6 numerical features and 7 categorical features + Number of instances: 303 + +- name: liver-disorders + openml_task_id: 52948 + Introduction: Predict alcohol consumption based on blood test results. + Features: 5 numerical features + Number of instances: 345 + +- name: kin8nm + openml_task_id: 2280 + Introduction: This dataset is concerned with the forward kinematics of an 8-link robot arm. + Features: 8 numerical features + Number of instances: 8192 + +- name: cpu_small + openml_task_id: 4883 + Introduction: Predict the portion of time that CPUs run in user mode. + Features: 12 numerical features + Number of instances: 8192 + +- name: titanic_2 + openml_task_id: 211993 + Introduction: Predict the probability of survival. + Features: 7 numerical features + Number of instances: 891 + +- name: boston + openml_task_id: 4857 + Introduction: Boston house price prediction. + Features: 11 numerical features and 2 categorical features + Number of instances: 506 + +- name: stock + openml_task_id: 2311 + Introduction: This is a dataset obtained from the StatLib repository. The data provided are daily stock prices from January 1988 through October 1991, for ten aerospace companies. + Features: 11 numerical features + Number of instances: 950 + +- name: space_ga + openml_task_id: 4835 + Introduction: Predict the log of the proportion of votes cast for both candidates in the 1980 presidential election.
+ Features: 6 numerical features + Number of instances: 3107 diff --git a/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnivalid.yaml b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnivalid.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffc4d3d8ee60db3bea78b1fee0d650c37fbb2bea --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/benchmarks/nnivalid.yaml @@ -0,0 +1,43 @@ +--- +# for doc purposes, using the extended syntax where it applies. + +# FORMAT: global defaults are defined in config.yaml +- name: __dummy-task + enabled: false # actual default is `true` of course... + openml_task_id: 0 + metric: # the first metric in the task list will be optimized against and used for the main result, the other ones are optional and purely informative. Only the metrics annotated with (*) can be used as a performance metric. + - # classification + - acc # (*) accuracy + - auc # (*) area under curve + - logloss # (*) log loss + - f1 # F1 score + - # regression + - mae # (*) mean absolute error + - mse # (*) mean squared error + - rmse # root mean squared error + - rmsle # root mean squared log error + - r2 # R^2 score + folds: 1 + max_runtime_seconds: 600 + cores: 1 + max_mem_size_mb: -1 + ec2_instance_type: m5.large + + +# local defaults (applying only to tasks defined in this file) can be defined in a task named "__defaults__" +- name: __defaults__ + folds: 2 + cores: 2 + max_runtime_seconds: 180 + +- name: kc2 + openml_task_id: 3913 + description: "binary test dataset" + +- name: iris + openml_task_id: 59 + description: "multiclass test dataset" + +- name: cholesterol + openml_task_id: 2295 + description: "regression test dataset" diff --git a/examples/trials/benchmarking/automlbenchmark/nni/config.yaml b/examples/trials/benchmarking/automlbenchmark/nni/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4eb5360d5058e99f994ae019e55a1e1a860dfe0 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/config.yaml @@ -0,0 +1,16 @@ +--- +input_dir: '{user}/data' + +frameworks: + definition_file: + - '{root}/resources/frameworks.yaml' + - '{user}/frameworks.yaml' + +benchmarks: + definition_dir: + - '{user}/benchmarks' + - '{root}/resources/benchmarks' + + constraints_file: + - '{user}/constraints.yaml' + - '{root}/resources/constraints.yaml' diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/.marker_setup_safe_to_delete b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/.marker_setup_safe_to_delete new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/__init__.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5a2aa16a9ce8a0b2ff0f5e14d4a734e546b525 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license.
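+
+# This wrapper defers importing .exec until the framework is actually invoked,
+# so merely discovering the extension stays cheap. A minimal sketch of how the
+# amlb runner is expected to call it (dataset and task_config are amlb objects):
+#
+#     from extensions.NNI import run
+#     result = run(dataset, task_config)   # dispatches to exec.run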
+ +def run(*args, **kwargs): + from .exec import run + return run(*args, **kwargs) diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_mlp.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..fccfb4538286c6ecd8f7ea1100e0e8b882f7b3cc --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_mlp.py @@ -0,0 +1,154 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import sklearn +import time +import numpy as np + +from sklearn.impute import SimpleImputer +from sklearn.compose import ColumnTransformer +from sklearn.preprocessing import OneHotEncoder, StandardScaler +from sklearn.pipeline import Pipeline +from sklearn.neural_network import MLPClassifier, MLPRegressor +from sklearn.model_selection import cross_val_score + +from amlb.benchmark import TaskConfig +from amlb.data import Dataset +from amlb.datautils import impute +from amlb.utils import Timer +from amlb.results import save_predictions_to_file + + +arch_choices = [(16), (64), (128), (256), + (16, 16), (64, 64), (128, 128), (256, 256), + (16, 16, 16), (64, 64, 64), (128, 128, 128), (256, 256, 256), + (256, 128, 64, 16), (128, 64, 16), (64, 16), + (16, 64, 128, 256), (16, 64, 128), (16, 64)] + +SEARCH_SPACE = { + "hidden_layer_sizes": {"_type":"choice", "_value": arch_choices}, + "learning_rate_init": {"_type":"choice", "_value": [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001]}, + "alpha": {"_type":"choice", "_value": [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]}, + "momentum": {"_type":"uniform","_value":[0, 1]}, + "beta_1": {"_type":"uniform","_value":[0, 1]}, + "tol": {"_type":"choice", "_value": [0.001, 0.0005, 0.0001, 0.00005, 0.00001]}, + "max_iter": {"_type":"randint", "_value": [2, 256]}, +} + +def preprocess_mlp(dataset, log): + ''' + For MLP: + - For numerical features, normalize them after null imputation. + - For categorical features, use one-hot encoding after null imputation. + ''' + cat_columns, num_columns = [], [] + shift_amount = 0 + for i, f in enumerate(dataset.features): + if f.is_target: + shift_amount += 1 + continue + elif f.is_categorical(): + cat_columns.append(i - shift_amount) + else: + num_columns.append(i - shift_amount) + + cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), + ('onehot_encoder', OneHotEncoder()), + ]) + + num_pipeline = Pipeline([('imputer', SimpleImputer(strategy='mean')), + ('standard_scaler', StandardScaler()), + ]) + + data_pipeline = ColumnTransformer([ + ('categorical', cat_pipeline, cat_columns), + ('numerical', num_pipeline, num_columns), + ]) + + data_pipeline.fit(np.concatenate([dataset.train.X, dataset.test.X], axis=0)) + + X_train = data_pipeline.transform(dataset.train.X) + X_test = data_pipeline.transform(dataset.test.X) + + return X_train, X_test + + +def run_mlp(dataset, config, tuner, log): + """ + Using the given tuner, tune an MLP within the given time constraint. + This function uses the cross-validation score as the feedback score to the tuner. + The search space over which the tuners search is defined empirically above as a global variable.
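+
+    A minimal sketch of one iteration of the loop below, assuming the NNITuner
+    wrapper API from tuners.py:
+
+        tuner.update_search_space(SEARCH_SPACE)
+        param_id, params = tuner.generate_parameters()
+        score = cross_val_score(MLPClassifier(**params), X_train, y_train).mean()
+        tuner.receive_trial_result(param_id, params, score)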
+ """ + + limit_type, trial_limit = config.framework_params['limit_type'], None + if limit_type == 'ntrials': + trial_limit = int(config.framework_params['trial_limit']) + + X_train, X_test = preprocess_mlp(dataset, log) + y_train, y_test = dataset.train.y, dataset.test.y + + is_classification = config.type == 'classification' + estimator = MLPClassifier if is_classification else MLPRegressor + + best_score, best_params, best_model = None, None, None + score_higher_better = True + + tuner.update_search_space(SEARCH_SPACE) + + start_time = time.time() + trial_count = 0 + intermediate_scores = [] + intermediate_best_scores = [] # should be monotonically increasing + + while True: + try: + param_idx, cur_params = tuner.generate_parameters() + if cur_params is not None and cur_params != {}: + trial_count += 1 + train_params = cur_params.copy() + + if 'TRIAL_BUDGET' in cur_params: + train_params.pop('TRIAL_BUDGET') + + log.info("Trial {}: \n{}\n".format(param_idx, train_params)) + + cur_model = estimator(random_state=config.seed, **train_params) + + # Here score is the output of score() from the estimator + cur_score = cross_val_score(cur_model, X_train, y_train) + cur_score = sum(cur_score) / float(len(cur_score)) + if np.isnan(cur_score): + cur_score = 0 + + log.info("Score: {}\n".format(cur_score)) + if best_score is None or (score_higher_better and cur_score > best_score) or (not score_higher_better and cur_score < best_score): + best_score, best_params, best_model = cur_score, cur_params, cur_model + + intermediate_scores.append(cur_score) + intermediate_best_scores.append(best_score) + tuner.receive_trial_result(param_idx, cur_params, cur_score) + + if limit_type == 'time': + current_time = time.time() + elapsed_time = current_time - start_time + if elapsed_time >= config.max_runtime_seconds: + break + elif limit_type == 'ntrials': + if trial_count >= trial_limit: + break + except: + break + + # This line is required to fully terminate some advisors + tuner.handle_terminate() + + log.info("Tuning done, the best parameters are:\n{}\n".format(best_params)) + + # retrain on the whole dataset + with Timer() as training: + best_model.fit(X_train, y_train) + predictions = best_model.predict(X_test) + probabilities = best_model.predict_proba(X_test) if is_classification else None + + return probabilities, predictions, training, y_test, intermediate_scores, intermediate_best_scores diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_random_forest.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_random_forest.py new file mode 100644 index 0000000000000000000000000000000000000000..8741a60bfc0060ca05c708417b8475fa87a203b8 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/architectures/run_random_forest.py @@ -0,0 +1,175 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
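+
+# Unlike the MLP pipeline in run_mlp.py, this architecture needs no feature
+# scaling, and categorical features are ordinal-encoded rather than one-hot
+# encoded, since tree splits can consume integer category codes directly.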
+ +import logging +import sklearn +import time +import numpy as np + +from sklearn.impute import SimpleImputer +from sklearn.compose import ColumnTransformer +from sklearn.preprocessing import OrdinalEncoder +from sklearn.pipeline import Pipeline +from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor +from sklearn.model_selection import cross_val_score + +from amlb.benchmark import TaskConfig +from amlb.data import Dataset +from amlb.datautils import impute +from amlb.utils import Timer +from amlb.results import save_predictions_to_file + + +SEARCH_SPACE = { + "n_estimators": {"_type":"randint", "_value": [4, 2048]}, + "max_depth": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 0]}, # 0 for None + "min_samples_leaf": {"_type":"randint", "_value": [1, 8]}, + "min_samples_split": {"_type":"randint", "_value": [2, 16]}, + "max_leaf_nodes": {"_type":"randint", "_value": [0, 4096]} # 0 for None +} + +# change SEARCH_SPACE to the following spaces to experiment with different search spaces + +# SEARCH_SPACE_CHOICE = { +# "n_estimators": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]}, +# "max_depth": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 0]}, # 0 for None +# "min_samples_leaf": {"_type":"choice", "_value": [1, 2, 4, 8]}, +# "min_samples_split": {"_type":"choice", "_value": [2, 4, 8, 16]}, +# "max_leaf_nodes": {"_type":"choice", "_value": [8, 32, 128, 512, 1024, 2048, 4096, 0]} # 0 for None +# } + +# SEARCH_SPACE_LOG = { +# "n_estimators": {"_type":"loguniform", "_value": [4, 2048]}, +# "max_depth": {"_type":"choice", "_value": [4, 8, 16, 32, 64, 128, 256, 0]}, # 0 for None +# "min_samples_leaf": {"_type":"randint", "_value": [1, 8]}, +# "min_samples_split": {"_type":"randint", "_value": [2, 16]}, +# "max_leaf_nodes": {"_type":"loguniform", "_value": [4, 4096]} # 0 for None +# } + +# SEARCH_SPACE_SIMPLE = { +# "n_estimators": {"_type":"choice", "_value": [10]}, +# "max_depth": {"_type":"choice", "_value": [5]}, +# "min_samples_leaf": {"_type":"choice", "_value": [8]}, +# "min_samples_split": {"_type":"choice", "_value": [16]}, +# "max_leaf_nodes": {"_type":"choice", "_value": [64]} +# } + + +def preprocess_random_forest(dataset, log): + ''' + For random forest: + - Do nothing for numerical features except null imputation. + - For categorical features, use ordinal encoding to map them into integers. + ''' + cat_columns, num_columns = [], [] + shift_amount = 0 + for i, f in enumerate(dataset.features): + if f.is_target: + shift_amount += 1 + continue + elif f.is_categorical(): + cat_columns.append(i - shift_amount) + else: + num_columns.append(i - shift_amount) + + cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), + ('ordinal_encoder', OrdinalEncoder()), + ]) + + num_pipeline = Pipeline([('imputer', SimpleImputer(strategy='mean')), + ]) + + data_pipeline = ColumnTransformer([ + ('categorical', cat_pipeline, cat_columns), + ('numerical', num_pipeline, num_columns), + ]) + + data_pipeline.fit(np.concatenate([dataset.train.X, dataset.test.X], axis=0)) + + X_train = data_pipeline.transform(dataset.train.X) + X_test = data_pipeline.transform(dataset.test.X) + + return X_train, X_test + + +def run_random_forest(dataset, config, tuner, log): + """ + Using the given tuner, tune a random forest within the given time constraint. + This function uses the cross-validation score as the feedback score to the tuner. + The search space over which the tuners search is defined empirically above as a global variable.
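+
+    Note: in SEARCH_SPACE, 0 stands in for None (see the "# 0 for None"
+    comments above); the loop below pops those keys so that sklearn falls back
+    to its defaults, roughly:
+
+        if cur_params['max_depth'] == 0:   # 0 encodes max_depth=None
+            train_params.pop('max_depth')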
+ """ + + limit_type, trial_limit = config.framework_params['limit_type'], None + if limit_type == 'ntrials': + trial_limit = int(config.framework_params['trial_limit']) + + X_train, X_test = preprocess_random_forest(dataset, log) + y_train, y_test = dataset.train.y, dataset.test.y + + is_classification = config.type == 'classification' + estimator = RandomForestClassifier if is_classification else RandomForestRegressor + + best_score, best_params, best_model = None, None, None + score_higher_better = True + + tuner.update_search_space(SEARCH_SPACE) + + start_time = time.time() + trial_count = 0 + intermediate_scores = [] + intermediate_best_scores = [] # should be monotonically increasing + + while True: + try: + param_idx, cur_params = tuner.generate_parameters() + if cur_params is not None and cur_params != {}: + trial_count += 1 + train_params = cur_params.copy() + train_params = {x: int(train_params[x]) for x in train_params.keys()} + if 'TRIAL_BUDGET' in cur_params: + train_params.pop('TRIAL_BUDGET') + if cur_params['max_leaf_nodes'] == 0: + train_params.pop('max_leaf_nodes') + if cur_params['max_depth'] == 0: + train_params.pop('max_depth') + log.info("Trial {}: \n{}\n".format(param_idx, train_params)) + + cur_model = estimator(random_state=config.seed, **train_params) + + # Here score is the output of score() from the estimator + cur_score = cross_val_score(cur_model, X_train, y_train) + cur_score = sum(cur_score) / float(len(cur_score)) + if np.isnan(cur_score): + cur_score = 0 + + log.info("Score: {}\n".format(cur_score)) + if best_score is None or (score_higher_better and cur_score > best_score) or (not score_higher_better and cur_score < best_score): + best_score, best_params, best_model = cur_score, cur_params, cur_model + + intermediate_scores.append(cur_score) + intermediate_best_scores.append(best_score) + tuner.receive_trial_result(param_idx, cur_params, cur_score) + + if limit_type == 'time': + current_time = time.time() + elapsed_time = current_time - start_time + if elapsed_time >= config.max_runtime_seconds: + break + elif limit_type == 'ntrials': + if trial_count >= trial_limit: + break + except: + break + + # This line is required to fully terminate some advisors + tuner.handle_terminate() + + log.info("Tuning done, the best parameters are:\n{}\n".format(best_params)) + + # retrain on the whole dataset + with Timer() as training: + best_model.fit(X_train, y_train) + predictions = best_model.predict(X_test) + probabilities = best_model.predict_proba(X_test) if is_classification else None + + return probabilities, predictions, training, y_test, intermediate_scores, intermediate_best_scores diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/exec.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/exec.py new file mode 100644 index 0000000000000000000000000000000000000000..a79e0d3b5c3298ab0ad518559886565bb1cff321 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/exec.py @@ -0,0 +1,71 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
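+
+# Glue between the amlb framework API and the NNI tuners: validate the
+# framework params, run the tuning loop, save amlb's predictions, and write
+# one scorelog CSV per (tuner, task, fold) under the sibling scorelogs/
+# directory, which parse_result_csv.py later reads to build reports.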
+ +import logging + +from .tuners import NNITuner +from .run_experiment import * + +from amlb.benchmark import TaskConfig +from amlb.data import Dataset +from amlb.results import save_predictions_to_file +from amlb.utils import Timer + + +log = logging.getLogger(__name__) + + +def validate_config(config: TaskConfig): + if 'tuner_type' not in config.framework_params: + raise RuntimeError('framework.yaml does not have a "tuner_type" field.') + if 'limit_type' not in config.framework_params: + raise RuntimeError('framework.yaml does not have a "limit_type" field.') + if config.framework_params['limit_type'] not in ['time', 'ntrials']: + raise RuntimeError('"limit_type" field must be "time" or "ntrials".') + if config.framework_params['limit_type'] == 'ntrials': + if 'trial_limit' not in config.framework_params: + raise RuntimeError('framework.yaml does not have a "trial_limit" field.') + else: + try: + _ = int(config.framework_params['trial_limit']) + except (TypeError, ValueError): + raise RuntimeError('"trial_limit" field must be an integer.') + + +def save_scores_to_file(intermediate_scores, intermediate_best_scores, out_file): + """ + Save statistics of every trial to a log file for generating reports. + """ + with open(out_file, 'w') as f: + f.write('ntrials,trial_score,best_score\n') + for i, (trial_score, best_score) in enumerate(zip(intermediate_scores, intermediate_best_scores)): + f.write('{},{},{}\n'.format(i+1, trial_score, best_score)) + + +def run(dataset: Dataset, config: TaskConfig): + validate_config(config) + tuner = NNITuner(config) + if config.framework_params['limit_type'] == 'time': + log.info("Tuning {} with NNI {} with a maximum time of {}s\n" + .format(config.framework_params['arch_type'], tuner.description, config.max_runtime_seconds)) + elif config.framework_params['limit_type'] == 'ntrials': + log.info("Tuning {} with NNI {} with a maximum of {} trials\n" + .format(config.framework_params['arch_type'], tuner.description, config.framework_params['trial_limit'])) + log.info("Note: any time constraints are ignored.") + + probabilities, predictions, train_timer, y_test, intermediate_scores, intermediate_best_scores = run_experiment(dataset, config, tuner, log) + + save_predictions_to_file(dataset=dataset, + output_file=config.output_predictions_file, + probabilities=probabilities, + predictions=predictions, + truth=y_test) + + scores_file = '/'.join(config.output_predictions_file.split('/')[:-3]) + '/scorelogs/' + config.output_predictions_file.split('/')[-1] + assert(len(intermediate_scores) == len(intermediate_best_scores)) + save_scores_to_file(intermediate_scores, intermediate_best_scores, scores_file) + + return dict( + models_count=1, + training_duration=train_timer.duration + ) diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/run_experiment.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/run_experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..28016bfed4e19e5d73f7c35bc507c6cd4e7ed4b6 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/run_experiment.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license.
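+
+# Dispatch on framework_params['arch_type']. Adding a new architecture means
+# providing a run_<arch>(dataset, config, tuner, log) function with the same
+# return signature and a branch below, e.g. (hypothetical):
+#
+#     elif config.framework_params['arch_type'] == 'svm':
+#         return run_svm(dataset, config, tuner, log)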
+ +from .architectures.run_random_forest import * +from .architectures.run_mlp import * + + +def run_experiment(dataset, config, tuner, log): + if 'arch_type' not in config.framework_params: + raise RuntimeError('framework.yaml does not have an "arch_type" field.') + + if config.framework_params['arch_type'] == 'random_forest': + return run_random_forest(dataset, config, tuner, log) + + elif config.framework_params['arch_type'] == 'mlp': + return run_mlp(dataset, config, tuner, log) + + else: + raise RuntimeError('The requested arch type in framework.yaml is unavailable.') diff --git a/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/tuners.py b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/tuners.py new file mode 100644 index 0000000000000000000000000000000000000000..84bece36ac077e0b3417bc14554c5b00db9505f5 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/extensions/NNI/tuners.py @@ -0,0 +1,156 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import yaml +import importlib + +import nni +from nni.runtime.config import get_config_file +from nni.utils import MetricType +from nni.tuner import Tuner +from nni.runtime.msg_dispatcher_base import MsgDispatcherBase + +from amlb.benchmark import TaskConfig + + +def get_tuner_class_dict(): + config_file = str(get_config_file('registered_algorithms.yml')) + if os.path.exists(config_file): + with open(config_file, 'r') as f: + config = yaml.safe_load(f) + else: + config = {} + ret = {} + for t in ['tuners', 'advisors']: + for entry in config.get(t, []): # tolerate a missing or empty section + ret[entry['builtinName']] = entry['className'] + return ret + + +def get_tuner(config: TaskConfig): + name2tuner = get_tuner_class_dict() + if config.framework_params['tuner_type'] not in name2tuner: + raise RuntimeError('The requested tuner type is unavailable.') + else: + module_name = name2tuner[config.framework_params['tuner_type']] + tuner_name = module_name.split('.')[-1] + module_name = '.'.join(module_name.split('.')[:-1]) + tuner_type = getattr(importlib.import_module(module_name), tuner_name) + + # special handling for tuner initialization + tuner = None + if config.framework_params['tuner_type'] == 'TPE': + tuner = tuner_type('tpe') + + elif config.framework_params['tuner_type'] == 'Random': + tuner = tuner_type('random_search') + + elif config.framework_params['tuner_type'] == 'Anneal': + tuner = tuner_type('anneal') + + elif config.framework_params['tuner_type'] == 'Hyperband': + if 'max_resource' in config.framework_params: + tuner = tuner_type(R=config.framework_params['max_resource']) + else: + tuner = tuner_type() + + elif config.framework_params['tuner_type'] == 'BOHB': + if 'max_resource' in config.framework_params: + tuner = tuner_type(max_budget=config.framework_params['max_resource']) + else: + tuner = tuner_type(max_budget=60) + + else: + tuner = tuner_type() + + assert(tuner is not None) + + return tuner, config.framework_params['tuner_type'] + + +class NNITuner: + ''' + A specialized wrapper for the automlbenchmark framework. + Abstracts the different behaviors of tuners and advisors into a single tuner API.
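+
+    A minimal usage sketch, mirroring how run_mlp/run_random_forest drive it:
+
+        tuner = NNITuner(task_config)
+        tuner.update_search_space(search_space)
+        param_id, params = tuner.generate_parameters()
+        # ... evaluate params ...
+        tuner.receive_trial_result(param_id, params, score)
+        tuner.handle_terminate()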
+ ''' + def __init__(self, config: TaskConfig): + self.config = config + self.core, self.description = get_tuner(config) + + # 'tuner' or 'advisor' + self.core_type = None + if isinstance(self.core, Tuner): + self.core_type = 'tuner' + elif isinstance(self.core, MsgDispatcherBase): + self.core_type = 'advisor' + else: + raise RuntimeError('Unsupported tuner or advisor type') + + # note: tuners and advisors use this variable differently + self.cur_param_id = 0 + + + def __del__(self): + self.handle_terminate() + + + def update_search_space(self, search_space): + if self.core_type == 'tuner': + self.core.update_search_space(search_space) + + elif self.core_type == 'advisor': + self.core.handle_update_search_space(search_space) + # special initializations for BOHB Advisor + from nni.algorithms.hpo.hyperband_advisor import Hyperband + if isinstance(self.core, Hyperband): + pass + else: + from nni.algorithms.hpo.bohb_advisor.bohb_advisor import BOHB + from nni.algorithms.hpo.bohb_advisor.config_generator import CG_BOHB + if isinstance(self.core, BOHB): + self.core.cg = CG_BOHB(configspace=self.core.search_space, + min_points_in_model=self.core.min_points_in_model, + top_n_percent=self.core.top_n_percent, + num_samples=self.core.num_samples, + random_fraction=self.core.random_fraction, + bandwidth_factor=self.core.bandwidth_factor, + min_bandwidth=self.core.min_bandwidth) + self.core.generate_new_bracket() + + + def generate_parameters(self): + self.cur_param_id += 1 + if self.core_type == 'tuner': + self.cur_param = self.core.generate_parameters(self.cur_param_id-1) + return self.cur_param_id-1, self.cur_param + + elif self.core_type == 'advisor': + self.cur_param = self.core._get_one_trial_job() + hyperparams = self.cur_param['parameters'].copy() + #hyperparams.pop('TRIAL_BUDGET') + return self.cur_param['parameter_id'], hyperparams + + + def receive_trial_result(self, parameter_id, parameters, value): + if self.core_type == 'tuner': + return self.core.receive_trial_result(parameter_id, parameters, value) + + elif self.core_type == 'advisor': + metric_report = {} + metric_report['parameter_id'] = parameter_id + metric_report['trial_job_id'] = self.cur_param_id + metric_report['type'] = MetricType.FINAL + metric_report['value'] = str(value) + metric_report['sequence'] = self.cur_param_id + return self.core.handle_report_metric_data(metric_report) + + + def handle_terminate(self): + if self.core_type == 'tuner': + pass + + elif self.core_type == 'advisor': + self.core.stopping = True + + diff --git a/examples/trials/benchmarking/automlbenchmark/nni/frameworks.yaml b/examples/trials/benchmarking/automlbenchmark/nni/frameworks.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dcab03f625aa3c09050a8f9f229f79654e68995d --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/nni/frameworks.yaml @@ -0,0 +1,93 @@ +--- + +NNI: + module: extensions.NNI + version: 'stable' + project: https://github.com/microsoft/nni + +# tuner_type in ['TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'GPTuner', 'MetisTuner', 'DNGOTuner', 'Hyperband', 'BOHB'] +# arch_type in ['random_forest', 'mlp'] +# limit_type in ['time', 'ntrials'] +# trial_limit must be an integer + +TPE: + extends: NNI + params: + tuner_type: 'TPE' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +Random: + extends: NNI + params: + tuner_type: 'Random' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +Anneal: + extends: NNI + params: + tuner_type: 'Anneal' + arch_type: 'random_forest'
+ limit_type: 'ntrials' + trial_limit: 10 + +Evolution: + extends: NNI + params: + tuner_type: 'Evolution' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +SMAC: + extends: NNI + params: + tuner_type: 'SMAC' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +GPTuner: + extends: NNI + params: + tuner_type: 'GPTuner' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +MetisTuner: + extends: NNI + params: + tuner_type: 'MetisTuner' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +DNGOTuner: + extends: NNI + params: + tuner_type: 'DNGOTuner' + arch_type: 'random_forest' + limit_type: 'ntrials' + trial_limit: 10 + +Hyperband: + extends: NNI + params: + tuner_type: 'Hyperband' + arch_type: 'random_forest' + max_resource: 60 + limit_type: 'ntrials' + trial_limit: 10 + +BOHB: + extends: NNI + params: + tuner_type: 'BOHB' + arch_type: 'random_forest' + max_resource: 60 + limit_type: 'ntrials' + trial_limit: 10 diff --git a/examples/trials/benchmarking/automlbenchmark/parse_result_csv.py b/examples/trials/benchmarking/automlbenchmark/parse_result_csv.py new file mode 100644 index 0000000000000000000000000000000000000000..b10827da36b6af2e25c1042f5441809e6b960bbf --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/parse_result_csv.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import pandas as pd +import sys +import matplotlib.pyplot as plt +from matplotlib.lines import Line2D + + +def generate_perf_report(result_file_name): + """ + Generate a performance report. + The input result_file_name should be the path of the "results.csv" generated by automlbenchmark. + This function outputs 1) a formatted report named "performances.txt" in the "reports/" directory + located in the same parent directory as "results.csv" and 2) a report named "rankings.txt" in the + same directory ranking the tuners contained in "results.csv". 
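+
+    Example, assuming a results directory created by runbenchmark_nni.sh
+    (which also creates the required reports/ subdirectory); the timestamped
+    directory name here is hypothetical:
+
+        generate_perf_report('results_20210501123000/results.csv')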
+ """ + result = pd.read_csv(result_file_name) + task_ids = result['id'].unique() + tuners = result['framework'].unique() + metric_types = ['rmse', 'auc', 'logloss'] + metric2taskres = {} + for m in metric_types: + metric2taskres[m] = [] + keep_parameters = ['framework', 'constraint', 'result', 'metric', 'params', 'utc', 'duration'] + list(result.columns[16:]) + + # performance report: one table per task + with open(result_file_name.replace('results.csv', 'reports/performances.txt'), 'w') as out_f: + for task_id in task_ids: + task_results = result[result['id'] == task_id] + task_name = task_results.task.unique()[0] + out_f.write("====================================================\n") + out_f.write("Task ID: {}\n".format(task_id)) + out_f.write("Task Name: {}\n".format(task_name)) + folds = task_results['fold'].unique() + for fold in folds: + out_f.write("Fold {}:\n".format(fold)) + res = task_results[task_results['fold'] == fold][keep_parameters] + out_f.write(res.to_string()) + out_f.write('\n') + # save results for the next step + res_list = [] + for _, row in res.iterrows(): + res_list.append([row['framework'], row['result']]) + metric2taskres[res['metric'].unique()[0]].append(res_list) + out_f.write('\n') + + # rankings report: per task and per tuner + with open(result_file_name.replace('results.csv', 'reports/rankings.txt'), 'w') as out_f: + # generate reports per task + ranking_aggs = {} + for metric_type in metric_types: + sorted_lists = [] + if metric_type in ['auc']: + for l in metric2taskres[metric_type]: + l_sorted = sorted(l, key=(lambda x: x[-1]), reverse=True) + l_sorted = [[x[0], x[1], i+1] for (i, x) in enumerate(l_sorted)] + sorted_lists.append(l_sorted) + elif metric_type in ['rmse', 'logloss']: + for l in metric2taskres[metric_type]: + l_sorted = sorted(l, key=(lambda x: x[-1])) + l_sorted = [[x[0], x[1], i+1] for (i, x) in enumerate(l_sorted)] + sorted_lists.append(l_sorted) + metric2taskres[metric_type] = sorted_lists + + out_f.write("====================================================\n") + out_f.write("Average rankings for metric {}:\n".format(metric_type)) + ranking_agg = [[t, 0] for t in tuners] + for i, tuner in enumerate(tuners): + for trial_res in metric2taskres[metric_type]: + for t, s, r in trial_res: + if t == tuner: + ranking_agg[i][-1] += r + + ranking_agg = [[x[0], x[1]/float(len(metric2taskres[metric_type]))] for x in ranking_agg] + ranking_agg = sorted(ranking_agg, key=(lambda x: x[-1])) + for t, r in ranking_agg: + out_f.write('{:<12} {:.2f}\n'.format(t, r)) + ranking_aggs[metric_type] = ranking_agg + out_f.write('\n') + + # generate reports per tuner + out_f.write("====================================================\n") + out_f.write("Average rankings for tuners:\n") + header_string = '{:<12}' + for _ in metric_types: + header_string += ' {:<12}' + header_string += '\n' + out_f.write(header_string.format("Tuner", *metric_types)) + for tuner in tuners: + tuner_ranks = [] + for m in metric_types: + for t, r in ranking_aggs[m]: + if t == tuner: + tuner_ranks.append('{:.2f}'.format(r)) + break + out_f.write(header_string.format(tuner, *tuner_ranks)) + out_f.write('\n') + + +def generate_graphs(result_file_name): + """ + Generate graphs describing performance statistics. + The input result_file_name should be the path of the "results.csv" generated by automlbenchmark. + For each task, this function outputs two graphs in the "reports/" directory located in the same + parent directory as "results.csv". 
+ The graph named task_foldx_1.jpg summarizes the best score each tuner gets after n trials. + The graph named task_foldx_2.jpg summarizes the score each tuner gets in each trial. + """ + markers = list(Line2D.markers.keys()) + result = pd.read_csv(result_file_name) + scorelog_dir = result_file_name.replace('results.csv', 'scorelogs/') + output_dir = result_file_name.replace('results.csv', 'reports/') + task_ids = result['id'].unique() + for task_id in task_ids: + task_results = result[result['id'] == task_id] + task_name = task_results.task.unique()[0] + folds = task_results['fold'].unique() + + for fold in folds: + # load scorelog files + trial_scores, best_scores = [], [] + tuners = list(task_results[task_results.fold == fold]['framework'].unique()) + for tuner in tuners: + scorelog_name = '{}_{}_{}.csv'.format(tuner.lower(), task_name, fold) + intermediate_scores = pd.read_csv(scorelog_dir + scorelog_name) + bs = list(intermediate_scores['best_score']) + ts = [(i+1, x) for i, x in enumerate(list(intermediate_scores['trial_score'])) if x != 0] + best_scores.append([tuner, bs]) + trial_scores.append([tuner, ts]) + + # generate the best score graph + plt.figure(figsize=(16, 8)) + for i, (tuner, score) in enumerate(best_scores): + plt.plot(score, label=tuner, marker=markers[i]) + plt.title('{} Fold {}'.format(task_name, fold)) + plt.xlabel("Number of Trials") + plt.ylabel("Best Score") + plt.legend() + plt.savefig(output_dir + '{}_fold{}_1.jpg'.format(task_name, fold)) + plt.close() + + # generate the trial score graph + plt.figure(figsize=(16, 8)) + for i, (tuner, score) in enumerate(trial_scores): + x = [l[0] for l in score] + y = [l[1] for l in score] + plt.plot(x, y, label=tuner) #, marker=markers[i]) + plt.title('{} Fold {}'.format(task_name, fold)) + plt.xlabel("Trial Number") + plt.ylabel("Trial Score") + plt.legend() + plt.savefig(output_dir + '{}_fold{}_2.jpg'.format(task_name, fold)) + plt.close() + + +def main(): + if len(sys.argv) != 2: + print("Usage: python parse_result_csv.py path/to/results.csv") + exit(1) + generate_perf_report(sys.argv[1]) + generate_graphs(sys.argv[1]) + + +if __name__ == '__main__': + main() diff --git a/examples/trials/benchmarking/automlbenchmark/requirements.txt b/examples/trials/benchmarking/automlbenchmark/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a7b00059df5d0bfc8c506351cf79bdfa60866d3 --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/requirements.txt @@ -0,0 +1,3 @@ +pandas>=1.2.0 +pyyaml>=5.4.1 +matplotlib>=3.4.1 diff --git a/examples/trials/benchmarking/automlbenchmark/runbenchmark_nni.sh b/examples/trials/benchmarking/automlbenchmark/runbenchmark_nni.sh new file mode 100755 index 0000000000000000000000000000000000000000..d939ca38a251d2bcb31a202f7d74cfaced287d2a --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/runbenchmark_nni.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +time=$(date "+%Y%m%d%H%M%S") +installation='automlbenchmark' +outdir="results_$time" +benchmark='nnivalid' # 'nnismall' 'nnismall-regression' 'nnismall-binary' 'nnismall-multiclass' +serialize=true # if false, run all experiments together in background + +mkdir $outdir $outdir/scorelogs $outdir/reports + +if [ "$#" -eq 0 ]; then + tuner_array=('TPE' 'Random' 'Anneal' 'Evolution' 'GPTuner' 'MetisTuner' 'Hyperband') +else + tuner_array=( "$@" ) +fi + +if [ "$serialize" = true ]; then + # run tuners serially + for tuner in ${tuner_array[*]}; do + echo "python $installation/runbenchmark.py $tuner $benchmark -o $outdir -u nni" + python 
$installation/runbenchmark.py $tuner $benchmark -o $outdir -u nni + done + + # parse final results + echo "python parse_result_csv.py $outdir/results.csv" + python parse_result_csv.py "$outdir/results.csv" + +else + # run all the tuners in background + for tuner in ${tuner_array[*]}; do + mkdir "$outdir/$tuner" "$outdir/$tuner/scorelogs" + echo "python $installation/runbenchmark.py $tuner $benchmark -o $outdir/$tuner -u nni &" + python $installation/runbenchmark.py $tuner $benchmark -o $outdir/$tuner -u nni & + done + + wait + + # aggregate results + touch "$outdir/results.csv" + let i=0 + for tuner in ${tuner_array[*]}; do + cp "$outdir/$tuner/scorelogs"/* $outdir/scorelogs + if [ $i -eq 0 ]; then + cp "$outdir/$tuner/results.csv" "$outdir/results.csv" + else + let nlines=`cat "$outdir/$tuner/results.csv" | wc -l` + ((nlines=nlines-1)) + tail -n $nlines "$outdir/$tuner/results.csv" >> "$outdir/results.csv" + fi + ((i=i+1)) + done + + # parse final results + echo "python parse_result_csv.py $outdir/results.csv" + python parse_result_csv.py "$outdir/results.csv" +fi diff --git a/examples/trials/benchmarking/automlbenchmark/setup.sh b/examples/trials/benchmarking/automlbenchmark/setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..3e30fdf7edf400abbe99a6f0846c4bffb3abe88a --- /dev/null +++ b/examples/trials/benchmarking/automlbenchmark/setup.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# download automlbenchmark repository +if [ ! -d './automlbenchmark' ] ; then + git clone https://github.com/openml/automlbenchmark.git --branch v1.6 --depth 1 +fi + +# install dependencies +pip3 install -r automlbenchmark/requirements.txt +pip3 install -r requirements.txt --ignore-installed diff --git a/examples/trials/benchmarking/hyperband/config_hyperband.yml b/examples/trials/benchmarking/hyperband/config_hyperband.yml new file mode 100644 index 0000000000000000000000000000000000000000..e79e3e0d8ebccfdbd91146c1bbe27dbfd3a955af --- /dev/null +++ b/examples/trials/benchmarking/hyperband/config_hyperband.yml @@ -0,0 +1,20 @@ +advisor: + name: Hyperband + classArgs: + optimize_mode: maximize + + #R: the maximum trial budget (e.g. the number of mini-batches or epochs) that can be + # allocated to a single trial. Each trial should use its trial budget to control how long it runs. + R: 60 + + #eta: proportion of discarded trials + eta: 3 + + #choice: serial, parallelism + exec_mode: serial + +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialConcurrency: 10 +trainingService: + platform: local diff --git a/examples/trials/benchmarking/hyperband/main.py b/examples/trials/benchmarking/hyperband/main.py new file mode 100644 index 0000000000000000000000000000000000000000..44de00e85ff2d8d29adbb8a13e34ab0e4c8b237d --- /dev/null +++ b/examples/trials/benchmarking/hyperband/main.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +A test for Hyperband using nasbench201, so the dependencies for nasbench201 need to be installed first. 
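+Each trial maps its hyperparameters to a NAS-Bench-201 architecture, looks up the recorded test accuracy at the epoch given by TRIAL_BUDGET, and reports it to NNI as the final result. +Run it as an NNI experiment, e.g.: nnictl create --config config_hyperband.yml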
+""" +import argparse +import logging +import random +import time + +import nni +from nni.utils import merge_parameter +from nni.nas.benchmarks.nasbench201 import query_nb201_trial_stats + + +logger = logging.getLogger('test_hyperband') + + +def main(args): + r = args.pop('TRIAL_BUDGET') + dataset = [t for t in query_nb201_trial_stats(args, 200, 'cifar100', include_intermediates=True)] + test_acc = random.choice(dataset)['intermediates'][r - 1]['ori_test_acc'] / 100 + time.sleep(random.randint(0, 10)) + nni.report_final_result(test_acc) + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + +def get_params(): + parser = argparse.ArgumentParser(description='Hyperband Test') + parser.add_argument("--0_1", type=str, default='none') + parser.add_argument("--0_2", type=str, default='none') + parser.add_argument("--0_3", type=str, default='none') + parser.add_argument("--1_2", type=str, default='none') + parser.add_argument("--1_3", type=str, default='none') + parser.add_argument("--2_3", type=str, default='none') + parser.add_argument("--TRIAL_BUDGET", type=int, default=200) + + args, _ = parser.parse_known_args() + return args + +if __name__ == '__main__': + try: + # get parameters form tuner + tuner_params = nni.get_next_parameter() + logger.debug(tuner_params) + params = vars(merge_parameter(get_params(), tuner_params)) + print(params) + main(params) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/benchmarking/hyperband/search_space.json b/examples/trials/benchmarking/hyperband/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..68c91fc94223dca9aee37bcdd1a8ae86a211092e --- /dev/null +++ b/examples/trials/benchmarking/hyperband/search_space.json @@ -0,0 +1,8 @@ +{ + "0_1": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]}, + "0_2": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]}, + "0_3": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]}, + "1_2": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]}, + "1_3": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]}, + "2_3": {"_type": "choice", "_value": ["none", "skip_connect", "conv_1x1", "conv_3x3", "avg_pool_3x3"]} +} diff --git a/examples/trials/cifar10_pytorch/README.md b/examples/trials/cifar10_pytorch/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cc641538210a1fa165d002950e217babd63d0594 --- /dev/null +++ b/examples/trials/cifar10_pytorch/README.md @@ -0,0 +1,6 @@ +This example requires pytorch. +pytorch install package should be chosen based on python version and cuda version. 
+ +For example, for an environment with python==3.5 and cuda==8.0, use the following commands to install PyTorch: +python3 -m pip install http://download.pytorch.org/whl/cu80/torch-0.4.1-cp35-cp35m-linux_x86_64.whl +python3 -m pip install torchvision \ No newline at end of file diff --git a/examples/trials/cifar10_pytorch/README_zh_CN.md b/examples/trials/cifar10_pytorch/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..821f0dd51eedf8fe65d8b390e90f814cda1cded4 --- /dev/null +++ b/examples/trials/cifar10_pytorch/README_zh_CN.md @@ -0,0 +1,3 @@ +此示例需要安装 Pytorch。 Pytorch 安装包需要选择所基于的 Python 和 CUDA 版本。 + +以下是 python==3.5 和 cuda == 8.0 下的环境示例,使用下列命令来安装 Pytorch: python3 -m pip install http://download.pytorch.org/whl/cu80/torch-0.4.1-cp35-cp35m-linux_x86_64.whl python3 -m pip install torchvision \ No newline at end of file diff --git a/examples/trials/cifar10_pytorch/adl.Dockerfile b/examples/trials/cifar10_pytorch/adl.Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..5ed36a80fe0739d465c7763f17b7b04d5a8cf895 --- /dev/null +++ b/examples/trials/cifar10_pytorch/adl.Dockerfile @@ -0,0 +1,8 @@ +# Dockerfile for building AdaptDL-enabled CIFAR10 image +# Set docker build context to current folder + +FROM pytorch/pytorch:1.4-cuda10.1-cudnn7-runtime + +RUN pip install nni adaptdl tensorboard + +COPY ./ /cifar10 diff --git a/examples/trials/cifar10_pytorch/config.yml b/examples/trials/cifar10_pytorch/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..b70083916e02f3028ded557b8eaa8b93d414f8fd --- /dev/null +++ b/examples/trials/cifar10_pytorch/config.yml @@ -0,0 +1,14 @@ +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialGpuNumber: 1 +trialConcurrency: 4 +maxTrialNumber: 10 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local + maxTrialNumberPerGpu: 2 + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. 
Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/cifar10_pytorch/config_adl.yml b/examples/trials/cifar10_pytorch/config_adl.yml new file mode 100644 index 0000000000000000000000000000000000000000..5bef531b601ecd7c0e36ef936c9deffcbce8dda6 --- /dev/null +++ b/examples/trials/cifar10_pytorch/config_adl.yml @@ -0,0 +1,39 @@ +authorName: default +experimentName: example_pytorch_cifar10 +trialConcurrency: 1 +maxExecDuration: 100h +maxTrialNum: 10 +nniManagerIp: {replace_with_your_ip} +trainingServicePlatform: adl +searchSpacePath: search_space_adl.json +logCollection: http +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +trial: + namespace: default + command: python3 /cifar10/main_adl.py + codeDir: /cifar10 + gpuNum: 1 + # the user needs to have a docker image built by the adl.Dockerfile + # the docker image should be pushed to a registry for the cluster to pull + # in our example we provide a docker image from our public docker hub + image: petuum/nni:cifar-example + # optional: + # the user needs to provide the secret if the image is pulled from a private registry + # imagePullSecrets: + # - name: {secret} + adaptive: true + checkpoint: + # the user needs to determine the storageClass in Kubernetes + # For example, for MicroK8s, 'microk8s-hostpath' can be used + storageClass: microk8s-hostpath + storageSize: 1Gi + cpuNum: 1 + memorySize: 1Gi diff --git a/examples/trials/cifar10_pytorch/config_windows.yml b/examples/trials/cifar10_pytorch/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..7ef4807196fcacf0b49b919024c06c1abaa68097 --- /dev/null +++ b/examples/trials/cifar10_pytorch/config_windows.yml @@ -0,0 +1,14 @@ +searchSpaceFile: search_space.json +trialCommand: python main.py +trialGpuNumber: 1 +trialConcurrency: 4 +maxTrialNumber: 10 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local + maxTrialNumberPerGpu: 2 + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. 
Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/cifar10_pytorch/main.py b/examples/trials/cifar10_pytorch/main.py new file mode 100644 index 0000000000000000000000000000000000000000..37222fd53fcf6d429a12f6bbeb4d706cc0c21abe --- /dev/null +++ b/examples/trials/cifar10_pytorch/main.py @@ -0,0 +1,204 @@ +'''Train CIFAR10 with PyTorch.''' +from __future__ import print_function +import argparse +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn + +import torchvision +import torchvision.transforms as transforms + +import os +import logging + +from models import * +from utils import progress_bar + +import nni + +_logger = logging.getLogger("cifar10_pytorch_automl") + +trainloader = None +testloader = None +net = None +criterion = None +optimizer = None +device = 'cuda' if torch.cuda.is_available() else 'cpu' +best_acc = 0.0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +def prepare(args): + global trainloader + global testloader + global net + global criterion + global optimizer + + # Data + print('==> Preparing data..') + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test) + testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2) + + #classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') + + # Model + print('==> Building model..') + if args['model'] == 'vgg': + net = VGG('VGG19') + if args['model'] == 'resnet18': + net = ResNet18() + if args['model'] == 'googlenet': + net = GoogLeNet() + if args['model'] == 'densenet121': + net = DenseNet121() + if args['model'] == 'mobilenet': + net = MobileNet() + if args['model'] == 'dpn92': + net = DPN92() + if args['model'] == 'shufflenetg2': + net = ShuffleNetG2() + if args['model'] == 'senet18': + net = SENet18() + + net = net.to(device) + if device == 'cuda': + net = torch.nn.DataParallel(net) + cudnn.benchmark = True + + criterion = nn.CrossEntropyLoss() + #optimizer = optim.SGD(net.parameters(), lr=args['lr'], momentum=0.9, weight_decay=5e-4) + + if args['optimizer'] == 'SGD': + optimizer = optim.SGD(net.parameters(), lr=args['lr'], momentum=0.9, weight_decay=5e-4) + if args['optimizer'] == 'Adadelta': + optimizer = optim.Adadelta(net.parameters(), lr=args['lr']) + if args['optimizer'] == 'Adagrad': + optimizer = optim.Adagrad(net.parameters(), lr=args['lr']) + if args['optimizer'] == 'Adam': + optimizer = optim.Adam(net.parameters(), lr=args['lr']) + if args['optimizer'] == 'Adamax': + optimizer = optim.Adamax(net.parameters(), lr=args['lr']) + + +# Training +def train(epoch, batches=-1): + global trainloader + global testloader + global net + global criterion + global optimizer + + 
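+    # One pass over the training set: forward pass, cross-entropy loss, backward pass, optimizer step. +    # A positive batches value cuts the epoch short after that many mini-batches (useful for smoke tests).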
print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.*correct/total + + progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + if batches > 0 and (batch_idx+1) >= batches: + return + +def test(epoch): + global best_acc + global trainloader + global testloader + global net + global criterion + global optimizer + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.*correct/total + + progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + # Save checkpoint. + acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/ckpt.t7') + best_acc = acc + return acc, best_acc + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--epochs", type=int, default=200) + + # Maximum mini-batches per epoch, for code testing purpose + parser.add_argument("--batches", type=int, default=-1) + + args, _ = parser.parse_known_args() + + try: + RCV_CONFIG = nni.get_next_parameter() + #RCV_CONFIG = {'lr': 0.1, 'optimizer': 'Adam', 'model':'senet18'} + _logger.debug(RCV_CONFIG) + + prepare(RCV_CONFIG) + acc = 0.0 + best_acc = 0.0 + for epoch in range(start_epoch, start_epoch+args.epochs): + train(epoch, args.batches) + acc, best_acc = test(epoch) + nni.report_intermediate_result(acc) + + nni.report_final_result(best_acc) + except Exception as exception: + _logger.exception(exception) + raise diff --git a/examples/trials/cifar10_pytorch/main_adl.py b/examples/trials/cifar10_pytorch/main_adl.py new file mode 100644 index 0000000000000000000000000000000000000000..f2485b3d8d6148636f42b7c983daf21aa0b8258a --- /dev/null +++ b/examples/trials/cifar10_pytorch/main_adl.py @@ -0,0 +1,170 @@ +# Copyright 2020 Petuum, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +Train CIFAR10 with PyTorch and AdaptDL. 
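+NNI supplies the learning rate through nni.get_next_parameter(), and the rank-0 replica reports accuracy back to NNI.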
This example is based on: +https://github.com/petuum/adaptdl/blob/master/examples/pytorch-cifar/main.py +''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.backends.cudnn as cudnn +import torch.distributed as dist + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * + +import adaptdl +import adaptdl.torch as adl + +from torch.optim.lr_scheduler import MultiStepLR +from torch.utils.tensorboard import SummaryWriter + +import nni + + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--bs', default=128, type=int, help='batch size') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--epochs', default=30, type=int, help='number of epochs') +parser.add_argument('--model', default='ResNet18', type=str, help='model') +parser.add_argument('--autoscale-bsz', dest='autoscale_bsz', default=True, action='store_true', help='autoscale batchsize') +args = parser.parse_args() + +# load the parameters from nni +RCV_CONFIG = nni.get_next_parameter() +args.lr = RCV_CONFIG["lr"] + +device = 'cuda' if torch.cuda.is_available() else 'cpu' + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +adaptdl.torch.init_process_group("nccl" if torch.cuda.is_available() else "gloo") + +if adaptdl.env.replica_rank() == 0: + trainset = torchvision.datasets.CIFAR10(root=adaptdl.env.share_path(), train=True, download=True, transform=transform_train) + trainloader = adl.AdaptiveDataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=2, drop_last=True) + dist.barrier() # We use a barrier here so that non-master replicas would wait for master to download the data +else: + dist.barrier() + trainset = torchvision.datasets.CIFAR10(root=adaptdl.env.share_path(), train=True, download=False, transform=transform_train) + trainloader = adl.AdaptiveDataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=2, drop_last=True) + +if args.autoscale_bsz: + trainloader.autoscale_batch_size(4096, local_bsz_bounds=(32, 1024), gradient_accumulation=True) + +validset = torchvision.datasets.CIFAR10(root=adaptdl.env.share_path(), train=False, download=False, transform=transform_test) +validloader = adl.AdaptiveDataLoader(validset, batch_size=100, shuffle=False, num_workers=2) + +# Model +print('==> Building model..') +net = eval(args.model)() +net = net.to(device) +if device == 'cuda': + cudnn.benchmark = True + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD([{"params": [param]} for param in net.parameters()], + lr=args.lr, momentum=0.9, weight_decay=5e-4) +lr_scheduler = MultiStepLR(optimizer, [30, 45], 0.1) + +net = adl.AdaptiveDataParallel(net, optimizer, lr_scheduler) + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + stats = adl.Accumulator() + for inputs, targets in trainloader: + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + stats["loss_sum"] += loss.item() * targets.size(0) + _, predicted = outputs.max(1) + 
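+        # adl.Accumulator sums these counts locally; stats.synchronized() later aggregates them across replicas.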
stats["total"] += targets.size(0) + stats["correct"] += predicted.eq(targets).sum().item() + + trainloader.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Data/") + net.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Model/") + with stats.synchronized(): + stats["loss_avg"] = stats["loss_sum"] / stats["total"] + stats["accuracy"] = stats["correct"] / stats["total"] + writer.add_scalar("Loss/Train", stats["loss_avg"], epoch) + writer.add_scalar("Accuracy/Train", stats["accuracy"], epoch) + print("Train:", stats) + +def valid(epoch): + net.eval() + stats = adl.Accumulator() + with torch.no_grad(): + for inputs, targets in validloader: + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + stats["loss_sum"] += loss.item() * targets.size(0) + _, predicted = outputs.max(1) + stats["total"] += targets.size(0) + stats["correct"] += predicted.eq(targets).sum().item() + + with stats.synchronized(): + stats["loss_avg"] = stats["loss_sum"] / stats["total"] + stats["accuracy"] = stats["correct"] / stats["total"] + writer.add_scalar("Loss/Valid", stats["loss_avg"], epoch) + writer.add_scalar("Accuracy/Valid", stats["accuracy"], epoch) + + if adaptdl.env.replica_rank() == 0: + nni.report_intermediate_result(stats["accuracy"]) + + print("Valid:", stats) + return stats["accuracy"] + + +tensorboard_dir = os.path.join( + os.getenv("ADAPTDL_TENSORBOARD_LOGDIR", "/adaptdl/tensorboard"), + os.getenv("NNI_TRIAL_JOB_ID", "cifar-adaptdl") +) +if not os.path.exists(tensorboard_dir): + os.makedirs(tensorboard_dir) + +with SummaryWriter(tensorboard_dir) as writer: + acc = 0 + for epoch in adl.remaining_epochs_until(args.epochs): + train(epoch) + acc = valid(epoch) + lr_scheduler.step() + + if adaptdl.env.replica_rank() == 0: + nni.report_final_result(acc) diff --git a/examples/trials/cifar10_pytorch/models/__init__.py b/examples/trials/cifar10_pytorch/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67f5290d31f650da7123cee45d9842dac1296ca7 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/__init__.py @@ -0,0 +1,11 @@ +from .vgg import * +from .densenet import * +from .dpn import * +from .googlenet import * +from .lenet import * +from .mobilenet import * +from .pnasnet import * +from .resnet import * +from .senet import * +from .shufflenet import * + diff --git a/examples/trials/cifar10_pytorch/models/densenet.py b/examples/trials/cifar10_pytorch/models/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..47ebbbe08e40503d6785711acd8bd7dd2cdba768 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/densenet.py @@ -0,0 +1,107 @@ +'''DenseNet in PyTorch.''' +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Bottleneck(nn.Module): + def __init__(self, in_planes, growth_rate): + super(Bottleneck, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False) + self.bn2 = nn.BatchNorm2d(4*growth_rate) + self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + out = self.conv1(F.relu(self.bn1(x))) + out = self.conv2(F.relu(self.bn2(out))) + out = torch.cat([out,x], 1) + return out + + +class Transition(nn.Module): + def __init__(self, in_planes, out_planes): + super(Transition, self).__init__() + self.bn = nn.BatchNorm2d(in_planes) + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, 
bias=False) + + def forward(self, x): + out = self.conv(F.relu(self.bn(x))) + out = F.avg_pool2d(out, 2) + return out + + +class DenseNet(nn.Module): + def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10): + super(DenseNet, self).__init__() + self.growth_rate = growth_rate + + num_planes = 2*growth_rate + self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False) + + self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0]) + num_planes += nblocks[0]*growth_rate + out_planes = int(math.floor(num_planes*reduction)) + self.trans1 = Transition(num_planes, out_planes) + num_planes = out_planes + + self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1]) + num_planes += nblocks[1]*growth_rate + out_planes = int(math.floor(num_planes*reduction)) + self.trans2 = Transition(num_planes, out_planes) + num_planes = out_planes + + self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2]) + num_planes += nblocks[2]*growth_rate + out_planes = int(math.floor(num_planes*reduction)) + self.trans3 = Transition(num_planes, out_planes) + num_planes = out_planes + + self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3]) + num_planes += nblocks[3]*growth_rate + + self.bn = nn.BatchNorm2d(num_planes) + self.linear = nn.Linear(num_planes, num_classes) + + def _make_dense_layers(self, block, in_planes, nblock): + layers = [] + for i in range(nblock): + layers.append(block(in_planes, self.growth_rate)) + in_planes += self.growth_rate + return nn.Sequential(*layers) + + def forward(self, x): + out = self.conv1(x) + out = self.trans1(self.dense1(out)) + out = self.trans2(self.dense2(out)) + out = self.trans3(self.dense3(out)) + out = self.dense4(out) + out = F.avg_pool2d(F.relu(self.bn(out)), 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + +def DenseNet121(): + return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32) + +def DenseNet169(): + return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32) + +def DenseNet201(): + return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32) + +def DenseNet161(): + return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48) + +def densenet_cifar(): + return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12) + +def test(): + net = densenet_cifar() + x = torch.randn(1,3,32,32) + y = net(x) + print(y) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/dpn.py b/examples/trials/cifar10_pytorch/models/dpn.py new file mode 100644 index 0000000000000000000000000000000000000000..d334367fcc9876b104a94b7ae333362ea0a64469 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/dpn.py @@ -0,0 +1,98 @@ +'''Dual Path Networks in PyTorch.''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Bottleneck(nn.Module): + def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer): + super(Bottleneck, self).__init__() + self.out_planes = out_planes + self.dense_depth = dense_depth + + self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False) + self.bn2 = nn.BatchNorm2d(in_planes) + self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(out_planes+dense_depth) + + self.shortcut = nn.Sequential() + if first_layer: + self.shortcut = nn.Sequential( + nn.Conv2d(last_planes, 
out_planes+dense_depth, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(out_planes+dense_depth) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + x = self.shortcut(x) + d = self.out_planes + out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1) + out = F.relu(out) + return out + + +class DPN(nn.Module): + def __init__(self, cfg): + super(DPN, self).__init__() + in_planes, out_planes = cfg['in_planes'], cfg['out_planes'] + num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth'] + + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.last_planes = 64 + self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1) + self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2) + self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2) + self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2) + self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10) + + def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for i,stride in enumerate(strides): + layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0)) + self.last_planes = out_planes + (i+2) * dense_depth + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def DPN26(): + cfg = { + 'in_planes': (96,192,384,768), + 'out_planes': (256,512,1024,2048), + 'num_blocks': (2,2,2,2), + 'dense_depth': (16,32,24,128) + } + return DPN(cfg) + +def DPN92(): + cfg = { + 'in_planes': (96,192,384,768), + 'out_planes': (256,512,1024,2048), + 'num_blocks': (3,4,20,3), + 'dense_depth': (16,32,24,128) + } + return DPN(cfg) + + +def test(): + net = DPN92() + x = torch.randn(1,3,32,32) + y = net(x) + print(y) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/googlenet.py b/examples/trials/cifar10_pytorch/models/googlenet.py new file mode 100644 index 0000000000000000000000000000000000000000..de036d87d323ee4f9c5a2f7944068a9322b0630d --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/googlenet.py @@ -0,0 +1,107 @@ +'''GoogLeNet with PyTorch.''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Inception(nn.Module): + def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes): + super(Inception, self).__init__() + # 1x1 conv branch + self.b1 = nn.Sequential( + nn.Conv2d(in_planes, n1x1, kernel_size=1), + nn.BatchNorm2d(n1x1), + nn.ReLU(True), + ) + + # 1x1 conv -> 3x3 conv branch + self.b2 = nn.Sequential( + nn.Conv2d(in_planes, n3x3red, kernel_size=1), + nn.BatchNorm2d(n3x3red), + nn.ReLU(True), + nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1), + nn.BatchNorm2d(n3x3), + nn.ReLU(True), + ) + + # 1x1 conv -> 5x5 conv branch + self.b3 = nn.Sequential( + nn.Conv2d(in_planes, n5x5red, kernel_size=1), + nn.BatchNorm2d(n5x5red), + nn.ReLU(True), + nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1), + nn.BatchNorm2d(n5x5), + nn.ReLU(True), + 
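+            # Second 3x3 convolution: two stacked 3x3 convs realize the 5x5 receptive field of this branch.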
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1), + nn.BatchNorm2d(n5x5), + nn.ReLU(True), + ) + + # 3x3 pool -> 1x1 conv branch + self.b4 = nn.Sequential( + nn.MaxPool2d(3, stride=1, padding=1), + nn.Conv2d(in_planes, pool_planes, kernel_size=1), + nn.BatchNorm2d(pool_planes), + nn.ReLU(True), + ) + + def forward(self, x): + y1 = self.b1(x) + y2 = self.b2(x) + y3 = self.b3(x) + y4 = self.b4(x) + return torch.cat([y1,y2,y3,y4], 1) + + +class GoogLeNet(nn.Module): + def __init__(self): + super(GoogLeNet, self).__init__() + self.pre_layers = nn.Sequential( + nn.Conv2d(3, 192, kernel_size=3, padding=1), + nn.BatchNorm2d(192), + nn.ReLU(True), + ) + + self.a3 = Inception(192, 64, 96, 128, 16, 32, 32) + self.b3 = Inception(256, 128, 128, 192, 32, 96, 64) + + self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) + + self.a4 = Inception(480, 192, 96, 208, 16, 48, 64) + self.b4 = Inception(512, 160, 112, 224, 24, 64, 64) + self.c4 = Inception(512, 128, 128, 256, 24, 64, 64) + self.d4 = Inception(512, 112, 144, 288, 32, 64, 64) + self.e4 = Inception(528, 256, 160, 320, 32, 128, 128) + + self.a5 = Inception(832, 256, 160, 320, 32, 128, 128) + self.b5 = Inception(832, 384, 192, 384, 48, 128, 128) + + self.avgpool = nn.AvgPool2d(8, stride=1) + self.linear = nn.Linear(1024, 10) + + def forward(self, x): + out = self.pre_layers(x) + out = self.a3(out) + out = self.b3(out) + out = self.maxpool(out) + out = self.a4(out) + out = self.b4(out) + out = self.c4(out) + out = self.d4(out) + out = self.e4(out) + out = self.maxpool(out) + out = self.a5(out) + out = self.b5(out) + out = self.avgpool(out) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def test(): + net = GoogLeNet() + x = torch.randn(1,3,32,32) + y = net(x) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/lenet.py b/examples/trials/cifar10_pytorch/models/lenet.py new file mode 100644 index 0000000000000000000000000000000000000000..d657b7482a75a3058e5795f367dfbb32e948b9d5 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/lenet.py @@ -0,0 +1,23 @@ +'''LeNet in PyTorch.''' +import torch.nn as nn +import torch.nn.functional as F + +class LeNet(nn.Module): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16*5*5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + out = F.relu(self.conv1(x)) + out = F.max_pool2d(out, 2) + out = F.relu(self.conv2(out)) + out = F.max_pool2d(out, 2) + out = out.view(out.size(0), -1) + out = F.relu(self.fc1(out)) + out = F.relu(self.fc2(out)) + out = self.fc3(out) + return out diff --git a/examples/trials/cifar10_pytorch/models/mobilenet.py b/examples/trials/cifar10_pytorch/models/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..497ef1e867d2a597b9b444ebc7a6f30cd5219777 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/mobilenet.py @@ -0,0 +1,61 @@ +'''MobileNet in PyTorch. + +See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" +for more details. 
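+Each Block below is a 3x3 depthwise convolution followed by a 1x1 pointwise convolution.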
+''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Block(nn.Module): + '''Depthwise conv + Pointwise conv''' + def __init__(self, in_planes, out_planes, stride=1): + super(Block, self).__init__() + self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False) + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn2 = nn.BatchNorm2d(out_planes) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + return out + + +class MobileNet(nn.Module): + # (128,2) means conv planes=128, conv stride=2, by default conv stride=1 + cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024] + + def __init__(self, num_classes=10): + super(MobileNet, self).__init__() + self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.layers = self._make_layers(in_planes=32) + self.linear = nn.Linear(1024, num_classes) + + def _make_layers(self, in_planes): + layers = [] + for x in self.cfg: + out_planes = x if isinstance(x, int) else x[0] + stride = 1 if isinstance(x, int) else x[1] + layers.append(Block(in_planes, out_planes, stride)) + in_planes = out_planes + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layers(out) + out = F.avg_pool2d(out, 2) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def test(): + net = MobileNet() + x = torch.randn(1,3,32,32) + y = net(x) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/mobilenetv2.py b/examples/trials/cifar10_pytorch/models/mobilenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..17e5823ef4426ceceae462782a267f89b1ecbc76 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/mobilenetv2.py @@ -0,0 +1,86 @@ +'''MobileNetV2 in PyTorch. + +See the paper "Inverted Residuals and Linear Bottlenecks: +Mobile Networks for Classification, Detection and Segmentation" for more details. 
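+Each Block below is an inverted residual: a 1x1 expansion, a 3x3 depthwise convolution, and a linear 1x1 projection, with a shortcut connection when the stride is 1.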
+''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Block(nn.Module): + '''expand + depthwise + pointwise''' + def __init__(self, in_planes, out_planes, expansion, stride): + super(Block, self).__init__() + self.stride = stride + + planes = expansion * in_planes + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn3 = nn.BatchNorm2d(out_planes) + + self.shortcut = nn.Sequential() + if stride == 1 and in_planes != out_planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_planes), + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out = out + self.shortcut(x) if self.stride==1 else out + return out + + +class MobileNetV2(nn.Module): + # (expansion, out_planes, num_blocks, stride) + cfg = [(1, 16, 1, 1), + (6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10 + (6, 32, 3, 2), + (6, 64, 4, 2), + (6, 96, 3, 1), + (6, 160, 3, 2), + (6, 320, 1, 1)] + + def __init__(self, num_classes=10): + super(MobileNetV2, self).__init__() + # NOTE: change conv1 stride 2 -> 1 for CIFAR10 + self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.layers = self._make_layers(in_planes=32) + self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False) + self.bn2 = nn.BatchNorm2d(1280) + self.linear = nn.Linear(1280, num_classes) + + def _make_layers(self, in_planes): + layers = [] + for expansion, out_planes, num_blocks, stride in self.cfg: + strides = [stride] + [1]*(num_blocks-1) + for stride in strides: + layers.append(Block(in_planes, out_planes, expansion, stride)) + in_planes = out_planes + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layers(out) + out = F.relu(self.bn2(self.conv2(out))) + # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10 + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def test(): + net = MobileNetV2() + x = torch.randn(2,3,32,32) + y = net(x) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/pnasnet.py b/examples/trials/cifar10_pytorch/models/pnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..de8c4d51f2667f84eab86f29be9a00ea7d0ad1c3 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/pnasnet.py @@ -0,0 +1,125 @@ +'''PNASNet in PyTorch. 
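+CellA and CellB below are the two hand-specified cells, built from separable convolutions and max pooling.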
+ +Paper: Progressive Neural Architecture Search +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SepConv(nn.Module): + '''Separable Convolution.''' + def __init__(self, in_planes, out_planes, kernel_size, stride): + super(SepConv, self).__init__() + self.conv1 = nn.Conv2d(in_planes, out_planes, + kernel_size, stride, + padding=(kernel_size-1)//2, + bias=False, groups=in_planes) + self.bn1 = nn.BatchNorm2d(out_planes) + + def forward(self, x): + return self.bn1(self.conv1(x)) + + +class CellA(nn.Module): + def __init__(self, in_planes, out_planes, stride=1): + super(CellA, self).__init__() + self.stride = stride + self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride) + if stride==2: + self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn1 = nn.BatchNorm2d(out_planes) + + def forward(self, x): + y1 = self.sep_conv1(x) + y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1) + if self.stride==2: + y2 = self.bn1(self.conv1(y2)) + return F.relu(y1+y2) + +class CellB(nn.Module): + def __init__(self, in_planes, out_planes, stride=1): + super(CellB, self).__init__() + self.stride = stride + # Left branch + self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride) + self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride) + # Right branch + self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride) + if stride==2: + self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn1 = nn.BatchNorm2d(out_planes) + # Reduce channels + self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn2 = nn.BatchNorm2d(out_planes) + + def forward(self, x): + # Left branch + y1 = self.sep_conv1(x) + y2 = self.sep_conv2(x) + # Right branch + y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1) + if self.stride==2: + y3 = self.bn1(self.conv1(y3)) + y4 = self.sep_conv3(x) + # Concat & reduce channels + b1 = F.relu(y1+y2) + b2 = F.relu(y3+y4) + y = torch.cat([b1,b2], 1) + return F.relu(self.bn2(self.conv2(y))) + +class PNASNet(nn.Module): + def __init__(self, cell_type, num_cells, num_planes): + super(PNASNet, self).__init__() + self.in_planes = num_planes + self.cell_type = cell_type + + self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(num_planes) + + self.layer1 = self._make_layer(num_planes, num_cells=6) + self.layer2 = self._downsample(num_planes*2) + self.layer3 = self._make_layer(num_planes*2, num_cells=6) + self.layer4 = self._downsample(num_planes*4) + self.layer5 = self._make_layer(num_planes*4, num_cells=6) + + self.linear = nn.Linear(num_planes*4, 10) + + def _make_layer(self, planes, num_cells): + layers = [] + for _ in range(num_cells): + layers.append(self.cell_type(self.in_planes, planes, stride=1)) + self.in_planes = planes + return nn.Sequential(*layers) + + def _downsample(self, planes): + layer = self.cell_type(self.in_planes, planes, stride=2) + self.in_planes = planes + return layer + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = self.layer5(out) + out = F.avg_pool2d(out, 8) + out = self.linear(out.view(out.size(0), -1)) + return out + + +def PNASNetA(): + return PNASNet(CellA, num_cells=6, num_planes=44) + +def PNASNetB(): + 
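+    # CellB with 32 initial planes; PNASNetA uses CellA with 44.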
return PNASNet(CellB, num_cells=6, num_planes=32) + + +def test(): + net = PNASNetB() + x = torch.randn(1,3,32,32) + y = net(x) + print(y) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/preact_resnet.py b/examples/trials/cifar10_pytorch/models/preact_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..abb1bc313c011d2ee650c353c515e2cd404503f3 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/preact_resnet.py @@ -0,0 +1,118 @@ +'''Pre-activation ResNet in PyTorch. + +Reference: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Identity Mappings in Deep Residual Networks. arXiv:1603.05027 +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class PreActBlock(nn.Module): + '''Pre-activation version of the BasicBlock.''' + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(PreActBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False) + ) + + def forward(self, x): + out = F.relu(self.bn1(x)) + shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x + out = self.conv1(out) + out = self.conv2(F.relu(self.bn2(out))) + out += shortcut + return out + + +class PreActBottleneck(nn.Module): + '''Pre-activation version of the original Bottleneck module.''' + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(PreActBottleneck, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) + + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False) + ) + + def forward(self, x): + out = F.relu(self.bn1(x)) + shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x + out = self.conv1(out) + out = self.conv2(F.relu(self.bn2(out))) + out = self.conv3(F.relu(self.bn3(out))) + out += shortcut + return out + + +class PreActResNet(nn.Module): + def __init__(self, block, num_blocks, num_classes=10): + super(PreActResNet, self).__init__() + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.linear = nn.Linear(512*block.expansion, num_classes) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x): + out = self.conv1(x) + out = self.layer1(out) + out = self.layer2(out) + 
out = self.layer3(out) + out = self.layer4(out) + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def PreActResNet18(): + return PreActResNet(PreActBlock, [2,2,2,2]) + +def PreActResNet34(): + return PreActResNet(PreActBlock, [3,4,6,3]) + +def PreActResNet50(): + return PreActResNet(PreActBottleneck, [3,4,6,3]) + +def PreActResNet101(): + return PreActResNet(PreActBottleneck, [3,4,23,3]) + +def PreActResNet152(): + return PreActResNet(PreActBottleneck, [3,8,36,3]) + + +def test(): + net = PreActResNet18() + y = net((torch.randn(1,3,32,32))) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/resnet.py b/examples/trials/cifar10_pytorch/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f01cd8021ae5fbf60c6ee8cfd33b7f37e090680c --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/resnet.py @@ -0,0 +1,121 @@ +'''ResNet in PyTorch. + +For Pre-activation ResNet, see 'preact_resnet.py'. + +Reference: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion*planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet(nn.Module): + def __init__(self, block, num_blocks, num_classes=10): + super(ResNet, self).__init__() + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + 
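+        # Channels double (64 -> 512) across the four stages while the stride-2 stages shrink the 32x32 input to 4x4.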
self.linear = nn.Linear(512*block.expansion, num_classes) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def ResNet18(): + return ResNet(BasicBlock, [2,2,2,2]) + +def ResNet34(): + return ResNet(BasicBlock, [3,4,6,3]) + +def ResNet50(): + return ResNet(Bottleneck, [3,4,6,3]) + +def ResNet101(): + return ResNet(Bottleneck, [3,4,23,3]) + +def ResNet152(): + return ResNet(Bottleneck, [3,8,36,3]) + + +def test(): + net = ResNet18() + y = net(torch.randn(1,3,32,32)) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/resnext.py b/examples/trials/cifar10_pytorch/models/resnext.py new file mode 100644 index 0000000000000000000000000000000000000000..7a08f3e7d9fdf3b65aad5b773d4d113c6b796423 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/resnext.py @@ -0,0 +1,95 @@ +'''ResNeXt in PyTorch. + +See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details. +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Block(nn.Module): + '''Grouped convolution block.''' + expansion = 2 + + def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1): + super(Block, self).__init__() + group_width = cardinality * bottleneck_width + self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(group_width) + self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False) + self.bn2 = nn.BatchNorm2d(group_width) + self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion*group_width) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*group_width: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*group_width) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNeXt(nn.Module): + def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10): + super(ResNeXt, self).__init__() + self.cardinality = cardinality + self.bottleneck_width = bottleneck_width + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(num_blocks[0], 1) + self.layer2 = self._make_layer(num_blocks[1], 2) + self.layer3 = self._make_layer(num_blocks[2], 2) + # self.layer4 = self._make_layer(num_blocks[3], 2) + self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes) + + def _make_layer(self, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride)) + self.in_planes = Block.expansion * self.cardinality * 
self.bottleneck_width + # Increase bottleneck_width by 2 after each stage. + self.bottleneck_width *= 2 + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + # out = self.layer4(out) + out = F.avg_pool2d(out, 8) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def ResNeXt29_2x64d(): + return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64) + +def ResNeXt29_4x64d(): + return ResNeXt(num_blocks=[3,3,3], cardinality=4, bottleneck_width=64) + +def ResNeXt29_8x64d(): + return ResNeXt(num_blocks=[3,3,3], cardinality=8, bottleneck_width=64) + +def ResNeXt29_32x4d(): + return ResNeXt(num_blocks=[3,3,3], cardinality=32, bottleneck_width=4) + +def test_resnext(): + net = ResNeXt29_2x64d() + x = torch.randn(1,3,32,32) + y = net(x) + print(y.size()) + +# test_resnext() diff --git a/examples/trials/cifar10_pytorch/models/senet.py b/examples/trials/cifar10_pytorch/models/senet.py new file mode 100644 index 0000000000000000000000000000000000000000..98bfa0ca51dcd07b586432c9f9460be8d1f0b745 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/senet.py @@ -0,0 +1,121 @@ +'''SENet in PyTorch. + +SENet is the winner of ImageNet-2017. The paper is not released yet. +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes) + ) + + # SE layers + self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear + self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + + # Squeeze + w = F.avg_pool2d(out, out.size(2)) + w = F.relu(self.fc1(w)) + w = F.sigmoid(self.fc2(w)) + # Excitation + out = out * w # New broadcasting feature from v0.2! 
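+        # w has shape [N, C, 1, 1]: a per-channel gate from the squeeze step,
+        # broadcast over H and W to rescale every channel of `out`.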
+ + out += self.shortcut(x) + out = F.relu(out) + return out + + +class PreActBlock(nn.Module): + def __init__(self, in_planes, planes, stride=1): + super(PreActBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) + + if stride != 1 or in_planes != planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) + ) + + # SE layers + self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1) + self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1) + + def forward(self, x): + out = F.relu(self.bn1(x)) + shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x + out = self.conv1(out) + out = self.conv2(F.relu(self.bn2(out))) + + # Squeeze + w = F.avg_pool2d(out, out.size(2)) + w = F.relu(self.fc1(w)) + w = F.sigmoid(self.fc2(w)) + # Excitation + out = out * w + + out += shortcut + return out + + +class SENet(nn.Module): + def __init__(self, block, num_blocks, num_classes=10): + super(SENet, self).__init__() + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.linear = nn.Linear(512, num_classes) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes + return nn.Sequential(*layers) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def SENet18(): + return SENet(PreActBlock, [2,2,2,2]) + + +def test(): + net = SENet18() + y = net(torch.randn(1,3,32,32)) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/models/shufflenet.py b/examples/trials/cifar10_pytorch/models/shufflenet.py new file mode 100644 index 0000000000000000000000000000000000000000..3682fd3b1d7d2654d539fb548e4ec7c4705f64e9 --- /dev/null +++ b/examples/trials/cifar10_pytorch/models/shufflenet.py @@ -0,0 +1,109 @@ +'''ShuffleNet in PyTorch. + +See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details. 
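+
+Channel shuffle reorders the channels produced by grouped 1x1 convolutions so
+that information can flow across groups in the next grouped layer.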
+'''
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class ShuffleBlock(nn.Module):
+    def __init__(self, groups):
+        super(ShuffleBlock, self).__init__()
+        self.groups = groups
+
+    def forward(self, x):
+        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
+        N,C,H,W = x.size()
+        g = self.groups
+        # Integer division: view() requires int sizes (C/g is a float in Python 3).
+        return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
+
+
+class Bottleneck(nn.Module):
+    def __init__(self, in_planes, out_planes, stride, groups):
+        super(Bottleneck, self).__init__()
+        self.stride = stride
+
+        mid_planes = out_planes//4  # integer division: nn.Conv2d needs an int channel count
+        g = 1 if in_planes==24 else groups
+        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
+        self.bn1 = nn.BatchNorm2d(mid_planes)
+        self.shuffle1 = ShuffleBlock(groups=g)
+        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
+        self.bn2 = nn.BatchNorm2d(mid_planes)
+        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
+        self.bn3 = nn.BatchNorm2d(out_planes)
+
+        self.shortcut = nn.Sequential()
+        if stride == 2:
+            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
+
+    def forward(self, x):
+        out = F.relu(self.bn1(self.conv1(x)))
+        out = self.shuffle1(out)
+        out = F.relu(self.bn2(self.conv2(out)))
+        out = self.bn3(self.conv3(out))
+        res = self.shortcut(x)
+        out = F.relu(torch.cat([out,res], 1)) if self.stride==2 else F.relu(out+res)
+        return out
+
+
+class ShuffleNet(nn.Module):
+    def __init__(self, cfg):
+        super(ShuffleNet, self).__init__()
+        out_planes = cfg['out_planes']
+        num_blocks = cfg['num_blocks']
+        groups = cfg['groups']
+
+        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(24)
+        self.in_planes = 24
+        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
+        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
+        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
+        self.linear = nn.Linear(out_planes[2], 10)
+
+    def _make_layer(self, out_planes, num_blocks, groups):
+        layers = []
+        for i in range(num_blocks):
+            stride = 2 if i == 0 else 1
+            cat_planes = self.in_planes if i == 0 else 0
+            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups))
+            self.in_planes = out_planes
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        out = F.relu(self.bn1(self.conv1(x)))
+        out = self.layer1(out)
+        out = self.layer2(out)
+        out = self.layer3(out)
+        out = F.avg_pool2d(out, 4)
+        out = out.view(out.size(0), -1)
+        out = self.linear(out)
+        return out
+
+
+def ShuffleNetG2():
+    cfg = {
+        'out_planes': [200,400,800],
+        'num_blocks': [4,8,4],
+        'groups': 2
+    }
+    return ShuffleNet(cfg)
+
+def ShuffleNetG3():
+    cfg = {
+        'out_planes': [240,480,960],
+        'num_blocks': [4,8,4],
+        'groups': 3
+    }
+    return ShuffleNet(cfg)
+
+
+def test():
+    net = ShuffleNetG2()
+    x = torch.randn(1,3,32,32)
+    y = net(x)
+    print(y)
+
+# test()
diff --git a/examples/trials/cifar10_pytorch/models/vgg.py b/examples/trials/cifar10_pytorch/models/vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..08347ffa999c0cf71418f36a50fa649bd4dabea7
--- /dev/null
+++ b/examples/trials/cifar10_pytorch/models/vgg.py
@@ -0,0 +1,47 @@
+'''VGG11/13/16/19 in Pytorch.'''
+import torch
+import torch.nn as nn
+
+
+cfg = {
+    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512,
'M', 512, 512, 'M'], + 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +class VGG(nn.Module): + def __init__(self, vgg_name): + super(VGG, self).__init__() + self.features = self._make_layers(cfg[vgg_name]) + self.classifier = nn.Linear(512, 10) + + def forward(self, x): + out = self.features(x) + out = out.view(out.size(0), -1) + out = self.classifier(out) + return out + + def _make_layers(self, cfg): + layers = [] + in_channels = 3 + for x in cfg: + if x == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), + nn.BatchNorm2d(x), + nn.ReLU(inplace=True)] + in_channels = x + layers += [nn.AvgPool2d(kernel_size=1, stride=1)] + return nn.Sequential(*layers) + + +def test(): + net = VGG('VGG11') + x = torch.randn(2,3,32,32) + y = net(x) + print(y.size()) + +# test() diff --git a/examples/trials/cifar10_pytorch/search_space.json b/examples/trials/cifar10_pytorch/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..562f041183b044d1c36466a37762099b2b9a4934 --- /dev/null +++ b/examples/trials/cifar10_pytorch/search_space.json @@ -0,0 +1,14 @@ +{ + "lr": { + "_type": "choice", + "_value": [0.1, 0.01, 0.001, 0.0001] + }, + "optimizer": { + "_type": "choice", + "_value": ["SGD", "Adadelta", "Adagrad", "Adam", "Adamax"] + }, + "model": { + "_type": "choice", + "_value": ["vgg", "resnet18", "googlenet", "densenet121", "mobilenet", "dpn92", "senet18"] + } +} diff --git a/examples/trials/cifar10_pytorch/search_space_adl.json b/examples/trials/cifar10_pytorch/search_space_adl.json new file mode 100644 index 0000000000000000000000000000000000000000..0dadf05f6a55a4514e4e31372f5cc917476b818a --- /dev/null +++ b/examples/trials/cifar10_pytorch/search_space_adl.json @@ -0,0 +1,5 @@ +{ + "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001]}, + "bs":{"_type":"choice","_value":[64, 96, 128]}, + "model":{"_type":"choice", "_value":["ResNet18", "SENet18", "MobileNet"]} +} diff --git a/examples/trials/cifar10_pytorch/utils.py b/examples/trials/cifar10_pytorch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..65e0d086b830664b69377fa8b980165448e9e3b4 --- /dev/null +++ b/examples/trials/cifar10_pytorch/utils.py @@ -0,0 +1,127 @@ +'''Some helper functions for PyTorch, including: + - get_mean_and_std: calculate the mean and std value of dataset. + - msr_init: net parameter initialization. + - progress_bar: progress bar mimic xlua.progress. 
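+    - format_time: convert a duration in seconds into a compact string (e.g. 1h2m).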
+'''
+import os
+import sys
+import time
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+
+
+def get_mean_and_std(dataset):
+    '''Compute the mean and std value of dataset.'''
+    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
+    mean = torch.zeros(3)
+    std = torch.zeros(3)
+    print('==> Computing mean and std..')
+    for inputs, targets in dataloader:
+        for i in range(3):
+            mean[i] += inputs[:,i,:,:].mean()
+            std[i] += inputs[:,i,:,:].std()
+    mean.div_(len(dataset))
+    std.div_(len(dataset))
+    return mean, std
+
+def init_params(net):
+    '''Init layer parameters.'''
+    for m in net.modules():
+        if isinstance(m, nn.Conv2d):
+            init.kaiming_normal_(m.weight, mode='fan_out')
+            if m.bias is not None:  # `if m.bias:` would raise on a multi-element tensor
+                init.constant_(m.bias, 0)
+        elif isinstance(m, nn.BatchNorm2d):
+            init.constant_(m.weight, 1)
+            init.constant_(m.bias, 0)
+        elif isinstance(m, nn.Linear):
+            init.normal_(m.weight, std=1e-3)
+            if m.bias is not None:
+                init.constant_(m.bias, 0)
+
+term_width = 0
+try:
+    term_width = os.get_terminal_size().columns
+except OSError:  # stdout is not attached to a terminal
+    term_width = 200
+term_width = int(term_width)
+
+TOTAL_BAR_LENGTH = 65.
+last_time = time.time()
+begin_time = last_time
+def progress_bar(current, total, msg=None):
+    global last_time, begin_time
+    if current == 0:
+        begin_time = time.time()  # Reset for new bar.
+
+    cur_len = int(TOTAL_BAR_LENGTH*current/total)
+    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
+
+    sys.stdout.write(' [')
+    for i in range(cur_len):
+        sys.stdout.write('=')
+    sys.stdout.write('>')
+    for i in range(rest_len):
+        sys.stdout.write('.')
+    sys.stdout.write(']')
+
+    cur_time = time.time()
+    step_time = cur_time - last_time
+    last_time = cur_time
+    tot_time = cur_time - begin_time
+
+    L = []
+    L.append('  Step: %s' % format_time(step_time))
+    L.append(' | Tot: %s' % format_time(tot_time))
+    if msg:
+        L.append(' | ' + msg)
+
+    msg = ''.join(L)
+    sys.stdout.write(msg)
+    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
+        sys.stdout.write(' ')
+
+    # Go back to the center of the bar.
+ for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2): + sys.stdout.write('\b') + sys.stdout.write(' %d/%d ' % (current+1, total)) + + if current < total-1: + sys.stdout.write('\r') + else: + sys.stdout.write('\n') + sys.stdout.flush() + +def format_time(seconds): + days = int(seconds / 3600/24) + seconds = seconds - days*3600*24 + hours = int(seconds / 3600) + seconds = seconds - hours*3600 + minutes = int(seconds / 60) + seconds = seconds - minutes*60 + secondsf = int(seconds) + seconds = seconds - secondsf + millis = int(seconds*1000) + + f = '' + i = 1 + if days > 0: + f += str(days) + 'D' + i += 1 + if hours > 0 and i <= 2: + f += str(hours) + 'h' + i += 1 + if minutes > 0 and i <= 2: + f += str(minutes) + 'm' + i += 1 + if secondsf > 0 and i <= 2: + f += str(secondsf) + 's' + i += 1 + if millis > 0 and i <= 2: + f += str(millis) + 'ms' + i += 1 + if f == '': + f = '0ms' + return f diff --git a/examples/trials/efficientnet/.gitignore b/examples/trials/efficientnet/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d94725b323285c163fcc0c75a2b2258f40aef0fd --- /dev/null +++ b/examples/trials/efficientnet/.gitignore @@ -0,0 +1 @@ +EfficientNet-PyTorch \ No newline at end of file diff --git a/examples/trials/efficientnet/README.md b/examples/trials/efficientnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0ed4a844e05bc63ea519ddca126643b6a7a249ca --- /dev/null +++ b/examples/trials/efficientnet/README.md @@ -0,0 +1 @@ +[Documentation](https://nni.readthedocs.io/en/latest/TrialExample/EfficientNet.html) diff --git a/examples/trials/efficientnet/README_zh_CN.md b/examples/trials/efficientnet/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..45092ef3b767a5d012bdc5d618013f516c710043 --- /dev/null +++ b/examples/trials/efficientnet/README_zh_CN.md @@ -0,0 +1 @@ +[文档](https://nni.readthedocs.io/zh/latest/TrialExample/EfficientNet.html) \ No newline at end of file diff --git a/examples/trials/efficientnet/config.yml b/examples/trials/efficientnet/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..0849b74477468aa9c763e110e45b0834f7b2f166 --- /dev/null +++ b/examples/trials/efficientnet/config.yml @@ -0,0 +1,15 @@ +searchSpaceFile: search_net.json +trialCodeDirectory: EfficientNet-PyTorch +trialCommand: python3 main.py /data/imagenet -j 12 -a efficientnet --batch-size 48 --lr 0.048 --wd 1e-5 --epochs 5 --request-from-nni +trialGpuNumber: 1 +trialConcurrency: 4 +maxTrialNumber: 100 +tuner: + className: tuner.FixedProductTuner + codeDirectory: . + classArgs: + product: 2 +trainingService: # For other platforms, check mnist-pytorch example + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. 
Windows 10, Ubuntu desktop)
+    # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegputrial:
diff --git a/examples/trials/efficientnet/config_windows.yml b/examples/trials/efficientnet/config_windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..46f77b7de8b24b648a4939be1356cbc6e6c00e2b
--- /dev/null
+++ b/examples/trials/efficientnet/config_windows.yml
@@ -0,0 +1,15 @@
+searchSpaceFile: search_net.json
+trialCodeDirectory: EfficientNet-PyTorch
+trialCommand: python main.py /data/imagenet -j 12 -a efficientnet --batch-size 48 --lr 0.048 --wd 1e-5 --epochs 5 --request-from-nni
+trialGpuNumber: 1
+trialConcurrency: 4
+maxTrialNumber: 100
+tuner:
+  className: tuner.FixedProductTuner
+  codeDirectory: .
+  classArgs:
+    product: 2
+trainingService: # For other platforms, check mnist-pytorch example
+  platform: local
+  useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
+    # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegputrial:
diff --git a/examples/trials/efficientnet/search_net.json b/examples/trials/efficientnet/search_net.json
new file mode 100644
index 0000000000000000000000000000000000000000..360441711fbf05aa7e3a8c8a4ba1e1fe6c5906de
--- /dev/null
+++ b/examples/trials/efficientnet/search_net.json
@@ -0,0 +1,14 @@
+{
+    "alpha": {
+        "_type": "quniform",
+        "_value": [1.0, 2.0, 0.05]
+    },
+    "beta": {
+        "_type": "quniform",
+        "_value": [1.0, 1.5, 0.05]
+    },
+    "gamma": {
+        "_type": "quniform",
+        "_value": [1.0, 1.5, 0.05]
+    }
+}
diff --git a/examples/trials/efficientnet/tuner.py b/examples/trials/efficientnet/tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..1917fdcf11167577a339e19e4adbc90da459cf90
--- /dev/null
+++ b/examples/trials/efficientnet/tuner.py
@@ -0,0 +1,29 @@
+from nni.algorithms.hpo.gridsearch_tuner import GridSearchTuner
+
+
+class FixedProductTuner(GridSearchTuner):
+    """
+    This tuner is essentially grid search, but it keeps only the parameter sets whose
+    alpha * beta^2 * gamma^2 is approximately equal to `product`.
+    """
+
+    def __init__(self, product):
+        """
+        :param product: the target constant; should be 2 for EfficientNet-B1
+        """
+        super().__init__()
+        self.product = product
+
+    def _expand_parameters(self, para):
+        """
+        Filter the expanded grid, keeping only the qualified parameter sets.
+        """
+        para = super()._expand_parameters(para)
+        if all([key in para[0] for key in ["alpha", "beta", "gamma"]]):  # if this is a set we care about
+            ret_para = []
+            for p in para:
+                prod = p["alpha"] * (p["beta"] ** 2) * (p["gamma"] ** 2)
+                if abs(prod - self.product) < 0.1:
+                    ret_para.append(p)
+            return ret_para
+        return para
diff --git a/examples/trials/ga_squad/README.md b/examples/trials/ga_squad/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4e2054f6165a52006eb6c04457e10210f7e98cb
--- /dev/null
+++ b/examples/trials/ga_squad/README.md
@@ -0,0 +1,308 @@
+# Automatic Model Architecture Search for Reading Comprehension
+This example shows how to use a genetic algorithm to find good model architectures for the reading comprehension task.
+
+## Search Space
+Attention and recurrent neural network (RNN) modules have proven effective for reading comprehension,
+so we define the search space as follows:
+
+1. IDENTITY (Effectively means keep training).
+2. INSERT-RNN-LAYER (Inserts an LSTM.
After comparing the performance of GRU and LSTM in our experiments, we decided to use LSTM here.)
+3. REMOVE-RNN-LAYER
+4. INSERT-ATTENTION-LAYER (Inserts an attention layer.)
+5. REMOVE-ATTENTION-LAYER
+6. ADD-SKIP (Adds an identity skip connection between random layers).
+7. REMOVE-SKIP (Removes a random skip connection).
+
+![ga-squad-logo](./ga_squad.png)
+
+## New version
+We also have another version that costs less time and performs better; it will be released soon.
+
+# How to run this example?
+
+## Run this example on local or remote
+
+### Use the download script to download data
+
+Execute the following commands to download the needed files
+using the download script:
+
+```
+chmod +x ./download.sh
+./download.sh
+```
+
+### Download manually
+
+1. Download "dev-v1.1.json" and "train-v1.1.json" from https://rajpurkar.github.io/SQuAD-explorer/
+
+    ```bash
+    wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json
+    wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json
+    ```
+
+2. Download "glove.840B.300d.txt" from https://nlp.stanford.edu/projects/glove/
+
+    ```bash
+    wget http://nlp.stanford.edu/data/glove.840B.300d.zip
+    unzip glove.840B.300d.zip
+    ```
+
+### Update configuration
+Modify `nni/examples/trials/ga_squad/config.yml`. Here is the default configuration:
+
+```
+authorName: default
+experimentName: example_ga_squad
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 1
+#choice: local, remote
+trainingServicePlatform: local
+#choice: true, false
+useAnnotation: false
+tuner:
+  codeDir: ~/nni/examples/tuners/ga_customer_tuner
+  classFileName: customer_tuner.py
+  className: CustomerTuner
+  classArgs:
+    optimize_mode: maximize
+trial:
+  command: python3 trial.py
+  codeDir: ~/nni/examples/trials/ga_squad
+  gpuNum: 0
+```
+
+In the "trial" part, if you want to use a GPU for the architecture search, change `gpuNum` from `0` to `1`. Increase `maxTrialNum` and `maxExecDuration` according to how long you are willing to wait for the search result.
+
+`trialConcurrency` is the number of trials running concurrently; if you set `gpuNum` to 1, it should match the number of GPUs you want to use.
+
+### Submit this job
+
+```
+nnictl create --config ~/nni/examples/trials/ga_squad/config.yml
+```
+
+## Run this example on OpenPAI
+
+Because of the upload size limit, we only upload the source code; the data is downloaded and the training runs on OpenPAI. This experiment requires sufficient memory (`memoryMB >= 32G`), and the training may last for several hours.
+
+### Update configuration
+Modify `nni/examples/trials/ga_squad/config_pai.yml`. Here is the default configuration:
+
+```
+authorName: default
+experimentName: example_ga_squad
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: pai
+#choice: true, false
+useAnnotation: false
+#Your nni_manager ip
+nniManagerIp: 10.10.10.10
+tuner:
+  codeDir: ../../tuners/ga_customer_tuner
+  classFileName: customer_tuner.py
+  className: CustomerTuner
+  classArgs:
+    optimize_mode: maximize
+trial:
+  command: chmod +x ./download.sh && ./download.sh && python3 trial.py
+  codeDir: .
+  gpuNum: 0
+  cpuNum: 1
+  memoryMB: 32869
+  #The docker image to run NNI job on OpenPAI
+  image: msranni/nni:latest
+paiConfig:
+  #The username to login OpenPAI
+  userName: username
+  #The password to login OpenPAI
+  passWord: password
+  #The host of restful server of OpenPAI
+  host: 10.10.10.10
+```
+
+Please change the default values to your personal account and machine information,
including `nniManagerIp`, `userName`, `passWord` and `host`.
+
+In the "trial" part, if you want to use a GPU for the architecture search, change `gpuNum` from `0` to `1`. Increase `maxTrialNum` and `maxExecDuration` according to how long you are willing to wait for the search result.
+
+`trialConcurrency` is the number of trials running concurrently; if you set `gpuNum` to 1, it should match the number of GPUs you want to use.
+
+### Submit this job
+
+```
+nnictl create --config ~/nni/examples/trials/ga_squad/config_pai.yml
+```
+
+# Technical details about the trial
+
+## How it works
+Like the other examples, the evolution-algorithm-based architecture search for question answering has two parts: the trial and the tuner.
+
+### The trial
+
+The trial contains many files, functions and classes; here we only give a brief introduction to the main files:
+
+* `attention.py` contains an implementation of the attention mechanism in TensorFlow.
+* `data.py` contains functions for data preprocessing.
+* `evaluate.py` contains the evaluation script.
+* `graph.py` contains the definition of the computation graph.
+* `rnn.py` contains an implementation of GRU in TensorFlow.
+* `train_model.py` is a wrapper for the whole question answering model.
+
+Among those files, `trial.py` and `graph_to_tf.py` are special.
+
+`graph_to_tf.py` has a function named `graph_to_network`; here is its skeleton code:
+
+```
+def graph_to_network(input1,
+                     input2,
+                     input1_lengths,
+                     input2_lengths,
+                     graph,
+                     dropout_rate,
+                     is_training,
+                     num_heads=1,
+                     rnn_units=256):
+    topology = graph.is_topology()
+    layers = dict()
+    layers_sequence_lengths = dict()
+    num_units = input1.get_shape().as_list()[-1]
+    layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \
+        positional_encoding(input1, scale=False, zero_pad=False)
+    layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32))
+    layers[0] = dropout(layers[0], dropout_rate, is_training)
+    layers[1] = dropout(layers[1], dropout_rate, is_training)
+    layers_sequence_lengths[0] = input1_lengths
+    layers_sequence_lengths[1] = input2_lengths
+    for _, topo_i in enumerate(topology):
+        if topo_i == '|':
+            continue
+        if graph.layers[topo_i].graph_type == LayerType.input.value:
+            # ......
+        elif graph.layers[topo_i].graph_type == LayerType.attention.value:
+            # ......
+        # More layers to handle
+```
+
+As we can see, this function is effectively a compiler that converts the internal model DAG configuration `graph` (introduced in the `Model configuration format` section below) into a TensorFlow computation graph.
+
+```
+topology = graph.is_topology()
+```
+
+performs topological sorting on the internal graph representation, and the code inside the loop:
+
+```
+for _, topo_i in enumerate(topology):
+```
+
+performs the actual conversion, mapping each layer to a part of the TensorFlow computation graph.
+
+### The tuner
+
+The tuner is much simpler than the trial. The two actually share the same `graph.py`. In addition, the tuner has a `customer_tuner.py`, whose most important class is `CustomerTuner`:
+
+```
+class CustomerTuner(Tuner):
+    # ......
+
+    def generate_parameters(self, parameter_id):
+        """Returns a set of trial graph config, as a serializable object.
+        parameter_id : int
+        """
+        if len(self.population) <= 0:
+            logger.debug("the len of poplution lower than zero.")
+            raise Exception('The population is empty')
+        pos = -1
+        for i in range(len(self.population)):
+            if self.population[i].result == None:
+                pos = i
+                break
+        if pos != -1:
+            indiv = copy.deepcopy(self.population[pos])
+            self.population.pop(pos)
+            temp = json.loads(graph_dumps(indiv.config))
+        else:
+            random.shuffle(self.population)
+            if self.population[0].result > self.population[1].result:
+                self.population[0] = self.population[1]
+            indiv = copy.deepcopy(self.population[0])
+            self.population.pop(1)
+            indiv.mutation()
+            graph = indiv.config
+            temp = json.loads(graph_dumps(graph))
+
+        # ......
+```
+
+As we can see, the overridden method `generate_parameters` implements a fairly naive mutation algorithm. The lines:
+
+```
+            if self.population[0].result > self.population[1].result:
+                self.population[0] = self.population[1]
+            indiv = copy.deepcopy(self.population[0])
+```
+
+control the mutation process: two random individuals are taken from the population, and only the one with the better result is kept and mutated.
+
+## Model configuration format
+
+Here is an example of the model configuration, which is passed from the tuner to the trial during the architecture search procedure.
+
+```
+{
+    "max_layer_num": 50,
+    "layers": [
+        {
+            "input_size": 0,
+            "type": 3,
+            "output_size": 1,
+            "input": [],
+            "size": "x",
+            "output": [4, 5],
+            "is_delete": false
+        },
+        {
+            "input_size": 0,
+            "type": 3,
+            "output_size": 1,
+            "input": [],
+            "size": "y",
+            "output": [4, 5],
+            "is_delete": false
+        },
+        {
+            "input_size": 1,
+            "type": 4,
+            "output_size": 0,
+            "input": [6],
+            "size": "x",
+            "output": [],
+            "is_delete": false
+        },
+        {
+            "input_size": 1,
+            "type": 4,
+            "output_size": 0,
+            "input": [5],
+            "size": "y",
+            "output": [],
+            "is_delete": false
+        },
+        {"Comment": "More layers will be here for actual graphs."}
+    ]
+}
+```
+
+Every model configuration has a "layers" section, which is a JSON list of layer definitions. The definition of each layer is also a JSON object, where:
+
+ * `type` is the type of the layer. 0, 1, 2, 3, 4 correspond to attention, self-attention, RNN, input and output layers, respectively.
+ * `size` is the length of the output. "x" and "y" correspond to document length and question length, respectively.
+ * `input_size` is the number of inputs the layer has.
+ * `input` is the indices of the layers taken as input of this layer.
+ * `output` is the indices of the layers that use this layer's output as their input.
+ * `is_delete` indicates whether the layer is still available.
diff --git a/examples/trials/ga_squad/README_zh_CN.md b/examples/trials/ga_squad/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..325ed83e3538b007bea26a884a32df2c0a6005cf
--- /dev/null
+++ b/examples/trials/ga_squad/README_zh_CN.md
@@ -0,0 +1,301 @@
+# 在阅读理解上使用自动模型架构搜索
+
+该示例展示了如何使用遗传算法为阅读理解任务找到好的模型架构。
+
+## 搜索空间
+
+对于阅读理解项目,注意力和循环神经网络(RNN)模块已经被证明非常有效。 使用的搜索空间如下:
+
+1. IDENTITY (实际上表示继续训练)。
+2. INSERT-RNN-LAYER (插入 LSTM。 在 Experiment 中比较了 GRU 和 LSTM 的性能后,我们决定在这里采用 LSTM。)
+3. REMOVE-RNN-LAYER
+4. INSERT-ATTENTION-LAYER (插入注意力层。)
+5. REMOVE-ATTENTION-LAYER
+6. ADD-SKIP (在随机层之间添加恒等跳跃连接)。
+7. REMOVE-SKIP (移除随机跳跃连接)。
+
+![ga-squad-logo](./ga_squad.png)
+
+## 新版本
+
+另一个时间更快,性能更好的版本正在开发中。 很快将发布。
+
+# 如何运行此示例?
+
+## 在本机或远程上运行此示例
+
+### 使用下载脚本来下载数据
+
+执行下列命令来下载所需要的数据:
+
+    chmod +x ./download.sh
+    ./download.sh
+
+
+### 手动下载
+
+1.
在 https://rajpurkar.github.io/SQuAD-explorer/ 下载 "dev-v1.1.json" 和 "train-v1.1.json"。 + + ```bash + wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json + wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json + ``` + +2. 在 https://nlp.stanford.edu/projects/glove/ 下载 "glove.840B.300d.txt"。 + + ```bash + wget http://nlp.stanford.edu/data/glove.840B.300d.zip + unzip glove.840B.300d.zip + ``` + +### 更新配置 + +修改 `nni/examples/trials/ga_squad/config.yml`,以下是默认配置: + + authorName: default + experimentName: example_ga_squad + trialConcurrency: 1 + maxExecDuration: 1h + maxTrialNum: 1 + #可选项: local, remote + trainingServicePlatform: local + #可选项: true, false + useAnnotation: false + tuner: + codeDir: ~/nni/examples/tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize + trial: + command: python3 trial.py + codeDir: ~/nni/examples/trials/ga_squad + gpuNum: 0 + + +在 "trial" 部分中,如果需要使用 GPU 来进行架构搜索,可将 `gpuNum` 从 `0` 改为 `1`。 根据训练时长,可以增加 `maxTrialNum` 和 `maxExecDuration`。 + +`trialConcurrency` 是并发运行的 Trial 的数量。如果将 `gpuNum` 设置为 1,则需要与 GPU 数量一致。 + +### 提交任务 + + nnictl create --config ~/nni/examples/trials/ga_squad/config.yml + + +## 在 OpenPAI 上运行此示例 + +根据上传大小的限制,仅上传源代码,并在训练过程中下载数据。 本 Experiment 需要的内存 `memoryMB >= 32G`,训练过程可能需要数小时。 + +### 更新配置 + +修改 `nni/examples/trials/ga_squad/config_pai.yml`,以下是默认配置: + + authorName: default + experimentName: example_ga_squad + trialConcurrency: 1 + maxExecDuration: 1h + maxTrialNum: 10 + #可选项: local, remote, pai + trainingServicePlatform: pai + #可选项: true, false + useAnnotation: false + # nni_manager 的 ip + nniManagerIp: 10.10.10.10 + tuner: + codeDir: ../../tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize + trial: + command: chmod +x ./download.sh && ./download.sh && python3 trial.py + codeDir: . + gpuNum: 0 + cpuNum: 1 + memoryMB: 32869 + # 在 OpenPAI 上运行 NNI 的 Docker 映像 + image: msranni/nni:latest + paiConfig: + # 登录 OpenPAI 的用户名 + userName: username + # 登录 OpenPAI 的密码 + passWord: password + # OpenPAI 的 RestFUL 服务器地址 + host: 10.10.10.10 + + +将默认值改为个人账户和服务器信息。 包括 `nniManagerIp`, `userName`, `passWord` 和 `host`. 
+ +在 "trial" 部分中,如果需要使用 GPU 来进行架构搜索,可将 `gpuNum` 从 `0` 改为 `1`。 根据训练时长,可以增加 `maxTrialNum` 和 `maxExecDuration`。 + +`trialConcurrency` 是并发运行的 Trial 的数量。如果将 `gpuNum` 设置为 1,则需要与 GPU 数量一致。 + +### 提交任务 + + nnictl create --config ~/nni/examples/trials/ga_squad/config_pai.yml + + +# 关于此 Trial 的技术细节 + +## 实现方法 + +基于进化算法架构的问答和其它示例一样,有两个部分:Trial 和 Tuner。 + +### Trial + +Trial 有大量的文件、函数和类。 这里只简单介绍最重要的文件: + +* `attention.py` 包含了 Tensorflow 注意力算法的实现。 +* `data.py` 包含了数据处理函数。 +* `evaluate.py` 包含了评估脚本。 +* `graph.py` 包含了计算图的定义。 +* `rnn.py` 包含了 TensorFlow 的 GRU 实现。 +* `train_model.py` 是整个文档模型的封装。 + +这些文件中,`trial.py` 和 `graph_to_tf.py` 非常特别。 + +`graph_to_tf.py` 有一个叫做 `graph_to_network`的函数,其框架代码如下: + + def graph_to_network(input1, + input2, + input1_lengths, + input2_lengths, + graph, + dropout_rate, + is_training, + num_heads=1, + rnn_units=256): + topology = graph.is_topology() + layers = dict() + layers_sequence_lengths = dict() + num_units = input1.get_shape().as_list()[-1] + layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \ + positional_encoding(input1, scale=False, zero_pad=False) + layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32)) + layers[0] = dropout(layers[0], dropout_rate, is_training) + layers[1] = dropout(layers[1], dropout_rate, is_training) + layers_sequence_lengths[0] = input1_lengths + layers_sequence_lengths[1] = input2_lengths + for _, topo_i in enumerate(topology): + if topo_i == '|': + continue + if graph.layers[topo_i].graph_type == LayerType.input.value: + # ...... + elif graph.layers[topo_i].graph_type == LayerType.attention.value: + # ...... + # 处理更多层 + + +正如我们看到的,这个函数实际上是个编译器。它将内部模型的 DAG 配置`图`(在`模型配置格式`章节介绍)转换为 Tensorflow 的计算图。 + + topology = graph.is_topology() + + +将内部图表示进行拓扑排序,代码在下列循环中: + + for _, topo_i in enumerate(topology): + + +执行实际转换,将每层映射为 TensorFlow 计算图中的一部分。 + +### Tuner + +Tuner 比 Trial 代码简单很多。 它们共用了同样的 `graph.py`。 此外,Tuner 有 `customer_tuner.py`,其中最重要的类是 `CustomerTuner`: + + class CustomerTuner(Tuner): + # ...... + + def generate_parameters(self, parameter_id): + """将一组 Trial 图配置作为序列化对象返回。 + parameter_id : int + """ + if len(self.population) <= 0: + logger.debug("the len of poplution lower than zero.") + raise Exception('The population is empty') + pos = -1 + for i in range(len(self.population)): + if self.population[i].result == None: + pos = i + break + if pos != -1: + indiv = copy.deepcopy(self.population[pos]) + self.population.pop(pos) + temp = json.loads(graph_dumps(indiv.config)) + else: + random.shuffle(self.population) + if self.population[0].result > self.population[1].result: + self.population[0] = self.population[1] + indiv = copy.deepcopy(self.population[0]) + self.population.pop(1) + indiv.mutation() + graph = indiv.config + temp = json.loads(graph_dumps(graph)) + + # ...... 
+ + +重载函数 `generate_parameters` 实现了简单的变异算法。 代码如下: + + if self.population[0].result > self.population[1].result: + self.population[0] = self.population[1] + indiv = copy.deepcopy(self.population[0]) + + +控制突变过程。 它会在种群中随机取出两个个体,对更好结果的一个保留数据,并突变另一个。 + +## 模型配置格式 + +这是模型配置的示例,在架构搜索过程中,从 Tuner 传入 Trial 的代码。 + + { + "max_layer_num": 50, + "layers": [ + { + "input_size": 0, + "type": 3, + "output_size": 1, + "input": [], + "size": "x", + "output": [4, 5], + "is_delete": false + }, + { + "input_size": 0, + "type": 3, + "output_size": 1, + "input": [], + "size": "y", + "output": [4, 5], + "is_delete": false + }, + { + "input_size": 1, + "type": 4, + "output_size": 0, + "input": [6], + "size": "x", + "output": [], + "is_delete": false + }, + { + "input_size": 1, + "type": 4, + "output_size": 0, + "input": [5], + "size": "y", + "output": [], + "is_delete": false + }, + {"Comment": "More layers will be here for actual graphs."} + ] + } + + +每个模型配置都有一个 "layers" 部分,这是层定义的 JSON 列表。 每层的定义也是一个 JSON 对象: + +* `type` 是层的类型。 0, 1, 2, 3, 4 对应注意力、自注意力、RNN、输入和输出层。 +* `size` 是输出的长度。 "x", "y" 对应文档长度和问题长度。 +* `input_size` 是该层的输入数量。 +* `input` 表示输入层的索引。 +* `output` 是输出层的索引,该层会作为这些层的输入。 +* `is_delete` 表示此层是否可用。 \ No newline at end of file diff --git a/examples/trials/ga_squad/attention.py b/examples/trials/ga_squad/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..7a7e02d74a5a20b472dc82a78e152473ea4ac098 --- /dev/null +++ b/examples/trials/ga_squad/attention.py @@ -0,0 +1,169 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
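+'''
+Attention modules for the GA-SQuAD trial, implemented with TensorFlow 1.x
+graph-mode APIs.
+'''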
+ +import math + +import tensorflow as tf +from tensorflow.python.ops.rnn_cell_impl import RNNCell + + +def _get_variable(variable_dict, name, shape, initializer=None, dtype=tf.float32): + if name not in variable_dict: + variable_dict[name] = tf.get_variable( + name=name, shape=shape, initializer=initializer, dtype=dtype) + return variable_dict[name] + +class DotAttention: + ''' + DotAttention + ''' + def __init__(self, name, + hidden_dim, + is_vanilla=True, + is_identity_transform=False, + need_padding=False): + self._name = '/'.join([name, 'dot_att']) + self._hidden_dim = hidden_dim + self._is_identity_transform = is_identity_transform + self._need_padding = need_padding + self._is_vanilla = is_vanilla + self._var = {} + + @property + def is_identity_transform(self): + return self._is_identity_transform + + @property + def is_vanilla(self): + return self._is_vanilla + + @property + def need_padding(self): + return self._need_padding + + @property + def hidden_dim(self): + return self._hidden_dim + + @property + def name(self): + return self._name + + @property + def var(self): + return self._var + + def _get_var(self, name, shape, initializer=None): + with tf.variable_scope(self.name): + return _get_variable(self.var, name, shape, initializer) + + def _define_params(self, src_dim, tgt_dim): + hidden_dim = self.hidden_dim + self._get_var('W', [src_dim, hidden_dim]) + if not self.is_vanilla: + self._get_var('V', [src_dim, hidden_dim]) + if self.need_padding: + self._get_var('V_s', [src_dim, src_dim]) + self._get_var('V_t', [tgt_dim, tgt_dim]) + if not self.is_identity_transform: + self._get_var('T', [tgt_dim, src_dim]) + self._get_var('U', [tgt_dim, hidden_dim]) + self._get_var('b', [1, hidden_dim]) + self._get_var('v', [hidden_dim, 1]) + + def get_pre_compute(self, s): + ''' + :param s: [src_sequence, batch_size, src_dim] + :return: [src_sequence, batch_size. 
hidden_dim]
+        '''
+        hidden_dim = self.hidden_dim
+        src_dim = s.get_shape().as_list()[-1]
+        assert src_dim is not None, 'src dim must be defined'
+        W = self._get_var('W', shape=[src_dim, hidden_dim])
+        b = self._get_var('b', shape=[1, hidden_dim])
+        return tf.tensordot(s, W, [[2], [0]]) + b
+
+    def get_prob(self, src, tgt, mask, pre_compute, return_logits=False):
+        '''
+        :param src: [src_sequence_length, batch_size, src_dim]
+        :param tgt: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim]
+        :param mask: [src_sequence_length, batch_size]\
+            or [tgt_sequence_length, src_sequence_length, batch_size]
+        :param pre_compute: [src_sequence_length, batch_size, hidden_dim]
+        :return: [src_sequence_length, batch_size]\
+            or [tgt_sequence_length, src_sequence_length, batch_size]
+        '''
+        s_shape = src.get_shape().as_list()
+        h_shape = tgt.get_shape().as_list()
+        src_dim = s_shape[-1]
+        tgt_dim = h_shape[-1]
+        assert src_dim is not None, 'src dimension must be defined'
+        assert tgt_dim is not None, 'tgt dimension must be defined'
+
+        self._define_params(src_dim, tgt_dim)
+
+        if len(h_shape) == 2:
+            tgt = tf.expand_dims(tgt, 0)
+        if pre_compute is None:
+            pre_compute = self.get_pre_compute(src)
+
+        buf0 = pre_compute
+        buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]])
+        buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1))
+
+        if not self.is_vanilla:
+            xh1 = tgt
+            xh2 = tgt
+            s1 = src
+            if self.need_padding:
+                xh1 = tf.tensordot(xh1, self.var['V_t'], 1)
+                xh2 = tf.tensordot(xh2, self.var['V_t'], 1)  # was 'S_t', which _define_params never creates
+                s1 = tf.tensordot(s1, self.var['V_s'], 1)
+            if not self.is_identity_transform:
+                xh1 = tf.tensordot(xh1, self.var['T'], 1)
+                xh2 = tf.tensordot(xh2, self.var['T'], 1)
+            buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1)
+            buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]]))
+            buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3))
+        else:
+            buf = buf2
+        v = self.var['v']
+        e = tf.tensordot(buf, v, [[3], [0]])
+        e = tf.squeeze(e, axis=[3])
+        tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e))
+        prob = tf.nn.softmax(tmp, 1)
+        if len(h_shape) == 2:
+            prob = tf.squeeze(prob, axis=[0])
+            tmp = tf.squeeze(tmp, axis=[0])
+        if return_logits:
+            return prob, tmp
+        return prob
+
+    def get_att(self, s, prob):
+        '''
+        :param s: [src_sequence_length, batch_size, src_dim]
+        :param prob: [src_sequence_length, batch_size]\
+            or [tgt_sequence_length, src_sequence_length, batch_size]
+        :return: [batch_size, src_dim] or [tgt_sequence_length, batch_size, src_dim]
+        '''
+        buf = s * tf.expand_dims(prob, axis=-1)
+        att = tf.reduce_sum(buf, axis=-3)
+        return att
\ No newline at end of file
diff --git a/examples/trials/ga_squad/config.yml b/examples/trials/ga_squad/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dcf8c5e8ed88063a61ad6a275a9633ccb6595328
--- /dev/null
+++ b/examples/trials/ga_squad/config.yml
@@ -0,0 +1,13 @@
+trialCommand: python3 trial.py
+trialGpuNumber: 0
+trialConcurrency: 1
+maxTrialNumber: 10
+maxExperimentDuration: 1h
+searchSpace: {}  # hard-coded in tuner
+tuner:
+  className: customer_tuner.CustomerTuner
+  codeDirectory: ../../tuners/ga_customer_tuner
+  classArgs:
+    optimize_mode: maximize
+trainingService: # For other platforms, check mnist-pytorch example
+  platform: local
diff --git a/examples/trials/ga_squad/config_windows.yml b/examples/trials/ga_squad/config_windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..102f715d4f6bf3f6d4c0498203b28baee88bbd77
--- /dev/null
+++
b/examples/trials/ga_squad/config_windows.yml @@ -0,0 +1,13 @@ +trialCommand: python trial.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxTrialNumber: 10 +maxExperimentDuration: 1h +searchSpace: {} # hard-coded in tuner +tuner: + className: customer_tuner.CustomerTuner + codeDirectory: ../../tuners/ga_customer_tuner + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/ga_squad/data.py b/examples/trials/ga_squad/data.py new file mode 100644 index 0000000000000000000000000000000000000000..638ae1e84f6b69bc78759d2a5548cc2646871cc9 --- /dev/null +++ b/examples/trials/ga_squad/data.py @@ -0,0 +1,267 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Data processing script for the QA model. +''' + +import csv +import json +from random import shuffle + +import numpy as np + + +class WhitespaceTokenizer: + ''' + Tokenizer for whitespace + ''' + def tokenize(self, text): + ''' + tokenize function in Tokenizer. 
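+        Splits text on spaces and tabs, recording each token's character offsets.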
+ ''' + start = -1 + tokens = [] + for i, character in enumerate(text): + if character == ' ' or character == '\t': + if start >= 0: + word = text[start:i] + tokens.append({ + 'word': word, + 'original_text': word, + 'char_begin': start, + 'char_end': i}) + start = -1 + else: + if start < 0: + start = i + if start >= 0: + tokens.append({ + 'word': text[start:len(text)], + 'original_text': text[start:len(text)], + 'char_begin': start, + 'char_end': len(text) + }) + return tokens + + +def load_from_file(path, fmt=None, is_training=True): + ''' + load data from file + ''' + if fmt is None: + fmt = 'squad' + assert fmt in ['squad', 'csv'], 'input format must be squad or csv' + qp_pairs = [] + if fmt == 'squad': + with open(path) as data_file: + data = json.load(data_file)['data'] + for doc in data: + for paragraph in doc['paragraphs']: + passage = paragraph['context'] + for qa_pair in paragraph['qas']: + question = qa_pair['question'] + qa_id = qa_pair['id'] + if not is_training: + qp_pairs.append( + {'passage': passage, 'question': question, 'id': qa_id}) + else: + for answer in qa_pair['answers']: + answer_begin = int(answer['answer_start']) + answer_end = answer_begin + len(answer['text']) + qp_pairs.append({'passage': passage, + 'question': question, + 'id': qa_id, + 'answer_begin': answer_begin, + 'answer_end': answer_end}) + else: + with open(path, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + line_num = 0 + for row in reader: + qp_pairs.append( + {'passage': row[1], 'question': row[0], 'id': line_num}) + line_num += 1 + return qp_pairs + + +def tokenize(qp_pair, tokenizer=None, is_training=False): + ''' + tokenize function. + ''' + question_tokens = tokenizer.tokenize(qp_pair['question']) + passage_tokens = tokenizer.tokenize(qp_pair['passage']) + if is_training: + question_tokens = question_tokens[:300] + passage_tokens = passage_tokens[:300] + passage_tokens.insert( + 0, {'word': '', 'original_text': '', 'char_begin': 0, 'char_end': 0}) + passage_tokens.append( + {'word': '', 'original_text': '', 'char_begin': 0, 'char_end': 0}) + qp_pair['question_tokens'] = question_tokens + qp_pair['passage_tokens'] = passage_tokens + + +def collect_vocab(qp_pairs): + ''' + Build the vocab from corpus. + ''' + vocab = set() + for qp_pair in qp_pairs: + for word in qp_pair['question_tokens']: + vocab.add(word['word']) + for word in qp_pair['passage_tokens']: + vocab.add(word['word']) + return vocab + + +def shuffle_step(entries, step): + ''' + Shuffle the step + ''' + answer = [] + for i in range(0, len(entries), step): + sub = entries[i:i+step] + shuffle(sub) + answer += sub + return answer + + +def get_batches(qp_pairs, batch_size, need_sort=True): + ''' + Get batches data and shuffle. + ''' + if need_sort: + qp_pairs = sorted(qp_pairs, key=lambda qp: ( + len(qp['passage_tokens']), qp['id']), reverse=True) + batches = [{'qp_pairs': qp_pairs[i:(i + batch_size)]} + for i in range(0, len(qp_pairs), batch_size)] + shuffle(batches) + return batches + + +def get_char_input(data, char_dict, max_char_length): + ''' + Get char input. 
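+    Returns (char_id, char_lengths), shaped [max_char_length, seq_len, batch]
+    and [seq_len, batch] respectively.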
+ ''' + batch_size = len(data) + sequence_length = max(len(d) for d in data) + char_id = np.zeros((max_char_length, sequence_length, + batch_size), dtype=np.int32) + char_lengths = np.zeros((sequence_length, batch_size), dtype=np.float32) + for batch_idx in range(0, min(len(data), batch_size)): + batch_data = data[batch_idx] + for sample_idx in range(0, min(len(batch_data), sequence_length)): + word = batch_data[sample_idx]['word'] + char_lengths[sample_idx, batch_idx] = min(len(word), max_char_length) + for i in range(0, min(len(word), max_char_length)): + char_id[i, sample_idx, batch_idx] = get_id(char_dict, word[i]) + return char_id, char_lengths + + +def get_word_input(data, word_dict, embed, embed_dim): + ''' + Get word input. + ''' + batch_size = len(data) + max_sequence_length = max(len(d) for d in data) + sequence_length = max_sequence_length + word_input = np.zeros((max_sequence_length, batch_size, + embed_dim), dtype=np.float32) + ids = np.zeros((sequence_length, batch_size), dtype=np.int32) + masks = np.zeros((sequence_length, batch_size), dtype=np.float32) + lengths = np.zeros([batch_size], dtype=np.int32) + + for batch_idx in range(0, min(len(data), batch_size)): + batch_data = data[batch_idx] + + lengths[batch_idx] = len(batch_data) + + for sample_idx in range(0, min(len(batch_data), sequence_length)): + word = batch_data[sample_idx]['word'].lower() + if word in word_dict.keys(): + word_input[sample_idx, batch_idx] = embed[word_dict[word]] + ids[sample_idx, batch_idx] = word_dict[word] + masks[sample_idx, batch_idx] = 1 + + word_input = np.reshape(word_input, (-1, embed_dim)) + return word_input, ids, masks, lengths + + +def get_word_index(tokens, char_index): + ''' + Given word return word index. + ''' + for (i, token) in enumerate(tokens): + if token['char_end'] == 0: + continue + if token['char_begin'] <= char_index and char_index <= token['char_end']: + return i + return 0 + + +def get_answer_begin_end(data): + ''' + Get answer's index of begin and end. + ''' + begin = [] + end = [] + for qa_pair in data: + tokens = qa_pair['passage_tokens'] + char_begin = qa_pair['answer_begin'] + char_end = qa_pair['answer_end'] + word_begin = get_word_index(tokens, char_begin) + word_end = get_word_index(tokens, char_end) + begin.append(word_begin) + end.append(word_end) + return np.asarray(begin), np.asarray(end) + + +def get_id(word_dict, word): + ''' + Given word, return word id. + ''' + if word in word_dict.keys(): + return word_dict[word] + return word_dict[''] + + +def get_buckets(min_length, max_length, bucket_count): + ''' + Get bucket by length. + ''' + if bucket_count <= 0: + return [max_length] + unit_length = int((max_length - min_length) // (bucket_count)) + buckets = [min_length + unit_length * + (i + 1) for i in range(0, bucket_count)] + buckets[-1] = max_length + return buckets + + +def find_bucket(length, buckets): + ''' + Find bucket. 
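+    Return the first bucket that can hold `length`, or the largest bucket.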
+    '''
+    for bucket in buckets:
+        if length <= bucket:
+            return bucket
+    return buckets[-1]
diff --git a/examples/trials/ga_squad/download.sh b/examples/trials/ga_squad/download.sh
new file mode 100644
index 0000000000000000000000000000000000000000..308fbaedbf093a4ac967ed8332a46a210aec36cc
--- /dev/null
+++ b/examples/trials/ga_squad/download.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json
+wget http://nlp.stanford.edu/data/glove.840B.300d.zip
+unzip glove.840B.300d.zip
\ No newline at end of file
diff --git a/examples/trials/ga_squad/evaluate.py b/examples/trials/ga_squad/evaluate.py
new file mode 100644
index 0000000000000000000000000000000000000000..27ffd93da9bd74371d0c3979e2810dc6677a6edb
--- /dev/null
+++ b/examples/trials/ga_squad/evaluate.py
@@ -0,0 +1,159 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+'''
+Evaluation scripts for QA model.
+'''
+
+from __future__ import print_function
+from collections import Counter
+import string
+import re
+import argparse
+import json
+import sys
+
+def normalize_answer(str_input):
+    """Lower text and remove punctuation, articles and extra whitespace."""
+    def remove_articles(text):
+        '''
+        Remove "a|an|the"
+        '''
+        return re.sub(r'\b(a|an|the)\b', ' ', text)
+
+    def white_space_fix(text):
+        '''
+        Remove unnecessary whitespace
+        '''
+        return ' '.join(text.split())
+
+    def remove_punc(text):
+        '''
+        Remove punctuation
+        '''
+        exclude = set(string.punctuation)
+        return ''.join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        '''
+        Convert the string to lowercase.
+        '''
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(str_input))))
+
+def f1_score(prediction, ground_truth):
+    '''
+    Calculate the f1 score.
+    '''
+    prediction_tokens = normalize_answer(prediction).split()
+    ground_truth_tokens = normalize_answer(ground_truth).split()
+    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
+    num_same = sum(common.values())
+    if num_same == 0:
+        return 0
+    precision = 1.0 * num_same / len(prediction_tokens)
+    recall = 1.0 * num_same / len(ground_truth_tokens)
+    f1_result = (2 * precision * recall) / (precision + recall)
+    return f1_result
+
+def exact_match_score(prediction, ground_truth):
+    '''
+    Calculate the match score with prediction and ground truth.
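+    Returns True only when the normalized strings are identical.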
+    '''
+    return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+    '''
+    Take the maximum metric value over all ground truths.
+    '''
+    scores_for_ground_truths = []
+    for ground_truth in ground_truths:
+        score = metric_fn(prediction, ground_truth)
+        scores_for_ground_truths.append(score)
+    return max(scores_for_ground_truths)
+
+def _evaluate(dataset, predictions):
+    '''
+    Compute exact-match and F1 over the whole dataset.
+    '''
+    f1_result = exact_match = total = 0
+    count = 0
+    for article in dataset:
+        for paragraph in article['paragraphs']:
+            for qa_pair in paragraph['qas']:
+                total += 1
+                if qa_pair['id'] not in predictions:
+                    count += 1
+                    continue
+                ground_truths = list(map(lambda x: x['text'], qa_pair['answers']))
+                prediction = predictions[qa_pair['id']]
+                exact_match += metric_max_over_ground_truths(
+                    exact_match_score, prediction, ground_truths)
+                f1_result += metric_max_over_ground_truths(
+                    f1_score, prediction, ground_truths)
+    print('total', total, 'exact_match', exact_match, 'unanswered_question', count)
+    exact_match = 100.0 * exact_match / total
+    f1_result = 100.0 * f1_result / total
+    return {'exact_match': exact_match, 'f1': f1_result}
+
+def evaluate(data_file, pred_file):
+    '''
+    Evaluate the prediction file against the dataset file.
+    '''
+    expected_version = '1.1'
+    with open(data_file) as dataset_file:
+        dataset_json = json.load(dataset_file)
+        if dataset_json['version'] != expected_version:
+            print('Evaluation expects v-' + expected_version +
+                  ', but got dataset with v-' + dataset_json['version'],
+                  file=sys.stderr)
+        dataset = dataset_json['data']
+    with open(pred_file) as prediction_file:
+        predictions = json.load(prediction_file)
+    # print(json.dumps(evaluate(dataset, predictions)))
+    result = _evaluate(dataset, predictions)
+    # print('em:', result['exact_match'], 'f1:', result['f1'])
+    return result['exact_match']
+
+def evaluate_with_predictions(data_file, predictions):
+    '''
+    Evaluate with an in-memory predictions dict instead of a prediction file.
+    '''
+    expected_version = '1.1'
+    with open(data_file) as dataset_file:
+        dataset_json = json.load(dataset_file)
+        if dataset_json['version'] != expected_version:
+            print('Evaluation expects v-' + expected_version +
+                  ', but got dataset with v-' + dataset_json['version'],
+                  file=sys.stderr)
+        dataset = dataset_json['data']
+    result = _evaluate(dataset, predictions)
+    return result['exact_match']
+
+if __name__ == '__main__':
+    EXPECT_VERSION = '1.1'
+    parser = argparse.ArgumentParser(
+        description='Evaluation for SQuAD ' + EXPECT_VERSION)
+    parser.add_argument('dataset_file', help='Dataset file')
+    parser.add_argument('prediction_file', help='Prediction File')
+    args = parser.parse_args()
+    print(evaluate(args.dataset_file, args.prediction_file))
diff --git a/examples/trials/ga_squad/ga_squad.png b/examples/trials/ga_squad/ga_squad.png
new file mode 100644
index 0000000000000000000000000000000000000000..4c82cd4654b935778bb74da6e4d051fae67eaf38
Binary files /dev/null and b/examples/trials/ga_squad/ga_squad.png differ
diff --git a/examples/trials/ga_squad/graph.py b/examples/trials/ga_squad/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8da15fe9d86bf58628c1b9f959ac10a1f077a2a
--- /dev/null
+++ b/examples/trials/ga_squad/graph.py
@@ -0,0 +1,287 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+'''
+This module contains the customized Graph class and the related classes and
+functions used to encode a network architecture as a graph.
+'''
+
+
+import copy
+import json
+import random
+from enum import Enum, unique
+
+@unique
+class LayerType(Enum):
+    '''
+    Layer type
+    '''
+    attention = 0
+    self_attention = 1
+    rnn = 2
+    input = 3
+    output = 4
+
+class Layer(object):
+    '''
+    Layer class, which holds one layer's connectivity information in a graph.
+    '''
+    def __init__(self, graph_type, inputs=None, output=None, size=None):
+        self.input = inputs if inputs is not None else []
+        self.output = output if output is not None else []
+        self.graph_type = graph_type
+        self.is_delete = False
+        self.size = size
+        if graph_type == LayerType.attention.value:
+            self.input_size = 2
+            self.output_size = 1
+        elif graph_type == LayerType.rnn.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.self_attention.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.input.value:
+            self.input_size = 0
+            self.output_size = 1
+        elif graph_type == LayerType.output.value:
+            self.input_size = 1
+            self.output_size = 0
+        else:
+            raise ValueError('unsupported layer type: %s' % str(graph_type))
+    def set_size(self, graph_id, size):
+        '''
+        Propagate the upstream layer's size; return False on a size mismatch.
+        '''
+        if self.graph_type == LayerType.attention.value:
+            if self.input[0] == graph_id:
+                self.size = size
+        if self.graph_type == LayerType.rnn.value:
+            self.size = size
+        if self.graph_type == LayerType.self_attention.value:
+            self.size = size
+        if self.graph_type == LayerType.output.value:
+            if self.size != size:
+                return False
+        return True
+
+    def clear_size(self):
+        '''
+        Clear size
+        '''
+        if self.graph_type in (LayerType.attention.value,
+                               LayerType.rnn.value, LayerType.self_attention.value):
+            self.size = None
+
+    def __str__(self):
+        return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(
+            self.graph_type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size)
+
+def graph_dumps(graph):
+    '''
+    Serialize the graph to a JSON string.
+    '''
+    return json.dumps(graph, default=lambda obj: obj.__dict__)
+
+def graph_loads(graph_json):
+    '''
+    Deserialize a graph from its JSON representation.
+    '''
+    layers = []
+    for layer in graph_json['layers']:
+        layer_info = Layer(layer['type'], layer['input'], layer['output'], layer['size'])
+        layer_info.is_delete = layer['is_delete']
+        layers.append(layer_info)
+    graph = Graph(graph_json['max_layer_num'], [], [], [])
+    graph.layers = layers
+    return graph
+
+class Graph(object):
+    '''
+    Customized Graph class.
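+
+    A graph is a list of Layer objects indexed by position: edges are stored
+    as per-layer input/output index lists, and deleted layers stay in place
+    with is_delete set so that indices remain stable across mutations.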
+    '''
+    def __init__(self, max_layer_num, inputs, output, hide):
+        self.layers = []
+        self.max_layer_num = max_layer_num
+
+        for layer in inputs:
+            self.layers.append(layer)
+        for layer in output:
+            self.layers.append(layer)
+        if hide is not None:
+            for layer in hide:
+                self.layers.append(layer)
+        assert self.is_legal()
+
+    def is_topology(self, layers=None):
+        '''
+        Validate the topology and return a topological ordering of the layers.
+        '''
+        if layers is None:
+            layers = self.layers
+        layers_nodle = []
+        result = []
+        for i, layer in enumerate(layers):
+            if layer.is_delete is False:
+                layers_nodle.append(i)
+        while True:
+            flag_break = True
+            layers_toremove = []
+            for layer1 in layers_nodle:
+                flag_arrive = True
+                for layer2 in layers[layer1].input:
+                    if layer2 in layers_nodle:
+                        flag_arrive = False
+                if flag_arrive is True:
+                    for layer2 in layers[layer1].output:
+                        # Size mismatch between connected layers
+                        if layers[layer2].set_size(layer1, layers[layer1].size) is False:
+                            return False
+                    layers_toremove.append(layer1)
+                    result.append(layer1)
+                    flag_break = False
+            for layer in layers_toremove:
+                layers_nodle.remove(layer)
+            result.append('|')
+            if flag_break:
+                break
+        # There is a loop in the graph, or some layers cannot be reached
+        if layers_nodle:
+            return False
+        return result
+
+    def layer_num(self, layers=None):
+        '''
+        Return the number of hidden (non-input, non-output) layers.
+        '''
+        if layers is None:
+            layers = self.layers
+        layer_num = 0
+        for layer in layers:
+            if layer.is_delete is False and layer.graph_type != LayerType.input.value\
+                    and layer.graph_type != LayerType.output.value:
+                layer_num += 1
+        return layer_num
+
+    def is_legal(self, layers=None):
+        '''
+        Judge whether the layers form a legal graph.
+        '''
+        if layers is None:
+            layers = self.layers
+
+        for layer in layers:
+            if layer.is_delete is False:
+                if len(layer.input) != layer.input_size:
+                    return False
+                if len(layer.output) < layer.output_size:
+                    return False
+
+        # layer_num <= max_layer_num
+        if self.layer_num(layers) > self.max_layer_num:
+            return False
+
+        # There is a loop in the graph, or some layers cannot be reached
+        if self.is_topology(layers) is False:
+            return False
+
+        return True
+
+    def mutation(self, only_add=False):
+        '''
+        Mutate the graph by adding or deleting a layer and rewiring edges.
+        '''
+        types = []
+        if self.layer_num() < self.max_layer_num:
+            types.append(0)
+            types.append(1)
+        if self.layer_num() > 5 and only_add is False:
+            types.append(2)
+            types.append(3)
+        # 0 : add a layer , delete an edge
+        # 1 : add a layer , change an edge
+        # 2 : delete a layer, delete an edge
+        # 3 : delete a layer, change an edge
+        graph_type = random.choice(types)
+        layer_type = random.choice([LayerType.attention.value,\
+            LayerType.self_attention.value, LayerType.rnn.value])
+        layers = copy.deepcopy(self.layers)
+        cnt_try = 0
+        while True:
+            layers_in = []
+            layers_out = []
+            layers_del = []
+            for i, layer in enumerate(layers):
+                if layer.is_delete is False:
+                    if layer.graph_type != LayerType.output.value:
+                        layers_in.append(i)
+                    if layer.graph_type != LayerType.input.value:
+                        layers_out.append(i)
+                    if layer.graph_type != LayerType.output.value\
+                            and layer.graph_type != LayerType.input.value:
+                        layers_del.append(i)
+            if graph_type <= 1:
+                new_id = len(layers)
+                out = random.choice(layers_out)
+                inputs = []
+                output = [out]
+                pos = random.randint(0, len(layers[out].input) - 1)
+                last_in = layers[out].input[pos]
+                layers[out].input[pos] = new_id
+                if graph_type == 0:
+                    layers[last_in].output.remove(out)
+                if graph_type == 1:
+                    layers[last_in].output.remove(out)
+                    layers[last_in].output.append(new_id)
+                    inputs = [last_in]
+                lay = Layer(graph_type=layer_type, inputs=inputs, output=output)
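+                # Fill any remaining input slots of the new layer with
+                # randomly chosen upstream layers.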
+ while len(inputs) < lay.input_size: + layer1 = random.choice(layers_in) + inputs.append(layer1) + layers[layer1].output.append(new_id) + lay.input = inputs + layers.append(lay) + else: + layer1 = random.choice(layers_del) + for layer2 in layers[layer1].output: + layers[layer2].input.remove(layer1) + if graph_type == 2: + random_in = random.choice(layers_in) + else: + random_in = random.choice(layers[layer1].input) + layers[layer2].input.append(random_in) + layers[random_in].output.append(layer2) + for layer2 in layers[layer1].input: + layers[layer2].output.remove(layer1) + layers[layer1].is_delete = True + + if self.is_legal(layers): + self.layers = layers + break + else: + layers = copy.deepcopy(self.layers) + cnt_try += 1 + + def __str__(self): + info = "" + for l_id, layer in enumerate(self.layers): + if layer.is_delete is False: + info += 'id:%d ' % l_id + str(layer) + '\n' + return info diff --git a/examples/trials/ga_squad/graph_to_tf.py b/examples/trials/ga_squad/graph_to_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3476290d2cc545f019cf17ab9ff8067f761683 --- /dev/null +++ b/examples/trials/ga_squad/graph_to_tf.py @@ -0,0 +1,338 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import tensorflow as tf +from rnn import XGRUCell +from util import dropout +from graph import LayerType + + +def normalize(inputs, + epsilon=1e-8, + scope="ln"): + '''Applies layer normalization. + + Args: + inputs: A tensor with 2 or more dimensions, where the first dimension has + `batch_size`. + epsilon: A floating number. A very small number for preventing ZeroDivision Error. + scope: Optional scope for `variable_scope`. + reuse: Boolean, whether to reuse the weights of a previous layer + by the same name. + + Returns: + A tensor with the same shape and data dtype as `inputs`. + ''' + with tf.variable_scope(scope): + inputs_shape = inputs.get_shape() + params_shape = inputs_shape[-1:] + + mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) + beta = tf.Variable(tf.zeros(params_shape)) + gamma = tf.Variable(tf.ones(params_shape)) + normalized = (inputs - mean) / ((variance + epsilon) ** (.5)) + outputs = gamma * normalized + beta + + return outputs + + +def multihead_attention(queries, + keys, + scope="multihead_attention", + num_units=None, + num_heads=4, + dropout_rate=0, + is_training=True, + causality=False): + '''Applies multihead attention. 
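+    Computes softmax(Q·K^T / sqrt(d_k))·V independently for each head and
+    concatenates the per-head results.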
+
+    Args:
+        queries: A 3d tensor with shape of [N, T_q, C_q].
+        keys: A 3d tensor with shape of [N, T_k, C_k].
+        num_units: A scalar. Attention size.
+        dropout_rate: A floating point number.
+        is_training: Boolean. Controller of mechanism for dropout.
+        causality: Boolean. If true, units that reference the future are masked.
+        num_heads: An int. Number of heads.
+        scope: Optional scope for `variable_scope`.
+
+    Returns:
+        A 3d tensor with shape of (N, T_q, C).
+    '''
+    with tf.variable_scope(scope):
+        # Set the fall back option for num_units
+        if num_units is None:
+            num_units = queries.get_shape().as_list()[-1]
+
+        Q_ = []
+        K_ = []
+        V_ = []
+        for _ in range(num_heads):
+            Q = tf.layers.dense(queries, num_units / num_heads,
+                                activation=tf.nn.relu)  # (N, T_q, C)
+            K = tf.layers.dense(keys, num_units / num_heads,
+                                activation=tf.nn.relu)  # (N, T_k, C)
+            V = tf.layers.dense(keys, num_units / num_heads,
+                                activation=tf.nn.relu)  # (N, T_k, C)
+            Q_.append(Q)
+            K_.append(K)
+            V_.append(V)
+
+        # Split and concat
+        Q_ = tf.concat(Q_, axis=0)  # (h*N, T_q, C/h)
+        K_ = tf.concat(K_, axis=0)  # (h*N, T_k, C/h)
+        V_ = tf.concat(V_, axis=0)  # (h*N, T_k, C/h)
+
+        # Multiplication
+        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)
+
+        # Scale
+        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
+
+        # Key Masking
+        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1)))  # (N, T_k)
+        key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
+        key_masks = tf.tile(tf.expand_dims(key_masks, 1),
+                            [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)
+
+        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
+        outputs = tf.where(tf.equal(key_masks, 0), paddings,
+                           outputs)  # (h*N, T_q, T_k)
+
+        # Causality = Future blinding
+        if causality:
+            diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
+            tril = tf.contrib.linalg.LinearOperatorTriL(
+                diag_vals).to_dense()  # (T_q, T_k)
+            masks = tf.tile(tf.expand_dims(tril, 0),
+                            [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)
+
+            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
+            outputs = tf.where(tf.equal(masks, 0), paddings,
+                               outputs)  # (h*N, T_q, T_k)
+
+        # Activation
+        outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)
+
+        # Query Masking
+        query_masks = tf.sign(
+            tf.abs(tf.reduce_sum(queries, axis=-1)))  # (N, T_q)
+        query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
+        query_masks = tf.tile(tf.expand_dims(
+            query_masks, -1), [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
+        outputs *= query_masks  # broadcasting. (N, T_q, C)
+
+        # Dropouts
+        outputs = dropout(outputs, dropout_rate, is_training)
+
+        # Weighted sum
+        outputs = tf.matmul(outputs, V_)  # ( h*N, T_q, C/h)
+
+        # Restore shape
+        outputs = tf.concat(tf.split(outputs, num_heads,
+                                     axis=0), axis=2)  # (N, T_q, C)
+
+        # Residual connection
+        if queries.get_shape().as_list()[-1] == num_units:
+            outputs += queries
+
+        # Normalize
+        outputs = normalize(outputs, scope=scope)  # (N, T_q, C)
+
+    return outputs
+
+
+def positional_encoding(inputs,
+                        num_units=None,
+                        zero_pad=True,
+                        scale=True,
+                        scope="positional_encoding",
+                        reuse=None):
+    '''
+    Return the positional embedding.
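+
+    Uses the Transformer sinusoidal scheme: even embedding columns carry
+    sin(pos / 10000^(2i/d)) and odd columns the corresponding cosine.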
+    '''
+    Shape = tf.shape(inputs)
+    N = Shape[0]
+    T = Shape[1]
+    num_units = Shape[2]
+    with tf.variable_scope(scope, reuse=reuse):
+        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])
+
+        # First part of the PE function: sin and cos argument
+        # Second part, apply the cosine to even columns and sin to odds.
+        X = tf.expand_dims(tf.cast(tf.range(T), tf.float32), axis=1)
+        Y = tf.expand_dims(
+            tf.cast(10000 ** -(2 * tf.range(num_units) / num_units), tf.float32), axis=0)
+        h1 = tf.cast((tf.range(num_units) + 1) % 2, tf.float32)
+        h2 = tf.cast((tf.range(num_units) % 2), tf.float32)
+        position_enc = tf.multiply(X, Y)
+        position_enc = tf.sin(position_enc) * tf.multiply(tf.ones_like(X), h1) + \
+            tf.cos(position_enc) * tf.multiply(tf.ones_like(X), h2)
+
+        # Convert to a tensor
+        lookup_table = position_enc
+
+        if zero_pad:
+            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
+                                      lookup_table[1:, :]), 0)
+        outputs = tf.nn.embedding_lookup(lookup_table, position_ind)
+
+        if scale:
+            outputs = outputs * tf.sqrt(tf.cast(num_units, tf.float32))
+
+        return outputs
+
+
+def feedforward(inputs,
+                num_units,
+                scope="feedforward"):
+    '''Point-wise feed forward net.
+
+    Args:
+        inputs: A 3d tensor with shape of [N, T, C].
+        num_units: A list of two integers.
+        scope: Optional scope for `variable_scope`.
+
+    Returns:
+        A 3d tensor with the same shape and dtype as inputs.
+    '''
+    with tf.variable_scope(scope):
+        # Inner layer
+        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
+                  "activation": tf.nn.relu, "use_bias": True}
+        outputs = tf.layers.conv1d(**params)
+
+        # Readout layer
+        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
+                  "activation": None, "use_bias": True}
+        outputs = tf.layers.conv1d(**params)
+
+        # Residual connection
+        outputs += inputs
+
+        # Normalize
+        outputs = normalize(outputs)
+
+        return outputs
+
+
+def rnn(input_states, sequence_lengths, dropout_rate, is_training, num_units):
+    '''Stacked bidirectional GRU encoder over time-major inputs.'''
+    layer_cnt = 1
+    states = []
+    xs = tf.transpose(input_states, perm=[1, 0, 2])
+    for i in range(0, layer_cnt):
+        xs = dropout(xs, dropout_rate, is_training)
+        with tf.variable_scope('layer_' + str(i)):
+            cell_fw = XGRUCell(num_units)
+            cell_bw = XGRUCell(num_units)
+            outputs, _ = tf.nn.bidirectional_dynamic_rnn(
+                cell_fw=cell_fw,
+                cell_bw=cell_bw,
+                dtype=tf.float32,
+                sequence_length=sequence_lengths,
+                inputs=xs,
+                time_major=True)
+
+            y_lr, y_rl = outputs
+            xs = tf.concat([y_lr, y_rl], 2)
+            states.append(xs)
+
+    return tf.transpose(dropout(tf.concat(states, axis=2),
+                                dropout_rate,
+                                is_training), perm=[1, 0, 2])
+
+
+def graph_to_network(input1,
+                     input2,
+                     input1_lengths,
+                     input2_lengths,
+                     graph,
+                     dropout_rate,
+                     is_training,
+                     num_heads=1,
+                     rnn_units=256):
+    '''Convert a Graph object into the corresponding TensorFlow network.'''
+    topology = graph.is_topology()
+    layers = dict()
+    layers_sequence_lengths = dict()
+    num_units = input1.get_shape().as_list()[-1]
+    layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \
+        positional_encoding(input1, scale=False, zero_pad=False)
+    layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32))
+    layers[0] = dropout(layers[0], dropout_rate, is_training)
+    layers[1] = dropout(layers[1], dropout_rate, is_training)
+    layers_sequence_lengths[0] = input1_lengths
+    layers_sequence_lengths[1] = input2_lengths
+    for _, topo_i in enumerate(topology):
+        if topo_i == '|':
+            continue
+        if graph.layers[topo_i].graph_type == LayerType.input.value:
+            continue
+        elif graph.layers[topo_i].graph_type == LayerType.attention.value:
+            with tf.variable_scope('attention_%d' % topo_i):
+                layer = multihead_attention(layers[graph.layers[topo_i].input[0]],
+                                            layers[graph.layers[topo_i].input[1]],
+                                            scope="multihead_attention%d" % topo_i,
+                                            dropout_rate=dropout_rate,
+                                            is_training=is_training,
+                                            num_heads=num_heads,
+                                            num_units=rnn_units * 2)
+                layer = feedforward(layer, scope="feedforward%d" % topo_i,
+                                    num_units=[rnn_units * 2 * 4, rnn_units * 2])
+                layers[topo_i] = layer
+                layers_sequence_lengths[topo_i] = layers_sequence_lengths[
+                    graph.layers[topo_i].input[0]]
+        elif graph.layers[topo_i].graph_type == LayerType.self_attention.value:
+            with tf.variable_scope('self_attention_%d' % topo_i):
+                layer = multihead_attention(layers[graph.layers[topo_i].input[0]],
+                                            layers[graph.layers[topo_i].input[0]],
+                                            scope="multihead_attention%d" % topo_i,
+                                            dropout_rate=dropout_rate,
+                                            is_training=is_training,
+                                            num_heads=num_heads,
+                                            num_units=rnn_units * 2)
+                layer = feedforward(layer, scope="feedforward%d" % topo_i,
+                                    num_units=[rnn_units * 2 * 4, rnn_units * 2])
+                layers[topo_i] = layer
+                layers_sequence_lengths[topo_i] = layers_sequence_lengths[
+                    graph.layers[topo_i].input[0]]
+        elif graph.layers[topo_i].graph_type == LayerType.rnn.value:
+            with tf.variable_scope('rnn_%d' % topo_i):
+                layer = rnn(layers[graph.layers[topo_i].input[0]],
+                            layers_sequence_lengths[graph.layers[topo_i].input[0]],
+                            dropout_rate,
+                            is_training,
+                            rnn_units)
+                layers[topo_i] = layer
+                layers_sequence_lengths[topo_i] = layers_sequence_lengths[
+                    graph.layers[topo_i].input[0]]
+        elif graph.layers[topo_i].graph_type == LayerType.output.value:
+            layers[topo_i] = layers[graph.layers[topo_i].input[0]]
+            if layers[topo_i].get_shape().as_list()[-1] != rnn_units * 1 * 2:
+                with tf.variable_scope('add_dense'):
+                    layers[topo_i] = tf.layers.dense(
+                        layers[topo_i], units=rnn_units*2)
+    # By construction, the two output layers sit at indices 2 and 3.
+    return layers[2], layers[3]
diff --git a/examples/trials/ga_squad/requirements.txt b/examples/trials/ga_squad/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..58f286f85752324320f8ea3a4341894398ac0736
--- /dev/null
+++ b/examples/trials/ga_squad/requirements.txt
@@ -0,0 +1 @@
+tensorflow==1.15.4
diff --git a/examples/trials/ga_squad/rnn.py b/examples/trials/ga_squad/rnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..82f7d070bf1e560f69f25aa990eee12959684941
--- /dev/null
+++ b/examples/trials/ga_squad/rnn.py
@@ -0,0 +1,118 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import tensorflow as tf
+from tensorflow.python.ops.rnn_cell_impl import RNNCell
+
+
+class GRU:
+    '''
+    GRU implemented with explicit weight matrices.
+    '''
+    def __init__(self, name, input_dim, hidden_dim):
+        self.name = '/'.join([name, 'gru'])
+        self.input_dim = input_dim
+        self.hidden_dim = hidden_dim
+        self.w_matrix = None
+        self.U = None
+        self.bias = None
+
+    def define_params(self):
+        '''
+        Define parameters.
+        '''
+        input_dim = self.input_dim
+        hidden_dim = self.hidden_dim
+        prefix = self.name
+        self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1),
+                                    name='/'.join([prefix, 'W']))
+        self.U = tf.Variable(tf.random_normal([hidden_dim, 3 * hidden_dim], stddev=0.1),
+                             name='/'.join([prefix, 'U']))
+        self.bias = tf.Variable(tf.random_normal([1, 3 * hidden_dim], stddev=0.1),
+                                name='/'.join([prefix, 'b']))
+        return self
+
+    def build(self, x, h, mask=None):
+        '''
+        Run one GRU step for input x and previous state h.
+        '''
+        xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
+        hu = tf.split(tf.matmul(h, self.U), 3, 1)
+        r = tf.sigmoid(xw[0] + hu[0])
+        z = tf.sigmoid(xw[1] + hu[1])
+        h1 = tf.tanh(xw[2] + r * hu[2])
+        next_h = h1 * (1 - z) + h * z
+        if mask is not None:
+            next_h = next_h * mask + h * (1 - mask)
+        return next_h
+
+    def build_sequence(self, xs, masks, init, is_left_to_right):
+        '''
+        Run the GRU over a whole sequence, in either direction.
+        '''
+        states = []
+        last = init
+        if is_left_to_right:
+            for i, xs_i in enumerate(xs):
+                h = self.build(xs_i, last, masks[i])
+                states.append(h)
+                last = h
+        else:
+            for i in range(len(xs) - 1, -1, -1):
+                h = self.build(xs[i], last, masks[i])
+                states.insert(0, h)
+                last = h
+        return states
+
+
+class XGRUCell(RNNCell):
+    '''GRU cell packaged as an RNNCell so it can drive tf.nn.*_dynamic_rnn.'''
+
+    def __init__(self, hidden_dim, reuse=None):
+        super(XGRUCell, self).__init__(_reuse=reuse)
+        self._num_units = hidden_dim
+        self._activation = tf.tanh
+
+    @property
+    def state_size(self):
+        return self._num_units
+
+    @property
+    def output_size(self):
+        return self._num_units
+
+    def call(self, inputs, state):
+        input_dim = inputs.get_shape()[-1]
+        assert input_dim is not None, "input dimension must be defined"
+        W = tf.get_variable(
+            name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
+        U = tf.get_variable(
+            name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
+        b = tf.get_variable(
+            name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
+
+        xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
+        hu = tf.split(tf.matmul(state, U), 3, 1)
+        r = tf.sigmoid(xw[0] + hu[0])
+        z = tf.sigmoid(xw[1] + hu[1])
+        h1 = self._activation(xw[2] + r * hu[2])
+        next_h = h1 * (1 - z) + state * z
+        return next_h, next_h
diff --git a/examples/trials/ga_squad/train_model.py b/examples/trials/ga_squad/train_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..36ea2d5ccd6b460b69644a257f55ede3c5ce4b1a
--- /dev/null
+++ b/examples/trials/ga_squad/train_model.py
@@ -0,0 +1,264 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Train the network combined by RNN and attention. +''' + +import tensorflow as tf + +from attention import DotAttention +from rnn import XGRUCell +from util import dropout +from graph_to_tf import graph_to_network + + +class GAGConfig: + """The class for model hyper-parameter configuration.""" + def __init__(self): + self.batch_size = 128 + + self.dropout = 0.1 + + self.char_vcb_size = 1500 + self.max_char_length = 20 + self.char_embed_dim = 100 + + self.max_query_length = 40 + self.max_passage_length = 800 + + self.att_is_vanilla = True + self.att_need_padding = False + self.att_is_id = False + + self.ptr_dim = 70 + self.learning_rate = 0.1 + self.labelsmoothing = 0.1 + self.num_heads = 1 + self.rnn_units = 256 + + +class GAG: + """The class for the computation graph based QA model.""" + def __init__(self, cfg, embed, graph): + self.cfg = cfg + self.embed = embed + self.graph = graph + + self.query_word = None + self.query_mask = None + self.query_lengths = None + self.passage_word = None + self.passage_mask = None + self.passage_lengths = None + self.answer_begin = None + self.answer_end = None + self.query_char_ids = None + self.query_char_lengths = None + self.passage_char_ids = None + self.passage_char_lengths = None + self.passage_states = None + self.query_states = None + self.query_init = None + self.begin_prob = None + self.end_prob = None + self.loss = None + self.train_op = None + + + def build_net(self, is_training): + """Build the whole neural network for the QA model.""" + cfg = self.cfg + with tf.device('/cpu:0'): + word_embed = tf.get_variable( + name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False) + char_embed = tf.get_variable(name='char_embed', + shape=[cfg.char_vcb_size, + cfg.char_embed_dim], + dtype=tf.float32) + + # [query_length, batch_size] + self.query_word = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='query_word') + self.query_mask = tf.placeholder(dtype=tf.float32, + shape=[None, None], + name='query_mask') + # [batch_size] + self.query_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], name='query_lengths') + + # [passage_length, batch_size] + self.passage_word = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='passage_word') + self.passage_mask = tf.placeholder( + dtype=tf.float32, shape=[None, None], name='passage_mask') + # [batch_size] + self.passage_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], 
name='passage_lengths') + + if is_training: + self.answer_begin = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_begin') + self.answer_end = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_end') + + self.query_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='query_char_ids') + # sequence_length, batch_size + self.query_char_lengths = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='query_char_lengths') + + self.passage_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='passage_char_ids') + # sequence_length, batch_size + self.passage_char_lengths = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='passage_char_lengths') + + query_char_states = self.build_char_states(char_embed=char_embed, + is_training=is_training, + reuse=False, + char_ids=self.query_char_ids, + char_lengths=self.query_char_lengths) + + passage_char_states = self.build_char_states(char_embed=char_embed, + is_training=is_training, + reuse=True, + char_ids=self.passage_char_ids, + char_lengths=self.passage_char_lengths) + + with tf.variable_scope("encoding") as scope: + query_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.query_word), query_char_states], axis=2) + scope.reuse_variables() + passage_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.passage_word), passage_char_states], axis=2) + passage_states = tf.transpose(passage_states, perm=[1, 0, 2]) + query_states = tf.transpose(query_states, perm=[1, 0, 2]) + self.passage_states = passage_states + self.query_states = query_states + + output, output2 = graph_to_network(passage_states, query_states, + self.passage_lengths, self.query_lengths, + self.graph, self.cfg.dropout, + is_training, num_heads=cfg.num_heads, + rnn_units=cfg.rnn_units) + + passage_att_mask = self.passage_mask + batch_size_x = tf.shape(self.query_lengths) + answer_h = tf.zeros( + tf.concat([batch_size_x, tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0)) + + answer_context = tf.reduce_mean(output2, axis=1) + + query_init_w = tf.get_variable( + 'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim]) + self.query_init = query_init_w + answer_context = tf.matmul(answer_context, query_init_w) + + output = tf.transpose(output, perm=[1, 0, 2]) + + with tf.variable_scope('answer_ptr_layer'): + ptr_att = DotAttention('ptr', + hidden_dim=cfg.ptr_dim, + is_vanilla=self.cfg.att_is_vanilla, + is_identity_transform=self.cfg.att_is_id, + need_padding=self.cfg.att_need_padding) + answer_pre_compute = ptr_att.get_pre_compute(output) + ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim) + begin_prob, begin_logits = ptr_att.get_prob(output, answer_context, passage_att_mask, + answer_pre_compute, True) + att_state = ptr_att.get_att(output, begin_prob) + (_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h) + answer_context = answer_h + end_prob, end_logits = ptr_att.get_prob(output, answer_context, + passage_att_mask, answer_pre_compute, + True) + + self.begin_prob = tf.transpose(begin_prob, perm=[1, 0]) + self.end_prob = tf.transpose(end_prob, perm=[1, 0]) + begin_logits = tf.transpose(begin_logits, perm=[1, 0]) + end_logits = tf.transpose(end_logits, perm=[1, 0]) + + if is_training: + def label_smoothing(inputs, masks, epsilon=0.1): + """Modify target for label smoothing.""" + epsilon = cfg.labelsmoothing + num_of_channel = tf.shape(inputs)[-1] # number of channels + inputs = tf.cast(inputs, tf.float32) + return (((1 - 
epsilon) * inputs) + (epsilon / + tf.cast(num_of_channel, tf.float32))) * masks + cost1 = tf.reduce_mean( + tf.losses.softmax_cross_entropy(label_smoothing( + tf.one_hot(self.answer_begin, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits)) + cost2 = tf.reduce_mean( + tf.losses.softmax_cross_entropy( + label_smoothing(tf.one_hot(self.answer_end, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), end_logits)) + + reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + l2_loss = tf.reduce_sum(reg_ws) + loss = cost1 + cost2 + l2_loss + self.loss = loss + + optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate) + self.train_op = optimizer.minimize(self.loss) + + return tf.stack([self.begin_prob, self.end_prob]) + + def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths): + """Build char embedding network for the QA model.""" + max_char_length = self.cfg.max_char_length + + inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids), + self.cfg.dropout, is_training) + inputs = tf.reshape( + inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim]) + char_lengths = tf.reshape(char_lengths, shape=[-1]) + with tf.variable_scope('char_encoding', reuse=reuse): + cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim) + cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim) + _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn( + cell_fw=cell_fw, + cell_bw=cell_bw, + sequence_length=char_lengths, + inputs=inputs, + time_major=True, + dtype=tf.float32 + ) + + left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim]) + + right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim]) + + states = tf.concat([left_right, right_left], axis=1) + out_shape = tf.shape(char_ids)[1:3] + out_shape = tf.concat([out_shape, tf.constant( + value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0) + return tf.reshape(states, shape=out_shape) diff --git a/examples/trials/ga_squad/trial.py b/examples/trials/ga_squad/trial.py new file mode 100644 index 0000000000000000000000000000000000000000..1e9b53b8957f7188ce94ebb979e9fbc0af7ba923 --- /dev/null +++ b/examples/trials/ga_squad/trial.py @@ -0,0 +1,450 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
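+
+'''
+Trial code for the GA-SQuAD example: rebuilds a network from the graph
+description received from the tuner, trains it on SQuAD, and reports
+accuracy back to NNI.
+'''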
+
+import os
+
+import logging
+logger = logging.getLogger('ga_squad')
+
+try:
+    import argparse
+    import heapq
+    import json
+    import numpy as np
+    import pickle
+    import graph
+
+    from util import Timer
+
+    import nni
+    import data
+    import evaluate
+    from train_model import *
+
+    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+except:
+    logger.exception('Catch exception in trial.py.')
+    raise
+
+
+def get_config():
+    '''
+    Get config from the argument parser.
+    '''
+    parser = argparse.ArgumentParser(
+        description='This program uses a genetic algorithm to search for an architecture on SQuAD.')
+    parser.add_argument('--input_file', type=str,
+                        default='./train-v1.1.json', help='input file')
+    parser.add_argument('--dev_file', type=str,
+                        default='./dev-v1.1.json', help='dev file')
+    parser.add_argument('--embedding_file', type=str,
+                        default='./glove.840B.300d.txt', help='embedding file')
+    parser.add_argument('--root_path', default='./data/',
+                        type=str, help='Root path of models')
+    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
+    parser.add_argument('--save_path', type=str,
+                        default='./save', help='save path dir')
+    parser.add_argument('--learning_rate', type=float, default=0.0001,
+                        help='learning rate (use half the original value when reloading data to fine-tune)')
+    parser.add_argument('--max_epoch', type=int, default=30)
+    parser.add_argument('--dropout_rate', type=float,
+                        default=0.1, help='dropout_rate')
+    parser.add_argument('--labelsmoothing', type=float,
+                        default=0.1, help='labelsmoothing')
+    parser.add_argument('--num_heads', type=int, default=1, help='num_heads')
+    parser.add_argument('--rnn_units', type=int, default=256, help='rnn_units')
+
+    args = parser.parse_args()
+    return args
+
+
+def get_id(word_dict, word):
+    '''
+    Return the word id, falling back to the empty-string entry for unknown words.
+    '''
+    return word_dict.get(word, word_dict[''])
+
+
+def load_embedding(path):
+    '''
+    Return the embedding dict loaded from the given file path.
+    '''
+    EMBEDDING_DIM = 300
+    embedding_dict = {}
+    with open(path, 'r', encoding='utf-8') as file:
+        pairs = [line.strip('\r\n').split() for line in file.readlines()]
+        for pair in pairs:
+            if len(pair) == EMBEDDING_DIM + 1:
+                embedding_dict[pair[0]] = [float(x) for x in pair[1:]]
+    logger.debug('embedding_dict size: %d', len(embedding_dict))
+    return embedding_dict
+
+
+class MaxQueue:
+    '''
+    Queue that keeps the largest values pushed so far.
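+
+    Backed by a fixed-capacity min-heap: once full, pushing a new item drops
+    the smallest, so the queue always holds the k largest items seen.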
+    '''
+
+    def __init__(self, capacity):
+        assert capacity > 0, 'queue size must be larger than 0'
+        self._capacity = capacity
+        self._entries = []
+
+    @property
+    def entries(self):
+        return self._entries
+
+    @property
+    def capacity(self):
+        return self._capacity
+
+    @property
+    def size(self):
+        return len(self._entries)
+
+    def clear(self):
+        self._entries = []
+
+    def push(self, item):
+        if self.size < self.capacity:
+            heapq.heappush(self.entries, item)
+        else:
+            heapq.heappushpop(self.entries, item)
+
+
+def find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length):
+    '''
+    Find the span (left, right) maximizing left_prob[left] * right_prob[right],
+    subject to the maximum answer length.
+    '''
+    left = 0
+    right = 0
+    max_prob = left_prob[0] * right_prob[0]
+    for i in range(0, passage_length):
+        left_p = left_prob[i]
+        for j in range(i, min(i + max_answer_length, passage_length)):
+            total_prob = left_p * right_prob[j]
+            if max_prob < total_prob:
+                left, right, max_prob = i, j, total_prob
+    return [(max_prob, left, right)]
+
+
+def write_prediction(path, position1_result, position2_result):
+    '''
+    Write the predicted begin/end positions to a file, one pair per line.
+    '''
+    import codecs
+
+    with codecs.open(path, 'w', encoding='utf8') as file:
+        batch_num = len(position1_result)
+        for i in range(batch_num):
+            position1_batch = position1_result[i]
+            position2_batch = position2_result[i]
+
+            for j in range(position1_batch.shape[0]):
+                file.write(str(position1_batch[j]) +
+                           '\t' + str(position2_batch[j]) + '\n')
+
+
+def find_kbest_answer_span(k, left_prob, right_prob, passage_length, max_answer_length):
+    '''
+    Find the k best answer spans, sorted by descending probability.
+    '''
+    if k == 1:
+        return find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length)
+
+    queue = MaxQueue(k)
+    for i in range(0, passage_length):
+        left_p = left_prob[i]
+        for j in range(i, min(i + max_answer_length, passage_length)):
+            total_prob = left_p * right_prob[j]
+            queue.push((total_prob, i, j))
+    return list(sorted(queue.entries, key=lambda x: -x[0]))
+
+
+def run_epoch(batches, answer_net, is_training):
+    '''
+    Run one epoch of training or evaluation over the given batches.
+    '''
+    if not is_training:
+        position1_result = []
+        position2_result = []
+        contexts = []
+        ids = []
+
+    loss_sum = 0
+    timer = Timer()
+    count = 0
+    for batch in batches:
+        used = timer.get_elapsed(False)
+        count += 1
+        qps = batch['qp_pairs']
+        question_tokens = [qp['question_tokens'] for qp in qps]
+        passage_tokens = [qp['passage_tokens'] for qp in qps]
+        context = [(qp['passage'], qp['passage_tokens']) for qp in qps]
+        sample_id = [qp['id'] for qp in qps]
+
+        _, query, query_mask, query_lengths = data.get_word_input(
+            data=question_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim)
+        _, passage, passage_mask, passage_lengths = data.get_word_input(
+            data=passage_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim)
+
+        query_char, query_char_lengths = data.get_char_input(
+            data=question_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length)
+
+        passage_char, passage_char_lengths = data.get_char_input(
+            data=passage_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length)
+
+        if is_training:
+            answer_begin, answer_end = data.get_answer_begin_end(qps)
+
+        if is_training:
+            feed_dict = {answer_net.query_word: query,
+                         answer_net.query_mask: query_mask,
+                         answer_net.query_lengths: query_lengths,
+                         answer_net.passage_word: passage,
+                         answer_net.passage_mask: passage_mask,
+                         answer_net.passage_lengths: passage_lengths,
+                         answer_net.query_char_ids: query_char,
+                         answer_net.query_char_lengths: query_char_lengths,
+                         answer_net.passage_char_ids: passage_char,
+                         answer_net.passage_char_lengths: passage_char_lengths,
+                         answer_net.answer_begin: answer_begin,
+                         answer_net.answer_end: answer_end}
+            loss, _ = sess.run(
+                [answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
+            if count % 100 == 0:
+                logger.debug('%d %g expect:%g, loss:%g', count, used, used / count * len(batches), loss)
+            loss_sum += loss
+        else:
+            feed_dict = {answer_net.query_word: query,
+                         answer_net.query_mask: query_mask,
+                         answer_net.query_lengths: query_lengths,
+                         answer_net.passage_word: passage,
+                         answer_net.passage_mask: passage_mask,
+                         answer_net.passage_lengths: passage_lengths,
+                         answer_net.query_char_ids: query_char,
+                         answer_net.query_char_lengths: query_char_lengths,
+                         answer_net.passage_char_ids: passage_char,
+                         answer_net.passage_char_lengths: passage_char_lengths}
+            position1, position2 = sess.run(
+                [answer_net.begin_prob, answer_net.end_prob], feed_dict=feed_dict)
+            position1_result += position1.tolist()
+            position2_result += position2.tolist()
+            contexts += context
+            ids = np.concatenate((ids, sample_id))
+            if count % 100 == 0:
+                logger.debug('%d %g expect:%g', count, used, used / count * len(batches))
+    loss = loss_sum / len(batches)
+    if is_training:
+        return loss
+    return loss, position1_result, position2_result, ids, contexts
+
+
+def generate_predict_json(position1_result, position2_result, ids, passage_tokens):
+    '''
+    Generate the prediction json from predicted begin/end probabilities.
+    '''
+    predict_len = len(position1_result)
+    logger.debug('total prediction num is %s', str(predict_len))
+
+    answers = {}
+    for i in range(predict_len):
+        sample_id = ids[i]
+        passage, tokens = passage_tokens[i]
+        kbest = find_best_answer_span(
+            position1_result[i], position2_result[i], len(tokens), 23)
+        _, start, end = kbest[0]
+        answer = passage[tokens[start]['char_begin']:tokens[end]['char_end']]
+        answers[sample_id] = answer
+    logger.debug('generate predict done.')
+    return answers
+
+
+def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
+    '''
+    Load and tokenize the data, filling in the char and word vocabularies.
+    '''
+    global root_path
+    qp_pairs = data.load_from_file(path=path, is_training=is_training)
+
+    tokenized_sent = 0
+    # qp_pairs = qp_pairs[:1000]
+    for qp_pair in qp_pairs:
+        tokenized_sent += 1
+        data.tokenize(qp_pair, tokenizer, is_training)
+        for word in qp_pair['question_tokens']:
+            word_vcb.add(word['word'])
+            for char in word['word']:
+                char_vcb.add(char)
+        for word in qp_pair['passage_tokens']:
+            word_vcb.add(word['word'])
+            for char in word['word']:
+                char_vcb.add(char)
+
+    max_query_length = max(len(x['question_tokens']) for x in qp_pairs)
+    max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs)
+    #min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)
+    cfg.max_query_length = max_query_length
+    cfg.max_passage_length = max_passage_length
+
+    return qp_pairs
+
+
+def train_with_graph(graph, qp_pairs, dev_qp_pairs):
+    '''
+    Train a network built from a specific graph.
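+
+    Builds a training model and a weight-sharing dev model, trains for up to
+    max_epoch epochs with patience-based early stopping, and reports the dev
+    accuracy to NNI after every epoch.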
+    '''
+    global sess
+    with tf.Graph().as_default():
+        train_model = GAG(cfg, embed, graph)
+        train_model.build_net(is_training=True)
+        tf.get_variable_scope().reuse_variables()
+        dev_model = GAG(cfg, embed, graph)
+        dev_model.build_net(is_training=False)
+        with tf.Session() as sess:
+            logger.debug('init variables')
+            init = tf.global_variables_initializer()
+            sess.run(init)
+            # writer = tf.summary.FileWriter('%s/graph/'%execution_path, sess.graph)
+            logger.debug('assign to graph')
+
+            saver = tf.train.Saver()
+            train_loss = None
+            bestacc = 0
+            patience = 5
+            patience_increase = 2
+            improvement_threshold = 0.995
+
+            for epoch in range(max_epoch):
+                logger.debug('begin to train')
+                train_batches = data.get_batches(qp_pairs, cfg.batch_size)
+                train_loss = run_epoch(train_batches, train_model, True)
+                logger.debug('epoch %d loss: %s', epoch, str(train_loss))
+                dev_batches = list(data.get_batches(
+                    dev_qp_pairs, cfg.batch_size))
+                _, position1, position2, ids, contexts = run_epoch(
+                    dev_batches, dev_model, False)
+
+                answers = generate_predict_json(
+                    position1, position2, ids, contexts)
+                if save_path is not None:
+                    with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file:
+                        json.dump(answers, file)
+                else:
+                    answers = json.dumps(answers)
+                    answers = json.loads(answers)
+                iteration = epoch + 1
+
+                acc = evaluate.evaluate_with_predictions(
+                    args.dev_file, answers)
+
+                logger.debug('Send intermediate acc: %s', str(acc))
+                nni.report_intermediate_result(acc)
+
+                logger.debug('Send intermediate result done.')
+
+                if acc > bestacc:
+                    if acc * improvement_threshold > bestacc:
+                        patience = max(patience, iteration * patience_increase)
+                    bestacc = acc
+
+                    if save_path is not None:
+                        saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
+                        with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
+                            pickle.dump(
+                                (position1, position2, ids, contexts), file)
+                logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
+                if patience <= iteration:
+                    break
+            logger.debug('save done.')
+            return train_loss, bestacc
+
+
+embed = None
+char_vcb = None
+tokenizer = None
+word_vcb = None
+
+
+def load_data():
+    '''
+    Load and tokenize the train/dev data and build the embedding table.
+    '''
+    global embed, char_vcb, tokenizer, word_vcb
+    logger.debug('tokenize data')
+    tokenizer = data.WhitespaceTokenizer()
+
+    char_set = set()
+    word_set = set()
+    logger.debug('generate train data')
+    qp_pairs = generate_data(input_file, tokenizer,
+                             char_set, word_set, is_training=True)
+    logger.debug('generate dev data')
+    dev_qp_pairs = generate_data(
+        dev_file, tokenizer, char_set, word_set, is_training=False)
+    logger.debug('generate data done.')
+
+    char_vcb = {char: sample_id for sample_id, char in enumerate(char_set)}
+    word_vcb = {word: sample_id for sample_id, word in enumerate(word_set)}
+
+    timer.start()
+    logger.debug('read embedding table')
+
+    cfg.word_embed_dim = 300
+    embed = np.zeros((len(word_vcb), cfg.word_embed_dim), dtype=np.float32)
+
+    embedding = load_embedding(args.embedding_file)
+    for word, sample_id in word_vcb.items():
+        if word in embedding:
+            embed[sample_id] = embedding[word]
+
+    # add UNK into dict
+    unk = np.zeros((1, cfg.word_embed_dim), dtype=np.float32)
+    embed = np.concatenate((unk, embed), axis=0)
+    word_vcb = {key: value + 1 for key, value in word_vcb.items()}
+
+    return qp_pairs, dev_qp_pairs
+
+
+if __name__ == '__main__':
+    try:
+        args = get_config()
+
+        root_path = os.path.expanduser(args.root_path)
+        input_file = os.path.expanduser(args.input_file)
+        dev_file = os.path.expanduser(args.dev_file)
+        save_path = None
+        max_epoch = args.max_epoch
+
+        cfg = GAGConfig()
+        cfg.batch_size = args.batch_size
+        cfg.learning_rate = float(args.learning_rate)
+        cfg.dropout = args.dropout_rate
+        cfg.rnn_units = args.rnn_units
+        cfg.labelsmoothing = args.labelsmoothing
+        cfg.num_heads = args.num_heads
+        timer = Timer()
+
+        qp_pairs, dev_qp_pairs = load_data()
+        logger.debug('Init finish.')
+
+        original_params = nni.get_next_parameter()
+        '''
+        with open('data.json') as f:
+            original_params = json.load(f)
+        '''
+        try:
+            graph = graph.graph_loads(original_params)
+        except Exception:
+            logger.exception('Failed to load graph from received parameters.')
+            raise
+        train_loss, best_acc = train_with_graph(graph, qp_pairs, dev_qp_pairs)
+
+        logger.debug('Send best acc: %s', str(best_acc))
+        nni.report_final_result(best_acc)
+        logger.debug('Send final result done')
+    except:
+        logger.exception('Catch exception in trial.py.')
+        raise
diff --git a/examples/trials/ga_squad/util.py b/examples/trials/ga_squad/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac9f363003ad86955af8c42c86578741506db367
--- /dev/null
+++ b/examples/trials/ga_squad/util.py
@@ -0,0 +1,76 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+'''
+Util Module
+'''
+
+import time
+
+import tensorflow as tf
+
+
+def shape(tensor):
+    '''
+    Get the static shape of a tensor as a tuple of ints.
+    '''
+    temp_s = tensor.get_shape()
+    return tuple([temp_s[i].value for i in range(0, len(temp_s))])
+
+
+def get_variable(name, temp_s):
+    '''
+    Create a zero-initialized variable with the given name and shape.
+    '''
+    return tf.Variable(tf.zeros(temp_s), name=name)
+
+
+def dropout(tensor, drop_prob, is_training):
+    '''
+    Apply dropout during training; return the tensor unchanged otherwise.
+    '''
+    if not is_training:
+        return tensor
+    return tf.nn.dropout(tensor, 1.0 - drop_prob)
+
+
+class Timer:
+    '''
+    Timer for measuring elapsed wall-clock time.
+    '''
+    def __init__(self):
+        self.__start = time.time()
+
+    def start(self):
+        '''
+        Start (or restart) the timer.
+        '''
+        self.__start = time.time()
+
+    def get_elapsed(self, restart=True):
+        '''
+        Calculate the elapsed time span.
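+
+        Returns the seconds elapsed since the last start; when restart is
+        True, the timer is reset to the current time.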
+        '''
+        end = time.time()
+        span = end - self.__start
+        if restart:
+            self.__start = end
+        return span
diff --git a/examples/trials/kaggle-tgs-salt/README.md b/examples/trials/kaggle-tgs-salt/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b37f3ff686069620a14cff25bb7307bcbc7e89e4
--- /dev/null
+++ b/examples/trials/kaggle-tgs-salt/README.md
@@ -0,0 +1,56 @@
+## 33rd place solution code for Kaggle [TGS Salt Identification Challenge](https://www.kaggle.com/c/tgs-salt-identification-challenge)
+
+This example shows how to enable AutoML for competition code by running it on NNI without any code change.
+To run this code on NNI, first make sure it runs standalone, then configure config.yml and run:
+```
+nnictl create --config config.yml
+```
+
+This code can still run standalone. It is provided for reference; reproducing the competition result takes at least one week of effort.
+
+[Solution summary](https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/69593)
+
+Preparation:
+
+Download the competition data, then run preprocess.py to prepare the training data.
+
+Stage 1:
+
+Train folds 0-3 for 100 epochs; for each fold, train 3 models:
+```
+python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV4
+python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV5 --layers 50
+python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV6
+```
+
+Stage 2:
+
+Fine tune stage 1 models for 300 epochs with a cosine annealing lr scheduler:
+
+```
+python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4
+```
+
+Stage 3:
+
+Fine tune stage 2 models with the depths channel:
+
+```
+python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4 --depths
+```
+
+Stage 4:
+
+Make predictions with each model, then ensemble the results to generate pseudo labels.
+
+Stage 5:
+
+Fine tune stage 3 models with the pseudo labels:
+
+```
+python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4 --depths --pseudo
+```
+
+Stage 6:
+Ensemble all stage 3 and stage 5 models.
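+
+For reference, with `useAnnotation: true` in config.yml, hyperparameters can be
+exposed to NNI through annotation comments, so the script still runs unchanged
+standalone. The snippet below is only an illustrative sketch — the parameter
+name and candidate values are hypothetical, not part of this solution:
+
+```
+'''@nni.variable(nni.choice(0.01, 0.001, 0.0001), name=lr)'''
+lr = 0.001  # default used when running without NNI
+```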
+ diff --git a/examples/trials/kaggle-tgs-salt/README_zh_CN.md b/examples/trials/kaggle-tgs-salt/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..fd68bbaa813b43015cbb19d6db676efde30d2c5f --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/README_zh_CN.md @@ -0,0 +1,50 @@ +## Kaggle 比赛 [TGS Salt Identification Chanllenge](https://www.kaggle.com/c/tgs-salt-identification-challenge) 第 33 名的解决方案 + +本示例展示了如何不改动代码的情况下通过 NNI 来为竞赛代码使用自动机器学习。 要在 NNI 上运行此代码,首先需要单独运行它,然后配置 config.yml: + + nnictl create --config config.yml + + +此代码仍然能够单独运行,但需要至少一周来重现竞赛的结果。 + +[解决方案概述](https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/69593) + +准备: + +下载完整的数据,运行 preprocess.py 来准备数据。 + +阶段 1: + +将目录 0-3 训练 100 个 epoch,对于每个目录,训练三个模型: + + python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV4 + python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV5 --layers 50 + python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV6 + + +阶段 2: + +使用余弦退火学习率调度器运行 300 次 epoch 来微调阶段 1 的模型: + + python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4 + + +阶段 3: + +用深度通道微调阶段 2 的模型: + + python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4 --depths + + +阶段 4: + +为每个模型进行预测,组合结果生成伪标签。 + +阶段 5: + +用伪标签微调阶段 3 的模型 + + python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4 --depths --pseudo + + +阶段 6: 将所有阶段 3 和阶段 5 的模型组合起来。 \ No newline at end of file diff --git a/examples/trials/kaggle-tgs-salt/augmentation.py b/examples/trials/kaggle-tgs-salt/augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..ca434900252df9b8dc3281722444f01ae073a95a --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/augmentation.py @@ -0,0 +1,241 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
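+
+'''
+Paired image/mask augmentations for the TGS Salt example: each transform
+applies the same random flip/affine/crop to an image and its mask; TTA
+(test-time augmentation) helpers are included at the bottom.
+'''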
+ +import os +import cv2 +import numpy as np +import random +import torchvision.transforms.functional as F +from torchvision.transforms import RandomResizedCrop, ColorJitter, RandomAffine +import PIL +from PIL import Image +import collections + +import settings + + +class RandomHFlipWithMask(object): + def __init__(self, p=0.5): + self.p = p + def __call__(self, *imgs): + if random.random() < self.p: + return map(F.hflip, imgs) + else: + return imgs + +class RandomVFlipWithMask(object): + def __init__(self, p=0.5): + self.p = p + def __call__(self, *imgs): + if random.random() < self.p: + return map(F.vflip, imgs) + else: + return imgs + +class RandomResizedCropWithMask(RandomResizedCrop): + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR): + super(RandomResizedCropWithMask, self).__init__(size, scale, ratio, interpolation) + def __call__(self, *imgs): + i, j, h, w = self.get_params(imgs[0], self.scale, self.ratio) + #print(i,j,h,w) + return map(lambda x: F.resized_crop(x, i, j, h, w, self.size, self.interpolation), imgs) + +class RandomAffineWithMask(RandomAffine): + def __init__(self, degrees, translate=None, scale=None, shear=None, resample='edge'): + super(RandomAffineWithMask, self).__init__(degrees, translate, scale, shear, resample) + def __call__(self, *imgs): + ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, imgs[0].size) + w, h = imgs[0].size + imgs = map(lambda x: F.pad(x, w//2, 0, self.resample), imgs) + imgs = map(lambda x: F.affine(x, *ret, resample=0), imgs) + imgs = map(lambda x: F.center_crop(x, (w, h)), imgs) + return imgs + +class RandomRotateWithMask(object): + def __init__(self, degrees, pad_mode='reflect', expand=False, center=None): + self.pad_mode = pad_mode + self.expand = expand + self.center = center + self.degrees = degrees + + def __call__(self, *imgs): + angle = self.get_angle() + if angle == int(angle) and angle % 90 == 0: + if angle == 0: + return imgs + else: + #print(imgs) + return map(lambda x: F.rotate(x, angle, False, False, None), imgs) + else: + return map(lambda x: self._pad_rotate(x, angle), imgs) + + def get_angle(self): + if isinstance(self.degrees, collections.Sequence): + index = int(random.random() * len(self.degrees)) + return self.degrees[index] + else: + return random.uniform(-self.degrees, self.degrees) + + def _pad_rotate(self, img, angle): + w, h = img.size + img = F.pad(img, w//2, 0, self.pad_mode) + img = F.rotate(img, angle, False, self.expand, self.center) + img = F.center_crop(img, (w, h)) + return img + +class CropWithMask(object): + def __init__(self, i, j, h, w): + self.i = i + self.j = j + self.h = h + self.w = w + def __call__(self, *imgs): + return map(lambda x: F.crop(x, self.i, self.j, self.h, self.w), imgs) + +class PadWithMask(object): + def __init__(self, padding, padding_mode): + self.padding = padding + self.padding_mode = padding_mode + def __call__(self, *imgs): + return map(lambda x: F.pad(x, self.padding, padding_mode=self.padding_mode), imgs) + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, *imgs): + for t in self.transforms: + imgs = t(*imgs) + return imgs + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + +def get_img_mask_augments(train_mode, pad_mode): + if pad_mode == 'resize': + img_mask_aug_train = Compose([ + 
RandomHFlipWithMask(), + RandomAffineWithMask(10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=None) + ]) + img_mask_aug_val = None + else: + img_mask_aug_train = Compose([ + PadWithMask((28, 28), padding_mode=pad_mode), + RandomHFlipWithMask(), + RandomAffineWithMask(10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=None), + RandomResizedCropWithMask(128, scale=(1., 1.), ratio=(1., 1.)) + ]) + img_mask_aug_val = PadWithMask((13, 14), padding_mode=pad_mode) + + return img_mask_aug_train, img_mask_aug_val + + +def test_transform(): + img_id = '0b73b427d1.png' + img = Image.open(os.path.join(settings.TRAIN_IMG_DIR, img_id)).convert('RGB') + mask = Image.open(os.path.join(settings.TRAIN_MASK_DIR, img_id)).convert('L').point(lambda x: 0 if x < 128 else 1, 'L') + + img_id = '0a1ea1af4.jpg' + img = Image.open(os.path.join(r'D:\data\ship\train_v2', img_id)).convert('RGB') + mask = Image.open(os.path.join(r'D:\data\ship\train_masks', img_id)).convert('L').point(lambda x: 0 if x < 128 else 1, 'L') + + trans = Compose([ + RandomHFlipWithMask(), + RandomVFlipWithMask(), + RandomRotateWithMask([0, 90, 180, 270]), + #RandomRotateWithMask(15), + RandomResizedCropWithMask(768, scale=(0.81, 1)) + ]) + + trans2 = RandomAffineWithMask(45, (0.2,0.2), (0.9, 1.1)) + trans3, trans4 = get_img_mask_augments(True, 'edge') + + img, mask = trans4(img, mask) + + img.show() + mask.point(lambda x: x*255).show() + +def test_color_trans(): + img_id = '00abc623a.jpg' + img = Image.open(os.path.join(settings.TRAIN_IMG_DIR, img_id)).convert('RGB') + trans = ColorJitter(0.1, 0.1, 0.1, 0.1) + + img2 = trans(img) + img.show() + img2.show() + + +class TTATransform(object): + def __init__(self, index): + self.index = index + def __call__(self, img): + trans = { + 0: lambda x: x, + 1: lambda x: F.hflip(x), + 2: lambda x: F.vflip(x), + 3: lambda x: F.vflip(F.hflip(x)), + 4: lambda x: F.rotate(x, 90, False, False), + 5: lambda x: F.hflip(F.rotate(x, 90, False, False)), + 6: lambda x: F.vflip(F.rotate(x, 90, False, False)), + 7: lambda x: F.vflip(F.hflip(F.rotate(x, 90, False, False))) + } + return trans[self.index](img) + +# i is tta index, 0: no change, 1: horizon flip, 2: vertical flip, 3: do both +def tta_back_mask_np(img, index): + print(img.shape) + trans = { + 0: lambda x: x, + 1: lambda x: np.flip(x, 2), + 2: lambda x: np.flip(x, 1), + 3: lambda x: np.flip(np.flip(x, 2), 1), + 4: lambda x: np.rot90(x, 3, axes=(1,2)), + 5: lambda x: np.rot90(np.flip(x, 2), 3, axes=(1,2)), + 6: lambda x: np.rot90(np.flip(x, 1), 3, axes=(1,2)), + 7: lambda x: np.rot90(np.flip(np.flip(x,2), 1), 3, axes=(1,2)) + } + + return trans[index](img) + +def test_tta(): + img_f = os.path.join(settings.TEST_IMG_DIR, '0c2637aa9.jpg') + img = Image.open(img_f) + img = img.convert('RGB') + + tta_index = 7 + trans1 = TTATransform(tta_index) + img = trans1(img) + #img.show() + + img_np = np.array(img) + img_np = np.expand_dims(img_np, 0) + print(img_np.shape) + img_np = tta_back_mask_np(img_np, tta_index) + img_np = np.reshape(img_np, (768, 768, 3)) + img_back = F.to_pil_image(img_np) + img_back.show() + +if __name__ == '__main__': + test_transform() diff --git a/examples/trials/kaggle-tgs-salt/config.yml b/examples/trials/kaggle-tgs-salt/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..d385a3fa4d8984edb8e7756234d2c4fb80657a20 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/config.yml @@ -0,0 +1,11 @@ +useAnnotation: true +trialCommand: python3 train.py +trialGpuNumber: 0 +trialConcurrency: 2 +maxTrialNumber: 10 +tuner: + 
name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/kaggle-tgs-salt/config_windows.yml b/examples/trials/kaggle-tgs-salt/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..9321b60dc6c26c45d47cf6f46b5a24ccbb6e304e --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/config_windows.yml @@ -0,0 +1,11 @@ +useAnnotation: true +trialCommand: python train.py +trialGpuNumber: 0 +trialConcurrency: 2 +maxTrialNumber: 10 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/kaggle-tgs-salt/focal_loss.py b/examples/trials/kaggle-tgs-salt/focal_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ec3f1a26cd368de7bc8496a868cd88b9dee0eb5a --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/focal_loss.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +class FocalLoss2d(nn.Module): + + def __init__(self, gamma=2, size_average=True): + super(FocalLoss2d, self).__init__() + self.gamma = gamma + self.size_average = size_average + + + def forward(self, logit, target, class_weight=None, type='sigmoid'): + target = target.view(-1, 1).long() + + if type=='sigmoid': + if class_weight is None: + class_weight = [1]*2 #[0.5, 0.5] + + prob = torch.sigmoid(logit) + prob = prob.view(-1, 1) + prob = torch.cat((1-prob, prob), 1) + select = torch.FloatTensor(len(prob), 2).zero_().cuda() + select.scatter_(1, target, 1.) + + elif type=='softmax': + B,C,H,W = logit.size() + if class_weight is None: + class_weight =[1]*C #[1/C]*C + + logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C) + prob = F.softmax(logit,1) + select = torch.FloatTensor(len(prob), C).zero_().cuda() + select.scatter_(1, target, 1.) 
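+ + # Focal loss (Lin et al. 2017): FL(p_t) = -w_t * (1 - p_t)^gamma * log(p_t). + # 'select' one-hot encodes the target, so (prob*select).sum(1) below extracts + # p_t, and the gathered class_weight supplies w_t.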
+ + class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1) + class_weight = torch.gather(class_weight, 0, target) + + prob = (prob*select).sum(1).view(-1,1) + prob = torch.clamp(prob,1e-8,1-1e-8) + batch_loss = - class_weight *(torch.pow((1-prob), self.gamma))*prob.log() + + if self.size_average: + loss = batch_loss.mean() + else: + loss = batch_loss + + return loss + + +if __name__ == '__main__': + L = FocalLoss2d() + out = torch.randn(2, 3, 3).cuda() + target = (torch.sigmoid(out) > 0.5).float() + loss = L(out, target) + print(loss) diff --git a/examples/trials/kaggle-tgs-salt/loader.py b/examples/trials/kaggle-tgs-salt/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..db19e0dfeb51def5e898950c98763ce1652b8452 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/loader.py @@ -0,0 +1,291 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
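One trick in this loader worth flagging up front: `add_depth_channel`, defined near the bottom of this file, overwrites two of the three image channels with a vertical gradient, injecting the row position as a crude depth signal for the TGS data. A standalone sketch of the effect; the sizes are illustrative:

```python
# Standalone sketch of the depth-channel trick used by add_depth_channel().
import torch

n, h, w = 2, 128, 128
img = torch.rand(n, 3, h, w)   # grayscale image replicated across 3 channels
depth = torch.linspace(0, 1, h).view(h, 1).expand(h, w)
img[:, 1] = depth              # channel 1: row index scaled to [0, 1]
img[:, 2] = img[:, 0] * depth  # channel 2: intensity modulated by depth
```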
+ +import os, cv2, glob +import numpy as np +from PIL import Image + +import torch +import torch.utils.data as data +from torchvision import datasets, models, transforms +from utils import read_masks, get_test_meta, get_nfold_split +import augmentation as aug +from settings import * + +class ImageDataset(data.Dataset): + def __init__(self, train_mode, meta, augment_with_target=None, + image_augment=None, image_transform=None, mask_transform=None): + self.augment_with_target = augment_with_target + self.image_augment = image_augment + self.image_transform = image_transform + self.mask_transform = mask_transform + + self.train_mode = train_mode + self.meta = meta + + self.img_ids = meta[ID_COLUMN].values + self.salt_exists = meta['salt_exists'].values + self.is_train = meta['is_train'].values + + if self.train_mode: + self.mask_filenames = meta[Y_COLUMN].values + + def __getitem__(self, index): + base_img_fn = '{}.png'.format(self.img_ids[index]) + if self.is_train[index]: #self.train_mode: + img_fn = os.path.join(TRAIN_IMG_DIR, base_img_fn) + else: + img_fn = os.path.join(TEST_IMG_DIR, base_img_fn) + img = self.load_image(img_fn) + + if self.train_mode: + base_mask_fn = '{}.png'.format(self.img_ids[index]) + if self.is_train[index]: + mask_fn = os.path.join(TRAIN_MASK_DIR, base_mask_fn) + else: + mask_fn = os.path.join(TEST_DIR, 'masks', base_mask_fn) + mask = self.load_image(mask_fn, True) + img, mask = self.aug_image(img, mask) + return img, mask, self.salt_exists[index] + else: + img = self.aug_image(img) + return [img] + + def aug_image(self, img, mask=None): + if mask is not None: + if self.augment_with_target is not None: + img, mask = self.augment_with_target(img, mask) + if self.image_augment is not None: + img = self.image_augment(img) + if self.mask_transform is not None: + mask = self.mask_transform(mask) + if self.image_transform is not None: + img = self.image_transform(img) + return img, mask + else: + if self.image_augment is not None: + img = self.image_augment(img) + if self.image_transform is not None: + img = self.image_transform(img) + return img + + def load_image(self, img_filepath, grayscale=False): + image = Image.open(img_filepath, 'r') + if not grayscale: + image = image.convert('RGB') + else: + image = image.convert('L').point(lambda x: 0 if x < 128 else 1, 'L') + return image + + def __len__(self): + return len(self.img_ids) + + def collate_fn(self, batch): + imgs = [x[0] for x in batch] + inputs = torch.stack(imgs) + + if self.train_mode: + masks = [x[1] for x in batch] + labels = torch.stack(masks) + + salt_target = [x[2] for x in batch] + return inputs, labels, torch.FloatTensor(salt_target) + else: + return inputs + +def mask_to_tensor(x): + x = np.array(x).astype(np.float32) + x = np.expand_dims(x, axis=0) + x = torch.from_numpy(x) + return x + +img_transforms = [ + transforms.Grayscale(num_output_channels=3), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ] + +def get_tta_transforms(index, pad_mode): + tta_transforms = { + 0: [], + 1: [transforms.RandomHorizontalFlip(p=2.)], + 2: [transforms.RandomVerticalFlip(p=2.)], + 3: [transforms.RandomHorizontalFlip(p=2.), transforms.RandomVerticalFlip(p=2.)] + } + if pad_mode == 'resize': + return transforms.Compose([transforms.Resize((H, W)), *(tta_transforms[index]), *img_transforms]) + else: + return transforms.Compose([*(tta_transforms[index]), *img_transforms]) + +def get_image_transform(pad_mode): + if pad_mode == 'resize': + return 
transforms.Compose([transforms.Resize((H, W)), *img_transforms]) + else: + return transforms.Compose(img_transforms) + +def get_mask_transform(pad_mode): + if pad_mode == 'resize': + return transforms.Compose( + [ + transforms.Resize((H, W)), + transforms.Lambda(mask_to_tensor), + ] + ) + else: + return transforms.Compose( + [ + transforms.Lambda(mask_to_tensor), + ] + ) + +def get_img_mask_augments(pad_mode, depths_channel=False): + if depths_channel: + affine_aug = aug.RandomAffineWithMask(5, translate=(0.1, 0.), scale=(0.9, 1.1), shear=None) + else: + affine_aug = aug.RandomAffineWithMask(15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=None) + + if pad_mode == 'resize': + img_mask_aug_train = aug.Compose([ + aug.RandomHFlipWithMask(), + affine_aug + ]) + img_mask_aug_val = None + else: + img_mask_aug_train = aug.Compose([ + aug.PadWithMask((28, 28), padding_mode=pad_mode), + aug.RandomHFlipWithMask(), + affine_aug, + aug.RandomResizedCropWithMask(H, scale=(1., 1.), ratio=(1., 1.)) + ]) + img_mask_aug_val = aug.PadWithMask((13, 13, 14, 14), padding_mode=pad_mode) + + return img_mask_aug_train, img_mask_aug_val + +def get_train_loaders(ifold, batch_size=8, dev_mode=False, pad_mode='edge', meta_version=1, pseudo_label=False, depths=False): + train_shuffle = True + train_meta, val_meta = get_nfold_split(ifold, nfold=10, meta_version=meta_version) + + if pseudo_label: + test_meta = get_test_meta() + train_meta = train_meta.append(test_meta, sort=True) + + if dev_mode: + train_shuffle = False + train_meta = train_meta.iloc[:10] + val_meta = val_meta.iloc[:10] + #print(val_meta[X_COLUMN].values[:5]) + #print(val_meta[Y_COLUMN].values[:5]) + print(train_meta.shape, val_meta.shape) + img_mask_aug_train, img_mask_aug_val = get_img_mask_augments(pad_mode, depths) + + train_set = ImageDataset(True, train_meta, + augment_with_target=img_mask_aug_train, + image_augment=transforms.ColorJitter(0.2, 0.2, 0.2, 0.2), + image_transform=get_image_transform(pad_mode), + mask_transform=get_mask_transform(pad_mode)) + + train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=train_shuffle, num_workers=4, collate_fn=train_set.collate_fn, drop_last=True) + train_loader.num = len(train_set) + + val_set = ImageDataset(True, val_meta, + augment_with_target=img_mask_aug_val, + image_augment=None, + image_transform=get_image_transform(pad_mode), + mask_transform=get_mask_transform(pad_mode)) + val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=val_set.collate_fn) + val_loader.num = len(val_set) + val_loader.y_true = read_masks(val_meta[ID_COLUMN].values) + + return train_loader, val_loader + +def get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'): + test_meta = get_test_meta() + if dev_mode: + test_meta = test_meta.iloc[:10] + test_set = ImageDataset(False, test_meta, + image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode), + image_transform=get_tta_transforms(index, pad_mode)) + test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=test_set.collate_fn, drop_last=False) + test_loader.num = len(test_set) + test_loader.meta = test_set.meta + + return test_loader + +depth_channel_tensor = None + +def get_depth_tensor(pad_mode): + global depth_channel_tensor + + if depth_channel_tensor is not None: + return depth_channel_tensor + + depth_tensor = None + + if pad_mode == 'resize': + depth_tensor = np.zeros((H, W)) + for row, const in 
enumerate(np.linspace(0, 1, H)): + depth_tensor[row, :] = const + else: + depth_tensor = np.zeros((ORIG_H, ORIG_W)) + for row, const in enumerate(np.linspace(0, 1, ORIG_H)): + depth_tensor[row, :] = const + depth_tensor = np.pad(depth_tensor, (14,14), mode=pad_mode) # edge or reflect + depth_tensor = depth_tensor[:H, :W] + + depth_channel_tensor = torch.Tensor(depth_tensor) + return depth_channel_tensor + +def add_depth_channel(img_tensor, pad_mode): + ''' + img_tensor: N, C, H, W + ''' + img_tensor[:, 1] = get_depth_tensor(pad_mode) + img_tensor[:, 2] = img_tensor[:, 0] * get_depth_tensor(pad_mode) + + +def test_train_loader(): + train_loader, val_loader = get_train_loaders(1, batch_size=4, dev_mode=False, pad_mode='edge', meta_version=2, pseudo_label=True) + print(train_loader.num, val_loader.num) + for i, data in enumerate(train_loader): + imgs, masks, salt_exists = data + #pdb.set_trace() + print(imgs.size(), masks.size(), salt_exists.size()) + print(salt_exists) + add_depth_channel(imgs, 'resize') + print(masks) + break + #print(imgs) + #print(masks) + +def test_test_loader(): + test_loader = get_test_loader(4, pad_mode='resize') + print(test_loader.num) + for i, data in enumerate(test_loader): + print(data.size()) + if i > 5: + break + +if __name__ == '__main__': + test_test_loader() + #test_train_loader() + #small_dict, img_ids = load_small_train_ids() + #print(img_ids[:10]) + #print(get_tta_transforms(3, 'edge')) diff --git a/examples/trials/kaggle-tgs-salt/lovasz_losses.py b/examples/trials/kaggle-tgs-salt/lovasz_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..8c19bc59d2720eab755dffb72520c66b91bc5b76 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/lovasz_losses.py @@ -0,0 +1,252 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +from __future__ import print_function, division + +import torch +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np + +try: + from itertools import ifilterfalse +except ImportError: # py3k: alias so 'ifilterfalse' is defined below + from itertools import filterfalse as ifilterfalse + + +def lovasz_grad(gt_sorted): + """ + Computes gradient of the Lovasz extension w.r.t sorted errors + See Alg. 1 in paper + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. - intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): + """ + IoU for foreground class + binary: 1 foreground, 0 background + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + intersection = ((label == 1) & (pred == 1)).sum() + union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() + if not union: + iou = EMPTY + else: + iou = float(intersection) / union + ious.append(iou) + iou = mean(ious) # mean across images if per_image + return 100 * iou + + +def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): + """ + Array of IoU for each (non ignored) class + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + iou = [] + for i in range(C): + if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) + intersection = ((label == i) & (pred == i)).sum() + union = ((label == i) | ((pred == i) & (label != ignore))).sum() + if not union: + iou.append(EMPTY) + else: + iou.append(float(intersection) / union) + ious.append(iou) + ious = list(map(mean, zip(*ious))) # mean across images if per_image; list() so np.array works on py3 + return 100 * np.array(ious) + + +# --------------------------- BINARY LOSSES --------------------------- + + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) + for log, lab in zip(logits, labels)) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1.
- logits * Variable(signs)) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.elu(errors_sorted)+1, Variable(grad)) + #loss = torch.dot(F.relu(errors_sorted), Variable(grad)) + + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = scores.view(-1) + labels = labels.view(-1) + if ignore is None: + return scores, labels + valid = (labels != ignore) + vscores = scores[valid] + vlabels = labels[valid] + return vscores, vlabels + + +class StableBCELoss(torch.nn.modules.Module): + def __init__(self): + super(StableBCELoss, self).__init__() + def forward(self, input, target): + neg_abs = - input.abs() + loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() + return loss.mean() + + +def binary_xloss(logits, labels, ignore=None): + """ + Binary Cross entropy loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + ignore: void class id + """ + logits, labels = flatten_binary_scores(logits, labels, ignore) + loss = StableBCELoss()(logits, Variable(labels.float())) + return loss + + +# --------------------------- MULTICLASS LOSSES --------------------------- + + +def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None): + """ + Multi-class Lovasz-Softmax loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) + labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) + only_present: average only on classes present in ground truth + per_image: compute the loss per image instead of per batch + ignore: void class labels + """ + if per_image: + loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), only_present=only_present) + for prob, lab in zip(probas, labels)) + else: + loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present) + return loss + + +def lovasz_softmax_flat(probas, labels, only_present=False): + """ + Multi-class Lovasz-Softmax loss + probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) + labels: [P] Tensor, ground truth labels (between 0 and C - 1) + only_present: average only on classes present in ground truth + """ + C = probas.size(1) + losses = [] + for c in range(C): + fg = (labels == c).float() # foreground for class c + if only_present and fg.sum() == 0: + continue + errors = (Variable(fg) - probas[:, c]).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) + return mean(losses) + + +def flatten_probas(probas, labels, ignore=None): + """ + Flattens predictions in the batch + """ + B, C, H, W = probas.size() + probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return probas, labels + valid = (labels != ignore) + vprobas = probas[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobas, vlabels + +def xloss(logits, labels, ignore=None): + """ + Cross entropy loss + """ + return F.cross_entropy(logits, Variable(labels), ignore_index=255) + + +# --------------------------- HELPER FUNCTIONS --------------------------- + +def mean(l, 
ignore_nan=False, empty=0): + """ + nanmean compatible with generators. + """ + l = iter(l) + if ignore_nan: + l = ifilterfalse(np.isnan, l) + try: + n = 1 + acc = next(l) + except StopIteration: + if empty == 'raise': + raise ValueError('Empty mean') + return empty + for n, v in enumerate(l, 2): + acc += v + if n == 1: + return acc + return acc / n diff --git a/examples/trials/kaggle-tgs-salt/metrics.py b/examples/trials/kaggle-tgs-salt/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..e253fec5cd8dcbccda3013a9b8b35b8d3a1384b5 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/metrics.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import numpy as np +from pycocotools import mask as cocomask +from utils import get_segmentations + + +def iou(gt, pred): + gt[gt > 0] = 1. + pred[pred > 0] = 1. + intersection = gt * pred + union = gt + pred + union[union > 0] = 1. 
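+ # IoU = |gt ∩ pred| / |gt ∪ pred|: both masks are binarized above, so the + # elementwise product counts the intersection and the clipped sum the union.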
+ intersection = np.sum(intersection) + union = np.sum(union) + if union == 0: + union = 1e-09 + return intersection / union + + +def compute_ious(gt, predictions): + gt_ = get_segmentations(gt) + predictions_ = get_segmentations(predictions) + + if len(gt_) == 0 and len(predictions_) == 0: + return np.ones((1, 1)) + elif len(gt_) != 0 and len(predictions_) == 0: + return np.zeros((1, 1)) + else: + iscrowd = [0 for _ in predictions_] + ious = cocomask.iou(gt_, predictions_, iscrowd) + if not np.array(ious).size: + ious = np.zeros((1, 1)) + return ious + + +def compute_precision_at(ious, threshold): + mx1 = np.max(ious, axis=0) + mx2 = np.max(ious, axis=1) + tp = np.sum(mx2 >= threshold) + fp = np.sum(mx2 < threshold) + fn = np.sum(mx1 < threshold) + return float(tp) / (tp + fp + fn) + + +def compute_eval_metric(gt, predictions): + thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] + ious = compute_ious(gt, predictions) + precisions = [compute_precision_at(ious, th) for th in thresholds] + return sum(precisions) / len(precisions) + + +def intersection_over_union(y_true, y_pred): + ious = [] + for y_t, y_p in list(zip(y_true, y_pred)): + iou = compute_ious(y_t, y_p) + iou_mean = 1.0 * np.sum(iou) / len(iou) + ious.append(iou_mean) + return np.mean(ious) + + +def intersection_over_union_thresholds(y_true, y_pred): + iouts = [] + for y_t, y_p in list(zip(y_true, y_pred)): + iouts.append(compute_eval_metric(y_t, y_p)) + return np.mean(iouts) diff --git a/examples/trials/kaggle-tgs-salt/models.py b/examples/trials/kaggle-tgs-salt/models.py new file mode 100644 index 0000000000000000000000000000000000000000..8fefe881afb073d9f0cd962ec42ce3129d7a3373 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/models.py @@ -0,0 +1,622 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
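The attention used throughout these models is concurrent spatial and channel squeeze-and-excitation (scSE, the arXiv paper cited in the source comment below): each block recalibrates its features as `x*g1 + x*g2`, where `g1` is a per-pixel gate from `SpatialAttentionGate` and `g2` a per-channel gate from `ChannelAttentionGate`. A shape-level sketch of that combination, with random tensors standing in for the learned gates:

```python
# Broadcast-shape sketch of the scSE recalibration x*g1 + x*g2.
import torch

x = torch.rand(2, 64, 32, 32)   # N, C, H, W feature map
g1 = torch.rand(2, 1, 32, 32)   # spatial gate: one weight per pixel
g2 = torch.rand(2, 64, 1, 1)    # channel gate: one weight per channel
out = x * g1 + x * g2           # both terms broadcast back to (2, 64, 32, 32)
assert out.shape == x.shape
```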
+ +from torch import nn +from torch.nn import functional as F +import torch +from torchvision import models +from torchvision.models import resnet34, resnet101, resnet50, resnet152 +import torchvision + + +def conv3x3(in_, out): + return nn.Conv2d(in_, out, 3, padding=1) + + +class ConvRelu(nn.Module): + def __init__(self, in_, out): + super().__init__() + self.conv = conv3x3(in_, out) + self.activation = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.activation(x) + return x + + +class ConvBn2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), padding=(1,1)): + super(ConvBn2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_channels) + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + +# Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks +# https://arxiv.org/abs/1803.02579 + +class ChannelAttentionGate(nn.Module): + def __init__(self, channel, reduction=16): + super(ChannelAttentionGate, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), + nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel), + nn.Sigmoid() + ) + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) + return y + + +class SpatialAttentionGate(nn.Module): + def __init__(self, channel, reduction=16): + super(SpatialAttentionGate, self).__init__() + self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0) + self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0) + + def forward(self, x): + x = self.fc1(x) + x = F.relu(x, inplace=True) + x = self.fc2(x) + x = torch.sigmoid(x) + #print(x.size()) + return x + +class DecoderBlock(nn.Module): + def __init__(self, in_channels, middle_channels, out_channels): + super(DecoderBlock, self).__init__() + self.conv1 = ConvBn2d(in_channels, middle_channels) + self.conv2 = ConvBn2d(middle_channels, out_channels) + #self.deconv = nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2, padding=1) + #self.bn = nn.BatchNorm2d(out_channels) + self.spatial_gate = SpatialAttentionGate(out_channels) + self.channel_gate = ChannelAttentionGate(out_channels) + + def forward(self, x, e=None): + x = F.upsample(x, scale_factor=2, mode='bilinear', align_corners=True) + if e is not None: + x = torch.cat([x,e], 1) + + x = F.relu(self.conv1(x), inplace=True) + x = F.relu(self.conv2(x), inplace=True) + + g1 = self.spatial_gate(x) + g2 = self.channel_gate(x) + x = x*g1 + x*g2 + + return x + +class EncoderBlock(nn.Module): + def __init__(self, block, out_channels): + super(EncoderBlock, self).__init__() + self.block = block + self.out_channels = out_channels + self.spatial_gate = SpatialAttentionGate(out_channels) + self.channel_gate = ChannelAttentionGate(out_channels) + + def forward(self, x): + x = self.block(x) + g1 = self.spatial_gate(x) + g2 = self.channel_gate(x) + + return x*g1 + x*g2 + + +def create_resnet(layers): + if layers == 34: + return resnet34(pretrained=True), 512 + elif layers == 50: + return resnet50(pretrained=True), 2048 + elif layers == 101: + return resnet101(pretrained=True), 2048 + elif layers == 152: + return resnet152(pretrained=True), 2048 + else: + raise NotImplementedError('only 34, 50, 101, 152 version of Resnet are implemented') + +class 
UNetResNetV4(nn.Module): + def __init__(self, encoder_depth, num_classes=1, num_filters=32, dropout_2d=0.4, + pretrained=True, is_deconv=True): + super(UNetResNetV4, self).__init__() + self.name = 'UNetResNetV4_'+str(encoder_depth) + self.num_classes = num_classes + self.dropout_2d = dropout_2d + + self.resnet, bottom_channel_nr = create_resnet(encoder_depth) + + self.encoder1 = EncoderBlock( + nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu), + num_filters*2 + ) + self.encoder2 = EncoderBlock(self.resnet.layer1, bottom_channel_nr//8) + self.encoder3 = EncoderBlock(self.resnet.layer2, bottom_channel_nr//4) + self.encoder4 = EncoderBlock(self.resnet.layer3, bottom_channel_nr//2) + self.encoder5 = EncoderBlock(self.resnet.layer4, bottom_channel_nr) + + center_block = nn.Sequential( + ConvBn2d(bottom_channel_nr, bottom_channel_nr, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ConvBn2d(bottom_channel_nr, bottom_channel_nr//2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=2, stride=2) + ) + self.center = EncoderBlock(center_block, bottom_channel_nr//2) + + self.decoder5 = DecoderBlock(bottom_channel_nr + bottom_channel_nr // 2, num_filters * 16, 64) + self.decoder4 = DecoderBlock(64 + bottom_channel_nr // 2, num_filters * 8, 64) + self.decoder3 = DecoderBlock(64 + bottom_channel_nr // 4, num_filters * 4, 64) + self.decoder2 = DecoderBlock(64 + bottom_channel_nr // 8, num_filters * 2, 64) + self.decoder1 = DecoderBlock(64, num_filters, 64) + + self.logit = nn.Sequential( + nn.Conv2d(320, 64, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(64, 1, kernel_size=1, padding=0) + ) + + def forward(self, x): + x = self.encoder1(x) #; print('x:', x.size()) + e2 = self.encoder2(x) #; print('e2:', e2.size()) + e3 = self.encoder3(e2) #; print('e3:', e3.size()) + e4 = self.encoder4(e3) #; print('e4:', e4.size()) + e5 = self.encoder5(e4) #; print('e5:', e5.size()) + + center = self.center(e5) #; print('center:', center.size()) + + d5 = self.decoder5(center, e5) #; print('d5:', d5.size()) + d4 = self.decoder4(d5, e4) #; print('d4:', d4.size()) + d3 = self.decoder3(d4, e3) #; print('d3:', d3.size()) + d2 = self.decoder2(d3, e2) #; print('d2:', d2.size()) + d1 = self.decoder1(d2) #; print('d1:', d1.size()) + + f = torch.cat([ + d1, + F.upsample(d2, scale_factor=2, mode='bilinear', align_corners=False), + F.upsample(d3, scale_factor=4, mode='bilinear', align_corners=False), + F.upsample(d4, scale_factor=8, mode='bilinear', align_corners=False), + F.upsample(d5, scale_factor=16, mode='bilinear', align_corners=False), + ], 1) + + f = F.dropout2d(f, p=self.dropout_2d) + + return self.logit(f), None + + def freeze_bn(self): + '''Freeze BatchNorm layers.''' + for layer in self.modules(): + if isinstance(layer, nn.BatchNorm2d): + layer.eval() + + def get_params(self, base_lr): + group1 = [self.encoder1, self.encoder2, self.encoder3, self.encoder4, self.encoder5] + group2 = [self.decoder1, self.decoder2, self.decoder3, self.decoder4, self.decoder5, self.center, self.logit] + + params1 = [] + for x in group1: + for p in x.parameters(): + params1.append(p) + + param_group1 = {'params': params1, 'lr': base_lr / 5} + + params2 = [] + for x in group2: + for p in x.parameters(): + params2.append(p) + param_group2 = {'params': params2, 'lr': base_lr} + + return [param_group1, param_group2] + +class DecoderBlockV5(nn.Module): + def __init__(self, in_channels_x, in_channels_e, middle_channels, out_channels): + super(DecoderBlockV5, self).__init__() + 
self.in_channels = in_channels_x + in_channels_e + self.conv1 = ConvBn2d(self.in_channels, middle_channels) + self.conv2 = ConvBn2d(middle_channels, out_channels) + self.deconv = nn.ConvTranspose2d(in_channels_x, in_channels_x, kernel_size=4, stride=2, padding=1) + self.bn = nn.BatchNorm2d(self.in_channels) + self.spatial_gate = SpatialAttentionGate(out_channels) + self.channel_gate = ChannelAttentionGate(out_channels) + + def forward(self, x, e=None): + #x = F.upsample(x, scale_factor=2, mode='bilinear', align_corners=True) + x = self.deconv(x) + if e is not None: + x = torch.cat([x,e], 1) + x = self.bn(x) + + x = F.relu(self.conv1(x), inplace=True) + x = F.relu(self.conv2(x), inplace=True) + + g1 = self.spatial_gate(x) + g2 = self.channel_gate(x) + x = x*g1 + x*g2 + + return x + + + +class UNetResNetV5(nn.Module): + def __init__(self, encoder_depth, num_classes=1, num_filters=32, dropout_2d=0.5): + super(UNetResNetV5, self).__init__() + self.name = 'UNetResNetV5_'+str(encoder_depth) + self.num_classes = num_classes + self.dropout_2d = dropout_2d + + self.resnet, bottom_channel_nr = create_resnet(encoder_depth) + + self.encoder1 = EncoderBlock( + nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu), + num_filters*2 + ) + self.encoder2 = EncoderBlock(self.resnet.layer1, bottom_channel_nr//8) + self.encoder3 = EncoderBlock(self.resnet.layer2, bottom_channel_nr//4) + self.encoder4 = EncoderBlock(self.resnet.layer3, bottom_channel_nr//2) + self.encoder5 = EncoderBlock(self.resnet.layer4, bottom_channel_nr) + + center_block = nn.Sequential( + ConvBn2d(bottom_channel_nr, bottom_channel_nr, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ConvBn2d(bottom_channel_nr, bottom_channel_nr//2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=2, stride=2) + ) + self.center = EncoderBlock(center_block, bottom_channel_nr//2) + + self.decoder5 = DecoderBlockV5(bottom_channel_nr // 2, bottom_channel_nr, num_filters * 16, 64) + self.decoder4 = DecoderBlockV5(64, bottom_channel_nr // 2, num_filters * 8, 64) + self.decoder3 = DecoderBlockV5(64, bottom_channel_nr // 4, num_filters * 4, 64) + self.decoder2 = DecoderBlockV5(64, bottom_channel_nr // 8, num_filters * 2, 64) + self.decoder1 = DecoderBlockV5(64, 0, num_filters, 64) + + self.logit = nn.Sequential( + nn.Conv2d(320, 64, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(64, 1, kernel_size=1, padding=0) + ) + + def forward(self, x): + x = self.encoder1(x) #; print('x:', x.size()) + e2 = self.encoder2(x) #; print('e2:', e2.size()) + e3 = self.encoder3(e2) #; print('e3:', e3.size()) + e4 = self.encoder4(e3) #; print('e4:', e4.size()) + e5 = self.encoder5(e4) #; print('e5:', e5.size()) + + center = self.center(e5) #; print('center:', center.size()) + + d5 = self.decoder5(center, e5) #; print('d5:', d5.size()) + d4 = self.decoder4(d5, e4) #; print('d4:', d4.size()) + d3 = self.decoder3(d4, e3) #; print('d3:', d3.size()) + d2 = self.decoder2(d3, e2) #; print('d2:', d2.size()) + d1 = self.decoder1(d2) #; print('d1:', d1.size()) + + f = torch.cat([ + d1, + F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False), + F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False), + F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False), + F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False), + ], 1) + + f = F.dropout2d(f, p=self.dropout_2d) + + return self.logit(f), None + +class UNetResNetV6(nn.Module): + ''' + 1. 
Remove first pool from UNetResNetV5, such that resolution is doubled + 2. Remove scSE from center block + 3. Increase default dropout + ''' + def __init__(self, encoder_depth, num_filters=32, dropout_2d=0.5): + super(UNetResNetV6, self).__init__() + assert encoder_depth == 34, 'UNetResNetV6: only 34 layers is supported!' + self.name = 'UNetResNetV6_'+str(encoder_depth) + self.dropout_2d = dropout_2d + + self.resnet, bottom_channel_nr = create_resnet(encoder_depth) + + self.encoder1 = EncoderBlock( + nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu), + num_filters*2 + ) + + self.encoder2 = EncoderBlock(self.resnet.layer1, bottom_channel_nr//8) + self.encoder3 = EncoderBlock(self.resnet.layer2, bottom_channel_nr//4) + self.encoder4 = EncoderBlock(self.resnet.layer3, bottom_channel_nr//2) + self.encoder5 = EncoderBlock(self.resnet.layer4, bottom_channel_nr) + + self.center = nn.Sequential( + ConvBn2d(bottom_channel_nr, bottom_channel_nr, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ConvBn2d(bottom_channel_nr, bottom_channel_nr//2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=2, stride=2) + ) + #self.center = EncoderBlock(center_block, bottom_channel_nr//2) + + self.decoder5 = DecoderBlockV5(bottom_channel_nr // 2, bottom_channel_nr, num_filters * 16, 64) + self.decoder4 = DecoderBlockV5(64, bottom_channel_nr // 2, num_filters * 8, 64) + self.decoder3 = DecoderBlockV5(64, bottom_channel_nr // 4, num_filters * 4, 64) + self.decoder2 = DecoderBlockV5(64, bottom_channel_nr // 8, num_filters * 2, 64) + self.decoder1 = DecoderBlockV5(64, 0, num_filters, 64) + + self.logit = nn.Sequential( + nn.Conv2d(512, 64, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(64, 1, kernel_size=1, padding=0) + ) + + self.logit_image = nn.Sequential( + nn.Linear(512, 128), + nn.ReLU(inplace=True), + nn.Linear(128, 1) + ) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False) + x = self.encoder1(x) #; print('x:', x.size()) + e2 = self.encoder2(x) #; print('e2:', e2.size()) + e3 = self.encoder3(e2) #; print('e3:', e3.size()) + e4 = self.encoder4(e3) #; print('e4:', e4.size()) + e5 = self.encoder5(e4) #; print('e5:', e5.size()) + + center = self.center(e5) #; print('center:', center.size()) + + d5 = self.decoder5(center, e5) #; print('d5:', d5.size()) + d4 = self.decoder4(d5, e4) #; print('d4:', d4.size()) + d3 = self.decoder3(d4, e3) #; print('d3:', d3.size()) + d2 = self.decoder2(d3, e2) #; print('d2:', d2.size()) + #d1 = self.decoder1(d2) ; print('d1:', d1.size()) + + f = torch.cat([ + d2, + F.interpolate(d3, scale_factor=2, mode='bilinear', align_corners=False), + F.interpolate(d4, scale_factor=4, mode='bilinear', align_corners=False), + F.interpolate(d5, scale_factor=8, mode='bilinear', align_corners=False), + F.interpolate(center, scale_factor=16, mode='bilinear', align_corners=False), + ], 1) + + f = F.dropout2d(f, p=self.dropout_2d, training=self.training) + + # empty mask classifier + img_f = F.adaptive_avg_pool2d(e5, 1).view(x.size(0), -1) + img_f = F.dropout(img_f, p=0.5, training=self.training) + img_logit = self.logit_image(img_f).view(-1) + + return self.logit(f), img_logit + + +class DecoderBlockV7(nn.Module): + def __init__(self, in_channels_x, in_channels_e, middle_channels, out_channels): + super(DecoderBlockV7, self).__init__() + self.in_channels = in_channels_x + in_channels_e + self.conv1 = ConvBn2d(self.in_channels, middle_channels) + self.conv2 = ConvBn2d(middle_channels, 
out_channels) + self.deconv = nn.ConvTranspose2d(in_channels_x, in_channels_x, kernel_size=4, stride=2, padding=1) + self.bn = nn.BatchNorm2d(self.in_channels) + self.spatial_gate = SpatialAttentionGate(out_channels) + self.channel_gate = ChannelAttentionGate(out_channels) + + def forward(self, x, e=None, upsample=True): + #x = F.upsample(x, scale_factor=2, mode='bilinear', align_corners=True) + if upsample: + x = self.deconv(x) + if e is not None: + x = torch.cat([x,e], 1) + x = self.bn(x) + + x = F.relu(self.conv1(x), inplace=True) + x = F.relu(self.conv2(x), inplace=True) + + g1 = self.spatial_gate(x) + g2 = self.channel_gate(x) + x = x*g1 + x*g2 + + return x + +class UNet7(nn.Module): + def __init__(self, encoder_depth, num_classes=1, num_filters=32, dropout_2d=0.5): + super(UNet7, self).__init__() + nf = num_filters + self.name = 'UNet7_'+str(encoder_depth)+'_nf'+str(nf) + self.num_classes = num_classes + self.dropout_2d = dropout_2d + + self.resnet, nbtm = create_resnet(encoder_depth) + + self.encoder1 = EncoderBlock( + nn.Sequential( + nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + ), + 64 + ) + self.encoder2 = EncoderBlock( + nn.Sequential( + nn.MaxPool2d(kernel_size=2, stride=2), + self.resnet.layer1, + ), + nbtm//8 + ) + self.encoder3 = EncoderBlock(self.resnet.layer2, nbtm//4) + self.encoder4 = EncoderBlock(self.resnet.layer3, nbtm//2) + self.encoder5 = EncoderBlock(self.resnet.layer4, nbtm) + + center_block = nn.Sequential( + ConvBn2d(nbtm, nbtm, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ConvBn2d(nbtm, nbtm//2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + #nn.MaxPool2d(kernel_size=2, stride=2) # remove + ) + self.center = EncoderBlock(center_block, nbtm//2) + + self.decoder5 = DecoderBlockV7(nbtm // 2, nbtm, nf * 16, nf*2) + self.decoder4 = DecoderBlockV7(nf*2, nbtm // 2, nf * 8, nf*2) + self.decoder3 = DecoderBlockV7(nf*2, nbtm // 4, nf * 4, nf*2) + self.decoder2 = DecoderBlockV7(nf*2, nbtm // 8, nf * 2, nf*2) + self.decoder1 = DecoderBlockV7(nf*2, 64, nf*2, nf*2) + + self.logit = nn.Sequential( + nn.Conv2d(nf*10, 64, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(64, 1, kernel_size=1, padding=0) + ) + + self.logit_image = nn.Sequential( + nn.Linear(nbtm, 128), + nn.ReLU(inplace=True), + nn.Linear(128, 1), + ) + + def forward(self, x): + e1 = self.encoder1(x) #; print('e1:', e1.size()) + e2 = self.encoder2(e1) #; print('e2:', e2.size()) + e3 = self.encoder3(e2) #; print('e3:', e3.size()) + e4 = self.encoder4(e3) #; print('e4:', e4.size()) + e5 = self.encoder5(e4) #; print('e5:', e5.size()) + + center = self.center(e5) #; print('center:', center.size()) + + d5 = self.decoder5(center, e5, upsample=False) #; print('d5:', d5.size()) + d4 = self.decoder4(d5, e4) #; print('d4:', d4.size()) + d3 = self.decoder3(d4, e3) #; print('d3:', d3.size()) + d2 = self.decoder2(d3, e2) #; print('d2:', d2.size()) + d1 = self.decoder1(d2, e1) #; print('d1:', d1.size()) + + f = torch.cat([ + d1, + F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False), + F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False), + F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False), + F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False), + ], 1) + + f = F.dropout2d(f, p=self.dropout_2d) + + # empty mask classifier + img_f = F.adaptive_avg_pool2d(e5, 1).view(x.size(0), -1) + img_f = F.dropout(img_f, p=0.5, training=self.training) + img_logit 
= self.logit_image(img_f).view(-1) + + return self.logit(f), img_logit + + +class UNet8(nn.Module): + def __init__(self, encoder_depth, num_classes=1, num_filters=32, dropout_2d=0.5): + super(UNet8, self).__init__() + nf = num_filters + self.name = 'UNet8_'+str(encoder_depth)+'_nf'+str(nf) + self.num_classes = num_classes + self.dropout_2d = dropout_2d + + self.resnet, nbtm = create_resnet(encoder_depth) + + self.encoder1 = EncoderBlock( + nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu), + 64 + ) + + self.encoder2 = EncoderBlock(self.resnet.layer1, nbtm//8) + self.encoder3 = EncoderBlock(self.resnet.layer2, nbtm//4) + self.encoder4 = EncoderBlock(self.resnet.layer3, nbtm//2) + self.encoder5 = EncoderBlock(self.resnet.layer4, nbtm) + + center_block = nn.Sequential( + ConvBn2d(nbtm, nbtm, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ConvBn2d(nbtm, nbtm//2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + #nn.MaxPool2d(kernel_size=2, stride=2) # remove + ) + self.center = EncoderBlock(center_block, nbtm//2) + + self.decoder5 = DecoderBlockV7(nbtm // 2, nbtm, nf * 16, nf*2) + self.decoder4 = DecoderBlockV7(nf*2, nbtm // 2, nf * 8, nf*2) + self.decoder3 = DecoderBlockV7(nf*2, nbtm // 4, nf * 4, nf*2) + self.decoder2 = DecoderBlockV7(nf*2, nbtm // 8, nf * 2, nf*2) + self.decoder1 = DecoderBlockV7(nf*2+64, 3, nf*2, nf*2) + + self.logit = nn.Sequential( + nn.Conv2d(nf*10, 64, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(64, 1, kernel_size=1, padding=0) + ) + + self.logit_image = nn.Sequential( + nn.Linear(nbtm, 128), + nn.ReLU(inplace=True), + nn.Linear(128, 1), + ) + + def forward(self, x): + e1 = self.encoder1(x) #; print('e1:', e1.size()) + e2 = self.encoder2(e1) #; print('e2:', e2.size()) + e3 = self.encoder3(e2) #; print('e3:', e3.size()) + e4 = self.encoder4(e3) #; print('e4:', e4.size()) + e5 = self.encoder5(e4) #; print('e5:', e5.size()) + + center = self.center(e5) #; print('center:', center.size()) + + d5 = self.decoder5(center, e5, upsample=False) #; print('d5:', d5.size()) + d4 = self.decoder4(d5, e4) #; print('d4:', d4.size()) + d3 = self.decoder3(d4, e3) #; print('d3:', d3.size()) + d2 = self.decoder2(d3, e2) #; print('d2:', d2.size()) + d1 = self.decoder1(torch.cat([d2, e1], 1), x) #; print('d1:', d1.size()) + + f = torch.cat([ + d1, + F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False), + F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False), + F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False), + F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False), + ], 1) + + f = F.dropout2d(f, p=self.dropout_2d) + + # empty mask classifier + img_f = F.adaptive_avg_pool2d(e5, 1).view(x.size(0), -1) + img_f = F.dropout(img_f, p=0.5, training=self.training) + img_logit = self.logit_image(img_f).view(-1) + + return self.logit(f), img_logit + + +def test(): + model = UNet8(50, num_filters=32).cuda() + inputs = torch.randn(2,3,128,128).cuda() + out, _ = model(inputs) + #print(model) + print(out.size(), _.size()) #, cls_taret.size()) + #print(out) + + +if __name__ == '__main__': + test() diff --git a/examples/trials/kaggle-tgs-salt/postprocessing.py b/examples/trials/kaggle-tgs-salt/postprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..9da2b8a7e7d1f4e4c50372dcbdcd18a50315f800 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/postprocessing.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import numpy as np +import pandas as pd +from scipy import ndimage as ndi +import cv2 + +from utils import get_crop_pad_sequence, run_length_decoding +import settings + +def resize_image(image, target_size): + resized_image = cv2.resize(image, target_size) + return resized_image + +def crop_image(image, target_size): + top_crop, right_crop, bottom_crop, left_crop = get_crop_pad_sequence(image.shape[0] - target_size[0], + image.shape[1] - target_size[1]) + cropped_image = image[top_crop:image.shape[0] - bottom_crop, left_crop:image.shape[1] - right_crop] + return cropped_image + +def binarize(image, threshold): + image_binarized = (image > threshold).astype(np.uint8) + return image_binarized + +def save_pseudo_label_masks(submission_file): + df = pd.read_csv(submission_file, na_filter=False) + print(df.head()) + + img_dir = os.path.join(settings.TEST_DIR, 'masks') + + for i, row in enumerate(df.values): + decoded_mask = run_length_decoding(row[1], (101,101)) + filename = os.path.join(img_dir, '{}.png'.format(row[0])) + rgb_mask = cv2.cvtColor(decoded_mask,cv2.COLOR_GRAY2RGB) + print(filename) + cv2.imwrite(filename, decoded_mask) + if i % 100 == 0: + print(i) + + + +if __name__ == '__main__': + save_pseudo_label_masks('V456_ensemble_1011.csv') \ No newline at end of file diff --git a/examples/trials/kaggle-tgs-salt/predict.py b/examples/trials/kaggle-tgs-salt/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..2780d2570811194f30739d79a822d550baa02e46 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/predict.py @@ -0,0 +1,200 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import glob +import argparse +import numpy as np +import torch +import torch.optim as optim +import torch.nn.functional as F + +import settings +from loader import get_test_loader, add_depth_channel +from models import UNetResNetV4, UNetResNetV5, UNetResNetV6, UNet7, UNet8 +from postprocessing import crop_image, binarize, resize_image +from metrics import intersection_over_union, intersection_over_union_thresholds +from utils import create_submission + +def do_tta_predict(args, model, ckp_path, tta_num=4): + ''' + return 18000x128x128 np array + ''' + model.eval() + preds = [] + meta = None + + # i is tta index, 0: no change, 1: horizon flip, 2: vertical flip, 3: do both + for flip_index in range(tta_num): + print('flip_index:', flip_index) + test_loader = get_test_loader(args.batch_size, index=flip_index, dev_mode=False, pad_mode=args.pad_mode) + meta = test_loader.meta + outputs = None + with torch.no_grad(): + for i, img in enumerate(test_loader): + add_depth_channel(img, args.pad_mode) + img = img.cuda() + output, _ = model(img) + output = torch.sigmoid(output) + if outputs is None: + outputs = output.squeeze() + else: + outputs = torch.cat([outputs, output.squeeze()], 0) + + print('{} / {}'.format(args.batch_size*(i+1), test_loader.num), end='\r') + outputs = outputs.cpu().numpy() + # flip back masks + if flip_index == 1: + outputs = np.flip(outputs, 2) + elif flip_index == 2: + outputs = np.flip(outputs, 1) + elif flip_index == 3: + outputs = np.flip(outputs, 2) + outputs = np.flip(outputs, 1) + #print(outputs.shape) + preds.append(outputs) + + parent_dir = ckp_path+'_out' + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + np_file = os.path.join(parent_dir, 'pred.npy') + + model_pred_result = np.mean(preds, 0) + np.save(np_file, model_pred_result) + + return model_pred_result, meta + +def predict(args, model, checkpoint, out_file): + print('predicting {}...'.format(checkpoint)) + pred, meta = do_tta_predict(args, model, checkpoint, tta_num=2) + print(pred.shape) + y_pred_test = generate_preds(pred, (settings.ORIG_H, settings.ORIG_W), pad_mode=args.pad_mode) + + submission = create_submission(meta, y_pred_test) + submission.to_csv(out_file, index=None, encoding='utf-8') + + +def ensemble(args, model, checkpoints): + preds = [] + meta = None + for checkpoint in checkpoints: + model.load_state_dict(torch.load(checkpoint)) + model = model.cuda() + print('predicting...', checkpoint) + + pred, meta = do_tta_predict(args, model, checkpoint, tta_num=2) + preds.append(pred) + + y_pred_test = generate_preds(np.mean(preds, 0), (settings.ORIG_H, settings.ORIG_W), args.pad_mode) + + submission = create_submission(meta, y_pred_test) + submission.to_csv(args.sub_file, index=None, encoding='utf-8') + +def ensemble_np(args, np_files, save_np=None): + preds = [] + for np_file in np_files: + pred = np.load(np_file) + print(np_file, pred.shape) + preds.append(pred) + + y_pred_test = generate_preds(np.mean(preds, 0), (settings.ORIG_H, settings.ORIG_W), args.pad_mode) + + if save_np is not None: + 
np.save(save_np, np.mean(preds, 0)) + + meta = get_test_loader(args.batch_size, index=0, dev_mode=False, pad_mode=args.pad_mode).meta + + submission = create_submission(meta, y_pred_test) + submission.to_csv(args.sub_file, index=None, encoding='utf-8') + +def generate_preds(outputs, target_size, pad_mode, threshold=0.5): + preds = [] + + for output in outputs: + #print(output.shape) + if pad_mode == 'resize': + cropped = resize_image(output, target_size=target_size) + else: + cropped = crop_image(output, target_size=target_size) + pred = binarize(cropped, threshold) + preds.append(pred) + + return preds + + +def ensemble_predict(args): + model = eval(args.model_name)(args.layers, num_filters=args.nf) + + checkpoints = [ + r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_5.pth', + r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_6.pth', + r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_8.pth', + r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_9.pth' + ] + print(checkpoints) + + ensemble(args, model, checkpoints) + +def ensemble_np_results(args): + np_files1 = glob.glob(r'D:\data\salt\models\depths\UNetResNetV5_50\edge\*pth_out\*.npy') + np_files2 = glob.glob(r'D:\data\salt\models\depths\UNetResNetV4_34\edge\*pth_out\*.npy') + np_files3 = glob.glob(r'D:\data\salt\models\depths\UNetResNetV6_34\edge\*pth_out\*.npy') + np_files6 = glob.glob(r'D:\data\salt\models\ensemble\*.npy') + np_files = np_files1 + np_files2 + np_files3 + np_files6 + print(np_files) + ensemble_np(args, np_files) + +def predict_model(args): + model = eval(args.model_name)(args.layers, num_filters=args.nf) + model_subdir = args.pad_mode + if args.meta_version == 2: + model_subdir = args.pad_mode+'_meta2' + if args.exp_name is None: + model_file = os.path.join(settings.MODEL_DIR, model.name,model_subdir, 'best_{}.pth'.format(args.ifold)) + else: + model_file = os.path.join(settings.MODEL_DIR, args.exp_name, model.name, model_subdir, 'best_{}.pth'.format(args.ifold)) + + if os.path.exists(model_file): + print('loading {}...'.format(model_file)) + model.load_state_dict(torch.load(model_file)) + else: + raise ValueError('model file not found: {}'.format(model_file)) + model = model.cuda() + predict(args, model, model_file, args.sub_file) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Salt segmentation') + parser.add_argument('--model_name', required=True, type=str, help='') + parser.add_argument('--layers', default=34, type=int, help='model layers') + parser.add_argument('--nf', default=32, type=int, help='num_filters param for model') + parser.add_argument('--ifold', required=True, type=int, help='kfold indices') + parser.add_argument('--batch_size', default=32, type=int, help='batch_size') + parser.add_argument('--pad_mode', required=True, choices=['reflect', 'edge', 'resize'], help='pad method') + parser.add_argument('--exp_name', default='depths', type=str, help='exp name') + parser.add_argument('--meta_version', default=2, type=int, help='meta version') + parser.add_argument('--sub_file', default='all_ensemble.csv', type=str, help='submission file') + + args = parser.parse_args() + + predict_model(args) + #ensemble_predict(args) + #ensemble_np_results(args) diff --git a/examples/trials/kaggle-tgs-salt/preprocess.py b/examples/trials/kaggle-tgs-salt/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..1a03315c2437aa06efd9aa4547e162ee62f71aa3 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/preprocess.py @@ -0,0 +1,93 @@ +# Copyright 
(c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import pandas as pd +import numpy as np +import json +import torch +import torch.nn as nn +from keras.preprocessing.image import load_img +from sklearn.model_selection import StratifiedKFold +import settings +import utils + +DATA_DIR = settings.DATA_DIR + +def prepare_metadata(): + print('creating metadata') + meta = utils.generate_metadata(train_images_dir=settings.TRAIN_DIR, + test_images_dir=settings.TEST_DIR, + depths_filepath=settings.DEPTHS_FILE + ) + meta.to_csv(settings.META_FILE, index=None) + +def cov_to_class(val): + for i in range(0, 11): + if val * 10 <= i : + return i + +def generate_stratified_metadata(): + train_df = pd.read_csv(os.path.join(DATA_DIR, "train.csv"), index_col="id", usecols=[0]) + depths_df = pd.read_csv(os.path.join(DATA_DIR, "depths.csv"), index_col="id") + train_df = train_df.join(depths_df) + train_df["masks"] = [np.array(load_img(os.path.join(DATA_DIR, "train", "masks", "{}.png".format(idx)), grayscale=True)) / 255 for idx in train_df.index] + train_df["coverage"] = train_df.masks.map(np.sum) / pow(settings.ORIG_H, 2) + train_df["coverage_class"] = train_df.coverage.map(cov_to_class) + train_df["salt_exists"] = train_df.coverage_class.map(lambda x: 0 if x == 0 else 1) + train_df["is_train"] = 1 + train_df["file_path_image"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_IMG_DIR, '{}.png'.format(x))) + train_df["file_path_mask"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_MASK_DIR, '{}.png'.format(x))) + + train_df.to_csv(os.path.join(settings.DATA_DIR, 'train_meta2.csv'), + columns=['file_path_image','file_path_mask','is_train','z','salt_exists', 'coverage_class', 'coverage']) + train_splits = {} + + kf = StratifiedKFold(n_splits=10) + for i, (train_index, valid_index) in enumerate(kf.split(train_df.index.values.reshape(-1), train_df.coverage_class.values.reshape(-1))): + train_splits[str(i)] = { + 'train_index': train_index.tolist(), + 'val_index': valid_index.tolist() + } + with open(os.path.join(settings.DATA_DIR, 'train_split.json'), 'w') as f: + json.dump(train_splits, f, indent=4) + + print('done') + + +def test(): + meta = pd.read_csv(settings.META_FILE) + meta_train = meta[meta['is_train'] == 1] + print(type(meta_train)) + + cv = utils.KFoldBySortedValue() + for train_idx, valid_idx in cv.split(meta_train[settings.DEPTH_COLUMN].values.reshape(-1)): + print(len(train_idx), len(valid_idx)) + 
print(train_idx[:10]) + print(valid_idx[:10]) + #break + + meta_train_split, meta_valid_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx] + print(type(meta_train_split)) + print(meta_train_split[settings.X_COLUMN].values[:10]) + +if __name__ == '__main__': + generate_stratified_metadata() diff --git a/examples/trials/kaggle-tgs-salt/settings.py b/examples/trials/kaggle-tgs-salt/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d232bb8c7663b6d85c0cdb5260438a3ee79464 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/settings.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os + +DATA_DIR = r'/mnt/chicm/data/salt' + +TRAIN_DIR = os.path.join(DATA_DIR, 'train') +TEST_DIR = os.path.join(DATA_DIR, 'test') + +TRAIN_IMG_DIR = os.path.join(TRAIN_DIR, 'images') +TRAIN_MASK_DIR = os.path.join(TRAIN_DIR, 'masks') +TEST_IMG_DIR = os.path.join(TEST_DIR, 'images') + +LABEL_FILE = os.path.join(DATA_DIR, 'train.csv') +DEPTHS_FILE = os.path.join(DATA_DIR, 'depths.csv') +META_FILE = os.path.join(DATA_DIR, 'meta.csv') + +MODEL_DIR = os.path.join(DATA_DIR, 'models') + +ID_COLUMN = 'id' +DEPTH_COLUMN = 'z' +X_COLUMN = 'file_path_image' +Y_COLUMN = 'file_path_mask' + +H = W = 128 +ORIG_H = ORIG_W = 101 \ No newline at end of file diff --git a/examples/trials/kaggle-tgs-salt/train.py b/examples/trials/kaggle-tgs-salt/train.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1b139b3594e8fbf62d0e48ca6e0b9e02b6a1d6 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/train.py @@ -0,0 +1,258 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
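generate_stratified_metadata above persists its folds as plain JSON (train_split.json) next to train_meta2.csv, keyed by fold index with 'train_index'/'val_index' lists of positional indices. A hedged sketch of consuming one fold, assuming preprocess.py has already been run against the paths configured in settings.py:

```python
import json
import os

import pandas as pd

import settings

meta = pd.read_csv(os.path.join(settings.DATA_DIR, 'train_meta2.csv'))
with open(os.path.join(settings.DATA_DIR, 'train_split.json')) as f:
    splits = json.load(f)

# Each fold stores positional indices into train_meta2.csv.
fold = splits['0']
train_df = meta.iloc[fold['train_index']]
val_df = meta.iloc[fold['val_index']]
print(len(train_df), len(val_df))  # roughly a 9:1 split with n_splits=10
```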
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import argparse +import time + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau + +from loader import get_train_loaders, add_depth_channel +from models import UNetResNetV4, UNetResNetV5, UNetResNetV6 +from lovasz_losses import lovasz_hinge +from focal_loss import FocalLoss2d +from postprocessing import binarize, crop_image, resize_image +from metrics import intersection_over_union, intersection_over_union_thresholds +import settings + +MODEL_DIR = settings.MODEL_DIR +focal_loss2d = FocalLoss2d() + +def weighted_loss(args, output, target, epoch=0): + mask_output, salt_output = output + mask_target, salt_target = target + + lovasz_loss = lovasz_hinge(mask_output, mask_target) + focal_loss = focal_loss2d(mask_output, mask_target) + + focal_weight = 0.2 + + if salt_output is not None and args.train_cls: + salt_loss = F.binary_cross_entropy_with_logits(salt_output, salt_target) + return salt_loss, focal_loss.item(), lovasz_loss.item(), salt_loss.item(), lovasz_loss.item() + focal_loss.item()*focal_weight + + return lovasz_loss+focal_loss*focal_weight, focal_loss.item(), lovasz_loss.item(), 0., lovasz_loss.item() + focal_loss.item()*focal_weight + +def train(args): + print('start training...') + + """@nni.variable(nni.choice('UNetResNetV4', 'UNetResNetV5', 'UNetResNetV6'), name=model_name)""" + model_name = args.model_name + + model = eval(model_name)(args.layers, num_filters=args.nf) + model_subdir = args.pad_mode + if args.meta_version == 2: + model_subdir = args.pad_mode+'_meta2' + if args.exp_name is None: + model_file = os.path.join(MODEL_DIR, model.name,model_subdir, 'best_{}.pth'.format(args.ifold)) + else: + model_file = os.path.join(MODEL_DIR, args.exp_name, model.name, model_subdir, 'best_{}.pth'.format(args.ifold)) + + parent_dir = os.path.dirname(model_file) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + if args.init_ckp is not None: + CKP = args.init_ckp + else: + CKP = model_file + if os.path.exists(CKP): + print('loading {}...'.format(CKP)) + model.load_state_dict(torch.load(CKP)) + model = model.cuda() + + if args.optim == 'Adam': + optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0001) + else: + optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0001) + + train_loader, val_loader = get_train_loaders(args.ifold, batch_size=args.batch_size, dev_mode=args.dev_mode, \ + pad_mode=args.pad_mode, meta_version=args.meta_version, pseudo_label=args.pseudo, depths=args.depths) + + if args.lrs == 'plateau': + lr_scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=args.factor, patience=args.patience, min_lr=args.min_lr) + else: + lr_scheduler = CosineAnnealingLR(optimizer, args.t_max, eta_min=args.min_lr) + + print('epoch | lr | % | loss | avg | f loss | lovaz | iou | iout | best | time | save | salt |') + + best_iout, _iou, _f, _l, _salt, best_mix_score = validate(args, model, val_loader, 
args.start_epoch) + print('val | | | | | {:.4f} | {:.4f} | {:.4f} | {:.4f} | {:.4f} | | | {:.4f} |'.format( + _f, _l, _iou, best_iout, best_iout, _salt)) + if args.val: + return + + model.train() + + if args.lrs == 'plateau': + lr_scheduler.step(best_iout) + else: + lr_scheduler.step() + + for epoch in range(args.start_epoch, args.epochs): + train_loss = 0 + + current_lr = get_lrs(optimizer) + bg = time.time() + for batch_idx, data in enumerate(train_loader): + img, target, salt_target = data + if args.depths: + add_depth_channel(img, args.pad_mode) + img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda() + optimizer.zero_grad() + output, salt_out = model(img) + + loss, *_ = weighted_loss(args, (output, salt_out), (target, salt_target), epoch=epoch) + loss.backward() + + if args.optim == 'Adam' and args.adamw: + wd = 0.0001 + for group in optimizer.param_groups: + for param in group['params']: + param.data = param.data.add(-wd * group['lr'], param.data) + + optimizer.step() + + train_loss += loss.item() + print('\r {:4d} | {:.5f} | {:4d}/{} | {:.4f} | {:.4f} |'.format( + epoch, float(current_lr[0]), args.batch_size*(batch_idx+1), train_loader.num, loss.item(), train_loss/(batch_idx+1)), end='') + + iout, iou, focal_loss, lovaz_loss, salt_loss, mix_score = validate(args, model, val_loader, epoch=epoch) + """@nni.report_intermediate_result(iout)""" + + _save_ckp = '' + if iout > best_iout: + best_iout = iout + torch.save(model.state_dict(), model_file) + _save_ckp = '*' + if args.store_loss_model and mix_score > best_mix_score: + best_mix_score = mix_score + torch.save(model.state_dict(), model_file+'_loss') + _save_ckp += '.' + print(' {:.4f} | {:.4f} | {:.4f} | {:.4f} | {:.4f} | {:.2f} | {:4s} | {:.4f} |'.format( + focal_loss, lovaz_loss, iou, iout, best_iout, (time.time() - bg) / 60, _save_ckp, salt_loss)) + + model.train() + + if args.lrs == 'plateau': + lr_scheduler.step(best_iout) + else: + lr_scheduler.step() + + del model, train_loader, val_loader, optimizer, lr_scheduler + """@nni.report_final_result(best_iout)""" + +def get_lrs(optimizer): + lrs = [] + for pgs in optimizer.state_dict()['param_groups']: + lrs.append(pgs['lr']) + lrs = ['{:.6f}'.format(x) for x in lrs] + return lrs + +def validate(args, model, val_loader, epoch=0, threshold=0.5): + model.eval() + outputs = [] + focal_loss, lovaz_loss, salt_loss, w_loss = 0, 0, 0, 0 + with torch.no_grad(): + for img, target, salt_target in val_loader: + if args.depths: + add_depth_channel(img, args.pad_mode) + img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda() + output, salt_out = model(img) + + _, floss, lovaz, _salt_loss, _w_loss = weighted_loss(args, (output, salt_out), (target, salt_target), epoch=epoch) + focal_loss += floss + lovaz_loss += lovaz + salt_loss += _salt_loss + w_loss += _w_loss + output = torch.sigmoid(output) + + for o in output.cpu(): + outputs.append(o.squeeze().numpy()) + + n_batches = val_loader.num // args.batch_size if val_loader.num % args.batch_size == 0 else val_loader.num // args.batch_size + 1 + + # y_pred, list of np array, each np array's shape is 101,101 + y_pred = generate_preds(args, outputs, (settings.ORIG_H, settings.ORIG_W), threshold) + + iou_score = intersection_over_union(val_loader.y_true, y_pred) + iout_score = intersection_over_union_thresholds(val_loader.y_true, y_pred) + + return iout_score, iou_score, focal_loss / n_batches, lovaz_loss / n_batches, salt_loss / n_batches, iout_score*4 - w_loss + + +def generate_preds(args, outputs, target_size, 
threshold=0.5): + preds = [] + + for output in outputs: + if args.pad_mode == 'resize': + cropped = resize_image(output, target_size=target_size) + else: + cropped = crop_image(output, target_size=target_size) + pred = binarize(cropped, threshold) + preds.append(pred) + + return preds + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='TGS Salt segmentation') + parser.add_argument('--layers', default=34, type=int, help='model layers') + parser.add_argument('--nf', default=32, type=int, help='num_filters param for model') + parser.add_argument('--lr', default=0.001, type=float, help='learning rate') + parser.add_argument('--min_lr', default=0.0001, type=float, help='min learning rate') + parser.add_argument('--ifolds', default='0', type=str, help='kfold indices') + parser.add_argument('--batch_size', default=32, type=int, help='batch_size') + parser.add_argument('--start_epoch', default=0, type=int, help='start epoch') + parser.add_argument('--epochs', default=200, type=int, help='epoch') + parser.add_argument('--optim', default='SGD', choices=['SGD', 'Adam'], help='optimizer') + parser.add_argument('--lrs', default='cosine', choices=['cosine', 'plateau'], help='LR scheduler') + parser.add_argument('--patience', default=6, type=int, help='lr scheduler patience') + parser.add_argument('--factor', default=0.5, type=float, help='lr scheduler factor') + parser.add_argument('--t_max', default=15, type=int, help='T_max of cosine annealing scheduler') + parser.add_argument('--pad_mode', default='edge', choices=['reflect', 'edge', 'resize'], help='pad method') + parser.add_argument('--exp_name', default=None, type=str, help='exp name') + parser.add_argument('--model_name', default='UNetResNetV4', type=str, help='model class name') + parser.add_argument('--init_ckp', default=None, type=str, help='resume from checkpoint path') + parser.add_argument('--val', action='store_true') + parser.add_argument('--store_loss_model', action='store_true') + parser.add_argument('--train_cls', action='store_true') + parser.add_argument('--meta_version', default=2, type=int, help='meta version') + parser.add_argument('--pseudo', action='store_true') + parser.add_argument('--depths', action='store_true') + parser.add_argument('--dev_mode', action='store_true') + parser.add_argument('--adamw', action='store_true') + + args = parser.parse_args() + + '''@nni.get_next_parameter()''' + + print(args) + ifolds = [int(x) for x in args.ifolds.split(',')] + print(ifolds) + + for i in ifolds: + args.ifold = i + train(args) diff --git a/examples/trials/kaggle-tgs-salt/utils.py b/examples/trials/kaggle-tgs-salt/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..55db8744b9f5f45809f670c81b7914dd57e55b98 --- /dev/null +++ b/examples/trials/kaggle-tgs-salt/utils.py @@ -0,0 +1,179 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
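train.py above switches schedulers via --lrs: ReduceLROnPlateau is stepped on best_iout, while CosineAnnealingLR is stepped once per epoch unconditionally. A minimal sketch of the cosine schedule using the argparse defaults shown above (lr=0.001, t_max=15, min_lr=0.0001); the tiny linear model is only a stand-in to give the optimizer some parameters:

```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

model = torch.nn.Linear(4, 1)  # stand-in for UNetResNetV4
optimizer = torch.optim.SGD(model.parameters(), lr=0.001,
                            momentum=0.9, weight_decay=0.0001)
scheduler = CosineAnnealingLR(optimizer, T_max=15, eta_min=0.0001)

for epoch in range(30):
    # ... one epoch of training and validation would run here ...
    scheduler.step()
    print(epoch, optimizer.param_groups[0]['lr'])  # LR oscillates between 0.001 and 0.0001
```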
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import json +import sys +import time +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +from pycocotools import mask as cocomask +from sklearn.model_selection import KFold + +import settings + +def create_submission(meta, predictions): + output = [] + for image_id, mask in zip(meta['id'].values, predictions): + rle_encoded = ' '.join(str(rle) for rle in run_length_encoding(mask)) + output.append([image_id, rle_encoded]) + + submission = pd.DataFrame(output, columns=['id', 'rle_mask']).astype(str) + return submission + + +def encode_rle(predictions): + return [run_length_encoding(mask) for mask in predictions] + + +def read_masks(img_ids): + masks = [] + for img_id in img_ids: + base_filename = '{}.png'.format(img_id) + mask = Image.open(os.path.join(settings.TRAIN_MASK_DIR, base_filename)) + mask = np.asarray(mask.convert('L').point(lambda x: 0 if x < 128 else 1)).astype(np.uint8) + masks.append(mask) + return masks + + +def run_length_encoding(x): + bs = np.where(x.T.flatten())[0] + + rle = [] + prev = -2 + for b in bs: + if (b > prev + 1): rle.extend((b + 1, 0)) + rle[-1] += 1 + prev = b + return rle + + +def run_length_decoding(mask_rle, shape): + s = mask_rle.split() + starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])] + starts -= 1 + ends = starts + lengths + img = np.zeros(shape[1] * shape[0], dtype=np.uint8) + for lo, hi in zip(starts, ends): + img[lo:hi] = 255 + return img.reshape((shape[1], shape[0])).T + +def get_salt_existence(): + train_mask = pd.read_csv(settings.LABEL_FILE) + salt_exists_dict = {} + for row in train_mask.values: + salt_exists_dict[row[0]] = 0 if (row[1] is np.nan or len(row[1]) < 1) else 1 + return salt_exists_dict + +def generate_metadata(train_images_dir, test_images_dir, depths_filepath): + depths = pd.read_csv(depths_filepath) + salt_exists_dict = get_salt_existence() + + metadata = {} + for filename in tqdm(os.listdir(os.path.join(train_images_dir, 'images'))): + image_filepath = os.path.join(train_images_dir, 'images', filename) + mask_filepath = os.path.join(train_images_dir, 'masks', filename) + image_id = filename.split('.')[0] + depth = depths[depths['id'] == image_id]['z'].values[0] + + metadata.setdefault('file_path_image', []).append(image_filepath) + metadata.setdefault('file_path_mask', []).append(mask_filepath) + metadata.setdefault('is_train', []).append(1) + metadata.setdefault('id', []).append(image_id) + metadata.setdefault('z', []).append(depth) + metadata.setdefault('salt_exists', []).append(salt_exists_dict[image_id]) + + for filename in tqdm(os.listdir(os.path.join(test_images_dir, 'images'))): + image_filepath = os.path.join(test_images_dir, 'images', filename) + image_id = filename.split('.')[0] + depth = depths[depths['id'] == image_id]['z'].values[0] + + metadata.setdefault('file_path_image', []).append(image_filepath) + metadata.setdefault('file_path_mask', []).append(None) + metadata.setdefault('is_train', []).append(0) + metadata.setdefault('id', []).append(image_id) + 
metadata.setdefault('z', []).append(depth) + metadata.setdefault('salt_exists', []).append(0) + + return pd.DataFrame(metadata) + +def rle_from_binary(prediction): + prediction = np.asfortranarray(prediction) + return cocomask.encode(prediction) + + +def binary_from_rle(rle): + return cocomask.decode(rle) + + +def get_segmentations(labeled): + nr_true = labeled.max() + segmentations = [] + for i in range(1, nr_true + 1): + msk = labeled == i + segmentation = rle_from_binary(msk.astype('uint8')) + segmentation['counts'] = segmentation['counts'].decode("UTF-8") + segmentations.append(segmentation) + return segmentations + + +def get_crop_pad_sequence(vertical, horizontal): + top = int(vertical / 2) + bottom = vertical - top + right = int(horizontal / 2) + left = horizontal - right + return (top, right, bottom, left) + + +def get_nfold_split(ifold, nfold=10, meta_version=1): + if meta_version == 2: + return get_nfold_split2(ifold, nfold) + + meta = pd.read_csv(settings.META_FILE, na_filter=False) + meta_train = meta[meta['is_train'] == 1] + + kf = KFold(n_splits=nfold) + for i, (train_index, valid_index) in enumerate(kf.split(meta_train[settings.ID_COLUMN].values.reshape(-1))): + if i == ifold: + break + return meta_train.iloc[train_index], meta_train.iloc[valid_index] + +def get_nfold_split2(ifold, nfold=10): + meta_train = pd.read_csv(os.path.join(settings.DATA_DIR, 'train_meta2.csv')) + + with open(os.path.join(settings.DATA_DIR, 'train_split.json'), 'r') as f: + train_splits = json.load(f) + train_index = train_splits[str(ifold)]['train_index'] + valid_index = train_splits[str(ifold)]['val_index'] + + return meta_train.iloc[train_index], meta_train.iloc[valid_index] + + +def get_test_meta(): + meta = pd.read_csv(settings.META_FILE, na_filter=False) + test_meta = meta[meta['is_train'] == 0] + print(len(test_meta.values)) + return test_meta + +if __name__ == '__main__': + get_nfold_split(2) diff --git a/examples/trials/mnist-advisor/config_bohb.yml b/examples/trials/mnist-advisor/config_bohb.yml new file mode 100644 index 0000000000000000000000000000000000000000..a7502ed6b9d818ac2ef8f53d857427f32e9a2ab7 --- /dev/null +++ b/examples/trials/mnist-advisor/config_bohb.yml @@ -0,0 +1,18 @@ +# Run following command first to install dependencies of BOHB tuner: +# $ python3 -m pip install nni[BOHB] + +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 10h +maxTrialNumber: 1000 +advisor: + name: BOHB + classArgs: + max_budget: 27 + min_budget: 1 + eta: 3 + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-advisor/config_hyperband.yml b/examples/trials/mnist-advisor/config_hyperband.yml new file mode 100644 index 0000000000000000000000000000000000000000..2cf2de9b32bd41c3a277365a6577d6c136048ce8 --- /dev/null +++ b/examples/trials/mnist-advisor/config_hyperband.yml @@ -0,0 +1,16 @@ +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 100h +maxTrialNumber: 10000 +advisor: + name: Hyperband + classArgs: + R: 100 # the maximum trial budget (could be the number of mini-batches or epochs) can be + # allocated to a trial. Each trial should use trial budget to control how long it runs. 
+ eta: 3 # proportion of discarded trials + optimize_mode: maximize # maximize or minimize + exec_mode: parallelism # serial or parallelism +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-advisor/mnist.py b/examples/trials/mnist-advisor/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..52094c1a2e1bdeb910161212142117f0fcee6638 --- /dev/null +++ b/examples/trials/mnist-advisor/mnist.py @@ -0,0 +1,239 @@ +"""A deep MNIST classifier using convolutional layers.""" + +import argparse +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +import nni + +FLAGS = None + +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + ''' + MnistNetwork is for initializing and building basic network for mnist. + ''' + def __init__(self, + channel_1_num, + channel_2_num, + conv_size, + hidden_size, + pool_size, + learning_rate, + x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + self.conv_size = conv_size + self.hidden_size = hidden_size + self.pool_size = pool_size + self.learning_rate = learning_rate + self.x_dim = x_dim + self.y_dim = y_dim + + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + + self.train_step = None + self.accuracy = None + + def build_network(self): + ''' + Building network for mnist + ''' + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print( + 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. + with tf.name_scope('conv1'): + w_conv1 = weight_variable( + [self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) + + # Pooling layer - downsamples by 2X. + with tf.name_scope('pool1'): + h_pool1 = max_pool(h_conv1, self.pool_size) + + # Second convolutional layer -- maps 32 feature maps to 64. + with tf.name_scope('conv2'): + w_conv2 = weight_variable([self.conv_size, self.conv_size, + self.channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) + + # Second pooling layer. + with tf.name_scope('pool2'): + h_pool2 = max_pool(h_conv2, self.pool_size) + + # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image + # is down to 7x7x64 feature maps -- maps this to 1024 features. 
+ last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + w_fc1 = weight_variable( + [last_dim * last_dim * self.channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + + h_pool2_flat = tf.reshape( + h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) + + # Dropout - controls the complexity of the model, prevents co-adaptation of features. + with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + + # Map the 1024 features to 10 classes, one for each digit + with tf.name_scope('fc2'): + w_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 + + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer( + self.learning_rate).minimize(cross_entropy) + + with tf.name_scope('accuracy'): + correct_prediction = tf.equal( + tf.argmax(y_conv, 1), tf.argmax(self.labels, 1)) + self.accuracy = tf.reduce_mean( + tf.cast(correct_prediction, tf.float32)) + + +def conv2d(x_input, w_matrix): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') + + +def max_pool(x_input, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + ''' + Main function, build mnist network, run and send result to NNI. 
+ ''' + # Import data + mnist = download_mnist_retry(params['data_dir']) + print('Mnist download data done.') + logger.debug('Mnist download data done.') + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'], + channel_2_num=params['channel_2_num'], + conv_size=params['conv_size'], + hidden_size=params['hidden_size'], + pool_size=params['pool_size'], + learning_rate=params['learning_rate']) + mnist_network.build_network() + logger.debug('Mnist build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + for i in range(params['batch_num']): + batch = mnist.train.next_batch(params['batch_size']) + mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], + mnist_network.labels: batch[1], + mnist_network.keep_prob: 1 - params['dropout_rate']} + ) + + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + nni.report_intermediate_result(test_acc) + logger.debug('test accuracy %g', test_acc) + logger.debug('Pipe send intermediate result done.') + + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + nni.report_final_result(test_acc) + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + +def get_params(): + ''' Get parameters from command line ''' + parser = argparse.ArgumentParser() + parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory") + parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate") + parser.add_argument("--channel_1_num", type=int, default=32) + parser.add_argument("--channel_2_num", type=int, default=64) + parser.add_argument("--conv_size", type=int, default=5) + parser.add_argument("--pool_size", type=int, default=2) + parser.add_argument("--hidden_size", type=int, default=1024) + parser.add_argument("--learning_rate", type=float, default=1e-4) + parser.add_argument("--batch_num", type=int, default=2700) + parser.add_argument("--batch_size", type=int, default=32) + + args, _ = parser.parse_known_args() + return args + +if __name__ == '__main__': + try: + # get parameters form tuner + tuner_params = nni.get_next_parameter() + logger.debug(tuner_params) + tuner_params['batch_num'] = tuner_params['TRIAL_BUDGET'] * 100 + params = vars(get_params()) + params.update(tuner_params) + main(params) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/mnist-advisor/search_space.json b/examples/trials/mnist-advisor/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..540f2708cb888baa2a9700436f9879724e049328 --- /dev/null +++ b/examples/trials/mnist-advisor/search_space.json @@ -0,0 +1,7 @@ +{ + "dropout_rate":{"_type":"uniform","_value":[0.5,0.9]}, + "conv_size":{"_type":"choice","_value":[2,3,5,7]}, + "hidden_size":{"_type":"choice","_value":[124, 512, 1024]}, + "batch_size": {"_type":"choice","_value":[8, 16, 32, 64]}, + "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 
0.1]} +} diff --git a/examples/trials/mnist-annotation/config.yml b/examples/trials/mnist-annotation/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..937ec916de79c6a394f59bcb779127747c21916c --- /dev/null +++ b/examples/trials/mnist-annotation/config.yml @@ -0,0 +1,12 @@ +useAnnotation: true +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxTrialNumber: 10 +maxExperimentDuration: 1h +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-annotation/config_windows.yml b/examples/trials/mnist-annotation/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..e6ab6536ff6393a785dd9196d2417c5f527a792b --- /dev/null +++ b/examples/trials/mnist-annotation/config_windows.yml @@ -0,0 +1,12 @@ +useAnnotation: true +trialCommand: python mnist.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxTrialNumber: 10 +maxExperimentDuration: 1h +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-annotation/mnist.py b/examples/trials/mnist-annotation/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..dba535ed1d18209e795d57fdf79898d34b66b5ed --- /dev/null +++ b/examples/trials/mnist-annotation/mnist.py @@ -0,0 +1,247 @@ +"""A deep MNIST classifier using convolutional layers.""" + +import argparse +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +FLAGS = None + +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + ''' + MnistNetwork is for initializing and building basic network for mnist. + ''' + def __init__(self, + channel_1_num, + channel_2_num, + conv_size, + hidden_size, + pool_size, + learning_rate, + x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + """@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)""" + self.conv_size = conv_size + """@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)""" + self.hidden_size = hidden_size + self.pool_size = pool_size + """@nni.variable(nni.loguniform(0.0001, 0.1), name=self.learning_rate)""" + self.learning_rate = learning_rate + self.x_dim = x_dim + self.y_dim = y_dim + + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + + self.train_step = None + self.accuracy = None + + def build_network(self): + ''' + Building network for mnist + ''' + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print( + 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. 
+ with tf.name_scope('conv1'): + w_conv1 = weight_variable( + [self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + """@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)""" + h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) + + # Pooling layer - downsamples by 2X. + with tf.name_scope('pool1'): + """@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)""" + h_pool1 = max_pool(h_conv1, self.pool_size) + + # Second convolutional layer -- maps 32 feature maps to 64. + with tf.name_scope('conv2'): + w_conv2 = weight_variable([self.conv_size, self.conv_size, + self.channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) + + # Second pooling layer. + with tf.name_scope('pool2'): + h_pool2 = max_pool(h_conv2, self.pool_size) + + # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image + # is down to 7x7x64 feature maps -- maps this to 1024 features. + last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + w_fc1 = weight_variable( + [last_dim * last_dim * self.channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + + h_pool2_flat = tf.reshape( + h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) + + # Dropout - controls the complexity of the model, prevents co-adaptation of features. + with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + + # Map the 1024 features to 10 classes, one for each digit + with tf.name_scope('fc2'): + w_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 + + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer( + self.learning_rate).minimize(cross_entropy) + + with tf.name_scope('accuracy'): + correct_prediction = tf.equal( + tf.argmax(y_conv, 1), tf.argmax(self.labels, 1)) + self.accuracy = tf.reduce_mean( + tf.cast(correct_prediction, tf.float32)) + + +def conv2d(x_input, w_matrix): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') + + +def max_pool(x_input, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def avg_pool(x_input, pool_size): + return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return 
input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + ''' + Main function, build mnist network, run and send result to NNI. + ''' + # Import data + mnist = download_mnist_retry(params['data_dir']) + print('Mnist download data done.') + logger.debug('Mnist download data done.') + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'], + channel_2_num=params['channel_2_num'], + conv_size=params['conv_size'], + hidden_size=params['hidden_size'], + pool_size=params['pool_size'], + learning_rate=params['learning_rate']) + mnist_network.build_network() + logger.debug('Mnist build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + """@nni.variable(nni.choice(16, 32), name=batch_size)""" + batch_size = params['batch_size'] + for i in range(params['batch_num']): + batch = mnist.train.next_batch(batch_size) + """@nni.variable(nni.choice(0.5, 0.9), name=dropout_rate)""" + dropout_rate = params['dropout_rate'] + mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], + mnist_network.labels: batch[1], + mnist_network.keep_prob: 1 - dropout_rate} + ) + + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_intermediate_result(test_acc)""" + logger.debug('test accuracy %g', test_acc) + logger.debug('Pipe send intermediate result done.') + + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_final_result(test_acc)""" + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + +def get_params(): + ''' Get parameters from command line ''' + parser = argparse.ArgumentParser() + parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory") + parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate") + parser.add_argument("--channel_1_num", type=int, default=32) + parser.add_argument("--channel_2_num", type=int, default=64) + parser.add_argument("--conv_size", type=int, default=5) + parser.add_argument("--pool_size", type=int, default=2) + parser.add_argument("--hidden_size", type=int, default=1024) + parser.add_argument("--learning_rate", type=float, default=1e-4) + parser.add_argument("--batch_num", type=int, default=2000) + parser.add_argument("--batch_size", type=int, default=32) + + args, _ = parser.parse_known_args() + return args + +if __name__ == '__main__': + '''@nni.get_next_parameter()''' + try: + main(vars(get_params())) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/mnist-batch-tune-keras/config.yml b/examples/trials/mnist-batch-tune-keras/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..fae372a7aa778a6c15e9d9e7ffbbeb75c89ec0bf --- /dev/null +++ b/examples/trials/mnist-batch-tune-keras/config.yml @@ -0,0 +1,10 @@ +searchSpaceFile: search_space.json 
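The docstring annotations in the mnist-annotation example above ("""@nni.variable(...)""", """@nni.report_intermediate_result(...)""") are rewritten by NNI at run time when useAnnotation is true. For comparison, a hedged sketch of the equivalent plain-API style, which the mnist-advisor example uses; the default values here are illustrative only:

```python
import nni

# Defaults used when no tuner supplies parameters (illustrative values).
params = {'dropout_rate': 0.5, 'batch_size': 32}
params.update(nni.get_next_parameter() or {})  # tuner-chosen values, if any

dropout_rate = params['dropout_rate']  # replaces @nni.variable(nni.choice(0.5, 0.9), ...)
batch_size = params['batch_size']      # replaces @nni.variable(nni.choice(16, 32), ...)

# ... training loop ...
# nni.report_intermediate_result(test_acc)  # replaces the annotation form
# nni.report_final_result(test_acc)
```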
+trialCommand: python3 mnist-keras.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxTrialNumber: 10 +maxExperimentDuration: 1h +tuner: + name: BatchTuner +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-batch-tune-keras/config_windows.yml b/examples/trials/mnist-batch-tune-keras/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..43cedb1fbc06b460aaaeeaccfaa44c33210a2b1b --- /dev/null +++ b/examples/trials/mnist-batch-tune-keras/config_windows.yml @@ -0,0 +1,10 @@ +searchSpaceFile: search_space.json +trialCommand: python mnist-keras.py +trialGpuNumber: 0 +trialConcurrency: 1 +maxTrialNumber: 10 +maxExperimentDuration: 1h +tuner: + name: BatchTuner +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/mnist-batch-tune-keras/mnist-keras.py b/examples/trials/mnist-batch-tune-keras/mnist-keras.py new file mode 100644 index 0000000000000000000000000000000000000000..40aa9f33e468e940340bb70bcec14c24a7949238 --- /dev/null +++ b/examples/trials/mnist-batch-tune-keras/mnist-keras.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
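The two configs above launch BatchTuner through `nnictl create --config config.yml`. For completeness, a hedged sketch of the rough Python-API equivalent, assuming an nni 2.x release where the nni.experiment API exists; exact field names may differ slightly across versions:

```python
from nni.experiment import Experiment

experiment = Experiment('local')
experiment.config.trial_command = 'python3 mnist-keras.py'
experiment.config.trial_code_directory = '.'
experiment.config.search_space_file = 'search_space.json'
experiment.config.tuner.name = 'BatchTuner'      # runs each listed combination once
experiment.config.max_trial_number = 10
experiment.config.max_experiment_duration = '1h'
experiment.config.trial_concurrency = 1
experiment.run(8080)  # serve the web UI on port 8080
```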
+ +import argparse +import logging + +import os +import keras +import numpy as np +from keras import backend as K +from keras.callbacks import TensorBoard +from keras.datasets import mnist +from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D +from keras.models import Sequential + +import nni + +LOG = logging.getLogger('mnist_keras') +K.set_image_data_format('channels_last') +TENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR'] + +H, W = 28, 28 +NUM_CLASSES = 10 + +def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES): + ''' + Create simple convolutional model + ''' + layers = [ + Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape), + Conv2D(64, (3, 3), activation='relu'), + MaxPooling2D(pool_size=(2, 2)), + Flatten(), + Dense(100, activation='relu'), + Dense(num_classes, activation='softmax') + ] + + model = Sequential(layers) + + if hyper_params['optimizer'] == 'Adam': + optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate']) + else: + optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9) + model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy']) + + return model + +def load_mnist_data(args): + ''' + Load MNIST dataset + ''' + (x_train, y_train), (x_test, y_test) = mnist.load_data() + + x_train = (np.expand_dims(x_train, -1).astype(np.float32) / 255.)[:args.num_train] + x_test = (np.expand_dims(x_test, -1).astype(np.float32) / 255.)[:args.num_test] + y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] + y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] + + LOG.debug('x_train shape: %s', (x_train.shape,)) + LOG.debug('x_test shape: %s', (x_test.shape,)) + + return x_train, y_train, x_test, y_test + +class SendMetrics(keras.callbacks.Callback): + ''' + Keras callback to send metrics to NNI framework + ''' + def on_epoch_end(self, epoch, logs={}): + ''' + Run on end of each epoch + ''' + LOG.debug(logs) + # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy` + if 'val_acc' in logs: + nni.report_intermediate_result(logs['val_acc']) + else: + nni.report_intermediate_result(logs['val_accuracy']) + +def train(args, params): + ''' + Train model + ''' + x_train, y_train, x_test, y_test = load_mnist_data(args) + model = create_mnist_model(params) + + # nni + model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1, + validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)]) + + _, acc = model.evaluate(x_test, y_test, verbose=0) + LOG.debug('Final result is: %g', acc) + nni.report_final_result(acc) + +def generate_default_params(): + ''' + Generate default hyper parameters + ''' + return { + 'optimizer': 'Adam', + 'learning_rate': 0.001 + } + +if __name__ == '__main__': + PARSER = argparse.ArgumentParser() + PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False) + PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False) + PARSER.add_argument("--num_train", type=int, default=60000, help="Number of train samples to be used, maximum 60000", required=False) + PARSER.add_argument("--num_test", type=int, default=10000, help="Number of test samples to be used, maximum 10000", required=False) + + ARGS, UNKNOWN = PARSER.parse_known_args() + + try: + # get parameters from tuner + # RECEIVED_PARAMS = {"optimizer": "Adam", "learning_rate": 0.00001} +
RECEIVED_PARAMS = nni.get_next_parameter() + LOG.debug(RECEIVED_PARAMS) + PARAMS = generate_default_params() + PARAMS.update(RECEIVED_PARAMS) + # train + train(ARGS, PARAMS) + except Exception as e: + LOG.exception(e) + raise diff --git a/examples/trials/mnist-batch-tune-keras/requirements.txt b/examples/trials/mnist-batch-tune-keras/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..95bd0a423f69d6d5704e2dd0621724d9f39258b8 --- /dev/null +++ b/examples/trials/mnist-batch-tune-keras/requirements.txt @@ -0,0 +1 @@ +keras>=2.2.4 diff --git a/examples/trials/mnist-batch-tune-keras/search_space.json b/examples/trials/mnist-batch-tune-keras/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..a802136a273cc45c7b546915ca819d832f9db6a6 --- /dev/null +++ b/examples/trials/mnist-batch-tune-keras/search_space.json @@ -0,0 +1,12 @@ +{ + "combine_params": + { + "_type" : "choice", + "_value" : [{"optimizer": "Adam", "learning_rate": 0.00001}, + {"optimizer": "Adam", "learning_rate": 0.0001}, + {"optimizer": "Adam", "learning_rate": 0.001}, + {"optimizer": "SGD", "learning_rate": 0.01}, + {"optimizer": "SGD", "learning_rate": 0.005}, + {"optimizer": "SGD", "learning_rate": 0.0002}] + } +} \ No newline at end of file diff --git a/examples/trials/mnist-distributed-pytorch/config_kubeflow.yml b/examples/trials/mnist-distributed-pytorch/config_kubeflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..98f12a2cd7ff952d30132f850e428a7afcec817d --- /dev/null +++ b/examples/trials/mnist-distributed-pytorch/config_kubeflow.yml @@ -0,0 +1,40 @@ +authorName: default +experimentName: example_mnist_distributed_pytorch +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai, kubeflow +trainingServicePlatform: kubeflow +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: minimize +trial: + codeDir: . + master: + replicas: 1 + command: python3 dist_mnist.py + gpuNum: 1 + cpuNum: 1 + memoryMB: 2048 + image: msranni/nni:latest + worker: + replicas: 1 + command: python3 dist_mnist.py + gpuNum: 0 + cpuNum: 1 + memoryMB: 2048 + image: msranni/nni:latest +kubeflowConfig: + operator: pytorch-operator + apiVersion: v1alpha2 + nfs: + # Your NFS server IP, like 10.10.10.10 + server: {your_nfs_server_ip} + # Your NFS server export path, like /var/nfs/nni + path: {your_nfs_server_export_path} diff --git a/examples/trials/mnist-distributed-pytorch/dist_mnist.py b/examples/trials/mnist-distributed-pytorch/dist_mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..f69a2c9137c1a994c6fbef398b6eed8334ba532f --- /dev/null +++ b/examples/trials/mnist-distributed-pytorch/dist_mnist.py @@ -0,0 +1,174 @@ +# Copyright 2018 The Kubeflow Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
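+
+# NOTE (assumption about the launcher): when started by Kubeflow's
+# pytorch-operator (see config_kubeflow.yml above), each pod receives
+# MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE in its environment, and
+# dist.init_process_group() below relies on them to form the process group.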
+
+#
+# NNI (https://github.com/Microsoft/nni) modified this code to show how to
+# integrate distributed pytorch training with NNI SDK
+#
+
+import os
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import nni
+import logging
+
+from math import ceil
+from random import Random
+from torch.autograd import Variable
+from torchvision import datasets, transforms
+
+logger = logging.getLogger('nni_pytorch_dist')
+
+class Partition(object):
+    """ Dataset-like object, but only accesses a subset of it. """
+
+    def __init__(self, data, index):
+        self.data = data
+        self.index = index
+
+    def __len__(self):
+        return len(self.index)
+
+    def __getitem__(self, index):
+        data_idx = self.index[index]
+        return self.data[data_idx]
+
+
+class DataPartitioner(object):
+    """ Partitions a dataset into different chunks. """
+
+    def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
+        self.data = data
+        self.partitions = []
+        rng = Random()
+        rng.seed(seed)
+        data_len = len(data)
+        indexes = [x for x in range(0, data_len)]
+        rng.shuffle(indexes)
+
+        for frac in sizes:
+            part_len = int(frac * data_len)
+            self.partitions.append(indexes[0:part_len])
+            indexes = indexes[part_len:]
+
+    def use(self, partition):
+        return Partition(self.data, self.partitions[partition])
+
+
+class Net(nn.Module):
+    """ Network architecture. """
+
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
+        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+        self.conv2_drop = nn.Dropout2d()
+        self.fc1 = nn.Linear(320, 50)
+        self.fc2 = nn.Linear(50, 10)
+
+    def forward(self, x):
+        x = F.relu(F.max_pool2d(self.conv1(x), 2))
+        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
+        x = x.view(-1, 320)
+        x = F.relu(self.fc1(x))
+        x = F.dropout(x, training=self.training)
+        x = self.fc2(x)
+        return F.log_softmax(x, dim=1)
+
+
+def partition_dataset():
+    """ Partitioning MNIST """
+    dataset = datasets.MNIST(
+        './data',
+        train=True,
+        download=True,
+        transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307, ), (0.3081, ))
+        ]))
+    size = dist.get_world_size()
+    bsz = 128 / float(size)
+    partition_sizes = [1.0 / size for _ in range(size)]
+    partition = DataPartitioner(dataset, partition_sizes)
+    partition = partition.use(dist.get_rank())
+    train_set = torch.utils.data.DataLoader(
+        partition, batch_size=int(bsz), shuffle=True)
+    return train_set, bsz
+
+
+def average_gradients(model):
+    """ Gradient averaging. """
+    size = float(dist.get_world_size())
+    for param in model.parameters():
+        # ReduceOp is the current name; the old reduce_op alias is deprecated
+        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
+        param.grad.data /= size
+
+
+def run(params):
+    """ Distributed Synchronous SGD Example """
+    rank = dist.get_rank()
+    torch.manual_seed(1234)
+    train_set, bsz = partition_dataset()
+    model = Net()
+    # move the model to GPU here if one is available, e.g. model = model.cuda()
+    optimizer = optim.SGD(model.parameters(), lr=params['learning_rate'], momentum=params['momentum'])
+
+    num_batches = ceil(len(train_set.dataset) / float(bsz))
+    total_loss = 0.0
+    for epoch in range(3):
+        epoch_loss = 0.0
+        for data, target in train_set:
+            data, target = Variable(data), Variable(target)
+            optimizer.zero_grad()
+            output = model(data)
+            loss = F.nll_loss(output, target)
+            epoch_loss += loss.item()
+            loss.backward()
+            average_gradients(model)
+            optimizer.step()
+        # logger.debug('Rank: %s, epoch: %s: %s', rank, epoch, epoch_loss / num_batches)
+        if rank == 0:
+            nni.report_intermediate_result(epoch_loss / num_batches)
+        total_loss += (epoch_loss / num_batches)
+    total_loss /= 3
+    logger.debug('Final loss: {}'.format(total_loss))
+    if rank == 0:
+        nni.report_final_result(total_loss)
+
+
+def init_processes(fn, params, backend='gloo'):
+    """ Initialize the distributed environment. """
+    # the original 'tcp' backend was removed in PyTorch 1.0; 'gloo' is its replacement
+    dist.init_process_group(backend)
+    fn(params)
+
+def generate_default_params():
+    '''
+    Generate default parameters for mnist network.
+    '''
+    params = {
+        'learning_rate': 0.01,
+        'momentum': 0.5}
+    return params
+
+if __name__ == "__main__":
+    RCV_PARAMS = nni.get_next_parameter()
+    logger.debug(RCV_PARAMS)
+    params = generate_default_params()
+    params.update(RCV_PARAMS)
+    init_processes(run, params)
+
+
diff --git a/examples/trials/mnist-distributed-pytorch/search_space.json b/examples/trials/mnist-distributed-pytorch/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..df9f30e841db3c31cffa010d7cdfc54e90bd0297
--- /dev/null
+++ b/examples/trials/mnist-distributed-pytorch/search_space.json
@@ -0,0 +1,4 @@
+{
+    "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]},
+    "momentum":{"_type":"choice","_value":[0.4, 0.5, 0.6]}
+}
diff --git a/examples/trials/mnist-distributed-tfv1/config_kubeflow.yml b/examples/trials/mnist-distributed-tfv1/config_kubeflow.yml
new file mode 100644
index 0000000000000000000000000000000000000000..96d32bf59a127b6d09d2bf0ea40de4f25b50d1c0
--- /dev/null
+++ b/examples/trials/mnist-distributed-tfv1/config_kubeflow.yml
@@ -0,0 +1,45 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 2
+maxExecDuration: 1h
+maxTrialNum: 20
+#choice: local, remote, pai, kubeflow
+trainingServicePlatform: kubeflow
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+trial:
+  codeDir: .
+  worker:
+    replicas: 2
+    command: python3 dist_mnist.py
+    gpuNum: 1
+    cpuNum: 1
+    memoryMB: 8196
+    image: msranni/nni:latest
+  ps:
+    replicas: 1
+    command: python3 dist_mnist.py
+    gpuNum: 0
+    cpuNum: 1
+    memoryMB: 8196
+    image: msranni/nni:latest
+kubeflowConfig:
+  operator: tf-operator
+  apiVersion: v1alpha2
+  storage: nfs
+  nfs:
+    # Your NFS server IP, like 10.10.10.10
+    server: {your_nfs_server_ip}
+    # Your NFS server export path, like /var/nfs/nni
+    path: {your_nfs_server_export_path}
diff --git a/examples/trials/mnist-distributed-tfv1/dist_mnist.py b/examples/trials/mnist-distributed-tfv1/dist_mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e3f17277f5468a2815335f0370f8691896a0ddc
--- /dev/null
+++ b/examples/trials/mnist-distributed-tfv1/dist_mnist.py
@@ -0,0 +1,354 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+#
+# NNI (https://github.com/Microsoft/nni) modified this code to show how to
+# integrate distributed tensorflow training with NNI SDK
+#
+"""Distributed MNIST training and validation, with model replicas.
+
+A simple softmax model with one hidden layer is defined. The parameters
+(weights and biases) are located on one parameter server (ps), while the ops
+are executed on two worker nodes by default. The TF sessions also run on the
+worker node.
+Multiple invocations of this script can be done in parallel, with different
+values for --task_index. There should be exactly one invocation with
+--task_index=0, which will create a master session that carries out variable
+initialization. The other, non-master, sessions will wait for the master
+session to finish the initialization before proceeding to the training stage.
+
+The coordination between the multiple worker invocations occurs due to
+the definition of the parameters on the same ps devices. The parameter updates
+from one worker are visible to all other workers. As such, the workers can
+perform forward computation and gradient calculation in parallel, which
+should lead to increased training speed for the simple model.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import json
+import math
+import os
+import sys
+import tempfile
+import time
+
+import tensorflow as tf
+from tensorflow.examples.tutorials.mnist import input_data
+import nni
+
+flags = tf.app.flags
+flags.DEFINE_string("data_dir", "/tmp/mnist-data",
+                    "Directory for storing mnist data")
+flags.DEFINE_boolean(
+    "download_only", False,
+    "Only perform downloading of data; Do not proceed to "
+    "session preparation, model definition or training")
+flags.DEFINE_integer(
+    "task_index", None, "Worker task index, should be >= 0. task_index=0 is "
+    "the master worker task that performs the variable "
+    "initialization ")
+flags.DEFINE_integer(
+    "num_gpus", 1, "Total number of gpus for each machine."
+ "If you don't use GPU, please set it to '0'") +flags.DEFINE_integer( + "replicas_to_aggregate", None, + "Number of replicas to aggregate before parameter update" + "is applied (For sync_replicas mode only; default: " + "num_workers)") +flags.DEFINE_integer("train_steps", 20000, + "Number of (global) training steps to perform") +flags.DEFINE_boolean( + "sync_replicas", False, + "Use the sync_replicas (synchronized replicas) mode, " + "wherein the parameter updates from workers are aggregated " + "before applied to avoid stale gradients") +flags.DEFINE_boolean( + "existing_servers", False, "Whether servers already exists. If True, " + "will use the worker hosts via their GRPC URLs (one client process " + "per worker host). Otherwise, will create an in-process TensorFlow " + "server.") +flags.DEFINE_string("ps_hosts", "localhost:2222", + "Comma-separated list of hostname:port pairs") +flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224", + "Comma-separated list of hostname:port pairs") +flags.DEFINE_string("job_name", None, "job name: worker or ps") + +FLAGS = flags.FLAGS + +IMAGE_PIXELS = 28 + +# Example: +# cluster = {'ps': ['host1:2222', 'host2:2222'], +# 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} +# os.environ['TF_CONFIG'] = json.dumps( +# {'cluster': cluster, +# 'task': {'type': 'worker', 'index': 1}}) + + +def generate_default_params(): + ''' + Generate default hyper parameters + ''' + return { + 'learning_rate': 0.01, + 'batch_size': 100, + 'hidden_units': 100, + } + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(unused_argv): + # Receive NNI hyper parameter and update it onto default params + RECEIVED_PARAMS = nni.get_next_parameter() + PARAMS = generate_default_params() + PARAMS.update(RECEIVED_PARAMS) + + # Parse environment variable TF_CONFIG to get job_name and task_index + + # If not explicitly specified in the constructor and the TF_CONFIG + # environment variable is present, load cluster_spec from TF_CONFIG. + tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}') + task_config = tf_config.get('task', {}) + task_type = task_config.get('type') + task_index = task_config.get('index') + + FLAGS.job_name = task_type + FLAGS.task_index = task_index + + mnist = download_mnist_retry(FLAGS.data_dir) + if FLAGS.download_only: + sys.exit(0) + + if FLAGS.job_name is None or FLAGS.job_name == "": + raise ValueError("Must specify an explicit `job_name`") + if FLAGS.task_index is None or FLAGS.task_index == "": + raise ValueError("Must specify an explicit `task_index`") + + print("job name = %s" % FLAGS.job_name) + print("task index = %d" % FLAGS.task_index) + + cluster_config = tf_config.get('cluster', {}) + ps_hosts = cluster_config.get('ps') + worker_hosts = cluster_config.get('worker') + + ps_hosts_str = ','.join(ps_hosts) + worker_hosts_str = ','.join(worker_hosts) + + FLAGS.ps_hosts = ps_hosts_str + FLAGS.worker_hosts = worker_hosts_str + + # Construct the cluster and start the server + ps_spec = FLAGS.ps_hosts.split(",") + worker_spec = FLAGS.worker_hosts.split(",") + + # Get the number of workers. + num_workers = len(worker_spec) + + cluster = tf.train.ClusterSpec({"ps": ps_spec, "worker": worker_spec}) + + if not FLAGS.existing_servers: + # Not using existing servers. 
Create an in-process server. + server = tf.train.Server( + cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index) + if FLAGS.job_name == "ps": + server.join() + + is_chief = (FLAGS.task_index == 0) + if FLAGS.num_gpus > 0: + # Avoid gpu allocation conflict: now allocate task_num -> #gpu + # for each worker in the corresponding machine + gpu = (FLAGS.task_index % FLAGS.num_gpus) + worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu) + elif FLAGS.num_gpus == 0: + # Just allocate the CPU to worker server + cpu = 0 + worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu) + # The device setter will automatically place Variables ops on separate + # parameter servers (ps). The non-Variable ops will be placed on the workers. + # The ps use CPU and workers use corresponding GPU + with tf.device( + tf.train.replica_device_setter( + worker_device=worker_device, + ps_device="/job:ps/cpu:0", + cluster=cluster)): + global_step = tf.Variable(0, name="global_step", trainable=False) + + # Variables of the hidden layer + hid_w = tf.Variable( + tf.truncated_normal( + [IMAGE_PIXELS * IMAGE_PIXELS, PARAMS['hidden_units']], + stddev=1.0 / IMAGE_PIXELS), + name="hid_w") + hid_b = tf.Variable(tf.zeros([PARAMS['hidden_units']]), name="hid_b") + + # Variables of the softmax layer + sm_w = tf.Variable( + tf.truncated_normal( + [PARAMS['hidden_units'], 10], + stddev=1.0 / math.sqrt(PARAMS['hidden_units'])), + name="sm_w") + sm_b = tf.Variable(tf.zeros([10]), name="sm_b") + + # Ops: located on the worker specified with FLAGS.task_index + x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS]) + y_ = tf.placeholder(tf.float32, [None, 10]) + + hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b) + hid = tf.nn.relu(hid_lin) + + y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b)) + cross_entropy = -tf.reduce_sum( + y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))) + + opt = tf.train.AdamOptimizer(PARAMS['learning_rate']) + + if FLAGS.sync_replicas: + if FLAGS.replicas_to_aggregate is None: + replicas_to_aggregate = num_workers + else: + replicas_to_aggregate = FLAGS.replicas_to_aggregate + + opt = tf.train.SyncReplicasOptimizer( + opt, + replicas_to_aggregate=replicas_to_aggregate, + total_num_replicas=num_workers, + name="mnist_sync_replicas") + + train_step = opt.minimize(cross_entropy, global_step=global_step) + + if FLAGS.sync_replicas: + local_init_op = opt.local_step_init_op + if is_chief: + local_init_op = opt.chief_init_op + + ready_for_local_init_op = opt.ready_for_local_init_op + + # Initial token and chief queue runners required by the sync_replicas mode + chief_queue_runner = opt.get_chief_queue_runner() + sync_init_op = opt.get_init_tokens_op() + + init_op = tf.global_variables_initializer() + train_dir = tempfile.mkdtemp() + + if FLAGS.sync_replicas: + sv = tf.train.Supervisor( + is_chief=is_chief, + logdir=train_dir, + init_op=init_op, + local_init_op=local_init_op, + ready_for_local_init_op=ready_for_local_init_op, + recovery_wait_secs=1, + global_step=global_step) + else: + sv = tf.train.Supervisor( + is_chief=is_chief, + logdir=train_dir, + init_op=init_op, + recovery_wait_secs=1, + global_step=global_step) + + sess_config = tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=False, + device_filters=[ + "/job:ps", "/job:worker/task:%d" % FLAGS.task_index + ]) + + # The chief worker (task_index==0) session will prepare the session, + # while the remaining workers will wait for the preparation to complete. 
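+    # "Chief" here is simply the task with task_index 0: it initializes the
+    # variables, runs the sync_replicas init ops, and is the only task that
+    # reports metrics to NNI further below.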
+ if is_chief: + print("Worker %d: Initializing session..." % FLAGS.task_index) + else: + print("Worker %d: Waiting for session to be initialized..." % + FLAGS.task_index) + + if FLAGS.existing_servers: + server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index] + print("Using existing server at: %s" % server_grpc_url) + + sess = sv.prepare_or_wait_for_session( + server_grpc_url, config=sess_config) + else: + sess = sv.prepare_or_wait_for_session( + server.target, config=sess_config) + + print("Worker %d: Session initialization complete." % FLAGS.task_index) + + if FLAGS.sync_replicas and is_chief: + # Chief worker will start the chief queue runner and call the init op. + sess.run(sync_init_op) + sv.start_queue_runners(sess, [chief_queue_runner]) + + # Perform training + time_begin = time.time() + print("Training begins @ %f" % time_begin) + + local_step = 0 + while True: + # Training feed + batch_xs, batch_ys = mnist.train.next_batch(PARAMS['batch_size']) + train_feed = {x: batch_xs, y_: batch_ys} + + _, step = sess.run([train_step, global_step], feed_dict=train_feed) + local_step += 1 + + now = time.time() + print("%f: Worker %d: training step %d done (global step: %d)" % + (now, FLAGS.task_index, local_step, step)) + + if step > 0 and step % 5000 == 0 and is_chief: + val_feed = { + x: mnist.validation.images, + y_: mnist.validation.labels + } + interim_val_xent = sess.run(cross_entropy, feed_dict=val_feed) + print( + "After %d training step(s), validation cross entropy = %g" + % (step, interim_val_xent)) + + # Only chief worker can report intermediate metrics + nni.report_intermediate_result(interim_val_xent) + + if step >= FLAGS.train_steps: + break + + time_end = time.time() + print("Training ends @ %f" % time_end) + training_time = time_end - time_begin + print("Training elapsed time: %f s" % training_time) + + # Validation feed + val_feed = {x: mnist.validation.images, y_: mnist.validation.labels} + val_xent = sess.run(cross_entropy, feed_dict=val_feed) + print("After %d training step(s), validation cross entropy = %g" % + (FLAGS.train_steps, val_xent)) + + # Only chief worker can report final metrics + if is_chief: + nni.report_final_result(val_xent) + + +if __name__ == "__main__": + tf.app.run() diff --git a/examples/trials/mnist-distributed-tfv1/search_space.json b/examples/trials/mnist-distributed-tfv1/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c19838e469e53b30d3ece7f0f986296544b0714a --- /dev/null +++ b/examples/trials/mnist-distributed-tfv1/search_space.json @@ -0,0 +1,5 @@ +{ + "hidden_units":{"_type":"choice","_value":[100, 120, 140, 160, 180, 200]}, + "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]}, + "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]} +} \ No newline at end of file diff --git a/examples/trials/mnist-keras/config.yml b/examples/trials/mnist-keras/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..f0f628779f3ff6e8dcf2de0f7660104b24daf300 --- /dev/null +++ b/examples/trials/mnist-keras/config.yml @@ -0,0 +1,21 @@ +authorName: default +experimentName: example_mnist-keras +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize 
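+    # mnist-keras.py reports accuracy as its metric, so "maximize" is the right choice here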
+ optimize_mode: maximize +trial: + command: python3 mnist-keras.py + codeDir: . + gpuNum: 0 diff --git a/examples/trials/mnist-keras/config_pai.yml b/examples/trials/mnist-keras/config_pai.yml new file mode 100644 index 0000000000000000000000000000000000000000..21e8e6030aefb23347a6407ab15b25cd7e95eb44 --- /dev/null +++ b/examples/trials/mnist-keras/config_pai.yml @@ -0,0 +1,35 @@ +authorName: default +experimentName: example_mnist-keras +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: pai +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +trial: + command: python3 mnist-keras.py + codeDir: . + gpuNum: 0 + cpuNum: 1 + memoryMB: 8196 + #The docker image to run nni job on pai + image: msranni/nni:latest + nniManagerNFSMountPath: {replace_to_your_nfs_mount_path} + containerNFSMountPath: {replace_to_your_container_mount_path} + paiStorageConfigName: {replace_to_your_storage_config_name} +paiConfig: + #The username to login pai + userName: username + #The token to login pai + token: token + #The host of restful server of pai + host: 10.10.10.10 \ No newline at end of file diff --git a/examples/trials/mnist-keras/config_windows.yml b/examples/trials/mnist-keras/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..e0c12746f2bfe90cdb55a8042f89feb571f58e41 --- /dev/null +++ b/examples/trials/mnist-keras/config_windows.yml @@ -0,0 +1,21 @@ +authorName: default +experimentName: example_mnist-keras +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +trial: + command: python mnist-keras.py + codeDir: . + gpuNum: 0 diff --git a/examples/trials/mnist-keras/mnist-keras.py b/examples/trials/mnist-keras/mnist-keras.py new file mode 100644 index 0000000000000000000000000000000000000000..794b7deb2a8b06b992ccd97beb0127e03d020c7d --- /dev/null +++ b/examples/trials/mnist-keras/mnist-keras.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import argparse
+import logging
+
+import os
+import keras
+import numpy as np
+from keras import backend as K
+from keras.callbacks import TensorBoard
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
+from keras.models import Sequential
+
+import nni
+
+LOG = logging.getLogger('mnist_keras')
+K.set_image_data_format('channels_last')
+TENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR']
+
+H, W = 28, 28
+NUM_CLASSES = 10
+
+def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
+    '''
+    Create a simple convolutional model
+    '''
+    layers = [
+        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
+        Conv2D(64, (3, 3), activation='relu'),
+        MaxPooling2D(pool_size=(2, 2)),
+        Flatten(),
+        Dense(100, activation='relu'),
+        Dense(num_classes, activation='softmax')
+    ]
+
+    model = Sequential(layers)
+
+    if hyper_params['optimizer'] == 'Adam':
+        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
+    else:
+        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
+    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
+
+    return model
+
+def load_mnist_data(args):
+    '''
+    Load MNIST dataset
+    '''
+    mnist_path = os.path.join(os.environ.get('NNI_OUTPUT_DIR'), 'mnist.npz')
+    (x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path)
+    # delete the downloaded archive once it is loaded into memory
+    os.remove(mnist_path)
+
+    # np.float was removed from recent NumPy releases; use an explicit dtype
+    x_train = (np.expand_dims(x_train, -1).astype(np.float32) / 255.)[:args.num_train]
+    x_test = (np.expand_dims(x_test, -1).astype(np.float32) / 255.)[:args.num_test]
+    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
+    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
+
+    LOG.debug('x_train shape: %s', (x_train.shape,))
+    LOG.debug('x_test shape: %s', (x_test.shape,))
+
+    return x_train, y_train, x_test, y_test
+
+class SendMetrics(keras.callbacks.Callback):
+    '''
+    Keras callback to send metrics to NNI framework
+    '''
+    def on_epoch_end(self, epoch, logs=None):
+        '''
+        Run on end of each epoch
+        '''
+        logs = logs or {}
+        LOG.debug(logs)
+        # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`
+        if 'val_acc' in logs:
+            nni.report_intermediate_result(logs['val_acc'])
+        else:
+            nni.report_intermediate_result(logs['val_accuracy'])
+
+def train(args, params):
+    '''
+    Train model
+    '''
+    x_train, y_train, x_test, y_test = load_mnist_data(args)
+    model = create_mnist_model(params)
+
+    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
+        validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
+
+    _, acc = model.evaluate(x_test, y_test, verbose=0)
+    LOG.debug('Final result is: %g', acc)
+    nni.report_final_result(acc)
+
+def generate_default_params():
+    '''
+    Generate default hyper parameters
+    '''
+    return {
+        'optimizer': 'Adam',
+        'learning_rate': 0.001
+    }
+
+if __name__ == '__main__':
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
+    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
+    PARSER.add_argument("--num_train", type=int, default=60000, help="Number of train samples to be used, maximum 60000", required=False)
+    PARSER.add_argument("--num_test", type=int, default=10000, help="Number of test samples to be used, maximum 10000", required=False)
+
+    ARGS, UNKNOWN = PARSER.parse_known_args()
+
+    try:
+        # get parameters from tuner
+        RECEIVED_PARAMS = nni.get_next_parameter()
+        LOG.debug(RECEIVED_PARAMS)
+        PARAMS = generate_default_params()
+        PARAMS.update(RECEIVED_PARAMS)
+        # train
+        train(ARGS, PARAMS)
+    except Exception as e:
+        LOG.exception(e)
+        raise
diff --git a/examples/trials/mnist-keras/search_space.json b/examples/trials/mnist-keras/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..774941a55e3213ef77b3baaa24e294f710fc1c9f
--- /dev/null
+++ b/examples/trials/mnist-keras/search_space.json
@@ -0,0 +1,4 @@
+{
+    "optimizer":{"_type":"choice","_value":["Adam", "SGD"]},
+    "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.002, 0.005, 0.01]}
+}
diff --git a/examples/trials/mnist-nested-search-space/config.yml b/examples/trials/mnist-nested-search-space/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2cff01c6550ee88971182d52a5fe1d081b9136a7
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/config.yml
@@ -0,0 +1,14 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 2
+maxTrialNumber: 100
+maxExperimentDuration: 1h
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService: # For other platforms, check mnist-pytorch example
+  platform: local
+  useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
+                      # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
diff --git a/examples/trials/mnist-nested-search-space/config_windows.yml b/examples/trials/mnist-nested-search-space/config_windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b5e405436694e54ce5cbbc6faa8c85c1e5e536aa
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/config_windows.yml
@@ -0,0 +1,14 @@
+searchSpaceFile: search_space.json
+trialCommand: python mnist.py
+trialGpuNumber: 0
+trialConcurrency: 2
+maxTrialNumber: 100
+maxExperimentDuration: 1h
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService: # For other platforms, check mnist-pytorch example
+  platform: local
+  useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
+                      # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
diff --git a/examples/trials/mnist-nested-search-space/mnist.py b/examples/trials/mnist-nested-search-space/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbf23580301cbc9b510cd68dc18bb6a8a16015eb
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/mnist.py
@@ -0,0 +1,176 @@
+'''
+mnist.py shows how to use a nested (iterative) search space to tune the network architecture for MNIST.
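+
+Each "layer<i>" entry in search_space.json is itself a nested choice: the chosen
+dict's "_name" selects the layer type (Empty / Conv / Max_pool / Avg_pool), and
+its remaining key (kernel_size or pooling_size) is tuned together with that choice.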
+''' +from __future__ import absolute_import, division, print_function + +import logging +import math +import tempfile +import time +import argparse + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +import nni + +logger = logging.getLogger('mnist_nested_search_space') +FLAGS = None + +class MnistNetwork(object): + def __init__(self, params, feature_size = 784): + config = [] + + for i in range(4): + config.append(params['layer'+str(i)]) + self.config = config + self.feature_size = feature_size + self.label_size = 10 + + + def is_expand_dim(self, input): + # input is a tensor + shape = len(input.get_shape().as_list()) + if shape < 4: + return True + return False + + + def is_flatten(self, input): + # input is a tensor + shape = len(input.get_shape().as_list()) + if shape > 2: + return True + return False + + + def get_layer(self, layer_config, input, in_height, in_width, id): + if layer_config[0] == 'Empty': + return input + + if self.is_expand_dim(input): + input = tf.reshape(input, [-1, in_height, in_width, 1]) + h, w = layer_config[1], layer_config[2] + + if layer_config[0] == 'Conv': + conv_filter = tf.Variable(tf.random_uniform([h, w, 1, 1]), name='id_%d_conv_%d_%d' % (id, h, w)) + return tf.nn.conv2d(input, filter=conv_filter, strides=[1, 1, 1, 1], padding='SAME') + if layer_config[0] == 'Max_pool': + return tf.nn.max_pool(input, ksize=[1, h, w, 1], strides=[1, 1, 1, 1], padding='SAME') + if layer_config[0] == 'Avg_pool': + return tf.nn.avg_pool(input, ksize=[1, h, w, 1], strides=[1, 1, 1, 1], padding='SAME') + + print('error:', layer_config) + raise Exception('%s layer is illegal'%layer_config[0]) + + + def build_network(self): + layer_configs = self.config + feature_size = 784 + + # define placeholder + self.x = tf.placeholder(tf.float32, [None, feature_size], name="input_x") + self.y = tf.placeholder(tf.int32, [None, self.label_size], name="input_y") + label_number = 10 + + # define network + input_layer = self.x + in_height = in_width = int(math.sqrt(feature_size)) + for i, layer_config in enumerate(layer_configs): + input_layer = tf.nn.relu(self.get_layer(layer_config, input_layer, in_height, in_width, i)) + + output_layer = input_layer + if self.is_flatten(output_layer): + output_layer = tf.contrib.layers.flatten(output_layer) # flatten + output_layer = tf.layers.dense(output_layer, label_number) + child_logit = tf.nn.softmax_cross_entropy_with_logits(logits=output_layer, labels=self.y) + child_loss = tf.reduce_mean(child_logit) + + self.train_step = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(child_loss) + child_accuracy = tf.equal(tf.argmax(output_layer, 1), tf.argmax(self.y, 1)) + self.accuracy = tf.reduce_mean(tf.cast(child_accuracy, "float")) # add a reduce_mean + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + # Import data + mnist = download_mnist_retry(params['data_dir']) + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork(params) + mnist_network.build_network() + print('build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + #print('Saving graph to: %s' % graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + 
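+    # NOTE: this example depends on TF1-only APIs (tf.placeholder,
+    # tf.contrib.layers, tf.train.AdamOptimizer); it will not run under
+    # TensorFlow 2.x without tf.compat.v1 and a replacement for tf.contrib.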
test_acc = 0.0
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        for i in range(params['batch_num']):
+            batch = mnist.train.next_batch(params['batch_size'])
+            mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1]})
+
+            if i % 100 == 0:
+                train_accuracy = mnist_network.accuracy.eval(feed_dict={
+                    mnist_network.x: batch[0], mnist_network.y: batch[1]})
+                print('step %d, training accuracy %g' % (i, train_accuracy))
+
+        test_acc = mnist_network.accuracy.eval(feed_dict={
+            mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels})
+
+        nni.report_final_result(test_acc)
+
+def get_params():
+    ''' Get parameters from command line '''
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory")
+    parser.add_argument("--batch_num", type=int, default=1000)
+    parser.add_argument("--batch_size", type=int, default=200)
+    args, _ = parser.parse_known_args()
+    return args
+
+def parse_init_json(data):
+    params = {}
+    for key in data:
+        value = data[key]
+        layer_name = value["_name"]
+        if layer_name == 'Empty':
+            # Empty Layer
+            params[key] = ['Empty']
+        elif layer_name == 'Conv':
+            # Conv layer
+            params[key] = [layer_name, value['kernel_size'], value['kernel_size']]
+        else:
+            # Pooling Layer
+            params[key] = [layer_name, value['pooling_size'], value['pooling_size']]
+    return params
+
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        data = nni.get_next_parameter()
+        logger.debug(data)
+
+        RCV_PARAMS = parse_init_json(data)
+        logger.debug(RCV_PARAMS)
+        params = vars(get_params())
+        params.update(RCV_PARAMS)
+        print(RCV_PARAMS)
+
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-nested-search-space/requirments.txt b/examples/trials/mnist-nested-search-space/requirments.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a77b0fa943315fcd8d7766ef76c5cd75c9405cc7
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/requirments.txt
@@ -0,0 +1,3 @@
+tensorflow >= 1.3
+six == 1.11.0
+numpy == 1.13.3
diff --git a/examples/trials/mnist-nested-search-space/sample.json b/examples/trials/mnist-nested-search-space/sample.json
new file mode 100644
index 0000000000000000000000000000000000000000..518dfeb05f6e61181d03aac267c8f7a4c0b5175e
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/sample.json
@@ -0,0 +1,17 @@
+{
+    "layer0": {
+        "_name": "Avg_pool",
+        "pooling_size": 3
+    },
+    "layer1": {
+        "_name": "Conv",
+        "kernel_size": 2
+    },
+    "layer2": {
+        "_name": "Empty"
+    },
+    "layer3": {
+        "_name": "Conv",
+        "kernel_size": 5
+    }
+}
\ No newline at end of file
diff --git a/examples/trials/mnist-nested-search-space/search_space.json b/examples/trials/mnist-nested-search-space/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..185bc00253ef3aab6c420071ad45e2bb01c46040
--- /dev/null
+++ b/examples/trials/mnist-nested-search-space/search_space.json
@@ -0,0 +1,114 @@
+{
+    "layer0": {
+        "_type": "choice",
+        "_value": [{
+            "_name": "Empty"
+        },
+        {
+            "_name": "Conv",
+            "kernel_size": {
+                "_type": "choice",
+                "_value": [1, 2, 3, 5]
+            }
+        },
+        {
+            "_name": "Max_pool",
+            "pooling_size": {
+                "_type": "choice",
+                "_value": [2, 3, 5]
+            }
+        },
+        {
+            "_name": "Avg_pool",
+            "pooling_size": {
+                "_type": "choice",
+                "_value": [2, 3, 5]
+            }
+        }
+        ]
+    },
+    "layer1": {
+        "_type": "choice",
+        "_value": [{
+            "_name":
"Empty" + }, + { + "_name": "Conv", + "kernel_size": { + "_type": "choice", + "_value": [1, 2, 3, 5] + } + }, + { + "_name": "Max_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + }, + { + "_name": "Avg_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + } + ] + }, + "layer2": { + "_type": "choice", + "_value": [{ + "_name": "Empty" + }, + { + "_name": "Conv", + "kernel_size": { + "_type": "choice", + "_value": [1, 2, 3, 5] + } + }, + { + "_name": "Max_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + }, + { + "_name": "Avg_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + } + ] + }, + "layer3": { + "_type": "choice", + "_value": [{ + "_name": "Empty" + }, + { + "_name": "Conv", + "kernel_size": { + "_type": "choice", + "_value": [1, 2, 3, 5] + } + }, + { + "_name": "Max_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + }, + { + "_name": "Avg_pool", + "pooling_size": { + "_type": "choice", + "_value": [2, 3, 5] + } + } + ] + } +} diff --git a/examples/trials/mnist-pbt-tuner-pytorch/.gitignore b/examples/trials/mnist-pbt-tuner-pytorch/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a9a5aecf429fd8a0d81fbd5fd37006bfa498d5c1 --- /dev/null +++ b/examples/trials/mnist-pbt-tuner-pytorch/.gitignore @@ -0,0 +1 @@ +tmp diff --git a/examples/trials/mnist-pbt-tuner-pytorch/config.yml b/examples/trials/mnist-pbt-tuner-pytorch/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..8d648c88933bc46933c4a382c8c245f33ae5735b --- /dev/null +++ b/examples/trials/mnist-pbt-tuner-pytorch/config.yml @@ -0,0 +1,14 @@ +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 1 +trialConcurrency: 3 +maxTrialNumber: 100 +maxExperimentDuration: 2h +tuner: + name: PBTTuner + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/mnist-pbt-tuner-pytorch/config_windows.yml b/examples/trials/mnist-pbt-tuner-pytorch/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..49dbe4594105f1182c3e56bba52421dce9ec642d --- /dev/null +++ b/examples/trials/mnist-pbt-tuner-pytorch/config_windows.yml @@ -0,0 +1,14 @@ +searchSpaceFile: search_space.json +trialCommand: python mnist.py +trialGpuNumber: 1 +trialConcurrency: 3 +maxTrialNumber: 100 +maxExperimentDuration: 2h +tuner: + name: PBTTuner + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. 
Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/mnist-pbt-tuner-pytorch/mnist.py b/examples/trials/mnist-pbt-tuner-pytorch/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..6c632a864aebbd3ba23f27ffd2186bef18639928 --- /dev/null +++ b/examples/trials/mnist-pbt-tuner-pytorch/mnist.py @@ -0,0 +1,183 @@ +import argparse +import logging + +import os +import nni +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms + + +logger = logging.getLogger('mnist_pbt_tuner_pytorch_AutoML') + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4*4*50, 512) + self.fc2 = nn.Linear(512, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args['log_interval'] == 0: + logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # sum up batch loss + test_loss += F.nll_loss(output, target, reduction='sum').item() + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + accuracy = 100. 
* correct / len(test_loader.dataset)
+
+    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), accuracy))
+
+    return accuracy
+
+
+def save_checkpoint(model, checkpoint_path):
+    torch.save(model.state_dict(), checkpoint_path)
+
+
+def load_checkpoint(checkpoint_path):
+    model_state_dict = torch.load(checkpoint_path)
+    return model_state_dict
+
+
+def main(args):
+    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
+
+    torch.manual_seed(args['seed'])
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+
+    data_dir = args['data_dir']
+
+    train_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=True, download=True,
+                       transform=transforms.Compose([
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.1307,), (0.3081,))
+                       ])),
+        batch_size=args['batch_size'], shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307,), (0.3081,))
+        ])),
+        batch_size=1000, shuffle=True, **kwargs)
+
+    model = Net().to(device)
+
+    save_checkpoint_dir = args['save_checkpoint_dir']
+    save_checkpoint_path = os.path.join(save_checkpoint_dir, 'model.pth')
+    load_checkpoint_path = os.path.join(args['load_checkpoint_dir'], 'model.pth')
+
+    if os.path.isfile(load_checkpoint_path):
+        model_state_dict = load_checkpoint(load_checkpoint_path)
+        logger.info('Loading checkpoint from %s', load_checkpoint_path)
+        logger.info(type(model_state_dict))
+        model.load_state_dict(model_state_dict)
+
+    optimizer = optim.SGD(model.parameters(), lr=args['lr'],
+                          momentum=args['momentum'])
+
+    # each epoch corresponds to one PBT perturbation interval
+    for epoch in range(1, args['epochs'] + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test_acc = test(args, model, device, test_loader)
+
+        if epoch < args['epochs']:
+            # report intermediate result
+            nni.report_intermediate_result(test_acc)
+            logger.debug('test accuracy %g', test_acc)
+            logger.debug('Pipe send intermediate result done.')
+        else:
+            # report final result
+            nni.report_final_result(test_acc)
+            logger.debug('Final result is %g', test_acc)
+            logger.debug('Send final result done.')
+
+    if not os.path.exists(save_checkpoint_dir):
+        os.makedirs(save_checkpoint_dir)
+    save_checkpoint(model, save_checkpoint_path)
+
+
+def get_params():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument("--data_dir", type=str,
+                        default='./data', help="data directory")
+    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--epochs', type=int, default=1, metavar='N',
+                        help='number of epochs to train (default: 1)')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--no_cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--log_interval', type=int, default=1000, metavar='N',
+                        help='how many batches to wait before logging training status')
+
+    parser.add_argument('--save_checkpoint_dir', type=str,
+                        help='where to save checkpoint of this trial')
+    parser.add_argument('--load_checkpoint_dir', type=str,
+                        help='where to load the model')
+
+
+    args, _ = parser.parse_known_args()
+    return args
+
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        tuner_params = nni.get_next_parameter()
+        logger.debug(tuner_params)
+        params = vars(get_params())
+        params.update(tuner_params)
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-pbt-tuner-pytorch/search_space.json b/examples/trials/mnist-pbt-tuner-pytorch/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..978497f8fadf2a22d375a2a46bc32d2af27339e6
--- /dev/null
+++ b/examples/trials/mnist-pbt-tuner-pytorch/search_space.json
@@ -0,0 +1,5 @@
+{
+    "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]},
+    "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]},
+    "momentum":{"_type":"uniform","_value":[0, 1]}
+}
diff --git a/examples/trials/mnist-pytorch/.gitignore b/examples/trials/mnist-pytorch/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..1269488f7fb1f4b56a8c0e5eb48cecbfadfa9219
--- /dev/null
+++ b/examples/trials/mnist-pytorch/.gitignore
@@ -0,0 +1 @@
+data
diff --git a/examples/trials/mnist-pytorch/.nniignore b/examples/trials/mnist-pytorch/.nniignore
new file mode 100644
index 0000000000000000000000000000000000000000..b22de6bbe6b35e1ba35c2acaee9ff18e5e8c23ba
--- /dev/null
+++ b/examples/trials/mnist-pytorch/.nniignore
@@ -0,0 +1,4 @@
+# Exclude the following directory when uploading codeDir
+data
+
+# They can also be files
\ No newline at end of file
diff --git a/examples/trials/mnist-pytorch/config.yml b/examples/trials/mnist-pytorch/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7fd35c0e9aa06366e9fc7c204bbaa6af58e8322d
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config.yml
@@ -0,0 +1,14 @@
+# This is the minimal config file for an NNI experiment.
+# Use "nnictl create --config config.yml" to launch this experiment.
+# Afterwards, you can check "config_detailed.yml" for more explanation.
+
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py # NOTE: change "python3" to "python" if you are using Windows
+trialGpuNumber: 0
+trialConcurrency: 1
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-pytorch/config_adl.yml b/examples/trials/mnist-pytorch/config_adl.yml
new file mode 100644
index 0000000000000000000000000000000000000000..feacff8d93abd0a48551cddc947d8184b8b30def
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_adl.yml
@@ -0,0 +1,28 @@
+authorName: default
+experimentName: example_mnist_pytorch
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+nniManagerIp: {replace_with_your_ip}
+logCollection: http
+trainingServicePlatform: adl
+
+searchSpacePath: search_space.json
+useAnnotation: false
+tuner:
+  builtinTunerName: TPE
+  classArgs:
+    optimize_mode: maximize
+
+trial:
+  # the user needs to have a docker image built by the adl.Dockerfile
+  # the docker image should be pushed to a registry for the cluster to pull
+  # in our example we provide a docker image from our public docker hub
+  image: petuum/train-demo:mnist-pytorch-adl
+  # optional:
+  # the user needs to provide the secret if the image is pulled from a private registry
+  # imagePullSecrets:
+  #   - name: {secret}
+  command: python3 mnist.py
+  codeDir: .
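+  # gpuNum below requests one GPU for each trial container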
+  gpuNum: 1
diff --git a/examples/trials/mnist-pytorch/config_aml.yml b/examples/trials/mnist-pytorch/config_aml.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3c87d38123b1746e54bb803b3bcf5e4ee1dd7988
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_aml.yml
@@ -0,0 +1,15 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialConcurrency: 1
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: aml
+  dockerImage: msranni/nni
+  subscriptionId: ${your subscription ID}
+  resourceGroup: ${your resource group}
+  workspaceName: ${your workspace name}
+  computeTarget: ${your compute target}
diff --git a/examples/trials/mnist-pytorch/config_assessor.yml b/examples/trials/mnist-pytorch/config_assessor.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dd885ba4210ae6a2aa302b802e480a76029276a0
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_assessor.yml
@@ -0,0 +1,16 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py # NOTE: change "python3" to "python" if you are using Windows
+trialGpuNumber: 0
+trialConcurrency: 4
+maxTrialNumber: 20
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+assessor:
+  name: Curvefitting
+  classArgs:
+    epoch_num: 20
+    threshold: 0.9
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-pytorch/config_detailed.yml b/examples/trials/mnist-pytorch/config_detailed.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d85fb48f76243033187fdaf498810846947530cb
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_detailed.yml
@@ -0,0 +1,44 @@
+# This example shows more configurable fields compared to the minimal "config.yml"
+# You can use "nnictl create --config config_detailed.yml" to launch this experiment.
+# If you see an error message saying "port 8080 is used", use "nnictl stop --all" to stop previous experiments.
+
+experimentName: MNIST # An optional name to help you distinguish experiments.
+
+# Hyper-parameter search space can either be configured here or in a separate file.
+# "config.yml" shows how to specify a separate search space file.
+# The common schema of search space is documented here:
+#   https://nni.readthedocs.io/en/stable/Tutorial/SearchSpaceSpec.html
+searchSpace:
+  batch_size:
+    _type: choice
+    _value: [16, 32, 64, 128]
+  hidden_size:
+    _type: choice
+    _value: [128, 256, 512, 1024]
+  lr:
+    _type: choice
+    _value: [0.0001, 0.001, 0.01, 0.1]
+  momentum:
+    _type: uniform
+    _value: [0, 1]
+
+trialCommand: python3 mnist.py # The command to launch a trial. NOTE: change "python3" to "python" if you are using Windows.
+trialCodeDirectory: . # The path of trial code. By default it's ".", which means the same directory of this config file.
+trialGpuNumber: 1 # How many GPUs should each trial use. CUDA is required when it's greater than zero.
+
+trialConcurrency: 4 # Run 4 trials concurrently.
+maxTrialNumber: 10 # Generate at most 10 trials.
+maxExperimentDuration: 1h # Stop generating trials after 1 hour.
+
+tuner: # Configure the tuning algorithm.
+  name: TPE # Supported algorithms: TPE, Random, Anneal, Evolution, GridSearch, GPTuner, PBTTuner, etc.
+            # Full list: https://nni.readthedocs.io/en/latest/Tuner/BuiltinTuner.html
+  classArgs: # Algorithm specific arguments. See the tuner's doc for details.
+    optimize_mode: maximize # "minimize" or "maximize"
+
+# Configure the training platform.
+# Supported platforms: local, remote, openpai, aml, kubeflow, kubernetes, adl.
+trainingService:
+  platform: local
+  useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
+                      # Reason and details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
diff --git a/examples/trials/mnist-pytorch/config_dlc.yml b/examples/trials/mnist-pytorch/config_dlc.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d4372acad48e7136733c0b4c0dd902f82e00bfe7
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_dlc.yml
@@ -0,0 +1,24 @@
+# working directory on DSW, please provide the FULL path
+searchSpaceFile: search_space.json
+# the command on the trial runner (i.e. the DLC container); be aware of data_dir
+trialCommand: python mnist.py --data_dir /root/data/{your_data_dir}
+trialConcurrency: 1 # NOTE: please provide a number <= 3 due to DLC system limit.
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+# ref: https://help.aliyun.com/document_detail/203290.html?spm=a2c4g.11186623.6.727.6f9b5db6bzJh4x
+trainingService:
+  platform: dlc
+  type: Worker
+  image: registry-vpc.cn-beijing.aliyuncs.com/pai-dlc/pytorch-training:1.6.0-gpu-py37-cu101-ubuntu18.04
+  jobType: PyTorchJob # choices: [TFJob, PyTorchJob]
+  podCount: 1
+  ecsSpec: ecs.c6.large
+  region: cn-hangzhou
+  accessKeyId: ${your_ak_id}
+  accessKeySecret: ${your_ak_key}
+  nasDataSourceId: ${your_nas_data_source_id} # NAS datasource ID, e.g., datat56by9n1xt0a
+  localStorageMountPoint: /home/admin/workspace/ # default NAS path on DSW, MUST provide full path.
+  containerStorageMountPoint: /root/data/ # default NAS path on DLC container, change it according to your setting
diff --git a/examples/trials/mnist-pytorch/config_frameworkcontroller.yml b/examples/trials/mnist-pytorch/config_frameworkcontroller.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6047f4daf67039a20c5ab2de94d79315fb04a537
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_frameworkcontroller.yml
@@ -0,0 +1,40 @@
+authorName: default
+experimentName: example_mnist_pytorch
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai, kubeflow
+trainingServicePlatform: frameworkcontroller
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+trial:
+  codeDir: .
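+  # Each taskRole below maps to one set of pods; frameworkAttemptCompletionPolicy
+  # decides when the whole attempt counts as succeeded or failed, based on how
+  # many tasks finish with each status.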
+ taskRoles: + - name: worker + taskNum: 1 + command: python3 mnist.py + gpuNum: 1 + cpuNum: 1 + memoryMB: 8192 + image: msranni/nni:latest + frameworkAttemptCompletionPolicy: + minFailedTaskCount: 1 + minSucceededTaskCount: 1 +frameworkcontrollerConfig: + storage: nfs + nfs: + # Your NFS server IP, like 10.10.10.10 + server: {your_nfs_server_ip} + # Your NFS server export path, like /var/nfs/nni + path: {your_nfs_server_export_path} \ No newline at end of file diff --git a/examples/trials/mnist-pytorch/config_frameworkcontroller_custom.yml b/examples/trials/mnist-pytorch/config_frameworkcontroller_custom.yml new file mode 100644 index 0000000000000000000000000000000000000000..91498db79c337e23611019e2bdfb9d6079d8a54e --- /dev/null +++ b/examples/trials/mnist-pytorch/config_frameworkcontroller_custom.yml @@ -0,0 +1,29 @@ +authorName: default +experimentName: example_mnist_pytorch +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +logLevel: trace +#choice: local, remote, pai, kubeflow +trainingServicePlatform: frameworkcontroller +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: . +frameworkcontrollerConfig: + configPath: fc_template.yml + storage: pvc + namespace: "default" + pvc: + path: "/tmp/mount" diff --git a/examples/trials/mnist-pytorch/config_hybrid.yml b/examples/trials/mnist-pytorch/config_hybrid.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ae3a2cc3fcca51a6cc9a0fe9f04b8da1418f785 --- /dev/null +++ b/examples/trials/mnist-pytorch/config_hybrid.yml @@ -0,0 +1,23 @@ +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 5 +maxTrialNumber: 20 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +# For local, remote, openpai, and aml, NNI can use multiple training services at one time +trainingService: + - platform: local + - platform: remote + machineList: + - host: ${your server's IP or domain name} + user: ${your user name} + ssh_key_file: ~/.ssh/id_rsa + - platform: aml + dockerImage: msranni/nni + subscriptionId: ${your subscription ID} + resourceGroup: ${your resource group} + workspaceName: ${your workspace name} + computeTarget: ${your compute target} diff --git a/examples/trials/mnist-pytorch/config_kubeflow.yml b/examples/trials/mnist-pytorch/config_kubeflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0cef8889ee54ff7e3fdfa82e42bd898d4bcc625 --- /dev/null +++ b/examples/trials/mnist-pytorch/config_kubeflow.yml @@ -0,0 +1,32 @@ +authorName: default +experimentName: example_dist_pytorch +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 1 +#choice: local, remote, pai, kubeflow +trainingServicePlatform: kubeflow +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +trial: + codeDir: . 
+  worker:
+    replicas: 1
+    command: python3 mnist.py
+    gpuNum: 0
+    cpuNum: 1
+    memoryMB: 8192
+    image: msranni/nni:latest
+kubeflowConfig:
+  operator: tf-operator
+  apiVersion: v1alpha2
+  storage: nfs
+  nfs:
+    server: 10.10.10.10
+    path: /var/nfs/general
\ No newline at end of file
diff --git a/examples/trials/mnist-pytorch/config_openpai.yml b/examples/trials/mnist-pytorch/config_openpai.yml
new file mode 100644
index 0000000000000000000000000000000000000000..01f90ecfaed2c4d31f15e15e4588197add64fd54
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_openpai.yml
@@ -0,0 +1,20 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 1
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: openpai
+  host: http://123.123.123.123
+  username: ${your user name}
+  token: ${your token}
+  dockerImage: msranni/nni
+  trialCpuNumber: 1
+  trialMemorySize: 8GB
+  storageConfigName: ${your storage config name}
+  localStorageMountPoint: ${NFS mount point on local machine}
+  containerStorageMountPoint: ${NFS mount point inside Docker container}
diff --git a/examples/trials/mnist-pytorch/config_remote.yml b/examples/trials/mnist-pytorch/config_remote.yml
new file mode 100644
index 0000000000000000000000000000000000000000..42a8546848676f48e4e20077f9ea04f2707fc5cf
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_remote.yml
@@ -0,0 +1,24 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 4
+maxTrialNumber: 20
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: remote
+  machineList:
+    - host: ${your server's IP or domain name}
+      user: ${your user name}
+      ssh_key_file: ~/.ssh/id_rsa  # We recommend public keys over passwords; they are more secure and convenient.
+      # You can specify more than one SSH server:
+    - host: 123.123.123.123
+      port: 10022
+      user: nniuser
+      password: 12345
+      pythonPath: /usr/bin  # Other examples:
+                            #   /opt/python3.9/bin
+                            #   C:/Python39
+                            #   C:/Users/USERNAME/.conda/envs/ENVNAME;C:/Users/USERNAME/.conda/envs/ENVNAME/Scripts;C:/Users/USERNAME/.conda/envs/ENVNAME/Library/bin
diff --git a/examples/trials/mnist-pytorch/config_tensorboard.yml b/examples/trials/mnist-pytorch/config_tensorboard.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9c8839bca2341d65653a3ba41c50152a4c3be138
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_tensorboard.yml
@@ -0,0 +1,11 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist_tensorboard.py  # NOTE: change "python3" to "python" if you are using Windows
+trialGpuNumber: 0
+trialConcurrency: 1
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-pytorch/config_windows.yml b/examples/trials/mnist-pytorch/config_windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e1bdf156b2fde552dccad12e3175d753c9b656cf
--- /dev/null
+++ b/examples/trials/mnist-pytorch/config_windows.yml
@@ -0,0 +1,10 @@
+searchSpaceFile: search_space.json
+trialCommand: python mnist.py
+trialGpuNumber: 0
+trialConcurrency: 1
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-pytorch/fc_template.yml b/examples/trials/mnist-pytorch/fc_template.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c25b81714b0289d6c29aed76f531ba5e75a52dc5
--- /dev/null
+++ b/examples/trials/mnist-pytorch/fc_template.yml
@@ -0,0 +1,49 @@
+apiVersion: frameworkcontroller.microsoft.com/v1
+kind: Framework
+metadata:
+  name: pytorchcpu
+  namespace: default
+spec:
+  executionType: Start
+  retryPolicy:
+    fancyRetryPolicy: true
+    maxRetryCount: 2
+  taskRoles:
+  - name: worker
+    taskNumber: 1
+    frameworkAttemptCompletionPolicy:
+      minFailedTaskCount: 1
+      minSucceededTaskCount: 3
+    task:
+      retryPolicy:
+        fancyRetryPolicy: false
+        maxRetryCount: 0
+      podGracefulDeletionTimeoutSec: 1800
+      pod:
+        spec:
+          restartPolicy: Never
+          hostNetwork: false
+          containers:
+          - name: mnist-pytorch
+            image: msranni/nni:latest
+            command: ["python", "mnist.py"]
+            ports:
+            - containerPort: 5001
+            volumeMounts:
+            - name: frameworkbarrier-volume
+              mountPath: /mnt/frameworkbarrier
+            - name: data-volume
+              mountPath: /tmp/mount
+          serviceAccountName: frameworkbarrier
+          initContainers:
+          - name: frameworkbarrier
+            image: frameworkcontroller/frameworkbarrier
+            volumeMounts:
+            - name: frameworkbarrier-volume
+              mountPath: /mnt/frameworkbarrier
+          volumes:
+          - name: frameworkbarrier-volume
+            emptyDir: {}
+          - name: data-volume
+            persistentVolumeClaim:
+              claimName: nni-storage
diff --git a/examples/trials/mnist-pytorch/mnist.py b/examples/trials/mnist-pytorch/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7ca27816a46936d9e46283d07efebf1dddaa018
--- /dev/null
+++ b/examples/trials/mnist-pytorch/mnist.py
@@ -0,0 +1,166 @@
+"""
+A deep MNIST classifier using convolutional layers.
+ +This file is a modification of the official pytorch mnist example: +https://github.com/pytorch/examples/blob/master/mnist/main.py +""" + +import os +import argparse +import logging +import nni +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from nni.utils import merge_parameter +from torchvision import datasets, transforms + +logger = logging.getLogger('mnist_AutoML') + + +class Net(nn.Module): + def __init__(self, hidden_size): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4*4*50, hidden_size) + self.fc2 = nn.Linear(hidden_size, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + if (args['batch_num'] is not None) and batch_idx >= args['batch_num']: + break + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args['log_interval'] == 0: + logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # sum up batch loss + test_loss += F.nll_loss(output, target, reduction='sum').item() + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + accuracy = 100. 
* correct / len(test_loader.dataset)
+
+    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), accuracy))
+
+    return accuracy
+
+
+def main(args):
+    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
+
+    torch.manual_seed(args['seed'])
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+
+    data_dir = args['data_dir']
+
+    train_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=True, download=True,
+                       transform=transforms.Compose([
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.1307,), (0.3081,))
+                       ])),
+        batch_size=args['batch_size'], shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307,), (0.3081,))
+        ])),
+        batch_size=1000, shuffle=True, **kwargs)
+
+    hidden_size = args['hidden_size']
+
+    model = Net(hidden_size=hidden_size).to(device)
+    optimizer = optim.SGD(model.parameters(), lr=args['lr'],
+                          momentum=args['momentum'])
+
+    for epoch in range(1, args['epochs'] + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test_acc = test(args, model, device, test_loader)
+
+        # report intermediate result
+        nni.report_intermediate_result(test_acc)
+        logger.debug('test accuracy %g', test_acc)
+        logger.debug('Pipe send intermediate result done.')
+
+    # report final result
+    nni.report_final_result(test_acc)
+    logger.debug('Final result is %g', test_acc)
+    logger.debug('Send final result done.')
+
+
+def get_params():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument("--data_dir", type=str,
+                        default='./data', help="data directory")
+    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument("--batch_num", type=int, default=None)
+    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
+                        help='hidden layer size (default: 512)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--no_cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--log_interval', type=int, default=1000, metavar='N',
+                        help='how many batches to wait before logging training status')
+
+    args, _ = parser.parse_known_args()
+    return args
+
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        tuner_params = nni.get_next_parameter()
+        logger.debug(tuner_params)
+        params = vars(merge_parameter(get_params(), tuner_params))
+        print(params)
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-pytorch/mnist_tensorboard.py b/examples/trials/mnist-pytorch/mnist_tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aa373a75d4c3c831381c052a0cc42d2b0da9d77
--- /dev/null
+++ b/examples/trials/mnist-pytorch/mnist_tensorboard.py
@@ -0,0 +1,173 @@
+"""
+A deep MNIST
classifier using convolutional layers. + +This file is a modification of the official pytorch mnist example: +https://github.com/pytorch/examples/blob/master/mnist/main.py +""" + +import os +import argparse +import logging +import nni +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.utils.tensorboard import SummaryWriter +from nni.utils import merge_parameter +from torchvision import datasets, transforms + + +logger = logging.getLogger('mnist_AutoML') + +writer = SummaryWriter(log_dir=os.path.join(os.environ['NNI_OUTPUT_DIR'], 'tensorboard')) + +class Net(nn.Module): + def __init__(self, hidden_size): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4*4*50, hidden_size) + self.fc2 = nn.Linear(hidden_size, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + if (args['batch_num'] is not None) and batch_idx >= args['batch_num']: + break + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + writer.add_scalar('Loss/train', loss, epoch) + loss.backward() + optimizer.step() + if batch_idx % args['log_interval'] == 0: + logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.item())) + + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # sum up batch loss + test_loss += F.nll_loss(output, target, reduction='sum').item() + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + accuracy = 100. 
* correct / len(test_loader.dataset)
+
+    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), accuracy))
+
+    return accuracy
+
+
+def main(args):
+    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
+
+    torch.manual_seed(args['seed'])
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+
+    data_dir = args['data_dir']
+
+    train_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=True, download=True,
+                       transform=transforms.Compose([
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.1307,), (0.3081,))
+                       ])),
+        batch_size=args['batch_size'], shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307,), (0.3081,))
+        ])),
+        batch_size=1000, shuffle=True, **kwargs)
+
+    hidden_size = args['hidden_size']
+
+    model = Net(hidden_size=hidden_size).to(device)
+    optimizer = optim.SGD(model.parameters(), lr=args['lr'],
+                          momentum=args['momentum'])
+
+    for epoch in range(1, args['epochs'] + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test_acc = test(args, model, device, test_loader)
+        writer.add_scalar('Accuracy/test', test_acc, epoch)
+
+        # report intermediate result
+        nni.report_intermediate_result(test_acc)
+        logger.debug('test accuracy %g', test_acc)
+        logger.debug('Pipe send intermediate result done.')
+
+    writer.close()
+
+    # report final result
+    nni.report_final_result(test_acc)
+    logger.debug('Final result is %g', test_acc)
+    logger.debug('Send final result done.')
+
+
+def get_params():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument("--data_dir", type=str,
+                        default='./data', help="data directory")
+    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument("--batch_num", type=int, default=None)
+    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
+                        help='hidden layer size (default: 512)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--no_cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--log_interval', type=int, default=1000, metavar='N',
+                        help='how many batches to wait before logging training status')
+
+    args, _ = parser.parse_known_args()
+    return args
+
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        tuner_params = nni.get_next_parameter()
+        logger.debug(tuner_params)
+        params = vars(merge_parameter(get_params(), tuner_params))
+        print(params)
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-pytorch/requirements.txt b/examples/trials/mnist-pytorch/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..01f6b72556cf7029846f160e5d5743849a5ca244
--- /dev/null
+++ b/examples/trials/mnist-pytorch/requirements.txt
@@ -0,0 +1,2 @@
+torch
+torchvision
diff --git a/examples/trials/mnist-pytorch/search_space.json b/examples/trials/mnist-pytorch/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..c26cdce369fa13e3fdf7c34f10b9cd89a6fc931e
--- /dev/null
+++ b/examples/trials/mnist-pytorch/search_space.json
@@ -0,0 +1,6 @@
+{
+    "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]},
+    "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]},
+    "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]},
+    "momentum":{"_type":"uniform","_value":[0, 1]}
+}
diff --git a/examples/trials/mnist-sharedstorage/config_azureblob.yml b/examples/trials/mnist-sharedstorage/config_azureblob.yml
new file mode 100644
index 0000000000000000000000000000000000000000..36baddd248e0956920885091f71251b51aec66df
--- /dev/null
+++ b/examples/trials/mnist-sharedstorage/config_azureblob.yml
@@ -0,0 +1,39 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 1
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: remote
+  machineList:
+    - host: ${your server's IP or domain name}
+      user: ${your user name}
+      ssh_key_file: ~/.ssh/id_rsa  # We recommend public keys over passwords; they are more secure and convenient.
+      # You can specify more than one SSH server:
+    - host: 123.123.123.123
+      port: 10022
+      user: nniuser
+      password: 12345
+      pythonPath: /usr/bin  # Other examples:
+                            #   /opt/python3.9/bin
+                            #   C:/Python39
+                            #   C:/Users/USERNAME/.conda/envs/ENVNAME;C:/Users/USERNAME/.conda/envs/ENVNAME/Scripts;C:/Users/USERNAME/.conda/envs/ENVNAME/Library/bin
+sharedStorage:
+  storageType: AzureBlob
+  # Please set localMountPoint to an absolute path, and keep it outside the code directory,
+  # because NNI will copy the user code to localMountPoint.
+  localMountPoint: ${your/local/mount/point}
+  # remoteMountPoint is the mount point on the training service machine; it can be either an
+  # absolute or a relative path. Make sure you have passwordless `sudo` permission on the
+  # training service machine.
+  remoteMountPoint: ${your/remote/mount/point}
+  storageAccountName: ${replace_to_your_storageAccountName}
+  storageAccountKey: ${replace_to_your_storageAccountKey}
+  containerName: ${replace_to_your_containerName}
+  # usermount means you have already mounted this storage on localMountPoint.
+  # nnimount means NNI will try to mount this storage on localMountPoint.
+  # nomount means the storage will not be mounted on the local machine; partial storage support will be added in the future.
+  localMounted: nnimount
\ No newline at end of file
diff --git a/examples/trials/mnist-sharedstorage/config_nfs.yml b/examples/trials/mnist-sharedstorage/config_nfs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..17139829838330cc6c005a2b2bbdf3dc65162cf4
--- /dev/null
+++ b/examples/trials/mnist-sharedstorage/config_nfs.yml
@@ -0,0 +1,34 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 1
+maxTrialNumber: 10
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: remote
+  machineList:
+    - host: ${your server's IP or domain name}
+      user: ${your user name}
+      ssh_key_file: ~/.ssh/id_rsa  # We recommend public keys over passwords; they are more secure and convenient.
+      # You can specify more than one SSH server:
+    - host: 123.123.123.123
+      port: 10022
+      user: nniuser
+      password: 12345
+      pythonPath: /usr/bin  # Other examples:
+                            #   /opt/python3.9/bin
+                            #   C:/Python39
+                            #   C:/Users/USERNAME/.conda/envs/ENVNAME;C:/Users/USERNAME/.conda/envs/ENVNAME/Scripts;C:/Users/USERNAME/.conda/envs/ENVNAME/Library/bin
+sharedStorage:
+  storageType: NFS
+  localMountPoint: ${your/local/mount/point}
+  remoteMountPoint: ${your/remote/mount/point}
+  nfsServer: ${nfs-server-ip}
+  exportedDirectory: ${nfs/exported/directory}
+  # usermount means you have already mounted this storage on localMountPoint.
+  # nnimount means NNI will try to mount this storage on localMountPoint.
+  # nomount means the storage will not be mounted on the local machine; partial storage support will be added in the future.
+  localMounted: nnimount
diff --git a/examples/trials/mnist-sharedstorage/mnist.py b/examples/trials/mnist-sharedstorage/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7ca27816a46936d9e46283d07efebf1dddaa018
--- /dev/null
+++ b/examples/trials/mnist-sharedstorage/mnist.py
@@ -0,0 +1,166 @@
+"""
+A deep MNIST classifier using convolutional layers.
+
+This file is a modification of the official pytorch mnist example:
+https://github.com/pytorch/examples/blob/master/mnist/main.py
+"""
+
+import os
+import argparse
+import logging
+import nni
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from nni.utils import merge_parameter
+from torchvision import datasets, transforms
+
+logger = logging.getLogger('mnist_AutoML')
+
+
+class Net(nn.Module):
+    def __init__(self, hidden_size):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5, 1)
+        self.conv2 = nn.Conv2d(20, 50, 5, 1)
+        self.fc1 = nn.Linear(4*4*50, hidden_size)
+        self.fc2 = nn.Linear(hidden_size, 10)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = F.relu(self.conv2(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = x.view(-1, 4*4*50)
+        x = F.relu(self.fc1(x))
+        x = self.fc2(x)
+        return F.log_softmax(x, dim=1)
+
+
+def train(args, model, device, train_loader, optimizer, epoch):
+    model.train()
+    for batch_idx, (data, target) in enumerate(train_loader):
+        if (args['batch_num'] is not None) and batch_idx >= args['batch_num']:
+            break
+        data, target = data.to(device), target.to(device)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, target)
+        loss.backward()
+        optimizer.step()
+        if batch_idx % args['log_interval'] == 0:
+            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                epoch, batch_idx * len(data), len(train_loader.dataset),
+                100. * batch_idx / len(train_loader), loss.item()))
+
+
+def test(args, model, device, test_loader):
+    model.eval()
+    test_loss = 0
+    correct = 0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            # sum up batch loss
+            test_loss += F.nll_loss(output, target, reduction='sum').item()
+            # get the index of the max log-probability
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+
+    test_loss /= len(test_loader.dataset)
+
+    accuracy = 100. * correct / len(test_loader.dataset)
+
+    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), accuracy))
+
+    return accuracy
+
+
+def main(args):
+    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
+
+    torch.manual_seed(args['seed'])
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+
+    data_dir = args['data_dir']
+
+    train_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=True, download=True,
+                       transform=transforms.Compose([
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.1307,), (0.3081,))
+                       ])),
+        batch_size=args['batch_size'], shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(
+        datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307,), (0.3081,))
+        ])),
+        batch_size=1000, shuffle=True, **kwargs)
+
+    hidden_size = args['hidden_size']
+
+    model = Net(hidden_size=hidden_size).to(device)
+    optimizer = optim.SGD(model.parameters(), lr=args['lr'],
+                          momentum=args['momentum'])
+
+    for epoch in range(1, args['epochs'] + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test_acc = test(args, model, device, test_loader)
+
+        # report intermediate result
+        nni.report_intermediate_result(test_acc)
+        logger.debug('test accuracy %g', test_acc)
+        logger.debug('Pipe send intermediate result done.')
+
+    # report final result
+    nni.report_final_result(test_acc)
+    logger.debug('Final result is %g', test_acc)
+    logger.debug('Send final result done.')
+
+
+def get_params():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument("--data_dir", type=str,
+                        default='./data', help="data directory")
+    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument("--batch_num", type=int, default=None)
+    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
+                        help='hidden layer size (default: 512)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--no_cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--log_interval', type=int, default=1000, metavar='N',
+                        help='how many batches to wait before logging training status')
+
+    args, _ = parser.parse_known_args()
+    return args
+
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        tuner_params = nni.get_next_parameter()
+        logger.debug(tuner_params)
+        params = vars(merge_parameter(get_params(), tuner_params))
+        print(params)
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-sharedstorage/requirements.txt b/examples/trials/mnist-sharedstorage/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..01f6b72556cf7029846f160e5d5743849a5ca244
--- /dev/null
+++ b/examples/trials/mnist-sharedstorage/requirements.txt
@@ -0,0 +1,2 @@
+torch
+torchvision
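For reference, the trial scripts in this change all follow the same NNI protocol: fetch hyper-parameters from the tuner, report an intermediate metric per epoch, and report one final metric. A minimal, hypothetical sketch of that loop (not a file in this diff; `train_one_epoch` is a placeholder for real training and evaluation code):

import nni

params = {'lr': 0.01, 'epochs': 10}            # script defaults
params.update(nni.get_next_parameter())        # overridden by values the tuner samples from search_space.json

for epoch in range(params['epochs']):
    accuracy = train_one_epoch(params)         # placeholder: train, evaluate, return a metric
    nni.report_intermediate_result(accuracy)   # shown as intermediate results in the web UI
nni.report_final_result(accuracy)              # the metric the tuner optimizes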
diff --git a/examples/trials/mnist-sharedstorage/search_space.json b/examples/trials/mnist-sharedstorage/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..c26cdce369fa13e3fdf7c34f10b9cd89a6fc931e
--- /dev/null
+++ b/examples/trials/mnist-sharedstorage/search_space.json
@@ -0,0 +1,6 @@
+{
+    "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]},
+    "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]},
+    "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]},
+    "momentum":{"_type":"uniform","_value":[0, 1]}
+}
diff --git a/examples/trials/mnist-tfv1/.nniignore b/examples/trials/mnist-tfv1/.nniignore
new file mode 100644
index 0000000000000000000000000000000000000000..04886e1d800a15efa04346bf9c34061a2fed87a4
--- /dev/null
+++ b/examples/trials/mnist-tfv1/.nniignore
@@ -0,0 +1,7 @@
+# Exclude the following directories when uploading codeDir.
+data
+logs
+checkpoints
+
+# They can also be files
+outputs.log
diff --git a/examples/trials/mnist-tfv1/config.yml b/examples/trials/mnist-tfv1/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..878384509993757adbbcf141516d03a1044c83f1
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config.yml
@@ -0,0 +1,23 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 2h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: local
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 1
+localConfig:
+  useActiveGpu: true
diff --git a/examples/trials/mnist-tfv1/config_aml.yml b/examples/trials/mnist-tfv1/config_aml.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cfc5fcaaf535f212bc6e8d4513d77890bf425074
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_aml.yml
@@ -0,0 +1,25 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+trainingServicePlatform: aml
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
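+  # Docker image used to run each trial on AML (it mirrors the "dockerImage" field of the v2 config above):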
+  image: msranni/nni
+amlConfig:
+  subscriptionId: ${replace_to_your_subscriptionId}
+  resourceGroup: ${replace_to_your_resourceGroup}
+  workspaceName: ${replace_to_your_workspaceName}
+  computeTarget: ${replace_to_your_computeTarget}
diff --git a/examples/trials/mnist-tfv1/config_assessor.yml b/examples/trials/mnist-tfv1/config_assessor.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ed627a0897aace29a103383275a422c2e7ec1d72
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_assessor.yml
@@ -0,0 +1,27 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 50
+#choice: local, remote
+trainingServicePlatform: local
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+assessor:
+  #choice: Medianstop, Curvefitting
+  builtinAssessorName: Curvefitting
+  classArgs:
+    epoch_num: 20
+    threshold: 0.9
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 0
diff --git a/examples/trials/mnist-tfv1/config_dlts.yml b/examples/trials/mnist-tfv1/config_dlts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e4de6782bc1c75e810f0bea208dbefd003148e68
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_dlts.yml
@@ -0,0 +1,34 @@
+debug: true
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: dlts
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 1
+  # The Docker image to run NNI jobs on DLTS
+  image: msranni/nni:latest
+dltsConfig:
+  dashboard: http://azure-eastus-p40-dev1-infra01.eastus.cloudapp.azure.com/
+
+  # The following fields are all optional and can be retrieved from environment
+  # variables when running in a DLTS job container.
+
+  # cluster: .default
+  # team: platform
+  # email: example@microsoft.com
+  # password: # Paste from DLTS dashboard
diff --git a/examples/trials/mnist-tfv1/config_frameworkcontroller.yml b/examples/trials/mnist-tfv1/config_frameworkcontroller.yml
new file mode 100644
index 0000000000000000000000000000000000000000..63cdb34585fe51a70755355c5afe15a49ce0451b
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_frameworkcontroller.yml
@@ -0,0 +1,40 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai, kubeflow
+trainingServicePlatform: frameworkcontroller
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+trial:
+  codeDir: .
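+  # Each task role below defines one group of pods per trial: replica count, command, resources, and Docker image.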
+  taskRoles:
+    - name: worker
+      taskNum: 1
+      command: python3 mnist.py
+      gpuNum: 1
+      cpuNum: 1
+      memoryMB: 8192
+      image: msranni/nni:latest
+      frameworkAttemptCompletionPolicy:
+        minFailedTaskCount: 1
+        minSucceededTaskCount: 1
+frameworkcontrollerConfig:
+  storage: nfs
+  nfs:
+    # Your NFS server IP, like 10.10.10.10
+    server: {your_nfs_server_ip}
+    # Your NFS server export path, like /var/nfs/nni
+    path: {your_nfs_server_export_path}
\ No newline at end of file
diff --git a/examples/trials/mnist-tfv1/config_hybrid.yml b/examples/trials/mnist-tfv1/config_hybrid.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ec0dbdf29532ee2345cfeef2d792cbb88fb30484
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_hybrid.yml
@@ -0,0 +1,31 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 3
+maxExecDuration: 1h
+maxTrialNum: 10
+trainingServicePlatform: hybrid
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 0
+hybridConfig:
+  trainingServicePlatforms:
+    - local
+    - remote
+remoteConfig:
+  reuse: true
+machineList:
+  - ip: 10.1.1.1
+    username: xxx
+    passwd: xxx
+    port: 22
diff --git a/examples/trials/mnist-tfv1/config_hybrid_v2.yml b/examples/trials/mnist-tfv1/config_hybrid_v2.yml
new file mode 100644
index 0000000000000000000000000000000000000000..084b0062837116514a01310131c26a4593655e59
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_hybrid_v2.yml
@@ -0,0 +1,24 @@
+experimentName: example_mnist
+trialConcurrency: 3
+maxExperimentDuration: 1h
+maxTrialNumber: 10
+searchSpaceFile: search_space.json
+
+trialCodeDirectory: .
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+
+trainingService:
+  - platform: local
+  - platform: remote
+    reuseMode: true
+    machineList:
+      - host: 10.1.1.1
+        user: xxx
+        password: xxx
+        # port can be skipped if using the default SSH port 22
+        port: 22
diff --git a/examples/trials/mnist-tfv1/config_kubeflow.yml b/examples/trials/mnist-tfv1/config_kubeflow.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f460b37cb61aa200db5ee2412d391f279aa90631
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_kubeflow.yml
@@ -0,0 +1,32 @@
+authorName: default
+experimentName: example_dist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 1
+#choice: local, remote, pai, kubeflow
+trainingServicePlatform: kubeflow
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  codeDir: .
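+  # The worker section defines the Kubeflow replicas that run each trial: command, resources, and Docker image.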
+  worker:
+    replicas: 1
+    command: python3 mnist.py
+    gpuNum: 0
+    cpuNum: 1
+    memoryMB: 8192
+    image: msranni/nni:latest
+kubeflowConfig:
+  operator: tf-operator
+  apiVersion: v1alpha2
+  storage: nfs
+  nfs:
+    server: 10.10.10.10
+    path: /var/nfs/general
\ No newline at end of file
diff --git a/examples/trials/mnist-tfv1/config_pai.yml b/examples/trials/mnist-tfv1/config_pai.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d2e5a200253ad46a408bd5b091e2d8e08d38be6f
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_pai.yml
@@ -0,0 +1,35 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: pai
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 mnist.py
+  codeDir: .
+  gpuNum: 0
+  cpuNum: 1
+  memoryMB: 8196
+  # The Docker image to run NNI jobs on PAI
+  image: msranni/nni:latest
+  nniManagerNFSMountPath: {replace_to_your_nfs_mount_path}
+  containerNFSMountPath: {replace_to_your_container_mount_path}
+  paiStorageConfigName: {replace_to_your_storage_config_name}
+paiConfig:
+  # The username to log in to PAI
+  userName: username
+  # The token to log in to PAI
+  token: token
+  # The host of the PAI RESTful server
+  host: 10.10.10.10
\ No newline at end of file
diff --git a/examples/trials/mnist-tfv1/config_windows.yml b/examples/trials/mnist-tfv1/config_windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2cecf8c76e6177e6508e2dd2f3ad2cb12d56ed68
--- /dev/null
+++ b/examples/trials/mnist-tfv1/config_windows.yml
@@ -0,0 +1,21 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: local
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python mnist.py
+  codeDir: .
+  gpuNum: 0
diff --git a/examples/trials/mnist-tfv1/launch_hybrid.py b/examples/trials/mnist-tfv1/launch_hybrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..16660a279c8a3075923f97381e2dcb96a3e56002
--- /dev/null
+++ b/examples/trials/mnist-tfv1/launch_hybrid.py
@@ -0,0 +1,38 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Example showing how to create an experiment with Python code.
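+
+It starts a hybrid experiment that runs trials on both the local machine and
+one remote SSH machine; the host, user, and password values below are
+placeholders to replace with your own.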
+""" + +from pathlib import Path + +from nni.experiment import Experiment, RemoteMachineConfig + +search_space = { + "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] }, + "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] }, + "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] }, + "batch_size": { "_type": "choice", "_value": [16, 32] }, + "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] } +} + +experiment = Experiment(['local', 'remote']) +experiment.config.experiment_name = 'test' +experiment.config.trial_concurrency = 3 +experiment.config.max_trial_number = 10 +experiment.config.search_space = search_space +experiment.config.trial_command = 'python3 mnist.py' +experiment.config.trial_code_directory = Path(__file__).parent +experiment.config.tuner.name = 'TPE' +experiment.config.tuner.class_args['optimize_mode'] = 'maximize' +experiment.config.training_service[0].use_active_gpu = True +experiment.config.training_service[1].reuse_mode = True +rm_conf = RemoteMachineConfig() +rm_conf.host = '10.1.1.1' +rm_conf.user = 'xxx' +rm_conf.password = 'xxx' +rm_conf.port = 22 +experiment.config.training_service[1].machine_list = [rm_conf] + +experiment.run(26780, debug=True) diff --git a/examples/trials/mnist-tfv1/mnist.py b/examples/trials/mnist-tfv1/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..9f5db70fca1199a715ea7e5c6d1dd3ac84ac7016 --- /dev/null +++ b/examples/trials/mnist-tfv1/mnist.py @@ -0,0 +1,238 @@ +"""A deep MNIST classifier using convolutional layers.""" + +import argparse +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +import nni + +FLAGS = None + +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + ''' + MnistNetwork is for initializing and building basic network for mnist. + ''' + def __init__(self, + channel_1_num, + channel_2_num, + conv_size, + hidden_size, + pool_size, + learning_rate, + x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + self.conv_size = conv_size + self.hidden_size = hidden_size + self.pool_size = pool_size + self.learning_rate = learning_rate + self.x_dim = x_dim + self.y_dim = y_dim + + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + + self.train_step = None + self.accuracy = None + + def build_network(self): + ''' + Building network for mnist + ''' + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print( + 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. + with tf.name_scope('conv1'): + w_conv1 = weight_variable( + [self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) + + # Pooling layer - downsamples by 2X. 
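+        # (max_pool below uses ksize == strides == pool_size, so each pooling step divides the spatial size by pool_size.)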
+        with tf.name_scope('pool1'):
+            h_pool1 = max_pool(h_conv1, self.pool_size)
+
+        # Second convolutional layer -- maps 32 feature maps to 64.
+        with tf.name_scope('conv2'):
+            w_conv2 = weight_variable([self.conv_size, self.conv_size,
+                                       self.channel_1_num, self.channel_2_num])
+            b_conv2 = bias_variable([self.channel_2_num])
+            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
+
+        # Second pooling layer.
+        with tf.name_scope('pool2'):
+            h_pool2 = max_pool(h_conv2, self.pool_size)
+
+        # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
+        # is down to 7x7x64 feature maps -- maps this to 1024 features.
+        last_dim = int(input_dim / (self.pool_size * self.pool_size))
+        with tf.name_scope('fc1'):
+            w_fc1 = weight_variable(
+                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
+            b_fc1 = bias_variable([self.hidden_size])
+
+            h_pool2_flat = tf.reshape(
+                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
+            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
+
+        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
+        with tf.name_scope('dropout'):
+            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
+
+        # Map the 1024 features to 10 classes, one for each digit
+        with tf.name_scope('fc2'):
+            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
+            b_fc2 = bias_variable([self.y_dim])
+            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
+
+        with tf.name_scope('loss'):
+            cross_entropy = tf.reduce_mean(
+                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
+        with tf.name_scope('adam_optimizer'):
+            self.train_step = tf.train.AdamOptimizer(
+                self.learning_rate).minimize(cross_entropy)
+
+        with tf.name_scope('accuracy'):
+            correct_prediction = tf.equal(
+                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
+            self.accuracy = tf.reduce_mean(
+                tf.cast(correct_prediction, tf.float32))
+
+
+def conv2d(x_input, w_matrix):
+    """conv2d returns a 2d convolution layer with full stride."""
+    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
+
+
+def max_pool(x_input, pool_size):
+    """max_pool downsamples a feature map by 2X."""
+    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
+                          strides=[1, pool_size, pool_size, 1], padding='SAME')
+
+
+def weight_variable(shape):
+    """weight_variable generates a weight variable of a given shape."""
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+
+def bias_variable(shape):
+    """bias_variable generates a bias variable of a given shape."""
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def download_mnist_retry(data_dir, max_num_retries=20):
+    """Try to download the mnist dataset and avoid errors"""
+    for _ in range(max_num_retries):
+        try:
+            return input_data.read_data_sets(data_dir, one_hot=True)
+        except tf.errors.AlreadyExistsError:
+            time.sleep(1)
+    raise Exception("Failed to download MNIST.")
+
+def main(params):
+    '''
+    Main function: build the mnist network, train it, and send results to NNI.
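+
+    The params dict must provide: data_dir, dropout_rate, channel_1_num,
+    channel_2_num, conv_size, pool_size, hidden_size, learning_rate,
+    batch_num and batch_size (defaults come from get_params below).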
+    '''
+    # Import data
+    mnist = download_mnist_retry(params['data_dir'])
+    print('Mnist download data done.')
+    logger.debug('Mnist download data done.')
+
+    # Create the model
+    # Build the graph for the deep net
+    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
+                                 channel_2_num=params['channel_2_num'],
+                                 conv_size=params['conv_size'],
+                                 hidden_size=params['hidden_size'],
+                                 pool_size=params['pool_size'],
+                                 learning_rate=params['learning_rate'])
+    mnist_network.build_network()
+    logger.debug('Mnist build network done.')
+
+    # Write log
+    graph_location = tempfile.mkdtemp()
+    logger.debug('Saving graph to: %s', graph_location)
+    train_writer = tf.summary.FileWriter(graph_location)
+    train_writer.add_graph(tf.get_default_graph())
+
+    test_acc = 0.0
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        for i in range(params['batch_num']):
+            batch = mnist.train.next_batch(params['batch_size'])
+            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
+                                                    mnist_network.labels: batch[1],
+                                                    mnist_network.keep_prob: 1 - params['dropout_rate']}
+                                         )
+
+            if i % 100 == 0:
+                test_acc = mnist_network.accuracy.eval(
+                    feed_dict={mnist_network.images: mnist.test.images,
+                               mnist_network.labels: mnist.test.labels,
+                               mnist_network.keep_prob: 1.0})
+
+                nni.report_intermediate_result(test_acc)
+                logger.debug('test accuracy %g', test_acc)
+                logger.debug('Pipe send intermediate result done.')
+
+        test_acc = mnist_network.accuracy.eval(
+            feed_dict={mnist_network.images: mnist.test.images,
+                       mnist_network.labels: mnist.test.labels,
+                       mnist_network.keep_prob: 1.0})
+
+        nni.report_final_result(test_acc)
+        logger.debug('Final result is %g', test_acc)
+        logger.debug('Send final result done.')
+
+def get_params():
+    ''' Get parameters from command line '''
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory")
+    parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate")
+    parser.add_argument("--channel_1_num", type=int, default=32)
+    parser.add_argument("--channel_2_num", type=int, default=64)
+    parser.add_argument("--conv_size", type=int, default=5)
+    parser.add_argument("--pool_size", type=int, default=2)
+    parser.add_argument("--hidden_size", type=int, default=1024)
+    parser.add_argument("--learning_rate", type=float, default=1e-4)
+    parser.add_argument("--batch_num", type=int, default=2000)
+    parser.add_argument("--batch_size", type=int, default=32)
+
+    args, _ = parser.parse_known_args()
+    return args
+
+if __name__ == '__main__':
+    try:
+        # get parameters from tuner
+        tuner_params = nni.get_next_parameter()
+        logger.debug(tuner_params)
+        params = vars(get_params())
+        params.update(tuner_params)
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-tfv1/mnist_before.py b/examples/trials/mnist-tfv1/mnist_before.py
new file mode 100644
index 0000000000000000000000000000000000000000..563ea25831e5574410d1fe7bbeaa5369ba51b7e4
--- /dev/null
+++ b/examples/trials/mnist-tfv1/mnist_before.py
@@ -0,0 +1,232 @@
+"""A deep MNIST classifier using convolutional layers."""
+import argparse
+import logging
+import math
+import tempfile
+import time
+
+import tensorflow as tf
+from tensorflow.examples.tutorials.mnist import input_data
+
+FLAGS = None
+
+logger = logging.getLogger('mnist_AutoML')
+
+
+class MnistNetwork(object):
+    '''
+    MnistNetwork is for initializing and building basic network for mnist.
+    '''
+
+    def __init__(self,
+                 channel_1_num,
+                 channel_2_num,
+                 conv_size,
+                 hidden_size,
+                 pool_size,
+                 learning_rate,
+                 x_dim=784,
+                 y_dim=10):
+        self.channel_1_num = channel_1_num
+        self.channel_2_num = channel_2_num
+        self.conv_size = conv_size
+        self.hidden_size = hidden_size
+        self.pool_size = pool_size
+        self.learning_rate = learning_rate
+        self.x_dim = x_dim
+        self.y_dim = y_dim
+
+        self.images = tf.placeholder(
+            tf.float32, [None, self.x_dim], name='input_x')
+        self.labels = tf.placeholder(
+            tf.float32, [None, self.y_dim], name='input_y')
+        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
+
+        self.train_step = None
+        self.accuracy = None
+
+    def build_network(self):
+        '''
+        Building network for mnist
+        '''
+
+        # Reshape to use within a convolutional neural net.
+        # Last dimension is for "features" - there is only one here, since images are
+        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
+        with tf.name_scope('reshape'):
+            try:
+                input_dim = int(math.sqrt(self.x_dim))
+            except:
+                print(
+                    'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
+                logger.debug(
+                    'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
+                raise
+            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
+
+        # First convolutional layer - maps one grayscale image to 32 feature maps.
+        with tf.name_scope('conv1'):
+            w_conv1 = weight_variable(
+                [self.conv_size, self.conv_size, 1, self.channel_1_num])
+            b_conv1 = bias_variable([self.channel_1_num])
+            h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
+
+        # Pooling layer - downsamples by 2X.
+        with tf.name_scope('pool1'):
+            h_pool1 = max_pool(h_conv1, self.pool_size)
+
+        # Second convolutional layer -- maps 32 feature maps to 64.
+        with tf.name_scope('conv2'):
+            w_conv2 = weight_variable([self.conv_size, self.conv_size,
+                                       self.channel_1_num, self.channel_2_num])
+            b_conv2 = bias_variable([self.channel_2_num])
+            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
+
+        # Second pooling layer.
+        with tf.name_scope('pool2'):
+            h_pool2 = max_pool(h_conv2, self.pool_size)
+
+        # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
+        # is down to 7x7x64 feature maps -- maps this to 1024 features.
+        last_dim = int(input_dim / (self.pool_size * self.pool_size))
+        with tf.name_scope('fc1'):
+            w_fc1 = weight_variable(
+                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
+            b_fc1 = bias_variable([self.hidden_size])
+
+            h_pool2_flat = tf.reshape(
+                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
+            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
+
+        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
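+        # (During training keep_prob is fed as 1 - dropout_rate; at evaluation time it is fed as 1.0.)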
+        with tf.name_scope('dropout'):
+            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
+
+        # Map the 1024 features to 10 classes, one for each digit
+        with tf.name_scope('fc2'):
+            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
+            b_fc2 = bias_variable([self.y_dim])
+            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
+
+        with tf.name_scope('loss'):
+            cross_entropy = tf.reduce_mean(
+                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
+        with tf.name_scope('adam_optimizer'):
+            self.train_step = tf.train.AdamOptimizer(
+                self.learning_rate).minimize(cross_entropy)
+
+        with tf.name_scope('accuracy'):
+            correct_prediction = tf.equal(
+                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
+            self.accuracy = tf.reduce_mean(
+                tf.cast(correct_prediction, tf.float32))
+
+
+def conv2d(x_input, w_matrix):
+    """conv2d returns a 2d convolution layer with full stride."""
+    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
+
+
+def max_pool(x_input, pool_size):
+    """max_pool downsamples a feature map by 2X."""
+    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
+                          strides=[1, pool_size, pool_size, 1], padding='SAME')
+
+
+def weight_variable(shape):
+    """weight_variable generates a weight variable of a given shape."""
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+
+def bias_variable(shape):
+    """bias_variable generates a bias variable of a given shape."""
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def download_mnist_retry(data_dir, max_num_retries=20):
+    """Try to download the mnist dataset and avoid errors"""
+    for _ in range(max_num_retries):
+        try:
+            return input_data.read_data_sets(data_dir, one_hot=True)
+        except tf.errors.AlreadyExistsError:
+            time.sleep(1)
+    raise Exception("Failed to download MNIST.")
+
+def main(params):
+    '''
+    Main function: build the mnist network, train it, and log the results.
+    (This "before NNI" version does not report anything to NNI; see mnist.py.)
+    '''
+    # Import data
+    mnist = download_mnist_retry(params['data_dir'])
+    print('Mnist download data done.')
+    logger.debug('Mnist download data done.')
+
+    # Create the model
+    # Build the graph for the deep net
+    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
+                                 channel_2_num=params['channel_2_num'],
+                                 conv_size=params['conv_size'],
+                                 hidden_size=params['hidden_size'],
+                                 pool_size=params['pool_size'],
+                                 learning_rate=params['learning_rate'])
+    mnist_network.build_network()
+    logger.debug('Mnist build network done.')
+
+    # Write log
+    graph_location = tempfile.mkdtemp()
+    logger.debug('Saving graph to: %s', graph_location)
+    train_writer = tf.summary.FileWriter(graph_location)
+    train_writer.add_graph(tf.get_default_graph())
+
+    test_acc = 0.0
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        for i in range(params['batch_num']):
+            batch = mnist.train.next_batch(params['batch_size'])
+            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
+                                                    mnist_network.labels: batch[1],
+                                                    mnist_network.keep_prob: 1 - params['dropout_rate']}
+                                         )
+
+            if i % 100 == 0:
+                test_acc = mnist_network.accuracy.eval(
+                    feed_dict={mnist_network.images: mnist.test.images,
+                               mnist_network.labels: mnist.test.labels,
+                               mnist_network.keep_prob: 1.0})
+
+                # report intermediate accuracy to the NNI framework
+                nni.report_intermediate_result(test_acc)
+                logger.debug('test accuracy %g', test_acc)
+                logger.debug('Pipe send intermediate result done.')
+
+        test_acc = mnist_network.accuracy.eval(
+            feed_dict={mnist_network.images: mnist.test.images,
+                       mnist_network.labels: mnist.test.labels,
+                       mnist_network.keep_prob: 1.0})
+
+        # report final accuracy to the NNI tuner
+        nni.report_final_result(test_acc)
+        logger.debug('Final result is %g', test_acc)
+        logger.debug('Send final result done.')
+
+def get_params():
+    ''' Get parameters from the command line '''
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory")
+    parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate")
+    parser.add_argument("--channel_1_num", type=int, default=32)
+    parser.add_argument("--channel_2_num", type=int, default=64)
+    parser.add_argument("--conv_size", type=int, default=5)
+    parser.add_argument("--pool_size", type=int, default=2)
+    parser.add_argument("--hidden_size", type=int, default=1024)
+    parser.add_argument("--learning_rate", type=float, default=1e-4)
+    parser.add_argument("--batch_num", type=int, default=2000)
+    parser.add_argument("--batch_size", type=int, default=32)
+
+    args, _ = parser.parse_known_args()
+    return args
+
+if __name__ == '__main__':
+    try:
+        params = vars(get_params())
+        main(params)
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/examples/trials/mnist-tfv1/search_space.json b/examples/trials/mnist-tfv1/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9b68a7a4fa5a901cbf429a200ebb89e024b0a7e
--- /dev/null
+++ b/examples/trials/mnist-tfv1/search_space.json
@@ -0,0 +1,7 @@
+{
+    "dropout_rate":{"_type":"uniform","_value":[0.5, 0.9]},
+    "conv_size":{"_type":"choice","_value":[2,3,5,7]},
+    "hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
+    "batch_size": {"_type":"choice", "_value": [1, 4, 8, 16, 32]},
+    "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}
+}
diff --git a/examples/trials/mnist-tfv2/config.yml b/examples/trials/mnist-tfv2/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7fd35c0e9aa06366e9fc7c204bbaa6af58e8322d
--- /dev/null
+++ b/examples/trials/mnist-tfv2/config.yml
@@ -0,0 
+1,14 @@
+# This is the minimal config file for an NNI experiment.
+# Use "nnictl create --config config.yml" to launch this experiment.
+# Afterwards, you can check "config_detailed.yml" for more explanation.
+
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py  # NOTE: change "python3" to "python" if you are using Windows
+trialGpuNumber: 0
+trialConcurrency: 1
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-tfv2/config_assessor.yml b/examples/trials/mnist-tfv2/config_assessor.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1a138d8e17699645bb639e3cc397f12cc2123969
--- /dev/null
+++ b/examples/trials/mnist-tfv2/config_assessor.yml
@@ -0,0 +1,16 @@
+searchSpaceFile: search_space.json
+trialCommand: python3 mnist.py
+trialGpuNumber: 0
+trialConcurrency: 2
+maxTrialNumber: 50
+tuner:
+  name: TPE
+  classArgs:
+    optimize_mode: maximize
+assessor:  # Specify the early stopping algorithm
+  name: Curvefitting
+  classArgs:
+    epoch_num: 20
+    threshold: 0.9
+trainingService:
+  platform: local
diff --git a/examples/trials/mnist-tfv2/config_detailed.yml b/examples/trials/mnist-tfv2/config_detailed.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ed31da19da1675f8f179551bcb8ec376a84b3482
--- /dev/null
+++ b/examples/trials/mnist-tfv2/config_detailed.yml
@@ -0,0 +1,48 @@
+# This example shows more configurable fields compared with the minimal "config.yml".
+# You can use "nnictl create --config config_detailed.yml" to launch this experiment.
+# If you see an error message saying "port 8080 is used", use "nnictl stop --all" to stop previous experiments.
+
+experimentName: MNIST           # An optional name to help you distinguish experiments.
+
+# The hyper-parameter search space can either be configured here or in a separate file.
+# "config.yml" shows how to specify a separate search space file.
+# The common schema of search space is documented here:
+#   https://nni.readthedocs.io/en/stable/Tutorial/SearchSpaceSpec.html
+searchSpace:
+  dropout_rate:
+    _type: uniform
+    _value: [0.5, 0.9]
+  conv_size:
+    _type: choice
+    _value: [2, 3, 5, 7]
+  hidden_size:
+    _type: choice
+    _value: [128, 512, 1024]
+  batch_size:
+    _type: choice
+    _value: [16, 32]
+  learning_rate:
+    _type: choice
+    _value: [0.0001, 0.001, 0.01, 0.1]
+
+trialCommand: python3 mnist.py  # The command to launch a trial. NOTE: change "python3" to "python" if you are using Windows.
+trialCodeDirectory: .           # The path of the trial code. By default it's ".", which means the same directory as this config file.
+trialGpuNumber: 1               # How many GPUs each trial should use. CUDA is required when it's greater than zero.
+
+trialConcurrency: 4             # Run 4 trials concurrently.
+maxTrialNumber: 10              # Generate at most 10 trials.
+maxExperimentDuration: 1h       # Stop generating trials after 1 hour.
+
+tuner:                          # Configure the tuning algorithm.
+  name: TPE                     # Supported algorithms: TPE, Random, Anneal, Evolution, GridSearch, GPTuner, PBTTuner, etc.
+                                #   Full list: https://nni.readthedocs.io/en/latest/Tuner/BuiltinTuner.html
+  classArgs:                    # Algorithm-specific arguments. See the tuner's doc for details.
+    optimize_mode: maximize     # "minimize" or "maximize"
+
+# Configure the training platform.
+# Supported platforms: local, remote, openpai, aml, kubeflow, kubernetes, adl.
+# You can find config templates of some platforms in this directory, and others in the mnist-pytorch example.
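+# NOTE: the searchSpace keys above (dropout_rate, conv_size, hidden_size,
+# batch_size, learning_rate) must match the parameter names that mnist.py
+# reads from nni.get_next_parameter().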
+trainingService: + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop) + # Reason and details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/mnist-tfv2/config_hybrid.yml b/examples/trials/mnist-tfv2/config_hybrid.yml new file mode 100644 index 0000000000000000000000000000000000000000..31d4c797ca4fa96fa68b46a5b9eca48a4b66e210 --- /dev/null +++ b/examples/trials/mnist-tfv2/config_hybrid.yml @@ -0,0 +1,61 @@ +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 5 +maxTrialNumber: 20 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +# For local, remote, openpai, and aml, NNI can use multiple training services at one time +trainingService: + - platform: local + - platform: remote + machineList: + - host: ${your server's IP or domain name} + user: ${your user name} + ssh_key_file: ~/.ssh/id_rsa + - platform: aml + dockerImage: msranni/nni + subscriptionId: ${your subscription ID} + resourceGroup: ${your resource group} + workspaceName: ${your workspace name} + computeTarget: ${your compute target} + - platform: kubeflow + reuseMode: true + worker: + command: + code_directory: + dockerImage: msranni/nni + cpuNumber: + gpuNumber: + memorySize: + replicas: + operator: tf-operator + storage: + storageType: + azureAccount: + azureShare: + keyVaultName: + keyVaultKey: + apiVersion: v1 + - platform: frameworkcontroller + reuseMode: true + serviceAccountName: + taskRoles: + - name: worker + dockerImage: 'msranni/nni:latest' + taskNumber: + command: + gpuNumber: + cpuNumber: + memorySize: + framework_attempt_completion_policy: + min_failed_task_count: 1 + minSucceedTaskCount: 1 + storage: + storageType: + azureAccount: + azureShare: + keyVaultName: + keyVaultKey: diff --git a/examples/trials/mnist-tfv2/config_remote.yml b/examples/trials/mnist-tfv2/config_remote.yml new file mode 100644 index 0000000000000000000000000000000000000000..42a8546848676f48e4e20077f9ea04f2707fc5cf --- /dev/null +++ b/examples/trials/mnist-tfv2/config_remote.yml @@ -0,0 +1,24 @@ +searchSpaceFile: search_space.json +trialCommand: python3 mnist.py +trialGpuNumber: 0 +trialConcurrency: 4 +maxTrialNumber: 20 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: + platform: remote + machineList: + - host: ${your server's IP or domain name} + user: ${your user name} + ssh_key_file: ~/.ssh/id_rsa # We recommend public key over password, it's more secure and convenient. 
+ # You can specify more than one SSH servers: + - host: 123.123.123.123 + port: 10022 + user: nniuser + password: 12345 + pythonPath: /usr/bin # Other examples: + # /opt/python3.9/bin + # C:/Python39 + # C:/Users/USERNAME/.conda/envs/ENVNAME;C:/Users/USERNAME/.conda/envs/ENVNAME/Scripts;C:/Users/USERNAME/.conda/envs/ENVNAME/Library/bin diff --git a/examples/trials/mnist-tfv2/config_windows.yml b/examples/trials/mnist-tfv2/config_windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..e1bdf156b2fde552dccad12e3175d753c9b656cf --- /dev/null +++ b/examples/trials/mnist-tfv2/config_windows.yml @@ -0,0 +1,10 @@ +searchSpaceFile: search_space.json +trialCommand: python mnist.py +trialGpuNumber: 0 +trialConcurrency: 1 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: + platform: local diff --git a/examples/trials/mnist-tfv2/launch.py b/examples/trials/mnist-tfv2/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..e6ac12480a6284ee88802897b764e562994e746b --- /dev/null +++ b/examples/trials/mnist-tfv2/launch.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Example showing how to create experiment with Python code. +""" + +from pathlib import Path + +from nni.experiment import Experiment + +search_space = { + "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] }, + "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] }, + "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] }, + "batch_size": { "_type": "choice", "_value": [16, 32] }, + "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] } +} + +experiment = Experiment('local') +experiment.config.experiment_name = 'MNIST example' +experiment.config.trial_concurrency = 2 +experiment.config.max_trial_number = 10 +experiment.config.search_space = search_space +experiment.config.trial_command = 'python3 mnist.py' +experiment.config.trial_code_directory = Path(__file__).parent +experiment.config.tuner.name = 'TPE' +experiment.config.tuner.class_args['optimize_mode'] = 'maximize' +experiment.config.training_service.use_active_gpu = True + +experiment.run(8080) diff --git a/examples/trials/mnist-tfv2/mnist.py b/examples/trials/mnist-tfv2/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..59cedbbb3926378b240616299459001f3c104416 --- /dev/null +++ b/examples/trials/mnist-tfv2/mnist.py @@ -0,0 +1,146 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +NNI example trial code. + +- Experiment type: Hyper-parameter Optimization +- Trial framework: Tensorflow v2.x (Keras API) +- Model: LeNet-5 +- Dataset: MNIST +""" + +import logging + +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.callbacks import Callback +from tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten, MaxPool2D) +from tensorflow.keras.optimizers import Adam + +import nni + +_logger = logging.getLogger('mnist_example') +_logger.setLevel(logging.INFO) + + +class MnistModel(Model): + """ + LeNet-5 Model with customizable hyper-parameters + """ + def __init__(self, conv_size, hidden_size, dropout_rate): + """ + Initialize hyper-parameters. + + Parameters + ---------- + conv_size : int + Kernel size of convolutional layers. + hidden_size : int + Dimensionality of last hidden layer. + dropout_rate : float + Dropout rate between two fully connected (dense) layers, to prevent co-adaptation. 
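+
+        Shape check (for the default conv_size=5, assuming Keras' default
+        'valid' padding): 28x28 -> conv 24x24 -> pool 12x12 -> conv 8x8 ->
+        pool 4x4, so Flatten sees 4*4*64 = 1024 values before the hidden layer.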
+ """ + super().__init__() + self.conv1 = Conv2D(filters=32, kernel_size=conv_size, activation='relu') + self.pool1 = MaxPool2D(pool_size=2) + self.conv2 = Conv2D(filters=64, kernel_size=conv_size, activation='relu') + self.pool2 = MaxPool2D(pool_size=2) + self.flatten = Flatten() + self.fc1 = Dense(units=hidden_size, activation='relu') + self.dropout = Dropout(rate=dropout_rate) + self.fc2 = Dense(units=10, activation='softmax') + + def call(self, x): + """Override ``Model.call`` to build LeNet-5 model.""" + x = self.conv1(x) + x = self.pool1(x) + x = self.conv2(x) + x = self.pool2(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.dropout(x) + return self.fc2(x) + + +class ReportIntermediates(Callback): + """ + Callback class for reporting intermediate accuracy metrics. + + This callback sends accuracy to NNI framework every 100 steps, + so you can view the learning curve on web UI. + + If an assessor is configured in experiment's YAML file, + it will use these metrics for early stopping. + """ + def on_epoch_end(self, epoch, logs=None): + """Reports intermediate accuracy to NNI framework""" + # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy` + if 'val_acc' in logs: + nni.report_intermediate_result(logs['val_acc']) + else: + nni.report_intermediate_result(logs['val_accuracy']) + + +def load_dataset(): + """Download and reformat MNIST dataset""" + mnist = tf.keras.datasets.mnist + (x_train, y_train), (x_test, y_test) = mnist.load_data() + x_train, x_test = x_train / 255.0, x_test / 255.0 + x_train = x_train[..., tf.newaxis] + x_test = x_test[..., tf.newaxis] + return (x_train, y_train), (x_test, y_test) + + +def main(params): + """ + Main program: + - Build network + - Prepare dataset + - Train the model + - Report accuracy to tuner + """ + model = MnistModel( + conv_size=params['conv_size'], + hidden_size=params['hidden_size'], + dropout_rate=params['dropout_rate'] + ) + optimizer = Adam(learning_rate=params['learning_rate']) + model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) + _logger.info('Model built') + + (x_train, y_train), (x_test, y_test) = load_dataset() + _logger.info('Dataset loaded') + + model.fit( + x_train, + y_train, + batch_size=params['batch_size'], + epochs=10, + verbose=0, + callbacks=[ReportIntermediates()], + validation_data=(x_test, y_test) + ) + _logger.info('Training completed') + + loss, accuracy = model.evaluate(x_test, y_test, verbose=0) + nni.report_final_result(accuracy) # send final accuracy to NNI tuner and web UI + _logger.info('Final accuracy reported: %s', accuracy) + + +if __name__ == '__main__': + params = { + 'dropout_rate': 0.5, + 'conv_size': 5, + 'hidden_size': 1024, + 'batch_size': 32, + 'learning_rate': 1e-4, + } + + # fetch hyper-parameters from HPO tuner + # comment out following two lines to run the code without NNI framework + tuned_params = nni.get_next_parameter() + params.update(tuned_params) + + _logger.info('Hyper-parameters: %s', params) + main(params) diff --git a/examples/trials/mnist-tfv2/search_space.json b/examples/trials/mnist-tfv2/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..d4e9c3bf725e8e2c03c5f6d62430f7ff4e19a69e --- /dev/null +++ b/examples/trials/mnist-tfv2/search_space.json @@ -0,0 +1,7 @@ +{ + "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] }, + "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] }, + "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] }, + 
"batch_size": { "_type": "choice", "_value": [16, 32] }, + "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] } +} diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py new file mode 100644 index 0000000000000000000000000000000000000000..a73a02a01357fbdd4dcd83db66c333a2bcdea91f --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py @@ -0,0 +1,204 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import argparse +import logging +import os + +import tensorflow as tf +import keras +from keras.callbacks import EarlyStopping, TensorBoard +from keras.datasets import fashion_mnist +from keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop +from keras.utils import multi_gpu_model, to_categorical +import keras.backend.tensorflow_backend as KTF + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# set the logger format +logger = logging.getLogger("FashionMNIST-network-morphism-keras") + + +# restrict gpu usage background +config = tf.ConfigProto() +# pylint: disable=E1101,W0603 +config.gpu_options.allow_growth = True +sess = tf.Session(config=config) + +KTF.set_session(sess) + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("fashion_mnist") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument( + "--weight_decay", + type=float, + default=1e-5, + help="weight decay of the learning rate", + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +args = get_args() +TENSORBOARD_DIR = os.environ["NNI_OUTPUT_DIR"] + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_keras_model() + return model + + +def 
parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + + # Loading Data + logger.debug("Preparing data..") + + (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + x_train = x_train.reshape(x_train.shape+(1,)).astype("float32") + x_test = x_test.reshape(x_test.shape+(1,)).astype("float32") + x_train /= 255.0 + x_test /= 255.0 + trainloader = (x_train, y_train) + testloader = (x_test, y_test) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + # parallel model + try: + available_devices = os.environ["HIP_VISIBLE_DEVICES"] + gpus = len(available_devices.split(",")) + if gpus > 1: + net = multi_gpu_model(net, gpus) + except KeyError: + logger.debug("parallel model not support in this config settings") + + if args.optimizer == "SGD": + optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay) + if args.optimizer == "Adadelta": + optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adagrad": + optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adam": + optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adamax": + optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "RMSprop": + optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay) + + # Compile the model + net.compile( + loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] + ) + return 0 + + +class SendMetrics(keras.callbacks.Callback): + """ + Keras callback to send metrics to NNI framework + """ + + def on_epoch_end(self, epoch, logs=None): + """ + Run on end of each epoch + """ + if logs is None: + logs = dict() + logger.debug(logs) + # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy` + if 'val_acc' in logs: + nni.report_intermediate_result(logs['val_acc']) + else: + nni.report_intermediate_result(logs['val_accuracy']) + + +# Training +def train_eval(): + """ train and eval the model + """ + + global trainloader + global testloader + global net + + (x_train, y_train) = trainloader + (x_test, y_test) = testloader + + # train procedure + net.fit( + x=x_train, + y=y_train, + batch_size=args.batch_size, + validation_data=(x_test, y_test), + epochs=args.epochs, + shuffle=True, + callbacks=[ + SendMetrics(), + EarlyStopping(min_delta=0.001, patience=10), + TensorBoard(log_dir=TENSORBOARD_DIR), + ], + ) + + # trial report final acc to tuner + _, acc = net.evaluate(x_test, y_test) + logger.debug("Final result is: %.3f", acc) + nni.report_final_result(acc) + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + parse_rev_args(RCV_CONFIG) + train_eval() + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..931c4d0c60c58b9b573ab0b4891169a1809afd74 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py @@ -0,0 +1,255 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import argparse +import logging +import sys + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision + +import utils + + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# pylint: disable=W0603 +# set the logger format +logger = logging.getLogger("FashionMNIST-network-morphism-pytorch") + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("FashionMNIST") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument("--cutout", action="store_true", default=False, help="use cutout") + parser.add_argument("--cutout_length", type=int, default=8, help="cutout length") + parser.add_argument( + "--model_path", type=str, default="./", help="Path to save the destination model" + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +criterion = None +optimizer = None +device = "cuda" if torch.cuda.is_available() else "cpu" +best_acc = 0.0 +args = get_args() + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_torch_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + global criterion + global optimizer + + # Loading Data + logger.debug("Preparing data..") + + raw_train_data = torchvision.datasets.FashionMNIST( + root="./data", train=True, download=True + ) + + dataset_mean, dataset_std = ( + [raw_train_data.train_data.float().mean() / 255], + [raw_train_data.train_data.float().std() / 255], + ) + + transform_train, transform_test = utils.data_transforms_mnist( + args, dataset_mean, dataset_std + ) + + trainset = torchvision.datasets.FashionMNIST( + root="./data", train=True, download=True, transform=transform_train + ) + trainloader = 
torch.utils.data.DataLoader( + trainset, batch_size=args.batch_size, shuffle=True, num_workers=2 + ) + + testset = torchvision.datasets.FashionMNIST( + root="./data", train=False, download=True, transform=transform_test + ) + testloader = torch.utils.data.DataLoader( + testset, batch_size=args.batch_size, shuffle=False, num_workers=2 + ) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + net = net.to(device) + criterion = nn.CrossEntropyLoss() + + if args.optimizer == "SGD": + optimizer = optim.SGD( + net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4 + ) + if args.optimizer == "Adadelta": + optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adagrad": + optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adam": + optimizer = optim.Adam(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adamax": + optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate) + if args.optimizer == "RMSprop": + optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate) + + return 0 + + +# Training +def train(epoch): + """ train model on each epoch in trainset + """ + + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Epoch: %d", epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + train_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + return acc + + +def test(epoch): + """ eval model on each epoch in testset + """ + global best_acc + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Eval on epoch: %d", epoch) + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + test_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + acc = 100.0 * correct / total + if acc > best_acc: + best_acc = acc + return acc, best_acc + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + + parse_rev_args(RCV_CONFIG) + train_acc = 0.0 + best_acc = 0.0 + early_stop = utils.EarlyStopping(mode="max") + for ep in range(args.epochs): + train_acc = train(ep) + test_acc, best_acc = test(ep) + nni.report_intermediate_result(test_acc) + logger.debug(test_acc) + if early_stop.step(test_acc): + break + + # trial report best_acc to tuner + nni.report_final_result(best_acc) + except Exception as exception: + logger.exception(exception) + raise diff --git 
a/examples/trials/network_morphism/FashionMNIST/__init__.py b/examples/trials/network_morphism/FashionMNIST/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/trials/network_morphism/FashionMNIST/config.yml b/examples/trials/network_morphism/FashionMNIST/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..70a6d720a97840be0831a58157ac061bb9b4a7e4 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/config.yml @@ -0,0 +1,18 @@ +trialCommand: python3 FashionMNIST_keras.py +trialGpuNumber: 1 +trialConcurrency: 4 +maxExperimentDuration: 48h +maxTrialNumber: 200 +searchSpace: {} # search space of NetworkMorphism is provided via classArgs +tuner: + name: NetworkMorphism + classArgs: + optimize_mode: maximize # maximize or minimize + task: cv # for now, this tuner only supports cv domain + input_width: 28 # input image width + input_channel: 1 # input image channel + n_output_node: 10 # number of classes +trainingService: + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/network_morphism/FashionMNIST/utils.py b/examples/trials/network_morphism/FashionMNIST/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24869227db37c346c5e12f295c6e0d0a1a820a64 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/utils.py @@ -0,0 +1,196 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
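+
+# A minimal usage sketch for the EarlyStopping helper defined below; `validate`
+# and `model` are hypothetical names, the real loop lives in FashionMNIST_pytorch.py:
+#
+#     stopper = EarlyStopping(mode="max", patience=10)
+#     for epoch in range(epochs):
+#         acc = validate(model)
+#         if stopper.step(acc):  # True once accuracy stops improving
+#             break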
+ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.init as init +import torchvision.transforms as transforms + + +class EarlyStopping: + """ EarlyStopping class to keep NN from overfitting + """ + + # pylint: disable=E0202 + def __init__(self, mode="min", min_delta=0, patience=10, percentage=False): + self.mode = mode + self.min_delta = min_delta + self.patience = patience + self.best = None + self.num_bad_epochs = 0 + self.is_better = None + self._init_is_better(mode, min_delta, percentage) + + if patience == 0: + self.is_better = lambda a, b: True + self.step = lambda a: False + + def step(self, metrics): + """ EarlyStopping step on each epoch + Arguments: + metrics {float} -- metric value + """ + + if self.best is None: + self.best = metrics + return False + + if np.isnan(metrics): + return True + + if self.is_better(metrics, self.best): + self.num_bad_epochs = 0 + self.best = metrics + else: + self.num_bad_epochs += 1 + + if self.num_bad_epochs >= self.patience: + return True + + return False + + def _init_is_better(self, mode, min_delta, percentage): + if mode not in {"min", "max"}: + raise ValueError("mode " + mode + " is unknown!") + if not percentage: + if mode == "min": + self.is_better = lambda a, best: a < best - min_delta + if mode == "max": + self.is_better = lambda a, best: a > best + min_delta + else: + if mode == "min": + self.is_better = lambda a, best: a < best - (best * min_delta / 100) + if mode == "max": + self.is_better = lambda a, best: a > best + (best * min_delta / 100) + + +class Cutout: + """Randomly mask out one or more patches from an image. + Args: + n_holes (int): Number of patches to cut out of each image. + length (int): The length (in pixels) of each square patch. + """ + + def __init__(self, length): + self.length = length + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). + Returns: + Tensor: Image with n_holes of dimension length x length cut out of it. 
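+            Note: despite the class docstring mentioning ``n_holes``, this
+            implementation cuts exactly one square patch of side ``length`` per call.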
+ """ + h_img, w_img = img.size(1), img.size(2) + mask = np.ones((h_img, w_img), np.float32) + y_img = np.random.randint(h_img) + x_img = np.random.randint(w_img) + + y1_img = np.clip(y_img - self.length // 2, 0, h_img) + y2_img = np.clip(y_img + self.length // 2, 0, h_img) + x1_img = np.clip(x_img - self.length // 2, 0, w_img) + x2_img = np.clip(x_img + self.length // 2, 0, w_img) + + mask[y1_img:y2_img, x1_img:x2_img] = 0.0 + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + return img + + +def data_transforms_cifar10(args): + """ data_transforms for cifar10 dataset + """ + + cifar_mean = [0.49139968, 0.48215827, 0.44653124] + cifar_std = [0.24703233, 0.24348505, 0.26158768] + + train_transform = transforms.Compose( + [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(cifar_mean, cifar_std), + ] + ) + if args.cutout: + train_transform.transforms.append(Cutout(args.cutout_length)) + + valid_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)] + ) + return train_transform, valid_transform + + +def data_transforms_mnist(args, mnist_mean=None, mnist_std=None): + """ data_transforms for mnist dataset + """ + if mnist_mean is None: + mnist_mean = [0.5] + + if mnist_std is None: + mnist_std = [0.5] + + train_transform = transforms.Compose( + [ + transforms.RandomCrop(28, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mnist_mean, mnist_std), + ] + ) + if args.cutout: + train_transform.transforms.append(Cutout(args.cutout_length)) + + valid_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)] + ) + return train_transform, valid_transform + + +def get_mean_and_std(dataset): + """Compute the mean and std value of dataset.""" + dataloader = torch.utils.data.DataLoader( + dataset, batch_size=1, shuffle=True, num_workers=2 + ) + mean = torch.zeros(3) + std = torch.zeros(3) + print("==> Computing mean and std..") + for inputs, _ in dataloader: + for i in range(3): + mean[i] += inputs[:, i, :, :].mean() + std[i] += inputs[:, i, :, :].std() + mean.div_(len(dataset)) + std.div_(len(dataset)) + return mean, std + + +def init_params(net): + """Init layer parameters.""" + for module in net.modules(): + if isinstance(module, nn.Conv2d): + init.kaiming_normal(module.weight, mode="fan_out") + if module.bias: + init.constant(module.bias, 0) + elif isinstance(module, nn.BatchNorm2d): + init.constant(module.weight, 1) + init.constant(module.bias, 0) + elif isinstance(module, nn.Linear): + init.normal(module.weight, std=1e-3) + if module.bias: + init.constant(module.bias, 0) diff --git a/examples/trials/network_morphism/README.md b/examples/trials/network_morphism/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9997945872ddf00e6192ed692d8d8a0502af3c9e --- /dev/null +++ b/examples/trials/network_morphism/README.md @@ -0,0 +1,108 @@ +# Network Morphism for Automatic Model Architecture Search in NNI +The Network Morphism is a build-in Tuner using network morphism techniques to search and evaluate the new network architecture. This example shows us how to use it to find good model architectures for deep learning. + +## How to run this example? + +### 1. Training framework support + +The network morphism now is framework-based, and we have not implemented the framework-free methods. 
The training frameworks supported so far are PyTorch and Keras. Once you are familiar with the intermediate JSON format, you can also build models in your own training framework. In the future, we will change the intermediate format from JSON to ONNX in order to get a [standard intermediate representation spec](https://github.com/onnx/onnx/blob/master/docs/IR.md).
+
+
+### 2. Install the requirements
+
+```bash
+# install the required packages
+cd examples/trials/network_morphism/
+pip install -r requirements.txt
+```
+
+### 3. Update configuration
+
+Modify `examples/trials/network_morphism/cifar10/config.yml` to fit your own task. Note that `searchSpacePath` is not required in this configuration. Here is the default configuration:
+
+```yaml
+authorName: default
+experimentName: example_cifar10-network-morphism
+trialConcurrency: 1
+maxExecDuration: 48h
+maxTrialNum: 200
+#choice: local, remote, pai
+trainingServicePlatform: local
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: NetworkMorphism
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+    #for now, this tuner only supports cv domain
+    task: cv
+    #modify to fit your input image width
+    input_width: 32
+    #modify to fit your input image channel
+    input_channel: 3
+    #modify to fit your number of classes
+    n_output_node: 10
+trial:
+  # your own command here
+  command: python3 cifar10_keras.py
+  codeDir: .
+  gpuNum: 0
+```
+
+In the "trial" part, if you want to use a GPU to perform the architecture search, change `gpuNum` from `0` to `1`. Increase `maxTrialNum` and `maxExecDuration` according to how long you are willing to wait for the search result.
+
+`trialConcurrency` is the number of trials running concurrently; if you set `gpuNum` to 1, it should equal the number of GPUs you want to use.
+
+### 4. Call "json\_to\_graph()" function in your own code
+
+Modify your code to call the "json\_to\_graph()" function, which builds a PyTorch or Keras model from the received JSON string. Here is a simple example.
+
+```python
+import nni
+from nni.networkmorphism_tuner.graph import json_to_graph
+
+def build_graph_from_json(ir_model_json):
+    """build a pytorch model from json representation
+    """
+    graph = json_to_graph(ir_model_json)
+    model = graph.produce_torch_model()
+    return model
+
+# trial gets its next parameter from the network morphism tuner
+RCV_CONFIG = nni.get_next_parameter()
+# call the function to build a pytorch model or keras model
+net = build_graph_from_json(RCV_CONFIG)
+
+# training procedure
+# ....
+
+# report the final accuracy to NNI
+nni.report_final_result(best_acc)
+```
+
+### 5. Submit this job
+
+```bash
+# Use the NNI command-line tool "nnictl" to create and submit the job to NNI.
+nnictl create --config config.yml
+```
+
+## Trial Examples
+
+Some trial examples, located in `examples/trials/network_morphism/`, can guide you. You can refer to them and adapt them to your own task. We hope they help you build your own code.
+
+### FashionMNIST
+
+`Fashion-MNIST` is a dataset of [Zalando](https://jobs.zalando.com/tech/)'s article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
It is a modern image-classification dataset that is widely used as a replacement for MNIST as a baseline, because MNIST is too easy and overused.
+
+There are two examples, [FashionMNIST-keras.py](./FashionMNIST/FashionMNIST_keras.py) and [FashionMNIST-pytorch.py](./FashionMNIST/FashionMNIST_pytorch.py). Note that you should change `input_width` to 28 and `input_channel` to 1 in `config.yml` for this dataset.
+
+### Cifar10
+
+The `CIFAR-10` dataset [Canadian Institute For Advanced Research](https://www.cifar.ca/) is a collection of images that are commonly used to train machine learning and computer vision algorithms. It is one of the most widely used datasets for machine learning research. The CIFAR-10 dataset contains 60,000 32x32 color images in 10 different classes.
+
+There are two examples, [cifar10-keras.py](./cifar10/cifar10_keras.py) and [cifar10-pytorch.py](./cifar10/cifar10_pytorch.py). The value of `input_width` is 32 and the value of `input_channel` is 3 in `config.yml` for this dataset.
diff --git a/examples/trials/network_morphism/README_zh_CN.md b/examples/trials/network_morphism/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ad0db080a2b4fb97bf1dddac324cc51b76d4a72
--- /dev/null
+++ b/examples/trials/network_morphism/README_zh_CN.md
@@ -0,0 +1,108 @@
+# 在 NNI 中用网络形态算法来进行自动模型结构搜索
+
+Network Morphism (网络形态)是内置的 Tuner,它使用了网络形态技术来搜索和评估新的网络结构。 该示例展示了如何使用它来为深度学习找到好的模型架构。
+
+## 如何运行此示例?
+
+### 1. 训练框架支持
+
+网络形态当前基于框架,还没有实现与框架脱离的方法。 当前支持 PyTorch 和 Keras。 如果熟悉 JSON 中间格式,可以在自定义的训练框架中生成自己的模型。 随后,我们会将中间结果从 JSON 转换为 ONNX,从而能够成为[标准的中间表示](https://github.com/onnx/onnx/blob/master/docs/IR.md)。
+
+### 2. 安装需求
+
+```bash
+# 安装依赖包
+cd examples/trials/network_morphism/
+pip install -r requirements.txt
+```
+
+### 3. 更新配置
+
+修改 `examples/trials/network_morphism/cifar10/config.yml` 来适配自己的任务。注意,searchSpacePath 在配置中不需要。 默认配置:
+
+```yaml
+authorName: default
+experimentName: example_cifar10-network-morphism
+trialConcurrency: 1
+maxExecDuration: 48h
+maxTrialNum: 200
+#可选项: local, remote, pai
+trainingServicePlatform: local
+#可选项: true, false
+useAnnotation: false
+tuner:
+  #可选项: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
+  #SMAC (SMAC 需要通过 nnictl 安装)
+  builtinTunerName: NetworkMorphism
+  classArgs:
+    #可选项: maximize, minimize
+    optimize_mode: maximize
+    #当前仅支持视觉领域
+    task: cv
+    #修改来适配自己的图像大小
+    input_width: 32
+    #修改来适配自己的图像通道
+    input_channel: 3
+    #修改来适配自己的分类数量
+    n_output_node: 10
+trial:
+  # 自己的命令
+  command: python3 cifar10_keras.py
+  codeDir: .
+  gpuNum: 0
+```
+
+在 "trial" 部分中,如果需要使用 GPU 来进行架构搜索,可将 `gpuNum` 从 `0` 改为 `1`。 根据训练时长,可以增加 `maxTrialNum` 和 `maxExecDuration`。
+
+`trialConcurrency` 是并发运行的 Trial 的数量。如果将 `gpuNum` 设置为 1,则需要与 GPU 数量一致。
+
+### 4. 在代码中调用 "json\_to\_graph()" 函数
+
+修改代码来调用 "json\_to\_graph()" 函数来从收到的 JSON 字符串生成一个 Pytorch 或 Keras 模型。 简单示例:
+
+```python
+import nni
+from nni.networkmorphism_tuner.graph import json_to_graph
+
+def build_graph_from_json(ir_model_json):
+    """从 JSON 生成 Pytorch 模型
+    """
+    graph = json_to_graph(ir_model_json)
+    model = graph.produce_torch_model()
+    return model
+
+# 从网络形态 Tuner 中获得下一组参数
+RCV_CONFIG = nni.get_next_parameter()
+# 调用函数来生成 Pytorch 或 Keras 模型
+net = build_graph_from_json(RCV_CONFIG)
+
+# 训练过程
+# ....
+
+# 将最终精度返回给 NNI
+nni.report_final_result(best_acc)
+```
+
+### 5.
提交任务 + +```bash +# 可以使用命令行工具 "nnictl" 来创建任务 +# 最终会成功提交一个网络形态任务到 NNI +nnictl create --config config.yml +``` + +## Trial 示例 + +下面的代码可在 `examples/trials/network_morphism/` 中找到。 可参考此代码来更新自己的任务。 希望它对你有用。 + +### FashionMNIST + +`Fashion-MNIST` 是来自 [Zalando](https://jobs.zalando.com/tech/) 文章的图片 — 有 60,000 个示例的训练集和 10,000 个示例的测试集。 每个示例是 28x28 的灰度图,分为 10 个类别。 由于 MNIST 数据集过于简单,该数据集现在开始被广泛使用,用来替换 MNIST 作为基准数据集。 + +这里有两个示例,[FashionMNIST-keras.py](./FashionMNIST/FashionMNIST_keras.py) 和 [FashionMNIST-pytorch.py](./FashionMNIST/FashionMNIST_pytorch.py)。 注意,在 `config.yml` 中,需要为此数据集修改 `input_width` 为 28,以及 `input_channel` 为 1。 + +### Cifar10 + +`CIFAR-10` 数据集 [Canadian Institute For Advanced Research](https://www.cifar.ca/) 是广泛用于机器学习和视觉算法训练的数据集。 它是机器学习领域最广泛使用的数据集之一。 CIFAR-10 数据集包含了 60,000 张 32x32 的彩色图片,分为 10 类。 + +这里有两个示例,[cifar10-keras.py](./cifar10/cifar10_keras.py) 和 [cifar10-pytorch.py](./cifar10/cifar10_pytorch.py)。 在 `config.yml` 中,该数据集 `input_width` 的值是 32,并且 `input_channel` 是 3。 \ No newline at end of file diff --git a/examples/trials/network_morphism/cifar10/__init__.py b/examples/trials/network_morphism/cifar10/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/trials/network_morphism/cifar10/cifar10_keras.py b/examples/trials/network_morphism/cifar10/cifar10_keras.py new file mode 100644 index 0000000000000000000000000000000000000000..46f6171586087374818b78fd1ed3ceadf5fb7948 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/cifar10_keras.py @@ -0,0 +1,204 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
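+
+# Trial control flow (a summary of __main__ below): receive a model graph as a
+# JSON string from the NetworkMorphism tuner via nni.get_next_parameter(),
+# build and compile the Keras model in parse_rev_args(), then train_eval()
+# fits it and reports the final accuracy back to NNI.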
+ +import argparse +import logging +import os + +import tensorflow as tf +import keras +from keras.callbacks import EarlyStopping, TensorBoard +from keras.datasets import cifar10 +from keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop +from keras.utils import multi_gpu_model, to_categorical +import keras.backend.tensorflow_backend as KTF + +import nni +from nni.algorithms.hpo.networkmorphism_tuner.graph import json_to_graph + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# set the logger format +logger = logging.getLogger("cifar10-network-morphism-keras") + + +# restrict gpu usage background +config = tf.ConfigProto() +# pylint: disable=E1101,W0603 +config.gpu_options.allow_growth = True +sess = tf.Session(config=config) + +KTF.set_session(sess) + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("cifar10") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument( + "--weight_decay", + type=float, + default=1e-5, + help="weight decay of the learning rate", + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +args = get_args() +TENSORBOARD_DIR = os.environ["NNI_OUTPUT_DIR"] + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_keras_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + + # Loading Data + logger.debug("Preparing data..") + + (x_train, y_train), (x_test, y_test) = cifar10.load_data() + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + x_train = x_train.astype("float32") + x_test = x_test.astype("float32") + x_train /= 255.0 + x_test /= 255.0 + trainloader = (x_train, y_train) + testloader = (x_test, y_test) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + # parallel model + try: + available_devices = os.environ["HIP_VISIBLE_DEVICES"] + gpus = len(available_devices.split(",")) + if gpus > 1: + net = multi_gpu_model(net, gpus) + except KeyError: + logger.debug("parallel model not support in this config settings") + + if args.optimizer == "SGD": + optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay) + if args.optimizer == "Adadelta": + optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adagrad": + optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adam": + optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adamax": + optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "RMSprop": + optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay) + + # Compile the model + net.compile( + loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] + ) + return 0 + + +class 
SendMetrics(keras.callbacks.Callback): + """ + Keras callback to send metrics to NNI framework + """ + + def on_epoch_end(self, epoch, logs=None): + """ + Run on end of each epoch + """ + if logs is None: + logs = dict() + logger.debug(logs) + # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy` + if 'val_acc' in logs: + nni.report_intermediate_result(logs['val_acc']) + else: + nni.report_intermediate_result(logs['val_accuracy']) + + +# Training +def train_eval(): + """ train and eval the model + """ + + global trainloader + global testloader + global net + + (x_train, y_train) = trainloader + (x_test, y_test) = testloader + + # train procedure + net.fit( + x=x_train, + y=y_train, + batch_size=args.batch_size, + validation_data=(x_test, y_test), + epochs=args.epochs, + shuffle=True, + callbacks=[ + SendMetrics(), + EarlyStopping(min_delta=0.001, patience=10), + TensorBoard(log_dir=TENSORBOARD_DIR), + ], + ) + + # trial report final acc to tuner + _, acc = net.evaluate(x_test, y_test) + logger.debug("Final result is: %.3f", acc) + nni.report_final_result(acc) + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + parse_rev_args(RCV_CONFIG) + train_eval() + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/cifar10/cifar10_pytorch.py b/examples/trials/network_morphism/cifar10/cifar10_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..0a00618b20d673669d7dcd8ce8f6137c692df692 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/cifar10_pytorch.py @@ -0,0 +1,246 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
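+
+# Trial control flow (a summary of __main__ below): receive a model graph as a
+# JSON string via nni.get_next_parameter(), build the PyTorch model in
+# parse_rev_args(), then alternate train()/test() each epoch, reporting
+# intermediate accuracy to NNI and stopping early via utils.EarlyStopping.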
+ +import argparse +import logging +import sys + +import nni +from nni.algorithms.hpo.networkmorphism_tuner.graph import json_to_graph + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision + +import utils + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# pylint: disable=W0603 +# set the logger format +logger = logging.getLogger("cifar10-network-morphism-pytorch") + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("cifar10") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument("--cutout", action="store_true", default=False, help="use cutout") + parser.add_argument("--cutout_length", type=int, default=8, help="cutout length") + parser.add_argument( + "--model_path", type=str, default="./", help="Path to save the destination model" + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +criterion = None +optimizer = None +device = "cuda" if torch.cuda.is_available() else "cpu" +best_acc = 0.0 +args = get_args() + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_torch_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + global criterion + global optimizer + + # Loading Data + logger.debug("Preparing data..") + + transform_train, transform_test = utils.data_transforms_cifar10(args) + + trainset = torchvision.datasets.CIFAR10( + root="./data", train=True, download=True, transform=transform_train + ) + trainloader = torch.utils.data.DataLoader( + trainset, batch_size=args.batch_size, shuffle=True, num_workers=2 + ) + + testset = torchvision.datasets.CIFAR10( + root="./data", train=False, download=True, transform=transform_test + ) + testloader = torch.utils.data.DataLoader( + testset, batch_size=args.batch_size, shuffle=False, num_workers=2 + ) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + net = net.to(device) + criterion = nn.CrossEntropyLoss() + if device == "cuda" and torch.cuda.device_count() > 1: + net = torch.nn.DataParallel(net) + + if args.optimizer == "SGD": + optimizer = optim.SGD( + net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4 + ) + if args.optimizer == "Adadelta": + optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adagrad": + optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adam": + optimizer = optim.Adam(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adamax": + optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate) + if args.optimizer == "RMSprop": + optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate) + + + return 0 + + +# Training +def train(epoch): + """ train model on each epoch in trainset + """ + + global trainloader + global testloader + global net 
+ global criterion + global optimizer + + logger.debug("Epoch: %d", epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + train_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + return acc + + +def test(epoch): + """ eval model on each epoch in testset + """ + global best_acc + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Eval on epoch: %d", epoch) + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + test_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + acc = 100.0 * correct / total + if acc > best_acc: + best_acc = acc + return acc, best_acc + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + + parse_rev_args(RCV_CONFIG) + train_acc = 0.0 + best_acc = 0.0 + early_stop = utils.EarlyStopping(mode="max") + for ep in range(args.epochs): + train_acc = train(ep) + test_acc, best_acc = test(ep) + nni.report_intermediate_result(test_acc) + logger.debug(test_acc) + if early_stop.step(test_acc): + break + + # trial report best_acc to tuner + nni.report_final_result(best_acc) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/cifar10/config.yml b/examples/trials/network_morphism/cifar10/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..0e6cc00b6fd9d9f3c452b7395bb858a5e671c022 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/config.yml @@ -0,0 +1,18 @@ +trialCommand: python3 cifar10_keras.py +trialGpuNumber: 1 +trialConcurrency: 4 +maxExperimentDuration: 48h +maxTrialNumber: 200 +searchSpace: {} # search space of NetworkMorphism is provided via classArgs +tuner: + name: NetworkMorphism + classArgs: + optimize_mode: maximize # maximize or minimize + task: cv # for now, this tuner only supports cv domain + input_width: 32 # input image width + input_channel: 3 # input image channel + n_output_node: 10 # number of classes +trainingService: + platform: local + useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. 
Windows 10, Ubuntu desktop) + # Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu diff --git a/examples/trials/network_morphism/cifar10/utils.py b/examples/trials/network_morphism/cifar10/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24869227db37c346c5e12f295c6e0d0a1a820a64 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/utils.py @@ -0,0 +1,196 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.init as init +import torchvision.transforms as transforms + + +class EarlyStopping: + """ EarlyStopping class to keep NN from overfitting + """ + + # pylint: disable=E0202 + def __init__(self, mode="min", min_delta=0, patience=10, percentage=False): + self.mode = mode + self.min_delta = min_delta + self.patience = patience + self.best = None + self.num_bad_epochs = 0 + self.is_better = None + self._init_is_better(mode, min_delta, percentage) + + if patience == 0: + self.is_better = lambda a, b: True + self.step = lambda a: False + + def step(self, metrics): + """ EarlyStopping step on each epoch + Arguments: + metrics {float} -- metric value + """ + + if self.best is None: + self.best = metrics + return False + + if np.isnan(metrics): + return True + + if self.is_better(metrics, self.best): + self.num_bad_epochs = 0 + self.best = metrics + else: + self.num_bad_epochs += 1 + + if self.num_bad_epochs >= self.patience: + return True + + return False + + def _init_is_better(self, mode, min_delta, percentage): + if mode not in {"min", "max"}: + raise ValueError("mode " + mode + " is unknown!") + if not percentage: + if mode == "min": + self.is_better = lambda a, best: a < best - min_delta + if mode == "max": + self.is_better = lambda a, best: a > best + min_delta + else: + if mode == "min": + self.is_better = lambda a, best: a < best - (best * min_delta / 100) + if mode == "max": + self.is_better = lambda a, best: a > best + (best * min_delta / 100) + + +class Cutout: + """Randomly mask out one or more patches from an image. + Args: + n_holes (int): Number of patches to cut out of each image. + length (int): The length (in pixels) of each square patch. + """ + + def __init__(self, length): + self.length = length + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). 
+        Returns:
+            Tensor: Image with n_holes of dimension length x length cut out of it.
+        """
+        h_img, w_img = img.size(1), img.size(2)
+        mask = np.ones((h_img, w_img), np.float32)
+        y_img = np.random.randint(h_img)
+        x_img = np.random.randint(w_img)
+
+        y1_img = np.clip(y_img - self.length // 2, 0, h_img)
+        y2_img = np.clip(y_img + self.length // 2, 0, h_img)
+        x1_img = np.clip(x_img - self.length // 2, 0, w_img)
+        x2_img = np.clip(x_img + self.length // 2, 0, w_img)
+
+        mask[y1_img:y2_img, x1_img:x2_img] = 0.0
+        mask = torch.from_numpy(mask)
+        mask = mask.expand_as(img)
+        img *= mask
+        return img
+
+
+def data_transforms_cifar10(args):
+    """ Data transforms for the CIFAR-10 dataset.
+    """
+
+    cifar_mean = [0.49139968, 0.48215827, 0.44653124]
+    cifar_std = [0.24703233, 0.24348505, 0.26158768]
+
+    train_transform = transforms.Compose(
+        [
+            transforms.RandomCrop(32, padding=4),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize(cifar_mean, cifar_std),
+        ]
+    )
+    if args.cutout:
+        train_transform.transforms.append(Cutout(args.cutout_length))
+
+    valid_transform = transforms.Compose(
+        [transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)]
+    )
+    return train_transform, valid_transform
+
+
+def data_transforms_mnist(args, mnist_mean=None, mnist_std=None):
+    """ Data transforms for the MNIST dataset.
+    """
+    if mnist_mean is None:
+        mnist_mean = [0.5]
+
+    if mnist_std is None:
+        mnist_std = [0.5]
+
+    train_transform = transforms.Compose(
+        [
+            transforms.RandomCrop(28, padding=4),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize(mnist_mean, mnist_std),
+        ]
+    )
+    if args.cutout:
+        train_transform.transforms.append(Cutout(args.cutout_length))
+
+    valid_transform = transforms.Compose(
+        [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)]
+    )
+    return train_transform, valid_transform
+
+
+def get_mean_and_std(dataset):
+    """Compute the per-channel mean and std of a dataset."""
+    dataloader = torch.utils.data.DataLoader(
+        dataset, batch_size=1, shuffle=True, num_workers=2
+    )
+    mean = torch.zeros(3)
+    std = torch.zeros(3)
+    print("==> Computing mean and std..")
+    for inputs, _ in dataloader:
+        for i in range(3):
+            mean[i] += inputs[:, i, :, :].mean()
+            std[i] += inputs[:, i, :, :].std()
+    mean.div_(len(dataset))
+    std.div_(len(dataset))
+    return mean, std
+
+
+def init_params(net):
+    """Init layer parameters."""
+    for module in net.modules():
+        if isinstance(module, nn.Conv2d):
+            # use the in-place (trailing-underscore) init functions; the
+            # non-underscore variants are deprecated in PyTorch >= 0.4
+            init.kaiming_normal_(module.weight, mode="fan_out")
+            if module.bias is not None:  # `if module.bias:` is ambiguous for tensors
+                init.constant_(module.bias, 0)
+        elif isinstance(module, nn.BatchNorm2d):
+            init.constant_(module.weight, 1)
+            init.constant_(module.bias, 0)
+        elif isinstance(module, nn.Linear):
+            init.normal_(module.weight, std=1e-3)
+            if module.bias is not None:
+                init.constant_(module.bias, 0)
diff --git a/examples/trials/network_morphism/requirements.txt b/examples/trials/network_morphism/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e07ad3d7bdcd42e70977e8525b989d3550350290
--- /dev/null
+++ b/examples/trials/network_morphism/requirements.txt
@@ -0,0 +1,5 @@
+numpy==1.18.5
+tensorflow==1.15.4
+torchvision==0.2.1
+Keras==2.3.1
+torch==0.4.1
diff --git a/examples/trials/pix2pix-pytorch/.gitignore b/examples/trials/pix2pix-pytorch/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f8b882528dfe38c3729b042f96d2e90e8e699844
--- /dev/null
+++ b/examples/trials/pix2pix-pytorch/.gitignore
@@ -0,0 +1,5 @@
+# datasets
+data/
+
+# pix2pix library
+pix2pixlib/ \ No newline at end of file diff --git a/examples/trials/pix2pix-pytorch/base_params.py b/examples/trials/pix2pix-pytorch/base_params.py new file mode 100644 index 0000000000000000000000000000000000000000..62cbce2543b716963fe4bb059f08f61cd243124c --- /dev/null +++ b/examples/trials/pix2pix-pytorch/base_params.py @@ -0,0 +1,72 @@ + + +def get_base_params(dataset_name, checkpoint_dir): + params = {} + + # change name and gpuid later + basic_params = {'dataset': dataset_name, + 'dataroot': './data/' + dataset_name, + 'name': '', + 'gpu_ids': [0], + 'checkpoints_dir': checkpoint_dir, + 'verbose': False, + 'print_freq': 100 + } + params.update(basic_params) + + dataset_params = {'dataset_mode': 'aligned', + 'direction': 'BtoA', + 'num_threads': 4, + 'max_dataset_size': float('inf'), + 'preprocess': 'resize_and_crop', + 'display_winsize': 256, + 'input_nc': 3, + 'output_nc': 3} + params.update(dataset_params) + + model_params = {'model': 'pix2pix', + # 'ngf': 64, + # 'ndf': 64, + # 'netD': 'basic', + # 'netG': 'unet_256', + 'n_layers_D': 3, + # 'norm': 'batch', + # 'gan_mode': 'lsgan', + # 'init_type': 'normal', + 'init_gain': 0.02, + 'no_dropout': False} + params.update(model_params) + + train_params = {'phase': 'train', + 'isTrain': True, + 'serial_batches': False, + 'load_size': 286, + 'crop_size': 256, + 'no_flip': False, + # 'batch_size': 1, + # 'beta1': 0.5, + 'pool_size': 0, + # 'lr_policy': 'linear', + 'lr_decay_iters': 50, + #'lr': 0.0002, + # 'lambda_L1': 100, + 'epoch_count': 1, + # 'n_epochs': 10, # 100 + # 'n_epochs_decay': 0, # 100 + 'continue_train': False} + train_params.update(params) + + test_params = {'phase': 'test', + 'isTrain': False, + 'load_iter': -1, + 'epoch': 'latest', + 'load_size': 256, + 'crop_size': 256, + # 'batch_size': 1, + 'serial_batches': True, + 'no_flip': True, + 'eval': True} + test_params.update(params) + + return train_params, test_params + diff --git a/examples/trials/pix2pix-pytorch/config.yml b/examples/trials/pix2pix-pytorch/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..8c859fa4beff8305409c7fffda3f8c471fc600d2 --- /dev/null +++ b/examples/trials/pix2pix-pytorch/config.yml @@ -0,0 +1,47 @@ +experimentName: example_pix2pix +searchSpace: + ngf: + _type: choice + _value: [16, 32, 64, 128] + ndf: + _type: choice + _value: [16, 32, 64, 128] + netG: + _type: choice + _value: ["unet_256", "resnet_9blocks"] + netD: + _type: choice + _value: ["basic", "pixel", "n_layers"] + norm: + _type: choice + _value: ["batch", "instance", "none"] + init_type: + _type: choice + _value: ["xavier", "normal", "kaiming", "orthogonal"] + lr: + _type: choice + _value: [0.0001, 0.0002, 0.0005, 0.001, 0.005, 0.01, 0.1] + beta1: + _type: uniform + _value: [0, 1] + lr_policy: + _type: choice + _value: ["linear", "step", "plateau", "cosine"] + gan_mode: + _type: choice + _value: ["vanilla", "lsgan", "wgangp"] + lambda_L1: + _type: choice + _value: [1, 5, 10, 100, 250, 500] +trainingService: + platform: local + useActiveGpu: true + gpuIndices: '0' +trialCodeDirectory: . 
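+# Note on the objective: pix2pix.py reports the test-set L1 distance between
+# generated and ground-truth images as its final metric, so the TPE tuner
+# below runs with optimize_mode: minimize (lower L1 is better).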
+trialCommand: python3 pix2pix.py +trialConcurrency: 1 +trialGpuNumber: 1 +tuner: + name: TPE + classArgs: + optimize_mode: minimize \ No newline at end of file diff --git a/examples/trials/pix2pix-pytorch/pix2pix.py b/examples/trials/pix2pix-pytorch/pix2pix.py new file mode 100644 index 0000000000000000000000000000000000000000..f096fae8f9f9c38c44eb154fb78d05b78b5a6242 --- /dev/null +++ b/examples/trials/pix2pix-pytorch/pix2pix.py @@ -0,0 +1,185 @@ +import sys +sys.path.insert(0, './pix2pixlib') + +import os +import pathlib +import logging +import time +import argparse +from collections import namedtuple +import numpy as np +import torch +import torch.utils.data as data +import nni +from nni.utils import merge_parameter +from pix2pixlib.data.aligned_dataset import AlignedDataset +from pix2pixlib.models.pix2pix_model import Pix2PixModel +from base_params import get_base_params + + +_logger = logging.getLogger('example_pix2pix') + + +class CustomDatasetDataLoader(): + """Wrapper class of Dataset class that performs multi-threaded data loading""" + + def __init__(self, opt, ds): + """Initialize this class + Step 1: create a dataset instance given the name [dataset_mode] + Step 2: create a multi-threaded data loader. + """ + self.opt = opt + self.dataset = ds + self.dataloader = data.DataLoader(self.dataset, + batch_size=opt.batch_size, + shuffle=not opt.serial_batches, + num_workers=int(opt.num_threads)) + + def load_data(self): + return self + + def __len__(self): + """Return the number of data in the dataset""" + return min(len(self.dataset), self.opt.max_dataset_size) + + def __iter__(self): + """Return a batch of data""" + for i, data in enumerate(self.dataloader): + if i * self.opt.batch_size >= self.opt.max_dataset_size: + break + yield data + + +def download_dataset(dataset_name): + # code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix + assert(dataset_name in ['facades', 'night2day', 'edges2handbags', 'edges2shoes', 'maps']) + if os.path.exists('./data/' + dataset_name): + _logger.info("Already downloaded dataset " + dataset_name) + else: + _logger.info("Downloading dataset " + dataset_name) + if not os.path.exists('./data/'): + pathlib.Path('./data/').mkdir(parents=True, exist_ok=True) + pathlib.Path('./data/' + dataset_name).mkdir(parents=True, exist_ok=True) + URL = 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{}.tar.gz'.format(dataset_name) + TAR_FILE = './data/{}.tar.gz'.format(dataset_name) + TARGET_DIR = './data/{}/'.format(dataset_name) + os.system('wget -N {} -O {}'.format(URL, TAR_FILE)) + pathlib.Path(TARGET_DIR).mkdir(parents=True, exist_ok=True) + os.system('tar -zxvf {} -C ./data/'.format(TAR_FILE)) + os.system('rm ' + TAR_FILE) + + +def setup_trial_checkpoint_dir(): + checkpoint_dir = os.environ['NNI_OUTPUT_DIR'] + '/checkpoints/' + pathlib.Path(checkpoint_dir).mkdir(parents=True, exist_ok=True) + return checkpoint_dir + + +def parse_args(): + # Settings that may be overrided by parameters from nni + parser = argparse.ArgumentParser(description='PyTorch Pix2pix Example') + parser.add_argument('--ngf', type=int, default=64, + help='# of generator filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, + help='# of discriminator filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', + help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', + help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') + parser.add_argument('--init_type', type=str, default='normal', + help='network initialization [normal | xavier | kaiming | orthogonal]') + parser.add_argument('--beta1', type=float, default=0.5, + help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, + help='initial learning rate for adam') + parser.add_argument('--lr_policy', type=str, default='linear', + help='learning rate policy. [linear | step | plateau | cosine]') + parser.add_argument('--gan_mode', type=str, default='lsgan', + help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') + parser.add_argument('--norm', type=str, default='instance', + help='instance normalization or batch normalization [instance | batch | none]') + parser.add_argument('--lambda_L1', type=float, default=100, + help='weight of L1 loss in the generator objective') + + # Additional training settings + parser.add_argument('--batch_size', type=int, default=1, + help='input batch size for training (default: 1)') + parser.add_argument('--n_epochs', type=int, default=100, + help='number of epochs with the initial learning rate') + parser.add_argument('--n_epochs_decay', type=int, default=100, + help='number of epochs to linearly decay learning rate to zero') + + args, _ = parser.parse_known_args() + return args + + +def evaluate_L1(config, model, dataset): + if config.eval: + model.eval() + scores = [] + for i, data in enumerate(dataset): + model.set_input(data) # unpack data from data loader + model.test() # run inference + visuals = model.get_current_visuals() + score = torch.mean(torch.abs(visuals['fake_B']-visuals['real_B'])).detach().cpu().numpy() + scores.append(score) + return np.mean(np.array(scores)) + + +def main(dataset_name, train_params, test_params): + download_dataset(dataset_name) + + train_config = namedtuple('Struct', train_params.keys())(*train_params.values()) + test_config = namedtuple('Struct', test_params.keys())(*test_params.values()) + + train_dataset, test_dataset = AlignedDataset(train_config), AlignedDataset(test_config) + print(train_dataset, train_config) + train_dataset = CustomDatasetDataLoader(train_config, train_dataset) + test_dataset = CustomDatasetDataLoader(test_config, test_dataset) + _logger.info('Number of training images = {}'.format(len(train_dataset))) + _logger.info('Number of testing images = {}'.format(len(test_dataset))) + + model = Pix2PixModel(train_config) + model.setup(train_config) + + # training + total_iters = 0 # the total number of training iterations + for epoch in range(train_config.epoch_count, train_config.n_epochs + train_config.n_epochs_decay + 1): + _logger.info('Training epoch {}'.format(epoch)) + epoch_start_time = time.time() # timer for entire epoch + iter_data_time = time.time() # timer for data loading per iteration + epoch_iter = 0 + model.update_learning_rate() + for i, data in enumerate(train_dataset): # inner loop within one epoch + iter_start_time = time.time() # timer for computation per iteration + if total_iters % train_config.print_freq == 0: + t_data = iter_start_time - iter_data_time + total_iters += train_config.batch_size + epoch_iter += train_config.batch_size + model.set_input(data) # unpack data from dataset and apply preprocessing + 
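# optimize_parameters() below performs one full GAN step in the upstream
+            # pix2pix library: a forward pass, a discriminator update, then a
+            # generator update on the combined adversarial and L1 objective
+            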
model.optimize_parameters() # calculate loss functions, get gradients, update network weights + iter_data_time = time.time() + _logger.info('End of epoch {} / {} \t Time Taken: {} sec'.format(epoch, train_config.n_epochs + train_config.n_epochs_decay, time.time() - epoch_start_time)) + + model.save_networks('latest') + _logger.info("Training done. Saving the final model.") + + l1_score = evaluate_L1(test_config, model, test_dataset) + _logger.info("The final L1 loss the test set is {}".format(l1_score)) + nni.report_final_result(l1_score) + + +if __name__ == '__main__': + dataset_name = 'facades' + + checkpoint_dir = setup_trial_checkpoint_dir() + + params_from_cl = vars(parse_args()) + params_for_tuning = nni.get_next_parameter() + train_params, test_params = get_base_params(dataset_name, checkpoint_dir) + train_params.update(params_from_cl) + test_params.update(params_from_cl) + train_params = merge_parameter(train_params, params_for_tuning) + + main(dataset_name, train_params, test_params) + diff --git a/examples/trials/pix2pix-pytorch/setup.sh b/examples/trials/pix2pix-pytorch/setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..553eb231d22b0d0f104ae95ec53a2256ace39aac --- /dev/null +++ b/examples/trials/pix2pix-pytorch/setup.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# download pix2pix repository +if [ ! -d './pix2pixlib' ] ; then + git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git pix2pixlib +fi diff --git a/examples/trials/pix2pix-pytorch/test.py b/examples/trials/pix2pix-pytorch/test.py new file mode 100644 index 0000000000000000000000000000000000000000..be525d7c4fde3ae0191420ca76df00947cd0f069 --- /dev/null +++ b/examples/trials/pix2pix-pytorch/test.py @@ -0,0 +1,140 @@ +import sys +sys.path.insert(0, './pix2pixlib') + +import os +import pathlib +import logging +import argparse +import json +from collections import namedtuple +from PIL import Image +import numpy as np +import torch +from nni.utils import merge_parameter +from pix2pixlib.data.aligned_dataset import AlignedDataset +from pix2pixlib.data import CustomDatasetDataLoader +from pix2pixlib.models.pix2pix_model import Pix2PixModel +from pix2pixlib.util.util import tensor2im +from base_params import get_base_params + + +_logger = logging.getLogger('example_pix2pix') + + +def download_dataset(dataset_name): + # code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix + assert(dataset_name in ['facades', 'night2day', 'edges2handbags', 'edges2shoes', 'maps']) + if os.path.exists('./data/' + dataset_name): + _logger.info("Already downloaded dataset " + dataset_name) + else: + _logger.info("Downloading dataset " + dataset_name) + if not os.path.exists('./data/'): + pathlib.Path('./data/').mkdir(parents=True, exist_ok=True) + pathlib.Path('./data/' + dataset_name).mkdir(parents=True, exist_ok=True) + URL = 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{}.tar.gz'.format(dataset_name) + TAR_FILE = './data/{}.tar.gz'.format(dataset_name) + TARGET_DIR = './data/{}/'.format(dataset_name) + os.system('wget -N {} -O {}'.format(URL, TAR_FILE)) + pathlib.Path(TARGET_DIR).mkdir(parents=True, exist_ok=True) + os.system('tar -zxvf {} -C ./data/'.format(TAR_FILE)) + os.system('rm ' + TAR_FILE) + + +def parse_args(): + parser = argparse.ArgumentParser(description='PyTorch Pix2pix Example') + + # required arguments + parser.add_argument('-c', '--checkpoint', type=str, required=True, + help='Checkpoint directory') + parser.add_argument('-p', '--parameter_cfg', type=str, required=True, + 
help='parameter.cfg file generated by nni trial') + parser.add_argument('-d', '--dataset', type=str, required=True, + help='dataset name (facades, night2day, edges2handbags, edges2shoes, maps)') + parser.add_argument('-o', '--output_dir', type=str, required=True, + help='Where to save the test results') + + # Settings that may be overrided by parameters from nni + parser.add_argument('--ngf', type=int, default=64, + help='# of generator filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, + help='# of discriminator filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', + help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', + help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') + parser.add_argument('--init_type', type=str, default='normal', + help='network initialization [normal | xavier | kaiming | orthogonal]') + parser.add_argument('--beta1', type=float, default=0.5, + help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, + help='initial learning rate for adam') + parser.add_argument('--lr_policy', type=str, default='linear', + help='learning rate policy. [linear | step | plateau | cosine]') + parser.add_argument('--gan_mode', type=str, default='lsgan', + help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') + parser.add_argument('--norm', type=str, default='instance', + help='instance normalization or batch normalization [instance | batch | none]') + parser.add_argument('--lambda_L1', type=float, default=100, + help='weight of L1 loss in the generator objective') + + # Additional training settings + parser.add_argument('--batch_size', type=int, default=1, + help='input batch size for training (default: 1)') + parser.add_argument('--n_epochs', type=int, default=100, + help='number of epochs with the initial learning rate') + parser.add_argument('--n_epochs_decay', type=int, default=100, + help='number of epochs to linearly decay learning rate to zero') + + args, _ = parser.parse_known_args() + return args + + +def main(test_params): + test_config = namedtuple('Struct', test_params.keys())(*test_params.values()) + assert os.path.exists(test_config.checkpoint), "Checkpoint does not exist" + + download_dataset(test_config.dataset) + + test_dataset = AlignedDataset(test_config) + test_dataset = CustomDatasetDataLoader(test_config, test_dataset) + _logger.info('Number of testing images = {}'.format(len(test_dataset))) + + model = Pix2PixModel(test_config) + model.setup(test_config) + + if test_config.eval: + model.eval() + + for i, data in enumerate(test_dataset): + print('Testing on {} image {}'.format(test_config.dataset, i), end='\r') + model.set_input(data) + model.test() + + visuals = model.get_current_visuals() + cur_input = tensor2im(visuals['real_A']) + cur_label = tensor2im(visuals['real_B']) + cur_output = tensor2im(visuals['fake_B']) + + image_name = '{}_test_{}.png'.format(test_config.dataset, i) + Image.fromarray(cur_input).save(os.path.join(test_config.output_dir, 'input', image_name)) + Image.fromarray(cur_label).save(os.path.join(test_config.output_dir, 'label', image_name)) + Image.fromarray(cur_output).save(os.path.join(test_config.output_dir, 'output', 
image_name)) + + _logger.info("Images successfully saved to " + test_config.output_dir) + + +if __name__ == '__main__': + params_from_cl = vars(parse_args()) + _, test_params = get_base_params(params_from_cl['dataset'], params_from_cl['checkpoint']) + test_params.update(params_from_cl) + + with open(test_params['parameter_cfg'], 'r') as f: + params_from_nni = json.loads(f.readline().strip())['parameters'] + test_params = merge_parameter(test_params, params_from_nni) + + pathlib.Path(params_from_cl['output_dir'] + '/input').mkdir(parents=True, exist_ok=True) + pathlib.Path(params_from_cl['output_dir'] + '/label').mkdir(parents=True, exist_ok=True) + pathlib.Path(params_from_cl['output_dir'] + '/output').mkdir(parents=True, exist_ok=True) + + main(test_params) + diff --git a/examples/trials/sklearn/classification/config.yml b/examples/trials/sklearn/classification/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..a1e6bec0f68c360ca3b744a5f11a2788badc6a1a --- /dev/null +++ b/examples/trials/sklearn/classification/config.yml @@ -0,0 +1,11 @@ +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialConcurrency: 1 +maxTrialNumber: 100 +maxExperimentDuration: 1h +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/sklearn/classification/main.py b/examples/trials/sklearn/classification/main.py new file mode 100644 index 0000000000000000000000000000000000000000..fff86b41ac4fb8ac3ab8924f3fe27c976a889e85 --- /dev/null +++ b/examples/trials/sklearn/classification/main.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
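+
+# A minimal NNI trial: tune SVC hyperparameters (C, kernel, degree, gamma,
+# coef0) on the sklearn digits dataset and report test accuracy as the final
+# metric. Typical launch, assuming NNI is installed:
+#   nnictl create --config config.yml
+# Parameters supplied by the tuner override the defaults defined below.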
+
+import nni
+from sklearn.model_selection import train_test_split
+from sklearn.datasets import load_digits
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC
+import logging
+import numpy as np
+
+LOG = logging.getLogger('sklearn_classification')
+
+def load_data():
+    '''Load and split the sklearn digits dataset'''
+    digits = load_digits()
+    X_train, X_test, y_train, y_test = train_test_split(
+        digits.data, digits.target, random_state=99, test_size=0.25)
+
+    ss = StandardScaler()
+    X_train = ss.fit_transform(X_train)
+    X_test = ss.transform(X_test)
+
+    return X_train, X_test, y_train, y_test
+
+def get_default_parameters():
+    '''get default parameters'''
+    params = {
+        'C': 1.0,
+        'kernel': 'linear',
+        'degree': 3,
+        'gamma': 0.01,
+        'coef0': 0.01
+    }
+    return params
+
+def get_model(PARAMS):
+    '''Get model according to parameters'''
+    model = SVC()
+    model.C = PARAMS.get('C')
+    model.kernel = PARAMS.get('kernel')
+    model.degree = PARAMS.get('degree')
+    model.gamma = PARAMS.get('gamma')
+    model.coef0 = PARAMS.get('coef0')
+
+    return model
+
+def run(X_train, X_test, y_train, y_test, model):
+    '''Train model and predict result'''
+    model.fit(X_train, y_train)
+    score = model.score(X_test, y_test)
+    LOG.debug('score: %s', score)
+    nni.report_final_result(score)
+
+if __name__ == '__main__':
+    X_train, X_test, y_train, y_test = load_data()
+
+    try:
+        # get parameters from tuner
+        RECEIVED_PARAMS = nni.get_next_parameter()
+        LOG.debug(RECEIVED_PARAMS)
+        PARAMS = get_default_parameters()
+        PARAMS.update(RECEIVED_PARAMS)
+        LOG.debug(PARAMS)
+        model = get_model(PARAMS)
+        run(X_train, X_test, y_train, y_test, model)
+    except Exception as exception:
+        LOG.exception(exception)
+        raise
diff --git a/examples/trials/sklearn/classification/python_api_connect.ipynb b/examples/trials/sklearn/classification/python_api_connect.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..ef73f937f7f701274dbdd6462062ceeb518b5dfa
--- /dev/null
+++ b/examples/trials/sklearn/classification/python_api_connect.ipynb
@@ -0,0 +1,195 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "white-electron",
+   "metadata": {},
+   "source": [
+    "## Connect and Manage an Existing Experiment"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "recent-italic",
+   "metadata": {},
+   "source": [
+    "### 1. Connect Experiment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "statistical-repair",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[2021-03-05 12:18:28] Connect to port 8080 success, experiment id is DH8pVfXc, status is RUNNING.\n"
+     ]
+    }
+   ],
+   "source": [
+    "from nni.experiment import Experiment\n",
+    "experiment = Experiment.connect(8080)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "defensive-scratch",
+   "metadata": {},
+   "source": [
+    "### 2. 
Experiment View & Control" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "independent-touch", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': 'DH8pVfXc',\n", + " 'revision': 4,\n", + " 'execDuration': 10,\n", + " 'logDir': '/home/ningshang/nni-experiments/DH8pVfXc',\n", + " 'nextSequenceId': 1,\n", + " 'params': {'authorName': 'default',\n", + " 'experimentName': 'example_sklearn-classification',\n", + " 'trialConcurrency': 1,\n", + " 'maxExecDuration': 3600,\n", + " 'maxTrialNum': 100,\n", + " 'searchSpace': '{\"C\": {\"_type\": \"uniform\", \"_value\": [0.1, 1]}, \"kernel\": {\"_type\": \"choice\", \"_value\": [\"linear\", \"rbf\", \"poly\", \"sigmoid\"]}, \"degree\": {\"_type\": \"choice\", \"_value\": [1, 2, 3, 4]}, \"gamma\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}, \"coef0\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}}',\n", + " 'trainingServicePlatform': 'local',\n", + " 'tuner': {'builtinTunerName': 'TPE',\n", + " 'classArgs': {'optimize_mode': 'maximize'},\n", + " 'checkpointDir': '/home/ningshang/nni-experiments/DH8pVfXc/checkpoint'},\n", + " 'versionCheck': True,\n", + " 'clusterMetaData': [{'key': 'trial_config',\n", + " 'value': {'command': 'python3 main.py',\n", + " 'codeDir': '/home/ningshang/nni/examples/trials/sklearn/classification/.',\n", + " 'gpuNum': 0}}]},\n", + " 'startTime': 1614946699989}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_experiment_profile()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "printable-bookmark", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:18:32] (root) Successfully update maxTrialNum.\n" + ] + } + ], + "source": [ + "experiment.update_max_trial_number(200)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "marine-serial", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': 'DH8pVfXc',\n", + " 'revision': 5,\n", + " 'execDuration': 14,\n", + " 'logDir': '/home/ningshang/nni-experiments/DH8pVfXc',\n", + " 'nextSequenceId': 1,\n", + " 'params': {'authorName': 'default',\n", + " 'experimentName': 'example_sklearn-classification',\n", + " 'trialConcurrency': 1,\n", + " 'maxExecDuration': 3600,\n", + " 'maxTrialNum': 200,\n", + " 'searchSpace': '{\"C\": {\"_type\": \"uniform\", \"_value\": [0.1, 1]}, \"kernel\": {\"_type\": \"choice\", \"_value\": [\"linear\", \"rbf\", \"poly\", \"sigmoid\"]}, \"degree\": {\"_type\": \"choice\", \"_value\": [1, 2, 3, 4]}, \"gamma\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}, \"coef0\": {\"_type\": \"uniform\", \"_value\": [0.01, 0.1]}}',\n", + " 'trainingServicePlatform': 'local',\n", + " 'tuner': {'builtinTunerName': 'TPE',\n", + " 'classArgs': {'optimize_mode': 'maximize'},\n", + " 'checkpointDir': '/home/ningshang/nni-experiments/DH8pVfXc/checkpoint'},\n", + " 'versionCheck': True,\n", + " 'clusterMetaData': [{'key': 'trial_config',\n", + " 'value': {'command': 'python3 main.py',\n", + " 'codeDir': '/home/ningshang/nni/examples/trials/sklearn/classification/.',\n", + " 'gpuNum': 0}}]},\n", + " 'startTime': 1614946699989}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_experiment_profile()" + ] + }, + { + "cell_type": "markdown", + "id": "opened-lounge", + "metadata": {}, + "source": [ + "### 3. 
Stop Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "emotional-machinery", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:18:36] Stopping experiment, please wait...\n", + "[2021-03-05 12:18:38] Experiment stopped\n" + ] + } + ], + "source": [ + "experiment.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nni-dev", + "language": "python", + "name": "nni-dev" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/trials/sklearn/classification/python_api_start.ipynb b/examples/trials/sklearn/classification/python_api_start.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..95ceb6d99b1ad983d06633cd803741b5cc202369 --- /dev/null +++ b/examples/trials/sklearn/classification/python_api_start.ipynb @@ -0,0 +1,214 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "technological-script", + "metadata": {}, + "source": [ + "## Start and Manage a New Experiment" + ] + }, + { + "cell_type": "markdown", + "id": "reported-somerset", + "metadata": {}, + "source": [ + "### 1. Configure Search Space" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "potential-williams", + "metadata": {}, + "outputs": [], + "source": [ + "search_space = {\n", + " \"C\": {\"_type\":\"quniform\",\"_value\":[0.1, 1, 0.1]},\n", + " \"kernel\": {\"_type\":\"choice\",\"_value\":[\"linear\", \"rbf\", \"poly\", \"sigmoid\"]},\n", + " \"degree\": {\"_type\":\"choice\",\"_value\":[1, 2, 3, 4]},\n", + " \"gamma\": {\"_type\":\"quniform\",\"_value\":[0.01, 0.1, 0.01]},\n", + " \"coef0\": {\"_type\":\"quniform\",\"_value\":[0.01, 0.1, 0.01]}\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "greek-archive", + "metadata": {}, + "source": [ + "### 2. Configure Experiment " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fiscal-expansion", + "metadata": {}, + "outputs": [], + "source": [ + "from nni.experiment import Experiment\n", + "experiment = Experiment('local')\n", + "experiment.config.experiment_name = 'Example'\n", + "experiment.config.trial_concurrency = 2\n", + "experiment.config.max_trial_number = 10\n", + "experiment.config.search_space = search_space\n", + "experiment.config.trial_command = 'python3 main.py'\n", + "experiment.config.trial_code_directory = './'\n", + "experiment.config.tuner.name = 'TPE'\n", + "experiment.config.tuner.class_args['optimize_mode'] = 'maximize'\n", + "experiment.config.training_service.use_active_gpu = True" + ] + }, + { + "cell_type": "markdown", + "id": "received-tattoo", + "metadata": {}, + "source": [ + "### 3. 
Start Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "pleasant-patent", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:12:19] Creating experiment, Experiment ID: wdt0le3v\n", + "[2021-03-05 12:12:19] Starting web server...\n", + "[2021-03-05 12:12:20] Setting up...\n", + "[2021-03-05 12:12:20] Web UI URLs: http://127.0.0.1:8080 http://10.0.1.5:8080 http://172.17.0.1:8080\n" + ] + } + ], + "source": [ + "experiment.start(8080)" + ] + }, + { + "cell_type": "markdown", + "id": "miniature-prison", + "metadata": {}, + "source": [ + "### 4. Experiment View & Control" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "animated-english", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'RUNNING'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_status()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "alpha-ottawa", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[TrialResult(parameter={'C': 0.30000000000000004, 'kernel': 'linear', 'degree': 3, 'gamma': 0.03, 'coef0': 0.07}, value=0.9888888888888889, trialJobId='VLqU9'),\n", + " TrialResult(parameter={'C': 0.5, 'kernel': 'sigmoid', 'degree': 1, 'gamma': 0.03, 'coef0': 0.07}, value=0.8888888888888888, trialJobId='DLo6r')]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.export_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "unique-rendering", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'DLo6r': [TrialMetricData(timestamp=1614946351592, trialJobId='DLo6r', parameterId='1', type='FINAL', sequence=0, data=0.8888888888888888)],\n", + " 'VLqU9': [TrialMetricData(timestamp=1614946351607, trialJobId='VLqU9', parameterId='0', type='FINAL', sequence=0, data=0.9888888888888889)]}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experiment.get_job_metrics()" + ] + }, + { + "cell_type": "markdown", + "id": "welsh-difference", + "metadata": {}, + "source": [ + "### 5. 
Stop Experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "technological-cleanup", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2021-03-05 12:12:40] Stopping experiment, please wait...\n", + "[2021-03-05 12:12:42] Experiment stopped\n" + ] + } + ], + "source": [ + "experiment.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nni-dev", + "language": "python", + "name": "nni-dev" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/trials/sklearn/classification/search_space.json b/examples/trials/sklearn/classification/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c4b4ffb0c89cad4479a7cf4c5adc91ca084920ad --- /dev/null +++ b/examples/trials/sklearn/classification/search_space.json @@ -0,0 +1,7 @@ +{ + "C": {"_type":"uniform","_value":[0.1, 1]}, + "kernel": {"_type":"choice","_value":["linear", "rbf", "poly", "sigmoid"]}, + "degree": {"_type":"choice","_value":[1, 2, 3, 4]}, + "gamma": {"_type":"uniform","_value":[0.01, 0.1]}, + "coef0": {"_type":"uniform","_value":[0.01, 0.1]} +} \ No newline at end of file diff --git a/examples/trials/sklearn/regression/config.yml b/examples/trials/sklearn/regression/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..c3fcf52c3e03182c4b1311c3c0279ece3a3118ca --- /dev/null +++ b/examples/trials/sklearn/regression/config.yml @@ -0,0 +1,11 @@ +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialConcurrency: 1 +maxTrialNumber: 30 +maxExperimentDuration: 1h +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: # For other platforms, check mnist-pytorch example + platform: local diff --git a/examples/trials/sklearn/regression/main.py b/examples/trials/sklearn/regression/main.py new file mode 100644 index 0000000000000000000000000000000000000000..512111de903a5bc8fa172315e1db5596f9164921 --- /dev/null +++ b/examples/trials/sklearn/regression/main.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
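+
+# NNI trial for the regression example: the tuner chooses a linear model
+# family (LinearRegression / Ridge / Lars / ARDRegression) plus a normalize
+# flag, and the trial reports the r2 score on a held-out split of the boston
+# housing data. Launch with: nnictl create --config config.yml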
+
+import nni
+from sklearn.datasets import load_boston
+from sklearn.model_selection import train_test_split
+from sklearn import linear_model
+import logging
+import numpy as np
+from sklearn.metrics import r2_score
+from sklearn.preprocessing import StandardScaler
+from sklearn.linear_model import LinearRegression
+from sklearn.linear_model import Ridge
+from sklearn.linear_model import Lars
+from sklearn.linear_model import ARDRegression
+
+LOG = logging.getLogger('sklearn_regression')
+
+def load_data():
+    '''Load dataset, use boston dataset'''
+    boston = load_boston()
+    X_train, X_test, y_train, y_test = train_test_split(
+        boston.data, boston.target, random_state=99, test_size=0.25)
+    # normalize data
+    ss_X = StandardScaler()
+    ss_y = StandardScaler()
+
+    X_train = ss_X.fit_transform(X_train)
+    X_test = ss_X.transform(X_test)
+    y_train = ss_y.fit_transform(y_train[:, None])[:, 0]
+    y_test = ss_y.transform(y_test[:, None])[:, 0]
+
+    return X_train, X_test, y_train, y_test
+
+def get_default_parameters():
+    '''get default parameters'''
+    params = {'model_name': 'LinearRegression', 'normalize': 'false'}
+    return params
+
+def get_model(PARAMS):
+    '''Get model according to parameters'''
+    model_dict = {
+        'LinearRegression': LinearRegression(),
+        'Ridge': Ridge(),
+        'Lars': Lars(),
+        'ARDRegression': ARDRegression()
+    }
+    if not model_dict.get(PARAMS['model_name']):
+        LOG.exception('Unsupported model!')
+        exit(1)
+
+    model = model_dict[PARAMS['model_name']]
+    # the search space encodes the flag as the strings "true"/"false";
+    # bool("false") would evaluate to True, so compare against the string
+    model.normalize = PARAMS['normalize'] == 'true'
+
+    return model
+
+def run(X_train, X_test, y_train, y_test, model):
+    '''Train model and predict result'''
+    model.fit(X_train, y_train)
+    predict_y = model.predict(X_test)
+    score = r2_score(y_test, predict_y)
+    LOG.debug('r2 score: %s', score)
+    nni.report_final_result(score)
+
+if __name__ == '__main__':
+    X_train, X_test, y_train, y_test = load_data()
+
+    try:
+        # get parameters from tuner
+        RECEIVED_PARAMS = nni.get_next_parameter()
+        LOG.debug(RECEIVED_PARAMS)
+        PARAMS = get_default_parameters()
+        PARAMS.update(RECEIVED_PARAMS)
+        LOG.debug(PARAMS)
+        model = get_model(PARAMS)
+        run(X_train, X_test, y_train, y_test, model)
+    except Exception as exception:
+        LOG.exception(exception)
+        raise
diff --git a/examples/trials/sklearn/regression/search_space.json b/examples/trials/sklearn/regression/search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..d069b9d2b45c9b27cdf86a89ece6923a12f7f41d
--- /dev/null
+++ b/examples/trials/sklearn/regression/search_space.json
@@ -0,0 +1,4 @@
+{
+    "model_name":{"_type":"choice","_value":["LinearRegression", "Lars", "Ridge", "ARDRegression"]},
+    "normalize": {"_type":"choice","_value":["true", "false"]}
+}
\ No newline at end of file
diff --git a/examples/trials/sklearn/requirements.txt b/examples/trials/sklearn/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7282a456fd1b360e0e32dbe4933d9b39e87ec544
--- /dev/null
+++ b/examples/trials/sklearn/requirements.txt
@@ -0,0 +1,2 @@
+sudo apt-get install libblas-dev liblapack-dev libatlas-base-dev gfortran
+python3 -m pip install --user numpy scipy sklearn
diff --git a/examples/trials/systems_auto_tuning/opevo/Dockerfile b/examples/trials/systems_auto_tuning/opevo/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a1c589c3684e6dd04877d19228e4fb1cca585d2c
--- /dev/null
+++ b/examples/trials/systems_auto_tuning/opevo/Dockerfile
@@ -0,0 +1,42 @@
+FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
+
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV 
HIP_PLATFORM hcc +ENV PATH $PATH:/opt/rocm/bin:/usr/local/nvidia/lib64/bin +ENV TVM_HOME=/opt/tvm +ENV PYTHONPATH=/usr/local/rocm/src:$TVM_HOME/python:$TVM_HOME/topi/python:$TVM_HOME/nnvm/python +ENV HSA_USERPTR_FOR_PAGED_MEM=0 + +RUN env > /etc/environment + +RUN apt-get update && apt install -y --no-install-recommends git ca-certificates \ + python3-pip python3-wheel python3-setuptools python3-dev python3-pytest \ + vim less netcat-openbsd inetutils-ping curl patch iproute2 \ + g++ libpci3 libnuma-dev make cmake file openssh-server kmod gdb libopenmpi-dev openmpi-bin \ + autoconf automake autotools-dev libtool multiarch-support \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -sL http://repo.radeon.com/rocm/apt/debian/rocm.gpg.key | apt-key add - && \ + printf "deb [arch=amd64] http://repo.radeon.com/rocm/apt/3.3/ xenial main" | tee /etc/apt/sources.list.d/rocm_hip.list && \ + apt update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + rocm-dev zlib1g-dev unzip librdmacm-dev rocblas hipsparse rccl rocfft rocrand miopen-hip && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN ln -sf libcudart.so /usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart_static.a + +RUN pip3 install tornado psutil xgboost==0.80 numpy decorator attrs && rm -rf ~/.cache +RUN git clone https://github.com/dmlc/tvm $TVM_HOME + +RUN cd $TVM_HOME && git checkout v0.6 && git submodule init && git submodule update && \ + mkdir -p build && cd build && cp ../cmake/config.cmake . && \ + sed -i 's/LLVM ON/LLVM OFF/g' config.cmake && sed -i 's/CUDA OFF/CUDA ON/g' config.cmake && \ + cmake .. && make -j16 + +RUN pip3 install nni==1.5 && rm -rf ~/.cache +RUN pip3 install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html && rm -rf ~/.cache + +ADD tvm_patches/tvm_v0.6.patch $TVM_HOME/tvm_v0.6.patch +ADD tvm_patches/libcuda.so.1 $TVM_HOME/build +RUN ln -sf libcuda.so.1 $TVM_HOME/build/libcudart.so.10.0 +RUN cd $TVM_HOME && git apply tvm_v0.6.patch && cd build && make -j16 + +ADD src /root/ + diff --git a/examples/trials/systems_auto_tuning/opevo/Makefile b/examples/trials/systems_auto_tuning/opevo/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ba7ea78c189ea7a7bdd87f3b262399f2b28a8c28 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/Makefile @@ -0,0 +1,14 @@ +rocm-env: build + docker run -it --rm --privileged -v /:/host -w /root \ + -e BACKEND=c-rocm -p 8080:8080 \ + tvm4nni bash || true + +cuda-env: build + docker run -it --rm --privileged -v /:/host -w /root \ + -e BACKEND=c-cuda -p 8080:8080 \ + -v /usr/lib/x86_64-linux-gnu/libcuda.so.1:/usr/lib/x86_64-linux-gnu/libcuda.so.1 \ + -v $(shell dirname `ldd /usr/lib/x86_64-linux-gnu/libcuda.so.1 | grep nvidia-fatbinaryloader | awk '{print $$3}'`):/usr/local/nvidia/lib64 \ + tvm4nni bash || true + +build: + docker build -t tvm4nni --network=host . 
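+
+# Usage sketch (assumes Docker is installed; cuda-env additionally assumes the
+# host NVIDIA driver provides /usr/lib/x86_64-linux-gnu/libcuda.so.1):
+#   make build      # build the tvm4nni image from the Dockerfile above
+#   make cuda-env   # shell into a CUDA-backed container (BACKEND=c-cuda)
+#   make rocm-env   # shell into a ROCm-backed container (BACKEND=c-rocm)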
diff --git a/examples/trials/systems_auto_tuning/opevo/screenshot.png b/examples/trials/systems_auto_tuning/opevo/screenshot.png new file mode 100755 index 0000000000000000000000000000000000000000..dcbc89e0f5fd48bea63dec8aa2c259dbaec654a9 Binary files /dev/null and b/examples/trials/systems_auto_tuning/opevo/screenshot.png differ diff --git a/examples/trials/systems_auto_tuning/opevo/src/algorithms/gbfs.py b/examples/trials/systems_auto_tuning/opevo/src/algorithms/gbfs.py new file mode 100644 index 0000000000000000000000000000000000000000..5a4a91e21bd80318d42ab4920da7014ce98c73c4 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/algorithms/gbfs.py @@ -0,0 +1,278 @@ +import math +import random +import logging +import copy + +import nni +from nni.tuner import Tuner + + +class Factor(object): + """factor type parameter + """ + def __init__(self, value): + self.product, self.num = value + self.partition = [1] * self.num + self.partition[0] = self.product + + def pick_out(self): + return self.partition + + def step(self, action): + tmp = copy.deepcopy(self) + tmp.partition[action[0]] = int(tmp.partition[action[0]] / action[2]) + tmp.partition[action[1]] = int(tmp.partition[action[1]] * action[2]) + + return tmp + + def get_actions(self): + actions = [] + prime_factors = self._get_prime_factors(self.product, False) + for i in range(self.num): + for j in range(self.num): + if i != j: + for k in range(len(prime_factors)): + action = [i] + action.append(j) + action.append(prime_factors[k]) + if self.partition[action[0]] % action[2] == 0: + actions.append(action) + return actions + + def __repr__(self): + string = "[" + for factor in self.partition: + string += factor.__repr__() + " " + string = string[:-1] + "]" + + return string + + def _get_prime_factors(self, n, repeat=True): + prime_factors = [] + + while n % 2 == 0: + if 2 not in prime_factors: + prime_factors.append(2) + elif repeat: + prime_factors.append(2) + n = n / 2 + + for i in range(3, int(math.sqrt(n)) + 1, 2): + while n % i == 0: + if i not in prime_factors: + prime_factors.append(i) + elif repeat: + prime_factors.append(i) + n = n / i + + if n > 2: + prime_factors.append(int(n)) + + return prime_factors + + +class Configuration(object): + """Configuration class + """ + def __init__(self, search_space): + self.params = {} + for key in search_space.keys(): + if search_space[key]['_type'] == 'factor': + self.params[key] = \ + Factor(search_space[key]['_value']) + else: + raise RuntimeError( + "G_BFS Tuner doesn't support this kind of parameter: " + + str(search_space[key]['_type']) + ) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def __repr__(self): + string = "" + for param in self.params: + string += param.__repr__() + '\n' + + return string + + def pick_out(self): + output = {} + for key in self.params.keys(): + output[key] = self.params[key].pick_out() + + return output + + def step(self, action): + config = copy.deepcopy(self) + config.params[action[0]] = config.params[action[0]].step(action[1]) + + return config + + def get_actions(self): + actions = [] + for key, value in self.params.items(): + subactions = value.get_actions() + for subaction in subactions: + action = [key] + action.append(subaction) + actions.append(action) + + return actions + + +class Population(object): + """Population class + """ + + def __init__(self, opt_mode, search_space, num_samples): + self.opt_mode = opt_mode + self.search_space = search_space 
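+        # the containers initialized below implement a greedy best-first
+        # frontier: `queue` and `fitness` hold generated-but-unexpanded
+        # configurations sorted best-first, while `population` records every
+        # configuration seen so far so duplicates are never re-expanded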
+ self.num_samples = num_samples + + self.queue = [] + self.population = [] + self.fitness = [] + + def append(self, individual, fitness): + if self.opt_mode == "minimize": + fitness = -1 * fitness + + self.population.append(individual) + self.queue.insert(0, individual) + self.fitness.insert(0, fitness) + + i = 0 + while (i < len(self.fitness) - 1 + and self.fitness[i] < self.fitness[i + 1]): + self.fitness[i], self.fitness[i + 1] = \ + self.fitness[i + 1], self.fitness[i] + self.queue[i], self.queue[i + 1] = \ + self.queue[i + 1], self.queue[i] + i += 1 + + def generate(self): + if not self.fitness and not self.population: + return [Configuration(self.search_space)] + elif not self.fitness and self.population: + return [] + else: + self.fitness.pop(0) + config = self.queue.pop(0) + + action_space = config.get_actions() + num = len(action_space) + if num > self.num_samples: + indices = random.sample(range(num), self.num_samples) + else: + indices = range(num) + + res = [] + for idx in indices: + tmp = config.step(action_space[idx]) + if tmp not in self.population: + res.append(tmp) + + return res + + +class G_BFS(Tuner): + """G-BFS Tuner + Based on paper Compiler-Level Matrix Multiplication Optimization for Deep Learning + + Parameters + ---------- + optimize_mode: str, 'maximize' or 'minimize' + num_samples: int, + The random selection parameter rho + """ + def __init__(self, optimize_mode="maximize", num_samples=5): + self.logger = logging.getLogger( + self.__module__ + "." + self.__class__.__name__) + self.logger.setLevel('DEBUG') + + self.opt_mode = optimize_mode + self.num_samples = num_samples + + self.request_list = [] + self.serve_list = [] + self.wait_dict = {} + + def update_search_space(self, search_space): + """Update the self.bounds and self.types by the search_space.json file. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if not isinstance(search_space, dict): + self.logger.info("The format of search space is not a dict.") + raise RuntimeError("The format of search space is not a dict.") + + self.population = \ + Population(self.opt_mode, search_space, self.num_samples) + + if not self.serve_list: + self.serve_list = self.population.generate() + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """Returns multiple sets of trial (hyper-)parameters, + as iterable of serializable objects. + """ + result = [] + self.send_trial_callback = kwargs['st_callback'] + for parameter_id in parameter_id_list: + had_exception = False + try: + self.logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def generate_parameters(self, parameter_id, **kwargs): + """Method which provides one set of hyper-parameters. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if self.serve_list: + self.wait_dict[parameter_id] = self.serve_list.pop() + return self.wait_dict[parameter_id].pick_out() + else: + self.request_list.append(parameter_id) + raise nni.NoMoreTrialError('no more parameters now.') + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """Method invoked when a trial reports its final result. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. 
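+        Once every outstanding trial has reported and the serve list is
+        exhausted, a fresh batch of candidates is generated from the
+        population, and queued parameter requests are answered through the
+        send-trial callback.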
+ """ + if isinstance(value, dict): + value = value['default'] + + self.population.append(self.wait_dict[parameter_id], value) + del self.wait_dict[parameter_id] + + if not self.serve_list and not self.wait_dict: + self.serve_list = self.population.generate() + if not self.serve_list: + raise RuntimeError("Tuner stopped since no candidates") + + while self.request_list and self.serve_list: + param_id = self.request_list[0] + self.wait_dict[param_id] = self.serve_list.pop() + self.send_trial_callback( + param_id, self.wait_dict[param_id].pick_out()) + self.request_list.pop(0) + + def trial_end(self, parameter_id, success, **kwargs): + """Method invoked when a trial is completed or terminated. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if not success: + self.population.append(self.wait_dict[parameter_id], 0.0) + del self.wait_dict[parameter_id] diff --git a/examples/trials/systems_auto_tuning/opevo/src/algorithms/na2c.py b/examples/trials/systems_auto_tuning/opevo/src/algorithms/na2c.py new file mode 100644 index 0000000000000000000000000000000000000000..7c51a36e40821f5219c07504a7e12183c635b9d7 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/algorithms/na2c.py @@ -0,0 +1,401 @@ +import math +import random +import logging +import copy + +import torch +from torch import optim +from torch import nn +import torch.nn.functional as F +import numpy as np + +import nni +from nni.tuner import Tuner + + +class Factor(object): + """factor type parameter + """ + def __init__(self, value): + self.product, self.num = value + self.partition = [1] * self.num + self.partition[0] = self.product + + def pick_out(self): + return self.partition + + def step(self, action): + if self.partition[action[0]] % action[2] == 0: + self.partition[action[0]] /= action[2] + self.partition[action[1]] *= action[2] + status = True + else: + status = False + + return status + + def get_actions(self): + actions = [] + prime_factors = self._get_prime_factors(self.product, False) + for i in range(self.num): + for j in range(self.num): + if i != j: + for k in range(len(prime_factors)): + action = [i] + action.append(j) + action.append(prime_factors[k]) + actions.append(action) + + return actions + + def __repr__(self): + return self.partition.__repr__() + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def _get_prime_factors(self, n, repeat=True): + prime_factors = [] + + while n % 2 == 0: + if 2 not in prime_factors: + prime_factors.append(2) + elif repeat: + prime_factors.append(2) + n = n / 2 + + for i in range(3, int(math.sqrt(n)) + 1, 2): + while n % i == 0: + if i not in prime_factors: + prime_factors.append(i) + elif repeat: + prime_factors.append(i) + n = n / i + + if n > 2: + prime_factors.append(int(n)) + + return prime_factors + + +class Configuration(object): + """Configuration class + """ + def __init__(self, search_space): + self.params = {} + self.key_order = [] + for key in search_space.keys(): + if search_space[key]['_type'] == 'factor': + self.key_order.append(key) + self.params[key] = \ + Factor(search_space[key]['_value']) + else: + raise RuntimeError( + "N_A2C Tuner doesn't support this kind of parameter: " + + str(search_space[key]['_type']) + ) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def __repr__(self): + string = "" + for key, value in self.params.items(): + string += key + ': ' 
+ value.__repr__() + ' ' + + return string + + def pick_out(self): + output = {} + for key in self.params.keys(): + output[key] = self.params[key].pick_out() + + return output + + def step(self, action): + config = copy.deepcopy(self) + status = config.params[action[0]].step(action[1]) + + return status, config + + def get_actions(self): + actions = [] + for key, value in self.params.items(): + subactions = value.get_actions() + for subaction in subactions: + action = [key] + action.append(subaction) + actions.append(action) + + return actions + + def to_torch(self): + states = [] + for key in self.key_order: + state = torch.tensor(self.params[key].partition).float() / \ + self.params[key].product - 0.5 + states.append(state) + + return torch.cat(states).float() + + +class ActorCritic(nn.Module): + def __init__(self, num_states, num_actions, hidden_size): + super(ActorCritic, self).__init__() + + self.num_actions = num_actions + self.fc = nn.Linear(num_states, hidden_size) + self.critic_linear2 = nn.Linear(hidden_size, 1) + self.actor_linear2 = nn.Linear(hidden_size, num_actions) + + def forward(self, state): + x = F.relu(self.fc(state)) + value = self.critic_linear2(x) + policy_dist = F.softmax(self.actor_linear2(x)) + + return value, policy_dist + + +class Population(object): + """Population class + """ + def __init__(self, search_space, opt_mode, n_states, n_steps, + hidden_size, lr): + self.search_space = search_space + self.opt_mode = opt_mode + self.n_states = n_states + self.n_steps = n_steps + self.hidden_size = hidden_size + self.lr = lr + + self.config = Configuration(search_space) + self.max_reward = 0.0 + + self.action_space = self.config.get_actions() + self.dim_actions = len(self.action_space) + self.dim_states = len(self.config.to_torch()) + self.log_probs = [] + self.values = [] + self.rewards = [] + + self.population = [] + + self.actor_critic = ActorCritic( + self.dim_states, self.dim_actions, self.hidden_size + ) + self.ac_optimizer = optim.Adam( + self.actor_critic.parameters(), lr=self.lr + ) + + def append(self, individual, fitness): + if self.opt_mode == "minimize": + fitness = -1 * fitness + + self.population.append(individual) + + if self.max_reward < fitness: + self.max_reward = fitness + self.config = individual + + if self.collect: + idx = self.collect.index(individual) + self.waiting_rewards[idx] = fitness + del self.collect[idx] + else: + raise RuntimeError("Received unexpected trials.") + + if not self.collect: + self.rewards.extend(self.waiting_rewards) + + self.ac_optimizer.zero_grad() + gradient_loss = 0 + value_loss = 0 + for i in range(len(self.values)): + advantage = self.rewards[i] - self.values[i] + gradient_loss += self.log_probs[i] * advantage + value_loss += torch.pow(advantage, 2) + loss = gradient_loss + value_loss + loss.backward() + self.ac_optimizer.step() + + self.rewards = [] + self.values = [] + self.log_probs = [] + self.collect = [] + + def generate(self): + self.collect = [] + while len(self.collect) < self.n_states: + config = self.config + for i in range(self.n_steps): + value, policy_dist = self.actor_critic(config.to_torch()) + dist = policy_dist.detach().numpy() + + if random.uniform(0, 1) < 0.1: + action = random.choice(range(self.dim_actions)) + else: + action = np.random.choice( + self.dim_actions, p=np.squeeze(dist)) + + log_prob = torch.log(policy_dist.squeeze(0)[action]) + # entropy = -np.sum(np.mean(dist) * np.log(dist)) + flag, new_config = config.step(self.action_space[action]) + + if (flag and new_config not in 
self.population + and new_config not in self.collect): + self.collect.append(new_config) + self.log_probs.append(log_prob) + self.values.append(value) + + config = new_config + # print([math.exp(float(i)) for i in self.log_probs]) + + self.waiting_rewards = [0.0] * len(self.collect) + return copy.deepcopy(self.collect) + + +class N_A2C(Tuner): + """N-A2C Tuner + Based on paper Compiler-Level Matrix Multiplication Optimization for Deep Learning + + Parameters + ---------- + optimize_mode: str, 'maximize' or 'minimize' + n_states: int, + The maximum search steps Tau + n_steps: int, + number of steps to train the policy and critic networks each iteration + hidden_size: int, + hidden layer size of the policy and critic networks + lr: float, + learning rate of the policy and critic networks + """ + + def __init__(self, + optimize_mode="maximize", + n_states=6, + n_steps=3, + hidden_size=128, + lr=1e-3): + self.logger = logging.getLogger( + self.__module__ + "." + self.__class__.__name__) + self.logger.setLevel('DEBUG') + + self.opt_mode = optimize_mode + self.n_states = n_states + self.n_steps = n_steps + self.hidden_size = hidden_size + self.lr = lr + + self.request_list = [] + self.serve_list = [] + self.wait_dict = {} + + def update_search_space(self, search_space): + """Update the self.bounds and self.types by the search_space.json file. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if not isinstance(search_space, dict): + self.logger.info("The format of search space is not a dict.") + raise RuntimeError("The format of search space is not a dict.") + + self.population = \ + Population( + search_space, + self.opt_mode, + self.n_states, + self.n_steps, + self.hidden_size, + self.lr + ) + + if not self.serve_list: + self.serve_list = self.population.generate() + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """Returns multiple sets of trial (hyper-)parameters, + as iterable of serializable objects. + """ + result = [] + self.send_trial_callback = kwargs['st_callback'] + for parameter_id in parameter_id_list: + had_exception = False + try: + self.logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def generate_parameters(self, parameter_id, **kwargs): + """Method which provides one set of hyper-parameters. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if self.serve_list: + self.wait_dict[parameter_id] = self.serve_list.pop() + return self.wait_dict[parameter_id].pick_out() + else: + self.request_list.append(parameter_id) + raise nni.NoMoreTrialError('no more parameters now.') + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """Method invoked when a trial reports its final result. + + Override of the abstract method in :class:`~nni.tuner.Tuner`.
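+ + Rewards are buffered until every configuration in the current batch has reported; the actor-critic networks are then updated inside Population.append before the next batch is sampled.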
+ """ + if isinstance(value, dict): + value = value['default'] + + self.population.append(self.wait_dict[parameter_id], value) + del self.wait_dict[parameter_id] + + if not self.serve_list and not self.wait_dict: + self.serve_list = self.population.generate() + if not self.serve_list: + raise RuntimeError("Tuner stopped since no candidates") + + while self.request_list and self.serve_list: + param_id = self.request_list[0] + self.wait_dict[param_id] = self.serve_list.pop() + self.send_trial_callback( + param_id, self.wait_dict[param_id].pick_out()) + self.request_list.pop(0) + + # print('request_list: ' + str(len(self.request_list))) + # print('serve_list: ' + str(len(self.serve_list))) + # print('wait_dict: ' + str(len(self.wait_dict.keys()))) + + def trial_end(self, parameter_id, success, **kwargs): + """Method invoked when a trial is completed or terminated. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if not success: + self.population.append(self.wait_dict[parameter_id], 0.0) + del self.wait_dict[parameter_id] + + if not self.serve_list and not self.wait_dict: + self.serve_list = self.population.generate() + if not self.serve_list: + raise RuntimeError("Tuner stopped since no candidates") + + while self.request_list and self.serve_list: + param_id = self.request_list[0] + self.wait_dict[param_id] = self.serve_list.pop() + self.send_trial_callback( + param_id, self.wait_dict[param_id].pick_out()) + self.request_list.pop(0) + + # print('trial_end request_list: ' + str(len(self.request_list))) + # print('trial_end serve_list: ' + str(len(self.serve_list))) + # print('trial_end wait_dict: ' + str(len(self.wait_dict.keys()))) diff --git a/examples/trials/systems_auto_tuning/opevo/src/algorithms/opevo.py b/examples/trials/systems_auto_tuning/opevo/src/algorithms/opevo.py new file mode 100644 index 0000000000000000000000000000000000000000..5eb439750ccebd28fe818b296970052473d18fe1 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/algorithms/opevo.py @@ -0,0 +1,454 @@ +import math +import logging +import copy +import random +import numpy as np +from itertools import permutations, combinations + +import nni +from nni.tuner import Tuner + + +class Parameter(object): + """Base class for all types of parameters + """ + def mutate(self): + raise NotImplementedError + + def reset(self): + raise NotImplementedError + + def pick_out(self): + raise NotImplementedError + + def get_cardinality(self): + raise NotImplementedError + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + +class Choice(Parameter): + """choice type parameter + """ + def __init__(self, choices, mutate_rate): + self.choices = choices + self.value = random.choice(self.choices) + self.mutate_rate = mutate_rate + + def get_cardinality(self): + return len(self.choices) + + def reset(self): + self.value = random.choice(self.choices) + + def mutate(self): + child = copy.deepcopy(self) + while random.uniform(0, 1) < child.mutate_rate: + choices = copy.deepcopy(child.choices) + choices.remove(child.value) + if choices: + child.value = random.choice(choices) + else: + break + + return child + + def pick_out(self): + return self.value + + +class Discrete(Parameter): + """choice type parameter + """ + def __init__(self, numbers, mutate_rate): + numbers.sort() + self.numbers = numbers + self.value = random.choice(self.numbers) + self.mutate_rate = mutate_rate + + def get_cardinality(self): + return len(self.numbers) + 
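# mutate() below performs a geometric random walk over the sorted numbers: + # with probability mutate_rate the value keeps stepping to an adjacent + # entry, so nearby values are sampled more often than distant ones + # (e.g. for the discrete space [0, 125, 256], 125 may move to 0 or 256).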
+ def reset(self): + self.value = random.choice(self.numbers) + + def mutate(self): + child = copy.deepcopy(self) + while random.uniform(0, 1) < child.mutate_rate: + idx = child.numbers.index(child.value) + if idx == 0 and idx + 1 < len(child.numbers): + child.value = child.numbers[idx + 1] + elif idx + 1 == len(child.numbers) and idx - 1 >= 0: + child.value = child.numbers[idx - 1] + elif idx == 0 and idx + 1 == len(child.numbers): + break + else: + shift = random.choice([-1, 1]) + child.value = child.numbers[idx + shift] + + return child + + def pick_out(self): + return self.value + + +class Factor(Parameter): + """factor type parameter + """ + def __init__(self, value, mutate_rate): + self.product, self.num = value + self.mutate_rate = mutate_rate + self.all_partitions = self._get_all_partitions(self.product, self.num) + self.partition = random.choice(self.all_partitions) + + def reset(self): + self.partition = random.choice(self.all_partitions) + + def get_cardinality(self): + return len(self.all_partitions) + + def mutate(self): + child = copy.deepcopy(self) + while random.uniform(0, 1) < self.mutate_rate: + action = random.choice(child._get_actions()) + child._step(action) + + return child + + def pick_out(self): + return self.partition + + def _step(self, action): + self.partition[action[0]] = int(self.partition[action[0]] / action[2]) + self.partition[action[1]] = int(self.partition[action[1]] * action[2]) + + def _get_actions(self): + actions = [] + prime_factors = self._get_prime_factors(self.product, False) + for i in range(self.num): + for j in range(self.num): + if i != j: + for k in range(len(prime_factors)): + action = [i] + action.append(j) + action.append(prime_factors[k]) + if self.partition[action[0]] % action[2] == 0: + actions.append(action) + return actions + + def __repr__(self): + string = "[" + for factor in self.partition: + string += factor.__repr__() + " " + string = string[:-1] + "]" + + return string + + def _get_all_partitions(self, product, num): + # get all prime factors with repetition + prime_factors = self._get_prime_factors(product) + + # group all prime factors + groups = {} + for prime_factor in prime_factors: + if prime_factor in groups.keys(): + groups[prime_factor] += 1 + else: + groups[prime_factor] = 1 + + # partition each group + for key, value in groups.items(): + partitions = [] + for comb in combinations(range(value + num - 1), num - 1): + # print(comb) + partition = [] + start_idx = -1 + for idx in comb: + partition.append(key**(idx - start_idx - 1)) + start_idx = idx + partition.append(key**(value + num - 2 - start_idx)) + partitions.append(partition) + groups[key] = partitions + + # generate partitions + partitions = [] + + def part(groups, mul=[]): + if not groups: + partition = [1] * num + for i in range(num): + for m in mul: + partition[i] *= m[i] + partitions.append(partition) + + for key, group in groups.items(): + for partition in group: + mul.append(partition) + tmp = copy.deepcopy(groups) + del tmp[key] + part(tmp, mul) + mul.pop() + break + + part(groups) + return partitions + + def _get_prime_factors(self, n, repeat=True): + prime_factors = [] + + while n % 2 == 0: + if 2 not in prime_factors: + prime_factors.append(2) + elif repeat: + prime_factors.append(2) + n = n / 2 + + for i in range(3, int(math.sqrt(n)) + 1, 2): + while n % i == 0: + if i not in prime_factors: + prime_factors.append(i) + elif repeat: + prime_factors.append(i) + n = n / i + + if n > 2: + prime_factors.append(int(n)) + + return prime_factors + + +class 
Individual(object): + """Individual class + """ + def __init__(self, search_space, mutate_rate): + self.params = {} + for key in search_space.keys(): + if search_space[key]['_type'] == 'choice': + self.params[key] = \ + Choice(search_space[key]['_value'], mutate_rate) + elif search_space[key]['_type'] == 'discrete': + self.params[key] = \ + Discrete(search_space[key]['_value'], mutate_rate) + elif search_space[key]['_type'] == 'factor': + self.params[key] = \ + Factor(search_space[key]['_value'], mutate_rate) + else: + raise RuntimeError( + "OpEvo Tuner doesn't support this kind of parameter: " + + str(search_space[key]['_type']) + ) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def __repr__(self): + string = "" + for param in self.params: + string += param.__repr__() + '\n' + + return string + + def mutate(self): + child = copy.deepcopy(self) + for key in child.params.keys(): + child.params[key] = child.params[key].mutate() + + return child + + def reset(self): + for key in self.params.keys(): + self.params[key].reset() + + return self + + def pick_out(self): + output = {} + for key in self.params.keys(): + output[key] = self.params[key].pick_out() + + return output + + +class Population(object): + """Population class + """ + + def __init__(self, search_space, mutate_rate, opt_mode='maximize'): + self.search_space = search_space + self.mutate_rate = mutate_rate + self.opt_mode = opt_mode + self.population = [] + self.fitness = [] + + self.individual = Individual(self.search_space, self.mutate_rate) + self.volume = 1 + for key, value in self.individual.params.items(): + self.volume *= self.individual.params[key].get_cardinality() + + def append(self, individual, fitness): + if self.opt_mode == "minimize": + fitness = -1 * fitness + + self.population.insert(0, individual) + self.fitness.insert(0, fitness) + + i = 0 + while (i < len(self.fitness) - 1 + and self.fitness[i] < self.fitness[i + 1]): + self.fitness[i], self.fitness[i + 1] = \ + self.fitness[i + 1], self.fitness[i] + self.population[i], self.population[i + 1] = \ + self.population[i + 1], self.population[i] + i += 1 + + def get_offspring(self, parents_size, offspring_size): + children = [] + if len(self.fitness) < parents_size: + for _ in range(offspring_size): + child = copy.deepcopy(self.individual.reset()) + while child in self.population or child in children: + child = child.mutate() + children.append(child) + elif self.fitness[0] < 1e-3: + for _ in range(offspring_size): + child = copy.deepcopy(self.individual.reset()) + while child in self.population or child in children: + child = child.mutate() + children.append(child) + else: + prob = np.array(self.fitness[:parents_size]) / \ + np.sum(self.fitness[:parents_size]) + + for _ in range(offspring_size): + child = copy.deepcopy(self.population[0]) + for key in child.params.keys(): + idx = np.random.choice(range(parents_size), p=prob) + child.params[key] = self.population[idx].params[key] + child = child.mutate() + while child in self.population or child in children: + child = child.mutate() + children.append(child) + + return children + + +class OpEvo(Tuner): + """OpEvo Tuner + + Parameters + ---------- + optimize_mode: str, 'maximize' or 'minimize' + parents_size: int + offspring_size: int + parents_size and offspring_size govern the diversity in evolutionary + process. 
OpEvo with large parents_size and offspring_size tends to escape + suboptima but sacrifices data efficiency, while one with smaller + parents_size and offspring_size converges faster but is more likely + to get stuck in a suboptimum. + mutate_rate: float, (0, 1) + Mutation rate ranging from 0 to 1. It trades off exploration against + exploitation: OpEvo tends toward exploration as q approaches 0, and + toward exploitation as q approaches 1. + """ + + def __init__(self, + optimize_mode="maximize", + parents_size=20, + offspring_size=20, + mutate_rate=0.5): + self.logger = logging.getLogger( + self.__module__ + "." + self.__class__.__name__) + self.logger.setLevel('DEBUG') + + self.optimize_mode = optimize_mode + self.parents_size = parents_size + self.offspring_size = offspring_size + self.mutate_rate = mutate_rate + + self.request_list = [] + self.serve_list = [] + self.wait_dict = {} + + def update_search_space(self, search_space): + """Update the self.bounds and self.types by the search_space.json file. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if not isinstance(search_space, dict): + self.logger.info("The format of search space is not a dict.") + raise RuntimeError("The format of search space is not a dict.") + + self.population = Population(search_space, + self.mutate_rate, + self.optimize_mode) + self.logger.debug('Total search space volume: %s', str(self.population.volume)) + + if not self.serve_list: + self.serve_list = self.population.get_offspring( + self.parents_size, self.offspring_size) + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """Returns multiple sets of trial (hyper-)parameters, + as iterable of serializable objects. + """ + result = [] + self.send_trial_callback = kwargs['st_callback'] + for parameter_id in parameter_id_list: + had_exception = False + try: + self.logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def generate_parameters(self, parameter_id, **kwargs): + """Method which provides one set of hyper-parameters. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if self.serve_list: + self.wait_dict[parameter_id] = self.serve_list.pop() + return self.wait_dict[parameter_id].pick_out() + else: + self.request_list.append(parameter_id) + raise nni.NoMoreTrialError('no more parameters now.') + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """Method invoked when a trial reports its final result. + + Override of the abstract method in :class:`~nni.tuner.Tuner`. + """ + if isinstance(value, dict): + value = value['default'] + + self.population.append(self.wait_dict[parameter_id], value) + del self.wait_dict[parameter_id] + + if not self.serve_list: + self.serve_list = self.population.get_offspring( + self.parents_size, self.offspring_size) + + while self.request_list and self.serve_list: + param_id = self.request_list[0] + self.wait_dict[param_id] = self.serve_list.pop() + self.send_trial_callback( + param_id, self.wait_dict[param_id].pick_out()) + self.request_list.pop(0) + + def trial_end(self, parameter_id, success, **kwargs): + """Method invoked when a trial is completed or terminated. + + Override of the abstract method in :class:`~nni.tuner.Tuner`.
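+ + Failed trials are recorded with a fitness of 0.0, so their configurations remain in the population and are not proposed again.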
+ """ + if not success: + self.population.append(self.wait_dict[parameter_id], 0.0) + del self.wait_dict[parameter_id] diff --git a/examples/trials/systems_auto_tuning/opevo/src/compiler_auto_tune_stable.py b/examples/trials/systems_auto_tuning/opevo/src/compiler_auto_tune_stable.py new file mode 100644 index 0000000000000000000000000000000000000000..31f62cc64da974c83f7a519d18930b3bdb5d016b --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/compiler_auto_tune_stable.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 + +## TODO: optimize c-mcpu metric; early-stop handler; fp16/int8; Kill pyRPC; + +import numpy as np +import tvm +import logging +import math +import re +import sys, time, subprocess, os, random, hashlib +from tvm import autotvm +import topi +import json +from topi.util import get_const_tuple +import importlib +from tvm.autotvm.task.dispatcher import ApplyConfig +from tvm.autotvm.task import ConfigEntity +from threading import Timer + +backend = os.environ['BACKEND'] if 'BACKEND' in os.environ else 'c-cuda' + +def system_lock(key_ids): + import socket, time + occupied_sock = None + while not occupied_sock: + for key_id in key_ids: + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('127.0.0.1', 9050 + key_id)) + sock.listen(1) + occupied_sock = (sock, key_id) + break + except: + try: + sock.shutdown(socket.SHUT_RDWR) + sock.close() + except: + sock.close() + if occupied_sock: + break + # print('still waiting ..') + time.sleep(0.2) + + # print('Using key_id = %d' % occupied_sock[1]) + sock = occupied_sock[0] + + def unlock_fd(): + try: + sock.shutdown(socket.SHUT_RDWR) + sock.close() + except: + sock.close() + return unlock_fd, occupied_sock[1] + +def show_search_space(config_space, printable): + search_space = {} + for _, name in enumerate(config_space.space_map): + curr = config_space.space_map[name] + if (curr.__class__ == tvm.autotvm.task.space.SplitSpace): + search_space[name] = {"_type": "factor", "_value": [curr.product, curr.num_output]} + elif (curr.__class__ == tvm.autotvm.task.space.OtherOptionSpace): + search_space[name] = {"_type": "choice", "_value": [x.val for x in curr.entities]} + else: + raise Exception("Cannot recognize search space type: %s" % (config_space.space_map[name].__class__)) + json_space = json.dumps(search_space) + print("\n>> Search Space = %s" % json_space) + if printable: + print("\n>> Writing Search Space to './search_space.json'..") + with open("search_space.json", "w") as fp: + fp.write(json_space) + print("\n>> Done") + sys.exit(0) + +def get_tuning_parallism(): + if 'DEV_NUM' in os.environ: + dev_num = int(os.environ['DEV_NUM']) + else: + if backend in ['c-rocm', '#rocm']: + devices = subprocess.getoutput('/opt/rocm/bin/rocm_agent_enumerator | grep -v gfx000').split() + if not devices: + raise Exception("No valid rocm device found.") + dev_num = len(devices) + elif backend in ['c-cuda', '#cuda']: + devices = subprocess.getoutput('ls /dev/nvidia[0-9]* 2>/dev/null').split() + if not devices: + raise Exception("No valid cuda device found.") + dev_num = len(devices) + else: + raise Exception("Unrecognized backend: %s" % backend) + print(' >> Tuning parallelism = %d' % dev_num) + return dev_num + +def local_get_dir_file(rel_file, dir_sid=None): + if not dir_sid: + dir_sid = os.environ['DIR_SID'] if 'DIR_SID' in os.environ else '_' + dir_space = '/tmp/tvm_autotvm_engine' + os.system('mkdir -p "%s/%s"' % (dir_space, dir_sid)) + return "%s/%s/%s" %
(dir_space, dir_sid, rel_file) + +def run_process_with_timeout(args, timeout=None, envs=None): + try: + proc = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, env=envs) + retcode = proc.wait(timeout=timeout) + return retcode == 0 + except subprocess.TimeoutExpired: + print('Timed out - killing', proc.pid) + proc.kill() + return False + +def parse_launch_bounds(code): + func_arr = code.split('extern "C" __global__ ') + for i in range(1, len(func_arr)): + axis_map = dict() + lines = func_arr[i].split('\n') + for it in lines: + if it.startswith(' // [thread_extent] '): + words = it.split(' ') + nthread = int(words[-1]) + axis = words[-3] + if axis in axis_map: + if axis_map[axis] != nthread: + assert(False) + else: + axis_map[axis] = nthread + block_bound = axis_map.get('threadIdx.x', 1) * axis_map.get('threadIdx.y', 1) * axis_map.get('threadIdx.z', 1) + func_arr[i] = 'extern "C" __global__ __launch_bounds__(%d) %s' % (block_bound, func_arr[i]) + + code = ''.join(func_arr) + return code + +def translate_code(code): + if backend == 'c-rocm': + code = parse_launch_bounds(code) + code = '#include \n#include \n\n'+ code.replace('(__shared__ float4*)', '(float4*)').replace('#include ', '').replace('typedef unsigned long long uint64_t;', '') + elif backend in ['#cuda', 'c-cuda']: + code = parse_launch_bounds(code) + code = '#include \n#include \n\n' + code + else: + raise Exception("Unrecognized backend: %s" % backend) + return code + +@tvm.register_func +def tvm_callback_backend_proc(code): + native_code = translate_code(code) + # Compile code + module_data = None + if backend == 'c-rocm': + gcn_arch = subprocess.getoutput('/opt/rocm/bin/rocm_agent_enumerator | sort | uniq | grep -v gfx000 | tail -n 1').strip() + if not gcn_arch: + raise RuntimeError("Compilation error: no valid gcn_arch gpu detected!") + temp_code = local_get_dir_file("my_kernel.cc") + temp_cobj = local_get_dir_file("my_kernel.hsaco") + args = ['/opt/rocm/bin/lpl', temp_code, '-t=' + gcn_arch, '-f="-Wno-ignored-attributes -D__HIP_PLATFORM_HCC__=1"', '-o', temp_cobj] + elif backend in ['#cuda', 'c-cuda']: + temp_code = local_get_dir_file("my_kernel.cu") + temp_cobj = local_get_dir_file("my_kernel.ptx") + args = ['/usr/local/cuda/bin/nvcc', temp_code, '--ptx', '-O3', '-o', temp_cobj] + else: + raise Exception("Unrecognized backend: %s" % backend) + with open(temp_code, 'w') as fp: + fp.write(native_code) + print('[Build @%x]' % os.getpid(), ' '.join(args)) + if not run_process_with_timeout(args, 10): + raise Exception("Compilation failed or time limit exceeded") + if module_data is None: + module_data = bytearray(open(temp_cobj, "rb").read()) + return module_data + +def run_config_entity(params_given, dir_sid, expected_timecost='inf', tune_slot_id=0): + dir_sid = str(dir_sid) + result_file = local_get_dir_file('result.txt', dir_sid) + try: + os.remove(result_file) + except: + pass + config_str = json.dumps(params_given) + envs = os.environ.copy() + envs['CONFIG'] = config_str + envs['DIR_SID'] = dir_sid + envs['HIP_VISIBLE_DEVICES'] = str(tune_slot_id) + print(" >> Try param_entity on sid = %s: config = %s, slot_id = %d" % (dir_sid, config_str, tune_slot_id)) + try: + assert(True == run_process_with_timeout(["python%d" % sys.version_info.major] + sys.argv, envs=envs)) + result = float(open(result_file, 'r').read().strip()) + except: + result = float('inf') + print(" >> Try param_entity on sid = %s: result = `%.6f`" % (dir_sid, result)) + return result + +def compute_gflops(flop, t): + return flop / (t * 
1e3) / 1e6 + +def search_op_config(code_only=False): + tvm_target = 'cuda' + logging.getLogger('autotvm').setLevel(logging.DEBUG) + logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout)) + + default_tune_op = importlib.import_module('templates.' + (os.environ['OP'])) + print(' >> Backend = %s, Python PID = %s, Task = %s;' % (backend, os.getpid(), default_tune_op.__name__)) + + task = autotvm.task.create(default_tune_op.get_template_op, args=(), target=tvm_target) + op_attributes = default_tune_op.op_attributes + op_summary = '_'.join([k + str(op_attributes[k]) for k in op_attributes]) + + def json_to_config(json_dict): + config = ConfigEntity.from_json_dict({"i": -1, "t": "", "c": None, "e": json_dict}) + return config + + def config_to_json(config): + jobj = config.to_json_dict()['e'] + json_dict = dict() + for i in range(len(jobj)): + assert(jobj[i][1] in ['sp', 'ot']) + json_dict[jobj[i][0]] = jobj[i][2] + return json_dict + + num_trials = int(os.environ['STEP']) if 'STEP' in os.environ else 0 + + if 'CONFIG' in os.environ: + params_given = json.loads(os.environ['CONFIG']) + print("====>> [Current Config Option]", os.environ['CONFIG']) + + trial_config = [] + for key in params_given: + trial_config.append([key, "sp" if type(params_given[key]) is list else "ot", params_given[key]]) + best_config = json_to_config(trial_config) + + elif 'NNI_TRIAL_JOB_ID' in os.environ: + show_search_space(task.config_space, os.environ['NNI_TRIAL_JOB_ID'] == '@') + import nni + params_given = nni.get_next_parameter() + if params_given is None: + raise RuntimeError("NNI returned no parameters for this trial.") + local_dir_id = os.environ['NNI_TRIAL_JOB_ID'] + t = run_config_entity(params_given, local_dir_id) + gflops = compute_gflops(task.flop, t) + print('[TVM-engine] Final entity result is: %g' % gflops) + try: + nni.report_final_result(gflops) + except: + print('[TVM-engine] (not reporting final result to NNI.)') + exit(0) + + elif num_trials > 0: + n_parallel = 16 if 'BATCH' not in os.environ else int(os.environ['BATCH']) + measure_option = autotvm.measure_option( + builder=autotvm.LocalBuilder(n_parallel=n_parallel), + runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4) + ) + # if DO_TUNING: + tuner = autotvm.tuner.XGBTuner(task, num_threads=8) + + from concurrent.futures import ThreadPoolExecutor + thread_pool = ThreadPoolExecutor(max_workers=n_parallel) + + dev_num = get_tuning_parallism() + + def parse_configs(task, configs): + results = [] + futures = [] + expected_timecost = 'inf' + for i in range(len(configs)): + futures.append(thread_pool.submit(run_config_entity, config_to_json(configs[i]), i, expected_timecost, i % dev_num)) + for i in range(len(configs)): + t = futures[i].result() + if t < tuner.task.best_config[0]: + tuner.task.best_config = (t, configs[i]) + results.append(autotvm.measure.MeasureResult(costs=(t,), error_no=0, all_cost=i, timestamp=time.time())) + return results + + tuner.task.best_config = (float('inf'), None) + tuner.parse_configs = parse_configs + + tuner.tune(n_trial=num_trials, measure_option=measure_option, callbacks=[]) + assert(not math.isinf(tuner.task.best_config[0])) + best_config = tuner.task.best_config[1] + print('\n[Best Config]', json.dumps(config_to_json(best_config))) + else: + best_config = task.config_space + + with ApplyConfig(best_config): + with tvm.target.create(tvm_target): + s, arg_bufs = default_tune_op.get_template_op() + lower_source = str(tvm.lower(s, arg_bufs, simple_mode=True)) + + # Verify Source Code + assert(len(('\n' + lower_source).split('\nproduce ')) == 2) +
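# Static resource checks: parse the thread extents and shared-memory + # allocations out of the lowered IR and reject configurations that exceed + # the device's max_threads_per_block or max_shared_memory_per_block limits + # before the kernel is compiled and launched. +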
lower_file = local_get_dir_file('my_kernel.lower') + with open(lower_file, 'w') as fp: + fp.write(lower_source) + + max_threads_per_block = tvm.ndarray.gpu(0).max_threads_per_block + max_shared_memory_per_block = tvm.ndarray.gpu(0).max_shared_memory_per_block + + thread_extents = subprocess.getoutput("cat '%s' | grep '^ *// attr.*iter_var.*thread_extent'" % (lower_file)).split('\n') + reserved_axes = dict({'threadIdx.x': None, 'threadIdx.y': None, 'threadIdx.z': None, 'blockIdx.x': None, 'blockIdx.y': None, 'blockIdx.z': None}) + for line in thread_extents: + thread_name = line.split('[iter_var(')[-1].split(',')[0] + if thread_name in reserved_axes: + thread_val = int(line.split('thread_extent = ')[-1]) + if reserved_axes[thread_name] is not None: + if reserved_axes[thread_name] != thread_val: + assert(False) + else: + reserved_axes[thread_name] = thread_val + else: + raise Exception("Invalid thread_axis name: %s" % thread_name) + + num_threads = 1 + for thread_name in ['threadIdx.x', 'threadIdx.y', 'threadIdx.z']: + if reserved_axes[thread_name] is not None: + num_threads *= reserved_axes[thread_name] + if num_threads > max_threads_per_block: + raise Exception("Invalid kernel code: using num_threads %d > max_threads_per_block %d" % (num_threads, max_threads_per_block)) + + allocate_shared = subprocess.getoutput("cat '%s' | grep 'allocate .*shared\[.*\]'" % (lower_file)).split('\n') + shared_memory_in_bytes = 0 + for line in allocate_shared: + if not line: + continue + parts = line.split('[') + assert(len(parts) == 2) + parts = parts[1].split(' * ') + assert(len(parts) == 2) + assert(parts[1][-1] == ']') + allocate_type = parts[0] + allocate_val = int(parts[1][:-1]) + if allocate_type in ['float32']: + shared_memory_in_bytes += allocate_val * 4 + else: + raise Exception("Unrecognized shared memory data type: %s" % allocate_type) + if shared_memory_in_bytes > max_shared_memory_per_block: + raise Exception("Invalid kernel code: using shared_memory_in_bytes %d > max_shared_memory_per_block %d" % (shared_memory_in_bytes, max_shared_memory_per_block)) + + func = tvm.build(s, arg_bufs, tvm_target, name='template_op') + + assert(len(func.imported_modules) == 1) + device_source = translate_code(func.imported_modules[0].get_source()) + + if code_only: + return device_source + + if lower_source and device_source: + tune_slot_id = 0 if 'HIP_VISIBLE_DEVICES' not in os.environ else int(os.environ['HIP_VISIBLE_DEVICES']) + exec_fd, _ = system_lock([tune_slot_id]) + gpu_id = 0 + ctx = tvm.context(tvm_target, gpu_id) + tensors, outs = [], [] + for arg in arg_bufs: + shape = [int(x) for x in arg.shape] + is_output = arg.op.__class__ != tvm.tensor.PlaceholderOp + from tvm._ffi.ndarray import empty + td = empty(shape, arg.dtype, ctx) + if is_output: + outs.append(td) + tensors.append(td) + + def timeout_handler(): + print("Error: Timeout during Kernel warmup") + os._exit(1) + + my_timer = Timer(10, timeout_handler, []) + my_timer.start() + # Warmup + func(*tensors) + tvm.ndarray.gpu(gpu_id).sync() + # Estimate + t_start = time.time() + func(*tensors) + tvm.ndarray.gpu(gpu_id).sync() + t_diff = time.time() - t_start + my_timer.cancel() + del my_timer + + num_runs = max(3, min(100, math.floor(1.0 / t_diff))) + timeout_seconds = math.ceil((num_runs + 5) * t_diff) + my_timer = Timer(timeout_seconds, timeout_handler, []) + my_timer.start() + timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs) + t = timer_f(*tensors).mean + my_timer.cancel() + exec_fd() + + gflops = compute_gflops(task.flop, t) + 
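# The mean kernel time t (in seconds) is reported as GFLOPS here and + # persisted to result.txt below, where the parent run_config_entity + # process reads it back as this trial's cost. +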
print("[TVM-engine] Average time cost of %d runs = %g ms, %g gflops." % (num_runs, t * 1e3, gflops)) + + with open(local_get_dir_file('result.txt'), 'w') as fp: + fp.write(str(t)) + + +if __name__ == '__main__': + try: + search_op_config() + except SystemExit: + sys.exit(0) + except: + import traceback + traceback.print_exc() diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..5e2854e6d262b2ea8c4817d7d06e54ab33441b63 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: BatchMatMul_B960N128K128M64PNN_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=batch_matmul B=960 N=128 K=128 M=64 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..ef533e6ece1b5ddc343e64bfdfae700332b5034d --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PNN/search_space.json @@ -0,0 +1 @@ +{"B": {"_type": "factor", "_value": [960, 2]}, "K": {"_type": "factor", "_value": [128, 3]}, "X": {"_type": "factor", "_value": [128, 4]}, "Y": {"_type": "factor", "_value": [64, 4]}} \ No newline at end of file diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..a84b80a70d9e652c277678830ab287d099c6bec4 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: BatchMatMul_B960N128K128M64PTN_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=batch_matmul B=960 N=128 K=128 M=64 P=TN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/search_space.json new file mode 100644 index 
0000000000000000000000000000000000000000..ef533e6ece1b5ddc343e64bfdfae700332b5034d --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K128M64PTN/search_space.json @@ -0,0 +1 @@ +{"B": {"_type": "factor", "_value": [960, 2]}, "K": {"_type": "factor", "_value": [128, 3]}, "X": {"_type": "factor", "_value": [128, 4]}, "Y": {"_type": "factor", "_value": [64, 4]}} \ No newline at end of file diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..36e04b4da3c5193f068654e45289b9662eb0b575 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: BatchMatMul_B960N128K64M128PNT_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=batch_matmul B=960 N=128 K=64 M=128 P=NT ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..a34020c3499642283d69438f152d1162727ed9f6 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/bmm/B960N128K64M128PNT/search_space.json @@ -0,0 +1 @@ +{"B": {"_type": "factor", "_value": [960, 2]}, "K": {"_type": "factor", "_value": [64, 3]}, "X": {"_type": "factor", "_value": [128, 4]}, "Y": {"_type": "factor", "_value": [128, 4]}} \ No newline at end of file diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..d1b88e883ceb7fe9ecc7143ef6a6c94f3ec67327 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: Conv_N512C3HW227F64K11ST4PD0_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=convfwd_direct N=512 C=3 H=227 W=227 F=64 K=11 ST=4 PD=0 ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/search_space.json 
b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..7f818119b9e8c055a30c073d02ab4d89b5bc4487 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C3HW227F64K11ST4PD0/search_space.json @@ -0,0 +1 @@ +{"tile_f": {"_type": "factor", "_value": [64, 4]}, "tile_y": {"_type": "factor", "_value": [55, 4]}, "tile_x": {"_type": "factor", "_value": [55, 4]}, "tile_rc": {"_type": "factor", "_value": [3, 2]}, "tile_ry": {"_type": "factor", "_value": [11, 2]}, "tile_rx": {"_type": "factor", "_value": [11, 2]}, "auto_unroll_max_step": {"_type": "discrete", "_value": [0, 125, 256]}, "unroll_explicit": {"_type": "choice", "_value": [0, 1]}} diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..1f84af8d67e40af2f9968a056fbfc47087f95aad --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: Conv_N512C64HW27F192K5ST1PD2_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=convfwd_direct N=512 C=64 H=27 W=27 F=192 K=5 ST=1 PD=2 ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..ee2e1fc2bae60d4d90c1407f328cc734640b5ebd --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/conv/N512C64HW27F192K5ST1PD2/search_space.json @@ -0,0 +1 @@ +{"tile_f": {"_type": "factor", "_value": [192, 4]}, "tile_y": {"_type": "factor", "_value": [27, 4]}, "tile_x": {"_type": "factor", "_value": [27, 4]}, "tile_rc": {"_type": "factor", "_value": [64, 2]}, "tile_ry": {"_type": "factor", "_value": [5, 2]}, "tile_rx": {"_type": "factor", "_value": [5, 2]}, "auto_unroll_max_step": {"_type": "discrete", "_value": [0, 125, 256]}, "unroll_explicit": {"_type": "choice", "_value": [0, 1]}} diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_gbfs.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_gbfs.yml new file mode 100644 index 0000000000000000000000000000000000000000..b24388fc59b4f76341568e66437bc670b38ba9c1 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_gbfs.yml @@ -0,0 +1,23 @@ +authorName: default +experimentName: MatMul_N512K1024M1024_GBFS +trialConcurrency: 5 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: 
true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: gbfs.py + className: G_BFS + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + num_samples: 5 +trial: + command: OP=matmul N=512 K=1024 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_na2c.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_na2c.yml new file mode 100644 index 0000000000000000000000000000000000000000..e7eb2df09e50ccefbb472fcdb7e1b56bca4b9c44 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_na2c.yml @@ -0,0 +1,22 @@ +authorName: default +experimentName: MatMul_N512K1024M1024_NA2C +trialConcurrency: 6 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: na2c.py + className: N_A2C + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize +trial: + command: OP=matmul N=512 K=1024 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..bf04dbbf6b72151d7a57de4f28ed7cb3d88f9fac --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: MatMul_N512K1024M1024_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=matmul N=512 K=1024 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..af331ca6cbf05551095e15972a63e46cfc380b56 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M1024/search_space.json @@ -0,0 +1 @@ +{"K": {"_type": "factor", "_value": [1024, 3]}, "X": {"_type": "factor", "_value": [512, 4]}, "Y": {"_type": "factor", "_value": [1024, 4]}} diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_gbfs.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_gbfs.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a26abf2ecf3318b1b825ead90d9bf67b8f85d52 --- /dev/null +++ 
b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_gbfs.yml @@ -0,0 +1,23 @@ +authorName: default +experimentName: MatMul_N512K1024M4096_GBFS +trialConcurrency: 5 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: gbfs.py + className: G_BFS + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + num_samples: 5 +trial: + command: OP=matmul N=512 K=1024 M=4096 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_na2c.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_na2c.yml new file mode 100644 index 0000000000000000000000000000000000000000..6defbf207249e3699c6784c112a6b2915d53c709 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_na2c.yml @@ -0,0 +1,22 @@ +authorName: default +experimentName: MatMul_N512K1024M4096_NA2C +trialConcurrency: 6 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: na2c.py + className: N_A2C + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize +trial: + command: OP=matmul N=512 K=1024 M=4096 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..6bb7b8306556dbb596dc33807f297f1e37687e8b --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: MatMul_N512K1024M4096_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=matmul N=512 K=1024 M=4096 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..fd7d44910716d691faa50d9621e358efbef6d0e3 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K1024M4096/search_space.json @@ -0,0 +1 @@ +{"K": {"_type": "factor", "_value": [1024, 3]}, "X": {"_type": "factor", "_value": [512, 4]}, "Y": {"_type": "factor", "_value": [4096, 4]}} \ No newline at end of file diff --git 
a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_gbfs.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_gbfs.yml new file mode 100644 index 0000000000000000000000000000000000000000..1de78a51f9396414d2bf25a7ab85a23d78947ddd --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_gbfs.yml @@ -0,0 +1,23 @@ +authorName: default +experimentName: MatMul_N512K4096M1024_GBFS +trialConcurrency: 5 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: gbfs.py + className: G_BFS + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + num_samples: 5 +trial: + command: OP=matmul N=512 K=4096 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_na2c.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_na2c.yml new file mode 100644 index 0000000000000000000000000000000000000000..2ba096ce60a7a91f4ebc7d45f621920a2635def3 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_na2c.yml @@ -0,0 +1,22 @@ +authorName: default +experimentName: MatMul_N512K4096M1024_NA2C +trialConcurrency: 6 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: na2c.py + className: N_A2C + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize +trial: + command: OP=matmul N=512 K=4096 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_opevo.yml b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_opevo.yml new file mode 100644 index 0000000000000000000000000000000000000000..eb07e78b4803958586638568c5fb5ccba79029a6 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/config_opevo.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: MatMul_N512K4096M1024_OPEVO +trialConcurrency: 8 +maxExecDuration: 24h +maxTrialNum: 512 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: /root/algorithms/ + classFileName: opevo.py + className: OpEvo + # Any parameter need to pass to your tuner class __init__ constructor + # can be specified in this optional classArgs field, for example + classArgs: + optimize_mode: maximize + parents_size: 8 + offspring_size: 8 + mutate_rate: 0.5 +trial: + command: OP=matmul N=512 K=4096 M=1024 P=NN ./run.sh + codeDir: /root + # gpuNum: 0 diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..3dce7264583bbd88f5ea7650a6b6750df28dfe50 
diff --git a/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/search_space.json b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..3dce7264583bbd88f5ea7650a6b6750df28dfe50 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/experiments/mm/N512K4096M1024/search_space.json @@ -0,0 +1 @@ +{"K": {"_type": "factor", "_value": [4096, 3]}, "X": {"_type": "factor", "_value": [512, 4]}, "Y": {"_type": "factor", "_value": [1024, 4]}} \ No newline at end of file diff --git a/examples/trials/systems_auto_tuning/opevo/src/run.sh b/examples/trials/systems_auto_tuning/opevo/src/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..3b4c484ed010a9bfe07b125a3b17c1cfc6c3e49f --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash -e + +cd "$(dirname "$0")" + +export BACKEND=${BACKEND:-c-cuda} + +if [[ "${BACKEND}" == "c-cuda" ]]; then + export BACKEND="#cuda" +fi + +if [[ "${BACKEND}" != "#cuda" ]]; then + export LD_LIBRARY_PATH=/opt/tvm/build +else + export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 +fi + +export HIP_PLATFORM=hcc +export HSA_USERPTR_FOR_PAGED_MEM=0 +export PYTHONDONTWRITEBYTECODE=1 +export PYTHONPATH=/opt/tvm/python:/opt/tvm/topi/python:/opt/tvm/nnvm/python:/usr/local/rocm/src + +ldconfig + +time OP=${OP:-matmul} S=${S:-0} python3 ./compiler_auto_tune_stable.py "$@" + diff --git a/examples/trials/systems_auto_tuning/opevo/src/templates/batch_matmul.py b/examples/trials/systems_auto_tuning/opevo/src/templates/batch_matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..4c84fbf56a0c2c3b9c8e352930c274b08435be4b --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/templates/batch_matmul.py @@ -0,0 +1,119 @@ +import numpy as np +import tvm +import logging +import sys, time, subprocess +from tvm import autotvm +import topi +import json +from topi.util import get_const_tuple +import os + + +op_attributes = { + "B": int(os.environ['B']) if 'B' in os.environ else 6, + "N": int(os.environ['N']) if 'N' in os.environ else 1024, + "K": int(os.environ['K']) if 'K' in os.environ else 64, + "M": int(os.environ['M']) if 'M' in os.environ else 4096, + "P": os.environ['P'] if 'P' in os.environ else "NN", +} + +@autotvm.template +def get_template_op(**kargs): + batch = op_attributes["B"] + M = op_attributes["N"] + K = op_attributes["K"] + N = op_attributes["M"] + pose = op_attributes["P"] + + if pose == 'NN': + A = tvm.placeholder((batch, M, K), name='A', dtype="float32") + B = tvm.placeholder((batch, K, N), name='B', dtype="float32") + k = tvm.reduce_axis((0, K), name='k') + C = tvm.compute((batch, M, N), lambda b, i, j: tvm.sum( + A[b, i, k] * B[b, k, j], axis=k), name='C') + elif pose == 'NT': + A = tvm.placeholder((batch, M, K), name='A', dtype="float32") + B = tvm.placeholder((batch, N, K), name='B', dtype="float32") + k = tvm.reduce_axis((0, K), name='k') + C = tvm.compute((batch, M, N), lambda b, i, j: tvm.sum( + A[b, i, k] * B[b, j, k], axis=k), name='C') + elif pose == 'TN': + A = tvm.placeholder((batch, K, M), name='A', dtype="float32") + B = tvm.placeholder((batch, K, N), name='B', dtype="float32") + k = tvm.reduce_axis((0, K), name='k') + C = tvm.compute((batch, M, N), lambda b, i, j: tvm.sum( + A[b, k, i] * B[b, k, j], axis=k), name='C') + elif pose == 'TT': + A = tvm.placeholder((batch, K, M), name='A', dtype="float32") + B = tvm.placeholder((batch, N, K), name='B', dtype="float32") + k = tvm.reduce_axis((0, K), name='k') + C = tvm.compute((batch, M, N), lambda b, i, j: tvm.sum( + A[b, k, i] * B[b, j, k], axis=k), name='C') + else: + raise ValueError('unsupported transpose mode: ' + pose) + + cfg = autotvm.get_config() + s = tvm.create_schedule(C.op) + AA = s.cache_read(A,
"shared", [C]) + AL = s.cache_read(AA, "local", [C]) + BB = s.cache_read(B, "shared", [C]) + BL = s.cache_read(BB, "local", [C]) + CC = s.cache_write(C, "local") + + b, y, x = C.op.axis + k = CC.op.reduce_axis[0] + + cfg.define_split('B', cfg.axis(b), num_outputs=2) + bo, bi = cfg['B'].apply(s, C, b) + + cfg.define_split('K', cfg.axis(k), num_outputs=3) + ko, kt, ki = cfg['K'].apply(s, CC, k) + + block_x = tvm.thread_axis('blockIdx.x') + block_y = tvm.thread_axis('blockIdx.y') + block_z = tvm.thread_axis('blockIdx.z') + thread_x = tvm.thread_axis('threadIdx.x') + thread_y = tvm.thread_axis('threadIdx.y') + thread_z = tvm.thread_axis('threadIdx.z') + + cfg.define_split('X', cfg.axis(y), num_outputs=4) + cfg.define_split('Y', cfg.axis(x), num_outputs=4) + + by, tyz, ty, yi = cfg['X'].apply(s, C, y) + bx, txz, tx, xi = cfg['Y'].apply(s, C, x) + + s[C].bind(bo, block_z) + s[C].bind(by, block_y) + s[C].bind(bx, block_x) + s[C].bind(tyz, tvm.thread_axis('vthread')) + s[C].bind(txz, tvm.thread_axis('vthread')) + s[C].bind(bi, thread_z) + s[C].bind(ty, thread_y) + s[C].bind(tx, thread_x) + s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi) + + s[CC].compute_at(s[C], tx) + + bo, yo, xo = CC.op.axis + s[CC].reorder(ko, kt, yo, xo, ki) + s[CC].unroll(kt) + + for stage in [AL, BL]: + s[stage].compute_at(s[CC], kt) + s[stage].double_buffer() + + for stage in [AA, BB]: + s[stage].compute_at(s[CC], ko) + + fused = s[stage].fuse(*s[stage].op.axis) + ty, tx = s[stage].split(fused, nparts=cfg['X'].size[2]) + tx, xi = s[stage].split(tx, nparts=cfg['Y'].size[2]) + _, xi = s[stage].split(xi, factor=4) + + s[stage].bind(ty, thread_y) + s[stage].bind(tx, thread_x) + s[stage].vectorize(xi) + s[stage].double_buffer() + + cfg.add_flop(batch * M * K * N * 2.0) + return s, [A, B, C] diff --git a/examples/trials/systems_auto_tuning/opevo/src/templates/convfwd_direct.py b/examples/trials/systems_auto_tuning/opevo/src/templates/convfwd_direct.py new file mode 100644 index 0000000000000000000000000000000000000000..5f40abff80c479b4174ba410ee68e7241a04236e --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/templates/convfwd_direct.py @@ -0,0 +1,130 @@ +import numpy as np +import tvm +import logging +import sys, time, subprocess +from tvm import autotvm +import topi +import json +from topi.util import get_const_tuple +import os + + +op_attributes = { + "N": int(os.environ['N']) if 'N' in os.environ else 64, + "C": int(os.environ['C']) if 'C' in os.environ else 3, + "H": int(os.environ['H']) if 'H' in os.environ else 229, + "W": int(os.environ['W']) if 'W' in os.environ else 229, + "F": int(os.environ['F']) if 'F' in os.environ else 32, + "K": int(os.environ['K']) if 'K' in os.environ else 5, + "ST": int(os.environ['ST']) if 'ST' in os.environ else 1, + "PD": int(os.environ['PD']) if 'PD' in os.environ else 2, +} + + +@autotvm.template +def get_template_op(**kargs): + N = op_attributes["N"] + CI = op_attributes["C"] + H = op_attributes["H"] + W = op_attributes["W"] + H = op_attributes["H"] + CO = op_attributes["F"] + KH = KW = op_attributes["K"] + stride = op_attributes["ST"] + padding = op_attributes["PD"] + dilation = 1 + + data = tvm.placeholder((N, CI, H, W), name='data') + kernel = tvm.placeholder((CO, CI, KH, KW), name='kernel') + conv = topi.nn.conv2d_nchw( + data, kernel, (stride, stride), (padding, padding), dilation=1, out_dtype='float32') + s = tvm.create_schedule([conv.op]) + cfg = autotvm.get_config() + + ##### space definition begin ##### + n, f, y, x = s[conv].op.axis + rc, ry, rx = 
s[conv].op.reduce_axis + cfg.define_split("tile_f", f, num_outputs=4) + cfg.define_split("tile_y", y, num_outputs=4) + cfg.define_split("tile_x", x, num_outputs=4) + cfg.define_split("tile_rc", rc, num_outputs=2) + cfg.define_split("tile_ry", ry, num_outputs=2) + cfg.define_split("tile_rx", rx, num_outputs=2) + cfg.define_knob("auto_unroll_max_step", [0, 125, 256]) + + target = tvm.target.current_target() + if target.target_name in ['nvptx', 'rocm']: + cfg.define_knob("unroll_explicit", [1]) + else: + cfg.define_knob("unroll_explicit", [0, 1]) + + pad_data, kernel = s[conv].op.input_tensors + + s[pad_data].compute_inline() + if isinstance(kernel.op, tvm.tensor.ComputeOp) and 'dilate' in kernel.op.tag: + s[kernel].compute_inline() + + if conv.op in s.outputs: + output = conv + OL = s.cache_write(conv, 'local') + else: + output = s.outputs[0].output(0) + s[conv].set_scope('local') + OL = conv + + # create cache stage + AA = s.cache_read(pad_data, 'shared', [OL]) + WW = s.cache_read(kernel, 'shared', [OL]) + + # tile and bind spatial axes + n, f, y, x = s[output].op.axis + kernel_scope, n = s[output].split(n, nparts=1) + + bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f) + by, vy, ty, yi = cfg["tile_y"].apply(s, output, y) + bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x) + + bf = s[output].fuse(n, bf) + s[output].bind(bf, tvm.thread_axis("blockIdx.z")) + s[output].bind(by, tvm.thread_axis("blockIdx.y")) + s[output].bind(bx, tvm.thread_axis("blockIdx.x")) + s[output].bind(vf, tvm.thread_axis("vthread")) + s[output].bind(vy, tvm.thread_axis("vthread")) + s[output].bind(vx, tvm.thread_axis("vthread")) + s[output].bind(tf, tvm.thread_axis("threadIdx.z")) + s[output].bind(ty, tvm.thread_axis("threadIdx.y")) + s[output].bind(tx, tvm.thread_axis("threadIdx.x")) + s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi) + s[OL].compute_at(s[output], tx) + + # tile reduction axes + n, f, y, x = s[OL].op.axis + rc, ry, rx = s[OL].op.reduce_axis + rco, rci = cfg['tile_rc'].apply(s, OL, rc) + ryo, ryi = cfg['tile_ry'].apply(s, OL, ry) + rxo, rxi = cfg['tile_rx'].apply(s, OL, rx) + s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x) + + s[AA].compute_at(s[OL], rxo) + s[WW].compute_at(s[OL], rxo) + + # cooperative fetching + for load in [AA, WW]: + n, f, y, x = s[load].op.axis + fused = s[load].fuse(n, f, y, x) + tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2]) + ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2]) + tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2]) + s[load].bind(tz, tvm.thread_axis("threadIdx.z")) + s[load].bind(ty, tvm.thread_axis("threadIdx.y")) + s[load].bind(tx, tvm.thread_axis("threadIdx.x")) + + # unroll + s[output].pragma(kernel_scope, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val) + s[output].pragma(kernel_scope, 'unroll_explicit', cfg['unroll_explicit'].val) + + N, CO, OH, OW = get_const_tuple(output.shape) + _, CI, KH, KW = get_const_tuple(kernel.shape) + + cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW) + return s, [data, kernel, conv] diff --git a/examples/trials/systems_auto_tuning/opevo/src/templates/matmul.py b/examples/trials/systems_auto_tuning/opevo/src/templates/matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..7cbd75c45890c37eb77dc45990007eccf05561fd --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/src/templates/matmul.py @@ -0,0 +1,111 @@
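+# This template reads the GEMM shape from the environment (N, K, M and the +# transpose mode P) and exposes three split knobs to the tuner: 'X' and 'Y' +# factorize the output rows/columns into block/vthread/thread/inner tiles, +# while 'K' splits the reduction axis into outer/unrolled/inner loops.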
+import numpy as np +import tvm +import logging +import sys, time, subprocess +from tvm import autotvm +import topi +import json +from topi.util import get_const_tuple +import os + + +op_attributes = { + "N": int(os.environ['N']) if 'N' in os.environ else 1024, + "K": int(os.environ['K']) if 'K' in os.environ else 64, + "M": int(os.environ['M']) if 'M' in os.environ else 4096, + "P": os.environ['P'] if 'P' in os.environ else "NN", +} + +@autotvm.template +def get_template_op(**kargs): + batch = op_attributes["N"] + in_dim = op_attributes["K"] + out_dim = op_attributes["M"] + pose = op_attributes["P"] + + if pose == 'NN': + A = tvm.placeholder((batch, in_dim), name='A', dtype="float32") + B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32") + k = tvm.reduce_axis((0, in_dim), name='k') + C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum( + A[i, k] * B[k, j], axis=k), name='C') + elif pose == 'NT': + A = tvm.placeholder((batch, in_dim), name='A', dtype="float32") + B = tvm.placeholder((out_dim, in_dim), name='B', dtype="float32") + k = tvm.reduce_axis((0, in_dim), name='k') + C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum( + A[i, k] * B[j, k], axis=k), name='C') + elif pose == 'TN': + A = tvm.placeholder((in_dim, batch), name='A', dtype="float32") + B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32") + k = tvm.reduce_axis((0, in_dim), name='k') + C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum( + A[k, i] * B[k, j], axis=k), name='C') + elif pose == 'TT': + A = tvm.placeholder((in_dim, batch), name='A', dtype="float32") + B = tvm.placeholder((out_dim, in_dim), name='B', dtype="float32") + k = tvm.reduce_axis((0, in_dim), name='k') + C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum( + A[k, i] * B[j, k], axis=k), name='C') + else: + raise ValueError('unsupported transpose mode: ' + pose) + + cfg = autotvm.get_config() + s = tvm.create_schedule(C.op) + + cfg.add_flop(batch * in_dim * out_dim * 2.0) + + AA = s.cache_read(A, "shared", [C]) + AL = s.cache_read(AA, "local", [C]) + BB = s.cache_read(B, "shared", [C]) + BL = s.cache_read(BB, "local", [C]) + CC = s.cache_write(C, "local") + + y, x = C.op.axis + k = CC.op.reduce_axis[0] + + cfg.define_split('K', cfg.axis(k), num_outputs=3) + cfg.define_split('X', cfg.axis(y), num_outputs=4) + cfg.define_split('Y', cfg.axis(x), num_outputs=4) + + ko, kt, ki = cfg['K'].apply(s, CC, k) + + block_x = tvm.thread_axis('blockIdx.x') + block_y = tvm.thread_axis('blockIdx.y') + thread_x = tvm.thread_axis('threadIdx.x') + thread_y = tvm.thread_axis('threadIdx.y') + + by, tyz, ty, yi = cfg['X'].apply(s, C, y) + bx, txz, tx, xi = cfg['Y'].apply(s, C, x) + + s[C].bind(by, block_y) + s[C].bind(bx, block_x) + s[C].bind(tyz, tvm.thread_axis('vthread')) + s[C].bind(txz, tvm.thread_axis('vthread')) + s[C].bind(ty, thread_y) + s[C].bind(tx, thread_x) + s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi) + + s[CC].compute_at(s[C], tx) + + yo, xo = CC.op.axis + s[CC].reorder(ko, kt, yo, xo, ki) + s[CC].unroll(kt) + + for stage in [AL, BL]: + s[stage].compute_at(s[CC], kt) + + for stage in [AA, BB]: + s[stage].compute_at(s[CC], ko) + + fused = s[stage].fuse(*s[stage].op.axis) + ty, tx = s[stage].split(fused, nparts=cfg['X'].size[2]) + tx, xi = s[stage].split(tx, nparts=cfg['Y'].size[2]) + _, xi = s[stage].split(xi, factor=4) + + s[stage].bind(ty, thread_y) + s[stage].bind(tx, thread_x) + s[stage].vectorize(xi) + s[stage].double_buffer() + + return s, [A, B, C] diff --git a/examples/trials/systems_auto_tuning/opevo/tvm_patches/libcuda.so.1 b/examples/trials/systems_auto_tuning/opevo/tvm_patches/libcuda.so.1 new file mode 100644 index
0000000000000000000000000000000000000000..8417946346afe0b7629f59f4a96ba5b6dac257ab Binary files /dev/null and b/examples/trials/systems_auto_tuning/opevo/tvm_patches/libcuda.so.1 differ diff --git a/examples/trials/systems_auto_tuning/opevo/tvm_patches/tvm_v0.6.patch b/examples/trials/systems_auto_tuning/opevo/tvm_patches/tvm_v0.6.patch new file mode 100644 index 0000000000000000000000000000000000000000..e290b230e1976ce920bff70d32e4e524bba16614 --- /dev/null +++ b/examples/trials/systems_auto_tuning/opevo/tvm_patches/tvm_v0.6.patch @@ -0,0 +1,84 @@ +diff --git a/python/tvm/autotvm/tuner/tuner.py b/python/tvm/autotvm/tuner/tuner.py +index 76d088f4c..7ed4ff02a 100644 +--- a/python/tvm/autotvm/tuner/tuner.py ++++ b/python/tvm/autotvm/tuner/tuner.py +@@ -122,7 +122,7 @@ class Tuner(object): + configs = self.next_batch(min(n_parallel, n_trial - i)) + + inputs = [MeasureInput(self.task.target, self.task, config) for config in configs] +- results = measure_batch(inputs) ++ results = self.parse_configs(self.task, configs) if hasattr(self, 'parse_configs') else measure_batch(inputs) + + # keep best config + for k, (inp, res) in enumerate(zip(inputs, results)): +diff --git a/src/codegen/codegen_c.cc b/src/codegen/codegen_c.cc +index eab542dd3..2f1a11303 100644 +--- a/src/codegen/codegen_c.cc ++++ b/src/codegen/codegen_c.cc +@@ -808,6 +808,7 @@ void CodeGenC::VisitStmt_(const AttrStmt* op) { + IterVar iv = Downcast<IterVar>(op->node); + if (iv->thread_tag.length() != 0) { + if (!var_idmap_.count(iv->var.get())) { ++ this->currentOp = op; + BindThreadIndex(iv); + } + } +diff --git a/src/codegen/codegen_c.h b/src/codegen/codegen_c.h +index 8701cda1e..7d3d56ddc 100644 +--- a/src/codegen/codegen_c.h ++++ b/src/codegen/codegen_c.h +@@ -174,6 +174,8 @@ class CodeGenC : + // Get a cast type from to + virtual std::string CastFromTo(std::string value, Type from, Type target); + ++ const AttrStmt* currentOp; ++ + protected: + // Print reference to struct location + std::string GetStructRef( +diff --git a/src/codegen/codegen_cuda.cc b/src/codegen/codegen_cuda.cc +index 6656fa077..a4f0f962d 100644 +--- a/src/codegen/codegen_cuda.cc ++++ b/src/codegen/codegen_cuda.cc +@@ -106,6 +106,9 @@ void CodeGenCUDA::BindThreadIndex(const IterVar& iv) { + CHECK(!var_idmap_.count(iv->var.get())); + var_idmap_[iv->var.get()] = + CastFromTo(iv->thread_tag, UInt(32), iv->var.type()); ++ int nthread = static_cast<int>(this->currentOp->value.as<IntImm>()->value); ++ if (iv->thread_tag.find("threadIdx.") == 0 || iv->thread_tag.find("blockIdx.") == 0) ++ this->stream << " // [thread_extent] " << iv->thread_tag << " = " << nthread << "\n"; + } + + void CodeGenCUDA::PrintType(Type t, std::ostream& os) { // NOLINT(*) +diff --git a/src/codegen/opt/build_cuda_on.cc b/src/codegen/opt/build_cuda_on.cc +index 1992ac5d9..9b0ff4cd9 100644 +--- a/src/codegen/opt/build_cuda_on.cc ++++ b/src/codegen/opt/build_cuda_on.cc +@@ -137,6 +137,9 @@ runtime::Module BuildCUDA(Array<LoweredFunc> funcs) { + cg.AddFunction(f); + } + std::string code = cg.Finish(); ++ const auto* backendproc = Registry::Get("tvm_callback_backend_proc"); ++ if (backendproc) ++ return CUDAModuleCreate((*backendproc)(code).operator std::string(), "cubin", ExtractFuncInfo(funcs), code); + + if (const auto* f = Registry::Get("tvm_callback_cuda_postproc")) { + code = (*f)(code).operator std::string(); +diff --git a/src/lang/expr_operator.cc b/src/lang/expr_operator.cc +index 220d4378c..cc435d138 100644 +--- a/src/lang/expr_operator.cc ++++ b/src/lang/expr_operator.cc +@@ -208,11 +208,11 @@ Expr operator%(Expr a,
Expr b) { + + // TODO(tqchen): switch to floordiv + Expr indexdiv(Expr a, Expr b) { +- return floordiv(a, b); ++ return truncdiv(a, b); + } + + Expr indexmod(Expr a, Expr b) { +- return floormod(a, b); ++ return truncmod(a, b); + } + + Expr floordiv(Expr a, Expr b) { diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_smac.yml b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_smac.yml new file mode 100644 index 0000000000000000000000000000000000000000..5eb66ad909465a22bb814aac6b370d12e449d6dd --- /dev/null +++ b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_smac.yml @@ -0,0 +1,15 @@ +experimentName: auto_rocksdb_SMAC +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialCodeDirectory: . +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 24h +maxTrialNumber: 256 +tuner: + name: SMAC + classArgs: + optimize_mode: maximize +trainingService: + platform: local + useActiveGpu: False diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_tpe.yml b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_tpe.yml new file mode 100644 index 0000000000000000000000000000000000000000..acbb000193f430c8d0adfdfc44ef3f4c9b1bb8a7 --- /dev/null +++ b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/config_tpe.yml @@ -0,0 +1,15 @@ +experimentName: auto_rocksdb_TPE +searchSpaceFile: search_space.json +trialCommand: python3 main.py +trialCodeDirectory: . +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 24h +maxTrialNumber: 256 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: + platform: local + useActiveGpu: False diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/db_bench_installation.sh b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/db_bench_installation.sh new file mode 100755 index 0000000000000000000000000000000000000000..462387cb76af9645cfad989df30b8c104dc709fa --- /dev/null +++ b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/db_bench_installation.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Install db_bench and its dependencies on Ubuntu + +pushd $PWD 1>/dev/null + +# install snappy +echo "****************** Installing snappy *******************" +sudo apt-get install libsnappy-dev -y + +# install gflags +echo "****************** Installing gflags *******************" +cd /tmp +git clone https://github.com/gflags/gflags.git +cd gflags +git checkout v2.0 +./configure && make && sudo make install + +# install rocksdb +echo "****************** Installing rocksdb ******************" +cd /tmp +git clone https://github.com/facebook/rocksdb.git +cd rocksdb +CPATH=/usr/local/include LIBRARY_PATH=/usr/local/lib DEBUG_LEVEL=0 make db_bench -j7 + +DIR=$HOME/.local/bin/ +if [[ ! -e $DIR ]]; then + mkdir $DIR +elif [[ ! -d $DIR ]]; then + echo "$DIR already exists but is not a directory" 1>&2 + exit 1 +fi +mv db_bench $HOME/.local/bin && +echo "Successfully installed rocksdb in "$DIR" !" && +echo "Please add "$DIR" to your PATH for running this example." + +popd 1>/dev/null diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/main.py b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f6aa55dbf11e1185d26a4b8432a6d3e56b994483 --- /dev/null +++ b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/main.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved.
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import nni +import subprocess +import logging + +LOG = logging.getLogger('rocksdb-fillrandom') + + +def run(**parameters): + '''Run rocksdb benchmark and return throughput''' + bench_type = parameters['benchmarks'] + # recover args + args = ["--{}={}".format(k, v) for k, v in parameters.items()] + # subprocess communicate + process = subprocess.Popen(['db_bench'] + args, stdout=subprocess.PIPE) + out, err = process.communicate() + # split into lines + lines = out.decode("utf8").splitlines() + + match_lines = [] + for line in lines: + # find the line with matched str + if bench_type not in line: + continue + else: + match_lines.append(line) + break + + results = {} + for line in match_lines: + key, _, value = line.partition(":") + key = key.strip() + # a db_bench report line looks roughly like + # 'fillrandom : 2.297 micros/op 435387 ops/sec; 48.2 MB/s' (illustrative); + # the ops/sec figure sits between 'micros/op' and 'ops/sec' + value = value.split("op")[1] + results[key] = float(value) + + return results[bench_type] + + +def generate_params(received_params): + '''generate parameters based on received parameters''' + params = { + "benchmarks": "fillrandom", + "threads": 1, + "key_size": 20, + "value_size": 100, + "num": 13107200, + "db": "/tmp/rockdb", + "disable_wal": 1, + "max_background_flushes": 1, + "max_background_compactions": 4, + "write_buffer_size": 67108864, + "max_write_buffer_number": 16, + "min_write_buffer_number_to_merge": 2, + "level0_file_num_compaction_trigger": 2, + "max_bytes_for_level_base": 268435456, + "max_bytes_for_level_multiplier": 10, + "target_file_size_base": 33554432, + "target_file_size_multiplier": 1 + } + + for k, v in received_params.items(): + params[k] = int(v) + + return params + + +if __name__ == "__main__": + try: + # get parameters from tuner + RECEIVED_PARAMS = nni.get_next_parameter() + LOG.debug(RECEIVED_PARAMS) + PARAMS = generate_params(RECEIVED_PARAMS) + LOG.debug(PARAMS) + # run benchmark + throughput = run(**PARAMS) + # report throughput to nni + nni.report_final_result(throughput) + except Exception as exception: + LOG.exception(exception) + raise diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/plot.png b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/plot.png new file mode 100644 index 0000000000000000000000000000000000000000..075bf7ac77e538903f81300af598a85490a14b03 Binary files /dev/null and b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/plot.png differ diff --git a/examples/trials/systems_auto_tuning/rocksdb-fillrandom/search_space.json b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/search_space.json new file
mode 100644 index 0000000000000000000000000000000000000000..c664466b0209959fcd0b420905fd654e795f064d --- /dev/null +++ b/examples/trials/systems_auto_tuning/rocksdb-fillrandom/search_space.json @@ -0,0 +1,14 @@ +{ + "write_buffer_size": { + "_type": "quniform", + "_value": [2097152, 16777216, 1048576] + }, + "min_write_buffer_number_to_merge": { + "_type": "quniform", + "_value": [2, 16, 1] + }, + "level0_file_num_compaction_trigger": { + "_type": "quniform", + "_value": [2, 16, 1] + } +} diff --git a/examples/trials/weight_sharing/ga_squad/attention.py b/examples/trials/weight_sharing/ga_squad/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..812db53221ea2397511ed33465f08e650e42bc35 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/attention.py @@ -0,0 +1,171 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
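+ +''' +Attention modules for the QA model. +'''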
+ +import math + +import tensorflow as tf +from tensorflow.python.ops.rnn_cell_impl import RNNCell + + +def _get_variable(variable_dict, name, shape, initializer=None, dtype=tf.float32): + if name not in variable_dict: + variable_dict[name] = tf.get_variable( + name=name, shape=shape, initializer=initializer, dtype=dtype) + return variable_dict[name] + + +class DotAttention: + ''' + DotAttention + ''' + + def __init__(self, name, + hidden_dim, + is_vanilla=True, + is_identity_transform=False, + need_padding=False): + self._name = '/'.join([name, 'dot_att']) + self._hidden_dim = hidden_dim + self._is_identity_transform = is_identity_transform + self._need_padding = need_padding + self._is_vanilla = is_vanilla + self._var = {} + + @property + def is_identity_transform(self): + return self._is_identity_transform + + @property + def is_vanilla(self): + return self._is_vanilla + + @property + def need_padding(self): + return self._need_padding + + @property + def hidden_dim(self): + return self._hidden_dim + + @property + def name(self): + return self._name + + @property + def var(self): + return self._var + + def _get_var(self, name, shape, initializer=None): + with tf.variable_scope(self.name): + return _get_variable(self.var, name, shape, initializer) + + def _define_params(self, src_dim, tgt_dim): + hidden_dim = self.hidden_dim + self._get_var('W', [src_dim, hidden_dim]) + if not self.is_vanilla: + self._get_var('V', [src_dim, hidden_dim]) + if self.need_padding: + self._get_var('V_s', [src_dim, src_dim]) + self._get_var('V_t', [tgt_dim, tgt_dim]) + if not self.is_identity_transform: + self._get_var('T', [tgt_dim, src_dim]) + self._get_var('U', [tgt_dim, hidden_dim]) + self._get_var('b', [1, hidden_dim]) + self._get_var('v', [hidden_dim, 1]) + + def get_pre_compute(self, s): + ''' + :param s: [src_sequence, batch_size, src_dim] + :return: [src_sequence, batch_size. 
hidden_dim] + ''' + hidden_dim = self.hidden_dim + src_dim = s.get_shape().as_list()[-1] + assert src_dim is not None, 'src dim must be defined' + W = self._get_var('W', shape=[src_dim, hidden_dim]) + b = self._get_var('b', shape=[1, hidden_dim]) + return tf.tensordot(s, W, [[2], [0]]) + b + + def get_prob(self, src, tgt, mask, pre_compute, return_logits=False): + ''' + :param src: [src_sequence_length, batch_size, src_dim] + :param tgt: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim] + :param mask: [src_sequence_length, batch_size]\ + or [tgt_sequence_length, src_sequence_length, batch_size] + :param pre_compute: [src_sequence_length, batch_size, hidden_dim] + :return: [src_sequence_length, batch_size]\ + or [tgt_sequence_length, src_sequence_length, batch_size] + ''' + s_shape = src.get_shape().as_list() + h_shape = tgt.get_shape().as_list() + src_dim = s_shape[-1] + tgt_dim = h_shape[-1] + assert src_dim is not None, 'src dimension must be defined' + assert tgt_dim is not None, 'tgt dimension must be defined' + + self._define_params(src_dim, tgt_dim) + + if len(h_shape) == 2: + tgt = tf.expand_dims(tgt, 0) + if pre_compute is None: + pre_compute = self.get_pre_compute(src) + + buf0 = pre_compute + buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]]) + buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1)) + + if not self.is_vanilla: + xh1 = tgt + xh2 = tgt + s1 = src + if self.need_padding: + xh1 = tf.tensordot(xh1, self.var['V_t'], 1) + xh2 = tf.tensordot(xh2, self.var['V_t'], 1) + s1 = tf.tensordot(s1, self.var['V_s'], 1) + if not self.is_identity_transform: + xh1 = tf.tensordot(xh1, self.var['T'], 1) + xh2 = tf.tensordot(xh2, self.var['T'], 1) + buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1) + buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]])) + buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3)) + else: + buf = buf2 + v = self.var['v'] + e = tf.tensordot(buf, v, [[3], [0]]) + e = tf.squeeze(e, axis=[3]) + tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e)) + prob = tf.nn.softmax(tmp, 1) + if len(h_shape) == 2: + prob = tf.squeeze(prob, axis=[0]) + tmp = tf.squeeze(tmp, axis=[0]) + if return_logits: + return prob, tmp + return prob + + def get_att(self, s, prob): + ''' + :param s: [src_sequence_length, batch_size, src_dim] + :param prob: [src_sequence_length, batch_size]\ + or [tgt_sequence_length, src_sequence_length, batch_size] + :return: [batch_size, src_dim] or [tgt_sequence_length, batch_size, src_dim] + ''' + buf = s * tf.expand_dims(prob, axis=-1) + att = tf.reduce_sum(buf, axis=-3) + return att diff --git a/examples/trials/weight_sharing/ga_squad/config_remote.yml b/examples/trials/weight_sharing/ga_squad/config_remote.yml new file mode 100644 index 0000000000000000000000000000000000000000..d9af6632b59ed37136cb306f0b900376d85c70d9 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/config_remote.yml @@ -0,0 +1,31 @@ +authorName: default +experimentName: ga_squad_weight_sharing +trialConcurrency: 2 +maxExecDuration: 1h +maxTrialNum: 200 +#choice: local, remote, pai +trainingServicePlatform: remote +#choice: true, false +useAnnotation: false +multiThread: true +tuner: + codeDir: ../../../tuners/weight_sharing/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize + population_size: 32 + save_dir_root: /mnt/nfs/nni/ga_squad +trial: + command: python3 trial.py --input_file /mnt/nfs/nni/train-v1.1.json --dev_file
/mnt/nfs/nni/dev-v1.1.json --max_epoch 1 --embedding_file /mnt/nfs/nni/glove.6B.300d.txt + codeDir: . + gpuNum: 1 +machineList: + - ip: remote-ip-0 + port: 8022 + username: root + passwd: screencast + - ip: remote-ip-1 + port: 8022 + username: root + passwd: screencast diff --git a/examples/trials/weight_sharing/ga_squad/data.py b/examples/trials/weight_sharing/ga_squad/data.py new file mode 100644 index 0000000000000000000000000000000000000000..875ff91d8f79a8cac4c13048b7f907396a86149f --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/data.py @@ -0,0 +1,267 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Data processing script for the QA model. +''' + +import csv +import json +from random import shuffle + +import numpy as np + + +class WhitespaceTokenizer: + ''' + Tokenizer for whitespace + ''' + + def tokenize(self, text): + ''' + tokenize function in Tokenizer. 
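+ Splits text on spaces and tabs and records each token's character span.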
+ ''' + start = -1 + tokens = [] + for i, character in enumerate(text): + if character == ' ' or character == '\t': + if start >= 0: + word = text[start:i] + tokens.append({ + 'word': word, + 'original_text': word, + 'char_begin': start, + 'char_end': i}) + start = -1 + else: + if start < 0: + start = i + if start >= 0: + tokens.append({ + 'word': text[start:len(text)], + 'original_text': text[start:len(text)], + 'char_begin': start, + 'char_end': len(text) + }) + return tokens + + +def load_from_file(path, fmt=None, is_training=True): + ''' + load data from file + ''' + if fmt is None: + fmt = 'squad' + assert fmt in ['squad', 'csv'], 'input format must be squad or csv' + qp_pairs = [] + if fmt == 'squad': + with open(path) as data_file: + data = json.load(data_file)['data'] + for doc in data: + for paragraph in doc['paragraphs']: + passage = paragraph['context'] + for qa_pair in paragraph['qas']: + question = qa_pair['question'] + qa_id = qa_pair['id'] + if not is_training: + qp_pairs.append( + {'passage': passage, 'question': question, 'id': qa_id}) + else: + for answer in qa_pair['answers']: + answer_begin = int(answer['answer_start']) + answer_end = answer_begin + len(answer['text']) + qp_pairs.append({'passage': passage, + 'question': question, + 'id': qa_id, + 'answer_begin': answer_begin, + 'answer_end': answer_end}) + else: + with open(path, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + line_num = 0 + for row in reader: + qp_pairs.append( + {'passage': row[1], 'question': row[0], 'id': line_num}) + line_num += 1 + return qp_pairs + + +def tokenize(qp_pair, tokenizer=None, is_training=False): + ''' + tokenize function. + ''' + question_tokens = tokenizer.tokenize(qp_pair['question']) + passage_tokens = tokenizer.tokenize(qp_pair['passage']) + if is_training: + question_tokens = question_tokens[:300] + passage_tokens = passage_tokens[:300] + passage_tokens.insert( + 0, {'word': '', 'original_text': '', 'char_begin': 0, 'char_end': 0}) + passage_tokens.append( + {'word': '', 'original_text': '', 'char_begin': 0, 'char_end': 0}) + qp_pair['question_tokens'] = question_tokens + qp_pair['passage_tokens'] = passage_tokens + + +def collect_vocab(qp_pairs): + ''' + Build the vocab from corpus. + ''' + vocab = set() + for qp_pair in qp_pairs: + for word in qp_pair['question_tokens']: + vocab.add(word['word']) + for word in qp_pair['passage_tokens']: + vocab.add(word['word']) + return vocab + + +def shuffle_step(entries, step): + ''' + Shuffle the step + ''' + answer = [] + for i in range(0, len(entries), step): + sub = entries[i:i+step] + shuffle(sub) + answer += sub + return answer + + +def get_batches(qp_pairs, batch_size, need_sort=True): + ''' + Get batches data and shuffle. + ''' + if need_sort: + qp_pairs = sorted(qp_pairs, key=lambda qp: ( + len(qp['passage_tokens']), qp['id']), reverse=True) + batches = [{'qp_pairs': qp_pairs[i:(i + batch_size)]} + for i in range(0, len(qp_pairs), batch_size)] + shuffle(batches) + return batches + + +def get_char_input(data, char_dict, max_char_length): + ''' + Get char input. 
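+ Returns a (max_char_length, sequence_length, batch_size) array of char ids + plus per-token character lengths clipped to max_char_length.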
+ ''' + batch_size = len(data) + sequence_length = max(len(d) for d in data) + char_id = np.zeros((max_char_length, sequence_length, + batch_size), dtype=np.int32) + char_lengths = np.zeros((sequence_length, batch_size), dtype=np.float32) + for batch_idx in range(0, min(len(data), batch_size)): + batch_data = data[batch_idx] + for sample_idx in range(0, min(len(batch_data), sequence_length)): + word = batch_data[sample_idx]['word'] + char_lengths[sample_idx, batch_idx] = min( + len(word), max_char_length) + for i in range(0, min(len(word), max_char_length)): + char_id[i, sample_idx, batch_idx] = get_id(char_dict, word[i]) + return char_id, char_lengths + + +def get_word_input(data, word_dict, embed, embed_dim): + ''' + Get word input. + ''' + batch_size = len(data) + max_sequence_length = max(len(d) for d in data) + sequence_length = max_sequence_length + word_input = np.zeros((max_sequence_length, batch_size, + embed_dim), dtype=np.float32) + ids = np.zeros((sequence_length, batch_size), dtype=np.int32) + masks = np.zeros((sequence_length, batch_size), dtype=np.float32) + lengths = np.zeros([batch_size], dtype=np.int32) + + for batch_idx in range(0, min(len(data), batch_size)): + batch_data = data[batch_idx] + + lengths[batch_idx] = len(batch_data) + + for sample_idx in range(0, min(len(batch_data), sequence_length)): + word = batch_data[sample_idx]['word'].lower() + if word in word_dict.keys(): + word_input[sample_idx, batch_idx] = embed[word_dict[word]] + ids[sample_idx, batch_idx] = word_dict[word] + masks[sample_idx, batch_idx] = 1 + + word_input = np.reshape(word_input, (-1, embed_dim)) + return word_input, ids, masks, lengths + + +def get_word_index(tokens, char_index): + ''' + Given word return word index. + ''' + for (i, token) in enumerate(tokens): + if token['char_end'] == 0: + continue + if token['char_begin'] <= char_index and char_index <= token['char_end']: + return i + return 0 + + +def get_answer_begin_end(data): + ''' + Get answer's index of begin and end. + ''' + begin = [] + end = [] + for qa_pair in data: + tokens = qa_pair['passage_tokens'] + char_begin = qa_pair['answer_begin'] + char_end = qa_pair['answer_end'] + word_begin = get_word_index(tokens, char_begin) + word_end = get_word_index(tokens, char_end) + begin.append(word_begin) + end.append(word_end) + return np.asarray(begin), np.asarray(end) + + +def get_id(word_dict, word): + ''' + Given word, return word id. + ''' + return word_dict.get(word, word_dict['']) + + +def get_buckets(min_length, max_length, bucket_count): + ''' + Get bucket by length. + ''' + if bucket_count <= 0: + return [max_length] + unit_length = int((max_length - min_length) // (bucket_count)) + buckets = [min_length + unit_length * + (i + 1) for i in range(0, bucket_count)] + buckets[-1] = max_length + return buckets + + +def find_bucket(length, buckets): + ''' + Find bucket. 
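+ Returns the first bucket that can hold `length`, or the largest bucket + if none fits; e.g. find_bucket(45, [40, 70, 100]) returns 70.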
+ ''' + for bucket in buckets: + if length <= bucket: + return bucket + return buckets[-1] diff --git a/examples/trials/weight_sharing/ga_squad/download.sh b/examples/trials/weight_sharing/ga_squad/download.sh new file mode 100644 index 0000000000000000000000000000000000000000..308fbaedbf093a4ac967ed8332a46a210aec36cc --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/download.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json +wget http://nlp.stanford.edu/data/glove.840B.300d.zip +unzip glove.840B.300d.zip \ No newline at end of file diff --git a/examples/trials/weight_sharing/ga_squad/evaluate.py b/examples/trials/weight_sharing/ga_squad/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..6db1abbc9979fdc72ab68cbaab6ca80dcb651e01 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/evaluate.py @@ -0,0 +1,174 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Evaluation scripts for QA model. +''' + +from __future__ import print_function +from collections import Counter +import string +import re +import argparse +import json +import sys + + +def normalize_answer(str_input): + """Lower text and remove punctuation, articles and extra whitespace.""" + def remove_articles(text): + ''' + Remove "a|an|the" + ''' + return re.sub(r'\b(a|an|the)\b', ' ', text) + + def white_space_fix(text): + ''' + Remove unnecessary whitespace + ''' + return ' '.join(text.split()) + + def remove_punc(text): + ''' + Remove punctuation. + ''' + exclude = set(string.punctuation) + return ''.join(ch for ch in text if ch not in exclude) + + def lower(text): + ''' + Change string to lower form. + ''' + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(str_input)))) + + +def f1_score(prediction, ground_truth): + ''' + Calculate the f1 score.
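+ Token-level F1 between normalized answers: precision and recall come + from the multiset overlap of prediction and ground-truth tokens.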
+ ''' + prediction_tokens = normalize_answer(prediction).split() + ground_truth_tokens = normalize_answer(ground_truth).split() + common = Counter(prediction_tokens) & Counter(ground_truth_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0 + if not prediction_tokens: + raise ValueError("empty prediction tokens") + precision = 1.0 * num_same / len(prediction_tokens) + + if not ground_truth_tokens: + raise ValueError("empty groundtruth tokens") + recall = 1.0 * num_same / len(ground_truth_tokens) + f1_result = (2 * precision * recall) / (precision + recall + 1e-10) + return f1_result + + +def exact_match_score(prediction, ground_truth): + ''' + Calculate the match score with prediction and ground truth. + ''' + return normalize_answer(prediction) == normalize_answer(ground_truth) + + +def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + ''' + Metric max over the ground truths. + ''' + scores_for_ground_truths = [] + for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + + +def _evaluate(dataset, predictions): + ''' + Evaluate function. + ''' + f1_result = exact_match = total = 0 + count = 0 + for article in dataset: + for paragraph in article['paragraphs']: + for qa_pair in paragraph['qas']: + total += 1 + if qa_pair['id'] not in predictions: + count += 1 + continue + ground_truths = list( + map(lambda x: x['text'], qa_pair['answers'])) + prediction = predictions[qa_pair['id']] + exact_match += metric_max_over_ground_truths( + exact_match_score, prediction, ground_truths) + f1_result += metric_max_over_ground_truths( + f1_score, prediction, ground_truths) + print('total', total, 'exact_match', + exact_match, 'unanswered_question', count) + exact_match = 100.0 * exact_match / total + f1_result = 100.0 * f1_result / total + return {'exact_match': exact_match, 'f1': f1_result} + + +def evaluate(data_file, pred_file): + ''' + Evaluate.
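+ Loads a SQuAD v1.1 dataset and prediction file and returns the + exact-match percentage computed by _evaluate.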
+ ''' + expected_version = '1.1' + with open(data_file) as dataset_file: + dataset_json = json.load(dataset_file) + if dataset_json['version'] != expected_version: + print('Evaluation expects v-' + expected_version + + ', but got dataset with v-' + dataset_json['version'], + file=sys.stderr) + dataset = dataset_json['data'] + with open(pred_file) as prediction_file: + predictions = json.load(prediction_file) + # print(json.dumps(evaluate(dataset, predictions))) + result = _evaluate(dataset, predictions) + # print('em:', result['exact_match'], 'f1:', result['f1']) + return result['exact_match'] + + +def evaluate_with_predictions(data_file, predictions): + ''' + Evaluate with predictions. + ''' + expected_version = '1.1' + with open(data_file) as dataset_file: + dataset_json = json.load(dataset_file) + if dataset_json['version'] != expected_version: + print('Evaluation expects v-' + expected_version + + ', but got dataset with v-' + dataset_json['version'], + file=sys.stderr) + dataset = dataset_json['data'] + result = _evaluate(dataset, predictions) + return result['exact_match'] + + +if __name__ == '__main__': + EXPECT_VERSION = '1.1' + parser = argparse.ArgumentParser( + description='Evaluation for SQuAD ' + EXPECT_VERSION) + parser.add_argument('dataset_file', help='Dataset file') + parser.add_argument('prediction_file', help='Prediction File') + args = parser.parse_args() + print(evaluate(args.dataset_file, args.prediction_file)) diff --git a/examples/trials/weight_sharing/ga_squad/graph.py b/examples/trials/weight_sharing/ga_squad/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..8e675a06ffe9ad1ea6bb72bbd6b77b582fbddae7 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/graph.py @@ -0,0 +1,336 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +''' +Graph is a custom-defined class; this module contains the classes and functions related to graphs. +''' + + +import copy +import hashlib +import logging +import json +import random
+from collections import deque +from enum import Enum, unique +from typing import Iterable + +import numpy as np + +_logger = logging.getLogger('ga_squad_graph') + +@unique +class LayerType(Enum): + ''' + Layer type + ''' + attention = 0 + self_attention = 1 + rnn = 2 + input = 3 + output = 4 + +class Layer(object): + ''' + Layer class, which contains the information of graph.
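+ Each layer records its input/output edges, type, size and a hash_id + that identifies it for weight sharing.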
+ ''' + def __init__(self, graph_type, inputs=None, output=None, size=None, hash_id=None): + self.input = inputs if inputs is not None else [] + self.output = output if output is not None else [] + self.graph_type = graph_type + self.is_delete = False + self.size = size + self.hash_id = hash_id + if graph_type == LayerType.attention.value: + self.input_size = 2 + self.output_size = 1 + elif graph_type == LayerType.rnn.value: + self.input_size = 1 + self.output_size = 1 + elif graph_type == LayerType.self_attention.value: + self.input_size = 1 + self.output_size = 1 + elif graph_type == LayerType.input.value: + self.input_size = 0 + self.output_size = 1 + if self.hash_id is None: + hasher = hashlib.md5() + hasher.update(np.random.bytes(100)) + self.hash_id = hasher.hexdigest() + elif graph_type == LayerType.output.value: + self.input_size = 1 + self.output_size = 0 + else: + raise ValueError('Unsupported LayerType: {}'.format(graph_type)) + + def update_hash(self, layers: Iterable): + """ + Calculate the `hash_id` of this layer, which is determined by its own properties and the `hash_id`s of its input layers. + """ + if self.graph_type == LayerType.input.value: + return + hasher = hashlib.md5() + hasher.update(LayerType(self.graph_type).name.encode('ascii')) + hasher.update(str(self.size).encode('ascii')) + for i in self.input: + if layers[i].hash_id is None: + raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i])) + hasher.update(layers[i].hash_id.encode('ascii')) + self.hash_id = hasher.hexdigest() + + def set_size(self, graph_id, size): + ''' + Set size. + ''' + if self.graph_type == LayerType.attention.value: + if self.input[0] == graph_id: + self.size = size + if self.graph_type == LayerType.rnn.value: + self.size = size + if self.graph_type == LayerType.self_attention.value: + self.size = size + if self.graph_type == LayerType.output.value: + if self.size != size: + return False + return True + + def clear_size(self): + ''' + Clear size + ''' + if self.graph_type in (LayerType.attention.value, + LayerType.rnn.value, LayerType.self_attention.value): + self.size = None + + def __str__(self): + return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(self.graph_type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size) + +def graph_dumps(graph): + ''' + Dump the graph. + ''' + return json.dumps(graph, default=lambda obj: obj.__dict__) + +def graph_loads(graph_json): + ''' + Load graph + ''' + layers = [] + for layer in graph_json['layers']: + layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id']) + layer_info.is_delete = layer['is_delete'] + _logger.debug('append layer {}'.format(layer_info)) + layers.append(layer_info) + graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], []) + graph.layers = layers + _logger.debug('graph {} loaded'.format(graph)) + return graph + +class Graph(object): + ''' + Custom Graph class.
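+ Holds the layer list with min/max layer-count bounds and supports + topology validation, mutation and hash updates.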
+ ''' + def __init__(self, max_layer_num, min_layer_num, inputs, output, hide): + self.layers = [] + self.max_layer_num = max_layer_num + self.min_layer_num = min_layer_num + assert min_layer_num < max_layer_num + + for layer in inputs: + self.layers.append(layer) + for layer in output: + self.layers.append(layer) + if hide is not None: + for layer in hide: + self.layers.append(layer) + assert self.is_legal() + + def is_topology(self, layers=None): + ''' + Validate the topology. + ''' + if layers is None: + layers = self.layers + layers_nodle = [] + result = [] + for i, layer in enumerate(layers): + if layer.is_delete is False: + layers_nodle.append(i) + while True: + flag_break = True + layers_toremove = [] + for layer1 in layers_nodle: + flag_arrive = True + for layer2 in layers[layer1].input: + if layer2 in layers_nodle: + flag_arrive = False + if flag_arrive is True: + for layer2 in layers[layer1].output: + # size mismatch between connected layers + if layers[layer2].set_size(layer1, layers[layer1].size) is False: + return False + layers_toremove.append(layer1) + result.append(layer1) + flag_break = False + for layer in layers_toremove: + layers_nodle.remove(layer) + result.append('|') + if flag_break: + break + # There is a loop in the graph || some layers can't be reached + if layers_nodle: + return False + return result + + def layer_num(self, layers=None): + ''' + Return the number of layers. + ''' + if layers is None: + layers = self.layers + layer_num = 0 + for layer in layers: + if layer.is_delete is False and layer.graph_type != LayerType.input.value\ + and layer.graph_type != LayerType.output.value: + layer_num += 1 + return layer_num + + def is_legal(self, layers=None): + ''' + Judge whether the layers form a legal graph. + ''' + if layers is None: + layers = self.layers + + for layer in layers: + if layer.is_delete is False: + if len(layer.input) != layer.input_size: + return False + if len(layer.output) < layer.output_size: + return False + + # layer_num <= max_layer_num + if self.layer_num(layers) > self.max_layer_num: + return False + + # There is a loop in the graph || some layers can't be reached + if self.is_topology(layers) is False: + return False + + return True + + def update_hash(self): + """ + Update the hash id of each layer in topological order; + hash ids are used in weight sharing. + """ + _logger.debug('update hash') + layer_in_cnt = [len(layer.input) for layer in self.layers] + topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value]) + while topo_queue: + layer_i = topo_queue.pop() + self.layers[layer_i].update_hash(self.layers) + for layer_j in self.layers[layer_i].output: + layer_in_cnt[layer_j] -= 1 + if layer_in_cnt[layer_j] == 0: + topo_queue.appendleft(layer_j) + + def mutation(self, only_add=False): + ''' + Mutation for a graph + ''' + types = [] + if self.layer_num() < self.max_layer_num: + types.append(0) + types.append(1) + if self.layer_num() > self.min_layer_num and only_add is False: + types.append(2) + types.append(3) + # 0 : add a layer, delete an edge + # 1 : add a layer, change an edge + # 2 : delete a layer, delete an edge + # 3 : delete a layer, change an edge + graph_type = random.choice(types) + layer_type = random.choice([LayerType.attention.value,\ + LayerType.self_attention.value, LayerType.rnn.value]) + layers = copy.deepcopy(self.layers) + cnt_try = 0 + while True: + layers_in = [] + layers_out = [] + layers_del = [] + for i, layer in enumerate(layers): + if layer.is_delete is False: + if layer.graph_type !=
LayerType.output.value: + layers_in.append(i) + if layer.graph_type != LayerType.input.value: + layers_out.append(i) + if layer.graph_type != LayerType.output.value\ + and layer.graph_type != LayerType.input.value: + layers_del.append(i) + if graph_type <= 1: + new_id = len(layers) + out = random.choice(layers_out) + inputs = [] + output = [out] + pos = random.randint(0, len(layers[out].input) - 1) + last_in = layers[out].input[pos] + layers[out].input[pos] = new_id + if graph_type == 0: + layers[last_in].output.remove(out) + if graph_type == 1: + layers[last_in].output.remove(out) + layers[last_in].output.append(new_id) + inputs = [last_in] + lay = Layer(graph_type=layer_type, inputs=inputs, output=output) + while len(inputs) < lay.input_size: + layer1 = random.choice(layers_in) + inputs.append(layer1) + layers[layer1].output.append(new_id) + lay.input = inputs + layers.append(lay) + else: + layer1 = random.choice(layers_del) + for layer2 in layers[layer1].output: + layers[layer2].input.remove(layer1) + if graph_type == 2: + random_in = random.choice(layers_in) + else: + random_in = random.choice(layers[layer1].input) + layers[layer2].input.append(random_in) + layers[random_in].output.append(layer2) + for layer2 in layers[layer1].input: + layers[layer2].output.remove(layer1) + layers[layer1].is_delete = True + + if self.is_legal(layers): + self.layers = layers + break + else: + layers = copy.deepcopy(self.layers) + cnt_try += 1 + self.update_hash() + + def __str__(self): + info = "" + for l_id, layer in enumerate(self.layers): + if layer.is_delete is False: + info += 'id:%d ' % l_id + str(layer) + '\n' + return info diff --git a/examples/trials/weight_sharing/ga_squad/graph_to_tf.py b/examples/trials/weight_sharing/ga_squad/graph_to_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..82bbf960148547ffcfc24051d74be4e6ab4a7c11 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/graph_to_tf.py @@ -0,0 +1,342 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import tensorflow as tf +from rnn import XGRUCell +from util import dropout +from graph import LayerType + + +def normalize(inputs, + epsilon=1e-8, + scope="ln"): + '''Applies layer normalization. + + Args: + inputs: A tensor with 2 or more dimensions, where the first dimension has + `batch_size`. + epsilon: A floating number. A very small number for preventing ZeroDivision Error. 
+ scope: Optional scope for `variable_scope`. + reuse: Boolean, whether to reuse the weights of a previous layer + by the same name. + + Returns: + A tensor with the same shape and data dtype as `inputs`. + ''' + with tf.variable_scope(scope): + inputs_shape = inputs.get_shape() + params_shape = inputs_shape[-1:] + + mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) + beta = tf.Variable(tf.zeros(params_shape)) + gamma = tf.Variable(tf.ones(params_shape)) + normalized = (inputs - mean) / ((variance + epsilon) ** (.5)) + outputs = gamma * normalized + beta + + return outputs + + +def multihead_attention(queries, + keys, + scope="multihead_attention", + num_units=None, + num_heads=4, + dropout_rate=0, + is_training=True, + causality=False): + '''Applies multihead attention. + + Args: + queries: A 3d tensor with shape of [N, T_q, C_q]. + keys: A 3d tensor with shape of [N, T_k, C_k]. + num_units: A cdscalar. Attention size. + dropout_rate: A floating point number. + is_training: Boolean. Controller of mechanism for dropout. + causality: Boolean. If true, units that reference the future are masked. + num_heads: An int. Number of heads. + scope: Optional scope for `variable_scope`. + reuse: Boolean, whether to reuse the weights of a previous layer + by the same name. + + Returns + A 3d tensor with shape of (N, T_q, C) + ''' + global look5 + with tf.variable_scope(scope): + # Set the fall back option for num_units + if num_units is None: + num_units = queries.get_shape().as_list()[-1] + + Q_ = [] + K_ = [] + V_ = [] + for head_i in range(num_heads): + Q = tf.layers.dense(queries, num_units / num_heads, + activation=tf.nn.relu, name='Query' + str(head_i)) # (N, T_q, C) + K = tf.layers.dense(keys, num_units / num_heads, + activation=tf.nn.relu, name='Key' + str(head_i)) # (N, T_k, C) + V = tf.layers.dense(keys, num_units / num_heads, + activation=tf.nn.relu, name='Value' + str(head_i)) # (N, T_k, C) + Q_.append(Q) + K_.append(K) + V_.append(V) + + # Split and concat + Q_ = tf.concat(Q_, axis=0) # (h*N, T_q, C/h) + K_ = tf.concat(K_, axis=0) # (h*N, T_k, C/h) + V_ = tf.concat(V_, axis=0) # (h*N, T_k, C/h) + + # Multiplication + outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k) + + # Scale + outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5) + + # Key Masking + key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k) + key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k) + key_masks = tf.tile(tf.expand_dims(key_masks, 1), + [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k) + + paddings = tf.ones_like(outputs) * (-2 ** 32 + 1) + outputs = tf.where(tf.equal(key_masks, 0), paddings, + outputs) # (h*N, T_q, T_k) + + # Causality = Future blinding + if causality: + diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k) + tril = tf.contrib.linalg.LinearOperatorTriL( + diag_vals).to_dense() # (T_q, T_k) + masks = tf.tile(tf.expand_dims(tril, 0), + [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k) + + paddings = tf.ones_like(masks) * (-2 ** 32 + 1) + outputs = tf.where(tf.equal(masks, 0), paddings, + outputs) # (h*N, T_q, T_k) + + # Activation + look5 = outputs + outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k) + + # Query Masking + query_masks = tf.sign( + tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q) + query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q) + query_masks = tf.tile(tf.expand_dims( + query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k) + outputs *= query_masks # broadcasting. 
(N, T_q, C) + + # Dropouts + outputs = dropout(outputs, dropout_rate, is_training) + + # Weighted sum + outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h) + + # Restore shape + outputs = tf.concat(tf.split(outputs, num_heads, + axis=0), axis=2) # (N, T_q, C) + + # Residual connection + if queries.get_shape().as_list()[-1] == num_units: + outputs += queries + + # Normalize + outputs = normalize(outputs, scope=scope) # (N, T_q, C) + + return outputs + + +def positional_encoding(inputs, + num_units=None, + zero_pad=True, + scale=True, + scope="positional_encoding", + reuse=None): + ''' + Return positinal embedding. + ''' + Shape = tf.shape(inputs) + N = Shape[0] + T = Shape[1] + num_units = Shape[2] + with tf.variable_scope(scope, reuse=reuse): + position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1]) + + # First part of the PE function: sin and cos argument + # Second part, apply the cosine to even columns and sin to odds. + X = tf.expand_dims(tf.cast(tf.range(T), tf.float32), axis=1) + Y = tf.expand_dims( + tf.cast(10000 ** -(2 * tf.range(num_units) / num_units), tf.float32), axis=0) + h1 = tf.cast((tf.range(num_units) + 1) % 2, tf.float32) + h2 = tf.cast((tf.range(num_units) % 2), tf.float32) + position_enc = tf.multiply(X, Y) + position_enc = tf.sin(position_enc) * tf.multiply(tf.ones_like(X), h1) + \ + tf.cos(position_enc) * tf.multiply(tf.ones_like(X), h2) + + # Convert to a tensor + lookup_table = position_enc + + if zero_pad: + lookup_table = tf.concat((tf.zeros(shape=[1, num_units]), + lookup_table[1:, :]), 0) + outputs = tf.nn.embedding_lookup(lookup_table, position_ind) + + if scale: + outputs = outputs * tf.sqrt(tf.cast(num_units, tf.float32)) + + return outputs + + +def feedforward(inputs, + num_units, + scope="multihead_attention"): + '''Point-wise feed forward net. + + Args: + inputs: A 3d tensor with shape of [N, T, C]. + num_units: A list of two integers. + scope: Optional scope for `variable_scope`. + reuse: Boolean, whether to reuse the weights of a previous layer + by the same name. 
+ + Returns: + A 3d tensor with the same shape and dtype as inputs + ''' + with tf.variable_scope(scope): + # Inner layer + params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, + "activation": tf.nn.relu, "use_bias": True} + outputs = tf.layers.conv1d(**params) + + # Readout layer + params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, + "activation": None, "use_bias": True} + outputs = tf.layers.conv1d(**params) + + # Residual connection + outputs += inputs + + # Normalize + outputs = normalize(outputs) + + return outputs + + +def rnn(input_states, sequence_lengths, dropout_rate, is_training, num_units): + layer_cnt = 1 + states = [] + xs = tf.transpose(input_states, perm=[1, 0, 2]) + for i in range(0, layer_cnt): + xs = dropout(xs, dropout_rate, is_training) + with tf.variable_scope('layer_' + str(i)): + cell_fw = XGRUCell(num_units) + cell_bw = XGRUCell(num_units) + outputs, _ = tf.nn.bidirectional_dynamic_rnn( + cell_fw=cell_fw, + cell_bw=cell_bw, + dtype=tf.float32, + sequence_length=sequence_lengths, + inputs=xs, + time_major=True) + + y_lr, y_rl = outputs + xs = tf.concat([y_lr, y_rl], 2) + states.append(xs) + + return tf.transpose(dropout(tf.concat(states, axis=2), + dropout_rate, + is_training), perm=[1, 0, 2]) + + +def graph_to_network(input1, + input2, + input1_lengths, + input2_lengths, + p_graph, + dropout_rate, + is_training, + num_heads=1, + rnn_units=256): + topology = p_graph.is_topology() + layers = dict() + layers_sequence_lengths = dict() + num_units = input1.get_shape().as_list()[-1] + layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \ + positional_encoding(input1, scale=False, zero_pad=False) + layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32)) + layers[0] = dropout(layers[0], dropout_rate, is_training) + layers[1] = dropout(layers[1], dropout_rate, is_training) + layers_sequence_lengths[0] = input1_lengths + layers_sequence_lengths[1] = input2_lengths + for _, topo_i in enumerate(topology): + if topo_i == '|': + continue + + # Note: here we use the `hash_id` of layer as scope name, + # so that we can automatically load sharable weights from previous trained models + with tf.variable_scope(p_graph.layers[topo_i].hash_id, reuse=tf.AUTO_REUSE): + if p_graph.layers[topo_i].graph_type == LayerType.input.value: + continue + elif p_graph.layers[topo_i].graph_type == LayerType.attention.value: + with tf.variable_scope('attention'): + layer = multihead_attention(layers[p_graph.layers[topo_i].input[0]], + layers[p_graph.layers[topo_i].input[1]], + scope="multihead_attention", + dropout_rate=dropout_rate, + is_training=is_training, + num_heads=num_heads, + num_units=rnn_units * 2) + layer = feedforward(layer, scope="feedforward", + num_units=[rnn_units * 2 * 4, rnn_units * 2]) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == LayerType.self_attention.value: + with tf.variable_scope('self-attention'): + layer = multihead_attention(layers[p_graph.layers[topo_i].input[0]], + layers[p_graph.layers[topo_i].input[0]], + scope="multihead_attention", + dropout_rate=dropout_rate, + is_training=is_training, + num_heads=num_heads, + num_units=rnn_units * 2) + layer = feedforward(layer, scope="feedforward", + num_units=[rnn_units * 2 * 4, rnn_units * 2]) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == 
LayerType.rnn.value: + with tf.variable_scope('rnn'): + layer = rnn(layers[p_graph.layers[topo_i].input[0]], + layers_sequence_lengths[p_graph.layers[topo_i].input[0]], + dropout_rate, + is_training, + rnn_units) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == LayerType.output.value: + layers[topo_i] = layers[p_graph.layers[topo_i].input[0]] + if layers[topo_i].get_shape().as_list()[-1] != rnn_units * 1 * 2: + with tf.variable_scope('add_dense'): + layers[topo_i] = tf.layers.dense( + layers[topo_i], units=rnn_units*2) + return layers[2], layers[3] diff --git a/examples/trials/weight_sharing/ga_squad/rnn.py b/examples/trials/weight_sharing/ga_squad/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..82f7d070bf1e560f69f25aa990eee12959684941 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/rnn.py @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import tensorflow as tf +from tensorflow.python.ops.rnn_cell_impl import RNNCell + + +class GRU: + ''' + GRU class. + ''' + def __init__(self, name, input_dim, hidden_dim): + self.name = '/'.join([name, 'gru']) + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.w_matrix = None + self.U = None + self.bias = None + + def define_params(self): + ''' + Define parameters. + ''' + input_dim = self.input_dim + hidden_dim = self.hidden_dim + prefix = self.name + self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'W'])) + self.U = tf.Variable(tf.random_normal([hidden_dim, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'U'])) + self.bias = tf.Variable(tf.random_normal([1, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'b'])) + return self + + def build(self, x, h, mask=None): + ''' + Build the GRU cell. + ''' + xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1) + hu = tf.split(tf.matmul(h, self.U), 3, 1) + r = tf.sigmoid(xw[0] + hu[0]) + z = tf.sigmoid(xw[1] + hu[1]) + h1 = tf.tanh(xw[2] + r * hu[2]) + next_h = h1 * (1 - z) + h * z + if mask is not None: + next_h = next_h * mask + h * (1 - mask) + return next_h + + def build_sequence(self, xs, masks, init, is_left_to_right): + ''' + Build GRU sequence. 
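+
+        A minimal usage sketch (shapes and names here are illustrative
+        assumptions, not part of this module):
+
+            # gru = GRU('encoder', input_dim=300, hidden_dim=256).define_params()
+            # xs: list of T tensors [batch, 300]; masks: list of T tensors [batch, 1]
+            # init = tf.zeros([batch_size, 256])
+            # states = gru.build_sequence(xs, masks, init, is_left_to_right=True)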
+ ''' + states = [] + last = init + if is_left_to_right: + for i, xs_i in enumerate(xs): + h = self.build(xs_i, last, masks[i]) + states.append(h) + last = h + else: + for i in range(len(xs) - 1, -1, -1): + h = self.build(xs[i], last, masks[i]) + states.insert(0, h) + last = h + return states + + +class XGRUCell(RNNCell): + + def __init__(self, hidden_dim, reuse=None): + super(XGRUCell, self).__init__(self, _reuse=reuse) + self._num_units = hidden_dim + self._activation = tf.tanh + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + def call(self, inputs, state): + + input_dim = inputs.get_shape()[-1] + assert input_dim is not None, "input dimension must be defined" + W = tf.get_variable( + name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) + U = tf.get_variable( + name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) + b = tf.get_variable( + name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) + + xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) + hu = tf.split(tf.matmul(state, U), 3, 1) + r = tf.sigmoid(xw[0] + hu[0]) + z = tf.sigmoid(xw[1] + hu[1]) + h1 = self._activation(xw[2] + r * hu[2]) + next_h = h1 * (1 - z) + state * z + return next_h, next_h diff --git a/examples/trials/weight_sharing/ga_squad/train_model.py b/examples/trials/weight_sharing/ga_squad/train_model.py new file mode 100644 index 0000000000000000000000000000000000000000..b8240bc9605f22818b016348f0c2ea00113589db --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/train_model.py @@ -0,0 +1,263 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Train the network combined by RNN and attention. 
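+
+A minimal driver sketch (this mirrors how trial.py below uses this module;
+`embed` and `p_graph` must be prepared by the caller, so treat it as
+illustrative only):
+
+    # cfg = GAGConfig()
+    # model = GAG(cfg, embed, p_graph)
+    # model.build_net(is_training=True)   # defines model.loss and model.train_op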
+''' + +import tensorflow as tf + +from attention import DotAttention +from rnn import XGRUCell +from util import dropout +from graph_to_tf import graph_to_network + + +class GAGConfig: + """The class for model hyper-parameter configuration.""" + def __init__(self): + self.batch_size = 128 + + self.dropout = 0.1 + + self.char_vcb_size = 1500 + self.max_char_length = 20 + self.char_embed_dim = 100 + + self.max_query_length = 40 + self.max_passage_length = 800 + + self.att_is_vanilla = True + self.att_need_padding = False + self.att_is_id = False + + self.ptr_dim = 70 + self.learning_rate = 0.1 + self.labelsmoothing = 0.1 + self.num_heads = 1 + self.rnn_units = 256 + + +class GAG: + """The class for the computation graph based QA model.""" + def __init__(self, cfg, embed, p_graph): + self.cfg = cfg + self.embed = embed + self.graph = p_graph + + self.query_word = None + self.query_mask = None + self.query_lengths = None + self.passage_word = None + self.passage_mask = None + self.passage_lengths = None + self.answer_begin = None + self.answer_end = None + self.query_char_ids = None + self.query_char_lengths = None + self.passage_char_ids = None + self.passage_char_lengths = None + self.passage_states = None + self.query_states = None + self.query_init = None + self.begin_prob = None + self.end_prob = None + self.loss = None + self.train_op = None + + + def build_net(self, is_training): + """Build the whole neural network for the QA model.""" + cfg = self.cfg + word_embed = tf.get_variable( + name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False) + char_embed = tf.get_variable(name='char_embed', + shape=[cfg.char_vcb_size, + cfg.char_embed_dim], + dtype=tf.float32) + + # [query_length, batch_size] + self.query_word = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='query_word') + self.query_mask = tf.placeholder(dtype=tf.float32, + shape=[None, None], + name='query_mask') + # [batch_size] + self.query_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], name='query_lengths') + + # [passage_length, batch_size] + self.passage_word = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='passage_word') + self.passage_mask = tf.placeholder( + dtype=tf.float32, shape=[None, None], name='passage_mask') + # [batch_size] + self.passage_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], name='passage_lengths') + + if is_training: + self.answer_begin = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_begin') + self.answer_end = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_end') + + self.query_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='query_char_ids') + # sequence_length, batch_size + self.query_char_lengths = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='query_char_lengths') + + self.passage_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='passage_char_ids') + # sequence_length, batch_size + self.passage_char_lengths = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='passage_char_lengths') + + query_char_states = self.build_char_states(char_embed=char_embed, + is_training=is_training, + reuse=False, + char_ids=self.query_char_ids, + char_lengths=self.query_char_lengths) + + passage_char_states = self.build_char_states(char_embed=char_embed, + is_training=is_training, + reuse=True, + char_ids=self.passage_char_ids, + char_lengths=self.passage_char_lengths) + + with 
tf.variable_scope("encoding") as scope: + query_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.query_word), query_char_states], axis=2) + scope.reuse_variables() + passage_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.passage_word), passage_char_states], axis=2) + passage_states = tf.transpose(passage_states, perm=[1, 0, 2]) + query_states = tf.transpose(query_states, perm=[1, 0, 2]) + self.passage_states = passage_states + self.query_states = query_states + + output, output2 = graph_to_network(passage_states, query_states, + self.passage_lengths, self.query_lengths, + self.graph, self.cfg.dropout, + is_training, num_heads=cfg.num_heads, + rnn_units=cfg.rnn_units) + + passage_att_mask = self.passage_mask + batch_size_x = tf.shape(self.query_lengths) + answer_h = tf.zeros( + tf.concat([batch_size_x, tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0)) + + answer_context = tf.reduce_mean(output2, axis=1) + + query_init_w = tf.get_variable( + 'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim]) + self.query_init = query_init_w + answer_context = tf.matmul(answer_context, query_init_w) + + output = tf.transpose(output, perm=[1, 0, 2]) + + with tf.variable_scope('answer_ptr_layer'): + ptr_att = DotAttention('ptr', + hidden_dim=cfg.ptr_dim, + is_vanilla=self.cfg.att_is_vanilla, + is_identity_transform=self.cfg.att_is_id, + need_padding=self.cfg.att_need_padding) + answer_pre_compute = ptr_att.get_pre_compute(output) + ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim) + begin_prob, begin_logits = ptr_att.get_prob(output, answer_context, passage_att_mask, + answer_pre_compute, True) + att_state = ptr_att.get_att(output, begin_prob) + (_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h) + answer_context = answer_h + end_prob, end_logits = ptr_att.get_prob(output, answer_context, + passage_att_mask, answer_pre_compute, + True) + + self.begin_prob = tf.transpose(begin_prob, perm=[1, 0]) + self.end_prob = tf.transpose(end_prob, perm=[1, 0]) + begin_logits = tf.transpose(begin_logits, perm=[1, 0]) + end_logits = tf.transpose(end_logits, perm=[1, 0]) + + if is_training: + def label_smoothing(inputs, masks, epsilon=0.1): + """Modify target for label smoothing.""" + epsilon = cfg.labelsmoothing + num_of_channel = tf.shape(inputs)[-1] # number of channels + inputs = tf.cast(inputs, tf.float32) + return (((1 - epsilon) * inputs) + (epsilon / + tf.cast(num_of_channel, tf.float32))) * masks + cost1 = tf.reduce_mean( + tf.losses.softmax_cross_entropy(label_smoothing( + tf.one_hot(self.answer_begin, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits)) + cost2 = tf.reduce_mean( + tf.losses.softmax_cross_entropy( + label_smoothing(tf.one_hot(self.answer_end, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), end_logits)) + + reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + l2_loss = tf.reduce_sum(reg_ws) + loss = cost1 + cost2 + l2_loss + self.loss = loss + + optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate) + self.train_op = optimizer.minimize(self.loss) + + return tf.stack([self.begin_prob, self.end_prob]) + + def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths): + """Build char embedding network for the QA model.""" + max_char_length = self.cfg.max_char_length + + inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids), + self.cfg.dropout, is_training) + inputs = tf.reshape( + inputs, 
+            shape=[max_char_length, -1, self.cfg.char_embed_dim])
+        char_lengths = tf.reshape(char_lengths, shape=[-1])
+        with tf.variable_scope('char_encoding', reuse=reuse):
+            cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
+            cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
+            _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
+                cell_fw=cell_fw,
+                cell_bw=cell_bw,
+                sequence_length=char_lengths,
+                inputs=inputs,
+                time_major=True,
+                dtype=tf.float32
+            )
+
+            left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
+
+            right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
+
+            states = tf.concat([left_right, right_left], axis=1)
+            out_shape = tf.shape(char_ids)[1:3]
+            out_shape = tf.concat([out_shape, tf.constant(
+                value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
+        return tf.reshape(states, shape=out_shape)
diff --git a/examples/trials/weight_sharing/ga_squad/trial.py b/examples/trials/weight_sharing/ga_squad/trial.py
new file mode 100644
index 0000000000000000000000000000000000000000..78aeca99b8496bd9cbc238ef5425d7b752e61ee5
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/trial.py
@@ -0,0 +1,458 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import argparse
+import heapq
+import json
+import logging
+import os
+import pickle
+
+import numpy as np
+from tensorflow.train import init_from_checkpoint
+
+import graph
+from util import Timer
+
+import nni
+import data
+import evaluate
+from train_model import *
+
+logger = logging.getLogger('ga_squad')
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+
+def get_config():
+    '''
+    Get config from the argument parser.
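+
+    Example invocation (paths are placeholders for the SQuAD and GloVe files):
+
+        # python trial.py --input_file ./train-v1.1.json \
+        #                 --dev_file ./dev-v1.1.json \
+        #                 --embedding_file ./glove.840B.300d.txt --batch_size 64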
+    '''
+    parser = argparse.ArgumentParser(
+        description='This program uses a genetic algorithm to search for an architecture for SQuAD.')
+    parser.add_argument('--input_file', type=str,
+                        default='./train-v1.1.json', help='training set file')
+    parser.add_argument('--dev_file', type=str,
+                        default='./dev-v1.1.json', help='dev set file')
+    parser.add_argument('--embedding_file', type=str,
+                        default='./glove.840B.300d.txt', help='embedding file')
+    parser.add_argument('--root_path', default='./data/',
+                        type=str, help='Root path of models')
+    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
+    parser.add_argument('--save_path', type=str,
+                        default='./save', help='save path dir')
+    parser.add_argument('--learning_rate', type=float, default=0.0001,
+                        help='learning rate (use half of the original value when reloading data to continue training)')
+    parser.add_argument('--max_epoch', type=int, default=30)
+    parser.add_argument('--dropout_rate', type=float,
+                        default=0.1, help='dropout_rate')
+    parser.add_argument('--labelsmoothing', type=float,
+                        default=0.1, help='labelsmoothing')
+    parser.add_argument('--num_heads', type=int, default=1, help='num_heads')
+    parser.add_argument('--rnn_units', type=int, default=256, help='rnn_units')
+
+    args = parser.parse_args()
+    return args
+
+
+def get_id(word_dict, word):
+    '''
+    Return the id of a word, falling back to the unknown-word id.
+    '''
+    if word in word_dict.keys():
+        return word_dict[word]
+    return word_dict['<unk>']
+
+
+def load_embedding(path):
+    '''
+    Return the embedding dict loaded from the file at the given path.
+    '''
+    EMBEDDING_DIM = 300
+    embedding_dict = {}
+    with open(path, 'r', encoding='utf-8') as file:
+        pairs = [line.strip('\r\n').split() for line in file.readlines()]
+        for pair in pairs:
+            if len(pair) == EMBEDDING_DIM + 1:
+                embedding_dict[pair[0]] = [float(x) for x in pair[1:]]
+    logger.debug('embedding_dict size: %d', len(embedding_dict))
+    return embedding_dict
+
+
+class MaxQueue:
+    '''
+    Fixed-capacity queue that keeps the largest values pushed so far.
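+
+    Illustrative behavior (a min-heap retains only the `capacity` largest items):
+
+        # q = MaxQueue(2)
+        # for v in (0.1, 0.9, 0.5):
+        #     q.push(v)
+        # sorted(q.entries)  # -> [0.5, 0.9]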
+ ''' + + def __init__(self, capacity): + assert capacity > 0, 'queue size must be larger than 0' + self._capacity = capacity + self._entries = [] + + @property + def entries(self): + return self._entries + + @property + def capacity(self): + return self._capacity + + @property + def size(self): + return len(self._entries) + + def clear(self): + self._entries = [] + + def push(self, item): + if self.size < self.capacity: + heapq.heappush(self.entries, item) + else: + heapq.heappushpop(self.entries, item) + + +def find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length): + left = 0 + right = 0 + max_prob = left_prob[0] * right_prob[0] + for i in range(0, passage_length): + left_p = left_prob[i] + for j in range(i, min(i + max_answer_length, passage_length)): + total_prob = left_p * right_prob[j] + if max_prob < total_prob: + left, right, max_prob = i, j, total_prob + return [(max_prob, left, right)] + + +def write_prediction(path, position1_result, position2_result): + import codecs + + with codecs.open(path, 'w', encoding='utf8') as file: + batch_num = len(position1_result) + for i in range(batch_num): + position1_batch = position1_result[i] + position2_batch = position2_result[i] + + for j in range(position1_batch.shape[0]): + file.write(str(position1_batch[j]) + + '\t' + str(position2_batch[j]) + '\n') + + +def find_kbest_answer_span(k, left_prob, right_prob, passage_length, max_answer_length): + if k == 1: + return find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length) + + queue = MaxQueue(k) + for i in range(0, passage_length): + left_p = left_prob[i] + for j in range(i, min(i + max_answer_length, passage_length)): + total_prob = left_p * right_prob[j] + queue.push((total_prob, i, j)) + return list(sorted(queue.entries, key=lambda x: -x[0])) + + +def run_epoch(batches, answer_net, is_training): + if not is_training: + position1_result = [] + position2_result = [] + contexts = [] + ids = [] + + loss_sum = 0 + timer = Timer() + count = 0 + for batch in batches: + used = timer.get_elapsed(False) + count += 1 + qps = batch['qp_pairs'] + question_tokens = [qp['question_tokens'] for qp in qps] + passage_tokens = [qp['passage_tokens'] for qp in qps] + context = [(qp['passage'], qp['passage_tokens']) for qp in qps] + sample_id = [qp['id'] for qp in qps] + + _, query, query_mask, query_lengths = data.get_word_input( + data=question_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim) + _, passage, passage_mask, passage_lengths = data.get_word_input( + data=passage_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim) + + query_char, query_char_lengths = data.get_char_input( + data=question_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length) + + passage_char, passage_char_lengths = data.get_char_input( + data=passage_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length) + + if is_training: + answer_begin, answer_end = data.get_answer_begin_end(qps) + + if is_training: + feed_dict = {answer_net.query_word: query, + answer_net.query_mask: query_mask, + answer_net.query_lengths: query_lengths, + answer_net.passage_word: passage, + answer_net.passage_mask: passage_mask, + answer_net.passage_lengths: passage_lengths, + answer_net.query_char_ids: query_char, + answer_net.query_char_lengths: query_char_lengths, + answer_net.passage_char_ids: passage_char, + answer_net.passage_char_lengths: passage_char_lengths, + answer_net.answer_begin: answer_begin, + answer_net.answer_end: answer_end} + loss, 
_, = sess.run( + [answer_net.loss, answer_net.train_op], feed_dict=feed_dict) + if count % 100 == 0: + logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss) + loss_sum += loss + else: + feed_dict = {answer_net.query_word: query, + answer_net.query_mask: query_mask, + answer_net.query_lengths: query_lengths, + answer_net.passage_word: passage, + answer_net.passage_mask: passage_mask, + answer_net.passage_lengths: passage_lengths, + answer_net.query_char_ids: query_char, + answer_net.query_char_lengths: query_char_lengths, + answer_net.passage_char_ids: passage_char, + answer_net.passage_char_lengths: passage_char_lengths} + position1, position2 = sess.run( + [answer_net.begin_prob, answer_net.end_prob], feed_dict=feed_dict) + position1_result += position1.tolist() + position2_result += position2.tolist() + contexts += context + ids = np.concatenate((ids, sample_id)) + if count % 100 == 0: + logger.debug('%d %g except:%g', count, used, used / count * len(batches)) + loss = loss_sum / len(batches) + if is_training: + return loss + return loss, position1_result, position2_result, ids, contexts + + +def generate_predict_json(position1_result, position2_result, ids, passage_tokens): + ''' + Generate json by prediction. + ''' + predict_len = len(position1_result) + logger.debug('total prediction num is %s', str(predict_len)) + + answers = {} + for i in range(predict_len): + sample_id = ids[i] + passage, tokens = passage_tokens[i] + kbest = find_best_answer_span( + position1_result[i], position2_result[i], len(tokens), 23) + _, start, end = kbest[0] + answer = passage[tokens[start]['char_begin']:tokens[end]['char_end']] + answers[sample_id] = answer + logger.debug('generate predict done.') + return answers + + +def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False): + ''' + Generate data + ''' + global root_path + qp_pairs = data.load_from_file(path=path, is_training=is_training) + + tokenized_sent = 0 + # qp_pairs = qp_pairs[:1000]1 + for qp_pair in qp_pairs: + tokenized_sent += 1 + data.tokenize(qp_pair, tokenizer, is_training) + for word in qp_pair['question_tokens']: + word_vcb.add(word['word']) + for char in word['word']: + char_vcb.add(char) + for word in qp_pair['passage_tokens']: + word_vcb.add(word['word']) + for char in word['word']: + char_vcb.add(char) + + max_query_length = max(len(x['question_tokens']) for x in qp_pairs) + max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs) + #min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs) + cfg.max_query_length = max_query_length + cfg.max_passage_length = max_passage_length + + return qp_pairs + + +def train_with_graph(p_graph, qp_pairs, dev_qp_pairs): + ''' + Train a network from a specific graph. 
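+
+    Note: each layer's `hash_id` doubles as its TensorFlow variable scope
+    (see graph_to_tf.py), so weights of layers shared with a previously
+    trained graph can be restored through `init_from_checkpoint` using the
+    `restore_path` / `restore_shared` globals referenced below.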
+    '''
+    global sess
+    with tf.Graph().as_default():
+        train_model = GAG(cfg, embed, p_graph)
+        train_model.build_net(is_training=True)
+        tf.get_variable_scope().reuse_variables()
+        dev_model = GAG(cfg, embed, p_graph)
+        dev_model.build_net(is_training=False)
+        with tf.Session() as sess:
+            if restore_path is not None:
+                restore_mapping = dict(zip(restore_shared, restore_shared))
+                logger.debug('init shared variables from {}, restore_scopes: {}'.format(restore_path, restore_shared))
+                init_from_checkpoint(restore_path, restore_mapping)
+            logger.debug('init variables')
+            logger.debug(sess.run(tf.report_uninitialized_variables()))
+            init = tf.global_variables_initializer()
+            sess.run(init)
+            # writer = tf.summary.FileWriter('%s/graph/' % execution_path, sess.graph)
+            logger.debug('assign to graph')
+
+            saver = tf.train.Saver()
+            train_loss = None
+            bestacc = 0
+            patience = 5
+            patience_increase = 2
+            improvement_threshold = 0.995
+
+            for epoch in range(max_epoch):
+                logger.debug('begin to train')
+                train_batches = data.get_batches(qp_pairs, cfg.batch_size)
+                train_loss = run_epoch(train_batches, train_model, True)
+                logger.debug('epoch %d loss: %s', epoch, str(train_loss))
+                dev_batches = list(data.get_batches(
+                    dev_qp_pairs, cfg.batch_size))
+                _, position1, position2, ids, contexts = run_epoch(
+                    dev_batches, dev_model, False)
+
+                answers = generate_predict_json(
+                    position1, position2, ids, contexts)
+                if save_path is not None:
+                    logger.info('save prediction file to {}'.format(save_path))
+                    with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file:
+                        json.dump(answers, file)
+                else:
+                    answers = json.dumps(answers)
+                    answers = json.loads(answers)
+                cur_iter = epoch + 1
+
+                acc = evaluate.evaluate_with_predictions(
+                    args.dev_file, answers)
+
+                logger.debug('Send intermediate acc: %s', str(acc))
+                nni.report_intermediate_result(acc)
+
+                logger.debug('Send intermediate result done.')
+
+                if acc > bestacc:
+                    if acc * improvement_threshold > bestacc:
+                        patience = max(patience, cur_iter * patience_increase)
+                    bestacc = acc
+
+                    if save_path is not None:
+                        logger.info('save model & prediction to {}'.format(save_path))
+                        saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
+                        with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
+                            pickle.dump(
+                                (position1, position2, ids, contexts), file)
+                logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
+                if patience <= cur_iter:
+                    break
+            logger.debug('save done.')
+    return train_loss, bestacc
+
+
+embed = None
+char_vcb = None
+tokenizer = None
+word_vcb = None
+
+
+def load_data():
+    global embed, char_vcb, tokenizer, word_vcb
+    logger.debug('tokenize data')
+    tokenizer = data.WhitespaceTokenizer()
+
+    char_set = set()
+    word_set = set()
+    logger.debug('generate train data')
+    qp_pairs = generate_data(input_file, tokenizer,
+                             char_set, word_set, is_training=True)
+    logger.debug('generate dev data')
+    dev_qp_pairs = generate_data(
+        dev_file, tokenizer, char_set, word_set, is_training=False)
+    logger.debug('generate data done.')
+
+    char_vcb = {char: sample_id for sample_id, char in enumerate(char_set)}
+    word_vcb = {word: sample_id for sample_id, word in enumerate(word_set)}
+
+    timer.start()
+    logger.debug('read embedding table')
+
+    cfg.word_embed_dim = 300
+    embed = np.zeros((len(word_vcb), cfg.word_embed_dim), dtype=np.float32)
+
+    embedding = load_embedding(args.embedding_file)
+    for word, sample_id in word_vcb.items():
+        if word in embedding:
+            embed[sample_id] = embedding[word]
+
+    # add 
UNK into dict + unk = np.zeros((1, cfg.word_embed_dim), dtype=np.float32) + embed = np.concatenate((unk, embed), axis=0) + word_vcb = {key: value + 1 for key, value in word_vcb.items()} + + return qp_pairs, dev_qp_pairs + + +if __name__ == '__main__': + try: + args = get_config() + + root_path = os.path.expanduser(args.root_path) + input_file = os.path.expanduser(args.input_file) + dev_file = os.path.expanduser(args.dev_file) + max_epoch = args.max_epoch + + cfg = GAGConfig() + cfg.batch_size = args.batch_size + cfg.learning_rate = float(args.learning_rate) + cfg.dropout = args.dropout_rate + cfg.rnn_units = args.rnn_units + cfg.labelsmoothing = args.labelsmoothing + cfg.num_heads = args.num_heads + timer = Timer() + + qp_pairs, dev_qp_pairs = load_data() + logger.debug('Init finish.') + + original_params = nni.get_next_parameter() + ''' + with open('data.json') as f: + original_params = json.load(f) + ''' + p_graph = graph.graph_loads(original_params['graph']) + save_path = original_params['save_dir'] + os.makedirs(save_path) + restore_path = original_params['restore_dir'] + restore_shared = [hash_id + '/' for hash_id in original_params['shared_id']] if original_params['shared_id'] is not None else [] + ['word_embed', 'char_embed', 'char_encoding/'] + train_loss, best_acc = train_with_graph(p_graph, qp_pairs, dev_qp_pairs) + + logger.debug('Send best acc: %s', str(best_acc)) + nni.report_final_result(best_acc) + logger.debug('Send final result done') + except: + logger.exception('Catch exception in trial.py.') + raise diff --git a/examples/trials/weight_sharing/ga_squad/util.py b/examples/trials/weight_sharing/ga_squad/util.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9f363003ad86955af8c42c86578741506db367 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/util.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Util Module +''' + +import time + +import tensorflow as tf + + +def shape(tensor): + ''' + Get shape of variable. + Return type is tuple. + ''' + temp_s = tensor.get_shape() + return tuple([temp_s[i].value for i in range(0, len(temp_s))]) + + +def get_variable(name, temp_s): + ''' + Get variable by name. + ''' + return tf.Variable(tf.zeros(temp_s), name=name) + + +def dropout(tensor, drop_prob, is_training): + ''' + Dropout except test. 
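+    Dropout is applied only during training; `drop_prob` is the probability
+    of dropping a unit, which is converted to a keep probability below.
+    Illustrative call:
+
+        # h = dropout(h, drop_prob=0.1, is_training=True)   # keep_prob = 0.9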
+    '''
+    if not is_training:
+        return tensor
+    return tf.nn.dropout(tensor, 1.0 - drop_prob)
+
+
+class Timer:
+    '''
+    Simple timer for measuring elapsed time.
+    '''
+    def __init__(self):
+        self.__start = time.time()
+
+    def start(self):
+        '''
+        Restart the timer.
+        '''
+        self.__start = time.time()
+
+    def get_elapsed(self, restart=True):
+        '''
+        Return the time span since the last start, optionally restarting.
+        '''
+        end = time.time()
+        span = end - self.__start
+        if restart:
+            self.__start = end
+        return span
diff --git a/examples/tuners/customized_tuner/README.md b/examples/tuners/customized_tuner/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bde0a06c2e3b85247497f2ffe1e3e540be7646df
--- /dev/null
+++ b/examples/tuners/customized_tuner/README.md
@@ -0,0 +1,3 @@
+# How to install this customized tuner as a builtin tuner
+
+Refer to [this document](https://github.com/microsoft/nni/blob/master/docs/en_US/Tuner/InstallCustomizedTuner.md) to install this customized tuner as a builtin tuner.
\ No newline at end of file
diff --git a/examples/tuners/customized_tuner/README_zh_CN.md b/examples/tuners/customized_tuner/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ede4d17222de92d12237781a0743b405940cbd3
--- /dev/null
+++ b/examples/tuners/customized_tuner/README_zh_CN.md
@@ -0,0 +1,3 @@
+# 如何将自定义的 Tuner 安装为内置 Tuner
+
+参考[文档](https://github.com/microsoft/nni/blob/master/docs/zh_CN/Tuner/InstallCustomizedTuner.md), 安装自定义 Tuner。
\ No newline at end of file
diff --git a/examples/tuners/customized_tuner/demo_tuner/__init__.py b/examples/tuners/customized_tuner/demo_tuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe22174ff3cca3706ba69156ca8193e3fd9f95b7
--- /dev/null
+++ b/examples/tuners/customized_tuner/demo_tuner/__init__.py
@@ -0,0 +1 @@
+from .demo_tuner import DemoTuner, MyClassArgsValidator
diff --git a/examples/tuners/customized_tuner/demo_tuner/demo_tuner.py b/examples/tuners/customized_tuner/demo_tuner/demo_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..1881d831235feeadb38f15f8b32d406b16599867
--- /dev/null
+++ b/examples/tuners/customized_tuner/demo_tuner/demo_tuner.py
@@ -0,0 +1,35 @@
+import random
+import numpy as np
+from nni.tuner import Tuner
+from nni.utils import ClassArgsValidator
+
+class DemoTuner(Tuner):
+    def __init__(self, optimize_mode='maximize'):
+        # optimize_mode is used to demo how to create ClassArgsValidator
+        self.optimize_mode = optimize_mode
+
+    def update_search_space(self, search_space):
+        self._space = search_space
+
+    def generate_parameters(self, parameter_id, **kwargs):
+        params = {}
+        for k in self._space:
+            t, v = self._space[k]['_type'], self._space[k]['_value']
+            if t == 'choice':
+                params[k] = random.choice(v)
+            elif t == 'randint':
+                params[k] = random.choice(range(v[0], v[1]))
+            elif t == 'uniform':
+                params[k] = np.random.uniform(v[0], v[1])
+            else:
+                raise RuntimeError('parameter type {} is not supported by DemoTuner!'.format(t))
+        return params
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        pass
+
+class MyClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        if 'optimize_mode' in kwargs:
+            assert kwargs['optimize_mode'] in ['maximize', 'minimize'], \
+                'optimize_mode {} is invalid!'.format(kwargs['optimize_mode'])
diff --git a/examples/tuners/customized_tuner/meta_file.yml b/examples/tuners/customized_tuner/meta_file.yml
new file mode 100644
index 
0000000000000000000000000000000000000000..64a79f6c9fc95dfa0dbe66cd8b34174f846fffef --- /dev/null +++ b/examples/tuners/customized_tuner/meta_file.yml @@ -0,0 +1,4 @@ +algoType: tuner +builtinName: demotuner +className: demo_tuner.DemoTuner +classArgsValidator: demo_tuner.MyClassArgsValidator diff --git a/examples/tuners/customized_tuner/setup.py b/examples/tuners/customized_tuner/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9c321149173300df7b3f87ff7540d7eb76183f41 --- /dev/null +++ b/examples/tuners/customized_tuner/setup.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import setuptools + +setuptools.setup( + name = 'demo-tuner', + version = '0.1', + packages = setuptools.find_packages(exclude=['*test*']), + + python_requires = '>=3.6', + classifiers = [ + 'Programming Language :: Python :: 3', + 'License :: OSI Approved :: MIT License', + 'Operating System :: ' + ], + author = 'Microsoft NNI Team', + author_email = 'nni@microsoft.com', + description = 'NNI control for Neural Network Intelligence project', + license = 'MIT', + url = 'https://github.com/Microsoft/nni' +) diff --git a/examples/tuners/ga_customer_tuner/README.md b/examples/tuners/ga_customer_tuner/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0f80a53918288b54a9f26ebed634e8ea2bfb622b --- /dev/null +++ b/examples/tuners/ga_customer_tuner/README.md @@ -0,0 +1,15 @@ +# How to use ga_customer_tuner? +This tuner is a customized tuner which only suitable for trial whose code path is "~/nni/examples/trials/ga_squad", +type `cd ~/nni/examples/trials/ga_squad` and check readme.md to get more information for ga_squad trial. + +# config +If you want to use ga_customer_tuner in your experiment, you could set config file as following format: + +``` +tuner: + codeDir: ~/nni/examples/tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize +``` diff --git a/examples/tuners/ga_customer_tuner/README_zh_CN.md b/examples/tuners/ga_customer_tuner/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..26b73955994a489d99e355f9c95cbe3cf37b01b7 --- /dev/null +++ b/examples/tuners/ga_customer_tuner/README_zh_CN.md @@ -0,0 +1,14 @@ +# 如何使用 ga_customer_tuner? + +此定制的 Tuner 仅适用于代码 "~/nni/examples/trials/ga_squad",输入 `cd ~/nni/examples/trials/ga_squad` 查看 readme.md 来了解 ga_squad 的更多信息。 + +# 配置 + +如果要在 Experiment 中使用 ga_customer_tuner 可按照下列格式来配置: + + tuner: + codeDir: ~/nni/examples/tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize \ No newline at end of file diff --git a/examples/tuners/ga_customer_tuner/__init__.py b/examples/tuners/ga_customer_tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/tuners/ga_customer_tuner/customer_tuner.py b/examples/tuners/ga_customer_tuner/customer_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..2620d86499f2aebc2a4965fe183ac3d86e4809f8 --- /dev/null +++ b/examples/tuners/ga_customer_tuner/customer_tuner.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +from graph import * + +import copy +import json +import logging +import random +import numpy as np + +from nni.tuner import Tuner +from nni.utils import extract_scalar_reward + +logger = logging.getLogger('ga_customer_tuner') + + +@unique +class OptimizeMode(Enum): + Minimize = 'minimize' + Maximize = 'maximize' + + +def init_population(population_size=32): + population = [] + graph = Graph(4, + input=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')], + output=[Layer(LayerType.output.value, input=[4], size='x'), Layer(LayerType.output.value, input=[5], size='y')], + hide=[Layer(LayerType.attention.value, input=[0, 1], output=[2]), Layer(LayerType.attention.value, input=[1, 0], output=[3])]) + for _ in range(population_size): + g = copy.deepcopy(graph) + for _ in range(1): + g.mutation() + population.append(Individual(g, result=None)) + return population + + +class Individual(object): + def __init__(self, config=None, info=None, result=None, save_dir=None): + self.config = config + self.result = result + self.info = info + self.restore_dir = None + self.save_dir = save_dir + + def __str__(self): + return "info: " + str(self.info) + ", config :" + str(self.config) + ", result: " + str(self.result) + + def mutation(self, config=None, info=None, save_dir=None): + self.result = None + if config is not None: + self.config = config + self.config.mutation() + self.restore_dir = self.save_dir + self.save_dir = save_dir + self.info = info + + +class CustomerTuner(Tuner): + def __init__(self, optimize_mode, population_size = 32): + self.optimize_mode = OptimizeMode(optimize_mode) + self.population = init_population(population_size) + + assert len(self.population) == population_size + logger.debug('init population done.') + return + + def generate_parameters(self, parameter_id, **kwargs): + """Returns a set of trial graph config, as a serializable object. 
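+
+        Selection strategy, as implemented below: hand out any individual that
+        has not been evaluated yet; otherwise sample two individuals at random,
+        keep the fitter one, and return a mutated copy of it (a simple
+        tournament-style GA step).
+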
+        parameter_id : int
+        """
+        if len(self.population) <= 0:
+            logger.debug("the population is empty.")
+            raise Exception('The population is empty')
+        pos = -1
+        for i in range(len(self.population)):
+            if self.population[i].result is None:
+                pos = i
+                break
+        if pos != -1:
+            indiv = copy.deepcopy(self.population[pos])
+            self.population.pop(pos)
+            temp = json.loads(graph_dumps(indiv.config))
+        else:
+            random.shuffle(self.population)
+            if self.population[0].result < self.population[1].result:
+                self.population[0] = self.population[1]
+            indiv = copy.deepcopy(self.population[0])
+            self.population.pop(1)
+            indiv.mutation()
+            graph = indiv.config
+            temp = json.loads(graph_dumps(graph))
+        logger.debug('generate_parameter return value is:')
+        logger.debug(temp)
+        return temp
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        '''
+        Record an observation of the objective function.
+        parameter_id : int
+        parameters : dict of parameters
+        value : final metrics of the trial, including reward
+        '''
+        reward = extract_scalar_reward(value)
+        if self.optimize_mode is OptimizeMode.Minimize:
+            reward = -reward
+
+        logger.debug('receive trial result is:\n')
+        logger.debug(str(parameters))
+        logger.debug(str(reward))
+
+        indiv = Individual(graph_loads(parameters), result=reward)
+        self.population.append(indiv)
+        return
+
+    def update_search_space(self, data):
+        pass
+
+if __name__ == '__main__':
+    tuner = CustomerTuner(OptimizeMode.Maximize)
+    config = tuner.generate_parameters(0)
+    with open('./data.json', 'w') as outfile:
+        json.dump(config, outfile)
+    tuner.receive_trial_result(0, config, 0.99)
diff --git a/examples/tuners/ga_customer_tuner/graph.py b/examples/tuners/ga_customer_tuner/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..97032ccdbf8c6e76ae8647e5238c5bbcbc2bc4a5
--- /dev/null
+++ b/examples/tuners/ga_customer_tuner/graph.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+
+import copy
+import json
+import random
+from enum import Enum, unique
+
+@unique
+class LayerType(Enum):
+    attention = 0
+    self_attention = 1
+    rnn = 2
+    input = 3
+    output = 4
+
+class Layer(object):
+    def __init__(self, type, input=None, output=None, size=None):
+        self.input = input if input is not None else []
+        self.output = output if output is not None else []
+        self.type = type
+        self.is_delete = False
+        self.size = size
+        if type == LayerType.attention.value:
+            self.input_size = 2
+            self.output_size = 1
+        elif type == LayerType.rnn.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif type == LayerType.self_attention.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif type == LayerType.input.value:
+            self.input_size = 0
+            self.output_size = 1
+        elif type == LayerType.output.value:
+            self.input_size = 1
+            self.output_size = 0
+        else:
+            raise ValueError('unsupported layer type: {}'.format(type))
+
+    def set_size(self, layer_id, size):
+        if self.type == LayerType.attention.value:
+            if self.input[0] == layer_id:
+                self.size = size
+        if self.type == LayerType.rnn.value:
+            self.size = size
+        if self.type == LayerType.self_attention.value:
+            self.size = size
+        if self.type == LayerType.output.value:
+            if self.size != size:
+                return False
+        return True
+
+    def clear_size(self):
+        if self.type in (LayerType.attention.value, LayerType.rnn.value,
+                         LayerType.self_attention.value):
+            self.size = None
+
+    def __str__(self):
+        return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(
+            self.type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size)
+
+def graph_dumps(graph):
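+    # Serialize the Graph (including its Layer objects) to JSON by dumping
+    # each object's __dict__; graph_loads below is the inverse operation.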
return json.dumps(graph, default=lambda obj: obj.__dict__) + +def graph_loads(js): + layers = [] + for layer in js['layers']: + p = Layer(layer['type'],layer['input'],layer['output'],layer['size']) + p.is_delete = layer['is_delete'] + layers.append(p) + graph = Graph(js['max_layer_num'],[], [], []) + graph.layers = layers + return graph + +class Graph(object): + def __init__(self, max_layer_num, input, output, hide): + self.layers = [] + self.max_layer_num = max_layer_num + + for layer in input: + self.layers.append(layer) + for layer in output: + self.layers.append(layer) + if hide is not None: + for layer in hide: + self.layers.append(layer) + assert self.is_legal() + + def is_topology(self, layers=None): + if layers == None: + layers = self.layers + layers_nodle = [] + xx = [] + for i in range(len(layers)): + if layers[i].is_delete == False: + layers_nodle.append(i) + while True: + flag_break = True + layers_toremove = [] + for layer1 in layers_nodle: + flag_arrive = True + for layer2 in layers[layer1].input: + if layer2 in layers_nodle: + flag_arrive = False + if flag_arrive == True: + for layer2 in layers[layer1].output: + if layers[layer2].set_size(layer1, layers[layer1].size) == False: # Size is error + return False + layers_toremove.append(layer1) + xx.append(layer1) + flag_break = False + for layer in layers_toremove: + layers_nodle.remove(layer) + xx.append('|') + if flag_break == True: + break + if len(layers_nodle) > 0: # There is loop in graph || some layers can't to arrive + return False + return xx + + def layer_num(self, layers=None): + if layers == None: + layers = self.layers + layer_num = 0 + for layer in layers: + if layer.is_delete == False and layer.type != LayerType.input.value and layer.type != LayerType.output.value: + layer_num += 1 + return layer_num + + def is_legal(self, layers=None): + if layers == None: + layers = self.layers + + for layer in layers: + if layer.is_delete == False: + if len(layer.input) != layer.input_size: + return False + if len(layer.output) < layer.output_size: + return False + + # layer_num <= max_layer_num + if self.layer_num(layers) > self.max_layer_num: + return False + + if self.is_topology(layers) == False: # There is loop in graph || some layers can't to arrive + return False + + return True + + def mutation(self, only_add=False): + types = [] + if self.layer_num() < self.max_layer_num: + types.append(0) + types.append(1) + if self.layer_num() > 0: + types.append(2) + types.append(3) + # 0 : add a layer , delete a edge + # 1 : add a layer , change a edge + # 2 : delete a layer, delete a edge + # 3 : delete a layer, change a edge + type = random.choice(types) + layer_type = random.choice([LayerType.attention.value, LayerType.self_attention.value, LayerType.rnn.value]) + layers = copy.deepcopy(self.layers) + cnt_try = 0 + while True: + layers_in = [] + layers_out = [] + layers_del = [] + for layer1 in range(len(layers)): + layer = layers[layer1] + if layer.is_delete == False: + if layer.type != LayerType.output.value: + layers_in.append(layer1) + if layer.type != LayerType.input.value: + layers_out.append(layer1) + if layer.type != LayerType.output.value and layer.type != LayerType.input.value: + layers_del.append(layer1) + if type <= 1: + new_id = len(layers) + out = random.choice(layers_out) + input = [] + output = [out] + pos = random.randint(0, len(layers[out].input) - 1) + last_in = layers[out].input[pos] + layers[out].input[pos] = new_id + if type == 0: + layers[last_in].output.remove(out) + if type == 1: + 
layers[last_in].output.remove(out) + layers[last_in].output.append(new_id) + input = [last_in] + lay = Layer(type=layer_type, input=input, output=output) + while len(input) < lay.input_size: + layer1 = random.choice(layers_in) + input.append(layer1) + layers[layer1].output.append(new_id) + lay.input = input + layers.append(lay) + else: + layer1 = random.choice(layers_del) + for layer2 in layers[layer1].output: + layers[layer2].input.remove(layer1) + if type == 2: + v2 = random.choice(layers_in) + else: + v2 = random.choice(layers[layer1].input) + layers[layer2].input.append(v2) + layers[v2].output.append(layer2) + for layer2 in layers[layer1].input: + layers[layer2].output.remove(layer1) + layers[layer1].is_delete = True + + if self.is_legal(layers): + self.layers = layers + break + else: + layers = copy.deepcopy(self.layers) + cnt_try += 1 + + def __str__(self): + info = "" + for id, layer in enumerate(self.layers): + if layer.is_delete == False: + info += 'id:%d ' % id + str(layer) + '\n' + return info + +if __name__ == '__main__': + graph = Graph(10, + input=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')], + output=[Layer(LayerType.output.value, input=[4], size='x'), Layer(LayerType.output.value, input=[5], size='y')], + hide=[Layer(LayerType.attention.value, input=[0, 1], output=[2]), Layer(LayerType.attention.value, input=[1, 0], output=[3])]) + + s = graph_dumps(graph) + g = graph_loads(json.loads(s)) + print(g) + print(s) + + s = '''{"count":2,"array":[{"input":%s,"output":{"output":0.7}}]}'''%s + print(len(s)) + print(s) \ No newline at end of file diff --git a/examples/tuners/mnist_keras_customized_advisor/config.yml b/examples/tuners/mnist_keras_customized_advisor/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..0d8d987ac3e949fe8ce825e9e876c2364898b642 --- /dev/null +++ b/examples/tuners/mnist_keras_customized_advisor/config.yml @@ -0,0 +1,20 @@ +authorName: default +experimentName: example_customized_advisor +trialConcurrency: 4 +maxExecDuration: 1h +maxTrialNum: 200 +#choice: local, remote, pai +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +advisor: + codeDir: . + classFileName: dummy_advisor.py + className: DummyAdvisor + classArgs: + k: 3 +trial: + command: python3 mnist_keras.py --epochs 100 --num_train 600 --num_test 100 + codeDir: . + gpuNum: 0 diff --git a/examples/tuners/mnist_keras_customized_advisor/dummy_advisor.py b/examples/tuners/mnist_keras_customized_advisor/dummy_advisor.py new file mode 100644 index 0000000000000000000000000000000000000000..5123b598fa09c5a63b66a2df6691c79be95cb92e --- /dev/null +++ b/examples/tuners/mnist_keras_customized_advisor/dummy_advisor.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import logging +from collections import defaultdict + +import json_tricks +import numpy as np +from nni import parameter_expressions as param +from nni.msg_dispatcher_base import MsgDispatcherBase +from nni.protocol import CommandType, send +from nni.utils import MetricType + +logger = logging.getLogger('customized_advisor') + + +class DummyAdvisor(MsgDispatcherBase): + """WARNING: Advisor API is subject to change in future releases. + + This advisor creates a new trial when validation accuracy of any one of the trials just dropped. + The trial is killed if the validation accuracy doesn't improve for at least k last-reported metrics. + To demonstrate the high flexibility of writing advisors, we don't use tuners or the standard definition of + search space. This is just a demo to customize an advisor. It's not intended to make any sense. + """ + def __init__(self, k=3): + super(DummyAdvisor, self).__init__() + self.k = k + self.random_state = np.random.RandomState() + + def handle_initialize(self, data): + logger.info("Advisor initialized: {}".format(data)) + self.handle_update_search_space(data) + self.parameters_count = 0 + self.parameter_best_metric = defaultdict(float) + self.parameter_cooldown = defaultdict(int) + send(CommandType.Initialized, '') + + def _send_new_trial(self): + self.parameters_count += 1 + new_trial = { + "parameter_id": self.parameters_count, + "parameters": { + "optimizer": param.choice(self.searchspace_json["optimizer"], self.random_state), + "learning_rate": param.loguniform(self.searchspace_json["learning_rate"][0], + self.searchspace_json["learning_rate"][1], + self.random_state) + }, + "parameter_source": "algorithm" + } + logger.info("New trial sent: {}".format(new_trial)) + send(CommandType.NewTrialJob, json_tricks.dumps(new_trial)) + + def handle_request_trial_jobs(self, data): + logger.info("Request trial jobs: {}".format(data)) + for _ in range(data): + self._send_new_trial() + + def handle_update_search_space(self, data): + logger.info("Search space update: {}".format(data)) + self.searchspace_json = data + + def handle_trial_end(self, data): + logger.info("Trial end: {}".format(data)) # do nothing + + def handle_report_metric_data(self, data): + logger.info("Metric reported: {}".format(data)) + if data['type'] == MetricType.REQUEST_PARAMETER: + raise ValueError("Request parameter not supported") + elif data["type"] == MetricType.PERIODICAL: + parameter_id = data["parameter_id"] + if data["value"] > self.parameter_best_metric[parameter_id]: + self.parameter_best_metric[parameter_id] = data["value"] + self.parameter_cooldown[parameter_id] = 0 + else: + self.parameter_cooldown[parameter_id] += 1 + logger.info("Accuracy dropped, cooldown {}, sending a new trial".format( + self.parameter_cooldown[parameter_id])) + self._send_new_trial() + if self.parameter_cooldown[parameter_id] >= self.k: + logger.info("Send kill signal to {}".format(data)) + send(CommandType.KillTrialJob, json_tricks.dumps(data["trial_job_id"])) diff --git 
a/examples/tuners/mnist_keras_customized_advisor/mnist_keras.py b/examples/tuners/mnist_keras_customized_advisor/mnist_keras.py new file mode 100644 index 0000000000000000000000000000000000000000..ee74a085ca0a7f379e7f9d55355c877c9468066b --- /dev/null +++ b/examples/tuners/mnist_keras_customized_advisor/mnist_keras.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import argparse +import logging + +import os +import keras +import numpy as np +from keras import backend as K +from keras.callbacks import TensorBoard +from keras.datasets import mnist +from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D +from keras.models import Sequential + +import nni + +LOG = logging.getLogger('mnist_keras') +K.set_image_data_format('channels_last') +TENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR'] + +H, W = 28, 28 +NUM_CLASSES = 10 + + +def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES): + """ + Create simple convolutional model + """ + layers = [ + Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape), + Conv2D(64, (3, 3), activation='relu'), + MaxPooling2D(pool_size=(2, 2)), + Flatten(), + Dense(100, activation='relu'), + Dense(num_classes, activation='softmax') + ] + + model = Sequential(layers) + + if hyper_params['optimizer'] == 'Adam': + optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate']) + else: + optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9) + model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy']) + + return model + + +def load_mnist_data(args): + """ + Load MNIST dataset + """ + (x_train, y_train), (x_test, y_test) = mnist.load_data() + + x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] + x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] + y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] + y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] + + LOG.debug('x_train shape: %s', (x_train.shape,)) + LOG.debug('x_test shape: %s', (x_test.shape,)) + + return x_train, y_train, x_test, y_test + + +class SendMetrics(keras.callbacks.Callback): + """ + Keras callback to send metrics to NNI framework + """ + + def on_epoch_end(self, epoch, logs={}): + """ + Run on end of each epoch + """ + LOG.debug(logs) + # Should this be val_acc or val_accuracy? 
Keras 2.3 renamed 'val_acc' to 'val_accuracy', so the key depends on the installed Keras version. + nni.report_intermediate_result(logs["val_accuracy"]) + + +def train(args, params): + """ + Train model + """ + x_train, y_train, x_test, y_test = load_mnist_data(args) + model = create_mnist_model(params) + + model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1, + validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)]) + + _, acc = model.evaluate(x_test, y_test, verbose=0) + LOG.debug('Final result is: %s', acc) + nni.report_final_result(acc) + + +def generate_default_params(): + """ + Generate default hyper parameters + """ + return { + 'optimizer': 'Adam', + 'learning_rate': 0.001 + } + + +if __name__ == '__main__': + PARSER = argparse.ArgumentParser() + PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False) + PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False) + PARSER.add_argument("--num_train", type=int, default=60000, + help="Number of train samples to be used, maximum 60000", required=False) + PARSER.add_argument("--num_test", type=int, default=10000, help="Number of test samples to be used, maximum 10000", + required=False) + + ARGS, UNKNOWN = PARSER.parse_known_args() + + # get parameters from tuner + RECEIVED_PARAMS = nni.get_next_parameter() + LOG.debug(RECEIVED_PARAMS) + PARAMS = generate_default_params() + PARAMS.update(RECEIVED_PARAMS) + # train + train(ARGS, PARAMS) diff --git a/examples/tuners/mnist_keras_customized_advisor/search_space.json b/examples/tuners/mnist_keras_customized_advisor/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..dadb04bc25114feb7369e9c800703ef679d276b6 --- /dev/null +++ b/examples/tuners/mnist_keras_customized_advisor/search_space.json @@ -0,0 +1,5 @@ +{ + "README": "To demonstrate the flexibility, this search space does not follow the standard definition.", + "optimizer": ["Adam", "SGD"], + "learning_rate": [0.001, 0.1] +} diff --git a/examples/tuners/random_nas_tuner/random_nas_tuner.py b/examples/tuners/random_nas_tuner/random_nas_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..4888176a0b0cbabd17449879e42ea420390e1084 --- /dev/null +++ b/examples/tuners/random_nas_tuner/random_nas_tuner.py @@ -0,0 +1,54 @@ +import numpy as np + +from nni.tuner import Tuner + + +def random_archi_generator(nas_ss, random_state): + '''Randomly sample an architecture from the NAS search space. + ''' + chosen_arch = {} + for key, val in nas_ss.items(): + assert val['_type'] in ['layer_choice', 'input_choice'], \ + "Random NAS Tuner only receives NAS search space whose _type is 'layer_choice' or 'input_choice'" + if val['_type'] == 'layer_choice': + choices = val['_value'] + index = random_state.randint(len(choices)) + chosen_arch[key] = {'_value': choices[index], '_idx': index} + elif val['_type'] == 'input_choice': + choices = val['_value']['candidates'] + n_chosen = val['_value']['n_chosen'] + chosen = [] + idxs = [] + for _ in range(n_chosen): + index = random_state.randint(len(choices)) + chosen.append(choices[index]) + idxs.append(index) + chosen_arch[key] = {'_value': chosen, '_idx': idxs} + else: + raise ValueError('Unknown key %s and value %s' % (key, val)) + return chosen_arch + + +class RandomNASTuner(Tuner): + '''A tuner that samples NAS architectures uniformly at random. + ''' + + def __init__(self): + self.searchspace_json = None + self.random_state = None + + def update_search_space(self, search_space): + '''Record the search space and initialize the random state. + ''' + self.searchspace_json = search_space + self.random_state =
np.random.RandomState() + + def generate_parameters(self, parameter_id, **kwargs): + '''generate + ''' + return random_archi_generator(self.searchspace_json, self.random_state) + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + '''receive + ''' + pass diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/README.md b/examples/tuners/weight_sharing/ga_customer_tuner/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0f80a53918288b54a9f26ebed634e8ea2bfb622b --- /dev/null +++ b/examples/tuners/weight_sharing/ga_customer_tuner/README.md @@ -0,0 +1,15 @@ +# How to use ga_customer_tuner? +This tuner is a customized tuner which only suitable for trial whose code path is "~/nni/examples/trials/ga_squad", +type `cd ~/nni/examples/trials/ga_squad` and check readme.md to get more information for ga_squad trial. + +# config +If you want to use ga_customer_tuner in your experiment, you could set config file as following format: + +``` +tuner: + codeDir: ~/nni/examples/tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize +``` diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/README_zh_CN.md b/examples/tuners/weight_sharing/ga_customer_tuner/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..26b73955994a489d99e355f9c95cbe3cf37b01b7 --- /dev/null +++ b/examples/tuners/weight_sharing/ga_customer_tuner/README_zh_CN.md @@ -0,0 +1,14 @@ +# 如何使用 ga_customer_tuner? + +此定制的 Tuner 仅适用于代码 "~/nni/examples/trials/ga_squad",输入 `cd ~/nni/examples/trials/ga_squad` 查看 readme.md 来了解 ga_squad 的更多信息。 + +# 配置 + +如果要在 Experiment 中使用 ga_customer_tuner 可按照下列格式来配置: + + tuner: + codeDir: ~/nni/examples/tuners/ga_customer_tuner + classFileName: customer_tuner.py + className: CustomerTuner + classArgs: + optimize_mode: maximize \ No newline at end of file diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/__init__.py b/examples/tuners/weight_sharing/ga_customer_tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py b/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..68b110d051fe88b3d26bc81a29271b826aeb3934 --- /dev/null +++ b/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py @@ -0,0 +1,225 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +import copy +import json +import logging +import random +import os + +from threading import Event, Lock, current_thread + +from nni.tuner import Tuner +from nni.utils import extract_scalar_reward + +from graph import Graph, Layer, LayerType, Enum, graph_dumps, graph_loads, unique + +logger = logging.getLogger('ga_customer_tuner') + + +@unique +class OptimizeMode(Enum): + Minimize = 'minimize' + Maximize = 'maximize' + + + + +class Individual(object): + """ + Basic Unit for evolution algorithm + """ + def __init__(self, graph_cfg: Graph = None, info=None, result=None, indiv_id=None): + self.config = graph_cfg + self.result = result + self.info = info + self.indiv_id = indiv_id + self.parent_id = None + self.shared_ids = {layer.hash_id for layer in self.config.layers if layer.is_delete is False} + + def __str__(self): + return "info: " + str(self.info) + ", config :" + str(self.config) + ", result: " + str(self.result) + + def mutation(self, indiv_id: int, graph_cfg: Graph = None, info=None): + self.result = None + if graph_cfg is not None: + self.config = graph_cfg + self.config.mutation() + self.info = info + self.parent_id = self.indiv_id + self.indiv_id = indiv_id + self.shared_ids.intersection_update({layer.hash_id for layer in self.config.layers if layer.is_delete is False}) + + +class CustomerTuner(Tuner): + """ + NAS Tuner using Evolution Algorithm, with weight sharing enabled + """ + def __init__(self, optimize_mode, save_dir_root, population_size=32, graph_max_layer=6, graph_min_layer=3): + self.optimize_mode = OptimizeMode(optimize_mode) + self.indiv_counter = 0 + self.events = [] + self.thread_lock = Lock() + self.save_dir_root = save_dir_root + self.population = self.init_population(population_size, graph_max_layer, graph_min_layer) + assert len(self.population) == population_size + logger.debug('init population done.') + return + + def generate_new_id(self): + """ + generate new id and event hook for new Individual + """ + self.events.append(Event()) + indiv_id = self.indiv_counter + self.indiv_counter += 1 + return indiv_id + + def save_dir(self, indiv_id): + if indiv_id is None: + return None + else: + return os.path.join(self.save_dir_root, str(indiv_id)) + + def init_population(self, population_size, graph_max_layer, graph_min_layer): + """ + initialize populations for evolution tuner + """ + population = [] + graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer, + inputs=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')], + output=[Layer(LayerType.output.value, inputs=[4], size='x'), Layer(LayerType.output.value, inputs=[5], size='y')], + hide=[Layer(LayerType.attention.value, inputs=[0, 1], output=[2]), + Layer(LayerType.attention.value, inputs=[1, 0], output=[3])]) + for _ in range(population_size): + graph_tmp = copy.deepcopy(graph) + graph_tmp.mutation() + population.append(Individual(indiv_id=self.generate_new_id(), graph_cfg=graph_tmp, result=None)) + return population + + def generate_parameters(self, parameter_id, **kwargs): + """Returns a set of trial graph config, as a serializable object. 
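+ Parameters are drawn from the population: an individual that has not been + evaluated yet is served first; otherwise two random individuals are compared, + the winner is cloned and mutated into a child (a size-2 tournament) and the + loser is removed from the population.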
+ An example configuration: + ```json + { + "shared_id": [ + "4a11b2ef9cb7211590dfe81039b27670", + "370af04de24985e5ea5b3d72b12644c9", + "11f646e9f650f5f3fedc12b6349ec60f", + "0604e5350b9c734dd2d770ee877cfb26", + "6dbeb8b022083396acb721267335f228", + "ba55380d6c84f5caeb87155d1c5fa654" + ], + "graph": { + "layers": [ + ... + { + "hash_id": "ba55380d6c84f5caeb87155d1c5fa654", + "is_delete": false, + "size": "x", + "graph_type": 0, + "output": [ + 6 + ], + "output_size": 1, + "input": [ + 7, + 1 + ], + "input_size": 2 + }, + ... + ] + }, + "restore_dir": "/mnt/nfs/nni/ga_squad/87", + "save_dir": "/mnt/nfs/nni/ga_squad/95" + } + ``` + `restore_dir` is the path from which to load the previously trained model weights; if null, initialize from scratch. + `save_dir` is the path where the model trained by the current trial is saved. + `graph` is the configuration of the model network. + Note: each layer configuration has a `hash_id` property, + which tells the tuner & trial code whether to share trained weights or not. + `shared_id` lists the hash_ids of the layers whose weights should be shared with the previously trained model. + """ + logger.debug('acquiring lock for param {}'.format(parameter_id)) + self.thread_lock.acquire() + logger.debug('lock for current thread acquired') + if not self.population: + logger.debug("the population is empty, cannot generate parameters.") + raise Exception('The population is empty') + pos = -1 + for i in range(len(self.population)): + if self.population[i].result is None: + pos = i + break + if pos != -1: + indiv = copy.deepcopy(self.population[pos]) + self.population.pop(pos) + graph_param = json.loads(graph_dumps(indiv.config)) + else: + random.shuffle(self.population) + if self.population[0].result < self.population[1].result: + self.population[0] = self.population[1] + indiv = copy.deepcopy(self.population[0]) + self.population.pop(1) + indiv.mutation(indiv_id=self.generate_new_id()) + graph_param = json.loads(graph_dumps(indiv.config)) + param_json = { + 'graph': graph_param, + 'restore_dir': self.save_dir(indiv.parent_id), + 'save_dir': self.save_dir(indiv.indiv_id), + 'shared_id': list(indiv.shared_ids) if indiv.parent_id is not None else None, + } + logger.debug('generate_parameters return value is:') + logger.debug(param_json) + logger.debug('releasing lock') + self.thread_lock.release() + if indiv.parent_id is not None: + logger.debug("new trial {} pending on parent trial {}".format(indiv.indiv_id, indiv.parent_id)) + self.events[indiv.parent_id].wait() + logger.debug("trial {} ready".format(indiv.indiv_id)) + return param_json + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + ''' + Record an observation of the objective function + parameter_id : int + parameters : dict of parameters + value: final metrics of the trial, including reward + ''' + logger.debug('acquiring lock for param {}'.format(parameter_id)) + self.thread_lock.acquire() + logger.debug('lock for current thread acquired') + reward = extract_scalar_reward(value) + if self.optimize_mode is OptimizeMode.Minimize: + reward = -reward + + logger.debug('receive trial result is:\n') + logger.debug(str(parameters)) + logger.debug(str(reward)) + + indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]), + graph_cfg=graph_loads(parameters['graph']), result=reward) + self.population.append(indiv) + logger.debug('releasing lock') + self.thread_lock.release() + self.events[indiv.indiv_id].set() + + def update_search_space(self, data): + pass diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/graph.py
b/examples/tuners/weight_sharing/ga_customer_tuner/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..8e675a06ffe9ad1ea6bb72bbd6b77b582fbddae7 --- /dev/null +++ b/examples/tuners/weight_sharing/ga_customer_tuner/graph.py @@ -0,0 +1,336 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +''' +Graph is a custom-defined class; this module contains the classes and functions related to graphs. +''' + + +import copy +import hashlib +import logging +import json +import random +from collections import deque +from enum import Enum, unique +from typing import Iterable + +import numpy as np + +_logger = logging.getLogger('ga_squad_graph') + +@unique +class LayerType(Enum): + ''' + Layer type + ''' + attention = 0 + self_attention = 1 + rnn = 2 + input = 3 + output = 4 + +class Layer(object): + ''' + Layer class, which contains the information of a single layer in the graph. + ''' + def __init__(self, graph_type, inputs=None, output=None, size=None, hash_id=None): + self.input = inputs if inputs is not None else [] + self.output = output if output is not None else [] + self.graph_type = graph_type + self.is_delete = False + self.size = size + self.hash_id = hash_id + if graph_type == LayerType.attention.value: + self.input_size = 2 + self.output_size = 1 + elif graph_type == LayerType.rnn.value: + self.input_size = 1 + self.output_size = 1 + elif graph_type == LayerType.self_attention.value: + self.input_size = 1 + self.output_size = 1 + elif graph_type == LayerType.input.value: + self.input_size = 0 + self.output_size = 1 + if self.hash_id is None: + hasher = hashlib.md5() + hasher.update(np.random.bytes(100)) + self.hash_id = hasher.hexdigest() + elif graph_type == LayerType.output.value: + self.input_size = 1 + self.output_size = 0 + else: + raise ValueError('Unsupported LayerType: {}'.format(graph_type)) + + def update_hash(self, layers: Iterable): + """ + Calculate the `hash_id` of a Layer, which is determined by its own properties + and the `hash_id`s of its input layers. + """ + if self.graph_type == LayerType.input.value: + return + hasher = hashlib.md5() + hasher.update(LayerType(self.graph_type).name.encode('ascii')) + hasher.update(str(self.size).encode('ascii')) + for i in self.input: + if layers[i].hash_id is None: + raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i])) + hasher.update(layers[i].hash_id.encode('ascii')) + self.hash_id = hasher.hexdigest() + + def set_size(self, graph_id, size): + ''' + Set size. + ''' + if self.graph_type == LayerType.attention.value: + if self.input[0] == graph_id: + self.size = size + if self.graph_type == LayerType.rnn.value: + self.size = size + if self.graph_type == LayerType.self_attention.value: + self.size = size + if self.graph_type == LayerType.output.value: + if self.size != size: + return False + return True + + def clear_size(self): + ''' + Clear size. + ''' + if self.graph_type in (LayerType.attention.value, LayerType.rnn.value, LayerType.self_attention.value): + self.size = None + + def __str__(self): + return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(self.graph_type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size) + +def graph_dumps(graph): + ''' + Dump the graph. + ''' + return json.dumps(graph, default=lambda obj: obj.__dict__) + +def graph_loads(graph_json): + ''' + Load a graph from JSON. + ''' + layers = [] + for layer in graph_json['layers']: + layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id']) + layer_info.is_delete = layer['is_delete'] + _logger.debug('append layer {}'.format(layer_info)) + layers.append(layer_info) + graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], []) + graph.layers = layers + _logger.debug('graph {} loaded'.format(graph)) + return graph + +class Graph(object): + ''' + Custom Graph class. + ''' + def __init__(self, max_layer_num, min_layer_num, inputs, output, hide): + self.layers = [] + self.max_layer_num = max_layer_num + self.min_layer_num = min_layer_num + assert min_layer_num < max_layer_num + + for layer in inputs: + self.layers.append(layer) + for layer in output: + self.layers.append(layer) + if hide is not None: + for layer in hide: + self.layers.append(layer) + assert self.is_legal() + + def is_topology(self, layers=None): + ''' + Validate the topology. + ''' + if layers is None: + layers = self.layers + layers_nodle = [] + result = [] + for i, layer in enumerate(layers): + if layer.is_delete is False: + layers_nodle.append(i) + while True: + flag_break = True + layers_toremove = [] + for layer1 in layers_nodle: + flag_arrive = True + for layer2 in layers[layer1].input: + if layer2 in layers_nodle: + flag_arrive = False + if flag_arrive is True: + for layer2 in layers[layer1].output: + # size mismatch between connected layers + if layers[layer2].set_size(layer1, layers[layer1].size) is False: + return False + layers_toremove.append(layer1) + result.append(layer1) + flag_break = False + for layer in layers_toremove: + layers_nodle.remove(layer) + result.append('|') + if flag_break: + break + # there is a loop in the graph, or some layers can't be reached + if layers_nodle: + return False + return result + + def layer_num(self, layers=None): + ''' + Return the number of layers.
+ ''' + if layers is None: + layers = self.layers + layer_num = 0 + for layer in layers: + if layer.is_delete is False and layer.graph_type != LayerType.input.value\ + and layer.graph_type != LayerType.output.value: + layer_num += 1 + return layer_num + + def is_legal(self, layers=None): + ''' + Judge whether the layers form a legal graph. + ''' + if layers is None: + layers = self.layers + + for layer in layers: + if layer.is_delete is False: + if len(layer.input) != layer.input_size: + return False + if len(layer.output) < layer.output_size: + return False + + # layer_num <= max_layer_num + if self.layer_num(layers) > self.max_layer_num: + return False + + # there is a loop in the graph, or some layers can't be reached + if self.is_topology(layers) is False: + return False + + return True + + def update_hash(self): + """ + Update the hash id of each layer in topological order. + The hash ids will be used for weight sharing. + """ + _logger.debug('update hash') + layer_in_cnt = [len(layer.input) for layer in self.layers] + topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value]) + while topo_queue: + layer_i = topo_queue.pop() + self.layers[layer_i].update_hash(self.layers) + for layer_j in self.layers[layer_i].output: + layer_in_cnt[layer_j] -= 1 + if layer_in_cnt[layer_j] == 0: + topo_queue.appendleft(layer_j) + + def mutation(self, only_add=False): + ''' + Mutation for a graph. + ''' + types = [] + if self.layer_num() < self.max_layer_num: + types.append(0) + types.append(1) + if self.layer_num() > self.min_layer_num and only_add is False: + types.append(2) + types.append(3) + # 0: add a layer, delete an edge + # 1: add a layer, change an edge + # 2: delete a layer, delete an edge + # 3: delete a layer, change an edge + graph_type = random.choice(types) + layer_type = random.choice([LayerType.attention.value,\ + LayerType.self_attention.value, LayerType.rnn.value]) + layers = copy.deepcopy(self.layers) + cnt_try = 0 + while True: + layers_in = [] + layers_out = [] + layers_del = [] + for i, layer in enumerate(layers): + if layer.is_delete is False: + if layer.graph_type != LayerType.output.value: + layers_in.append(i) + if layer.graph_type != LayerType.input.value: + layers_out.append(i) + if layer.graph_type != LayerType.output.value\ + and layer.graph_type != LayerType.input.value: + layers_del.append(i) + if graph_type <= 1: + new_id = len(layers) + out = random.choice(layers_out) + inputs = [] + output = [out] + pos = random.randint(0, len(layers[out].input) - 1) + last_in = layers[out].input[pos] + layers[out].input[pos] = new_id + if graph_type == 0: + layers[last_in].output.remove(out) + if graph_type == 1: + layers[last_in].output.remove(out) + layers[last_in].output.append(new_id) + inputs = [last_in] + lay = Layer(graph_type=layer_type, inputs=inputs, output=output) + while len(inputs) < lay.input_size: + layer1 = random.choice(layers_in) + inputs.append(layer1) + layers[layer1].output.append(new_id) + lay.input = inputs + layers.append(lay) + else: + layer1 = random.choice(layers_del) + for layer2 in layers[layer1].output: + layers[layer2].input.remove(layer1) + if graph_type == 2: + random_in = random.choice(layers_in) + else: + random_in = random.choice(layers[layer1].input) + layers[layer2].input.append(random_in) + layers[random_in].output.append(layer2) + for layer2 in layers[layer1].input: + layers[layer2].output.remove(layer1) + layers[layer1].is_delete = True + + if self.is_legal(layers): + self.layers =
layers + break + else: + layers = copy.deepcopy(self.layers) + cnt_try += 1 + self.update_hash() + + def __str__(self): + info = "" + for l_id, layer in enumerate(self.layers): + if layer.is_delete is False: + info += 'id:%d ' % l_id + str(layer) + '\n' + return info diff --git a/nni/__init__.py b/nni/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..26a19c7d057d606900080b748215ad7de87084fd --- /dev/null +++ b/nni/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +try: + from .version import __version__ # pylint: disable=import-error +except ModuleNotFoundError: + __version__ = '999.dev0' + +from .runtime.log import init_logger +init_logger() + +from .common.serializer import trace, dump, load +from .runtime.env_vars import dispatcher_env_vars +from .utils import ClassArgsValidator + +if dispatcher_env_vars.SDK_PROCESS != 'dispatcher': + from .trial import * + from .smartparam import * + from .common.nas_utils import training_update + +class NoMoreTrialError(Exception): + def __init__(self, ErrorInfo): + super().__init__(self) + self.errorinfo = ErrorInfo + + def __str__(self): + return self.errorinfo diff --git a/nni/__main__.py b/nni/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..227a21d780e26edaeb51b1e02e984c566c892df4 --- /dev/null +++ b/nni/__main__.py @@ -0,0 +1,115 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import argparse +import logging +import json +import base64 + +from .runtime.common import enable_multi_thread +from .runtime.msg_dispatcher import MsgDispatcher +from .tools.package_utils import create_builtin_class_instance, create_customized_class_instance + +logger = logging.getLogger('nni.main') +logger.debug('START') + +if os.environ.get('COVERAGE_PROCESS_START'): + import coverage + coverage.process_startup() + + +def main(): + parser = argparse.ArgumentParser(description='Dispatcher command line parser') + parser.add_argument('--exp_params', type=str, required=True) + args, _ = parser.parse_known_args() + + exp_params_decode = base64.b64decode(args.exp_params).decode('utf-8') + logger.debug('decoded exp_params: [%s]', exp_params_decode) + exp_params = json.loads(exp_params_decode) + logger.debug('exp_params json obj: [%s]', json.dumps(exp_params, indent=4)) + + if exp_params.get('deprecated', {}).get('multiThread'): + enable_multi_thread() + + if 'trainingServicePlatform' in exp_params: # config schema is v1 + from .experiment.config.convert import convert_algo + for algo_type in ['tuner', 'assessor', 'advisor']: + if algo_type in exp_params: + exp_params[algo_type] = convert_algo(algo_type, exp_params[algo_type]) + + if exp_params.get('advisor') is not None: + # advisor is enabled and starts to run + _run_advisor(exp_params) + else: + # tuner (and assessor) is enabled and starts to run + assert exp_params.get('tuner') is not None + tuner = _create_tuner(exp_params) + if exp_params.get('assessor') is not None: + assessor = _create_assessor(exp_params) + else: + assessor = None + dispatcher = MsgDispatcher(tuner, assessor) + + try: + dispatcher.run() + tuner._on_exit() + if assessor is not None: + assessor._on_exit() + except Exception as exception: + logger.exception(exception) + tuner._on_error() + if assessor is not None: + assessor._on_error() + raise + + +def _run_advisor(exp_params): + if exp_params.get('advisor').get('name'): + dispatcher = create_builtin_class_instance( + 
exp_params['advisor']['name'], + exp_params['advisor'].get('classArgs'), + 'advisors') + else: + dispatcher = create_customized_class_instance(exp_params.get('advisor')) + if dispatcher is None: + raise AssertionError('Failed to create Advisor instance') + try: + dispatcher.run() + except Exception as exception: + logger.exception(exception) + raise + + +def _create_tuner(exp_params): + if exp_params['tuner'].get('name'): + tuner = create_builtin_class_instance( + exp_params['tuner']['name'], + exp_params['tuner'].get('classArgs'), + 'tuners') + else: + tuner = create_customized_class_instance(exp_params['tuner']) + if tuner is None: + raise AssertionError('Failed to create Tuner instance') + return tuner + + +def _create_assessor(exp_params): + if exp_params['assessor'].get('name'): + assessor = create_builtin_class_instance( + exp_params['assessor']['name'], + exp_params['assessor'].get('classArgs'), + 'assessors') + else: + assessor = create_customized_class_instance(exp_params['assessor']) + if assessor is None: + raise AssertionError('Failed to create Assessor instance') + return assessor + + +if __name__ == '__main__': + try: + main() + except Exception as exception: + logger.exception(exception) + raise diff --git a/nni/algorithms/__init__.py b/nni/algorithms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/compression/__init__.py b/nni/algorithms/compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/compression/pytorch/__init__.py b/nni/algorithms/compression/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/compression/pytorch/auto_compress/__init__.py b/nni/algorithms/compression/pytorch/auto_compress/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8026bdd94c3b100fb9ee9d568b095ce4a702d4a9 --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .experiment import AutoCompressionExperiment +from .interface import AbstractAutoCompressionModule +from .utils import AutoCompressionSearchSpaceGenerator diff --git a/nni/algorithms/compression/pytorch/auto_compress/auto_compress_engine.py b/nni/algorithms/compression/pytorch/auto_compress/auto_compress_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..b1af71ec7fbf31ee3e3a28976aa6eb0f2fabfbfa --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/auto_compress_engine.py @@ -0,0 +1,164 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
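+ +# This engine runs inside each trial: it decodes the compact configuration +# sampled by the tuner back into a compressor config_list, applies the chosen +# pruner or quantizer, optionally fine-tunes, and reports the evaluator's +# metric as the trial's final result.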
+ +import logging +from typing import Optional, Callable + +import json_tricks +from torch.nn import Module +from torch.optim import Optimizer + +import nni +from .constants import PRUNER_DICT, QUANTIZER_DICT +from .interface import BaseAutoCompressionEngine, AbstractAutoCompressionModule +from .utils import import_ + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + +class AutoCompressionEngine(BaseAutoCompressionEngine): + @classmethod + def __convert_compact_pruner_params_to_config_list(cls, compact_config: dict) -> list: + config_dict = {} + for key, value in compact_config.items(): + _, op_types, op_names, var_name = key.split('::') + config_dict.setdefault((op_types, op_names), {}) + config_dict[(op_types, op_names)][var_name] = value + + config_list = [] + for key, config in config_dict.items(): + op_types, op_names = key + op_types = op_types.split(':') if op_types else [] + op_names = op_names.split(':') if op_names else [] + if op_types: + config['op_types'] = op_types + if op_names: + config['op_names'] = op_names + if 'op_types' in config or 'op_names' in config: + config_list.append(config) + + return config_list + + @classmethod + def __convert_compact_quantizer_params_to_config_list(cls, compact_config: dict) -> list: + config_dict = {} + for key, value in compact_config.items(): + _, quant_types, op_types, op_names, var_name = key.split('::') + config_dict.setdefault((quant_types, op_types, op_names), {}) + config_dict[(quant_types, op_types, op_names)][var_name] = value + + config_list = [] + for key, config in config_dict.items(): + quant_types, op_types, op_names = key + quant_types = quant_types.split(':') + op_types = op_types.split(':') + op_names = op_names.split(':') + if quant_types: + config['quant_types'] = quant_types + else: + continue + if op_types: + config['op_types'] = op_types + if op_names: + config['op_names'] = op_names + if 'op_types' in config or 'op_names' in config: + config_list.append(config) + + return config_list + + @classmethod + def _convert_compact_params_to_config_list(cls, compressor_type: str, compact_config: dict) -> list: + func_dict = { + 'pruner': cls.__convert_compact_pruner_params_to_config_list, + 'quantizer': cls.__convert_compact_quantizer_params_to_config_list + } + return func_dict[compressor_type](compact_config) + + @classmethod + def __compress_pruning(cls, algorithm_name: str, + model: Module, + config_list: list, + optimizer_factory: Optional[Callable], + criterion: Optional[Callable], + sparsifying_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_epochs: int, + **compressor_parameter_dict) -> Module: + if algorithm_name in ['level', 'l1', 'l2', 'fpgm']: + pruner = PRUNER_DICT[algorithm_name](model, config_list, **compressor_parameter_dict) + elif algorithm_name in ['slim', 'taylorfo', 'apoz', 'mean_activation']: + optimizer = None if optimizer_factory is None else optimizer_factory(model.parameters()) + pruner = PRUNER_DICT[algorithm_name](model, config_list, optimizer, sparsifying_trainer, criterion, **compressor_parameter_dict) + else: + raise ValueError('Unsupported compression algorithm: {}.'.format(algorithm_name)) + compressed_model = pruner.compress() + if finetuning_trainer is not None: + # note that in pruning process, finetuning will use an un-patched optimizer + optimizer = optimizer_factory(compressed_model.parameters()) + for i in range(finetuning_epochs): + 
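# the trainer contract (see AbstractAutoCompressionModule) is + # (model, optimizer, criterion, current_epoch), invoked once per epoch +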
finetuning_trainer(compressed_model, optimizer, criterion, i) + pruner.get_pruned_weights() + return compressed_model + + @classmethod + def __compress_quantization(cls, algorithm_name: str, + model: Module, + config_list: list, + optimizer_factory: Optional[Callable], + criterion: Optional[Callable], + sparsifying_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_epochs: int, + **compressor_parameter_dict) -> Module: + optimizer = None if optimizer_factory is None else optimizer_factory(model.parameters()) + quantizer = QUANTIZER_DICT[algorithm_name](model, config_list, optimizer, **compressor_parameter_dict) + compressed_model = quantizer.compress() + if finetuning_trainer is not None: + # note that in quantization process, finetuning will use a patched optimizer + for i in range(finetuning_epochs): + finetuning_trainer(compressed_model, optimizer, criterion, i) + return compressed_model + + @classmethod + def _compress(cls, compressor_type: str, + algorithm_name: str, + model: Module, + config_list: list, + optimizer_factory: Optional[Callable], + criterion: Optional[Callable], + sparsifying_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_trainer: Optional[Callable[[Module, Optimizer, Callable, int], None]], + finetuning_epochs: int, + **compressor_parameter_dict) -> Module: + func_dict = { + 'pruner': cls.__compress_pruning, + 'quantizer': cls.__compress_quantization + } + _logger.info('%s compressor config_list:\n%s', algorithm_name, json_tricks.dumps(config_list, indent=4)) + compressed_model = func_dict[compressor_type](algorithm_name, model, config_list, optimizer_factory, criterion, sparsifying_trainer, + finetuning_trainer, finetuning_epochs, **compressor_parameter_dict) + return compressed_model + + @classmethod + def trial_execute_compress(cls, module_name): + auto_compress_module: AbstractAutoCompressionModule = import_(module_name) + + algorithm_config = nni.get_next_parameter()['algorithm_name'] + algorithm_name = algorithm_config['_name'] + compact_config = {k: v for k, v in algorithm_config.items() if k.startswith('config_list::')} + parameter_dict = {k.split('parameter::')[1]: v for k, v in algorithm_config.items() if k.startswith('parameter::')} + + compressor_type = 'quantizer' if algorithm_name in QUANTIZER_DICT else 'pruner' + + config_list = cls._convert_compact_params_to_config_list(compressor_type, compact_config) + + model, evaluator = auto_compress_module.model(), auto_compress_module.evaluator() + optimizer_factory, criterion = auto_compress_module.optimizer_factory(), auto_compress_module.criterion() + sparsifying_trainer = auto_compress_module.sparsifying_trainer(algorithm_name) + finetuning_trainer = auto_compress_module.post_compress_finetuning_trainer(algorithm_name) + finetuning_epochs = auto_compress_module.post_compress_finetuning_epochs(algorithm_name) + + compressed_model = cls._compress(compressor_type, algorithm_name, model, config_list, optimizer_factory, + criterion, sparsifying_trainer, finetuning_trainer, finetuning_epochs, **parameter_dict) + + nni.report_final_result(evaluator(compressed_model)) diff --git a/nni/algorithms/compression/pytorch/auto_compress/constants.py b/nni/algorithms/compression/pytorch/auto_compress/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..008da9e9f9285bc6ef737bb650529cc652435a9f --- /dev/null +++ 
b/nni/algorithms/compression/pytorch/auto_compress/constants.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from ..pruning import LevelPruner, SlimPruner, L1FilterPruner, L2FilterPruner, FPGMPruner, TaylorFOWeightFilterPruner, \ + ActivationAPoZRankFilterPruner, ActivationMeanRankFilterPruner +from ..quantization import NaiveQuantizer, QAT_Quantizer, DoReFaQuantizer, BNNQuantizer + + +PRUNER_DICT = { + 'level': LevelPruner, + 'slim': SlimPruner, + 'l1': L1FilterPruner, + 'l2': L2FilterPruner, + 'fpgm': FPGMPruner, + 'taylorfo': TaylorFOWeightFilterPruner, + 'apoz': ActivationAPoZRankFilterPruner, + 'mean_activation': ActivationMeanRankFilterPruner +} + +QUANTIZER_DICT = { + 'naive': NaiveQuantizer, + 'qat': QAT_Quantizer, + 'dorefa': DoReFaQuantizer, + 'bnn': BNNQuantizer +} diff --git a/nni/algorithms/compression/pytorch/auto_compress/experiment.py b/nni/algorithms/compression/pytorch/auto_compress/experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..07c5c4b3c857aca1348bc557e5e9714914e84d48 --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/experiment.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import inspect +from pathlib import Path, PurePath +from typing import overload, Union, List + +from nni.experiment import Experiment, ExperimentConfig +from nni.algorithms.compression.pytorch.auto_compress.interface import AbstractAutoCompressionModule + + +class AutoCompressionExperiment(Experiment): + + @overload + def __init__(self, auto_compress_module: AbstractAutoCompressionModule, config: ExperimentConfig) -> None: + """ + Prepare an experiment. + + Use `Experiment.run()` to launch it. + + Parameters + ---------- + auto_compress_module + The module provided by the user implements the `AbstractAutoCompressionModule` interfaces. + Remember put the module file under `trial_code_directory`. + config + Experiment configuration. + """ + ... + + @overload + def __init__(self, auto_compress_module: AbstractAutoCompressionModule, training_service: Union[str, List[str]]) -> None: + """ + Prepare an experiment, leaving configuration fields to be set later. + + Example usage:: + + experiment = Experiment(auto_compress_module, 'remote') + experiment.config.trial_command = 'python3 trial.py' + experiment.config.machines.append(RemoteMachineConfig(ip=..., user_name=...)) + ... + experiment.run(8080) + + Parameters + ---------- + auto_compress_module + The module provided by the user implements the `AbstractAutoCompressionModule` interfaces. + Remember put the module file under `trial_code_directory`. + training_service + Name of training service. + Supported value: "local", "remote", "openpai", "aml", "kubeflow", "frameworkcontroller", "adl" and hybrid training service. + """ + ... + + def __init__(self, auto_compress_module: AbstractAutoCompressionModule, config=None, training_service=None): + super().__init__(config, training_service) + + self.module_file_path = str(PurePath(inspect.getfile(auto_compress_module))) + self.module_name = auto_compress_module.__name__ + + def start(self, port: int, debug: bool) -> None: + trial_code_directory = str(PurePath(Path(self.config.trial_code_directory).absolute())) + '/' + assert self.module_file_path.startswith(trial_code_directory), \ + 'The file path of the user-provided module should under trial_code_directory.' 
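+ # strip the trial_code_directory prefix: the trial process runs with the code + # directory as its working directory, so a relative module path should suffice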
+ relative_module_path = self.module_file_path.split(trial_code_directory)[1] + # only supports Linux for now; may need refactoring for other platforms + command = 'python3 -m nni.algorithms.compression.pytorch.auto_compress.trial_entry --module_file_name {} --module_class_name {}' + self.config.trial_command = command.format(relative_module_path, self.module_name) + super().start(port=port, debug=debug) diff --git a/nni/algorithms/compression/pytorch/auto_compress/interface.py b/nni/algorithms/compression/pytorch/auto_compress/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..208d0ef6a15369b32c97c0df609f973c650697e4 --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/interface.py @@ -0,0 +1,122 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from abc import ABC, abstractmethod +from typing import Optional, Callable, Iterable + +from torch.nn import Module +from torch.optim import Optimizer + + +class BaseAutoCompressionEngine(ABC): + @classmethod + @abstractmethod + def trial_execute_compress(cls): + """ + Execute the compression trial. + """ + pass + + +class AbstractAutoCompressionModule(ABC): + """ + The abstract container that the user needs to implement. + """ + @classmethod + @abstractmethod + def model(cls) -> Module: + """ + Returns + ------- + torch.nn.Module + Model to be compressed. + """ + pass + + @classmethod + @abstractmethod + def evaluator(cls) -> Callable[[Module], float]: + """ + Returns + ------- + function + The function used to evaluate the compressed model; returns a scalar. + """ + pass + + @classmethod + @abstractmethod + def optimizer_factory(cls) -> Optional[Callable[[Iterable], Optimizer]]: + """ + Returns + ------- + Optional[Callable[[Iterable], Optimizer]] + Optimizer factory function. Input is an iterable, e.g. `model.parameters()`. + Output is the `torch.optim.Optimizer` instance. + """ + pass + + @classmethod + @abstractmethod + def criterion(cls) -> Optional[Callable]: + """ + Returns + ------- + Optional[Callable] + The criterion function used to train the model. + """ + pass + + @classmethod + @abstractmethod + def sparsifying_trainer(cls, compress_algorithm_name: str) -> Optional[Callable[[Module, Optimizer, Callable, int], None]]: + """ + The trainer used in the sparsifying process. + + Parameters + ---------- + compress_algorithm_name: str + The name of the pruner or quantizer, e.g. 'level', 'l1', 'qat'. + + Returns + ------- + Optional[Callable[[Module, Optimizer, Callable, int], None]] + Used to train the model in the compression stage, taking `model, optimizer, criterion, current_epoch` as function arguments. + """ + pass + + @classmethod + @abstractmethod + def post_compress_finetuning_trainer(cls, compress_algorithm_name: str) -> Optional[Callable[[Module, Optimizer, Callable, int], None]]: + """ + The trainer used in the post-compress finetuning process. + + Parameters + ---------- + compress_algorithm_name: str + The name of the pruner or quantizer, e.g. 'level', 'l1', 'qat'. + + Returns + ------- + Optional[Callable[[Module, Optimizer, Callable, int], None]] + Used to train the model in the finetuning stage, taking `model, optimizer, criterion, current_epoch` as function arguments. + """ + pass + + @classmethod + @abstractmethod + def post_compress_finetuning_epochs(cls, compress_algorithm_name: str) -> int: + """ + The number of epochs in the post-compress finetuning process. + + Parameters + ---------- + compress_algorithm_name: str + The name of the pruner or quantizer, e.g. 'level', 'l1', 'qat'.
+ + Returns + ------- + int + The finetuning epoch number. + """ + pass diff --git a/nni/algorithms/compression/pytorch/auto_compress/trial_entry.py b/nni/algorithms/compression/pytorch/auto_compress/trial_entry.py new file mode 100644 index 0000000000000000000000000000000000000000..c502f4a1854df5388c2f451fec42b6a7b05af903 --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/trial_entry.py @@ -0,0 +1,21 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Entrypoint for trials. +""" +import argparse +from pathlib import Path +import re + +from .auto_compress_engine import AutoCompressionEngine + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='trial entry for auto compression.') + parser.add_argument('--module_file_name', required=True, dest='module_file_name', help='the path of the auto compression module file') + parser.add_argument('--module_class_name', required=True, dest='module_class_name', help='the name of the auto compression module') + args = parser.parse_args() + + module_name = Path(args.module_file_name).as_posix() + module_name = re.sub(re.escape('.py') + '$', '', module_name).replace('/', '.') + '.' + args.module_class_name + AutoCompressionEngine.trial_execute_compress(module_name) diff --git a/nni/algorithms/compression/pytorch/auto_compress/utils.py b/nni/algorithms/compression/pytorch/auto_compress/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aab372b984179caa9119a8fb359d410edc8255ce --- /dev/null +++ b/nni/algorithms/compression/pytorch/auto_compress/utils.py @@ -0,0 +1,117 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from typing import Any + +from .constants import PRUNER_DICT, QUANTIZER_DICT + + +class AutoCompressionSearchSpaceGenerator: + """ + Conveniently generate a search space that can be consumed by a tuner. + """ + + def __init__(self): + self.algorithm_choice_list = [] + + def add_config(self, algorithm_name: str, config_list: list, **algo_kwargs): + """ + Determine whether `algorithm_name` refers to a pruning or a quantization algorithm, + then dispatch to `self._add_pruner_config()` or `self._add_quantizer_config()`. + """ + if algorithm_name in PRUNER_DICT: + self._add_pruner_config(algorithm_name, config_list, **algo_kwargs) + if algorithm_name in QUANTIZER_DICT: + self._add_quantizer_config(algorithm_name, config_list, **algo_kwargs) + + def _add_pruner_config(self, pruner_name: str, config_list: list, **algo_kwargs): + """ + Parameters + ---------- + pruner_name + Supported pruner names: 'level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation'. + config_list + Except 'op_types' and 'op_names', any other config value can be written as ``{'_type': ..., '_value': ...}``. + **algo_kwargs + The additional pruner parameters except 'model', 'config_list', 'optimizer', 'trainer', 'criterion'. + E.g., you can set ``statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]}`` + in TaylorFOWeightFilterPruner or just ``statistics_batch_num=1``.
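+ + A hypothetical call (values are illustrative, not taken from this module):: + + generator.add_config('l1', [{ + 'sparsity': {'_type': 'uniform', '_value': [0.25, 0.75]}, + 'op_types': ['Conv2d'], + }])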
+ """ + sub_search_space = {'_name': pruner_name} + for config in config_list: + op_types = config.pop('op_types', []) + op_names = config.pop('op_names', []) + key_prefix = 'config_list::{}::{}'.format(':'.join(op_types), ':'.join(op_names)) + for var_name, var_search_space in config.items(): + sub_search_space['{}::{}'.format(key_prefix, var_name)] = self._wrap_single_value(var_search_space) + for parameter_name, parameter_search_space in algo_kwargs.items(): + key_prefix = 'parameter' + sub_search_space['{}::{}'.format(key_prefix, parameter_name)] = self._wrap_single_value(parameter_search_space) + self.algorithm_choice_list.append(sub_search_space) + + def _add_quantizer_config(self, quantizer_name: str, config_list: list, **algo_kwargs): + """ + Parameters + ---------- + quantizer_name + Supported pruner name: 'naive', 'qat', 'dorefa', 'bnn'. + config_list + Except 'quant_types', 'op_types' and 'op_names', other config value can be written as `{'_type': ..., '_value': ...}`. + **algo_kwargs + The additional pruner parameters except 'model', 'config_list', 'optimizer'. + """ + sub_search_space = {'_name': quantizer_name} + for config in config_list: + quant_types = config.pop('quant_types', []) + op_types = config.pop('op_types', []) + op_names = config.pop('op_names', []) + key_prefix = 'config_list::{}::{}::{}'.format(':'.join(quant_types), ':'.join(op_types), ':'.join(op_names)) + for var_name, var_search_space in config.items(): + sub_search_space['{}::{}'.format(key_prefix, var_name)] = self._wrap_single_value(var_search_space) + for parameter_name, parameter_search_space in algo_kwargs.items(): + key_prefix = 'parameter' + sub_search_space['{}::{}'.format(key_prefix, parameter_name)] = self._wrap_single_value(parameter_search_space) + self.algorithm_choice_list.append(sub_search_space) + + def dumps(self) -> dict: + """ + Dump the search space as a dict. + """ + search_space = { + 'algorithm_name': { + '_type': 'choice', + '_value': self.algorithm_choice_list + } + } + return search_space + + @classmethod + def loads(cls, search_space: dict): + """ + Return a AutoCompressionSearchSpaceGenerator instance load from a search space dict. + """ + generator = AutoCompressionSearchSpaceGenerator() + generator.algorithm_choice_list = search_space['algorithm_name']['_value'] + return generator + + def _wrap_single_value(self, value) -> dict: + if not isinstance(value, dict): + converted_value = { + '_type': 'choice', + '_value': [value] + } + elif '_type' not in value: + converted_value = {} + for k, v in value.items(): + converted_value[k] = self._wrap_single_value(v) + else: + converted_value = value + return converted_value + + +def import_(target: str, allow_none: bool = False) -> Any: + if target is None: + return None + path, identifier = target.rsplit('.', 1) + module = __import__(path, globals(), locals(), [identifier]) + return getattr(module, identifier) diff --git a/nni/algorithms/compression/pytorch/pruning/__init__.py b/nni/algorithms/compression/pytorch/pruning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d92454859e88eb7cefdcbb4377dbc5044eb1fd6 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from .finegrained_pruning_masker import * +from .structured_pruning_masker import * +from .transformer_pruning_head_masker import * +from .one_shot_pruner import * +from .iterative_pruner import * +from .lottery_ticket import LotteryTicketPruner +from .simulated_annealing_pruner import SimulatedAnnealingPruner +from .net_adapt_pruner import NetAdaptPruner +from .auto_compress_pruner import AutoCompressPruner +from .sensitivity_pruner import SensitivityPruner +from .amc import AMCPruner +from .transformer_pruner import TransformerHeadPruner diff --git a/nni/algorithms/compression/pytorch/pruning/amc/__init__.py b/nni/algorithms/compression/pytorch/pruning/amc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c89a879c6af42ed372f38f3cc11ea1eb9eb6870 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .amc_pruner import AMCPruner diff --git a/nni/algorithms/compression/pytorch/pruning/amc/amc_pruner.py b/nni/algorithms/compression/pytorch/pruning/amc/amc_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9caa80d453b8b8dba7892a236bcf79939151a0 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/amc_pruner.py @@ -0,0 +1,294 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import logging +from copy import deepcopy +from argparse import Namespace +import numpy as np +import torch +from torch.utils.tensorboard import SummaryWriter + +from nni.compression.pytorch.compressor import Pruner +from .channel_pruning_env import ChannelPruningEnv +from .lib.agent import DDPG +from .lib.utils import get_output_folder + +_logger = logging.getLogger(__name__) + +class AMCPruner(Pruner): + """ + A pytorch implementation of AMC: AutoML for Model Compression and Acceleration on Mobile Devices. + (https://arxiv.org/pdf/1802.03494.pdf) + + Parameters: + model: nn.Module + The model to be pruned. + config_list: list + Configuration list to configure layer pruning. + Supported keys: + - op_types: operation type to be pruned + - op_names: operation name to be pruned + evaluator: function + function to evaluate the pruned model. + The prototype of the function: + >>> def evaluator(val_loader, model): + >>> ... + >>> return acc + val_loader: torch.utils.data.DataLoader + Data loader of validation dataset. + suffix: str + suffix to help you remember what experiment you ran. Default: None. + + # parameters for pruning environment + model_type: str + model type to prune, currently 'mobilenet' and 'mobilenetv2' are supported. Default: mobilenet + flops_ratio: float + preserve flops ratio. Default: 0.5 + lbound: float + minimum weight preserve ratio for each layer. Default: 0.2 + rbound: float + maximum weight preserve ratio for each layer. Default: 1.0 + reward: function + reward function type: + - acc_reward: accuracy * 0.01 + - acc_flops_reward: - (100 - accuracy) * 0.01 * np.log(flops) + Default: acc_reward + # parameters for channel pruning + n_calibration_batches: int + number of batches to extract layer information. Default: 60 + n_points_per_layer: int + number of feature points per layer. Default: 10 + channel_round: int + round channel to multiple of channel_round. Default: 8 + + # parameters for ddpg agent + hidden1: int + hidden num of first fully connect layer. Default: 300 + hidden2: int + hidden num of second fully connect layer. 
Default: 300
+        lr_c: float
+            learning rate for critic. Default: 1e-3
+        lr_a: float
+            learning rate for actor. Default: 1e-4
+        warmup: int
+            number of episodes without training but only filling the replay memory. During warmup episodes,
+            random actions are used for pruning. Default: 100
+        discount: float
+            next Q value discount for deep Q value target. Default: 1.0
+        bsize: int
+            minibatch size for training DDPG agent. Default: 64
+        rmsize: int
+            memory size for each layer. Default: 100
+        window_length: int
+            replay buffer window length. Default: 1
+        tau: float
+            moving average for target network being used by soft_update. Default: 0.01
+        # noise
+        init_delta: float
+            initial standard deviation of the truncated normal distribution. Default: 0.5
+        delta_decay: float
+            delta decay during exploration. Default: 0.99
+
+        # parameters for training ddpg agent
+        max_episode_length: int
+            maximum episode length. Default: 1e9
+        output_dir: str
+            output directory to save log files and model files. Default: ./logs
+        debug: boolean
+            debug mode
+        train_episode: int
+            number of training episodes. Default: 800
+        epsilon: int
+            linear decay of exploration policy. Default: 50000
+        seed: int
+            random seed to set for reproducible experiments. Default: None
+    """
+
+    def __init__(
+            self,
+            model,
+            config_list,
+            evaluator,
+            val_loader,
+            suffix=None,
+            model_type='mobilenet',
+            dataset='cifar10',
+            flops_ratio=0.5,
+            lbound=0.2,
+            rbound=1.,
+            reward='acc_reward',
+            n_calibration_batches=60,
+            n_points_per_layer=10,
+            channel_round=8,
+            hidden1=300,
+            hidden2=300,
+            lr_c=1e-3,
+            lr_a=1e-4,
+            warmup=100,
+            discount=1.,
+            bsize=64,
+            rmsize=100,
+            window_length=1,
+            tau=0.01,
+            init_delta=0.5,
+            delta_decay=0.99,
+            max_episode_length=1e9,
+            output_dir='./logs',
+            debug=False,
+            train_episode=800,
+            epsilon=50000,
+            seed=None):
+
+        self.val_loader = val_loader
+        self.evaluator = evaluator
+
+        if seed is not None:
+            np.random.seed(seed)
+            torch.manual_seed(seed)
+            torch.cuda.manual_seed(seed)
+
+        checkpoint = deepcopy(model.state_dict())
+
+        super().__init__(model, config_list, optimizer=None)
+
+        # build folder and logs
+        base_folder_name = '{}_{}_r{}_search'.format(model_type, dataset, flops_ratio)
+        if suffix is not None:
+            self.output_dir = os.path.join(output_dir, base_folder_name + '-' + suffix)
+        else:
+            self.output_dir = get_output_folder(output_dir, base_folder_name)
+
+        self.env_args = Namespace(
+            model_type=model_type,
+            preserve_ratio=flops_ratio,
+            lbound=lbound,
+            rbound=rbound,
+            reward=reward,
+            n_calibration_batches=n_calibration_batches,
+            n_points_per_layer=n_points_per_layer,
+            channel_round=channel_round,
+            output=self.output_dir
+        )
+        self.env = ChannelPruningEnv(
+            self, evaluator, val_loader, checkpoint, args=self.env_args)
+        _logger.info('=> Saving logs to %s', self.output_dir)
+        self.tfwriter = SummaryWriter(log_dir=self.output_dir)
+        self.text_writer = open(os.path.join(self.output_dir, 'log.txt'), 'w')
+        _logger.info('=> Output path: %s...', self.output_dir)
+
+        nb_states = self.env.layer_embedding.shape[1]
+        nb_actions = 1  # just 1 action here
+
+        rmsize = rmsize * len(self.env.prunable_idx)  # for each layer
+        _logger.info('** Actual replay buffer size: %d', rmsize)
+
+        self.ddpg_args = Namespace(
+            hidden1=hidden1,
+            hidden2=hidden2,
+            lr_c=lr_c,
+            lr_a=lr_a,
+            warmup=warmup,
+            discount=discount,
+            bsize=bsize,
+            rmsize=rmsize,
+            window_length=window_length,
+            tau=tau,
+            init_delta=init_delta,
+            delta_decay=delta_decay,
+            max_episode_length=max_episode_length,
+            debug=debug,
+            train_episode=train_episode,
+            epsilon=epsilon
+        )
+        self.agent
= DDPG(nb_states, nb_actions, self.ddpg_args) + + + def compress(self): + self.train(self.ddpg_args.train_episode, self.agent, self.env, self.output_dir) + + def train(self, num_episode, agent, env, output_dir): + agent.is_training = True + step = episode = episode_steps = 0 + episode_reward = 0. + observation = None + T = [] # trajectory + while episode < num_episode: # counting based on episode + # reset if it is the start of episode + if observation is None: + observation = deepcopy(env.reset()) + agent.reset(observation) + + # agent pick action ... + if episode <= self.ddpg_args.warmup: + action = agent.random_action() + # action = sample_from_truncated_normal_distribution(lower=0., upper=1., mu=env.preserve_ratio, sigma=0.5) + else: + action = agent.select_action(observation, episode=episode) + + # env response with next_observation, reward, terminate_info + observation2, reward, done, info = env.step(action) + + T.append([reward, deepcopy(observation), deepcopy(observation2), action, done]) + + # fix-length, never reach here + # if max_episode_length and episode_steps >= max_episode_length - 1: + # done = True + + # [optional] save intermideate model + if num_episode / 3 <= 1 or episode % int(num_episode / 3) == 0: + agent.save_model(output_dir) + + # update + step += 1 + episode_steps += 1 + episode_reward += reward + observation = deepcopy(observation2) + + if done: # end of episode + _logger.info( + '#%d: episode_reward: %.4f acc: %.4f, ratio: %.4f', + episode, episode_reward, + info['accuracy'], + info['compress_ratio'] + ) + self.text_writer.write( + '#{}: episode_reward:{:.4f} acc: {:.4f}, ratio: {:.4f}\n'.format( + episode, episode_reward, + info['accuracy'], + info['compress_ratio'] + ) + ) + final_reward = T[-1][0] + # print('final_reward: {}'.format(final_reward)) + # agent observe and update policy + for _, s_t, s_t1, a_t, done in T: + agent.observe(final_reward, s_t, s_t1, a_t, done) + if episode > self.ddpg_args.warmup: + agent.update_policy() + + #agent.memory.append( + # observation, + # agent.select_action(observation, episode=episode), + # 0., False + #) + + # reset + observation = None + episode_steps = 0 + episode_reward = 0. + episode += 1 + T = [] + + self.tfwriter.add_scalar('reward/last', final_reward, episode) + self.tfwriter.add_scalar('reward/best', env.best_reward, episode) + self.tfwriter.add_scalar('info/accuracy', info['accuracy'], episode) + self.tfwriter.add_scalar('info/compress_ratio', info['compress_ratio'], episode) + self.tfwriter.add_text('info/best_policy', str(env.best_strategy), episode) + # record the preserve rate for each layer + for i, preserve_rate in enumerate(env.strategy): + self.tfwriter.add_scalar('preserve_rate/{}'.format(i), preserve_rate, episode) + + self.text_writer.write('best reward: {}\n'.format(env.best_reward)) + self.text_writer.write('best policy: {}\n'.format(env.best_strategy)) + self.text_writer.close() diff --git a/nni/algorithms/compression/pytorch/pruning/amc/channel_pruning_env.py b/nni/algorithms/compression/pytorch/pruning/amc/channel_pruning_env.py new file mode 100644 index 0000000000000000000000000000000000000000..428f7e7532da931abcce5d71ac0847c7234b30aa --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/channel_pruning_env.py @@ -0,0 +1,543 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
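A hedged usage sketch for the AMCPruner defined above; `model` and `val_loader` are placeholders the user must supply, and the evaluator returns accuracy as a percentage because `acc_reward` multiplies it by 0.01.

    import torch
    from nni.algorithms.compression.pytorch.pruning import AMCPruner

    def evaluator(val_loader, model):
        # prototype documented above: return a scalar accuracy (in percent)
        model.eval()
        correct = total = 0
        with torch.no_grad():
            for data, target in val_loader:
                pred = model(data).argmax(dim=1)
                correct += (pred == target).sum().item()
                total += target.size(0)
        return 100. * correct / total

    # `model` and `val_loader` are assumed to exist; 'mobilenet' is the default model_type
    config_list = [{'op_types': ['Conv2d', 'Linear']}]
    pruner = AMCPruner(model, config_list, evaluator, val_loader, flops_ratio=0.5)
    pruner.compress()
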
+
+import os
+import logging
+import time
+import math
+import copy
+import numpy as np
+import torch
+import torch.nn as nn
+
+from nni.compression.pytorch.compressor import PrunerModuleWrapper
+from .. import AMCWeightMasker
+
+_logger = logging.getLogger(__name__)
+
+# for pruning
+def acc_reward(net, acc, flops):
+    return acc * 0.01
+
+
+def acc_flops_reward(net, acc, flops):
+    error = (100 - acc) * 0.01
+    return -error * np.log(flops)
+
+
+class ChannelPruningEnv:
+    """
+    Env for channel pruning search.
+    This class is used to prune a model using a specified pruner. It prunes one layer when
+    step() is called. When the last layer is pruned, it evaluates the pruned model using the
+    evaluator, and uses the returned value of the evaluator as the reward of the episode.
+
+    Usage:
+        env = ChannelPruningEnv(pruner, evaluator, val_loader, checkpoint, env_args)
+        episode = 0
+        T = []
+        while episode < num_episode:
+            action = agent.select_action(observation)
+            observation2, reward, done, info = env.step(action)
+            T.append([reward, deepcopy(observation), deepcopy(observation2), action, done])
+
+            if done: # end of episode, last layer pruned
+                episode += 1
+                # train agent with episode data
+                for _, s_t, s_t1, a_t, done in T:
+                    agent.observe(final_reward, s_t, s_t1, a_t, done)
+                    agent.update_policy()
+                T = []
+
+    Attributes:
+        prunable_idx: layer indices for prunable layers; the index values are the indices
+            of list(self.model.modules()). Prunable layers are pointwise Conv2d layers and Linear
+            layers.
+        buffer_idx: layer indices for buffer layers, which refer to the depthwise layers.
+            Each depthwise layer is always followed by a pointwise layer for both mobilenet and
+            mobilenetv2. The depthwise layer's filters are pruned when its next pointwise layer's
+            corresponding input channels are pruned.
+        shared_idx: layer indices for layers which share input.
+            For example: [[1,4], [8, 10, 15]] means layer 1 and 4 share the same input, and layer
+            8, 10 and 15 share another input.
+        layer_embedding: embeddings for each prunable layer; the embedding is used as
+            observation for the DDPG agent.
+        layer_info_dict: flops and number of parameters of each layer.
+        min_strategy_dict: key is layer index, value is a tuple; the first value is the minimum
+            action of the input channel, the second value is the minimum action value of the output channel.
+        strategy_dict: key is layer index, value is a tuple; the first value is the action of the input
+            channel, the second value is the action of the output channel.
+
+    Parameters:
+        pruner: Pruner
+            NNI Pruner instance used to prune the model.
+        evaluator: function
+            function to evaluate the pruned model.
+            The prototype of the function:
+            >>> def evaluator(val_loader, model):
+            >>>     ...
+            >>>     return acc
+        val_loader: torch.utils.data.DataLoader
+            Data loader of validation dataset.
+        checkpoint: dict
+            checkpoint of the model to be pruned. It is used to reset the model at the beginning of each
+            episode.
+        args:
+            A Namespace object containing the following arguments:
+            model_type: str
+                model type to prune, currently 'mobilenet', 'mobilenetv2' and 'resnet' are supported.
+            flops_ratio: float
+                preserve flops ratio.
+            lbound: float
+                minimum weight preserve ratio for each layer.
+            rbound: float
+                maximum weight preserve ratio for each layer.
+            reward: function
+                reward function type
+
+            # parameters for channel pruning
+            n_calibration_batches: int
+                number of batches to extract layer information.
+            n_points_per_layer: int
+                number of feature points per layer.
+            channel_round: int
+                round channel to multiple of channel_round.
+ + """ + def __init__(self, pruner, evaluator, val_loader, checkpoint, args): + self.pruner = pruner + self.model = pruner.bound_model + self.checkpoint = checkpoint + self.batch_size = val_loader.batch_size + self.preserve_ratio = args.preserve_ratio + self.channel_prune_masker = AMCWeightMasker(self.model, self.pruner, args.channel_round) + + # options from args + self.args = args + self.lbound = args.lbound + self.rbound = args.rbound + + self.n_calibration_batches = args.n_calibration_batches + self.n_points_per_layer = args.n_points_per_layer + self.channel_round = args.channel_round + + # sanity check + assert self.preserve_ratio > self.lbound, 'Error! You can not achieve preserve_ratio smaller than lbound!' + + # prepare data + self._val_loader = val_loader + self._validate = evaluator + + # build indexs + self._build_index() + self.n_prunable_layer = len(self.prunable_idx) + + # extract information for preparing + self._extract_layer_information() + + # build embedding (static part) + self._build_state_embedding() + + # build reward + self.reset() # restore weight + self.org_acc = self._validate(self._val_loader, self.model) + _logger.info('=> original acc: %.3f', self.org_acc) + self.org_model_size = sum(self.wsize_list) + _logger.info('=> original weight size: %.4f M param', self.org_model_size * 1. / 1e6) + self.org_flops = sum(self.flops_list) + _logger.info('=> FLOPs:') + _logger.info([self.layer_info_dict[idx]['flops']/1e6 for idx in sorted(self.layer_info_dict.keys())]) + _logger.info('=> original FLOPs: %.4f M', self.org_flops * 1. / 1e6) + + self.expected_preserve_computation = self.preserve_ratio * self.org_flops + + self.reward = eval(args.reward) + + self.best_reward = -math.inf + self.best_strategy = None + self.best_d_prime_list = None + self.best_masks = None + + self.org_w_size = sum(self.wsize_list) + + def step(self, action): + # Pseudo prune and get the corresponding statistics. The real pruning happens till the end of all pseudo pruning + if self.visited[self.cur_ind]: + action = self.strategy_dict[self.prunable_idx[self.cur_ind]][0] + preserve_idx = self.index_buffer[self.cur_ind] + else: + action = self._action_wall(action) # percentage to preserve + preserve_idx = None + # prune and update action + action, d_prime, preserve_idx = self.prune_kernel(self.prunable_idx[self.cur_ind], action, preserve_idx) + if not self.visited[self.cur_ind]: + for group in self.shared_idx: + if self.cur_ind in group: # set the shared ones + for g_idx in group: + self.strategy_dict[self.prunable_idx[g_idx]][0] = action + self.strategy_dict[self.prunable_idx[g_idx - 1]][1] = action + self.visited[g_idx] = True + self.index_buffer[g_idx] = preserve_idx.copy() + + self.strategy.append(action) # save action to strategy + self.d_prime_list.append(d_prime) + + self.strategy_dict[self.prunable_idx[self.cur_ind]][0] = action + if self.cur_ind > 0: + self.strategy_dict[self.prunable_idx[self.cur_ind - 1]][1] = action + + # all the actions are made + if self._is_final_layer(): + assert len(self.strategy) == len(self.prunable_idx) + current_flops = self._cur_flops() + acc_t1 = time.time() + acc = self._validate(self._val_loader, self.model) + acc_t2 = time.time() + self.val_time = acc_t2 - acc_t1 + compress_ratio = current_flops * 1. 
/ self.org_flops + info_set = {'compress_ratio': compress_ratio, 'accuracy': acc, 'strategy': self.strategy.copy()} + reward = self.reward(self, acc, current_flops) + + if reward > self.best_reward: + self.best_reward = reward + self.best_strategy = self.strategy.copy() + self.best_d_prime_list = self.d_prime_list.copy() + best_model = os.path.join(self.args.output, 'best_model.pth') + best_mask = os.path.join(self.args.output, 'best_mask.pth') + self.pruner.export_model(model_path=best_model, mask_path=best_mask) + _logger.info('New best reward: %.4f, acc: %.4f, compress: %.4f', self.best_reward, acc, compress_ratio) + _logger.info('New best policy: %s', self.best_strategy) + _logger.info('New best d primes: %s', self.best_d_prime_list) + obs = self.layer_embedding[self.cur_ind, :].copy() # actually the same as the last state + done = True + return obs, reward, done, info_set + + info_set = None + reward = 0 + done = False + self.visited[self.cur_ind] = True # set to visited + self.cur_ind += 1 # the index of next layer + # build next state (in-place modify) + self.layer_embedding[self.cur_ind][-3] = self._cur_reduced() * 1. / self.org_flops # reduced + self.layer_embedding[self.cur_ind][-2] = sum(self.flops_list[self.cur_ind + 1:]) * 1. / self.org_flops # rest + self.layer_embedding[self.cur_ind][-1] = self.strategy[-1] # last action + obs = self.layer_embedding[self.cur_ind, :].copy() + + return obs, reward, done, info_set + + def reset(self): + # restore env by loading the checkpoint + self.pruner.reset(self.checkpoint) + self.cur_ind = 0 + self.strategy = [] # pruning strategy + self.d_prime_list = [] + self.strategy_dict = copy.deepcopy(self.min_strategy_dict) + # reset layer embeddings + self.layer_embedding[:, -1] = 1. + self.layer_embedding[:, -2] = 0. + self.layer_embedding[:, -3] = 0. + obs = self.layer_embedding[0].copy() + obs[-2] = sum(self.wsize_list[1:]) * 1. / sum(self.wsize_list) + self.extract_time = 0 + self.fit_time = 0 + self.val_time = 0 + # for share index + self.visited = [False] * len(self.prunable_idx) + self.index_buffer = {} + return obs + + def prune_kernel(self, op_idx, preserve_ratio, preserve_idx=None): + m_list = list(self.model.modules()) + op = m_list[op_idx] + assert (0. < preserve_ratio <= 1.) + assert type(op) == PrunerModuleWrapper + if preserve_ratio == 1: # do not prune + if (preserve_idx is None) or (len(preserve_idx) == op.module.weight.size(1)): + return 1., op.module.weight.size(1), None # should be a full index + op.input_feat = self.layer_info_dict[op_idx]['input_feat'] + op.output_feat = self.layer_info_dict[op_idx]['output_feat'] + + masks = self.channel_prune_masker.calc_mask(sparsity=1-preserve_ratio, wrapper=op, preserve_idx=preserve_idx) + m = masks['weight_mask'].cpu().data + if type(op.module) == nn.Conv2d: + d_prime = (m.sum((0, 2, 3)) > 0).sum().item() + preserve_idx = np.nonzero((m.sum((0, 2, 3)) > 0).numpy())[0] + else: + assert type(op.module) == nn.Linear + d_prime = (m.sum(1) > 0).sum().item() + preserve_idx = np.nonzero((m.sum(1) > 0).numpy())[0] + + op.weight_mask = masks['weight_mask'] + if hasattr(op.module, 'bias') and op.module.bias is not None and 'bias_mask' in masks: + op.bias_mask = masks['bias_mask'] + + action = (m == 1).sum().item() / m.numel() + return action, d_prime, preserve_idx + + def _is_final_layer(self): + return self.cur_ind == len(self.prunable_idx) - 1 + + def _action_wall(self, action): + """ + Limit the action generated by DDPG for this layer by two constraints: + 1. 
The total flops must meet the flops reduction target.
+           For example: the original flops of the entire model is 1000 and the target flops ratio is 0.5, so the target flops
+           is 1000*0.5 = 500. The reduced flops of the other layers is 400, so the remaining flops quota is 500-400=100;
+           if the total original flops of this layer is 250, then the maximum ratio is 100/250 = 0.4. So the
+           action of this layer cannot be greater than 0.4.
+        2. The action must be greater than lbound, which is stored in self.strategy_dict.
+        """
+        assert len(self.strategy) == self.cur_ind
+
+        action = float(action)
+        action = np.clip(action, 0, 1)
+
+        other_comp = 0
+        this_comp = 0
+        for i, idx in enumerate(self.prunable_idx):
+            flop = self.layer_info_dict[idx]['flops']
+            buffer_flop = self._get_buffer_flops(idx)
+
+            if i == self.cur_ind - 1:  # TODO: add other member in the set
+                this_comp += flop * self.strategy_dict[idx][0]
+                # add buffer (but not influenced by ratio)
+                other_comp += buffer_flop * self.strategy_dict[idx][0]
+            elif i == self.cur_ind:
+                this_comp += flop * self.strategy_dict[idx][1]
+                # also add buffer here (influenced by ratio)
+                this_comp += buffer_flop
+            else:
+                other_comp += flop * self.strategy_dict[idx][0] * self.strategy_dict[idx][1]
+                # add buffer
+                other_comp += buffer_flop * self.strategy_dict[idx][0]  # only consider input reduction
+
+        self.expected_min_preserve = other_comp + this_comp * action
+        max_preserve_ratio = (self.expected_preserve_computation - other_comp) * 1. / this_comp
+
+        action = np.minimum(action, max_preserve_ratio)
+        action = np.maximum(action, self.strategy_dict[self.prunable_idx[self.cur_ind]][0])  # impossible (should be)
+
+        return action
+
+    def _get_buffer_flops(self, idx):
+        buffer_idx = self.buffer_dict[idx]
+        buffer_flop = sum([self.layer_info_dict[_]['flops'] for _ in buffer_idx])
+        return buffer_flop
+
+    def _cur_flops(self):
+        flops = 0
+        for idx in self.prunable_idx:
+            c, n = self.strategy_dict[idx]  # input, output pruning ratio
+            flops += self.layer_info_dict[idx]['flops'] * c * n
+            # add buffer computation
+            flops += self._get_buffer_flops(idx) * c  # only related to input channel reduction
+        return flops
+
+    def _cur_reduced(self):
+        # return the reduced weight
+        reduced = self.org_flops - self._cur_flops()
+        return reduced
+
+    def _build_index(self):
+        """
+        Build the following information/data for later pruning:
+        self.prunable_idx: layer indices for prunable layers; the index values are the indices
+            of list(self.model.modules()). Prunable layers are pointwise Conv2d layers and Linear
+            layers.
+        self.prunable_ops: prunable modules
+        self.buffer_idx: layer indices for buffer layers, which refer to the depthwise layers.
+            Each depthwise layer is always followed by a pointwise layer for both mobilenet and
+            mobilenetv2. The depthwise layer's filters are pruned when its next pointwise layer's
+            corresponding input channels are pruned.
+        self.shared_idx: layer indices for layers which share input.
+            For example: [[1,4], [8, 10, 15]] means layer 1 and 4 share the same input, and layer
+            8, 10 and 15 share another input.
+        self.org_channels: number of input channels for each layer
+        self.min_strategy_dict: key is layer index, value is a tuple; the first value is the minimum
+            action of the input channel, the second value is the minimum action value of the output channel.
+        self.strategy_dict: same as self.min_strategy_dict, but it will be updated later.
+ """ + self.prunable_idx = [] + self.prunable_ops = [] + self.layer_type_dict = {} + self.strategy_dict = {} + self.buffer_dict = {} + this_buffer_list = [] + self.org_channels = [] + # build index and the min strategy dict + for i, m in enumerate(self.model.modules()): + if isinstance(m, PrunerModuleWrapper): + m = m.module + if type(m) == nn.Conv2d and m.groups == m.in_channels: # depth-wise conv, buffer + this_buffer_list.append(i) + else: # really prunable + self.prunable_idx.append(i) + self.prunable_ops.append(m) + self.layer_type_dict[i] = type(m) + self.buffer_dict[i] = this_buffer_list + this_buffer_list = [] # empty + self.org_channels.append(m.in_channels if type(m) == nn.Conv2d else m.in_features) + + self.strategy_dict[i] = [self.lbound, self.lbound] + + self.strategy_dict[self.prunable_idx[0]][0] = 1 # modify the input + self.strategy_dict[self.prunable_idx[-1]][1] = 1 # modify the output + + self.shared_idx = [] + if self.args.model_type == 'mobilenetv2': # TODO: to be tested! Share index for residual connection + connected_idx = [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32] # to be partitioned + last_ch = -1 + share_group = None + for c_idx in connected_idx: + if self.prunable_ops[c_idx].in_channels != last_ch: # new group + last_ch = self.prunable_ops[c_idx].in_channels + if share_group is not None: + self.shared_idx.append(share_group) + share_group = [c_idx] + else: # same group + share_group.append(c_idx) + self.shared_idx.append(share_group) + _logger.info('=> Conv layers to share channels: %s', self.shared_idx) + + self.min_strategy_dict = copy.deepcopy(self.strategy_dict) + + self.buffer_idx = [] + for _, v in self.buffer_dict.items(): + self.buffer_idx += v + + _logger.info('=> Prunable layer idx: %s', self.prunable_idx) + _logger.info('=> Buffer layer idx: %s', self.buffer_idx) + _logger.info('=> Shared idx: %s', self.shared_idx) + _logger.info('=> Initial min strategy dict: %s', self.min_strategy_dict) + + # added for supporting residual connections during pruning + self.visited = [False] * len(self.prunable_idx) + self.index_buffer = {} + + def _extract_layer_information(self): + m_list = list(self.model.modules()) + + self.data_saver = [] + self.layer_info_dict = dict() + self.wsize_list = [] + self.flops_list = [] + + from .lib.utils import measure_layer_for_pruning + + # extend the forward fn to record layer info + def new_forward(m): + def lambda_forward(x): + m.input_feat = x.clone() + #TODO replace this flops counter with nni.compression.torch.utils.counter.count_flops_params + measure_layer_for_pruning(m, x) + y = m.old_forward(x) + m.output_feat = y.clone() + return y + + return lambda_forward + + device = None + for idx in self.prunable_idx + self.buffer_idx: # get all + m = m_list[idx] + m.old_forward = m.forward + m.forward = new_forward(m) + if device is None and type(m) == PrunerModuleWrapper: + device = m.module.weight.device + + # now let the image flow + _logger.info('=> Extracting information...') + with torch.no_grad(): + for i_b, (inputs, target) in enumerate(self._val_loader): # use image from train set + if i_b == self.n_calibration_batches: + break + self.data_saver.append((inputs.clone(), target.clone())) + input_var = torch.autograd.Variable(inputs).to(device) + + # inference and collect stats + _ = self.model(input_var) + + if i_b == 0: # first batch + for idx in self.prunable_idx + self.buffer_idx: + self.layer_info_dict[idx] = dict() + self.layer_info_dict[idx]['params'] = m_list[idx].params + 
self.layer_info_dict[idx]['flops'] = m_list[idx].flops + self.wsize_list.append(m_list[idx].params) + self.flops_list.append(m_list[idx].flops) + _logger.info('flops: %s', self.flops_list) + for idx in self.prunable_idx: + f_in_np = m_list[idx].input_feat.data.cpu().numpy() + f_out_np = m_list[idx].output_feat.data.cpu().numpy() + if len(f_in_np.shape) == 4: # conv + if self.prunable_idx.index(idx) == 0: # first conv + f_in2save, f_out2save = None, None + elif m_list[idx].module.weight.size(3) > 1: # normal conv + f_in2save, f_out2save = f_in_np, f_out_np + else: # 1x1 conv + # assert f_out_np.shape[2] == f_in_np.shape[2] # now support k=3 + randx = np.random.randint(0, f_out_np.shape[2] - 0, self.n_points_per_layer) + randy = np.random.randint(0, f_out_np.shape[3] - 0, self.n_points_per_layer) + # input: [N, C, H, W] + self.layer_info_dict[idx][(i_b, 'randx')] = randx.copy() + self.layer_info_dict[idx][(i_b, 'randy')] = randy.copy() + + f_in2save = f_in_np[:, :, randx, randy].copy().transpose(0, 2, 1)\ + .reshape(self.batch_size * self.n_points_per_layer, -1) + + f_out2save = f_out_np[:, :, randx, randy].copy().transpose(0, 2, 1) \ + .reshape(self.batch_size * self.n_points_per_layer, -1) + else: + assert len(f_in_np.shape) == 2 + f_in2save = f_in_np.copy() + f_out2save = f_out_np.copy() + if 'input_feat' not in self.layer_info_dict[idx]: + self.layer_info_dict[idx]['input_feat'] = f_in2save + self.layer_info_dict[idx]['output_feat'] = f_out2save + else: + self.layer_info_dict[idx]['input_feat'] = np.vstack( + (self.layer_info_dict[idx]['input_feat'], f_in2save)) + self.layer_info_dict[idx]['output_feat'] = np.vstack( + (self.layer_info_dict[idx]['output_feat'], f_out2save)) + + def _build_state_embedding(self): + # build the static part of the state embedding + _logger.info('Building state embedding...') + layer_embedding = [] + module_list = list(self.model.modules()) + for i, ind in enumerate(self.prunable_idx): + m = module_list[ind].module + this_state = [] + if type(m) == nn.Conv2d: + this_state.append(i) # index + this_state.append(0) # layer type, 0 for conv + this_state.append(m.in_channels) # in channels + this_state.append(m.out_channels) # out channels + this_state.append(m.stride[0]) # stride + this_state.append(m.kernel_size[0]) # kernel size + this_state.append(np.prod(m.weight.size())) # weight size + elif type(m) == nn.Linear: + this_state.append(i) # index + this_state.append(1) # layer type, 1 for fc + this_state.append(m.in_features) # in channels + this_state.append(m.out_features) # out channels + this_state.append(0) # stride + this_state.append(1) # kernel size + this_state.append(np.prod(m.weight.size())) # weight size + + # this 3 features need to be changed later + this_state.append(0.) # reduced + this_state.append(0.) # rest + this_state.append(1.) 
# a_{t-1} + layer_embedding.append(np.array(this_state)) + + # normalize the state + layer_embedding = np.array(layer_embedding, 'float') + _logger.info('=> shape of embedding (n_layer * n_dim): %s', layer_embedding.shape) + assert len(layer_embedding.shape) == 2, layer_embedding.shape + for i in range(layer_embedding.shape[1]): + fmin = min(layer_embedding[:, i]) + fmax = max(layer_embedding[:, i]) + if fmax - fmin > 0: + layer_embedding[:, i] = (layer_embedding[:, i] - fmin) / (fmax - fmin) + + self.layer_embedding = layer_embedding + diff --git a/nni/algorithms/compression/pytorch/pruning/amc/lib/__init__.py b/nni/algorithms/compression/pytorch/pruning/amc/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/compression/pytorch/pruning/amc/lib/agent.py b/nni/algorithms/compression/pytorch/pruning/amc/lib/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..fe066301b8b5e4325f92a1b98885ae547a7ecee3 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/lib/agent.py @@ -0,0 +1,232 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np + +import torch +import torch.nn as nn +from torch.optim import Adam + +from .memory import SequentialMemory +from .utils import to_numpy, to_tensor + +criterion = nn.MSELoss() +USE_CUDA = torch.cuda.is_available() + + +class Actor(nn.Module): + def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): + super(Actor, self).__init__() + self.fc1 = nn.Linear(nb_states, hidden1) + self.fc2 = nn.Linear(hidden1, hidden2) + self.fc3 = nn.Linear(hidden2, nb_actions) + self.relu = nn.ReLU() + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + out = self.fc1(x) + out = self.relu(out) + out = self.fc2(out) + out = self.relu(out) + out = self.fc3(out) + out = self.sigmoid(out) + return out + + +class Critic(nn.Module): + def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): + super(Critic, self).__init__() + self.fc11 = nn.Linear(nb_states, hidden1) + self.fc12 = nn.Linear(nb_actions, hidden1) + self.fc2 = nn.Linear(hidden1, hidden2) + self.fc3 = nn.Linear(hidden2, 1) + self.relu = nn.ReLU() + + def forward(self, xs): + x, a = xs + out = self.fc11(x) + self.fc12(a) + out = self.relu(out) + out = self.fc2(out) + out = self.relu(out) + out = self.fc3(out) + return out + + +class DDPG(object): + def __init__(self, nb_states, nb_actions, args): + + self.nb_states = nb_states + self.nb_actions = nb_actions + + # Create Actor and Critic Network + net_cfg = { + 'hidden1': args.hidden1, + 'hidden2': args.hidden2, + # 'init_w': args.init_w + } + self.actor = Actor(self.nb_states, self.nb_actions, **net_cfg) + self.actor_target = Actor(self.nb_states, self.nb_actions, **net_cfg) + self.actor_optim = Adam(self.actor.parameters(), lr=args.lr_a) + + self.critic = Critic(self.nb_states, self.nb_actions, **net_cfg) + self.critic_target = Critic(self.nb_states, self.nb_actions, **net_cfg) + self.critic_optim = Adam(self.critic.parameters(), lr=args.lr_c) + + self.hard_update(self.actor_target, self.actor) # Make sure target is with the same weight + self.hard_update(self.critic_target, self.critic) + + # Create replay buffer + self.memory = SequentialMemory(limit=args.rmsize, window_length=args.window_length) + # self.random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=args.ou_theta, mu=args.ou_mu, + # sigma=args.ou_sigma) + + # Hyper-parameters + self.batch_size 
= args.bsize + self.tau = args.tau + self.discount = args.discount + self.depsilon = 1.0 / args.epsilon + self.lbound = 0. # args.lbound + self.rbound = 1. # args.rbound + + # noise + self.init_delta = args.init_delta + self.delta_decay = args.delta_decay + self.warmup = args.warmup + + # + self.epsilon = 1.0 + # self.s_t = None # Most recent state + # self.a_t = None # Most recent action + self.is_training = True + + # + if USE_CUDA: self.cuda() + + # moving average baseline + self.moving_average = None + self.moving_alpha = 0.5 # based on batch, so small + + def update_policy(self): + # Sample batch + state_batch, action_batch, reward_batch, \ + next_state_batch, terminal_batch = self.memory.sample_and_split(self.batch_size) + + # normalize the reward + batch_mean_reward = np.mean(reward_batch) + if self.moving_average is None: + self.moving_average = batch_mean_reward + else: + self.moving_average += self.moving_alpha * (batch_mean_reward - self.moving_average) + reward_batch -= self.moving_average + # if reward_batch.std() > 0: + # reward_batch /= reward_batch.std() + + # Prepare for the target q batch + with torch.no_grad(): + next_q_values = self.critic_target([ + to_tensor(next_state_batch), + self.actor_target(to_tensor(next_state_batch)), + ]) + + target_q_batch = to_tensor(reward_batch) + \ + self.discount * to_tensor(terminal_batch.astype(np.float)) * next_q_values + + # Critic update + self.critic.zero_grad() + + q_batch = self.critic([to_tensor(state_batch), to_tensor(action_batch)]) + + value_loss = criterion(q_batch, target_q_batch) + value_loss.backward() + self.critic_optim.step() + + # Actor update + self.actor.zero_grad() + + policy_loss = -self.critic([ # pylint: disable=all + to_tensor(state_batch), + self.actor(to_tensor(state_batch)) + ]) + + policy_loss = policy_loss.mean() + policy_loss.backward() + self.actor_optim.step() + + # Target update + self.soft_update(self.actor_target, self.actor) + self.soft_update(self.critic_target, self.critic) + + def eval(self): + self.actor.eval() + self.actor_target.eval() + self.critic.eval() + self.critic_target.eval() + + def cuda(self): + self.actor.cuda() + self.actor_target.cuda() + self.critic.cuda() + self.critic_target.cuda() + + def observe(self, r_t, s_t, s_t1, a_t, done): + if self.is_training: + self.memory.append(s_t, a_t, r_t, done) # save to memory + # self.s_t = s_t1 + + def random_action(self): + action = np.random.uniform(self.lbound, self.rbound, self.nb_actions) + # self.a_t = action + return action + + def select_action(self, s_t, episode): + # assert episode >= self.warmup, 'Episode: {} warmup: {}'.format(episode, self.warmup) + action = to_numpy(self.actor(to_tensor(np.array(s_t).reshape(1, -1)))).squeeze(0) + delta = self.init_delta * (self.delta_decay ** (episode - self.warmup)) + # action += self.is_training * max(self.epsilon, 0) * self.random_process.sample() + action = self.sample_from_truncated_normal_distribution(lower=self.lbound, upper=self.rbound, mu=action, sigma=delta) + action = np.clip(action, self.lbound, self.rbound) + + # self.a_t = action + return action + + def reset(self, obs): + pass + # self.s_t = obs + # self.random_process.reset_states() + + def load_weights(self, output): + if output is None: return + + self.actor.load_state_dict( + torch.load('{}/actor.pkl'.format(output)) + ) + + self.critic.load_state_dict( + torch.load('{}/critic.pkl'.format(output)) + ) + + def save_model(self, output): + torch.save( + self.actor.state_dict(), + '{}/actor.pkl'.format(output) + ) + torch.save( 
+ self.critic.state_dict(), + '{}/critic.pkl'.format(output) + ) + + def soft_update(self, target, source): + for target_param, param in zip(target.parameters(), source.parameters()): + target_param.data.copy_( + target_param.data * (1.0 - self.tau) + param.data * self.tau + ) + + def hard_update(self, target, source): + for target_param, param in zip(target.parameters(), source.parameters()): + target_param.data.copy_(param.data) + + def sample_from_truncated_normal_distribution(self, lower, upper, mu, sigma, size=1): + from scipy import stats + return stats.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma, size=size) + + diff --git a/nni/algorithms/compression/pytorch/pruning/amc/lib/memory.py b/nni/algorithms/compression/pytorch/pruning/amc/lib/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..57bbcfceb86a20092968c9dc75a618221e119174 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/lib/memory.py @@ -0,0 +1,227 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import +from collections import deque, namedtuple +import warnings +import random + +import numpy as np + +# [reference] https://github.com/matthiasplappert/keras-rl/blob/master/rl/memory.py + +# This is to be understood as a transition: Given `state0`, performing `action` +# yields `reward` and results in `state1`, which might be `terminal`. +Experience = namedtuple('Experience', 'state0, action, reward, state1, terminal1') + + +def sample_batch_indexes(low, high, size): + if high - low >= size: + # We have enough data. Draw without replacement, that is each index is unique in the + # batch. We cannot use `np.random.choice` here because it is horribly inefficient as + # the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion. + # `random.sample` does the same thing (drawing without replacement) and is way faster. + r = range(low, high) + batch_idxs = random.sample(r, size) + else: + # Not enough data. Help ourselves with sampling from the range, but the same index + # can occur multiple times. This is not good and should be avoided by picking a + # large enough warm-up phase. + warnings.warn( + 'Not enough entries to sample without replacement. ' + 'Consider increasing your warm-up phase to avoid oversampling!') + batch_idxs = np.random.random_integers(low, high - 1, size=size) + assert len(batch_idxs) == size + return batch_idxs + + +class RingBuffer(object): + def __init__(self, maxlen): + self.maxlen = maxlen + self.start = 0 + self.length = 0 + self.data = [None for _ in range(maxlen)] + + def __len__(self): + return self.length + + def __getitem__(self, idx): + if idx < 0 or idx >= self.length: + raise KeyError() + return self.data[(self.start + idx) % self.maxlen] + + def append(self, v): + if self.length < self.maxlen: + # We have space, simply increase the length. + self.length += 1 + elif self.length == self.maxlen: + # No space, "remove" the first item. + self.start = (self.start + 1) % self.maxlen + else: + # This should never happen. + raise RuntimeError() + self.data[(self.start + self.length - 1) % self.maxlen] = v + + +def zeroed_observation(observation): + if hasattr(observation, 'shape'): + return np.zeros(observation.shape) + elif hasattr(observation, '__iter__'): + out = [] + for x in observation: + out.append(zeroed_observation(x)) + return out + else: + return 0. 
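A quick sketch (editorial, not part of the patch) of the wrap-around semantics of the RingBuffer above: once full, each append overwrites the oldest entry, while indexing remains oldest-first.

    buf = RingBuffer(maxlen=3)
    for v in range(5):   # append 0..4 into a buffer of capacity 3
        buf.append(v)

    assert len(buf) == 3
    # the two oldest entries (0 and 1) have been overwritten
    assert [buf[i] for i in range(len(buf))] == [2, 3, 4]
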
+ + +class Memory(object): + def __init__(self, window_length, ignore_episode_boundaries=False): + self.window_length = window_length + self.ignore_episode_boundaries = ignore_episode_boundaries + + self.recent_observations = deque(maxlen=window_length) + self.recent_terminals = deque(maxlen=window_length) + + def sample(self, batch_size, batch_idxs=None): + raise NotImplementedError() + + def append(self, observation, action, reward, terminal, training=True): + self.recent_observations.append(observation) + self.recent_terminals.append(terminal) + + def get_recent_state(self, current_observation): + # This code is slightly complicated by the fact that subsequent observations might be + # from different episodes. We ensure that an experience never spans multiple episodes. + # This is probably not that important in practice but it seems cleaner. + state = [current_observation] + idx = len(self.recent_observations) - 1 + for offset in range(0, self.window_length - 1): + current_idx = idx - offset + current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False + if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): + # The previously handled observation was terminal, don't add the current one. + # Otherwise we would leak into a different episode. + break + state.insert(0, self.recent_observations[current_idx]) + while len(state) < self.window_length: + state.insert(0, zeroed_observation(state[0])) + return state + + def get_config(self): + config = { + 'window_length': self.window_length, + 'ignore_episode_boundaries': self.ignore_episode_boundaries, + } + return config + + +class SequentialMemory(Memory): + def __init__(self, limit, **kwargs): + super(SequentialMemory, self).__init__(**kwargs) + + self.limit = limit + + # Do not use deque to implement the memory. This data structure may seem convenient but + # it is way too slow on random access. Instead, we use our own ring buffer implementation. + self.actions = RingBuffer(limit) + self.rewards = RingBuffer(limit) + self.terminals = RingBuffer(limit) + self.observations = RingBuffer(limit) + + def sample(self, batch_size, batch_idxs=None): + if batch_idxs is None: + # Draw random indexes such that we have at least a single entry before each + # index. + batch_idxs = sample_batch_indexes(0, self.nb_entries - 1, size=batch_size) + batch_idxs = np.array(batch_idxs) + 1 + assert np.min(batch_idxs) >= 1 + assert np.max(batch_idxs) < self.nb_entries + assert len(batch_idxs) == batch_size + + # Create experiences + experiences = [] + for idx in batch_idxs: + terminal0 = self.terminals[idx - 2] if idx >= 2 else False + while terminal0: + # Skip this transition because the environment was reset here. Select a new, random + # transition and use this instead. This may cause the batch to contain the same + # transition twice. + idx = sample_batch_indexes(1, self.nb_entries, size=1)[0] + terminal0 = self.terminals[idx - 2] if idx >= 2 else False + assert 1 <= idx < self.nb_entries + + # This code is slightly complicated by the fact that subsequent observations might be + # from different episodes. We ensure that an experience never spans multiple episodes. + # This is probably not that important in practice but it seems cleaner. 
+            state0 = [self.observations[idx - 1]]
+            for offset in range(0, self.window_length - 1):
+                current_idx = idx - 2 - offset
+                current_terminal = self.terminals[current_idx - 1] if current_idx - 1 > 0 else False
+                if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
+                    # The previously handled observation was terminal, don't add the current one.
+                    # Otherwise we would leak into a different episode.
+                    break
+                state0.insert(0, self.observations[current_idx])
+            while len(state0) < self.window_length:
+                state0.insert(0, zeroed_observation(state0[0]))
+            action = self.actions[idx - 1]
+            reward = self.rewards[idx - 1]
+            terminal1 = self.terminals[idx - 1]
+
+            # Okay, now we need to create the follow-up state. This is state0 shifted one timestep
+            # to the right. Again, we need to be careful to not include an observation from the next
+            # episode if the last state is terminal.
+            state1 = [np.copy(x) for x in state0[1:]]
+            state1.append(self.observations[idx])
+
+            assert len(state0) == self.window_length
+            assert len(state1) == len(state0)
+            experiences.append(Experience(state0=state0, action=action, reward=reward,
+                                          state1=state1, terminal1=terminal1))
+        assert len(experiences) == batch_size
+        return experiences
+
+    def sample_and_split(self, batch_size, batch_idxs=None):
+        experiences = self.sample(batch_size, batch_idxs)
+
+        state0_batch = []
+        reward_batch = []
+        action_batch = []
+        terminal1_batch = []
+        state1_batch = []
+        for e in experiences:
+            state0_batch.append(e.state0)
+            state1_batch.append(e.state1)
+            reward_batch.append(e.reward)
+            action_batch.append(e.action)
+            terminal1_batch.append(0. if e.terminal1 else 1.)
+
+        # Prepare and validate parameters.
+        state0_batch = np.array(state0_batch, 'double').reshape(batch_size, -1)
+        state1_batch = np.array(state1_batch, 'double').reshape(batch_size, -1)
+        terminal1_batch = np.array(terminal1_batch, 'double').reshape(batch_size, -1)
+        reward_batch = np.array(reward_batch, 'double').reshape(batch_size, -1)
+        action_batch = np.array(action_batch, 'double').reshape(batch_size, -1)
+
+        return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch
+
+    def append(self, observation, action, reward, terminal, training=True):
+        super(SequentialMemory, self).append(observation, action, reward, terminal, training=training)
+
+        # This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
+        # and whether the next state is `terminal` or not.
+        if training:
+            self.observations.append(observation)
+            self.actions.append(action)
+            self.rewards.append(reward)
+            self.terminals.append(terminal)
+
+    @property
+    def nb_entries(self):
+        return len(self.observations)
+
+    def get_config(self):
+        config = super(SequentialMemory, self).get_config()
+        config['limit'] = self.limit
+        return config
diff --git a/nni/algorithms/compression/pytorch/pruning/amc/lib/net_measure.py b/nni/algorithms/compression/pytorch/pruning/amc/lib/net_measure.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c9e815116288c11de98b4061b4dbfc3df029a7e
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/pruning/amc/lib/net_measure.py
@@ -0,0 +1,123 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
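Before the measurement helpers below, a small sketch (editorial, with made-up shapes) of how the DDPG agent above drives SequentialMemory: transitions go in one at a time, and `sample_and_split` returns flat numpy batches.

    import numpy as np

    memory = SequentialMemory(limit=100, window_length=1)
    for t in range(20):
        observation = np.random.rand(5)   # hypothetical 5-dim layer embedding
        action = np.random.rand(1)        # one preserve-ratio action
        reward = float(np.random.rand())
        terminal = (t % 10 == 9)          # episode ends every 10 steps
        memory.append(observation, action, reward, terminal)

    s0, a, r, s1, term = memory.sample_and_split(batch_size=8)
    assert s0.shape == (8, 5) and a.shape == (8, 1) and r.shape == (8, 1)
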
+ +import torch + +# [reference] https://github.com/ShichenLiu/CondenseNet/blob/master/utils.py + + +def get_num_gen(gen): + return sum(1 for _ in gen) + + +def is_leaf(model): + return get_num_gen(model.children()) == 0 + + +def get_layer_info(layer): + layer_str = str(layer) + type_name = layer_str[:layer_str.find('(')].strip() + return type_name + + +def get_layer_param(model): + import operator + import functools + + return sum([functools.reduce(operator.mul, i.size(), 1) for i in model.parameters()]) + +count_ops = 0 +count_params = 0 + +def measure_layer(layer, x): + global count_ops, count_params + delta_ops = 0 + delta_params = 0 + multi_add = 1 + type_name = get_layer_info(layer) + + # ops_conv + if type_name in ['Conv2d']: + out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) / + layer.stride[0] + 1) + out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) / + layer.stride[1] + 1) + delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \ + layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add + delta_params = get_layer_param(layer) + + # ops_nonlinearity + elif type_name in ['ReLU']: + delta_ops = x.numel() / x.size(0) + delta_params = get_layer_param(layer) + + # ops_pooling + elif type_name in ['AvgPool2d']: + in_w = x.size()[2] + kernel_ops = layer.kernel_size * layer.kernel_size + out_w = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1) + out_h = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1) + delta_ops = x.size()[1] * out_w * out_h * kernel_ops + delta_params = get_layer_param(layer) + + elif type_name in ['AdaptiveAvgPool2d']: + delta_ops = x.size()[1] * x.size()[2] * x.size()[3] + delta_params = get_layer_param(layer) + + # ops_linear + elif type_name in ['Linear']: + weight_ops = layer.weight.numel() * multi_add + bias_ops = layer.bias.numel() + delta_ops = weight_ops + bias_ops + delta_params = get_layer_param(layer) + + # ops_nothing + elif type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout']: + delta_params = get_layer_param(layer) + + # unknown layer type + else: + delta_params = get_layer_param(layer) + + count_ops += delta_ops + count_params += delta_params + + return + + +def measure_model(model, H, W, device): + global count_ops, count_params + count_ops = 0 + count_params = 0 + data = torch.zeros(2, 3, H, W).to(device) + + def should_measure(x): + return is_leaf(x) + + def modify_forward(model): + for child in model.children(): + if should_measure(child): + def new_forward(m): + def lambda_forward(x): + measure_layer(m, x) + return m.old_forward(x) + return lambda_forward + child.old_forward = child.forward + child.forward = new_forward(child) + else: + modify_forward(child) + + def restore_forward(model): + for child in model.children(): + # leaf node + if is_leaf(child) and hasattr(child, 'old_forward'): + child.forward = child.old_forward + child.old_forward = None + else: + restore_forward(child) + + modify_forward(model) + model.forward(data) + restore_forward(model) + + return count_ops, count_params diff --git a/nni/algorithms/compression/pytorch/pruning/amc/lib/utils.py b/nni/algorithms/compression/pytorch/pruning/amc/lib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f875e8d7d9b363d848aa2424b4f06ddaeb1dc1a6 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/amc/lib/utils.py @@ -0,0 +1,113 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
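To make the counter above concrete, a minimal sketch (editorial, not part of the patch) that measures a toy network; `measure_model` feeds a zero batch of shape (2, 3, H, W) through the model and accumulates per-layer ops and params via `measure_layer`.

    import torch.nn as nn
    from nni.algorithms.compression.pytorch.pruning.amc.lib.net_measure import measure_model

    net = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),  # counted by the Conv2d branch
        nn.ReLU(),                                  # counted as elementwise ops
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),                               # no params; falls into the default branch
        nn.Linear(8, 10),
    )
    n_ops, n_params = measure_model(net, H=32, W=32, device='cpu')
    print('ops: %d, params: %d' % (n_ops, n_params))
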
+ +import os +import torch + +class TextLogger(object): + """Write log immediately to the disk""" + def __init__(self, filepath): + self.f = open(filepath, 'w') + self.fid = self.f.fileno() + self.filepath = filepath + + def close(self): + self.f.close() + + def write(self, content): + self.f.write(content) + self.f.flush() + os.fsync(self.fid) + + def write_buf(self, content): + self.f.write(content) + + def print_and_write(self, content): + print(content) + self.write(content+'\n') + +def to_numpy(var): + use_cuda = torch.cuda.is_available() + return var.cpu().data.numpy() if use_cuda else var.data.numpy() + + +def to_tensor(ndarray, requires_grad=False): # return a float tensor by default + tensor = torch.from_numpy(ndarray).float() # by default does not require grad + if requires_grad: + tensor.requires_grad_() + return tensor.cuda() if torch.cuda.is_available() else tensor + + +def measure_layer_for_pruning(wrapper, x): + def get_layer_type(layer): + layer_str = str(layer) + return layer_str[:layer_str.find('(')].strip() + + def get_layer_param(model): + import operator + import functools + + return sum([functools.reduce(operator.mul, i.size(), 1) for i in model.parameters()]) + + multi_add = 1 + layer = wrapper.module + type_name = get_layer_type(layer) + + # ops_conv + if type_name in ['Conv2d']: + out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) / + layer.stride[0] + 1) + out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) / + layer.stride[1] + 1) + wrapper.flops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \ + layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add + wrapper.params = get_layer_param(layer) + # ops_linear + elif type_name in ['Linear']: + weight_ops = layer.weight.numel() * multi_add + bias_ops = layer.bias.numel() + wrapper.flops = weight_ops + bias_ops + wrapper.params = get_layer_param(layer) + return + + +def least_square_sklearn(X, Y): + from sklearn.linear_model import LinearRegression + reg = LinearRegression(fit_intercept=False) + reg.fit(X, Y) + return reg.coef_ + + +def get_output_folder(parent_dir, env_name): + """Return save folder. + Assumes folders in the parent_dir have suffix -run{run + number}. Finds the highest run number and sets the output folder + to that number + 1. This is just convenient so that if you run the + same script multiple times tensorboard can plot all of the results + on the same plots with different names. + Parameters + ---------- + parent_dir: str + Path of the directory containing all experiment runs. + Returns + ------- + parent_dir/run_dir + Path to this run's save directory. 
+ """ + os.makedirs(parent_dir, exist_ok=True) + experiment_id = 0 + for folder_name in os.listdir(parent_dir): + if not os.path.isdir(os.path.join(parent_dir, folder_name)): + continue + try: + folder_name = int(folder_name.split('-run')[-1]) + if folder_name > experiment_id: + experiment_id = folder_name + except: + pass + experiment_id += 1 + + parent_dir = os.path.join(parent_dir, env_name) + parent_dir = parent_dir + '-run{}'.format(experiment_id) + os.makedirs(parent_dir, exist_ok=True) + return parent_dir diff --git a/nni/algorithms/compression/pytorch/pruning/apply_compression.py b/nni/algorithms/compression/pytorch/pruning/apply_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6b023f5b90b8483e21bd0bc575b19b4a4df023 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/apply_compression.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch + +logger = logging.getLogger('torch apply compression') + +def apply_compression_results(model, masks_file, map_location=None): + """ + Apply the masks from ```masks_file``` to the model + Note: this API is for inference, because it simply multiplies weights with + corresponding masks when this API is called. + + Parameters + ---------- + model : torch.nn.Module + The model to be compressed + masks_file : str + The path of the mask file + map_location : str + the device on which masks are placed, same to map_location in ```torch.load``` + """ + masks = torch.load(masks_file, map_location) + for name, module in model.named_modules(): + if name in masks: + module.weight.data = module.weight.data.mul_(masks[name]['weight']) + if hasattr(module, 'bias') and module.bias is not None and 'bias' in masks[name]: + module.bias.data = module.bias.data.mul_(masks[name]['bias']) \ No newline at end of file diff --git a/nni/algorithms/compression/pytorch/pruning/auto_compress_pruner.py b/nni/algorithms/compression/pytorch/pruning/auto_compress_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..9b082ab504743926679d1bdbcb1f6bce10e6a434 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/auto_compress_pruner.py @@ -0,0 +1,239 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import copy +import torch +from schema import And, Optional + +from nni.utils import OptimizeMode +from nni.compression.pytorch import ModelSpeedup + +from nni.compression.pytorch.compressor import Pruner +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from .simulated_annealing_pruner import SimulatedAnnealingPruner +from .iterative_pruner import ADMMPruner + +_logger = logging.getLogger(__name__) + + +class AutoCompressPruner(Pruner): + """ + A Pytorch implementation of AutoCompress pruning algorithm. + + Parameters + ---------- + model : pytorch model + The model to be pruned. + config_list : list + Supported keys: + - sparsity : The target overall sparsity. + - op_types : The operation type to prune. + trainer : function + Function used for the first subproblem of ADMM Pruner. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. + criterion: function + Function used to calculate the loss between the target and the output. By default, we use CrossEntropyLoss. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. 
+    evaluator : function
+        Function to evaluate the pruned model.
+        This function should take `model` as its only parameter and return a scalar value.
+        Example::
+
+            def evaluator(model):
+                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+                val_loader = ...
+                model.eval()
+                correct = 0
+                with torch.no_grad():
+                    for data, target in val_loader:
+                        data, target = data.to(device), target.to(device)
+                        output = model(data)
+                        # get the index of the max log-probability
+                        pred = output.argmax(dim=1, keepdim=True)
+                        correct += pred.eq(target.view_as(pred)).sum().item()
+                accuracy = correct / len(val_loader.dataset)
+                return accuracy
+    dummy_input : pytorch tensor
+        The dummy input for ``jit.trace``; users should put it on the right device before passing it in.
+    num_iterations : int
+        Number of overall iterations.
+    optimize_mode : str
+        Optimize mode, `maximize` or `minimize`, by default `maximize`.
+    base_algo : str
+        Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among
+        the ops, the assigned `base_algo` is used to decide which filters/channels/weights to prune.
+    start_temperature : float
+        Start temperature of the simulated annealing process.
+    stop_temperature : float
+        Stop temperature of the simulated annealing process.
+    cool_down_rate : float
+        Cool down rate of the temperature.
+    perturbation_magnitude : float
+        Initial perturbation magnitude to the sparsities. The magnitude decreases with the current temperature.
+    admm_num_iterations : int
+        Number of iterations of the ADMM pruner.
+    admm_epochs_per_iteration : int
+        Training epochs of the first optimization subproblem of ADMMPruner.
+    row : float
+        Penalty parameter for ADMM training (the rho in the ADMM formulation).
+    experiment_data_dir : string
+        Path to store temporary experiment data.
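+
+    Example (a minimal sketch; the trainer/evaluator functions, device and input shape
+    are placeholders the user must supply)::
+
+        pruner = AutoCompressPruner(
+            model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}],
+            trainer=trainer, evaluator=evaluator,
+            dummy_input=torch.randn(1, 3, 32, 32).to(device))
+        model = pruner.compress()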
+ """ + + def __init__(self, model, config_list, trainer, evaluator, dummy_input, criterion=torch.nn.CrossEntropyLoss(), + num_iterations=3, optimize_mode='maximize', base_algo='l1', + # SimulatedAnnealing related + start_temperature=100, stop_temperature=20, cool_down_rate=0.9, perturbation_magnitude=0.35, + # ADMM related + admm_num_iterations=30, admm_epochs_per_iteration=5, row=1e-4, + experiment_data_dir='./'): + # original model + self._model_to_prune = model + self._base_algo = base_algo + + self._trainer = trainer + self._criterion = criterion + self._evaluator = evaluator + self._dummy_input = dummy_input + self._num_iterations = num_iterations + self._optimize_mode = OptimizeMode(optimize_mode) + + # hyper parameters for SA algorithm + self._start_temperature = start_temperature + self._stop_temperature = stop_temperature + self._cool_down_rate = cool_down_rate + self._perturbation_magnitude = perturbation_magnitude + + # hyper parameters for ADMM algorithm + self._admm_num_iterations = admm_num_iterations + self._admm_epochs_per_iteration = admm_epochs_per_iteration + self._row = row + + # overall pruning rate + self._sparsity = config_list[0]['sparsity'] + + self._experiment_data_dir = experiment_data_dir + if not os.path.exists(self._experiment_data_dir): + os.makedirs(self._experiment_data_dir) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + + if self._base_algo == 'level': + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + elif self._base_algo in ['l1', 'l2', 'fpgm']: + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + + schema.validate(config_list) + + def calc_mask(self, wrapper, **kwargs): + return None + + def compress(self): + """ + Compress the model with AutoCompress. + + Returns + ------- + torch.nn.Module + model with specified modules compressed. 
+ """ + _logger.info('Starting AutoCompress pruning...') + + sparsity_each_round = 1 - pow(1 - self._sparsity, 1 / self._num_iterations) + + for i in range(self._num_iterations): + _logger.info('Pruning iteration: %d', i) + _logger.info('Target sparsity this round: %s', + 1 - pow(1 - sparsity_each_round, i + 1)) + + # SimulatedAnnealingPruner + _logger.info( + 'Generating sparsities with SimulatedAnnealingPruner...') + SApruner = SimulatedAnnealingPruner( + model=copy.deepcopy(self._model_to_prune), + config_list=[ + {"sparsity": sparsity_each_round, "op_types": ['Conv2d']}], + evaluator=self._evaluator, + optimize_mode=self._optimize_mode, + base_algo=self._base_algo, + start_temperature=self._start_temperature, + stop_temperature=self._stop_temperature, + cool_down_rate=self._cool_down_rate, + perturbation_magnitude=self._perturbation_magnitude, + experiment_data_dir=self._experiment_data_dir) + config_list = SApruner.compress(return_config_list=True) + _logger.info("Generated config_list : %s", config_list) + + # ADMMPruner + _logger.info('Performing structured pruning with ADMMPruner...') + ADMMpruner = ADMMPruner( + model=copy.deepcopy(self._model_to_prune), + config_list=config_list, + criterion=self._criterion, + trainer=self._trainer, + num_iterations=self._admm_num_iterations, + epochs_per_iteration=self._admm_epochs_per_iteration, + row=self._row, + base_algo=self._base_algo) + ADMMpruner.compress() + + ADMMpruner.export_model(os.path.join(self._experiment_data_dir, 'model_admm_masked.pth'), os.path.join( + self._experiment_data_dir, 'mask.pth')) + + # use speed up to prune the model before next iteration, + # because SimulatedAnnealingPruner & ADMMPruner don't take masked models + self._model_to_prune.load_state_dict(torch.load(os.path.join( + self._experiment_data_dir, 'model_admm_masked.pth'))) + + masks_file = os.path.join(self._experiment_data_dir, 'mask.pth') + device = next(self._model_to_prune.parameters()).device + + _logger.info('Speeding up models...') + m_speedup = ModelSpeedup(self._model_to_prune, self._dummy_input, masks_file, device) + m_speedup.speedup_model() + + evaluation_result = self._evaluator(self._model_to_prune) + _logger.info('Evaluation result of the pruned model in iteration %d: %s', i, evaluation_result) + + _logger.info('----------Compression finished--------------') + + os.remove(os.path.join(self._experiment_data_dir, 'model_admm_masked.pth')) + os.remove(os.path.join(self._experiment_data_dir, 'mask.pth')) + + return self._model_to_prune + + def export_model(self, model_path, mask_path=None, onnx_path=None, input_shape=None, device=None): + _logger.info("AutoCompressPruner export directly the pruned model without mask") + + torch.save(self._model_to_prune.state_dict(), model_path) + _logger.info('Model state_dict saved to %s', model_path) + + if onnx_path is not None: + assert input_shape is not None, 'input_shape must be specified to export onnx model' + # input info needed + if device is None: + device = torch.device('cpu') + input_data = torch.Tensor(*input_shape) + torch.onnx.export(self._model_to_prune, input_data.to(device), onnx_path) + _logger.info('Model in onnx with input shape %s saved to %s', input_data.shape, onnx_path) diff --git a/nni/algorithms/compression/pytorch/pruning/constants.py b/nni/algorithms/compression/pytorch/pruning/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..24b84340cc6f5b0c362ab45288638a43926809b7 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/constants.py @@ 
-0,0 +1,18 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+
+from . import LevelPrunerMasker, SlimPrunerMasker, L1FilterPrunerMasker, \
+    L2FilterPrunerMasker, FPGMPrunerMasker, TaylorFOWeightFilterPrunerMasker, \
+    ActivationAPoZRankFilterPrunerMasker, ActivationMeanRankFilterPrunerMasker
+
+MASKER_DICT = {
+    'level': LevelPrunerMasker,
+    'slim': SlimPrunerMasker,
+    'l1': L1FilterPrunerMasker,
+    'l2': L2FilterPrunerMasker,
+    'fpgm': FPGMPrunerMasker,
+    'taylorfo': TaylorFOWeightFilterPrunerMasker,
+    'apoz': ActivationAPoZRankFilterPrunerMasker,
+    'mean_activation': ActivationMeanRankFilterPrunerMasker
+}
diff --git a/nni/algorithms/compression/pytorch/pruning/constants_pruner.py b/nni/algorithms/compression/pytorch/pruning/constants_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..55ba9506f3ba6a528a6dfdf87b6c878d61176c9f
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/pruning/constants_pruner.py
@@ -0,0 +1,12 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+
+from .one_shot_pruner import LevelPruner, L1FilterPruner, L2FilterPruner, FPGMPruner
+
+PRUNER_DICT = {
+    'level': LevelPruner,
+    'l1': L1FilterPruner,
+    'l2': L2FilterPruner,
+    'fpgm': FPGMPruner
+}
diff --git a/nni/algorithms/compression/pytorch/pruning/dependency_aware_pruner.py b/nni/algorithms/compression/pytorch/pruning/dependency_aware_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fbfe9475e5ccd53f1add2465d03baa225adfe64
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/pruning/dependency_aware_pruner.py
@@ -0,0 +1,158 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from schema import And, Optional
+from nni.common.graph_utils import TorchModuleGraph
+from nni.compression.pytorch.utils.shape_dependency import ChannelDependency, GroupDependency
+from nni.compression.pytorch.utils.config_validation import PrunerSchema
+from nni.compression.pytorch.compressor import Pruner
+from .constants import MASKER_DICT
+
+__all__ = ['DependencyAwarePruner']
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class DependencyAwarePruner(Pruner):
+    """
+    DependencyAwarePruner has two ways to calculate the masks
+    for conv layers. In the normal way, the DependencyAwarePruner
+    calculates the mask of each layer separately. For example, each
+    conv layer determines which filters should be pruned according to its L1
+    norm. In contrast, in the dependency-aware way, the layers that are in a
+    dependency group will be pruned jointly and these layers will be forced
+    to prune the same channels.
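+
+    For example (a minimal sketch; the model and the shape of ``dummy_input`` are
+    illustrative), a subclass such as L1FilterPruner enables this mode via::
+
+        pruner = L1FilterPruner(model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}],
+                                dependency_aware=True,
+                                dummy_input=torch.randn(1, 3, 224, 224))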
+ """ + + def __init__(self, model, config_list, optimizer=None, pruning_algorithm='level', dependency_aware=False, + dummy_input=None, **algo_kwargs): + super().__init__(model, config_list=config_list, optimizer=optimizer) + + self.dependency_aware = dependency_aware + self.dummy_input = dummy_input + + if self.dependency_aware: + if not self._supported_dependency_aware(): + raise ValueError('This pruner does not support dependency aware!') + + errmsg = "When dependency_aware is set, the dummy_input should not be None" + assert self.dummy_input is not None, errmsg + # Get the TorchModuleGraph of the target model + # to trace the model, we need to unwrap the wrappers + self._unwrap_model() + self.graph = TorchModuleGraph(model, dummy_input) + self._wrap_model() + self.channel_depen = ChannelDependency(model, dummy_input, traced_model=self.graph.trace) + self.group_depen = GroupDependency(model, dummy_input, traced_model=self.graph.trace) + self.channel_depen = self.channel_depen.dependency_sets + self.channel_depen = { + name: sets for sets in self.channel_depen for name in sets} + self.group_depen = self.group_depen.dependency_sets + + self.masker = MASKER_DICT[pruning_algorithm]( + model, self, **algo_kwargs) + # set the dependency-aware switch for the masker + self.masker.dependency_aware = dependency_aware + self.set_wrappers_attribute("if_calculated", False) + + def calc_mask(self, wrapper, wrapper_idx=None): + if not wrapper.if_calculated: + sparsity = wrapper.config['sparsity'] + masks = self.masker.calc_mask( + sparsity=sparsity, wrapper=wrapper, wrapper_idx=wrapper_idx) + + # masker.calc_mask returns None means calc_mask is not calculated sucessfully, can try later + if masks is not None: + wrapper.if_calculated = True + return masks + else: + return None + + def update_mask(self): + if not self.dependency_aware: + # if we use the normal way to update the mask, + # then call the update_mask of the father class + super(DependencyAwarePruner, self).update_mask() + else: + # if we update the mask in a dependency-aware way + # then we call _dependency_update_mask + self._dependency_update_mask() + + def validate_config(self, model, config_list): + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + def _supported_dependency_aware(self): + raise NotImplementedError + + def _dependency_calc_mask(self, wrappers, channel_dsets, wrappers_idx=None): + """ + calculate the masks for the conv layers in the same + channel dependecy set. All the layers passed in have + the same number of channels. + + Parameters + ---------- + wrappers: list + The list of the wrappers that in the same channel dependency + set. + wrappers_idx: list + The list of the indexes of wrapppers. + Returns + ------- + masks: dict + A dict object that contains the masks of the layers in this + dependency group, the key is the name of the convolutional layers. + """ + # The number of the groups for each conv layers + # Note that, this number may be different from its + # original number of groups of filters. + groups = [self.group_depen[_w.name] for _w in wrappers] + sparsities = [_w.config['sparsity'] for _w in wrappers] + masks = self.masker.calc_mask( + sparsities, wrappers, wrappers_idx, channel_dsets=channel_dsets, groups=groups) + if masks is not None: + # if masks is None, then the mask calculation fails. 
+ # for example, in activation related maskers, we should + # pass enough batches of data to the model, so that the + # masks can be calculated successfully. + for _w in wrappers: + _w.if_calculated = True + return masks + + def _dependency_update_mask(self): + """ + In the original update_mask, the wraper of each layer will update its + own mask according to the sparsity specified in the config_list. However, in + the _dependency_update_mask, we may prune several layers at the same + time according the sparsities and the channel/group dependencies. + """ + name2wrapper = {x.name: x for x in self.get_modules_wrapper()} + wrapper2index = {x: i for i, x in enumerate(self.get_modules_wrapper())} + for wrapper in self.get_modules_wrapper(): + if wrapper.if_calculated: + continue + # find all the conv layers that have channel dependecy with this layer + # and prune all these layers at the same time. + _names = [x for x in self.channel_depen[wrapper.name]] + logger.info('Pruning the dependent layers: %s', ','.join(_names)) + _wrappers = [name2wrapper[name] + for name in _names if name in name2wrapper] + _wrapper_idxes = [wrapper2index[_w] for _w in _wrappers] + + masks = self._dependency_calc_mask( + _wrappers, _names, wrappers_idx=_wrapper_idxes) + if masks is not None: + for layer in masks: + for mask_type in masks[layer]: + assert hasattr(name2wrapper[layer], mask_type), "there is no attribute '%s' in wrapper on %s" \ + % (mask_type, layer) + setattr(name2wrapper[layer], mask_type, masks[layer][mask_type]) diff --git a/nni/algorithms/compression/pytorch/pruning/finegrained_pruning_masker.py b/nni/algorithms/compression/pytorch/pruning/finegrained_pruning_masker.py new file mode 100644 index 0000000000000000000000000000000000000000..f4aa174233e977eed7b59c6ba65136526a738603 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/finegrained_pruning_masker.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +from .weight_masker import WeightMasker + +__all__ = ['LevelPrunerMasker'] + +logger = logging.getLogger('torch pruner') + + +class LevelPrunerMasker(WeightMasker): + """ + Prune to an exact pruning level specification + """ + + def calc_mask(self, sparsity, wrapper, wrapper_idx=None): + weight = wrapper.module.weight.data.clone() + if wrapper.weight_mask is not None: + # apply base mask for iterative pruning + weight = weight * wrapper.weight_mask + + w_abs = weight.abs() + k = int(weight.numel() * sparsity) + if k == 0: + return {'weight_mask': torch.ones(weight.shape).type_as(weight)} + threshold = torch.topk(w_abs.view(-1), k, largest=False)[0].max() + mask_weight = torch.gt(w_abs, threshold).type_as(weight) + mask = {'weight_mask': mask_weight} + return mask diff --git a/nni/algorithms/compression/pytorch/pruning/iterative_pruner.py b/nni/algorithms/compression/pytorch/pruning/iterative_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..aea5646b64b6bb190625631e501167c86130d996 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/iterative_pruner.py @@ -0,0 +1,608 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
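+"""Iterative pruners that interleave training with mask updates: AGP, ADMM, Slim,
+TaylorFO-based and activation-based (APoZ / mean activation) filter pruners."""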
+
+import logging
+import copy
+import torch
+from schema import And, Optional
+from nni.compression.pytorch.utils.config_validation import PrunerSchema
+from .constants import MASKER_DICT
+from .dependency_aware_pruner import DependencyAwarePruner
+
+__all__ = ['AGPPruner', 'ADMMPruner', 'SlimPruner', 'TaylorFOWeightFilterPruner', 'ActivationAPoZRankFilterPruner',
+           'ActivationMeanRankFilterPruner']
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class IterativePruner(DependencyAwarePruner):
+    """
+    Prune the model during the training process.
+    """
+
+    def __init__(self, model, config_list, optimizer=None, pruning_algorithm='slim', trainer=None, criterion=None,
+                 num_iterations=20, epochs_per_iteration=5, dependency_aware=False, dummy_input=None, **algo_kwargs):
+        """
+        Parameters
+        ----------
+        model: torch.nn.Module
+            Model to be pruned
+        config_list: list
+            List of pruning configs
+        optimizer: torch.optim.Optimizer
+            Optimizer used to train the model
+        pruning_algorithm: str
+            Algorithm used to prune the model
+        trainer: function
+            Function used to train the model.
+            Users should write this function as a normal function to train the Pytorch model
+            and include `model, optimizer, criterion, epoch` as function arguments.
+        criterion: function
+            Function used to calculate the loss between the target and the output.
+            For example, you can use ``torch.nn.CrossEntropyLoss()`` as input.
+        num_iterations: int
+            Total number of iterations in the pruning process. The mask is calculated at the end of each iteration.
+        epochs_per_iteration: Union[int, list]
+            The number of training epochs for each iteration. `int` represents the same value for each iteration.
+            `list` represents the specific value for each iteration.
+        dependency_aware: bool
+            Whether to prune the model in a dependency-aware way.
+        dummy_input: torch.Tensor
+            The dummy input to analyze the topology constraints. Note that
+            the dummy_input should be on the same device as the model.
+        algo_kwargs: dict
+            Additional parameters passed to the pruning algorithm's masker class
+        """
+        super().__init__(model, config_list, optimizer, pruning_algorithm, dependency_aware, dummy_input, **algo_kwargs)
+
+        if isinstance(epochs_per_iteration, list):
+            assert len(epochs_per_iteration) == num_iterations, 'num_iterations should equal the length of epochs_per_iteration'
+            self.epochs_per_iteration = epochs_per_iteration
+        else:
+            assert num_iterations > 0, 'num_iterations should be >= 1'
+            self.epochs_per_iteration = [epochs_per_iteration] * num_iterations
+
+        self._validate_iteration_params()
+
+        self._trainer = trainer
+        self._criterion = criterion
+
+    def _fresh_calculated(self):
+        for wrapper in self.get_modules_wrapper():
+            wrapper.if_calculated = False
+
+    def _validate_iteration_params(self):
+        assert all(num >= 0 for num in self.epochs_per_iteration), 'every epoch number needs to be >= 0'
+
+    def compress(self):
+        training = self.bound_model.training
+        self.bound_model.train()
+        for epochs_num in self.epochs_per_iteration:
+            self._fresh_calculated()
+            for epoch in range(epochs_num):
+                self._trainer(self.bound_model, optimizer=self.optimizer, criterion=self._criterion, epoch=epoch)
+            # NOTE: workaround for statistics_batch_num bigger than max batch number in one epoch, need refactor
+            if hasattr(self.masker, 'statistics_batch_num') and hasattr(self, 'iterations'):
+                if self.iterations < self.masker.statistics_batch_num:  # pylint: disable=access-member-before-definition
+                    self.iterations = self.masker.statistics_batch_num
+            self.update_mask()
+        self.bound_model.train(training)
+
+        return self.bound_model
+
+
+class AGPPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned.
+    config_list : list
+        Supported keys:
+            - sparsity : The target sparsity to compress the operations to.
+            - op_types : See the supported types in your specific pruning algorithm.
+    optimizer: torch.optim.Optimizer
+        Optimizer used to train the model.
+    trainer: function
+        Function used to train the model
+    criterion: function
+        Function used to calculate the loss between the target and the output.
+        For example, you can use ``torch.nn.CrossEntropyLoss()`` as input.
+    num_iterations: int
+        Total number of iterations in the pruning process. The mask is calculated at the end of each iteration.
+    epochs_per_iteration: int
+        The number of training epochs for each iteration.
+    pruning_algorithm: str
+        Algorithm used to prune the model,
+        choose from `['level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation']`, by default `level`
+    """
+
+    def __init__(self, model, config_list, optimizer, trainer, criterion,
+                 num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level'):
+        super().__init__(model, config_list, optimizer=optimizer, trainer=trainer, criterion=criterion,
+                         num_iterations=num_iterations, epochs_per_iteration=epochs_per_iteration)
+        assert isinstance(optimizer, torch.optim.Optimizer), "AGP pruner is an iterative pruner, please pass the optimizer of the model to it"
+        self.masker = MASKER_DICT[pruning_algorithm](model, self)
+        self.now_epoch = 0
+        self.freq = epochs_per_iteration
+        self.end_epoch = epochs_per_iteration * num_iterations
+        self.set_wrappers_attribute("if_calculated", False)
+
+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.Module
+            Model to be pruned
+        config_list : list
+            List of pruning configs
+        """
+        schema = PrunerSchema([{
+            Optional('sparsity'): And(float, lambda n: 0 <= n <= 1),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str],
+            Optional('exclude'): bool
+        }], model, logger)
+
+        schema.validate(config_list)
+
+    def _supported_dependency_aware(self):
+        return False
+
+    def calc_mask(self, wrapper, wrapper_idx=None):
+        """
+        Calculate the mask of the given layer.
+        The mask is computed by the underlying masker with the sparsity scheduled for the current epoch.
+        Parameters
+        ----------
+        wrapper : Module
+            the layer to instrument the compression operation
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        Returns
+        -------
+        dict | None
+            Dictionary for storing masks, keys of the dict:
+            'weight_mask': weight mask tensor
+            'bias_mask': bias mask tensor (optional)
+        """
+
+        config = wrapper.config
+
+        if wrapper.if_calculated:
+            return None
+
+        if self.now_epoch % self.freq != 0:
+            return None
+
+        target_sparsity = self.compute_target_sparsity(config)
+        new_mask = self.masker.calc_mask(sparsity=target_sparsity, wrapper=wrapper, wrapper_idx=wrapper_idx)
+
+        if new_mask is not None:
+            wrapper.if_calculated = True
+
+        return new_mask
+
+    def compute_target_sparsity(self, config):
+        """
+        Calculate the sparsity for pruning
+        Parameters
+        ----------
+        config : dict
+            Layer's pruning config
+        Returns
+        -------
+        float
+            Target sparsity to be pruned
+        """
+
+        initial_sparsity = 0
+        self.target_sparsity = final_sparsity = config.get('sparsity', 0)
+
+        if initial_sparsity >= final_sparsity:
+            logger.warning('your initial_sparsity >= final_sparsity')
+            return final_sparsity
+
+        if self.end_epoch == 1 or self.end_epoch <= self.now_epoch:
+            return final_sparsity
+
+        span = ((self.end_epoch - 1) // self.freq) * self.freq
+        assert span > 0
+        self.target_sparsity = (final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - (self.now_epoch / span)) ** 3)
+        return self.target_sparsity
+
+    def update_epoch(self, epoch):
+        """
+        Update epoch
+        Parameters
+        ----------
+        epoch : int
+            current training epoch
+        """
+
+        if epoch > 0:
+            self.now_epoch = epoch
+            for wrapper in self.get_modules_wrapper():
+                wrapper.if_calculated = False
+
+    # TODO: need refactor
+    def compress(self):
+        training = self.bound_model.training
+        self.bound_model.train()
+
+        for epoch in range(self.end_epoch):
+            self.update_epoch(epoch)
+            self._trainer(self.bound_model, optimizer=self.optimizer, criterion=self._criterion, epoch=epoch)
+            self.update_mask()
+            logger.info(f'sparsity is 
{self.target_sparsity:.2f} at epoch {epoch}') + self.get_pruned_weights() + + self.bound_model.train(training) + + return self.bound_model + + +class ADMMPruner(IterativePruner): + """ + A Pytorch implementation of ADMM Pruner algorithm. + + Parameters + ---------- + model : torch.nn.Module + Model to be pruned. + config_list : list + List on pruning configs. + trainer : function + Function used for the first subproblem. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. + criterion: function + Function used to calculate the loss between the target and the output. By default, we use CrossEntropyLoss in ADMMPruner. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. + num_iterations: int + Total number of iterations in pruning process. We will calculate mask after we finish all iterations in ADMMPruner. + epochs_per_iteration: int + Training epochs of the first subproblem. + row : float + Penalty parameters for ADMM training. + base_algo : str + Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among + the ops, the assigned `base_algo` is used to decide which filters/channels/weights to prune. + """ + + def __init__(self, model, config_list, trainer, criterion=torch.nn.CrossEntropyLoss(), + num_iterations=30, epochs_per_iteration=5, row=1e-4, base_algo='l1'): + self._base_algo = base_algo + + super().__init__(model, config_list) + + self._trainer = trainer + self.optimizer = torch.optim.Adam( + self.bound_model.parameters(), lr=1e-3, weight_decay=5e-5) + self._criterion = criterion + self._num_iterations = num_iterations + self._training_epochs = epochs_per_iteration + self._row = row + + self.set_wrappers_attribute("if_calculated", False) + self.masker = MASKER_DICT[self._base_algo](self.bound_model, self) + + self.patch_optimizer_before(self._callback) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + + if self._base_algo == 'level': + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + elif self._base_algo in ['l1', 'l2', 'fpgm']: + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + def _supported_dependency_aware(self): + return False + + def _projection(self, weight, sparsity, wrapper): + ''' + Return the Euclidean projection of the weight matrix according to the pruning mode. 
+
+        Parameters
+        ----------
+        weight : tensor
+            original matrix
+        sparsity : float
+            the ratio of parameters which need to be set to zero
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+
+        Returns
+        -------
+        tensor
+            the projected matrix
+        '''
+        wrapper_copy = copy.deepcopy(wrapper)
+        wrapper_copy.module.weight.data = weight
+        return weight.data.mul(self.masker.calc_mask(sparsity, wrapper_copy)['weight_mask'])
+
+    def _callback(self):
+        # callback function to do additional optimization, refer to the derivatives of Formula (7)
+        for i, wrapper in enumerate(self.get_modules_wrapper()):
+            wrapper.module.weight.data -= self._row * \
+                (wrapper.module.weight.data - self.Z[i] + self.U[i])
+
+    def compress(self):
+        """
+        Compress the model with ADMM.
+
+        Returns
+        -------
+        torch.nn.Module
+            model with specified modules compressed.
+        """
+        logger.info('Starting ADMM Compression...')
+
+        # initialize Z, U
+        # Z_i^0 = W_i^0
+        # U_i^0 = 0
+        self.Z = []
+        self.U = []
+        for wrapper in self.get_modules_wrapper():
+            z = wrapper.module.weight.data
+            self.Z.append(z)
+            self.U.append(torch.zeros_like(z))
+
+        # Loss = cross_entropy + l2 regularization + \sum_{i=1}^N \rho_i ||W_i - Z_i^k + U_i^k||^2
+        # optimization iteration
+        for k in range(self._num_iterations):
+            logger.info('ADMM iteration : %d', k)
+
+            # step 1: optimize W with the Adam optimizer
+            for epoch in range(self._training_epochs):
+                self._trainer(self.bound_model, optimizer=self.optimizer, criterion=self._criterion, epoch=epoch)
+
+            # step 2: update Z, U
+            # Z_i^{k+1} = projection(W_i^{k+1} + U_i^k)
+            # U_i^{k+1} = U_i^k + W_i^{k+1} - Z_i^{k+1}
+            for i, wrapper in enumerate(self.get_modules_wrapper()):
+                z = wrapper.module.weight.data + self.U[i]
+                self.Z[i] = self._projection(z, wrapper.config['sparsity'], wrapper)
+                torch.cuda.empty_cache()
+                self.U[i] = self.U[i] + wrapper.module.weight.data - self.Z[i]
+
+        # apply prune
+        self.update_mask()
+
+        logger.info('Compression finished.')
+
+        return self.bound_model
+
+
+class SlimPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned
+    config_list : list
+        Supported keys:
+            - sparsity : The target sparsity to compress the operations to.
+            - op_types : Only BatchNorm2d is supported in Slim Pruner.
+    optimizer : torch.optim.Optimizer
+        Optimizer used to train the model
+    trainer : function
+        Function used to sparsify BatchNorm2d scaling factors.
+        Users should write this function as a normal function to train the Pytorch model
+        and include `model, optimizer, criterion, epoch` as function arguments.
+    criterion : function
+        Function used to calculate the loss between the target and the output.
+        For example, you can use ``torch.nn.CrossEntropyLoss()`` as input.
+    sparsifying_training_epochs: int
+        The number of channel sparsity regularization training epochs before pruning.
+    scale : float
+        Penalty parameter for sparsification.
+    dependency_aware: bool
+        Whether to prune the model in a dependency-aware way. If it is `True`, this pruner will
+        prune the model according to the l2-norm of weights and the channel-dependency or
+        group-dependency of the model. In this way, the pruner will force the conv layers
+        that have dependencies to prune the same channels, so the speedup module can better
+        harvest the speed benefit from the pruned model. Note that, if this flag is set to True,
+        the dummy_input cannot be None, because the pruner needs a dummy input to trace the
+        dependency between the conv layers.
+ dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model, config_list, optimizer, trainer, criterion, sparsifying_training_epochs=10, scale=0.0001, + dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, optimizer=optimizer, pruning_algorithm='slim', trainer=trainer, criterion=criterion, + num_iterations=1, epochs_per_iteration=sparsifying_training_epochs, dependency_aware=dependency_aware, + dummy_input=dummy_input) + self.scale = scale + self.patch_optimizer_before(self._callback) + + def validate_config(self, model, config_list): + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['BatchNorm2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + if len(config_list) > 1: + logger.warning('Slim pruner only supports 1 configuration') + + def _supported_dependency_aware(self): + return True + + def _callback(self): + for _, wrapper in enumerate(self.get_modules_wrapper()): + wrapper.module.weight.grad.data.add_(self.scale * torch.sign(wrapper.module.weight.data)) + + +class TaylorFOWeightFilterPruner(IterativePruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : How much percentage of convolutional filters are to be pruned. + - op_types : Currently only Conv2d is supported in TaylorFOWeightFilterPruner. + optimizer: torch.optim.Optimizer + Optimizer used to train model + trainer : function + Function used to sparsify BatchNorm2d scaling factors. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. + criterion : function + Function used to calculate the loss between the target and the output. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. + sparsifying_training_batches: int + The number of batches to collect the contributions. Note that the number need to be less than the maximum batch number in one epoch. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if this flag is set True + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + global_sort: bool + Only support TaylorFOWeightFilterPruner currently. + If prune the model in a global-sort way. If it is `True`, this pruner will prune + the model according to the global contributions information which means channel contributions + will be sorted globally and whether specific channel will be pruned depends on global information. 
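+
+    Example (a minimal sketch; the optimizer, trainer and criterion are user-supplied)::
+
+        pruner = TaylorFOWeightFilterPruner(
+            model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}],
+            optimizer, trainer, torch.nn.CrossEntropyLoss(),
+            sparsifying_training_batches=1)
+        model = pruner.compress()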
+ """ + + def __init__(self, model, config_list, optimizer, trainer, criterion, sparsifying_training_batches=1, + dependency_aware=False, dummy_input=None, global_sort=False): + super().__init__(model, config_list, optimizer=optimizer, pruning_algorithm='taylorfo', trainer=trainer, + criterion=criterion, statistics_batch_num=sparsifying_training_batches, num_iterations=1, + epochs_per_iteration=1, dependency_aware=dependency_aware, + dummy_input=dummy_input) + self.masker.global_sort = global_sort + + def _supported_dependency_aware(self): + return True + + +class ActivationAPoZRankFilterPruner(IterativePruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : How much percentage of convolutional filters are to be pruned. + - op_types : Only Conv2d is supported in ActivationAPoZRankFilterPruner. + optimizer: torch.optim.Optimizer + Optimizer used to train model + trainer: function + Function used to train the model. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. + criterion : function + Function used to calculate the loss between the target and the output. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. + activation: str + The activation type. + sparsifying_training_batches: int + The number of batches to collect the contributions. Note that the number need to be less than the maximum batch number in one epoch. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if this flag is set True + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + + """ + + def __init__(self, model, config_list, optimizer, trainer, criterion, activation='relu', + sparsifying_training_batches=1, dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, pruning_algorithm='apoz', optimizer=optimizer, trainer=trainer, + criterion=criterion, dependency_aware=dependency_aware, dummy_input=dummy_input, + activation=activation, statistics_batch_num=sparsifying_training_batches, num_iterations=1, + epochs_per_iteration=1) + self.patch_optimizer(self.update_mask) + + def _supported_dependency_aware(self): + return True + + +class ActivationMeanRankFilterPruner(IterativePruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : How much percentage of convolutional filters are to be pruned. + - op_types : Only Conv2d is supported in ActivationMeanRankFilterPruner. + optimizer: torch.optim.Optimizer + Optimizer used to train model. + trainer: function + Function used to train the model. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. 
+ criterion : function + Function used to calculate the loss between the target and the output. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. + activation: str + The activation type. + sparsifying_training_batches: int + The number of batches to collect the contributions. Note that the number need to be less than the maximum batch number in one epoch. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if this flag is set True + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model, config_list, optimizer, trainer, criterion, activation='relu', + sparsifying_training_batches=1, dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, pruning_algorithm='mean_activation', optimizer=optimizer, trainer=trainer, + criterion=criterion, dependency_aware=dependency_aware, dummy_input=dummy_input, + activation=activation, statistics_batch_num=sparsifying_training_batches, num_iterations=1, + epochs_per_iteration=1) + self.patch_optimizer(self.update_mask) + + def _supported_dependency_aware(self): + return True diff --git a/nni/algorithms/compression/pytorch/pruning/lottery_ticket.py b/nni/algorithms/compression/pytorch/pruning/lottery_ticket.py new file mode 100644 index 0000000000000000000000000000000000000000..0e09fae90475ae9f7d8a205f12ae1b42945e33fb --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/lottery_ticket.py @@ -0,0 +1,147 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import logging +import torch +from schema import And, Optional +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from nni.compression.pytorch.compressor import Pruner +from .finegrained_pruning_masker import LevelPrunerMasker + +logger = logging.getLogger('torch pruner') + +class LotteryTicketPruner(Pruner): + """ + Parameters + ---------- + model : pytorch model + The model to be pruned + config_list : list + Supported keys: + - prune_iterations : The number of rounds for the iterative pruning. + - sparsity : The final sparsity when the compression is done. + optimizer : pytorch optimizer + The optimizer for the model + lr_scheduler : pytorch lr scheduler + The lr scheduler for the model if used + reset_weights : bool + Whether reset weights and optimizer at the beginning of each round. 
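+
+    Example (a minimal sketch; ``train`` and ``epoch_num`` are user-supplied)::
+
+        config_list = [{'prune_iterations': 5, 'sparsity': 0.8, 'op_types': ['default']}]
+        pruner = LotteryTicketPruner(model, config_list, optimizer)
+        pruner.compress()
+        for _ in pruner.get_prune_iterations():
+            pruner.prune_iteration_start()
+            for epoch in range(epoch_num):
+                train(model)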
+ """ + def __init__(self, model, config_list, optimizer=None, lr_scheduler=None, reset_weights=True): + # save init weights and optimizer + self.reset_weights = reset_weights + if self.reset_weights: + self._model = model + self._optimizer = optimizer + self._model_state = copy.deepcopy(model.state_dict()) + self._optimizer_state = copy.deepcopy(optimizer.state_dict()) + self._lr_scheduler = lr_scheduler + if lr_scheduler is not None: + self._scheduler_state = copy.deepcopy(lr_scheduler.state_dict()) + + super().__init__(model, config_list, optimizer) + self.curr_prune_iteration = None + self.prune_iterations = config_list[0]['prune_iterations'] + self.masker = LevelPrunerMasker(model, self) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - prune_iterations : The number of rounds for the iterative pruning. + - sparsity : The final sparsity when the compression is done. + """ + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'prune_iterations': And(int, lambda n: n > 0), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + assert len(set([x['prune_iterations'] for x in config_list])) == 1, 'The values of prune_iterations must be equal in your config' + + def _calc_sparsity(self, sparsity): + keep_ratio_once = (1 - sparsity) ** (1 / self.prune_iterations) + curr_keep_ratio = keep_ratio_once ** self.curr_prune_iteration + return max(1 - curr_keep_ratio, 0) + + def _calc_mask(self, wrapper, sparsity): + weight = wrapper.module.weight.data + if self.curr_prune_iteration == 0: + mask = {'weight_mask': torch.ones(weight.shape).type_as(weight)} + else: + curr_sparsity = self._calc_sparsity(sparsity) + mask = self.masker.calc_mask(sparsity=curr_sparsity, wrapper=wrapper) + return mask + + def calc_mask(self, wrapper, **kwargs): + """ + Generate mask for the given ``weight``. + + Parameters + ---------- + wrapper : Module + The layer to be pruned + + Returns + ------- + tensor + The mask for this weight, it is ```None``` because this pruner + calculates and assigns masks in ```prune_iteration_start```, + no need to do anything in this function. + """ + return None + + def get_prune_iterations(self): + """ + Return the range for iterations. + In the first prune iteration, masks are all one, thus, add one more iteration + + Returns + ------- + list + A list for pruning iterations + """ + return range(self.prune_iterations + 1) + + def prune_iteration_start(self): + """ + Control the pruning procedure on updated epoch number. + Should be called at the beginning of the epoch. 
+ """ + if self.curr_prune_iteration is None: + self.curr_prune_iteration = 0 + else: + self.curr_prune_iteration += 1 + assert self.curr_prune_iteration < self.prune_iterations + 1, 'Exceed the configured prune_iterations' + + modules_wrapper = self.get_modules_wrapper() + modules_to_compress = self.get_modules_to_compress() + for layer, config in modules_to_compress: + module_wrapper = None + for wrapper in modules_wrapper: + if wrapper.name == layer.name: + module_wrapper = wrapper + break + assert module_wrapper is not None + + sparsity = config.get('sparsity') + mask = self._calc_mask(module_wrapper, sparsity) + # TODO: directly use weight_mask is not good + module_wrapper.weight_mask = mask['weight_mask'] + # there is no mask for bias + + # reinit weights back to original after new masks are generated + if self.reset_weights: + # should use this member function to reset model weights + self.load_model_state_dict(self._model_state) + self._optimizer.load_state_dict(self._optimizer_state) + if self._lr_scheduler is not None: + self._lr_scheduler.load_state_dict(self._scheduler_state) diff --git a/nni/algorithms/compression/pytorch/pruning/net_adapt_pruner.py b/nni/algorithms/compression/pytorch/pruning/net_adapt_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..4087bb0c575ce1ac9499069d5eed1c4d5099f910 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/net_adapt_pruner.py @@ -0,0 +1,353 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import copy +import json +import torch +from schema import And, Optional + +from nni.utils import OptimizeMode + +from nni.compression.pytorch.compressor import Pruner +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from nni.compression.pytorch.utils.num_param_counter import get_total_num_weights +from .constants_pruner import PRUNER_DICT + + +_logger = logging.getLogger(__name__) + + +class NetAdaptPruner(Pruner): + """ + A Pytorch implementation of NetAdapt compression algorithm. + + Parameters + ---------- + model : pytorch model + The model to be pruned. + config_list : list + Supported keys: + - sparsity : The target overall sparsity. + - op_types : The operation type to prune. + short_term_fine_tuner : function + function to short-term fine tune the masked model. + This function should include `model` as the only parameter, + and fine tune the model for a short term after each pruning iteration. + Example:: + + def short_term_fine_tuner(model, epoch=3): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + train_loader = ... + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + model.train() + for _ in range(epoch): + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + optimizer.step() + evaluator : function + function to evaluate the masked model. + This function should include `model` as the only parameter, and returns a scalar value. + Example:: + + def evaluator(model): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + val_loader = ... 
+ model.eval() + correct = 0 + with torch.no_grad(): + for data, target in val_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + accuracy = correct / len(val_loader.dataset) + return accuracy + optimize_mode : str + optimize mode, `maximize` or `minimize`, by default `maximize`. + base_algo : str + Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops, + the assigned `base_algo` is used to decide which filters/channels/weights to prune. + sparsity_per_iteration : float + sparsity to prune in each iteration. + experiment_data_dir : str + PATH to save experiment data, + including the config_list generated for the base pruning algorithm and the performance of the pruned model. + """ + + def __init__(self, model, config_list, short_term_fine_tuner, evaluator, + optimize_mode='maximize', base_algo='l1', sparsity_per_iteration=0.05, experiment_data_dir='./'): + # models used for iterative pruning and evaluation + self._model_to_prune = copy.deepcopy(model) + self._base_algo = base_algo + + super().__init__(model, config_list) + + self._short_term_fine_tuner = short_term_fine_tuner + self._evaluator = evaluator + self._optimize_mode = OptimizeMode(optimize_mode) + + # hyper parameters for NetAdapt algorithm + self._sparsity_per_iteration = sparsity_per_iteration + + # overall pruning rate + self._sparsity = config_list[0]['sparsity'] + + # config_list + self._config_list_generated = [] + + self._experiment_data_dir = experiment_data_dir + if not os.path.exists(self._experiment_data_dir): + os.makedirs(self._experiment_data_dir) + + self._tmp_model_path = os.path.join(self._experiment_data_dir, 'tmp_model.pth') + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + + if self._base_algo == 'level': + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + elif self._base_algo in ['l1', 'l2', 'fpgm']: + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + + schema.validate(config_list) + + def calc_mask(self, wrapper, **kwargs): + return None + + def _update_config_list(self, config_list, op_name, sparsity): + ''' + update sparsity of op_name in config_list + ''' + config_list_updated = copy.deepcopy(config_list) + + for idx, item in enumerate(config_list): + if op_name in item['op_names']: + config_list_updated[idx]['sparsity'] = sparsity + return config_list_updated + + # if op_name is not in self._config_list_generated, create a new json item + if self._base_algo in ['l1', 'l2', 'fpgm']: + config_list_updated.append( + {'sparsity': sparsity, 'op_types': ['Conv2d'], 'op_names': [op_name]}) + elif self._base_algo == 'level': + config_list_updated.append( + {'sparsity': sparsity, 'op_names': [op_name]}) + + return config_list_updated + + def _get_op_num_weights_remained(self, op_name, module): + ''' + Get the number of weights remained after channel pruning with current sparsity + + Returns + ------- + int + remained number of weights of the op + ''' + + # if 
op is wrapped by the pruner + for wrapper in self.get_modules_wrapper(): + if wrapper.name == op_name: + return wrapper.weight_mask.sum().item() + + # if op is not wrapped by the pruner + return module.weight.data.numel() + + def _get_op_sparsity(self, op_name): + for config in self._config_list_generated: + if 'op_names' in config and op_name in config['op_names']: + return config['sparsity'] + return 0 + + def _calc_num_related_weights(self, op_name): + ''' + Calculate total number weights of the op and the next op, applicable only for models without dependencies among ops + + Parameters + ---------- + op_name : str + + Returns + ------- + int + total number of all the realted (current and the next) op weights + ''' + num_weights = 0 + flag_found = False + previous_name = None + previous_module = None + + for name, module in self._model_to_prune.named_modules(): + if not flag_found and name != op_name and type(module).__name__ in ['Conv2d', 'Linear']: + previous_name = name + previous_module = module + if not flag_found and name == op_name: + _logger.debug("original module found: %s", name) + num_weights = module.weight.data.numel() + + # consider related pruning in this op caused by previous op's pruning + if previous_module: + sparsity_previous_op = self._get_op_sparsity(previous_name) + if sparsity_previous_op: + _logger.debug( + "decrease op's weights by %s due to previous op %s's pruning...", sparsity_previous_op, previous_name) + num_weights *= (1-sparsity_previous_op) + + flag_found = True + continue + if flag_found and type(module).__name__ in ['Conv2d', 'Linear']: + _logger.debug("related module found: %s", name) + # channel/filter pruning crossing is considered here, so only the num_weights after channel pruning is valuable + num_weights += self._get_op_num_weights_remained(name, module) + break + + _logger.debug("num related weights of op %s : %d", op_name, num_weights) + + return num_weights + + def compress(self): + """ + Compress the model. + + Returns + ------- + torch.nn.Module + model with specified modules compressed. 
+ """ + _logger.info('Starting NetAdapt Compression...') + + pruning_iteration = 0 + current_sparsity = 0 + delta_num_weights_per_iteration = \ + int(get_total_num_weights(self._model_to_prune, ['Conv2d', 'Linear']) * self._sparsity_per_iteration) + + # stop condition + while current_sparsity < self._sparsity: + _logger.info('Pruning iteration: %d', pruning_iteration) + + # calculate target sparsity of this iteration + target_sparsity = current_sparsity + self._sparsity_per_iteration + + # variable to store the info of the best layer found in this iteration + best_op = {} + + for wrapper in self.get_modules_wrapper(): + _logger.debug("op name : %s", wrapper.name) + _logger.debug("op weights : %d", wrapper.weight_mask.numel()) + _logger.debug("op left weights : %d", wrapper.weight_mask.sum().item()) + + current_op_sparsity = 1 - wrapper.weight_mask.sum().item() / wrapper.weight_mask.numel() + _logger.debug("current op sparsity : %s", current_op_sparsity) + + # sparsity that this layer needs to prune to satisfy the requirement + target_op_sparsity = current_op_sparsity + delta_num_weights_per_iteration / self._calc_num_related_weights(wrapper.name) + + if target_op_sparsity >= 1: + _logger.info('Layer %s has no enough weights (remained) to prune', wrapper.name) + continue + + config_list = self._update_config_list(self._config_list_generated, wrapper.name, target_op_sparsity) + _logger.debug("config_list used : %s", config_list) + + pruner = PRUNER_DICT[self._base_algo](copy.deepcopy(self._model_to_prune), config_list) + model_masked = pruner.compress() + + # Short-term fine tune the pruned model + self._short_term_fine_tuner(model_masked) + + performance = self._evaluator(model_masked) + _logger.info("Layer : %s, evaluation result after short-term fine tuning : %s", wrapper.name, performance) + + if not best_op \ + or (self._optimize_mode is OptimizeMode.Maximize and performance > best_op['performance']) \ + or (self._optimize_mode is OptimizeMode.Minimize and performance < best_op['performance']): + _logger.debug("updating best layer to %s...", wrapper.name) + # find weight mask of this layer + for w in pruner.get_modules_wrapper(): + if w.name == wrapper.name: + masks = {'weight_mask': w.weight_mask, + 'bias_mask': w.bias_mask} + break + best_op = { + 'op_name': wrapper.name, + 'sparsity': target_op_sparsity, + 'performance': performance, + 'masks': masks + } + + # save model weights + pruner.export_model(self._tmp_model_path) + + if not best_op: + # decrease pruning step + self._sparsity_per_iteration *= 0.5 + _logger.info("No more layers to prune, decrease pruning step to %s", self._sparsity_per_iteration) + continue + + # Pick the best layer to prune, update iterative information + # update config_list + self._config_list_generated = self._update_config_list( + self._config_list_generated, best_op['op_name'], best_op['sparsity']) + + # update weights parameters + self._model_to_prune.load_state_dict(torch.load(self._tmp_model_path)) + + # update mask of the chosen op + for wrapper in self.get_modules_wrapper(): + if wrapper.name == best_op['op_name']: + for k in best_op['masks']: + setattr(wrapper, k, best_op['masks'][k]) + break + + current_sparsity = target_sparsity + _logger.info('Pruning iteration %d finished, current sparsity: %s', pruning_iteration, current_sparsity) + _logger.info('Layer %s seleted with sparsity %s, performance after pruning & short term fine-tuning : %s', + best_op['op_name'], best_op['sparsity'], best_op['performance']) + pruning_iteration += 1 + + 
self._final_performance = best_op['performance'] + + # load weights parameters + self.load_model_state_dict(torch.load(self._tmp_model_path)) + os.remove(self._tmp_model_path) + + _logger.info('----------Compression finished--------------') + _logger.info('config_list generated: %s', self._config_list_generated) + _logger.info("Performance after pruning: %s", self._final_performance) + _logger.info("Masked sparsity: %.6f", current_sparsity) + + # save best config found and best performance + with open(os.path.join(self._experiment_data_dir, 'search_result.json'), 'w') as jsonfile: + json.dump({ + 'performance': self._final_performance, + 'config_list': json.dumps(self._config_list_generated) + }, jsonfile) + + _logger.info('search history and result saved to folder : %s', self._experiment_data_dir) + + return self.bound_model diff --git a/nni/algorithms/compression/pytorch/pruning/one_shot_pruner.py b/nni/algorithms/compression/pytorch/pruning/one_shot_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..39b2201aa2a00bd217fc4eabe4674793a43bcb52 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/one_shot_pruner.py @@ -0,0 +1,170 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from schema import And, Optional + +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from .dependency_aware_pruner import DependencyAwarePruner + +__all__ = ['LevelPruner', 'L1FilterPruner', 'L2FilterPruner', 'FPGMPruner'] + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +class OneshotPruner(DependencyAwarePruner): + """ + Prune the model to an exact pruning level in one shot. + """ + + def __init__(self, model, config_list, pruning_algorithm='level', dependency_aware=False, dummy_input=None, + **algo_kwargs): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List of pruning configs + pruning_algorithm: str + Algorithm used to prune the model + dependency_aware: bool + Whether to prune the model in a dependency-aware way. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that + the dummy_input should be on the same device as the model. + algo_kwargs: dict + Additional parameters passed to the pruning algorithm's masker class + """ + super().__init__(model, config_list, None, pruning_algorithm, dependency_aware, dummy_input, **algo_kwargs) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List of pruning configs + """ + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + +class LevelPruner(OneshotPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : Specifies the target sparsity of the operations to be compressed. + - op_types : Operation types to prune.
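+ + A minimal config sketch (illustrative sparsity value):: + + config_list = [{'sparsity': 0.8, 'op_types': ['default']}] + pruner = LevelPruner(model, config_list) + pruner.compress()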
+ """ + + def __init__(self, model, config_list): + super().__init__(model, config_list, pruning_algorithm='level') + + def _supported_dependency_aware(self): + return False + + +class L1FilterPruner(OneshotPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : This is to specify the sparsity operations to be compressed to. + - op_types : Only Conv2d is supported in L1FilterPruner. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if this flag is set True + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model, config_list, dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, pruning_algorithm='l1', dependency_aware=dependency_aware, + dummy_input=dummy_input) + + def _supported_dependency_aware(self): + return True + + +class L2FilterPruner(OneshotPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : This is to specify the sparsity operations to be compressed to. + - op_types : Only Conv2d is supported in L2FilterPruner. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if this flag is set True + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model, config_list, dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, pruning_algorithm='l2', dependency_aware=dependency_aware, + dummy_input=dummy_input) + + def _supported_dependency_aware(self): + return True + + +class FPGMPruner(OneshotPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + Supported keys: + - sparsity : This is to specify the sparsity operations to be compressed to. + - op_types : Only Conv2d is supported in FPGM Pruner. + dependency_aware: bool + If prune the model in a dependency-aware way. If it is `True`, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. 
Note that if this flag is set to True, + the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : torch.Tensor + The dummy input to analyze the topology constraints. Note that the dummy_input + should be on the same device as the model. + """ + + def __init__(self, model, config_list, dependency_aware=False, dummy_input=None): + super().__init__(model, config_list, pruning_algorithm='fpgm', dependency_aware=dependency_aware, + dummy_input=dummy_input) + + def _supported_dependency_aware(self): + return True diff --git a/nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py b/nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..a8193a4649d7948ae1f461c021285f94c6dc2235 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py @@ -0,0 +1,416 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +import os +import csv +import copy +import json +import logging +import torch + +from schema import And, Optional +from nni.compression.pytorch.compressor import Pruner +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis + +from .constants_pruner import PRUNER_DICT + + +MAX_PRUNE_RATIO_PER_ITER = 0.95 + +_logger = logging.getLogger('Sensitivity_Pruner') +_logger.setLevel(logging.INFO) + +class SensitivityPruner(Pruner): + """ + This pruner iteratively prunes the model based on the sensitivity + of each layer. + + Parameters + ---------- + model: torch.nn.Module + model to be compressed + evaluator: function + validation function for the model. This function should return the accuracy + of the validation dataset. The input parameters of evaluator can be specified + in the parameters `eval_args` and `eval_kwargs` of the compress function if needed. + Example: + >>> def evaluator(model): + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + >>> val_loader = ... + >>> model.eval() + >>> correct = 0 + >>> with torch.no_grad(): + >>> for data, target in val_loader: + >>> data, target = data.to(device), target.to(device) + >>> output = model(data) + >>> # get the index of the max log-probability + >>> pred = output.argmax(dim=1, keepdim=True) + >>> correct += pred.eq(target.view_as(pred)).sum().item() + >>> accuracy = correct / len(val_loader.dataset) + >>> return accuracy + finetuner: function + finetune function for the model. This parameter is optional; if it is not None, + the sensitivity pruner will finetune the model after pruning in each iteration. + The input parameters of finetuner can be specified in the parameters of compress + called `finetune_args` and `finetune_kwargs` if needed. + Example: + >>> def finetuner(model, epoch=3): + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + >>> train_loader = ... + >>> criterion = torch.nn.CrossEntropyLoss() + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + >>> model.train() + >>> for _ in range(epoch): + >>> for _, (data, target) in enumerate(train_loader): + >>> data, target = data.to(device), target.to(device) + >>> optimizer.zero_grad() + >>> output = model(data) + >>> loss = criterion(output, target) + >>> loss.backward() + >>> optimizer.step() + base_algo: str + Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`.
+ sparsity_proportion_calc: function + This function generates the sparsity proportions among the conv layers according to the + sensitivity analysis results. We provide a default function to quantify the sparsity + proportion according to the sensitivity analysis results. Users can also customize + this function according to their needs. The input of this function is a dict, + for example : {'conv1' : {0.1: 0.9, 0.2 : 0.8}, 'conv2' : {0.1: 0.9, 0.2 : 0.8}}, + in which 'conv1' is the name of the conv layer, and 0.1: 0.9 means that when the + sparsity of conv1 is 0.1 (10%), the model's validation accuracy is 0.9. + sparsity_per_iter: float + The amount of sparsity that the pruner tries to add to the model in each iteration. + acc_drop_threshold : float + The hyperparameter used to quantify the sensitivity of each layer. + checkpoint_dir: str + The directory path to save the checkpoints during pruning. + """ + + def __init__(self, model, config_list, evaluator, + finetuner=None, base_algo='l1', sparsity_proportion_calc=None, + sparsity_per_iter=0.1, acc_drop_threshold=0.05, checkpoint_dir=None): + + self.base_algo = base_algo + self.model = model + super(SensitivityPruner, self).__init__(model, config_list) + # unwrap the model + self._unwrap_model() + _logger.debug(str(self.model)) + self.evaluator = evaluator + self.finetuner = finetuner + self.analyzer = SensitivityAnalysis( + self.model, self.evaluator, prune_type=base_algo, \ + early_stop_mode='dropped', early_stop_value=acc_drop_threshold) + # Get the original accuracy of the pretrained model + self.ori_acc = None + # Copy the original weights before pruning + self.ori_state_dict = copy.deepcopy(self.model.state_dict()) + self.sensitivities = {} + # Save the weight count for each layer + self.weight_count = {} + self.weight_sum = 0 + # Map the layer name to the layer module + self.named_module = {} + + self.Pruner = PRUNER_DICT[self.base_algo] + # Count the total weight count of the model + for name, submodule in self.model.named_modules(): + self.named_module[name] = submodule + if name in self.analyzer.target_layer: + # Currently, only count the weights in the conv layers, + # otherwise the fully connected layers (which contain + # the most weights) may make the pruner prune the + # model too hard + # if hasattr(submodule, 'weight'): # Count all the weights of the model + self.weight_count[name] = submodule.weight.data.numel() + self.weight_sum += self.weight_count[name] + # function to generate the sparsity proportions among the conv layers + if sparsity_proportion_calc is None: + self.sparsity_proportion_calc = self._max_prune_ratio + else: + self.sparsity_proportion_calc = sparsity_proportion_calc + # The ratio of remaining weights is 1.0 at the beginning + self.remained_ratio = 1.0 + self.sparsity_per_iter = sparsity_per_iter + self.acc_drop_threshold = acc_drop_threshold + self.checkpoint_dir = checkpoint_dir + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List of pruning configs + """ + + if self.base_algo == 'level': + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + elif self.base_algo in ['l1', 'l2', 'fpgm']: + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model,
_logger) + + schema.validate(config_list) + + def load_sensitivity(self, filepath): + """ + load the sensitivity results exported by the sensitivity analyzer + """ + assert os.path.exists(filepath) + with open(filepath, 'r') as csvf: + csv_r = csv.reader(csvf) + header = next(csv_r) + sparsities = [float(x) for x in header[1:]] + sensitivities = {} + for row in csv_r: + layername = row[0] + accuracies = [float(x) for x in row[1:]] + sensitivities[layername] = {} + for i, accuracy in enumerate(accuracies): + sensitivities[layername][sparsities[i]] = accuracy + return sensitivities + + def _max_prune_ratio(self, ori_acc, threshold, sensitivities): + """ + Find the maximum prune ratio for a single layer whose accuracy + drop is lower than the threshold. + + Parameters + ---------- + ori_acc: float + Original accuracy + threshold: float + Accuracy drop threshold + sensitivities: dict + The dict object that stores the sensitivity results for each layer. + For example: {'conv1' : {0.1: 0.9, 0.2 : 0.8}} + Returns + ------- + max_ratios: dict + return the maximum prune ratio for each layer. For example: + {'conv1':0.1, 'conv2':0.2} + """ + max_ratio = {} + for layer in sensitivities: + prune_ratios = sorted(sensitivities[layer].keys()) + last_ratio = 0 + for ratio in prune_ratios: + last_ratio = ratio + cur_acc = sensitivities[layer][ratio] + if cur_acc + threshold < ori_acc: + break + max_ratio[layer] = last_ratio + return max_ratio + + def normalize(self, ratios, target_pruned): + """ + Normalize the prune ratio of each layer according to the + total already pruned ratio and the final target total pruning + ratio + + Parameters + ---------- + ratios: + Dict object that save the prune ratio for each layer + target_pruned: + The amount of the weights expected to be pruned in this + iteration + + Returns + ------- + new_ratios: + return the normalized prune ratios for each layer. + + """ + w_sum = 0 + _Max = 0 + for layername, ratio in ratios.items(): + wcount = self.weight_count[layername] + w_sum += ratio * wcount * \ + (1-self.analyzer.already_pruned[layername]) + target_count = self.weight_sum * target_pruned + for layername in ratios: + ratios[layername] = ratios[layername] * target_count / w_sum + _Max = max(_Max, ratios[layername]) + # Cannot Prune too much in a single iteration + # If a layer's prune ratio is larger than the + # MAX_PRUNE_RATIO_PER_ITER we rescal all prune + # ratios under this threshold + if _Max > MAX_PRUNE_RATIO_PER_ITER: + + for layername in ratios: + ratios[layername] = ratios[layername] * \ + MAX_PRUNE_RATIO_PER_ITER / _Max + return ratios + + def create_cfg(self, ratios): + """ + Generate the cfg_list for the pruner according to the prune ratios. + + Parameters + --------- + ratios: + For example: {'conv1' : 0.2} + + Returns + ------- + cfg_list: + For example: [{'sparsity':0.2, 'op_names':['conv1'], 'op_types':['Conv2d']}] + """ + cfg_list = [] + for layername in ratios: + prune_ratio = ratios[layername] + remain = 1 - self.analyzer.already_pruned[layername] + sparsity = remain * prune_ratio + \ + self.analyzer.already_pruned[layername] + if sparsity > 0: + # Pruner does not allow the prune ratio to be zero + cfg = {'sparsity': sparsity, 'op_names': [ + layername], 'op_types': ['Conv2d']} + cfg_list.append(cfg) + return cfg_list + + def current_sparsity(self): + """ + The sparsity of the weight. 
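+ Computed as sum(weight_count[l] * already_pruned[l]) / weight_sum over the + analyzed layers, i.e. the fraction of all counted weights already pruned.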
+ """ + pruned_weight = 0 + for layer_name in self.analyzer.already_pruned: + w_count = self.weight_count[layer_name] + prune_ratio = self.analyzer.already_pruned[layer_name] + pruned_weight += w_count * prune_ratio + return pruned_weight / self.weight_sum + + def compress(self, eval_args=None, eval_kwargs=None, + finetune_args=None, finetune_kwargs=None, resume_sensitivity=None): + """ + This function iteratively prune the model according to the results of + the sensitivity analysis. + + Parameters + ---------- + eval_args: list + eval_kwargs: list& dict + Parameters for the val_funtion, the val_function will be called like + evaluator(\*eval_args, \*\*eval_kwargs) + finetune_args: list + finetune_kwargs: dict + Parameters for the finetuner function if needed. + resume_sensitivity: + resume the sensitivity results from this file. + """ + # pylint suggest not use the empty list and dict + # as the default input parameter + if not eval_args: + eval_args = [] + if not eval_kwargs: + eval_kwargs = {} + if not finetune_args: + finetune_args = [] + if not finetune_kwargs: + finetune_kwargs = {} + if self.ori_acc is None: + self.ori_acc = self.evaluator(*eval_args, **eval_kwargs) + assert isinstance(self.ori_acc, float) or isinstance(self.ori_acc, int) + if not resume_sensitivity: + self.sensitivities = self.analyzer.analysis( + val_args=eval_args, val_kwargs=eval_kwargs) + else: + self.sensitivities = self.load_sensitivity(resume_sensitivity) + self.analyzer.sensitivities = self.sensitivities + # the final target sparsity of the model + target_ratio = 1 - self.config_list[0]['sparsity'] + cur_ratio = self.remained_ratio + ori_acc = self.ori_acc + iteration_count = 0 + if self.checkpoint_dir is not None: + os.makedirs(self.checkpoint_dir, exist_ok=True) + modules_wrapper_final = None + while cur_ratio > target_ratio: + iteration_count += 1 + # Each round have three steps: + # 1) Get the current sensitivity for each layer(the sensitivity + # of each layer may change during the pruning) + # 2) Prune each layer according the sensitivies + # 3) finetune the model + _logger.info('Current base accuracy %f', ori_acc) + _logger.info('Remained %f weights', cur_ratio) + # determine the sparsity proportion between different + # layers according to the sensitivity result + proportion = self.sparsity_proportion_calc( + ori_acc, self.acc_drop_threshold, self.sensitivities) + + new_pruneratio = self.normalize(proportion, self.sparsity_per_iter) + cfg_list = self.create_cfg(new_pruneratio) + if not cfg_list: + _logger.error('The threshold is too small, please set a larger threshold') + return self.model + _logger.debug('Pruner Config: %s', str(cfg_list)) + cfg_str = ['%s:%.3f'%(cfg['op_names'][0], cfg['sparsity']) for cfg in cfg_list] + _logger.info('Current Sparsities: %s', ','.join(cfg_str)) + + pruner = self.Pruner(self.model, cfg_list) + pruner.compress() + pruned_acc = self.evaluator(*eval_args, **eval_kwargs) + _logger.info('Accuracy after pruning: %f', pruned_acc) + finetune_acc = pruned_acc + if self.finetuner is not None: + # if the finetune function is None, then skip the finetune + self.finetuner(*finetune_args, **finetune_kwargs) + finetune_acc = self.evaluator(*eval_args, **eval_kwargs) + _logger.info('Accuracy after finetune: %f', finetune_acc) + ori_acc = finetune_acc + # unwrap the pruner + pruner._unwrap_model() + # update the already prune ratio of each layer befor the new + # sensitivity analysis + for layer_cfg in cfg_list: + name = layer_cfg['op_names'][0] + sparsity = layer_cfg['sparsity'] 
+ self.analyzer.already_pruned[name] = sparsity + # update the cur_ratio + cur_ratio = 1 - self.current_sparsity() + modules_wrapper_final = pruner.get_modules_wrapper() + del pruner + _logger.info('Currently remained weights: %f', cur_ratio) + + if self.checkpoint_dir is not None: + checkpoint_name = 'Iter_%d_finetune_acc_%.5f_sparsity_%.4f' % ( + iteration_count, finetune_acc, cur_ratio) + checkpoint_path = os.path.join( + self.checkpoint_dir, '%s.pth' % checkpoint_name) + cfg_path = os.path.join( + self.checkpoint_dir, '%s_pruner.json' % checkpoint_name) + sensitivity_path = os.path.join( + self.checkpoint_dir, '%s_sensitivity.csv' % checkpoint_name) + torch.save(self.model.state_dict(), checkpoint_path) + with open(cfg_path, 'w') as jf: + json.dump(cfg_list, jf) + self.analyzer.export(sensitivity_path) + + if cur_ratio > target_ratio: + # If this is the last prune iteration, skip the time-consuming + # sensitivity analysis + + self.analyzer.load_state_dict(self.model.state_dict()) + self.sensitivities = self.analyzer.analysis( + val_args=eval_args, val_kwargs=eval_kwargs) + + _logger.info('After Pruning: %.2f weights remains', cur_ratio) + self.modules_wrapper = modules_wrapper_final + + self._wrap_model() + return self.model + + def calc_mask(self, wrapper, **kwargs): + return None diff --git a/nni/algorithms/compression/pytorch/pruning/simulated_annealing_pruner.py b/nni/algorithms/compression/pytorch/pruning/simulated_annealing_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..b501e34aef5a134c3b0ad501c03043f516ac8906 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/simulated_annealing_pruner.py @@ -0,0 +1,359 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import math +import copy +import csv +import json +import numpy as np +from schema import And, Optional + +from nni.utils import OptimizeMode + +from nni.compression.pytorch.compressor import Pruner +from nni.compression.pytorch.utils.config_validation import PrunerSchema +from .constants_pruner import PRUNER_DICT + + +_logger = logging.getLogger(__name__) + + +class SimulatedAnnealingPruner(Pruner): + """ + A Pytorch implementation of Simulated Annealing compression algorithm. + + Parameters + ---------- + model : pytorch model + The model to be pruned. + config_list : list + Supported keys: + - sparsity : The target overall sparsity. + - op_types : The operation type to prune. + evaluator : function + Function to evaluate the pruned model. + This function should include `model` as the only parameter, and returns a scalar value. + Example:: + + def evaluator(model): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + val_loader = ... + model.eval() + correct = 0 + with torch.no_grad(): + for data, target in val_loader: + data, target = data.to(device), target.to(device) + output = model(data) + # get the index of the max log-probability + pred = output.argmax(dim=1, keepdim=True) + correct += pred.eq(target.view_as(pred)).sum().item() + accuracy = correct / len(val_loader.dataset) + return accuracy + optimize_mode : str + Optimize mode, `maximize` or `minimize`, by default `maximize`. + base_algo : str + Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops, + the assigned `base_algo` is used to decide which filters/channels/weights to prune. + start_temperature : float + Start temperature of the simulated annealing process. 
+ stop_temperature : float + Stop temperature of the simulated annealing process. + cool_down_rate : float + Cool down rate of the temperature. + perturbation_magnitude : float + Initial perturbation magnitude to the sparsities. The magnitude decreases with current temperature. + experiment_data_dir : string + PATH to save experiment data, + including the config_list generated for the base pruning algorithm, the performance of the pruned model and the pruning history. + + """ + + def __init__(self, model, config_list, evaluator, optimize_mode='maximize', base_algo='l1', + start_temperature=100, stop_temperature=20, cool_down_rate=0.9, perturbation_magnitude=0.35, experiment_data_dir='./'): + # original model + self._model_to_prune = copy.deepcopy(model) + self._base_algo = base_algo + + super().__init__(model, config_list) + + self._evaluator = evaluator + self._optimize_mode = OptimizeMode(optimize_mode) + + # hyper parameters for SA algorithm + self._start_temperature = start_temperature + self._current_temperature = start_temperature + self._stop_temperature = stop_temperature + self._cool_down_rate = cool_down_rate + self._perturbation_magnitude = perturbation_magnitude + + # overall pruning rate + self._sparsity = config_list[0]['sparsity'] + # pruning rates of the layers + self._sparsities = None + + # init current performance & best performance + self._current_performance = -np.inf + self._best_performance = -np.inf + self._best_config_list = [] + + self._search_history = [] + + self._experiment_data_dir = experiment_data_dir + if not os.path.exists(self._experiment_data_dir): + os.makedirs(self._experiment_data_dir) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + + if self._base_algo == 'level': + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + elif self._base_algo in ['l1', 'l2', 'fpgm']: + schema = PrunerSchema([{ + Optional('sparsity'): And(float, lambda n: 0 < n < 1), + 'op_types': ['Conv2d'], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, _logger) + + schema.validate(config_list) + + def _sparsities_2_config_list(self, sparsities): + ''' + convert sparsities vector into config_list for LevelPruner or L1FilterPruner + + Parameters + ---------- + sparsities : list + list of sparsities + + Returns + ------- + list of dict + config_list for LevelPruner or L1FilterPruner + ''' + config_list = [] + + sparsities = sorted(sparsities) + self.modules_wrapper = sorted( + self.modules_wrapper, key=lambda wrapper: wrapper.module.weight.data.numel()) + + # a layer with more weights will have no less pruning rate + for idx, wrapper in enumerate(self.get_modules_wrapper()): + # L1Filter Pruner requires to specify op_types + if self._base_algo in ['l1', 'l2', 'fpgm']: + config_list.append( + {'sparsity': sparsities[idx], 'op_types': ['Conv2d'], 'op_names': [wrapper.name]}) + elif self._base_algo == 'level': + config_list.append( + {'sparsity': sparsities[idx], 'op_names': [wrapper.name]}) + + config_list = [val for val in config_list if not math.isclose(val['sparsity'], 0, abs_tol=1e-6)] + + return config_list + + def _rescale_sparsities(self, sparsities, target_sparsity): + ''' + Rescale the sparsities list to satisfy the target overall sparsity + + Parameters + ---------- + sparsities : 
list + List of sparsities to be rescaled + + target_sparsity : float + the target overall sparsity + + Returns + ------- + list + the rescaled sparsities + ''' + num_weights = [] + for wrapper in self.get_modules_wrapper(): + num_weights.append(wrapper.module.weight.data.numel()) + + num_weights = sorted(num_weights) + sparsities = sorted(sparsities) + + total_weights = 0 + total_weights_pruned = 0 + + # calculate the scale + for idx, num_weight in enumerate(num_weights): + total_weights += num_weight + total_weights_pruned += int(num_weight*sparsities[idx]) + if total_weights_pruned == 0: + return None + scale = target_sparsity / (total_weights_pruned/total_weights) + + # rescale the sparsities + sparsities = np.asarray(sparsities)*scale + + return sparsities + + def _init_sparsities(self): + ''' + Generate a sorted sparsities vector + ''' + # repeatedly generate a distribution until it satisfies the overall sparsity requirement + _logger.info('Generating sparsities...') + while True: + sparsities = sorted(np.random.uniform( + 0, 1, len(self.get_modules_wrapper()))) + + sparsities = self._rescale_sparsities( + sparsities, target_sparsity=self._sparsity) + + if sparsities is not None and sparsities[0] >= 0 and sparsities[-1] < 1: + _logger.info('Initial sparsities generated : %s', sparsities) + self._sparsities = sparsities + break + + def _generate_perturbations(self): + ''' + Generate a perturbation to the current sparsities distribution. + + Returns + ------- + list + perturbed sparsities + ''' + _logger.info("Generating perturbations to the current sparsities...") + + # decrease magnitude with current temperature + magnitude = self._current_temperature / \ + self._start_temperature * self._perturbation_magnitude + _logger.info('current perturbation magnitude: %s', magnitude) + + while True: + perturbation = np.random.uniform(-magnitude, magnitude, len(self.get_modules_wrapper())) + # clip negative sparsities to zero (np.clip(a, a_min, a_max)) + sparsities = np.clip(self._sparsities + perturbation, 0, None) + _logger.debug("sparsities before rescaling: %s", sparsities) + + sparsities = self._rescale_sparsities(sparsities, target_sparsity=self._sparsity) + _logger.debug("sparsities after rescaling: %s", sparsities) + + if sparsities is not None and sparsities[0] >= 0 and sparsities[-1] < 1: + _logger.info("Sparsities perturbed: %s", sparsities) + return sparsities + + def calc_mask(self, wrapper, **kwargs): + return None + + def compress(self, return_config_list=False): + """ + Compress the model with Simulated Annealing. + + Returns + ------- + torch.nn.Module + model with specified modules compressed.
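+ + A worse candidate is still accepted with probability exp(-|delta_E| / T), where + delta_E is the performance gap and T the current temperature (see the loop below), + so early iterations explore broadly and later ones converge. A minimal usage + sketch (assuming a user-defined ``evaluator`` as documented in the class docstring):: + + pruner = SimulatedAnnealingPruner( + model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}], + evaluator=evaluator, cool_down_rate=0.9) + pruned_model = pruner.compress()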
+ """ + _logger.info('Starting Simulated Annealing Compression...') + + # initiaze a randomized action + pruning_iteration = 0 + self._init_sparsities() + + # stop condition + self._current_temperature = self._start_temperature + while self._current_temperature > self._stop_temperature: + _logger.info('Pruning iteration: %d', pruning_iteration) + _logger.info('Current temperature: %d, Stop temperature: %d', + self._current_temperature, self._stop_temperature) + while True: + # generate perturbation + sparsities_perturbated = self._generate_perturbations() + config_list = self._sparsities_2_config_list( + sparsities_perturbated) + _logger.info( + "config_list for Pruner generated: %s", config_list) + + # fast evaluation + pruner = PRUNER_DICT[self._base_algo](copy.deepcopy(self._model_to_prune), config_list) + model_masked = pruner.compress() + evaluation_result = self._evaluator(model_masked) + + self._search_history.append( + {'sparsity': self._sparsity, 'performance': evaluation_result, 'config_list': config_list}) + + if self._optimize_mode is OptimizeMode.Minimize: + evaluation_result *= -1 + + # if better evaluation result, then accept the perturbation + if evaluation_result > self._current_performance: + self._current_performance = evaluation_result + self._sparsities = sparsities_perturbated + + # save best performance and best params + if evaluation_result > self._best_performance: + _logger.info('updating best model...') + self._best_performance = evaluation_result + self._best_config_list = config_list + + # save the overall best masked model + self.bound_model = model_masked + # the ops with sparsity 0 are not included in this modules_wrapper + modules_wrapper_final = pruner.get_modules_wrapper() + break + # if not, accept with probability e^(-deltaE/current_temperature) + else: + delta_E = np.abs(evaluation_result - + self._current_performance) + probability = math.exp(-1 * delta_E / + self._current_temperature) + if np.random.uniform(0, 1) < probability: + self._current_performance = evaluation_result + self._sparsities = sparsities_perturbated + break + + # cool down + self._current_temperature *= self._cool_down_rate + pruning_iteration += 1 + + _logger.info('----------Compression finished--------------') + _logger.info('Best performance: %s', self._best_performance) + _logger.info('config_list found : %s', + self._best_config_list) + + # save search history + with open(os.path.join(self._experiment_data_dir, 'search_history.csv'), 'w') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=['sparsity', 'performance', 'config_list']) + writer.writeheader() + for item in self._search_history: + writer.writerow({'sparsity': item['sparsity'], 'performance': item['performance'], 'config_list': json.dumps( + item['config_list'])}) + + # save best config found and best performance + if self._optimize_mode is OptimizeMode.Minimize: + self._best_performance *= -1 + with open(os.path.join(self._experiment_data_dir, 'search_result.json'), 'w+') as jsonfile: + json.dump({ + 'performance': self._best_performance, + 'config_list': json.dumps(self._best_config_list) + }, jsonfile) + + _logger.info('search history and result saved to foler : %s', + self._experiment_data_dir) + + if return_config_list: + return self._best_config_list + + # This should be done only at the final stage, + # because the modules_wrapper with all the ops are used during the annealing process + self.modules_wrapper = modules_wrapper_final + + return self.bound_model diff --git 
a/nni/algorithms/compression/pytorch/pruning/structured_pruning_masker.py b/nni/algorithms/compression/pytorch/pruning/structured_pruning_masker.py new file mode 100644 index 0000000000000000000000000000000000000000..65e0204dc8ddd6b1d49c0f1eb7be26ef1d996f84 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/structured_pruning_masker.py @@ -0,0 +1,892 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import math +import numpy as np +import torch +from .weight_masker import WeightMasker + +__all__ = ['L1FilterPrunerMasker', 'L2FilterPrunerMasker', 'FPGMPrunerMasker', + 'TaylorFOWeightFilterPrunerMasker', 'ActivationAPoZRankFilterPrunerMasker', + 'ActivationMeanRankFilterPrunerMasker', 'SlimPrunerMasker', 'AMCWeightMasker'] + +logger = logging.getLogger('torch filter pruners') + + +class StructuredWeightMasker(WeightMasker): + """ + A structured pruning masker base class that prunes convolutional layer filters. + + Parameters + ---------- + model: nn.Module + model to be pruned + pruner: Pruner + A Pruner instance used to prune the model + preserve_round: int + after pruning, preserve filters/channels round to `preserve_round`, for example: + for a Conv2d layer, output channel is 32, sparsity is 0.2, if preserve_round is + 1 (no preserve round), then there will be int(32 * 0.2) = 6 filters pruned, and + 32 - 6 = 26 filters are preserved. If preserve_round is 4, preserved filters will + be round up to 28 (which can be divided by 4) and only 4 filters are pruned. + + """ + + def __init__(self, model, pruner, preserve_round=1, dependency_aware=False, global_sort=False): + self.model = model + self.pruner = pruner + self.preserve_round = preserve_round + self.dependency_aware = dependency_aware + self.global_sort = global_sort + + def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs): + """ + calculate the mask for `wrapper`. + + Parameters + ---------- + sparsity: float/list of float + The target sparsity of the wrapper. If we calculate the mask in + the normal way, then sparsity is a float number. In contrast, if + we calculate the mask in the dependency-aware way, sparsity is a + list of float numbers, each float number corressponds to a sparsity + of a layer. + wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers + The wrapper of the target layer. If we calculate the mask in the normal + way, then `wrapper` is an instance of PrunerModuleWrapper, else `wrapper` + is a list of PrunerModuleWrapper. + wrapper_idx: int/list of int + The index of the wrapper. + depen_kwargs: dict + The kw_args for the dependency-aware mode. + """ + if self.global_sort: + # if the global_sort switch is on, calculate the mask based + # on global model information + return self._global_calc_mask(sparsity, wrapper, wrapper_idx) + elif not self.dependency_aware: + # calculate the mask in the normal way, each layer calculate its + # own mask separately + return self._normal_calc_mask(sparsity, wrapper, wrapper_idx) + else: + # if the dependency_aware switch is on, then calculate the mask + # in the dependency-aware way + return self._dependency_calc_mask(sparsity, wrapper, wrapper_idx, **depen_kwargs) + + def _get_current_state(self, sparsity, wrapper, wrapper_idx=None): + """ + Some pruner may prune the layers in a iterative way. In each pruning iteration, + we may get the current state of this wrapper/layer, and continue to prune this layer + based on the current state. 
This function is to get the current pruning state of the + target wrapper/layer. + Parameters + ---------- + sparsity: float + pruning ratio, preserved weight ratio is `1 - sparsity` + wrapper: PrunerModuleWrapper + layer wrapper of this layer + wrapper_idx: int + index of this wrapper in pruner's all wrappers + Returns + ------- + base_mask: dict + dict object that stores the mask of this wrapper in this iteration, if it is the + first iteration, then we create a new mask with all ones. If there is already a + mask in this wrapper, then we return the existing mask. + weight: tensor + the current weight of this layer + num_prune: int + how many filters we should prune + """ + msg = 'module type {} is not supported!'.format(wrapper.type) + assert wrapper.type == 'Conv2d', msg + weight = wrapper.module.weight.data + bias = None + if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None: + bias = wrapper.module.bias.data + + if wrapper.weight_mask is None: + mask_weight = torch.ones(weight.size()).type_as(weight).detach() + else: + mask_weight = wrapper.weight_mask.clone() + if bias is not None: + if wrapper.bias_mask is None: + mask_bias = torch.ones(bias.size()).type_as(bias).detach() + else: + mask_bias = wrapper.bias_mask.clone() + else: + mask_bias = None + mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias} + + num_total = weight.size(0) + num_prune = int(num_total * sparsity) + if self.preserve_round > 1: + num_preserve = num_total - num_prune + num_preserve = int( + math.ceil(num_preserve * 1. / self.preserve_round) * self.preserve_round) + if num_preserve > num_total: + num_preserve = int(math.floor( + num_total * 1. / self.preserve_round) * self.preserve_round) + num_prune = num_total - num_preserve + # weight*mask_weight: apply base mask for iterative pruning + return mask, weight * mask_weight, num_prune + + def _global_calc_mask(self, sparsity, wrapper, wrapper_idx=None): + num_prune = self._get_global_num_prune(wrapper, wrapper_idx) + mask, weight, _ = self._get_current_state( + sparsity, wrapper, wrapper_idx) + return self.get_mask(mask, weight, num_prune, wrapper, wrapper_idx) + + def _normal_calc_mask(self, sparsity, wrapper, wrapper_idx=None): + """ + Calculate the mask of given layer. + Parameters + ---------- + sparsity: float + pruning ratio, preserved weight ratio is `1 - sparsity` + wrapper: PrunerModuleWrapper + layer wrapper of this layer + wrapper_idx: int + index of this wrapper in pruner's all wrappers + Returns + ------- + dict + dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) + """ + mask, weight, num_prune = self._get_current_state( + sparsity, wrapper, wrapper_idx) + num_total = weight.size(0) + if num_total < 2 or num_prune < 1: + return mask + + return self.get_mask(mask, weight, num_prune, wrapper, wrapper_idx) + + def _common_channel_to_prune(self, sparsities, wrappers, wrappers_idx, channel_dsets, groups): + """ + Calculate the common channels should be pruned by all the layers in this group. + This function is for filter pruning of Conv layers. if want to support the dependency-aware + mode for others ops, you need to inherit this class and overwrite `_common_channel_to_prune`. + + Parameters + ---------- + sparsities : list + List of float that specify the sparsity for each conv layer. + wrappers : list + List of wrappers + groups : list + The number of the filter groups of each layer. 
+ wrappers_idx : list + The indexes of the wrappers + """ + # sparsity configs for each wrapper + # sparsities = [_w.config['sparsity'] for _w in wrappers] + # check the type of the input wrappers + for _w in wrappers: + msg = 'module type {} is not supported!'.format(_w.type) + assert _w.type == 'Conv2d', msg + # Among the dependent layers, the layer with smallest + # sparsity determines the final benefit of the speedup + # module. To better harvest the speed benefit, we need + # to ensure that these dependent layers have at least + # `min_sparsity` pruned channel are the same. + if len(channel_dsets) == len(wrappers): + # all the layers in the dependency sets are pruned + min_sparsity = min(sparsities) + else: + # not all the layers in the dependency set + # are pruned + min_sparsity = 0 + # donnot prune the channels that we cannot harvest the speed from + sparsities = [min_sparsity] * len(sparsities) + # find the max number of the filter groups of the dependent + # layers. The group constraint of this dependency set is decided + # by the layer with the max groups. + + # should use the least common multiple for all the groups + # the max_group is lower than the channel_count, because + # the number of the filter is always divisible by the number of the group + max_group = np.lcm.reduce(groups) + channel_count = wrappers[0].module.weight.data.size(0) + device = wrappers[0].module.weight.device + channel_sum = torch.zeros(channel_count).to(device) + for _w, _w_idx in zip(wrappers, wrappers_idx): + # calculate the L1/L2 sum for all channels + c_sum = self.get_channel_sum(_w, _w_idx) + + if c_sum is None: + # if the channel sum cannot be calculated + # now, return None + return None + channel_sum += c_sum + + # prune the same `min_sparsity` channels based on channel_sum + # for all the layers in the channel sparsity + target_pruned = int(channel_count * min_sparsity) + # pruned_per_group may be zero, for example dw conv + pruned_per_group = int(target_pruned / max_group) + group_step = int(channel_count / max_group) + + channel_masks = [] + for gid in range(max_group): + _start = gid * group_step + _end = (gid + 1) * group_step + if pruned_per_group > 0: + threshold = torch.topk( + channel_sum[_start: _end], pruned_per_group, largest=False)[0].max() + group_mask = torch.gt(channel_sum[_start:_end], threshold) + else: + group_mask = torch.ones(group_step).to(device) + channel_masks.append(group_mask) + channel_masks = torch.cat(channel_masks, dim=0) + pruned_channel_index = ( + channel_masks == False).nonzero().squeeze(1).tolist() + logger.info('Prune the %s channels for all dependent', + ','.join([str(x) for x in pruned_channel_index])) + return channel_masks + + def _dependency_calc_mask(self, sparsities, wrappers, wrappers_idx, channel_dsets, groups): + """ + Calculate the masks for the layers in the same dependency sets. + Similar to the traditional original calc_mask, _dependency_calc_mask + will prune the target layers based on the L1/L2 norm of the weights. + However, StructuredWeightMasker prunes the filter completely based on the + L1/L2 norm of each filter. In contrast, _dependency_calc_mask + will try to satisfy the channel/group dependency(see nni.compression.torch. + utils.shape_dependency for details). Specifically, _dependency_calc_mask + will try to prune the same channels for the layers that have channel dependency. + In addition, this mask calculator will also ensure that the number of filters + pruned in each group is the same(meet the group dependency). 
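+ For example (illustrative numbers): if two dependent conv layers have filter + groups [2, 4], the least common multiple 4 splits the output channels into four + equal segments and the same number of channels is pruned from each segment, so + every group convolution in the set remains valid after speedup.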
+ + Parameters + ---------- + sparsities : list + List of float that specify the sparsity for each conv layer. + wrappers : list + List of wrappers + groups : list + The number of the filter groups of each layer. + wrappers_idx : list + The indexes of the wrappers + """ + channel_masks = self._common_channel_to_prune( + sparsities, wrappers, wrappers_idx, channel_dsets, groups) + # calculate the mask for each layer based on channel_masks, first + # every layer will prune the same channels masked in channel_masks. + # If the sparsity of a layers is larger than min_sparsity, then it + # will continue prune sparsity - min_sparsity channels to meet the sparsity + # config. + masks = {} + for _pos, _w in enumerate(wrappers): + _w_idx = wrappers_idx[_pos] + sparsity = sparsities[_pos] + name = _w.name + + # _tmp_mask = self._normal_calc_mask( + # sparsity, _w, _w_idx, channel_masks) + base_mask, current_weight, num_prune = self._get_current_state( + sparsity, _w, _w_idx) + num_total = current_weight.size(0) + if num_total < 2 or num_prune < 1: + masks[name] = base_mask + continue + _tmp_mask = self.get_mask( + base_mask, current_weight, num_prune, _w, _w_idx, channel_masks) + + if _tmp_mask is None: + # if the mask calculation fails + return None + masks[name] = _tmp_mask + return masks + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + """ + Calculate the mask of given layer. + + Parameters + ---------- + base_mask: dict + The basic mask with the same shape of weight, all item in the basic mask is 1. + weight: tensor + the module weight to be pruned + num_prune: int + Num of filters to prune + wrapper: PrunerModuleWrapper + layer wrapper of this layer + wrapper_idx: int + index of this wrapper in pruner's all wrappers + channel_masks: Tensor + If mask some channels for this layer in advance. In the dependency-aware + mode, before calculating the masks for each layer, we will calculate a common + mask for all the layers in the dependency set. For the pruners that doesnot + support dependency-aware mode, they can just ignore this parameter. + + Returns + ------- + dict + dictionary for storing masks + """ + raise NotImplementedError( + '{} get_mask is not implemented'.format(self.__class__.__name__)) + + def get_channel_sum(self, wrapper, wrapper_idx): + """ + Calculate the importance weight for each channel. If want to support the + dependency-aware mode for this one-shot pruner, this function must be + implemented. + Parameters + ---------- + wrapper: PrunerModuleWrapper + layer wrapper of this layer + wrapper_idx: int + index of this wrapper in pruner's all wrappers + Returns + ------- + tensor + Tensor that indicates the importance of each channel + """ + raise NotImplementedError( + '{} get_channel_sum is not implemented'.format(self.__class__.__name__)) + + +class L1FilterPrunerMasker(StructuredWeightMasker): + """ + A structured pruning algorithm that prunes the filters of smallest magnitude + weights sum in the convolution layers to achieve a preset level of network sparsity. 
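+ Concretely, each filter F is scored by the sum of the absolute values of its + weights, score(F) = sum(|w| for w in F) (see get_channel_sum below), and the + num_prune filters with the smallest scores are masked.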
+ Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet and Hans Peter Graf, + "PRUNING FILTERS FOR EFFICIENT CONVNETS", 2017 ICLR + https://arxiv.org/abs/1608.08710 + """ + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + # get the l1-norm sum for each filter + w_abs_structured = self.get_channel_sum(wrapper, wrapper_idx) + if channel_masks is not None: + # if we need to mask some channels in advance + w_abs_structured = w_abs_structured * channel_masks + threshold = torch.topk(w_abs_structured.view(-1), + num_prune, largest=False)[0].max() + mask_weight = torch.gt(w_abs_structured, threshold)[ + :, None, None, None].expand_as(weight).type_as(weight) + mask_bias = torch.gt(w_abs_structured, threshold).type_as( + weight).detach() if base_mask['bias_mask'] is not None else None + + return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias} + + def get_channel_sum(self, wrapper, wrapper_idx): + weight = wrapper.module.weight.data + filters = weight.shape[0] + w_abs = weight.abs() + w_abs_structured = w_abs.view(filters, -1).sum(dim=1) + return w_abs_structured + + +class L2FilterPrunerMasker(StructuredWeightMasker): + """ + A structured pruning algorithm that prunes the filters with the + smallest L2 norm of the weights. + """ + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + # get the l2-norm sum for each filter + w_l2_norm = self.get_channel_sum(wrapper, wrapper_idx) + if channel_masks is not None: + # if we need to mask some channels in advance + w_l2_norm = w_l2_norm * channel_masks + threshold = torch.topk( + w_l2_norm.view(-1), num_prune, largest=False)[0].max() + mask_weight = torch.gt(w_l2_norm, threshold)[ + :, None, None, None].expand_as(weight).type_as(weight) + mask_bias = torch.gt(w_l2_norm, threshold).type_as( + weight).detach() if base_mask['bias_mask'] is not None else None + + return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias} + + def get_channel_sum(self, wrapper, wrapper_idx): + weight = wrapper.module.weight.data + filters = weight.shape[0] + w = weight.view(filters, -1) + w_l2_norm = torch.sqrt((w ** 2).sum(dim=1)) + return w_l2_norm + + +class FPGMPrunerMasker(StructuredWeightMasker): + """ + A filter pruner via geometric median. + "Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration", + https://arxiv.org/pdf/1811.00250.pdf + """ + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + min_gm_idx = self._get_min_gm_kernel_idx( + num_prune, wrapper, wrapper_idx, channel_masks) + for idx in min_gm_idx: + base_mask['weight_mask'][idx] = 0. + if base_mask['bias_mask'] is not None: + base_mask['bias_mask'][idx] = 0. + return base_mask + + def _get_min_gm_kernel_idx(self, num_prune, wrapper, wrapper_idx, channel_masks): + channel_dist = self.get_channel_sum(wrapper, wrapper_idx) + if channel_masks is not None: + channel_dist = channel_dist * channel_masks + dist_list = [(channel_dist[i], i) + for i in range(channel_dist.size(0))] + min_gm_kernels = sorted(dist_list, key=lambda x: x[0])[:num_prune] + return [x[1] for x in min_gm_kernels] + + def _get_distance_sum(self, weight, out_idx): + """ + Calculate the total distance between a specified filter (by out_idex and in_idx) and + all other filters. 
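+ In geometric-median terms, the score of the filter at out_idx is + sum_j ||w_i - w_j||_2 over all filters j of the layer; filters with the smallest + total distance are the most redundant and are pruned first.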
+ Parameters + ---------- + weight: Tensor + convolutional filter weight + out_idx: int + output channel index of specified filter, this method calculates the total distance + between this specified filter and all other filters. + Returns + ------- + float32 + The total distance + """ + logger.debug('weight size: %s', weight.size()) + assert len(weight.size()) in [3, 4], 'unsupported weight shape' + + w = weight.view(weight.size(0), -1) + anchor_w = w[out_idx].unsqueeze(0).expand(w.size(0), w.size(1)) + x = w - anchor_w + x = (x * x).sum(-1) + x = torch.sqrt(x) + return x.sum() + + def get_channel_sum(self, wrapper, wrapper_idx): + weight = wrapper.module.weight.data + assert len(weight.size()) in [3, 4] + dist_list = [] + for out_i in range(weight.size(0)): + dist_sum = self._get_distance_sum(weight, out_i) + dist_list.append(dist_sum) + return torch.Tensor(dist_list).to(weight.device) + + +class TaylorFOWeightFilterPrunerMasker(StructuredWeightMasker): + """ + A structured pruning algorithm that prunes the filters with the smallest + importance approximations based on the first order taylor expansion on the weight. + Molchanov, Pavlo and Mallya, Arun and Tyree, Stephen and Frosio, Iuri and Kautz, Jan, + "Importance Estimation for Neural Network Pruning", CVPR 2019. + http://jankautz.com/publications/Importance4NNPruning_CVPR19.pdf + """ + + def __init__(self, model, pruner, statistics_batch_num=1): + super().__init__(model, pruner) + self.statistics_batch_num = statistics_batch_num + self.pruner.iterations = 0 + self.pruner.set_wrappers_attribute("contribution", None) + self.pruner.patch_optimizer(self.calc_contributions) + self.global_threshold = None + + def _get_global_threshold(self): + channel_contribution_list = [] + for wrapper_idx, wrapper in enumerate(self.pruner.get_modules_wrapper()): + channel_contribution = self.get_channel_sum(wrapper, wrapper_idx) + wrapper_size = wrapper.module.weight.size().numel() + channel_size = wrapper.module.weight.size(0) + contribution_expand = channel_contribution.expand(int(wrapper_size / channel_size), channel_size).reshape(-1) + channel_contribution_list.append(contribution_expand) + all_channel_contributions = torch.cat(channel_contribution_list) + k = int(all_channel_contributions.shape[0] * self.pruner.config_list[0]['sparsity']) + self.global_threshold = torch.topk( + all_channel_contributions.view(-1), k, largest=False)[0].max() + + def _get_global_num_prune(self, wrapper, wrapper_idx): + if self.global_threshold is None: + self._get_global_threshold() + weight = wrapper.module.weight.data + filters = weight.size(0) + channel_contribution = self.get_channel_sum(wrapper, wrapper_idx) + num_prune = channel_contribution[channel_contribution < self.global_threshold].size()[0] + if num_prune == filters: + num_prune -= 1 + return num_prune + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + channel_contribution = self.get_channel_sum(wrapper, wrapper_idx) + if channel_contribution is None: + # iteration is not enough + return None + if channel_masks is not None: + channel_contribution = channel_contribution * channel_masks + prune_indices = torch.argsort(channel_contribution)[:num_prune] + for idx in prune_indices: + base_mask['weight_mask'][idx] = 0. + if base_mask['bias_mask'] is not None: + base_mask['bias_mask'][idx] = 0. 
+ return base_mask + + def calc_contributions(self): + """ + Calculate the estimated importance of filters as a sum of individual contribution + based on the first order taylor expansion. + """ + if self.pruner.iterations >= self.statistics_batch_num: + return + + for wrapper in self.pruner.get_modules_wrapper(): + filters = wrapper.module.weight.size(0) + contribution = ( + wrapper.module.weight * wrapper.module.weight.grad).data.pow(2).view(filters, -1).sum(dim=1) + if wrapper.contribution is None: + wrapper.contribution = contribution + else: + wrapper.contribution += contribution + + self.pruner.iterations += 1 + + def get_channel_sum(self, wrapper, wrapper_idx): + if self.pruner.iterations < self.statistics_batch_num: + return None + if wrapper.contribution is None: + return None + return wrapper.contribution + + +class ActivationFilterPrunerMasker(StructuredWeightMasker): + def __init__(self, model, pruner, statistics_batch_num=1, activation='relu'): + super().__init__(model, pruner) + self.statistics_batch_num = statistics_batch_num + self.pruner.hook_id = self._add_activation_collector(self.pruner) + self.pruner.iterations = 0 + self.pruner.patch_optimizer(self._iteration_counter) + + assert activation in ['relu', 'relu6'] + if activation == 'relu': + self.pruner.activation = torch.nn.functional.relu + elif activation == 'relu6': + self.pruner.activation = torch.nn.functional.relu6 + else: + self.pruner.activation = None + + def _iteration_counter(self): + self.pruner.iterations += 1 + + def _add_activation_collector(self, pruner): + def collector(collected_activation): + def hook(module_, input_, output): + collected_activation.append( + pruner.activation(output.detach().cpu())) + return hook + pruner.collected_activation = {} + pruner._fwd_hook_id += 1 + pruner._fwd_hook_handles[pruner._fwd_hook_id] = [] + + for wrapper_idx, wrapper in enumerate(pruner.get_modules_wrapper()): + pruner.collected_activation[wrapper_idx] = [] + handle = wrapper.register_forward_hook( + collector(pruner.collected_activation[wrapper_idx])) + + pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle) + return pruner._fwd_hook_id + + +class ActivationAPoZRankFilterPrunerMasker(ActivationFilterPrunerMasker): + """ + A structured pruning algorithm that prunes the filters with the + smallest APoZ(average percentage of zeros) of output activations. + Hengyuan Hu, Rui Peng, Yu-Wing Tai and Chi-Keung Tang, + "Network Trimming: A Data-Driven Neuron Pruning Approach towards Efficient Deep Architectures", ICLR 2016. + https://arxiv.org/abs/1607.03250 + """ + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + apoz = self.get_channel_sum(wrapper, wrapper_idx) + if apoz is None: + # the collected activations are not enough + return None + if channel_masks is not None: + apoz = apoz * channel_masks + + prune_indices = torch.argsort(apoz)[:num_prune] + for idx in prune_indices: + base_mask['weight_mask'][idx] = 0. + if base_mask['bias_mask'] is not None: + base_mask['bias_mask'][idx] = 0. + + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return base_mask + + def _calc_apoz(self, activations): + """ + Calculate APoZ(average percentage of zeros) of activations. 
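+ For a filter c, APoZ(c) = (number of zero entries in channel c across the + collected batches) / (total number of entries in channel c). Note that this + helper returns 1 - APoZ, so filters with the most zero activations get the + smallest scores and are pruned first, consistent with the other maskers.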
+ + Parameters + ---------- + activations : list + Layer's output activations + + Returns + ------- + torch.Tensor + Filter's APoZ(average percentage of zeros) of the activations + """ + activations = torch.cat(activations, 0) + _eq_zero = torch.eq(activations, torch.zeros_like(activations)) + _apoz = torch.sum(_eq_zero, dim=(0, 2, 3), dtype=torch.float64) / \ + torch.numel(_eq_zero[:, 0, :, :]) + return torch.ones_like(_apoz) - _apoz + + def get_channel_sum(self, wrapper, wrapper_idx): + assert wrapper_idx is not None + activations = self.pruner.collected_activation[wrapper_idx] + if len(activations) < self.statistics_batch_num: + # collected activations is not enough + return None + return self._calc_apoz(activations).to(wrapper.module.weight.device) + + +class ActivationMeanRankFilterPrunerMasker(ActivationFilterPrunerMasker): + """ + A structured pruning algorithm that prunes the filters with the + smallest mean value of output activations. + Pavlo Molchanov, Stephen Tyree, Tero Karras, Timo Aila and Jan Kautz, + "Pruning Convolutional Neural Networks for Resource Efficient Inference", ICLR 2017. + https://arxiv.org/abs/1611.06440 + """ + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, channel_masks=None): + + mean_activation = self.get_channel_sum(wrapper, wrapper_idx) + if mean_activation is None: + # the collected activation is not enough + return None + if channel_masks is not None: + mean_activation = mean_activation * channel_masks + + prune_indices = torch.argsort(mean_activation)[:num_prune] + for idx in prune_indices: + base_mask['weight_mask'][idx] = 0. + if base_mask['bias_mask'] is not None: + base_mask['bias_mask'][idx] = 0. + # if len(activations) < self.statistics_batch_num, the code + # cannot reach here + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return base_mask + + def _cal_mean_activation(self, activations): + """ + Calculate mean value of activations. + + Parameters + ---------- + activations : list + Layer's output activations + + Returns + ------- + torch.Tensor + Filter's mean value of the output activations + """ + activations = torch.cat(activations, 0) + mean_activation = torch.mean(activations, dim=(0, 2, 3)) + return mean_activation + + def get_channel_sum(self, wrapper, wrapper_idx): + assert wrapper_idx is not None + activations = self.pruner.collected_activation[wrapper_idx] + if len(activations) < self.statistics_batch_num: + return None + # the memory overhead here is acceptable, because only + # the mean_activation tensor returned by _cal_mean_activation + # is transfer to gpu. + return self._cal_mean_activation(activations).to(wrapper.module.weight.device) + + +class SlimPrunerMasker(WeightMasker): + """ + A structured pruning algorithm that prunes channels by pruning the weights of BN layers. 
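+    The scaling factors of all BN layers are gathered, and channels whose factors fall below a
+    single global magnitude threshold (determined by the configured sparsity) are pruned.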
+    Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan and Changshui Zhang,
+    "Learning Efficient Convolutional Networks through Network Slimming", ICCV 2017.
+    https://arxiv.org/pdf/1708.06519.pdf
+    """
+
+    def __init__(self, model, pruner, **kwargs):
+        super().__init__(model, pruner)
+        self.global_threshold = None
+
+    def _get_global_threshold(self):
+        weight_list = []
+        for (layer, _) in self.pruner.get_modules_to_compress():
+            weight_list.append(layer.module.weight.data.abs().clone())
+        all_bn_weights = torch.cat(weight_list)
+        k = int(all_bn_weights.shape[0] * self.pruner.config_list[0]['sparsity'])
+        self.global_threshold = torch.topk(
+            all_bn_weights.view(-1), k, largest=False)[0].max()
+        logger.info('set global threshold to %s', self.global_threshold)
+
+    def calc_mask(self, sparsity, wrapper, wrapper_idx=None):
+        assert wrapper.type == 'BatchNorm2d', 'SlimPruner only supports 2d batch normalization layer pruning'
+
+        if self.global_threshold is None:
+            self._get_global_threshold()
+
+        weight = wrapper.module.weight.data.clone()
+        if wrapper.weight_mask is not None:
+            # apply base mask for iterative pruning
+            weight = weight * wrapper.weight_mask
+
+        base_mask = torch.ones(weight.size()).type_as(weight).detach()
+        mask = {'weight_mask': base_mask.detach(), 'bias_mask': base_mask.clone().detach()}
+        filters = weight.size(0)
+        num_prune = int(filters * sparsity)
+        if filters >= 2 and num_prune >= 1:
+            w_abs = weight.abs()
+            mask_weight = torch.gt(w_abs, self.global_threshold).type_as(weight)
+            mask_bias = mask_weight.clone()
+            mask = {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias.detach()}
+        return mask
+
+
+def least_square_sklearn(X, Y):
+    from sklearn.linear_model import LinearRegression
+    reg = LinearRegression(fit_intercept=False)
+    reg.fit(X, Y)
+    return reg.coef_
+
+
+class AMCWeightMasker(WeightMasker):
+    """
+    Weight masker class for AMC pruner. Currently, AMCPruner only supports pruning kernel
+    size 1x1 pointwise Conv2d layers. Before using this class to prune kernels, AMCPruner
+    collects input and output feature maps for each layer; the feature maps are flattened
+    and saved into wrapper.input_feat and wrapper.output_feat.
+
+    Parameters
+    ----------
+    model: nn.Module
+        model to be pruned
+    pruner: Pruner
+        A Pruner instance used to prune the model
+    preserve_round: int
+        after pruning, the number of preserved filters/channels is rounded up to a multiple of
+        `preserve_round`. For example: for a Conv2d layer with 32 output channels and sparsity 0.2,
+        if preserve_round is 1 (no rounding), then int(32 * 0.2) = 6 filters are pruned and
+        32 - 6 = 26 filters are preserved. If preserve_round is 4, the number of preserved filters
+        is rounded up to 28 (divisible by 4), so only 4 filters are pruned.
+    """
+
+    def __init__(self, model, pruner, preserve_round=1):
+        self.model = model
+        self.pruner = pruner
+        self.preserve_round = preserve_round
+
+    def calc_mask(self, sparsity, wrapper, wrapper_idx=None, preserve_idx=None):
+        """
+        Calculate the mask of given layer.
+
+        Parameters
+        ----------
+        sparsity: float
+            pruning ratio, preserved weight ratio is `1 - sparsity`
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        preserve_idx: list of int (optional)
+            if given, these input channels are preserved directly and weight reconstruction
+            is performed against them, instead of selecting channels by magnitude ranking
+
+        Returns
+        -------
+        dict
+            dictionary for storing masks, keys of the dict:
+            'weight_mask': weight mask tensor
+            'bias_mask': bias mask tensor (optional)
+        """
+        msg = 'module type {} is not supported!'.format(wrapper.type)
+        assert wrapper.type in ['Conv2d', 'Linear'], msg
+        weight = wrapper.module.weight.data
+        bias = None
+        if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None:
+            bias = wrapper.module.bias.data
+
+        if wrapper.weight_mask is None:
+            mask_weight = torch.ones(weight.size()).type_as(weight).detach()
+        else:
+            mask_weight = wrapper.weight_mask.clone()
+        if bias is not None:
+            if wrapper.bias_mask is None:
+                mask_bias = torch.ones(bias.size()).type_as(bias).detach()
+            else:
+                mask_bias = wrapper.bias_mask.clone()
+        else:
+            mask_bias = None
+        mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias}
+
+        num_total = weight.size(1)
+        num_prune = int(num_total * sparsity)
+        # define num_preserve unconditionally so the preserve_round == 1 case
+        # does not leave it unbound below
+        num_preserve = num_total - num_prune
+        if self.preserve_round > 1:
+            num_preserve = int(
+                math.ceil(num_preserve * 1. / self.preserve_round) * self.preserve_round)
+            if num_preserve > num_total:
+                num_preserve = num_total
+            num_prune = num_total - num_preserve
+
+        if (num_total < 2 or num_prune < 1) and preserve_idx is None:
+            return mask
+
+        return self.get_mask(mask, weight, num_preserve, wrapper, wrapper_idx, preserve_idx)
+
+    def get_mask(self, base_mask, weight, num_preserve, wrapper, wrapper_idx, preserve_idx):
+        w = weight.data.cpu().numpy()
+        if wrapper.type == 'Linear':
+            w = w[:, :, None, None]
+
+        if preserve_idx is None:
+            # sum magnitude along C_in, sort descending
+            importance = np.abs(w).sum((0, 2, 3))
+            sorted_idx = np.argsort(-importance)
+            d_prime = num_preserve
+            preserve_idx = sorted_idx[:d_prime]  # indices to preserve
+        else:
+            d_prime = len(preserve_idx)
+
+        assert len(preserve_idx) == d_prime
+        mask = np.zeros(w.shape[1], bool)
+        mask[preserve_idx] = True
+
+        # reconstruct, X, Y <= [N, C]
+        X, Y = wrapper.input_feat, wrapper.output_feat
+        masked_X = X[:, mask]
+        if w.shape[2] == 1:  # 1x1 conv or fc
+            rec_weight = least_square_sklearn(X=masked_X, Y=Y)
+            rec_weight = rec_weight.reshape(-1, 1, 1, d_prime)  # (C_out, K_h, K_w, C_in')
+            rec_weight = np.transpose(rec_weight, (0, 3, 1, 2))  # (C_out, C_in', K_h, K_w)
+
+            rec_weight_pad = np.zeros_like(w)
+            # pylint: disable=all
+            rec_weight_pad[:, mask, :, :] = rec_weight
+            rec_weight = rec_weight_pad
+
+        if wrapper.type == 'Linear':
+            rec_weight = rec_weight.squeeze()
+            assert len(rec_weight.shape) == 2
+
+        # assign the reconstructed weight back to the module
+        wrapper.module.weight.data = torch.from_numpy(rec_weight).to(weight.device)
+
+        mask_weight = torch.zeros_like(weight)
+        # initialize mask_bias here so the Linear-without-bias case does not leave it unbound
+        mask_bias = None
+        if wrapper.type == 'Linear':
+            mask_weight[:, preserve_idx] = 1.
+            if base_mask['bias_mask'] is not None and wrapper.module.bias is not None:
+                mask_bias = torch.ones_like(wrapper.module.bias)
+        else:
+            mask_weight[:, preserve_idx, :, :] = 1.
+
+        return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias}
diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f5d9b5dfdcad9625e81a1c6cdc0099a9c133b22
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -0,0 +1,337 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from schema import And, Optional
+
+from nni.common.graph_utils import TorchModuleGraph
+from nni.compression.pytorch.utils.shape_dependency import AttentionWeightDependency
+from nni.compression.pytorch.utils.config_validation import CompressorSchema
+from nni.compression.pytorch.compressor import Pruner
+from . import L1WeightHeadMasker, L2WeightHeadMasker, L1ActivationHeadMasker, L2ActivationHeadMasker, TaylorFOHeadMasker
+
+__all__ = ['TransformerHeadPruner']
+
+MASKER_DICT = {
+    'l1_weight': L1WeightHeadMasker,
+    'l2_weight': L2WeightHeadMasker,
+    'l1_activation': L1ActivationHeadMasker,
+    'l2_activation': L2ActivationHeadMasker,
+    'taylorfo': TaylorFOHeadMasker
+}
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class TransformerHeadPruner(Pruner):
+    """
+    A pruner specialized for pruning attention heads in models belonging to the transformer family.
+
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned. Expects a model from the transformers library (e.g., BertModel).
+        This pruner can work with other customized transformer models, but some ranking modes might fail.
+    config_list : list
+        Supported keys:
+            - sparsity : Target sparsity of the operations to be compressed.
+            - op_types : Optional. Operation types to prune. (Should be 'Linear' for this pruner.)
+            - op_names : Optional. Operation names to prune.
+    head_hidden_dim : int
+        Hidden dimension of each attention head (e.g., 64 for BERT).
+        We assume that head_hidden_dim is constant across the entire model.
+    attention_name_groups : list (Optional)
+        List of groups of names for weights of each attention layer. Each element should be a four-element list, with
+        the first three corresponding to Q_proj, K_proj, V_proj (in any order) and the last one being output_proj.
+    dummy_input : torch.Tensor (Optional)
+        Input to the model's forward method, used to infer module grouping if attention_name_groups is not specified.
+        This tensor is used by the underlying torch.jit.trace to infer the module graph.
+    ranking_criterion : str
+        The criterion for ranking attention heads. Currently we support:
+            - l1_weight: l1 norm of Q_proj, K_proj, and V_proj
+            - l2_weight: l2 norm of Q_proj, K_proj, and V_proj
+            - l1_activation: l1 norm of the output of attention computation
+            - l2_activation: l2 norm of the output of attention computation
+            - taylorfo: l1 norm of the output of attention computation * gradient for this output
+        (check more details in the masker documentation)
+    global_sort : bool
+        Whether to rank the heads globally or locally before deciding which heads to prune.
+    num_iterations : int
+        Number of pruning iterations. Defaults to 1 (one-shot pruning). If num_iterations > 1, the pruner will split
+        the sparsity specified in config_list uniformly and assign a fraction to each pruning iteration.
+    epochs_per_iteration : int
+        Number of finetuning epochs before the next pruning iteration.
+        Only used when num_iterations > 1.
+        If num_iterations is 1, then no finetuning is performed by the pruner after pruning.
+    optimizer: torch.optim.Optimizer
+        Optimizer used to train the model.
+    trainer: function
+        Function used to finetune the model between pruning iterations.
+        Only used when num_iterations > 1 or ranking_criterion is 'taylorfo'.
+        Users should write this function as a normal function to train the PyTorch model and include
+        `model, optimizer, criterion, epoch` as function arguments. Note that the trainer is also used for collecting
+        gradients for pruning if ranking_criterion is 'taylorfo'. In that case, ``epoch=None`` will be passed.
+    criterion: function
+        Function used to calculate the loss between the target and the output.
+        Only used when num_iterations > 1 or ranking_criterion is 'taylorfo'.
+        For example, you can use ``torch.nn.CrossEntropyLoss()`` as input.
+    forward_runner: function
+        Function used to perform a "dry run" on the model on the entire train/validation dataset in order to collect
+        data for pruning required by the criteria 'l1_activation' or 'l2_activation'.
+        Only used when ranking_criterion is 'l1_activation' or 'l2_activation'.
+        Users should write this function as a normal function that accepts a PyTorch model and runs forward on the
+        model using the entire train/validation dataset. This function is not expected to perform any backpropagation
+        or parameter updates.
+    """
+    def __init__(self, model, config_list, head_hidden_dim, attention_name_groups=None, dummy_input=None,
+                 ranking_criterion='l1_weight', global_sort=False, num_iterations=1, epochs_per_iteration=1,
+                 optimizer=None, trainer=None, criterion=None, forward_runner=None,
+                 **algo_kwargs):
+        super().__init__(model, config_list)
+
+        self.head_hidden_dim = int(head_hidden_dim)
+        self.attention_name_groups = attention_name_groups
+        self.dummy_input = dummy_input
+        self.ranking_criterion = ranking_criterion
+        assert self.ranking_criterion in ['l1_weight', 'l2_weight', 'l1_activation', 'l2_activation', 'taylorfo'], \
+            "Unsupported ranking criterion."
+        self.global_sort = global_sort
+        self.num_iterations = int(num_iterations)
+        assert self.num_iterations >= 1, "num_iterations must be greater than or equal to 1"
+        self.epochs_per_iteration = int(epochs_per_iteration)
+        self._optimizer = optimizer
+        self._trainer = trainer
+        self._criterion = criterion
+        self._forward_runner = forward_runner
+        if self.ranking_criterion in ['taylorfo'] or num_iterations > 1:
+            assert self._trainer is not None
+            assert self._optimizer is not None
+        if self.ranking_criterion in ['l1_activation', 'l2_activation']:
+            assert self._forward_runner is not None
+
+        # Group generation: one group per attention layer, four weights per group
+        self.masking_groups = []
+        if self.attention_name_groups is not None:
+            logger.info("Note: weights for the same attention layer are grouped using the given attention_name_groups.")
+            self.group_weights_by_name()
+        else:
+            assert self.dummy_input is not None
+            logger.info("Note: weights for the same attention layer are grouped using the model graph.")
+            self._unwrap_model()
+            self.group_weight_names_by_graph()
+            self._wrap_model()
+
+        # Group sanity check
+        self.validate_weight_groups()
+
+        # Remove any mistakenly captured ungrouped modules
+        self._unwrap_model()
+        self.remove_ungrouped_modules()
+        self._wrap_model()
+
+        self.masker = MASKER_DICT[ranking_criterion](model, self, self.head_hidden_dim, **algo_kwargs)
+        self.pruned_heads = {i: set() for i in range(len(self.masking_groups))}
+
+    def group_weights_by_name(self):
+        """
+        Populate self.masking_groups using the groups specified by the user in attention_name_groups.
+        """
+        assert len(self.masking_groups) == 0
+        # build up masking groups
+        name2group = {}
+        for layer_idx, layer in enumerate(self.attention_name_groups):
+            errmsg = 'Each name group must contain 4 weights, with the first three corresponding to Q_proj, K_proj, ' \
+                     'V_proj (in any order) and the last one being output_proj.'
+            assert len(layer) == 4, errmsg
+            self.masking_groups.append([])
+            for weight in layer:
+                name2group[weight] = layer_idx
+
+        # group wrappers
+        for wrapper in self.get_modules_wrapper():
+            if wrapper.name in name2group:
+                wrapper.group_idx = name2group[wrapper.name]
+                self.masking_groups[name2group[wrapper.name]].append(wrapper)
+
+        logger.info('Grouping updated:')
+        logger.info([[x.name for x in group] for group in self.masking_groups])
+
+    def group_weight_names_by_graph(self):
+        """
+        Populate self.attention_name_groups by running inference on the module graph.
+        Currently, the groups inferred by AttentionWeightDependency are limited to sets of four weights, with the
+        first three corresponding to Q_proj, K_proj, V_proj (in any order) and the last one being output_proj.
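+        For a BERT-style model from the transformers library, an inferred group may look like the
+        following (module names are illustrative):
+        ['encoder.layer.0.attention.self.query', 'encoder.layer.0.attention.self.key',
+         'encoder.layer.0.attention.self.value', 'encoder.layer.0.attention.output.dense']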
+ """ + try: + module_graph = TorchModuleGraph(self.bound_model, self.dummy_input) + dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) + self.attention_name_groups = dependency_tracer.dependency_sets + self.group_weights_by_name() + + except Exception as e: + raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups.\n' + 'Exception message: ' + str(e)) + + def validate_weight_groups(self): + """ + Sanity checks: + - Q, K, V projection weights in each groups must have the same shape + - output projection weight shape must match total hidden dimension (inferred from Q, K, V projection) + - Four weights in a group must have the same sparsity in their config + - If global_sort is specified, all weights must have the same sparsity + - head_hidden_dim must be a divisor of the output dimension of the projection weights (i.e., the resulting + head number must be an integer) + """ + errmsg = 'Attention weight group sanity check not passed' + sparsity = None + for group in self.masking_groups: + # allow empty groups - may be caused by config list filtering + if len(group) == 0: + continue + assert len(group) == 4, errmsg + ': each group must have four weights' + assert group[0].module.weight.size() == group[1].module.weight.size() and \ + group[1].module.weight.size() == group[2].module.weight.size(), \ + errmsg + ': the dimensions of Q, K, V projection matrices must be the same ' + assert group[0].module.weight.size()[0] == group[3].module.weight.size()[1], \ + errmsg + ': the dimension of attention results must match with input for output projection' + assert group[0].config['sparsity'] == group[1].config['sparsity'] == \ + group[2].config['sparsity'] == group[3].config['sparsity'], \ + errmsg + ': the sparsity of matrices in the same layer must be the same' + if sparsity is None: + sparsity = group[0].config['sparsity'] + if self.global_sort: + assert sparsity == group[0].config['sparsity'], \ + errmsg + ': for global_sort=True, the sparsity for all modules must be the same' + assert group[0].module.weight.size(0) % self.head_hidden_dim == 0, \ + errmsg + ': head_hidden_dim must be a divisor of the output dimension of the projection weights' + + def remove_ungrouped_modules(self): + """ + Remove non-attention weights that might be mistakenly captured by a simplified config_list. 
+ Also update the corresponding list of layer information (self.modules_to_compress) + """ + care_of_modules = set([x for layer in self.masking_groups for x in layer]) + + modules_wrapper_new, modules_to_compress_new = [], [] + for wrapper, layer_info in zip(self.modules_wrapper, self.modules_to_compress): + if wrapper in care_of_modules: + modules_wrapper_new.append(wrapper) + modules_to_compress_new.append(layer_info) + + self.modules_wrapper = modules_wrapper_new + self.modules_to_compress = modules_to_compress_new + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + schema = CompressorSchema([{ + 'sparsity': And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str] + }], model, logger) + + schema.validate(config_list) + + def compress(self): + for pruning_iter in range(self.num_iterations): + if self.ranking_criterion in ['l1_activation', 'l2_activation']: + training = self.bound_model.training + self.bound_model.eval() + self._forward_runner(self.bound_model) # dry run, forward only + self.update_mask() + self.bound_model.train(training) + elif self.ranking_criterion in ['taylorfo']: + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=None) + self.update_mask() + else: + self.update_mask() + + # for iterative pruning, if not the last iteration, finetune before next iteration + # Then, reset the maskers (may create additional hooks) + if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: + for e in range(self.epochs_per_iteration): + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=e+1) + self.masker.reset() + + logger.info('Pruned heads after iteration %i', pruning_iter) + logger.info(self.pruned_heads) + + def update_mask(self): + """ + Calculate and update masks for each masking group. If global_sort is set, the masks for all groups are + calculated altogether, and then the groups are updated individually. + """ + masks_for_all_groups = None + if self.global_sort: + masks_for_all_groups = self._calc_mask_global() + assert len(masks_for_all_groups) == len(self.masking_groups) + for group_idx, layer_weight_group in enumerate(self.masking_groups): + if self.global_sort: + masks = masks_for_all_groups[group_idx] + else: + masks = self._calc_mask(layer_weight_group) + if masks is not None: + for i, mask in enumerate(masks): + for mask_type in mask: + assert hasattr(layer_weight_group[i], mask_type), \ + "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) + setattr(layer_weight_group[i], mask_type, mask[mask_type]) + logger.debug(f'mask updated: {layer_weight_group[i].name} {mask_type}') + + def _calc_mask(self, weight_group): + """ + Calculate mask for each group using only layer-local information. + When global_sort is set for the pruner, _calc_mask_global should be called instead of this function. + + Parameters + ---------- + weight_group : list + A list of four wrappers generated by self.group_weights_by_name(). + + Returns + ------- + masks : list + A four element list corresponding to the masks for each element in the four-element weight group. + Each element in masks is a dict with keys "weight_mask" and "bias_mask" (optional). + masks can be None if the underlying masker returns None. This means that the mask calculation fails. 
+ The calling function can try recalculate the mask at a later time. Note that the calling function might need + to call masker.reset() before attempting to recalculate the mask. + """ + iter_sparsity = weight_group[0].config['sparsity'] / self.num_iterations + masks = self.masker.calc_mask(sparsity=iter_sparsity, weight_group=weight_group) + + return masks + + def _calc_mask_global(self): + """ + Calculate mask for all groups using global information. + + Returns + ------- + masks_list : list + A list corresponding to the masks for each weight group in self.masking_groups. Each element in the + returned mask_list is a four-element list corresponding to the masks for each element in a four-element + weight group. + """ + if len(self.get_modules_wrapper()) == 0: + return [] + + overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] / self.num_iterations + n_heads_total = 0 + for group in self.masking_groups: + if len(group) != 0: + q_proj, _, _, _ = group + n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) + n_heads_to_prune = int(n_heads_total * overall_sparsity) + + return self.masker.calc_mask_global(n_heads_to_prune) + + def calc_mask(self, wrapper, **kwargs): + raise RuntimeError("Applications should directly call TransformerHeadPruner's update_mask() method.") diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7f6f580837b5a02f0aea75508f65b17a2b47b6 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -0,0 +1,444 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +from .weight_masker import WeightMasker + +__all__ = ['L1WeightHeadMasker', 'L2WeightHeadMasker', 'L1ActivationHeadMasker', 'L2ActivationHeadMasker', + 'TaylorFOHeadMasker'] + +logger = logging.getLogger('transformer head pruner') + + +class AttentionHeadMasker(WeightMasker): + """ + A structured pruning masker base class that prunes attention heads in attention layers. + + Parameters + ---------- + model: nn.Module + model to be pruned + pruner: Pruner + A Pruner instance used to prune the model + head_hidden_dim: int + Hidden dimension for each attention head (e.g., 64 for BERT base) + """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner) + self.head_hidden_dim = head_hidden_dim + assert self.head_hidden_dim is not None, "head_hidden_dim must be specified." + + def reset(self): + """ + Derived classes can override this method to do preparations necessary for calculating importance scores. + This method is called during iterative pruning, before each iteration starts (except the first one). + """ + pass + + def calc_mask(self, sparsity, wrapper=None, wrapper_idx=None, weight_group=None, **kwargs): + """ + Calculate all the masks for a group of wrappers (specified in weight_group). + This function only utilizes local information for mask calculation. If global_sort is specified for the pruner, + the pruner should call calc_mask_global instead of this function. + + Parameters + ---------- + sparsity: float + The target (amount of increase of) sparsity of the wrapper list. + weight_group: list + A four-element list of module wrappers + wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers + Should be None. 
Not used in this masker, just for consistency with the parent API. + wrapper_idx: int/list of int + Should be None. Not used in this masker, just for consistency with the parent API. + Returns + ------- + masks : list + masks for each element in the group. + Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) + """ + assert weight_group is not None + if len(weight_group) == 0: + return None + else: + num_total = weight_group[0].module.weight.data.size(0) // self.head_hidden_dim + if num_total < 2: + return None + num_prune = max(int(num_total * sparsity), 1) + return self.get_mask(num_prune, weight_group, **kwargs) + + def calc_mask_global(self, n_heads_to_prune): + """ + Calculate all the masks for all groups in the pruner. + + Parameters + ---------- + n_heads_to_prune : int + Total number of attention heads to prune. + Returns + ------- + all_masks : list + A list of masks for all groups, where each element is a list of masks for each module in the group. + """ + # calculate scores as normal (this step does not require global information) + head_importance_scores = [] + for group_idx, group in enumerate(self.pruner.masking_groups): + if len(group) != 0: + scores = self.get_head_importance_scores(group) + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + for head_idx in range(n_heads): + head_importance_scores.append([group_idx, head_idx, scores[head_idx]]) + + # determine which head to prune for each layer + n_selected = 0 + for group_idx, head_idx, _ in sorted(head_importance_scores, key=(lambda x: x[-1])): + n_heads_original = self.pruner.masking_groups[group_idx][0].module.weight.size(0) // self.head_hidden_dim + n_heads_remaining = n_heads_original - len(self.pruner.pruned_heads[group_idx]) + if n_heads_remaining > 1 and head_idx not in self.pruner.pruned_heads[group_idx]: + self.pruner.pruned_heads[group_idx].add(head_idx) + n_selected += 1 + if n_selected >= n_heads_to_prune: + break + + # generate masks + all_masks = [] + for group_idx, group in enumerate(self.pruner.masking_groups): + if len(group) == 0: + masks = None + else: + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + device = group[0].module.weight.device + head_level_mask = torch.tensor([i not in self.pruner.pruned_heads[group_idx] for i in range(n_heads)], device=device) # pylint: disable=not-callable + masks = self._get_layer_masks_from_head_mask(group, head_level_mask) + all_masks.append(masks) + + return all_masks + + def get_mask(self, num_prune, weight_group, **kwargs): + """ + Calculate the mask of given layer (weight_group). + + Parameters + ---------- + num_prune: int + Num of heads to prune + weight_group: list + A four-element list of module wrappers + Returns + ------- + masks : list + masks for each element in the group. 
+ Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) + """ + raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__)) + + def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, device=None): + q_proj, _, _, output_proj = weight_group + if device is None: + device = q_proj.module.weight.device + + n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim + weight_mask_shape = q_proj.module.weight.data.view([n_heads, -1]).size() + bias_mask_shape = q_proj.module.bias.data.view([n_heads, -1]).size() + + mask_weight = head_mask_bool.unsqueeze(-1).expand(weight_mask_shape).type_as(q_proj.module.weight) + mask_bias = head_mask_bool.unsqueeze(-1).expand(bias_mask_shape).type_as(q_proj.module.weight) + + mask_weight_proj = mask_weight.contiguous().view(q_proj.module.weight.size()).detach().to(device) + mask_bias_proj = mask_bias.contiguous().view(-1).detach().to(device) + masks_for_proj = {'weight_mask': mask_weight_proj.detach()} + if hasattr(q_proj.module, 'bias') and q_proj.module.bias is not None: + masks_for_proj['bias_mask'] = mask_bias_proj + + mask_weight_dense = mask_bias_proj.expand_as(output_proj.module.weight.data).detach().to(device) + mask_bias_dense = torch.ones_like(output_proj.module.bias.data).to(device) + masks_for_dense = {'weight_mask': mask_weight_dense.detach()} + if hasattr(output_proj.module, 'bias') and output_proj.module.bias is not None: + masks_for_dense['bias_mask'] = mask_bias_dense + + masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] + + return masks + + def get_mask_by_importance_ranking(self, num_prune, weight_group): + """ + Calculate the mask of given layer by pruning out heads with lowest importance scores. + + Parameters + ---------- + num_prune: int + Num of heads to prune + weight_group: list + list of a group of weights for an attention layer + Returns + ------- + masks : list + masks for each element in the group. + Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) + """ + importance_scores = self.get_head_importance_scores(weight_group) + if importance_scores is None: + return None + + importance_scores = [[i, importance_scores[i]] for i in range(len(importance_scores))] + head_mask_bool = torch.ones(len(importance_scores)) + n_selected = 0 + for head_idx, _ in sorted(importance_scores, key=(lambda x: x[-1])): + head_mask_bool[head_idx] = 0 + if head_idx not in self.pruner.pruned_heads[weight_group[0].group_idx]: + n_selected += 1 + # update pruned_heads in pruner (mainly for iterative pruning) + self.pruner.pruned_heads[weight_group[0].group_idx].add(head_idx) + if n_selected == num_prune: + break + + return self._get_layer_masks_from_head_mask(weight_group, head_mask_bool) + + def get_head_importance_scores(self, weight_group): + """ + Calculate the importance score for each head. 
+        Parameters
+        ----------
+        weight_group: list
+            list of a group of weights for an attention layer
+
+        Returns
+        -------
+        importance_scores: tensor
+            Tensor that indicates the importance of each head
+        """
+        raise NotImplementedError('{} get_head_importance_scores is not implemented'.format(self.__class__.__name__))
+
+
+class L1WeightHeadMasker(AttentionHeadMasker):
+    """
+    A structured pruning algorithm that prunes the heads with the smallest weight magnitude in the query, key,
+    and value projection matrices. The L1 norm is used for magnitude calculation. Note that in this implementation,
+    the weight norms of q_proj, k_proj, v_proj from each head are averaged to form the final importance score for
+    the head.
+    """
+    def get_head_importance_scores(self, weight_group):
+        q_proj, k_proj, v_proj, _ = weight_group
+
+        n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim
+        query_proj_weights = q_proj.module.weight.data.view([n_heads, -1])
+        key_proj_weights = k_proj.module.weight.data.view([n_heads, -1])
+        value_proj_weights = v_proj.module.weight.data.view([n_heads, -1])
+
+        query_norm_avg = torch.norm(query_proj_weights, 1, -1)
+        key_norm_avg = torch.norm(key_proj_weights, 1, -1)
+        value_norm_avg = torch.norm(value_proj_weights, 1, -1)
+
+        return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach()
+
+    def get_mask(self, num_prune, weight_group, **kwargs):
+        return self.get_mask_by_importance_ranking(num_prune, weight_group)
+
+
+class L2WeightHeadMasker(AttentionHeadMasker):
+    """
+    A structured pruning algorithm that prunes the heads with the smallest weight magnitude in the query, key,
+    and value projection matrices. The L2 norm is used for magnitude calculation. Note that in this implementation,
+    the weight norms of q_proj, k_proj, v_proj from each head are averaged to form the final importance score for
+    the head.
+    """
+    def get_head_importance_scores(self, weight_group):
+        q_proj, k_proj, v_proj, _ = weight_group
+
+        n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim
+        query_proj_weights = q_proj.module.weight.data.view([n_heads, -1])
+        key_proj_weights = k_proj.module.weight.data.view([n_heads, -1])
+        value_proj_weights = v_proj.module.weight.data.view([n_heads, -1])
+
+        query_norm_avg = torch.norm(query_proj_weights, 2, -1)
+        key_norm_avg = torch.norm(key_proj_weights, 2, -1)
+        value_norm_avg = torch.norm(value_proj_weights, 2, -1)
+
+        return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach()
+
+    def get_mask(self, num_prune, weight_group, **kwargs):
+        return self.get_mask_by_importance_ranking(num_prune, weight_group)
+
+
+class L1ActivationHeadMasker(AttentionHeadMasker):
+    """
+    A structured pruning algorithm that prunes the heads with the smallest final output value.
+    Note that this masker only relies on the output of the output layer of each attention layer.
+    The masker collects the L1 norm of the output of the last weight (output projection) in each group over the
+    entire train set, and prunes the heads producing the smallest output.
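+    Concretely, the forward hook collects the input to the output projection (i.e., the concatenated
+    head outputs), takes its absolute value, and sums it over the batch and sequence dimensions;
+    head scores are then obtained by summing over each head's hidden dimensions.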
+ """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.reset() + + def reset(self): + self.pruner.hook_id = self._add_activation_collector(self.pruner) + + def get_head_importance_scores(self, weight_group): + _, _, _, output_proj = weight_group + activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) + activations = torch.sum(activations, -1) + n_heads = activations.size()[0] // self.head_hidden_dim + scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu() + + # clean up hooks + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return scores + + def _add_activation_collector(self, pruner): + def collector(collected_activation): + def hook(module_, input_, output): + if type(input_) is tuple: + input_ = input_[0] + raw_activation = torch.abs(input_.detach().cpu()) # L1-norm + raw_activation_reduced = torch.sum(raw_activation, [0, 1]) + collected_activation.append(raw_activation_reduced) + return hook + pruner.collected_activation = {} + pruner._fwd_hook_id += 1 + pruner._fwd_hook_handles[pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in pruner.masking_groups: + pruner.collected_activation[output_proj.group_idx] = [] + handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx])) + + pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle) + + return pruner._fwd_hook_id + + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) + + +class L2ActivationHeadMasker(AttentionHeadMasker): + """ + A structured pruning algorithm that prunes the heads with smallest final output value. + Note that this masker only relies on the output of the output layer of each attention layer. + The masker collects the L2 norm of the output of the last weight (output projection) in each group on the entire + train set, and prunes the heads producing the smallest output. 
+ """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.reset() + + def reset(self): + self.pruner.hook_id = self._add_activation_collector(self.pruner) + + def get_head_importance_scores(self, weight_group): + _, _, _, output_proj = weight_group + activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) + scores = torch.sum(activations, -1).detach().cpu() + # n_heads = activations.size()[0] // self.head_hidden_dim + # scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu() + + # clean up hooks + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return scores + + def _add_activation_collector(self, pruner): + def collector(collected_activation, head_hidden_dim): + def hook(module_, input_, output): + if type(input_) is tuple: + input_ = input_[0] + raw_activation = input_.detach().cpu() ** 2 + n_heads = raw_activation.size(-1) // head_hidden_dim + raw_activation = raw_activation.view(raw_activation.size(0), raw_activation.size(1), n_heads, -1) + raw_activation = torch.norm(raw_activation, 2, -1) # (B, S, n_heads) + raw_activation_reduced = torch.sum(raw_activation, [0, 1]) # (n_heads,) + collected_activation.append(raw_activation_reduced) + + return hook + + pruner.collected_activation = {} + pruner._fwd_hook_id += 1 + pruner._fwd_hook_handles[pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in pruner.masking_groups: + pruner.collected_activation[output_proj.group_idx] = [] + handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx], + head_hidden_dim=self.head_hidden_dim)) + + pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle) + + return pruner._fwd_hook_id + + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) + + +class TaylorFOHeadMasker(AttentionHeadMasker): + """ + A structured pruning algorithm that prunes the heads with smallest final output contribution. + Note that this masker only relies on the output of the output layer of each attention layer. + The masker collects the output the last weight (output projection) in each group and the corresponding gradient + on the entire train set, and prunes the heads producing the smallest contribution as used in the following papers: + "Are Sixteen Heads Really Better than One?" (Michel et.al, 2019) + "Pruning convolutional neural networks for resource efficient inference." (Molchanov et. 
al., 2017) + """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.reset() + + def reset(self): + self.pruner.hook_id = self._add_activation_collector() # forward hooks for collecting activation + self.backward_hooks = {} # backward hooks for collecting gradient + self._add_gradient_collector() + + def get_head_importance_scores(self, weight_group): + _, _, _, output_proj = weight_group + result = output_proj.head_importance_scores + + # clean up hooks and cached data + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + self.backward_hooks[output_proj.group_idx].remove() + for attr in ['forward_output_cached', 'head_importance_scores']: + output_proj.__dict__.pop(attr, None) + + return result + + def _add_activation_collector(self): + def forward_hook(md, inp, out): + if type(inp) is tuple: + inp = inp[0] + n_heads_per_layer = inp.size(-1) // self.head_hidden_dim + heads_output = inp.view([inp.size(0), inp.size(1), n_heads_per_layer, -1]).detach() + md.forward_output_cached = heads_output + + self.pruner._fwd_hook_id += 1 + self.pruner._fwd_hook_handles[self.pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in self.pruner.masking_groups: + handle = output_proj.register_forward_hook(forward_hook) + self.pruner._fwd_hook_handles[self.pruner._fwd_hook_id].append(handle) + + return self.pruner._fwd_hook_id + + def _add_gradient_collector(self): + def grad_hook(md, grad_in, grad_out): + if type(grad_in) is tuple: + grad_in = grad_in[0] + n_heads_per_layer = grad_in.size(-1) // self.head_hidden_dim + heads_grad = grad_in.view([grad_in.size(0), grad_in.size(1), n_heads_per_layer, -1]) + heads_scores = torch.abs(heads_grad * md.forward_output_cached) + heads_scores = torch.sum(heads_scores, [0, 1, 3]).detach().cpu() + if hasattr(md, 'head_importance_scores'): + md.head_importance_scores += heads_scores + else: + md.head_importance_scores = heads_scores + + for _, _, _, output_proj in self.pruner.masking_groups: + handle = output_proj.register_backward_hook(grad_hook) + self.backward_hooks[output_proj.group_idx] = handle + + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) diff --git a/nni/algorithms/compression/pytorch/pruning/weight_masker.py b/nni/algorithms/compression/pytorch/pruning/weight_masker.py new file mode 100644 index 0000000000000000000000000000000000000000..aec8444ced37bccfa5f05e0a9f81b2843c2a35e4 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/weight_masker.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +class WeightMasker(object): + def __init__(self, model, pruner, **kwargs): + self.model = model + self.pruner = pruner + + def calc_mask(self, sparsity, wrapper, wrapper_idx=None): + """ + Calculate the mask of given layer. 
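+        This base implementation only defines the interface; concrete maskers (such as the
+        structured maskers above) must override this method.
+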
+        Parameters
+        ----------
+        sparsity: float
+            pruning ratio, preserved weight ratio is `1 - sparsity`
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+
+        Returns
+        -------
+        dict
+            dictionary for storing masks, keys of the dict:
+            'weight_mask': weight mask tensor
+            'bias_mask': bias mask tensor (optional)
+        """
+
+        raise NotImplementedError('{} calc_mask is not implemented'.format(self.__class__.__name__))
diff --git a/nni/algorithms/compression/pytorch/quantization/__init__.py b/nni/algorithms/compression/pytorch/quantization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb632ece452922eaad81259b7a53f4c983cf14ad
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/quantization/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .bnn_quantizer import BNNQuantizer
+from .dorefa_quantizer import DoReFaQuantizer
+from .lsq_quantizer import LsqQuantizer
+from .native_quantizer import NaiveQuantizer
+from .observer_quantizer import ObserverQuantizer
+from .qat_quantizer import QAT_Quantizer
+
+
+__all__ = ['NaiveQuantizer', 'QAT_Quantizer', 'DoReFaQuantizer', 'BNNQuantizer', 'LsqQuantizer', 'ObserverQuantizer']
diff --git a/nni/algorithms/compression/pytorch/quantization/bnn_quantizer.py b/nni/algorithms/compression/pytorch/quantization/bnn_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7010536ac80add332cf6f5fa0d5ce7993ccf906a
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/quantization/bnn_quantizer.py
@@ -0,0 +1,122 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import torch
+from schema import Schema, And, Or, Optional
+from nni.compression.pytorch.utils.config_validation import QuantizerSchema
+from nni.compression.pytorch.compressor import Quantizer, QuantGrad
+from nni.compression.pytorch.quantization.literal import QuantType
+from nni.compression.pytorch.quantization.utils import get_bits_length
+
+
+logger = logging.getLogger(__name__)
+
+
+class ClipGrad(QuantGrad):
+    @staticmethod
+    def quant_backward(tensor, grad_output, quant_type, scale, zero_point, qmin, qmax):
+        if quant_type == QuantType.OUTPUT:
+            grad_output[torch.abs(tensor) > 1] = 0
+        return grad_output
+
+
+class BNNQuantizer(Quantizer):
+    """Binarized Neural Networks, as defined in:
+    Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1
+    (https://arxiv.org/abs/1602.02830)
+    """
+
+    def __init__(self, model, config_list, optimizer):
+        assert isinstance(optimizer, torch.optim.Optimizer), "unrecognized optimizer type"
+        super().__init__(model, config_list, optimizer)
+        device = next(model.parameters()).device
+        self.quant_grad = ClipGrad.apply
+        modules_to_compress = self.get_modules_to_compress()
+        for layer, config in modules_to_compress:
+            if "weight" in config.get("quant_types", []):
+                weight_bits = get_bits_length(config, 'weight')
+                layer.module.register_buffer('weight_bits', torch.Tensor([int(weight_bits)]))
+        self.bound_model.to(device)
+
+    def _del_simulated_attr(self, module):
+        """
+        delete redundant parameters in quantize module
+        """
+        del_attr_list = ['old_weight', 'weight_bits']
+        for attr in del_attr_list:
+            if hasattr(module, attr):
+                delattr(module, attr)
+
+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.Module
+            Model to be quantized
config_list : list of dict + List of configurations + """ + schema = QuantizerSchema([{ + Optional('quant_types'): Schema([lambda x: x in ['weight', 'output']]), + Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({ + Optional('weight'): And(int, lambda n: 0 < n < 32), + Optional('output'): And(int, lambda n: 0 < n < 32), + })), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + def quantize_weight(self, wrapper, **kwargs): + weight = wrapper.module.weight + weight = torch.sign(weight) + # remove zeros + weight[weight == 0] = 1 + wrapper.module.weight = weight + wrapper.module.weight_bits = torch.Tensor([1.0]) + return weight + + def quantize_output(self, output, wrapper, **kwargs): + out = torch.sign(output) + # remove zeros + out[out == 0] = 1 + return out + + def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None): + """ + Export quantized model weights and calibration parameters(optional) + + Parameters + ---------- + model_path : str + path to save quantized model weight + calibration_path : str + (optional) path to save quantize parameters after calibration + onnx_path : str + (optional) path to save onnx model + input_shape : list or tuple + input shape to onnx model + device : torch.device + device of the model, used to place the dummy input tensor for exporting onnx file. + the tensor is placed on cpu if ```device``` is None + + Returns + ------- + Dict + """ + assert model_path is not None, 'model_path must be specified' + self._unwrap_model() + calibration_config = {} + + for name, module in self.bound_model.named_modules(): + if hasattr(module, 'weight_bits'): + calibration_config[name] = {} + calibration_config[name]['weight_bits'] = int(module.weight_bits) + self._del_simulated_attr(module) + + self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, input_shape, device) + + return calibration_config diff --git a/nni/algorithms/compression/pytorch/quantization/dorefa_quantizer.py b/nni/algorithms/compression/pytorch/quantization/dorefa_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..6e1f76e25929792106b55cef5db2e5380332f2db --- /dev/null +++ b/nni/algorithms/compression/pytorch/quantization/dorefa_quantizer.py @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
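+
+# A rough numeric sketch of the weight path implemented by quantize_weight/quantize below
+# (tanh -> affine transform into [0, 1] -> k-bit uniform rounding -> affine back to [-1, 1]):
+#   w = tanh(w)
+#   w = w / (2 * max|w|) + 0.5                  # squash into [0, 1]
+#   w_q = round(w * (2**k - 1)) / (2**k - 1)    # snap to the k-bit uniform grid
+#   w_q = 2 * w_q - 1                           # map back to [-1, 1]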
+ +import logging +import torch +from schema import Schema, And, Or, Optional +from nni.compression.pytorch.utils.config_validation import QuantizerSchema +from nni.compression.pytorch.compressor import Quantizer +from nni.compression.pytorch.quantization.utils import get_bits_length + + +logger = logging.getLogger(__name__) + + +class DoReFaQuantizer(Quantizer): + """Quantizer using the DoReFa scheme, as defined in: + Zhou et al., DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients + (https://arxiv.org/abs/1606.06160) + """ + + def __init__(self, model, config_list, optimizer): + assert isinstance(optimizer, torch.optim.Optimizer), "unrecognized optimizer type" + super().__init__(model, config_list, optimizer) + device = next(model.parameters()).device + modules_to_compress = self.get_modules_to_compress() + for layer, config in modules_to_compress: + if "weight" in config.get("quant_types", []): + weight_bits = get_bits_length(config, 'weight') + layer.module.register_buffer('weight_bits', torch.Tensor([int(weight_bits)])) + self.bound_model.to(device) + + def _del_simulated_attr(self, module): + """ + delete redundant parameters in quantize module + """ + del_attr_list = ['old_weight', 'weight_bits'] + for attr in del_attr_list: + if hasattr(module, attr): + delattr(module, attr) + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list of dict + List of configurations + """ + schema = QuantizerSchema([{ + Optional('quant_types'): Schema([lambda x: x in ['weight']]), + Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({ + Optional('weight'): And(int, lambda n: 0 < n < 32) + })), + Optional('op_types'): [str], + Optional('op_names'): [str], + Optional('exclude'): bool + }], model, logger) + + schema.validate(config_list) + + def quantize_weight(self, wrapper, **kwargs): + weight = wrapper.module.weight + weight_bits = int(wrapper.module.weight_bits) + weight = weight.tanh() + weight = weight / (2 * weight.abs().max()) + 0.5 + weight = self.quantize(weight, weight_bits) + weight = 2 * weight - 1 + wrapper.module.weight = weight + # wrapper.module.weight.data = weight + return weight + + def quantize(self, input_ri, q_bits): + scale = pow(2, q_bits) - 1 + output = torch.round(input_ri * scale) / scale + return output + + def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None): + """ + Export quantized model weights and calibration parameters(optional) + + Parameters + ---------- + model_path : str + path to save quantized model weight + calibration_path : str + (optional) path to save quantize parameters after calibration + onnx_path : str + (optional) path to save onnx model + input_shape : list or tuple + input shape to onnx model + device : torch.device + device of the model, used to place the dummy input tensor for exporting onnx file. 
+ the tensor is placed on cpu if ```device``` is None + + Returns + ------- + Dict + """ + assert model_path is not None, 'model_path must be specified' + self._unwrap_model() + calibration_config = {} + + for name, module in self.bound_model.named_modules(): + if hasattr(module, 'weight_bits'): + calibration_config[name] = {} + calibration_config[name]['weight_bits'] = int(module.weight_bits) + self._del_simulated_attr(module) + + self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, input_shape, device) + + return calibration_config diff --git a/nni/algorithms/compression/pytorch/quantization/lsq_quantizer.py b/nni/algorithms/compression/pytorch/quantization/lsq_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6f86eb70fb163891ad0953ca95f7cfe74b8b1d --- /dev/null +++ b/nni/algorithms/compression/pytorch/quantization/lsq_quantizer.py @@ -0,0 +1,254 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +from nni.compression.pytorch.compressor import BN_FOLD_TAG, Quantizer, QuantForward +from nni.compression.pytorch.quantization.utils import get_bits_length + + +logger = logging.getLogger(__name__) + + +class LsqQuantizer(Quantizer): + """Quantizer defined in: + Learned Step Size Quantization (ICLR 2020) + https://arxiv.org/pdf/1902.08153.pdf + """ + + def __init__(self, model, config_list, optimizer, dummy_input=None): + """ + Parameters + ---------- + model : torch.nn.Module + the model to be quantized + config_list : list of dict + list of configurations for quantization + supported keys for dict: + - quant_types : list of string + type of quantization you want to apply, currently support 'weight', 'input', 'output' + - quant_bits : int or dict of {str : int} + bits length of quantization, key is the quantization type, value is the length, eg. {'weight': 8}, + when the type is int, all quantization types share same bits length + - quant_start_step : int + disable quantization until model are run by certain number of steps, this allows the network to enter a more stable + state where output quantization ranges do not exclude a significant fraction of values, default value is 0 + - op_types : list of string + types of nn.module you want to apply quantization, eg. 'Conv2d' + - dummy_input : tuple of tensor + inputs to the model, which are used to get the graph of the module. The graph is used to find + Conv-Bn patterns. And then the batch normalization folding would be enabled. If dummy_input is not + given, the batch normalization folding would be disabled. 
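+            An illustrative config_list using these keys (values are only an example):
+            [{
+                'quant_types': ['weight', 'output'],
+                'quant_bits': {'weight': 8, 'output': 8},
+                'op_types': ['Conv2d', 'Linear']
+            }]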
+ """ + assert isinstance(optimizer, torch.optim.Optimizer), "unrecognized optimizer type" + super().__init__(model, config_list, optimizer, dummy_input) + device = next(model.parameters()).device + self.quant_grad = QuantForward() + modules_to_compress = self.get_modules_to_compress() + self.bound_model.register_buffer("steps", torch.Tensor([1])) + for layer, config in modules_to_compress: + if "weight" in config.get("quant_types", []): + layer.module.register_parameter("weight_scale", torch.nn.Parameter(torch.Tensor([1.0]))) + # todo: support per-channel quantization for weight since TensorRT use it for conv weight + weight_bits = get_bits_length(config, "weight") + layer.module.register_buffer('weight_bits', torch.Tensor([weight_bits])) + qmax = 2 ** (weight_bits - 1) - 1 + qmin = -2 ** (weight_bits - 1) + init_weight_scale = layer.module.weight.data.detach().abs().mean() * 2 / (qmax ** 0.5) + layer.module.weight_scale = torch.nn.Parameter(init_weight_scale) + layer.module.weight_qmax = qmax + layer.module.weight_qmin = qmin + + self.optimizer.add_param_group({"params": layer.module.weight_scale}) + + if "output" in config.get("quant_types", []): + # scale of output will be initialized using the first batch data + layer.module.register_parameter("output_scale", torch.nn.Parameter(torch.Tensor([1.0]))) + output_bits = get_bits_length(config, "output") + layer.module.register_buffer('output_bits', torch.Tensor([output_bits])) + qmax = 2 ** (output_bits - 1) - 1 + qmin = -2 ** (output_bits - 1) + layer.module.output_qmax = qmax + layer.module.output_qmin = qmin + + self.optimizer.add_param_group({"params": layer.module.output_scale}) + + if "input" in config.get("quant_types", []): + # scale of input will be initialized using the first batch data + layer.module.register_parameter("input_scale", torch.nn.Parameter(torch.Tensor([1.0]))) + input_bits = get_bits_length(config, "input") + layer.module.register_buffer('input_bits', torch.Tensor([input_bits])) + qmax = 2 ** (input_bits - 1) - 1 + qmin = -2 ** (input_bits - 1) + layer.module.input_qmax = qmax + layer.module.input_qmin = qmin + + self.optimizer.add_param_group({"params": layer.module.input_scale}) + + self.bound_model.to(device) + + @staticmethod + def grad_scale(x, scale): + """ + Used to scale the gradient. Give tensor `x`, we have `y=grad_scale(x, scale)=x` in the forward pass, + which means that this function will not change the value of `x`. In the backward pass, we have: + + .. math: + + \frac{\alpha_L}{\alpha_x}=\frac{\alpha_L}{\alpha_y}*\frac{\alpha_y}{\alpha_x}=sclae*\frac{\alpha_L}{\alpha_x} + + This means that the origin gradient of x is scaled by a factor of `scale`. Applying this function + to a nn.Parameter will scale the gradient of it without changing its value. + """ + y = x + y_grad = x * scale + return (y - y_grad).detach() + y_grad + + @staticmethod + def round_pass(x): + """ + A simple way to achieve STE operation. + """ + y = x.round() + y_grad = x + return (y - y_grad).detach() + y_grad + + def quantize(self, x, scale, qmin, qmax): + grad_scale_factor = 1.0 / ((qmax * x.numel()) ** 0.5) + scale = self.grad_scale(scale, grad_scale_factor) + x = x / scale + x = torch.clamp(x, qmin, qmax) + x = self.round_pass(x) + x = x * scale + return x + + def quantize_weight(self, wrapper, **kwargs): + module = wrapper.module + weight = wrapper.module.weight + + # todo: add support for quantize bias. 
If we use TensorRT as the backend, there is no need to quantize
+ # bias
+ weight = self.quantize(weight, module.weight_scale, module.weight_qmin, module.weight_qmax)
+ module.weight = weight
+ return weight
+
+ def quantize_output(self, output, wrapper, **kwargs):
+ module = wrapper.module
+
+ # initialize the scale
+ if self.bound_model.steps == 1:
+ qmax = module.output_qmax
+ init_oup_scale = output.data.detach().abs().mean() * 2 / (qmax ** 0.5)
+ module.output_scale.data = init_oup_scale
+
+ output = self.quantize(output, module.output_scale, module.output_qmin, module.output_qmax)
+ return output
+
+ def quantize_input(self, inputs, wrapper, **kwargs):
+ module = wrapper.module
+ # initialize the scale
+ if self.bound_model.steps == 1:
+ qmax = module.input_qmax
+ init_inp_scale = inputs.data.detach().abs().mean() * 2 / (qmax ** 0.5)
+ module.input_scale.data = init_inp_scale
+
+ inputs = self.quantize(inputs, module.input_scale, module.input_qmin, module.input_qmax)
+ return inputs
+
+ def load_calibration_config(self, calibration_config):
+ modules_to_compress = self.get_modules_to_compress()
+ for layer, _ in modules_to_compress:
+ name, module = layer.name, layer.module
+ if name not in calibration_config:
+ if hasattr(module, 'weight_bits') or hasattr(module, 'output_bits') or hasattr(module, 'input_bits'):
+ logger.warning(f"Cannot find module {name}'s parameters in the input config.")
+ continue
+ if hasattr(module, 'weight_bits'):
+ assert calibration_config[name]['weight_bits'] == int(module.weight_bits), f"weight bits of module {name} fail to match"
+ if hasattr(module, 'input_bits'):
+ assert calibration_config[name]['input_bits'] == int(module.input_bits), f"input bits of module {name} fail to match"
+ module.input_scale.data = torch.Tensor([float(calibration_config[name]['tracked_max_input'] / module.input_qmax)])
+
+ if hasattr(module, 'output_bits'):
+ assert calibration_config[name]['output_bits'] == int(module.output_bits), f"output bits of module {name} fail to match"
+ module.output_scale.data = torch.Tensor([float(calibration_config[name]['tracked_max_output'] / module.output_qmax)])
+
+ def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None):
+ """
+ Export quantized model weights and calibration parameters (optional)
+
+ Parameters
+ ----------
+ model_path : str
+ path to save the quantized model weights
+ calibration_path : str
+ (optional) path to save quantization parameters after calibration
+ onnx_path : str
+ (optional) path to save the onnx model
+ input_shape : list or tuple
+ input shape of the onnx model
+ device : torch.device
+ device of the model, used to place the dummy input tensor for exporting the onnx file.
+ the tensor is placed on cpu if ```device``` is None
+
+ Returns
+ -------
+ Dict
+ """
+ assert model_path is not None, 'model_path must be specified'
+ self._unwrap_model()
+ calibration_config = {}
+
+ for name, module in self.bound_model.named_modules():
+ if hasattr(module, 'input_bits') or hasattr(module, 'weight_bits') or hasattr(module, 'output_bits'):
+ calibration_config[name] = {}
+ if hasattr(module, 'weight_bits'):
+ calibration_config[name]['weight_bits'] = int(module.weight_bits)
+ abs_max_weight = float(module.weight_scale * module.weight_qmax)
+ calibration_config[name]['tracked_min_weight'] = -abs_max_weight
+ calibration_config[name]['tracked_max_weight'] = abs_max_weight
+ actual_weight = getattr(module, 'old_weight', None)
+ if actual_weight is None:
+ logger.warning("Cannot recover weight for layer %s. 
" + "This may lead to a wrong accuracy performance on the backend.", name) + delattr(module, 'weight') + module.register_parameter('weight', actual_weight) + if hasattr(module, BN_FOLD_TAG): + actual_bias = getattr(module, 'old_bias', None) + delattr(module, 'bias') + if actual_bias is not None: + module.register_parameter('bias', actual_bias) + else: + setattr(module, 'bias', None) + if hasattr(module, 'input_bits'): + calibration_config[name]['input_bits'] = int(module.input_bits) + abs_max_input = float(module.input_scale * module.input_qmax) + calibration_config[name]['tracked_min_input'] = -abs_max_input + calibration_config[name]['tracked_max_input'] = abs_max_input + if hasattr(module, 'output_bits'): + calibration_config[name]['output_bits'] = int(module.output_bits) + abs_max_output = float(module.output_scale * module.output_qmax) + calibration_config[name]['tracked_min_output'] = -abs_max_output + calibration_config[name]['tracked_max_output'] = abs_max_output + self._del_simulated_attr(module) + + self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, + input_shape, device) + + return calibration_config + + def _del_simulated_attr(self, module): + """ + delete redundant parameters in quantize module + """ + del_attr_list = ['old_weight', 'tracked_min_input', 'tracked_max_input', 'tracked_min_output', \ + 'tracked_max_output', 'output_scale', 'input_scale', 'weight_scale','weight_bits', 'output_bits', 'input_bits', 'BN_FOLD_TAG'] + for attr in del_attr_list: + if hasattr(module, attr): + delattr(module, attr) + + def step_with_optimizer(self): + """ + override `compressor` `step` method, quantization only happens after certain number of steps + """ + self.bound_model.steps += 1 diff --git a/nni/algorithms/compression/pytorch/quantization/native_quantizer.py b/nni/algorithms/compression/pytorch/quantization/native_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..44a41716da4df59298b9ed7b86ccc9bef9d2ec5e --- /dev/null +++ b/nni/algorithms/compression/pytorch/quantization/native_quantizer.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+import logging
+import torch
+from schema import Or, Optional
+from nni.compression.pytorch.utils.config_validation import QuantizerSchema
+from nni.compression.pytorch.compressor import Quantizer
+
+
+logger = logging.getLogger(__name__)
+
+
+class NaiveQuantizer(Quantizer):
+ """Quantize weights to 8 bits
+ """
+
+ def __init__(self, model, config_list, optimizer=None):
+ super().__init__(model, config_list, optimizer)
+ self.layer_scale = {}
+
+ def validate_config(self, model, config_list):
+ schema = QuantizerSchema([{
+ Optional('quant_types'): ['weight'],
+ Optional('quant_bits'): Or(8, {'weight': 8}),
+ Optional('op_types'): [str],
+ Optional('op_names'): [str],
+ Optional('exclude'): bool
+ }], model, logger)
+
+ schema.validate(config_list)
+
+ def quantize_weight(self, wrapper, **kwargs):
+ weight = wrapper.module.weight
+ new_scale = weight.abs().max() / 127
+ scale = max(self.layer_scale.get(wrapper.name, 0), new_scale)
+ self.layer_scale[wrapper.name] = scale
+ orig_type = weight.type() # TODO: user layer
+ # simulate int8 quantization: divide by scale, cast to int8, cast back and rescale
+ weight = weight.div(scale).type(torch.int8).type(orig_type).mul(scale)
+ wrapper.module.weight = weight
+ return weight
diff --git a/nni/algorithms/compression/pytorch/quantization/observer_quantizer.py b/nni/algorithms/compression/pytorch/quantization/observer_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb793f373478e3e11ffa064a66f648f3e2486ed2
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/quantization/observer_quantizer.py
@@ -0,0 +1,238 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from collections import defaultdict
+import torch
+from schema import Schema, And, Or, Optional
+from nni.compression.pytorch.utils.config_validation import QuantizerSchema
+from nni.compression.pytorch.compressor import Quantizer, QuantForward
+from nni.compression.pytorch.quantization.observers import default_weight_observer, default_histogram_observer
+
+
+logger = logging.getLogger(__name__)
+
+
+class ObserverQuantizer(Quantizer):
+ """This quantizer uses observers to record weight/output statistics to get quantization information.
+ The whole process can be divided into three steps:
+
+ 1. It will register observers to the place where quantization would happen (just like registering hooks).
+ 2. The observers would record tensors' statistics during calibration.
+ 3. Scale & zero point would be obtained after calibration.
+
+ Note that the observer type, tensor dtype and quantization qscheme are hard coded for now. Their customization
+ is under development and will be ready soon.
+ """
+
+ def __init__(self, model, config_list, optimizer=None):
+ super().__init__(model, config_list, optimizer)
+ # NOTE: this quantizer is experimental for now. The dtype and qscheme of quantization
+ # are hard-coded.
+ # TODO:
+ # 1. support dtype and qscheme customization through config_list. Current settings:
+ # weight observer : per_tensor_symmetric, qint8
+ # output observer : per_tensor_affine, quint8, reduce_range=True
+ # 2. add more kinds of observers, such as Kullback-Leibler divergence.
+ # 3. add batch normalization folding
+ assert not model.training, "Currently the observer quantizer only works in evaluation mode."
+ self.quant_grad = QuantForward()
+ self.device = next(model.parameters()).device
+ modules_to_compress = self.get_modules_to_compress()
+ all_observers = defaultdict(dict)
+ # symmetric int8 range for weights; activations use [0, 127] because reduce_range is set to True
+ weight_qmin, weight_qmax = -127, 127
+ output_qmin, output_qmax = 0, 127 # reduce_range is set to True
+ self.compressed = False
+
+ for layer, config in modules_to_compress:
+ layer_name = layer.name
+ module = layer.module
+ if "weight" in config.get("quant_types", []):
+ all_observers[layer_name]["weight"] = default_weight_observer()
+ setattr(module, "weight_qmax", weight_qmax)
+ setattr(module, "weight_qmin", weight_qmin)
+ if "input" in config.get("quant_types", []):
+ all_observers[layer_name]["input"] = default_histogram_observer()
+ setattr(module, "input_qmax", output_qmax)
+ setattr(module, "input_qmin", output_qmin)
+ if "output" in config.get("quant_types", []):
+ all_observers[layer_name]["output"] = default_histogram_observer()
+ setattr(module, "output_qmax", output_qmax)
+ setattr(module, "output_qmin", output_qmin)
+ self.all_observers = all_observers
+ self.bound_model.to(self.device)
+
+ def validate_config(self, model, config_list):
+ schema = QuantizerSchema([{
+ Optional('quant_types'): Schema([lambda x: x in ['weight', 'output', 'input']]),
+ Optional('quant_bits'): Or(And(int, lambda n: n == 8), Schema({
+ Optional('weight'): And(int, lambda n: n == 8),
+ Optional('output'): And(int, lambda n: n == 8),
+ Optional('input'): And(int, lambda n: n == 8),
+ })),
+ Optional('op_types'): [str],
+ Optional('op_names'): [str]
+ }], model, logger)
+
+ schema.validate(config_list)
+
+ def record(self, wrapper, quant_type, tensor):
+ name = wrapper.name
+ observer = self.all_observers[name][quant_type]
+ observer(tensor.cpu())
+
+ def calculate_qparams(self, name, quant_type):
+ observer = self.all_observers[name][quant_type]
+ scale, zero_point = observer.calculate_qparams()
+ return scale, zero_point
+
+ def _quantize(self, x, scale, zero_point, qmin, qmax):
+ x = x / scale + zero_point
+ x = torch.clamp(x, qmin, qmax)
+ x = torch.round(x)
+ x = (x - zero_point) * scale
+ return x
+
+ def quantize_input(self, inputs, wrapper, **kwargs):
+ if self.compressed:
+ module = wrapper.module
+ inputs = self._quantize(inputs,
+ module.input_scale,
+ module.input_zero_point,
+ module.input_qmin,
+ module.input_qmax)
+ else:
+ self.record(wrapper, 'input', inputs)
+ return inputs
+
+ def quantize_weight(self, wrapper, **kwargs):
+ # If ObserverQuantizer.compress has been executed, the weight has already been set to
+ # the pseudo-quantized one, so there is no need to quantize it again
+ if self.compressed:
+ return
+ weight = wrapper.module.weight
+ self.record(wrapper, 'weight', weight)
+
+ def quantize_output(self, output, wrapper, **kwargs):
+ if self.compressed:
+ module = wrapper.module
+ new_output = self._quantize(output,
+ module.output_scale,
+ module.output_zero_point,
+ module.output_qmin,
+ module.output_qmax)
+ else:
+ self.record(wrapper, 'output', output)
+ new_output = output
+ return new_output
+
+ def compress(self):
+ """
+ Calculate the quantization information of each tensor. Note that inference with the compressed
+ model will no longer update the corresponding observers. Instead, the quantization process
+ will be simulated, which can be used to test the accuracy of the quantization.
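+
+ Examples
+ --------
+ A calibration sketch (illustrative only; ``model``, ``config_list`` and ``calib_loader``
+ are placeholders)::
+
+ >>> quantizer = ObserverQuantizer(model.eval(), config_list)
+ >>> with torch.no_grad():
+ ... for data, _ in calib_loader:
+ ... model(data) # observers record tensor statistics
+ >>> quantizer.compress() # scale and zero point are derived from the records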
+ """ + modules_to_compress = self.get_modules_to_compress() + for layer, config in modules_to_compress: + module = layer.module + if "weight" in config.get("quant_types", []): + scale, zero_point = self.calculate_qparams(layer.name, 'weight') + module.register_buffer('weight_scale', scale.to(self.device)) + module.register_buffer('weight_zero_point', zero_point.to(self.device)) + weight = module.weight + quantized_weight = self._quantize(weight, + module.weight_scale, + module.weight_zero_point, + module.weight_qmin, + module.weight_qmax) + delattr(module, 'weight') + module.register_buffer('weight', quantized_weight) + if "input" in config.get("quant_types", []): + scale, zero_point = self.calculate_qparams(layer.name, 'input') + module.register_buffer('input_scale', scale.to(self.device)) + module.register_buffer('input_zero_point', zero_point.to(self.device)) + if "output" in config.get("quant_types", []): + scale, zero_point = self.calculate_qparams(layer.name, 'output') + module.register_buffer('output_scale', scale.to(self.device)) + module.register_buffer('output_zero_point', zero_point.to(self.device)) + self.compressed = True + super().compress() + + def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None): + """ + Export quantized model weights and calibration parameters(optional) + + Parameters + ---------- + model_path : str + path to save quantized model weight + calibration_path : str + (optional) path to save quantize parameters after calibration + onnx_path : str + (optional) path to save onnx model + input_shape : list or tuple + input shape to onnx model + device : torch.device + device of the model, used to place the dummy input tensor for exporting onnx file. + the tensor is placed on cpu if ```device``` is None + + Returns + ------- + Dict + """ + assert model_path is not None, 'model_path must be specified' + self._unwrap_model() + calibration_config = {} + + for name, module in self.bound_model.named_modules(): + if hasattr(module, 'weight_scale') or hasattr(module, 'input_scale') or hasattr(module, 'output_scale'): + calibration_config[name] = {} + if hasattr(module, 'weight_scale'): + calibration_config[name]['weight_bits'] = 8 + val = float(module.weight_scale * module.weight_qmax) + calibration_config[name]['tracked_max_weight'] = val + calibration_config[name]['tracked_min_weight'] = -val + calibration_config[name]['tracked_qmin_weight'] = -127 + calibration_config[name]['tracked_qmax_weight'] = 127 + weight = module.weight + quantized_weight = self._quantize(weight, + module.weight_scale, + module.weight_zero_point, + module.weight_qmin, + module.weight_qmax) + delattr(module, 'weight') + module.register_parameter('weight', torch.nn.Parameter(quantized_weight)) + # refactor these magic numbers when customizations of dtype and qscheme are ready. 
+ if hasattr(module, 'input_scale'):
+ calibration_config[name]['input_bits'] = 8
+ max_input = float(module.input_scale * (module.input_qmax - module.input_zero_point))
+ min_input = float(module.input_scale * (module.input_qmin - module.input_zero_point))
+ calibration_config[name]['tracked_min_input'] = min_input
+ calibration_config[name]['tracked_max_input'] = max_input
+ calibration_config[name]['tracked_qmin_input'] = 0
+ calibration_config[name]['tracked_qmax_input'] = 127
+ if hasattr(module, 'output_scale'):
+ calibration_config[name]['output_bits'] = 8
+ max_output = float(module.output_scale * (module.output_qmax - module.output_zero_point))
+ min_output = float(module.output_scale * (module.output_qmin - module.output_zero_point))
+ calibration_config[name]['tracked_min_output'] = min_output
+ calibration_config[name]['tracked_max_output'] = max_output
+ calibration_config[name]['tracked_qmin_output'] = 0
+ calibration_config[name]['tracked_qmax_output'] = 127
+ self._del_simulated_attr(module)
+
+ self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path,
+ input_shape, device)
+
+ return calibration_config
+
+ def _del_simulated_attr(self, module):
+ """
+ delete redundant simulated-quantization attributes from the quantized module
+ """
+ del_attr_list = ['old_weight', 'steps', 'weight_qmax', 'weight_qmin', 'input_qmax', 'input_qmin',
+ 'output_qmax', 'output_qmin', 'weight_scale', 'weight_zero_point', 'input_scale',
+ 'input_zero_point', 'output_scale', 'output_zero_point']
+ for attr in del_attr_list:
+ if hasattr(module, attr):
+ delattr(module, attr)
diff --git a/nni/algorithms/compression/pytorch/quantization/qat_quantizer.py b/nni/algorithms/compression/pytorch/quantization/qat_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebb04446a2c751ca25dc913f4bdeb8ee7b1aff32
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/quantization/qat_quantizer.py
@@ -0,0 +1,510 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import torch
+from schema import Schema, And, Or, Optional
+from nni.compression.pytorch.utils.config_validation import QuantizerSchema
+from nni.compression.pytorch.compressor import BN_FOLD_TAG, Quantizer, QuantGrad
+from nni.compression.pytorch.quantization.literal import (
+ PER_CHANNEL_QUANT_SCHEME,
+ QuantScheme,
+ QuantDtype,
+ QuantType
+)
+from nni.compression.pytorch.quantization.settings import LayerQuantSetting
+from nni.compression.pytorch.quantization.utils import (
+ calculate_qmin_qmax,
+ get_min_max_value,
+ get_quant_shape
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class QATGrad(QuantGrad):
+ @staticmethod
+ def quant_backward(tensor, grad_output, quant_type, scale, zero_point, qmin, qmax):
+ tensor_q = QuantGrad._quantize(tensor, scale, zero_point)
+ mask = (tensor_q < qmin) | (tensor_q > qmax)
+ grad_output[mask] = 0
+ return grad_output
+
+
+def update_quantization_param(bits, rmin, rmax, dtype, scheme):
+ """
+ calculate the `zero_point` and `scale`.
+
+ Parameters
+ ----------
+ bits : int
+ quantization bit length
+ rmin : Tensor
+ min value of the real (float) range
+ rmax : Tensor
+ max value of the real (float) range
+ dtype : QuantDtype
+ quantized data type
+ scheme : QuantScheme
+ quantization scheme to be used
+ Returns
+ -------
+ Tensor, Tensor
+ """
+
+ # extend the [min, max] interval to ensure that it contains 0.
+ # Otherwise, we would not meet the requirement that 0 be an exactly
+ # representable value.
+ # I think this is for activations that need to be padded during training.
+ # However, this is the default behavior of the PyTorch quantization observer,
+ # so we also make it the default behavior here.
+ rmin = torch.min(rmin, torch.zeros_like(rmin))
+ rmax = torch.max(rmax, torch.zeros_like(rmax))
+ zero_point = torch.zeros_like(rmin)
+
+ # todo: there is no need to calculate qmin and qmax again
+ qmin, qmax = calculate_qmin_qmax(bits, dtype)
+
+ if scheme in [QuantScheme.PER_TENSOR_SYMMETRIC, QuantScheme.PER_CHANNEL_SYMMETRIC]:
+ abs_max = torch.max(torch.abs(rmin), torch.abs(rmax))
+ scale = abs_max / (float(qmax - qmin) / 2)
+ if dtype == QuantDtype.UINT:
+ zero_point_val = (qmin + qmax) // 2
+ zero_point = zero_point.new_full(zero_point.size(), zero_point_val)
+ else:
+ scale = (rmax - rmin) / float(qmax - qmin)
+ zero_point = qmin - torch.round(rmin / scale)
+
+ zero_point = torch.clamp(zero_point, qmin, qmax)
+
+ # todo: add these lines
+ # eps = torch.finfo(torch.float32).eps
+ # scale = torch.max(scale, eps)
+
+ return scale, zero_point
+
+
+def update_ema(biased_ema, value, decay):
+ """
+ update the biased stat in each step using the exponential moving average method
+
+ Parameters
+ ----------
+ biased_ema : float
+ previous stat value
+ value : float
+ current stat value
+ decay : float
+ the weight of the previous stat value, larger means a smoother curve
+
+ Returns
+ -------
+ float
+ """
+ biased_ema = biased_ema * decay + (1 - decay) * value
+ return biased_ema
+
+
+class QAT_Quantizer(Quantizer):
+ """Quantizer defined in:
+ Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference
+ http://openaccess.thecvf.com/content_cvpr_2018/papers/Jacob_Quantization_and_Training_CVPR_2018_paper.pdf
+ """
+
+ def __init__(self, model, config_list, optimizer, dummy_input=None):
+ """
+ Parameters
+ ----------
+ model : torch.nn.Module
+ the model to be quantized
+ config_list : list of dict
+ list of configurations for quantization
+ supported keys for dict:
+ - quant_types : list of string
+ type of quantization you want to apply, currently support 'weight', 'input', 'output'
+ - quant_bits : int or dict of {str : int}
+ bit length of quantization, key is the quantization type, value is the length, e.g. {'weight': 8},
+ when the type is int, all quantization types share the same bit length
+ - quant_start_step : int
+ disable quantization until the model has been run for a certain number of steps, this allows the network to enter a more stable
+ state where output quantization ranges do not exclude a significant fraction of values, default value is 0
+ - op_types : list of string
+ types of nn.Module you want to apply quantization to, e.g. 'Conv2d'
+ optimizer : torch.optim.Optimizer
+ the optimizer used to train the model
+ dummy_input : tuple of tensor
+ inputs to the model, which are used to get the graph of the module. The graph is used to find
+ Conv-Bn patterns. And then the batch normalization folding would be enabled. If dummy_input is not
+ given, the batch normalization folding would be disabled.
+ """
+
+ assert isinstance(optimizer, torch.optim.Optimizer), "unrecognized optimizer type"
+ super().__init__(model, config_list, optimizer, dummy_input)
+ self.quant_grad = QATGrad.apply
+ modules_to_compress = self.get_modules_to_compress()
+ device = next(model.parameters()).device
+ self.bound_model.register_buffer("steps", torch.tensor(1))
+ for layer, config in modules_to_compress:
+ module = layer.module
+ name = layer.name
+ # TODO: may relax this limitation?
+ assert name in self.all_shapes, "Could not find shapes for layer {}".format(name)
+ input_shape, output_shape = self.all_shapes[name]
+ layer_quant_setting = LayerQuantSetting(config)
+ layer_quant_setting.ema_decay = 0.99
+ quant_start_step = config.get('quant_start_step', 0)
+ layer_quant_setting.quant_start_step = quant_start_step
+ # todo: support other ranks and remove this check
+ if isinstance(module, torch.nn.Linear):
+ if "input" in config.get("quant_types", []) and \
+ layer_quant_setting.input.quant_scheme in PER_CHANNEL_QUANT_SCHEME:
+ if len(input_shape) != 2:
+ logger.warning("When quantizing torch.nn.Linear, make sure that the rank of the inputs "
+ "of the layer is 2. Skipping quantization of layer %s.", name)
+ continue
+ if "output" in config.get("quant_types", []) and \
+ layer_quant_setting.output.quant_scheme in PER_CHANNEL_QUANT_SCHEME:
+ if len(output_shape) != 2:
+ logger.warning("When quantizing torch.nn.Linear, make sure that the rank of the outputs "
+ "of the layer is 2. Skipping quantization of layer %s.", name)
+ continue
+
+ if "weight" in config.get("quant_types", []):
+ quant_shape = get_quant_shape(module.weight.shape, QuantType.WEIGHT, layer_quant_setting.weight.quant_scheme)
+ module.register_buffer('weight_scale', torch.zeros(quant_shape))
+ module.register_buffer('weight_zero_point', torch.zeros(quant_shape))
+
+ if "input" in config.get("quant_types", []):
+ quant_shape = get_quant_shape(input_shape, QuantType.INPUT, layer_quant_setting.input.quant_scheme)
+ module.register_buffer('tracked_min_input', torch.zeros(quant_shape))
+ module.register_buffer('tracked_max_input', torch.zeros(quant_shape))
+ module.register_buffer('input_scale', torch.zeros(quant_shape))
+ module.register_buffer('input_zero_point', torch.zeros(quant_shape))
+
+ if "output" in config.get("quant_types", []):
+ quant_shape = get_quant_shape(output_shape, QuantType.OUTPUT, layer_quant_setting.output.quant_scheme)
+ module.register_buffer('tracked_min_output', torch.zeros(quant_shape))
+ module.register_buffer('tracked_max_output', torch.zeros(quant_shape))
+ module.register_buffer('output_scale', torch.zeros(quant_shape))
+ module.register_buffer('output_zero_point', torch.zeros(quant_shape))
+
+ setattr(module, "layer_quant_setting", layer_quant_setting)
+ self.bound_model.to(device)
+
+ def _del_simulated_attr(self, module):
+ """
+ delete redundant simulated-quantization attributes from the quantized module
+ """
+ del_attr_list = ['old_weight', 'old_bias', 'ema_decay', 'tracked_min_output', 'tracked_max_output',
+ 'tracked_min_input', 'tracked_max_input', 'BN_FOLD_TAG',
+ 'weight_scale', 'weight_zero_point', 'input_scale', 'input_zero_point',
+ 'output_scale', 'output_zero_point', 'layer_quant_setting']
+ for attr in del_attr_list:
+ if hasattr(module, attr):
+ delattr(module, attr)
+
+ def validate_config(self, model, config_list):
+ """
+ Parameters
+ ----------
+ model : torch.nn.Module
+ Model to be quantized
+ config_list : list of dict
+ List of configurations
+ """
+ SUPPORTED_OPS = ['Conv2d', 'Linear', 'ReLU', 'ReLU6']
+ schema = QuantizerSchema([{
+ Optional('quant_types'): Schema([lambda x: x in ['weight', 'output', 'input']]),
+ Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({
+ Optional('input'): And(int, lambda n: 0 < n < 32),
+ Optional('weight'): And(int, lambda n: 0 < n < 32),
+ Optional('output'): And(int, lambda n: 0 < n < 32),
+ })),
+ Optional('quant_scheme'): Or(lambda x: x in QuantScheme, Schema({
+ Optional('input'): lambda x: x in QuantScheme,
+ Optional('weight'): lambda x:
x in QuantScheme,
+ Optional('output'): lambda x: x in QuantScheme
+ })),
+ Optional('quant_dtype'): Or(lambda x: x in QuantDtype, Schema({
+ Optional('input'): lambda x: x in QuantDtype,
+ Optional('weight'): lambda x: x in QuantDtype,
+ Optional('output'): lambda x: x in QuantDtype
+ })),
+ Optional('quant_start_step'): And(int, lambda n: n >= 0),
+ Optional('op_types'): [And(str, lambda n: n in SUPPORTED_OPS)],
+ Optional('op_names'): [str],
+ Optional('exclude'): bool
+ }], model, logger)
+
+ schema.validate(config_list)
+
+ def _quantize(self, real_value, scale, zero_point, qmin, qmax):
+ """
+ quantize real value.
+
+ Parameters
+ ----------
+ real_value : torch.Tensor
+ the real value to be quantized
+ scale : torch.Tensor
+ quantization scale
+ zero_point : torch.Tensor
+ quantization zero point
+ qmin : int
+ lower bound of the int range
+ qmax : int
+ upper bound of the int range
+
+ Returns
+ -------
+ Tensor
+ """
+ transformed_val = zero_point + real_value / scale
+ clamped_val = torch.clamp(transformed_val, qmin, qmax)
+ quantized_val = torch.round(clamped_val)
+ return quantized_val
+
+ def _dequantize(self, quantized_val, scale, zero_point):
+ """
+ dequantize quantized value.
+ Because we simulate quantization in the training process, all the computations still happen in floating
+ point, which means we first quantize tensors and then dequantize them. For more details, please refer to the paper.
+
+ Parameters
+ ----------
+ quantized_val : torch.Tensor
+ the quantized value to be de-quantized
+ scale : torch.Tensor
+ quantization scale
+ zero_point : torch.Tensor
+ quantization zero point
+
+ Returns
+ -------
+ Tensor
+ """
+ real_val = scale * (quantized_val - zero_point)
+ return real_val
+
+ def quantize_weight(self, wrapper, **kwargs):
+ module = wrapper.module
+ weight = module.weight
+ layer_quant_setting = module.layer_quant_setting
+ tensor_quant_setting = layer_quant_setting.weight
+
+ # layer-wise settings
+ quant_start_step = layer_quant_setting.quant_start_step
+
+ # tensor-wise settings
+ dtype = tensor_quant_setting.quant_dtype
+ scheme = tensor_quant_setting.quant_scheme
+ qmin, qmax = tensor_quant_setting.get_qmin_qmax()
+ bits = tensor_quant_setting.bits
+
+ # In evaluation mode, we only quantize weights without updating statistics
+ if not wrapper.training:
+ scale, zero_point = module.weight_scale, module.weight_zero_point
+ weight = self._quantize(weight, scale, zero_point, qmin, qmax)
+ weight = self._dequantize(weight, scale, zero_point)
+ module.weight = weight
+ return weight
+
+ if quant_start_step > int(self.bound_model.steps):
+ return weight
+
+ current_min, current_max = get_min_max_value(weight, QuantType.WEIGHT, scheme)
+ scale, zero_point = update_quantization_param(bits, current_min, current_max, dtype, scheme)
+ module.weight_scale.copy_(scale)
+ module.weight_zero_point.copy_(zero_point)
+ weight = self._quantize(weight, scale, zero_point, qmin, qmax)
+ weight = self._dequantize(weight, scale, zero_point)
+ # Weight cannot be modified in place, so when torch.nn.DataParallel is used, this update
+ # will be lost after each forward pass. However, the update takes effect on each
+ # replicated module during each forward pass, which makes the quantized weight
+ # be used correctly.
+ wrapper.module.weight = weight + return weight + + def quantize_input(self, inputs, wrapper, **kwargs): + module = wrapper.module + + layer_quant_setting = module.layer_quant_setting + tensor_quant_setting = layer_quant_setting.input + + # layer-wise settings + quant_start_step = layer_quant_setting.quant_start_step + ema_decay = layer_quant_setting.ema_decay + + # tensor-wise settings + dtype = tensor_quant_setting.quant_dtype + scheme = tensor_quant_setting.quant_scheme + qmin, qmax = tensor_quant_setting.get_qmin_qmax() + bits = tensor_quant_setting.bits + + if not wrapper.training: + scale = module.input_scale + zero_point = module.input_zero_point + inputs = self._quantize(inputs, scale, zero_point, qmin, qmax) + inputs = self._dequantize(inputs, scale, zero_point) + return inputs + + current_min, current_max = get_min_max_value(inputs, QuantType.INPUT, scheme) + + if int(self.bound_model.steps) == 1: + module.tracked_min_input.copy_(current_min) + module.tracked_max_input.copy_(current_max) + + tracked_min_input = update_ema(module.tracked_min_input, current_min, ema_decay) + tracked_max_input = update_ema(module.tracked_max_input, current_max, ema_decay) + module.tracked_min_input.copy_(tracked_min_input) + module.tracked_max_input.copy_(tracked_max_input) + + if quant_start_step > int(self.bound_model.steps): + return inputs + + scale, zero_point = update_quantization_param( + bits, module.tracked_min_input, module.tracked_max_input, dtype, scheme) + module.input_scale.copy_(scale) + module.input_zero_point.copy_(zero_point) + + inputs = self._quantize(inputs, scale, zero_point, qmin, qmax) + inputs = self._dequantize(inputs, scale, zero_point) + return inputs + + def quantize_output(self, output, wrapper, **kwargs): + module = wrapper.module + layer_quant_setting = module.layer_quant_setting + tensor_quant_setting = layer_quant_setting.output + + # layer-wise settings + quant_start_step = layer_quant_setting.quant_start_step + ema_decay = layer_quant_setting.ema_decay + + # tensor-wise settings + dtype = tensor_quant_setting.quant_dtype + scheme = tensor_quant_setting.quant_scheme + qmin, qmax = tensor_quant_setting.get_qmin_qmax() + bits = tensor_quant_setting.bits + + if not wrapper.training: + scale = module.output_scale + zero_point = module.output_zero_point + output = self._quantize(output, scale, zero_point, qmin, qmax) + output = self._dequantize(output, scale, zero_point) + return output + + current_min, current_max = get_min_max_value(output, QuantType.OUTPUT, scheme) + + if int(self.bound_model.steps) == 1: + module.tracked_min_output.copy_(current_min) + module.tracked_max_output.copy_(current_max) + + tracked_min_output = update_ema(module.tracked_min_output, current_min, ema_decay) + tracked_max_output = update_ema(module.tracked_max_output, current_max, ema_decay) + module.tracked_min_output.copy_(tracked_min_output) + module.tracked_max_output.copy_(tracked_max_output) + + if quant_start_step > int(self.bound_model.steps): + return output + + scale, zero_point = update_quantization_param( + bits, module.tracked_min_output, module.tracked_max_output, dtype, scheme) + module.output_scale.copy_(scale) + module.output_zero_point.copy_(zero_point) + + output = self._quantize(output, scale, zero_point, qmin, qmax) + output = self._dequantize(output, scale, zero_point) + return output + + def load_calibration_config(self, calibration_config): + modules_to_compress = self.get_modules_to_compress() + for layer, _ in modules_to_compress: + name, module = layer.name, 
layer.module
+ if name not in calibration_config:
+ if module.layer_quant_setting.weight or module.layer_quant_setting.input or module.layer_quant_setting.output:
+ logger.warning(f"Cannot find module {name}'s parameters in the input config.")
+ continue
+ if module.layer_quant_setting.weight:
+ assert calibration_config[name]['weight_bits'] == module.layer_quant_setting.weight.bits, \
+ f"weight bits of module {name} fail to match"
+ if module.layer_quant_setting.input:
+ assert calibration_config[name]['input_bits'] == module.layer_quant_setting.input.bits, \
+ f"input bits of module {name} fail to match"
+ module.tracked_min_input.data = torch.tensor([calibration_config[name]['tracked_min_input']])
+ module.tracked_max_input.data = torch.tensor([calibration_config[name]['tracked_max_input']])
+ if module.layer_quant_setting.output:
+ assert calibration_config[name]['output_bits'] == module.layer_quant_setting.output.bits, \
+ f"output bits of module {name} fail to match"
+ module.tracked_min_output.data = torch.tensor([calibration_config[name]['tracked_min_output']])
+ module.tracked_max_output.data = torch.tensor([calibration_config[name]['tracked_max_output']])
+
+ def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None):
+ """
+ Export quantized model weights and calibration parameters (optional)
+
+ Parameters
+ ----------
+ model_path : str
+ path to save the quantized model weights
+ calibration_path : str
+ (optional) path to save quantization parameters after calibration
+ onnx_path : str
+ (optional) path to save the onnx model
+ input_shape : list or tuple
+ input shape of the onnx model
+ device : torch.device
+ device of the model, used to place the dummy input tensor for exporting the onnx file.
+ the tensor is placed on cpu if ```device``` is None
+
+ Returns
+ -------
+ Dict
+ """
+ assert model_path is not None, 'model_path must be specified'
+ self._unwrap_model()
+ calibration_config = {}
+
+ modules_to_compress = self.get_modules_to_compress()
+ for layer, _ in modules_to_compress:
+ name, module = layer.name, layer.module
+ if hasattr(module.layer_quant_setting, 'weight') or hasattr(module.layer_quant_setting, 'output'):
+ calibration_config[name] = {}
+ if module.layer_quant_setting.weight:
+ calibration_config[name]['weight_bits'] = int(module.layer_quant_setting.weight.bits)
+ calibration_config[name]['weight_scale'] = module.weight_scale
+ calibration_config[name]['weight_zero_point'] = module.weight_zero_point
+
+ # Recover weight/bias for batch normalization folding
+ actual_weight = getattr(module, 'old_weight', None)
+ if actual_weight is None:
+ logger.warning("Cannot recover weight for layer %s. 
" + "This may lead to a wrong accuracy performance on the backend.", name) + delattr(module, 'weight') + module.register_parameter('weight', actual_weight) + if hasattr(module, BN_FOLD_TAG): + actual_bias = getattr(module, 'old_bias', None) + delattr(module, 'bias') + if actual_bias is not None: + module.register_parameter('bias', actual_bias) + else: + setattr(module, 'bias', None) + + if module.layer_quant_setting.input: + calibration_config[name]['input_bits'] = int(module.layer_quant_setting.input.bits) + calibration_config[name]['tracked_min_input'] = float(module.tracked_min_input) + calibration_config[name]['tracked_max_input'] = float(module.tracked_max_input) + + if module.layer_quant_setting.output: + calibration_config[name]['output_bits'] = int(module.layer_quant_setting.output.bits) + calibration_config[name]['tracked_min_output'] = float(module.tracked_min_output) + calibration_config[name]['tracked_max_output'] = float(module.tracked_max_output) + self._del_simulated_attr(module) + + self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, input_shape, device) + + return calibration_config + + def step_with_optimizer(self): + """ + override `compressor` `step` method, quantization only happens after certain number of steps + """ + self.bound_model.steps.add_(1) diff --git a/nni/algorithms/compression/tensorflow/pruning/__init__.py b/nni/algorithms/compression/tensorflow/pruning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c535fd75123f8d37fba1084a35cb8615db426175 --- /dev/null +++ b/nni/algorithms/compression/tensorflow/pruning/__init__.py @@ -0,0 +1 @@ +from .one_shot_pruner import * diff --git a/nni/algorithms/compression/tensorflow/pruning/one_shot_pruner.py b/nni/algorithms/compression/tensorflow/pruning/one_shot_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1e1e3e0fa15884aa449246bc1a266c977513c6 --- /dev/null +++ b/nni/algorithms/compression/tensorflow/pruning/one_shot_pruner.py @@ -0,0 +1,110 @@ +import tensorflow as tf + +from nni.compression.tensorflow import Pruner + +__all__ = [ + 'LevelPruner', + 'SlimPruner', +] + +class OneshotPruner(Pruner): + def __init__(self, model, config_list, masker_class, **algo_kwargs): + super().__init__(model, config_list) + self.set_wrappers_attribute('calculated', False) + self.masker = masker_class(model, self, **algo_kwargs) + + def validate_config(self, model, config_list): + pass # TODO + + def calc_masks(self, wrapper, wrapper_idx=None): + if wrapper.calculated: + return None + sparsity = wrapper.config['sparsity'] + masks = self.masker.calc_masks(sparsity, wrapper, wrapper_idx) + if masks is not None: + wrapper.calculated = True + return masks + + +class LevelPruner(OneshotPruner): + def __init__(self, model, config_list): + super().__init__(model, config_list, LevelPrunerMasker) + + +class SlimPruner(OneshotPruner): + def __init__(self, model, config_list): + super().__init__(model, config_list, SlimPrunerMasker) + + +class WeightMasker: + def __init__(self, model, pruner, **kwargs): + self.model = model + self.pruner = pruner + + def calc_masks(self, sparsity, wrapper, wrapper_idx=None): + raise NotImplementedError() + + +class LevelPrunerMasker(WeightMasker): + def calc_masks(self, sparsity, wrapper, wrapper_idx=None): + masks = {} + for weight_variable in wrapper.layer.weights: + if 'bias' in weight_variable.name: + continue + + num_prune = int(tf.size(weight_variable).numpy() * sparsity) + if num_prune == 0: 
+ continue + + weight = weight_variable.read_value() + if wrapper.masks.get(weight_variable.name) is not None: + weight = tf.math.multiply(weight, wrapper.masks[weight_variable.name]) + + w_abs = tf.math.abs(weight) + k = tf.size(weight) - num_prune + topk = tf.math.top_k(tf.reshape(w_abs, [-1]), k).values + if tf.size(topk) == 0: + mask = tf.zeros_like(weight) + else: + mask = tf.math.greater_equal(w_abs, topk[-1]) + masks[weight_variable.name] = tf.cast(mask, weight.dtype) + return masks + +class SlimPrunerMasker(WeightMasker): + def __init__(self, model, pruner, **kwargs): + super().__init__(model, pruner) + weight_list = [] + for wrapper in pruner.wrappers: + weights = [w for w in wrapper.layer.weights if '/gamma:' in w.name] + assert len(weights) == 1, f'Bad weights: {[w.name for w in wrapper.layer.weights]}' + weight_list.append(tf.math.abs(weights[0].read_value())) + all_bn_weights = tf.concat(weight_list, 0) + k = int(all_bn_weights.shape[0] * pruner.wrappers[0].config['sparsity']) + top_k = -tf.math.top_k(-tf.reshape(all_bn_weights, [-1]), k).values + self.global_threshold = top_k.numpy()[-1] + + def calc_masks(self, sparsity, wrapper, wrapper_idx=None): + assert isinstance(wrapper.layer, tf.keras.layers.BatchNormalization), \ + 'SlimPruner only supports 2D batch normalization layer pruning' + + weight = None + weight_name = None + bias_name = None + + for variable in wrapper.layer.weights: + if '/gamma:' in variable.name: + weight = variable.read_value() + weight_name = variable.name + elif '/beta:' in variable.name: + bias_name = variable.name + + assert weight is not None + if wrapper.masks.get(weight_name) is not None: + weight *= wrapper.masks[weight_name] + + mask = tf.cast(tf.math.abs(weight) > self.global_threshold, weight.dtype) + + masks = {weight_name: mask} + if bias_name: + masks[bias_name] = mask + return masks diff --git a/nni/algorithms/compression/v2/pytorch/__init__.py b/nni/algorithms/compression/v2/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/compression/v2/pytorch/base/__init__.py b/nni/algorithms/compression/v2/pytorch/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6efb20cabad8c02359acddfa02ef84ab9a104d65 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/base/__init__.py @@ -0,0 +1,3 @@ +from .compressor import Compressor, LayerInfo +from .pruner import Pruner, PrunerModuleWrapper +from .scheduler import BasePruningScheduler, Task, TaskResult diff --git a/nni/algorithms/compression/v2/pytorch/base/compressor.py b/nni/algorithms/compression/v2/pytorch/base/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..50e530cc8af1b64bf2e447eeb68e05213318e51b --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/base/compressor.py @@ -0,0 +1,308 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+import collections
+import logging
+from typing import List, Dict, Optional, Tuple, Any
+
+import torch
+from torch.nn import Module
+
+from nni.common.graph_utils import TorchModuleGraph
+from nni.algorithms.compression.v2.pytorch.utils.pruning import get_module_by_name, weighted_modules
+
+_logger = logging.getLogger(__name__)
+
+__all__ = ['LayerInfo', 'Compressor']
+
+
+class LayerInfo:
+ def __init__(self, name: str, module: Module):
+ self.module = module
+ self.name = name
+ self.type = type(module).__name__
+
+
+def _setattr(model: Module, name: str, module: Module):
+ parent_module, _ = get_module_by_name(model, name)
+ if parent_module is not None:
+ name_list = name.split(".")
+ setattr(parent_module, name_list[-1], module)
+ else:
+ raise AttributeError('{} does not exist.'.format(name))
+
+
+class Compressor:
+ """
+ The abstract base pytorch compressor.
+ """
+
+ def __init__(self, model: Optional[Module], config_list: Optional[List[Dict]]):
+ """
+ Parameters
+ ----------
+ model
+ The model to be compressed.
+ config_list
+ The config list used by the compressor, usually specifies the 'op_types' or 'op_names' that you want to compress.
+ """
+ self.is_wrapped = False
+ if model is not None:
+ self.reset(model=model, config_list=config_list)
+ else:
+ _logger.warning('Model and config_list are not set for this compressor; call reset() or pass this compressor to a scheduler.')
+
+ def reset(self, model: Module, config_list: List[Dict]):
+ """
+ Reset the compressor with a model and config_list.
+
+ Parameters
+ ----------
+ model
+ The model to be compressed.
+ config_list
+ The config list used by the compressor, usually specifies the 'op_types' or 'op_names' that you want to compress.
+ """
+ assert isinstance(model, Module), 'Only support compressing pytorch Module, but the type of model is {}.'.format(type(model))
+ self.bound_model = model
+ self.config_list = config_list
+ self.validate_config(model=model, config_list=config_list)
+
+ self._unwrap_model()
+
+ self._modules_to_compress = None
+ self.modules_wrapper = collections.OrderedDict()
+ for layer, config in self._detect_modules_to_compress():
+ wrapper = self._wrap_modules(layer, config)
+ self.modules_wrapper[layer.name] = wrapper
+
+ self._wrap_model()
+
+ def clear_model_references(self):
+ """
+ Clear all references to the model in this compressor. Just to free up memory.
+ Call reset() first before using the compressor again.
+ """
+ self._unwrap_model()
+ self.bound_model = None
+ self.config_list = None
+ self.modules_wrapper = None
+ self._modules_to_compress = None
+
+ def _detect_modules_to_compress(self) -> List[Tuple[LayerInfo, Dict]]:
+ """
+ Detect all modules that should be compressed, and save the result in `self._modules_to_compress`.
+ The model will be instrumented and users should never edit it after calling this method.
+ """
+ if self._modules_to_compress is None:
+ self._modules_to_compress = []
+ for name, module in self.bound_model.named_modules():
+ if module == self.bound_model:
+ continue
+ layer = LayerInfo(name, module)
+ config = self._select_config(layer)
+ if config is not None:
+ self._modules_to_compress.append((layer, config))
+ return self._modules_to_compress
+
+ def _select_config(self, layer: LayerInfo) -> Optional[Dict]:
+ """
+ Find the configuration for `layer` by parsing `self.config_list`.
+
+ Parameters
+ ----------
+ layer
+ The layer to check for a compression configuration.
+
+ Returns
+ -------
+ Optional[Dict]
+ The retrieved configuration for this layer; if None, this layer should not be compressed.
+ """
+ ret = None
+ for config in self.config_list:
+ config = config.copy()
+ # expand config if key `default` is in config['op_types']
+ if 'op_types' in config and 'default' in config['op_types']:
+ expanded_op_types = []
+ for op_type in config['op_types']:
+ if op_type == 'default':
+ expanded_op_types.extend(weighted_modules)
+ else:
+ expanded_op_types.append(op_type)
+ config['op_types'] = expanded_op_types
+
+ # check if the condition is satisfied
+ if 'op_types' in config and layer.type not in config['op_types']:
+ continue
+ if 'op_names' in config and layer.name not in config['op_names']:
+ continue
+
+ ret = config
+ if ret is None or 'exclude' in ret:
+ return None
+ return ret
+
+ def get_modules_wrapper(self) -> Dict[str, Module]:
+ """
+ Returns
+ -------
+ OrderedDict[str, Module]
+ An ordered dict, key is the name of the module, value is the wrapper of the module.
+ """
+ return self.modules_wrapper
+
+ def _wrap_model(self):
+ """
+ Wrap all modules that need to be compressed.
+ """
+ if not self.is_wrapped:
+ for _, wrapper in reversed(self.get_modules_wrapper().items()):
+ _setattr(self.bound_model, wrapper.name, wrapper)
+ self.is_wrapped = True
+
+ def _unwrap_model(self):
+ """
+ Unwrap all modules that need to be compressed.
+ """
+ if self.is_wrapped:
+ for _, wrapper in self.get_modules_wrapper().items():
+ _setattr(self.bound_model, wrapper.name, wrapper.module)
+ self.is_wrapped = False
+
+ def set_wrappers_attribute(self, name: str, value: Any):
+ """
+ To register attributes used in the wrapped module's forward method.
+ If the type of the value is torch.Tensor, the value is registered as a buffer in the wrapper,
+ which will be saved by model.state_dict. Otherwise, the value is just a regular variable in the wrapper.
+
+ Parameters
+ ----------
+ name
+ Name of the variable.
+ value
+ Value of the variable.
+ """
+ for wrapper in self.get_modules_wrapper().values():
+ if isinstance(value, torch.Tensor):
+ wrapper.register_buffer(name, value.clone())
+ else:
+ setattr(wrapper, name, value)
+
+ def generate_graph(self, dummy_input: Any) -> TorchModuleGraph:
+ """
+ Generate a `TorchModuleGraph` instance of `self.bound_model` based on `jit.trace`.
+
+ Parameters
+ ----------
+ dummy_input
+ The dummy input for `jit.trace`; users should put it on the right device before passing it in.
+
+ Returns
+ -------
+ TorchModuleGraph
+ A `TorchModuleGraph` instance.
+ """
+ self._unwrap_model()
+ graph = TorchModuleGraph(model=self.bound_model, dummy_input=dummy_input)
+ self._wrap_model()
+ return graph
+
+ def generate_module_groups(self) -> Dict[int, List[str]]:
+ """
+ Get all module names in each config in config_list.
+
+ Returns
+ -------
+ Dict[int, List[str]]
+ A dict. The key is the config idx in config_list, the value is the module name list, e.g. {1: ['layer.0', 'layer.2']}.
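+
+ Examples
+ --------
+ An illustrative sketch (the compressor instance, config and module names are placeholders)::
+
+ >>> # config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']},
+ >>> # {'sparsity': 0.2, 'op_names': ['fc']}]
+ >>> compressor.generate_module_groups()
+ {0: ['conv1', 'conv2'], 1: ['fc']}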
+ """ + self._unwrap_model() + + module_groups = {} + for name, module in self.bound_model.named_modules(): + if module == self.bound_model: + continue + layer = LayerInfo(name, module) + ret = None + for idx, config in enumerate(self.config_list): + config = config.copy() + # expand config if key `default` is in config['op_types'] + if 'op_types' in config and 'default' in config['op_types']: + expanded_op_types = [] + for op_type in config['op_types']: + if op_type == 'default': + expanded_op_types.extend(weighted_modules) + else: + expanded_op_types.append(op_type) + config['op_types'] = expanded_op_types + # check if condition is satisified + if 'op_types' in config and layer.type not in config['op_types']: + continue + if 'op_names' in config and layer.name not in config['op_names']: + continue + ret = (idx, config) + if ret is not None and 'exclude' not in ret[1]: + module_groups.setdefault(ret[0], []) + module_groups[ret[0]].append(name) + + self._wrap_model() + return module_groups + + def get_origin2wrapped_parameter_name_map(self) -> Dict[str, str]: + """ + Get the name mapping of parameters from original model to wrapped model. + + Returns + ------- + Dict[str, str] + Return a dict `{original_model_parameter_name: wrapped_model_parameter_name}` + """ + if self.is_wrapped: + wrapped_param_names = {id(param): name for name, param in self.bound_model.named_parameters()} + self._unwrap_model() + parameter_name_map = {name: wrapped_param_names[id(param)] for name, param in self.bound_model.named_parameters()} + self._wrap_model() + return parameter_name_map + else: + raise Exception('When only the model is wrapped can get the parameter_name_map.') + + def _wrap_modules(self, layer: LayerInfo, config: Dict): + """ + This method is implemented in the subclasses, i.e., `Pruner` and `Quantizer` + + Parameters + ---------- + layer + the layer to instrument the compression operation + config + the configuration for compressing this layer + """ + raise NotImplementedError() + + def validate_config(self, model: Module, config_list: List[Dict]): + """ + Subclass can optionally implement this method to check if config_list is valid. + + Parameters + ---------- + model + The model under compressed. + config_list + The config list used by compressor, usually specifies the 'op_types' or 'op_names' that want to compress. + """ + pass + + def compress(self) -> Module: + """ + Compress the model with algorithm implemented by subclass. + + The model will be instrumented and user should never edit it after calling this method. + `self._modules_to_compress` records all the to-be-compressed layers. + + Returns + ------- + torch.nn.Module + model with specified modules compressed. + """ + return self.bound_model diff --git a/nni/algorithms/compression/v2/pytorch/base/pruner.py b/nni/algorithms/compression/v2/pytorch/base/pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..730b9c749493d56b5adb0b6fab1fccd139408f77 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/base/pruner.py @@ -0,0 +1,168 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+import logging
+from typing import Dict, List, Optional, Tuple
+
+import torch
+from torch import Tensor
+from torch.nn import Module
+
+from .compressor import Compressor, LayerInfo
+
+_logger = logging.getLogger(__name__)
+
+__all__ = ['Pruner']
+
+
+class PrunerModuleWrapper(Module):
+ def __init__(self, module: Module, module_name: str, config: Dict, pruner: Compressor):
+ """
+ Wrap a module to enable data parallel, forward method customization and buffer registration.
+
+ Parameters
+ ----------
+ module
+ The module the user wants to compress.
+ module_name
+ The name of the module to compress; the wrapper module shares the same name.
+ config
+ The configurations that users specify for compression.
+ pruner
+ The pruner used to calculate the mask.
+ """
+ super().__init__()
+ # origin layer information
+ self.module = module
+ self.name = module_name
+ # config and pruner
+ self.config = config
+ self.pruner = pruner
+
+ # register buffer for mask
+ self.register_buffer("weight_mask", torch.ones(self.module.weight.shape))
+ if hasattr(self.module, 'bias') and self.module.bias is not None:
+ self.register_buffer("bias_mask", torch.ones(self.module.bias.shape))
+ else:
+ self.register_buffer("bias_mask", None)
+
+ def forward(self, *inputs):
+ # apply mask to weight, bias
+ self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)
+ if hasattr(self.module, 'bias') and self.module.bias is not None:
+ self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)
+ return self.module(*inputs)
+
+
+class Pruner(Compressor):
+ """
+ The abstract class for pruning algorithms. Inherit this class and implement the `_reset_tools` to customize a pruner.
+ """
+
+ def reset(self, model: Optional[Module] = None, config_list: Optional[List[Dict]] = None):
+ super().reset(model=model, config_list=config_list)
+
+ def _wrap_modules(self, layer: LayerInfo, config: Dict):
+ """
+ Create a wrapper module to replace the original one.
+
+ Parameters
+ ----------
+ layer
+ The layer to instrument the mask.
+ config
+ The configuration for generating the mask.
+ """
+ _logger.debug("Module detected to compress : %s.", layer.name)
+ wrapper = PrunerModuleWrapper(layer.module, layer.name, config, self)
+ assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
+ # move newly registered buffers to the same device as the weight
+ wrapper.to(layer.module.weight.device)
+ return wrapper
+
+ def load_masks(self, masks: Dict[str, Dict[str, Tensor]]):
+ """
+ Load existing masks onto the wrappers. You can train the model with existing masks after loading them.
+
+ Parameters
+ ----------
+ masks
+ The masks dict with format {'op_name': {'weight': mask, 'bias': mask}}.
+ """
+ wrappers = self.get_modules_wrapper()
+ for name, layer_mask in masks.items():
+ assert name in wrappers, '{} is not in the wrappers of this pruner, cannot apply the mask.'.format(name)
+ if layer_mask.get('weight') is not None:
+ assert hasattr(wrappers[name], 'weight_mask'), 'There is no attribute weight_mask in the wrapper.'
+ setattr(wrappers[name], 'weight_mask', layer_mask.get('weight'))
+ if layer_mask.get('bias') is not None:
+ assert hasattr(wrappers[name], 'bias_mask'), 'There is no attribute bias_mask in the wrapper.'
+ setattr(wrappers[name], 'bias_mask', layer_mask.get('bias'))
+
+ def compress(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]]]:
+ """
+ Returns
+ -------
+ Tuple[Module, Dict]
+ Return the wrapped model and the masks.
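+
+ Examples
+ --------
+ A typical subclass workflow sketch (``MyPruner``, ``model`` and the config are
+ placeholders, not part of this module)::
+
+ >>> pruner = MyPruner(model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}])
+ >>> pruned_model, masks = pruner.compress()
+ >>> pruner.show_pruned_weights()
+ >>> pruner.export_model('pruned_model.pth', 'masks.pth')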
+ """ + return self.bound_model, {} + + # NOTE: need refactor dim with supporting list + def show_pruned_weights(self, dim: int = 0): + """ + Log the simulated prune sparsity. + + Parameters + ---------- + dim + The pruned dim. + """ + for _, wrapper in self.get_modules_wrapper().items(): + weight_mask = wrapper.weight_mask + mask_size = weight_mask.size() + if len(mask_size) == 1: + index = torch.nonzero(weight_mask.abs() != 0, as_tuple=False).tolist() + else: + sum_idx = list(range(len(mask_size))) + sum_idx.remove(dim) + index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0, as_tuple=False).tolist() + _logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}') + + def export_model(self, model_path: str, mask_path: Optional[str] = None): + """ + Export pruned model weights, masks and onnx model(optional) + + Parameters + ---------- + model_path + Path to save pruned model state_dict. The weight and bias have already multiplied the masks. + mask_path + Path to save mask dict. + """ + assert self.bound_model is not None, 'The bound model reference has been cleared.' + assert model_path is not None, 'model_path must be specified.' + mask_dict = {} + self._unwrap_model() + + for name, wrapper in self.get_modules_wrapper().items(): + weight_mask = wrapper.weight_mask + bias_mask = wrapper.bias_mask + if weight_mask is not None: + mask_sum = weight_mask.sum().item() + mask_num = weight_mask.numel() + _logger.debug('Layer: %s Sparsity: %.4f', name, 1 - mask_sum / mask_num) + wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask) + if bias_mask is not None: + wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask) + # save mask to dict + mask_dict[name] = {"weight": weight_mask, "bias": bias_mask} + + torch.save(self.bound_model.state_dict(), model_path) + _logger.info('Model state_dict saved to %s', model_path) + + if mask_path is not None: + torch.save(mask_dict, mask_path) + _logger.info('Mask dict saved to %s', mask_path) + + self._wrap_model() diff --git a/nni/algorithms/compression/v2/pytorch/base/scheduler.py b/nni/algorithms/compression/v2/pytorch/base/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..0c22b3eef099d6e50cd42c6c06b4c90b3759e91c --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/base/scheduler.py @@ -0,0 +1,198 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import gc +import logging +import os +from pathlib import Path +from typing import List, Dict, Tuple, Optional + +import json_tricks +import torch +from torch import Tensor +from torch.nn import Module + +_logger = logging.getLogger(__name__) + + +class Task: + # NOTE: If we want to support multi-thread, this part need to refactor, maybe use file and lock to sync. + _reference_counter = {} + + def __init__(self, task_id: int, model_path: str, masks_path: str, config_list_path: str, + speed_up: Optional[bool] = True, finetune: Optional[bool] = True, evaluate: Optional[bool] = True): + """ + Parameters + ---------- + task_id + The unique id of task. + model_path + The path of the unwrapped pytorch model that will be pruned in this task. + masks_path + The path of the masks that applied on the model before pruning. + config_list_path + The path of the config list that used in this task. + speed_up + Control if this task needs speed up, True means use scheduler default value, False means no speed up. 
+        finetune
+            Control whether this task needs finetuning. True means use the scheduler default, False means no finetuning.
+        evaluate
+            Control whether this task needs evaluation. True means use the scheduler default, False means no evaluation.
+        """
+        self.task_id = task_id
+        self.model_path = model_path
+        self.masks_path = masks_path
+        self.config_list_path = config_list_path
+
+        self.speed_up = speed_up
+        self.finetune = finetune
+        self.evaluate = evaluate
+
+        self.status = 'Pending'
+        self.score: Optional[float] = None
+
+        self.state = {}
+
+        for ref in self.referenced_paths():
+            self._reference_counter.setdefault(ref, 0)
+            self._reference_counter[ref] += 1
+
+        self._cleaned = False
+
+    def to_dict(self) -> Dict:
+        return {
+            'task_id': self.task_id,
+            'model_path': str(self.model_path),
+            'masks_path': str(self.masks_path),
+            'config_list_path': str(self.config_list_path),
+            'speed_up': self.speed_up,
+            'finetune': self.finetune,
+            'evaluate': self.evaluate,
+            'status': self.status,
+            'score': self.score,
+            'state': self.state
+        }
+
+    def load_data(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]], List[Dict]]:
+        """
+        Returns
+        -------
+        Tuple[Module, Dict[str, Dict[str, Tensor]], List[Dict]]
+            Return the model to be pruned in this task, the masks applied on the model before pruning,
+            and the config list used in this task.
+        """
+        model = torch.load(self.model_path)
+        masks = torch.load(self.masks_path)
+        with Path(self.config_list_path).open('r') as f:
+            config_list = json_tricks.load(f)
+        return model, masks, config_list
+
+    def referenced_paths(self) -> List[str]:
+        """
+        Return the list of file paths whose references are counted in this task.
+        """
+        return [self.model_path, self.masks_path, self.config_list_path]
+
+    def clean_up(self):
+        """
+        Decrease the reference counter of each referenced file path by 1. If a counter reaches 0, delete the file.
+        """
+        if not self._cleaned:
+            for ref in self.referenced_paths():
+                self._reference_counter[ref] -= 1
+                if self._reference_counter[ref] == 0:
+                    os.remove(ref)
+                elif self._reference_counter[ref] < 0:
+                    _logger.warning('Reference counter error, the reference count of %s is %d',
+                                    ref, self._reference_counter[ref])
+            self._cleaned = True
+        else:
+            _logger.warning('Task %d has already been cleaned up.', self.task_id)
+
+
+class TaskResult:
+    def __init__(self, task_id: int, compact_model: Module, compact_model_masks: Dict[str, Dict[str, Tensor]],
+                 pruner_generated_masks: Dict[str, Dict[str, Tensor]], score: Optional[float]) -> None:
+        """
+        Parameters
+        ----------
+        task_id
+            The unique id of the task.
+        compact_model
+            The unwrapped compact pytorch model after pruning. If the compact model has been sped up during the pruning process,
+            it will have a smaller structure compared with the model before pruning.
+            If the compact model has not been sped up, it will have the same structure as the model before pruning.
+        compact_model_masks
+            The masks on the compact model. If the compact model has been sped up during the pruning process,
+            `compact_model_masks` is always an empty dict. If the compact model has not been sped up,
+            `compact_model_masks` is the same as `pruner_generated_masks`.
+        pruner_generated_masks
+            The masks that can be applied on the model before pruning. It is always the output of `pruner.compress()`.
+            TODO: If the compact model has been sped up, the automatically inferred masks may also be needed.
+        score
+            The score of the pruning effect, i.e., the accuracy or latency after pruning.
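+
+        Example::
+
+            # A hedged sketch of how a scheduler packages one pruning step;
+            # `compact_model` and `masks` are assumed to come from `pruner.compress()`
+            # and `0.92` is a made-up evaluation score.
+            result = TaskResult(0, compact_model, masks, masks, 0.92)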
+        """
+        self.task_id = task_id
+        self.compact_model = compact_model
+        self.compact_model_masks = compact_model_masks
+        self.pruner_generated_masks = pruner_generated_masks
+        self.score = score
+
+
+class BasePruningScheduler:
+    def generate_task(self) -> Optional[Task]:
+        """
+        Returns
+        -------
+        Optional[Task]
+            Return the next pruning task.
+        """
+        raise NotImplementedError()
+
+    def record_task_result(self, task_result: TaskResult):
+        """
+        Parameters
+        ----------
+        task_result
+            The result of the task.
+        """
+        raise NotImplementedError()
+
+    def pruning_one_step(self, task: Task) -> TaskResult:
+        """
+        Prune the model defined in the task.
+
+        Parameters
+        ----------
+        task
+            The pruning task in this step.
+
+        Returns
+        -------
+        TaskResult
+            Return the result of the task in this step.
+        """
+        raise NotImplementedError()
+
+    def get_best_result(self) -> Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]:
+        """
+        Returns
+        -------
+        Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]
+            Return the task result that has the best performance,
+            including the task id, the compact model, the masks on the compact model, the score and the config list used in this task.
+        """
+        raise NotImplementedError()
+
+    def compress(self):
+        """
+        The main loop of the pruning schedule.
+        """
+        task = self.generate_task()
+
+        while task is not None:
+            task_result = self.pruning_one_step(task)
+            self.record_task_result(task_result)
+            del task_result
+            gc.collect()
+            task = self.generate_task()
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/__init__.py b/nni/algorithms/compression/v2/pytorch/pruning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..90b49cc5c24a83647c05d7f30f5e418fdbcb1467
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/__init__.py
@@ -0,0 +1,6 @@
+from .basic_pruner import *
+from .basic_scheduler import PruningScheduler
+from .iterative_pruner import *
+from .movement_pruner import MovementPruner
+from .auto_compress_pruner import AutoCompressPruner
+from .amc_pruner import AMCPruner
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/amc_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/amc_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b4ee20a33bd940d29da766357a78029795fd789
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/amc_pruner.py
@@ -0,0 +1,237 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from copy import deepcopy
+from pathlib import Path
+from typing import Dict, List, Callable, Optional
+
+import json_tricks
+import torch
+from torch import Tensor
+from torch.nn import Module
+
+from nni.algorithms.compression.v2.pytorch.base import Task, TaskResult
+from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity, config_list_canonical
+from nni.compression.pytorch.utils.counter import count_flops_params
+
+from .iterative_pruner import IterativePruner, PRUNER_DICT
+from .tools import TaskGenerator
+from .tools.rl_env import DDPG, AMCEnv
+
+
+class AMCTaskGenerator(TaskGenerator):
+    """
+    Parameters
+    ----------
+    total_episode
+        The total episode number.
+    dummy_input
+        Used for inference and counting the FLOPs.
+    origin_model
+        The origin unwrapped pytorch model to be pruned.
+    origin_config_list
+        The origin config list provided by the user. Note that this config_list directly configures the origin model.
+        This means the sparsity provided by the origin_masks should also be recorded in the origin_config_list.
+    origin_masks
+        The pre-masks on the origin model. These masks may be user-defined or generated by a previous pruning run.
+    log_dir
+        The log directory used to save the task generator log.
+    keep_intermediate_result
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    ddpg_params
+        The DDPG agent parameters.
+    target : str
+        'flops' or 'params'. Note that the sparsity in other pruners always refers to parameter sparsity, but in AMC you can choose FLOPs sparsity.
+        This parameter is used to explain what the sparsity setting in config_list refers to.
+    """
+
+    def __init__(self, total_episode: int, dummy_input: Tensor, origin_model: Module, origin_config_list: List[Dict],
+                 origin_masks: Dict[str, Dict[str, Tensor]] = {}, log_dir: str = '.', keep_intermediate_result: bool = False,
+                 ddpg_params: Dict = {}, target: str = 'flops'):
+        self.total_episode = total_episode
+        self.current_episode = 0
+        self.dummy_input = dummy_input
+        self.ddpg_params = ddpg_params
+        self.target = target
+        self.config_list_copy = deepcopy(origin_config_list)
+
+        super().__init__(origin_model=origin_model, origin_masks=origin_masks, origin_config_list=origin_config_list,
+                         log_dir=log_dir, keep_intermediate_result=keep_intermediate_result)
+
+    def init_pending_tasks(self) -> List[Task]:
+        origin_model = torch.load(self._origin_model_path)
+        origin_masks = torch.load(self._origin_masks_path)
+        with open(self._origin_config_list_path, "r") as f:
+            origin_config_list = json_tricks.load(f)
+
+        self.T = []
+        self.action = None
+        self.observation = None
+        self.warmup_episode = self.ddpg_params['warmup'] if 'warmup' in self.ddpg_params.keys() else int(self.total_episode / 4)
+
+        config_list_copy = config_list_canonical(origin_model, origin_config_list)
+        total_sparsity = config_list_copy[0]['total_sparsity']
+        max_sparsity_per_layer = config_list_copy[0].get('max_sparsity_per_layer', 1.)
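+
+        # A reading note (not in the original source): `AMCEnv` walks the prunable
+        # layers one by one, and the DDPG agent emits one continuous action per
+        # layer, i.e. the sparsity for the current layer, as seen in `generate_tasks`.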
+ + self.env = AMCEnv(origin_model, origin_config_list, self.dummy_input, total_sparsity, max_sparsity_per_layer, self.target) + self.agent = DDPG(len(self.env.state_feature), 1, self.ddpg_params) + self.agent.is_training = True + task_result = TaskResult('origin', origin_model, origin_masks, origin_masks, None) + + return self.generate_tasks(task_result) + + def generate_tasks(self, task_result: TaskResult) -> List[Task]: + # append experience & update agent policy + if task_result.task_id != 'origin': + action, reward, observation, done = self.env.step(self.action, task_result.compact_model) + self.T.append([reward, self.observation, observation, self.action, done]) + self.observation = observation.copy() + + if done: + final_reward = task_result.score - 1 + # agent observe and update policy + for _, s_t, s_t1, a_t, d_t in self.T: + self.agent.observe(final_reward, s_t, s_t1, a_t, d_t) + if self.current_episode > self.warmup_episode: + self.agent.update_policy() + + self.current_episode += 1 + self.T = [] + self.action = None + self.observation = None + + # update current2origin_sparsity in log file + origin_model = torch.load(self._origin_model_path) + compact_model = task_result.compact_model + compact_model_masks = task_result.compact_model_masks + current2origin_sparsity, _, _ = compute_sparsity(origin_model, compact_model, compact_model_masks, self.temp_config_list) + self._tasks[task_result.task_id].state['current2origin_sparsity'] = current2origin_sparsity + current2origin_sparsity, _, _ = compute_sparsity(origin_model, compact_model, compact_model_masks, self.config_list_copy) + self._tasks[task_result.task_id].state['current_total_sparsity'] = current2origin_sparsity + flops, params, _ = count_flops_params(compact_model, self.dummy_input, verbose=False) + self._tasks[task_result.task_id].state['current_flops'] = '{:.2f} M'.format(flops / 1e6) + self._tasks[task_result.task_id].state['current_params'] = '{:.2f} M'.format(params / 1e6) + + # generate new action + if self.current_episode < self.total_episode: + if self.observation is None: + self.observation = self.env.reset().copy() + self.temp_config_list = [] + compact_model = torch.load(self._origin_model_path) + compact_model_masks = torch.load(self._origin_masks_path) + else: + compact_model = task_result.compact_model + compact_model_masks = task_result.compact_model_masks + if self.current_episode <= self.warmup_episode: + action = self.agent.random_action() + else: + action = self.agent.select_action(self.observation, episode=self.current_episode) + action = action.tolist()[0] + + self.action = self.env.correct_action(action, compact_model) + sub_config_list = [{'op_names': [self.env.current_op_name], 'total_sparsity': self.action}] + self.temp_config_list.extend(sub_config_list) + + task_id = self._task_id_candidate + if self.env.is_first_layer() or self.env.is_final_layer(): + task_config_list = self.temp_config_list + else: + task_config_list = sub_config_list + + config_list_path = Path(self._intermediate_result_dir, '{}_config_list.json'.format(task_id)) + with Path(config_list_path).open('w') as f: + json_tricks.dump(task_config_list, f, indent=4) + + model_path = Path(self._intermediate_result_dir, '{}_compact_model.pth'.format(task_result.task_id)) + masks_path = Path(self._intermediate_result_dir, '{}_compact_model_masks.pth'.format(task_result.task_id)) + torch.save(compact_model, model_path) + torch.save(compact_model_masks, masks_path) + + task = Task(task_id, model_path, masks_path, config_list_path) + if not 
self.env.is_final_layer():
+                task.finetune = False
+                task.evaluate = False
+
+            self._tasks[task_id] = task
+            self._task_id_candidate += 1
+            return [task]
+        else:
+            return []
+
+
+class AMCPruner(IterativePruner):
+    """
+    A pytorch implementation of AMC: AutoML for Model Compression and Acceleration on Mobile Devices.
+    (https://arxiv.org/pdf/1802.03494.pdf)
+    It is suggested to configure the same `total_sparsity` value for all entries in `config_list`.
+    The AMC pruner treats the first sparsity in `config_list` as the global sparsity.
+
+    Parameters
+    ----------
+    total_episode : int
+        The total episode number.
+    model : Module
+        The model to be pruned.
+    config_list : List[Dict]
+        Supported keys :
+            - total_sparsity : This is to specify the total sparsity for all layers in this config, each layer may have different sparsity.
+            - max_sparsity_per_layer : Always used with total_sparsity. Limit the max sparsity of each layer.
+            - op_types : Operation type to be pruned.
+            - op_names : Operation name to be pruned.
+            - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI.
+            - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning.
+    dummy_input : torch.Tensor
+        `dummy_input` is required for speed-up and for tracing the model in the RL environment.
+    evaluator : Callable[[Module], float]
+        Evaluate the pruned model and give a score.
+    pruning_algorithm : str
+        Supported pruning algorithms: ['l1', 'l2', 'fpgm', 'apoz', 'mean_activation', 'taylorfo'].
+        This iterative pruner will use the chosen corresponding pruner to prune the model in each iteration.
+    log_dir : str
+        The log directory used to save the results; you can find the best result under this folder.
+    keep_intermediate_result : bool
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    finetuner : Optional[Callable[[Module], None]]
+        The finetuner handles all finetune logic, takes a pytorch module as input, and will be called in each iteration.
+    ddpg_params : Dict
+        Configuration dict to configure the DDPG agent; any unset key will be set to its default implicitly.
+            - hidden1: hidden size of the first fully connected layer. Default: 300
+            - hidden2: hidden size of the second fully connected layer. Default: 300
+            - lr_c: learning rate for the critic. Default: 1e-3
+            - lr_a: learning rate for the actor. Default: 1e-4
+            - warmup: number of episodes without training but only filling the replay memory. During warmup episodes, random actions are used for pruning. Default: 100
+            - discount: next Q value discount for the deep Q value target. Default: 0.99
+            - bsize: minibatch size for training the DDPG agent. Default: 64
+            - rmsize: memory size for each layer. Default: 100
+            - window_length: replay buffer window length. Default: 1
+            - tau: moving average for the target network used by soft_update. Default: 0.99
+            - init_delta: initial variance of the truncated normal distribution. Default: 0.5
+            - delta_decay: delta decay during exploration. Default: 0.99
+            # parameters for training the ddpg agent
+            - max_episode_length: maximum episode length. Default: 1e9
+            - epsilon: linear decay of the exploration policy. Default: 50000
+
+    pruning_params : Dict
+        If the pruner corresponding to the chosen pruning_algorithm has extra parameters, put them as a dict to pass in.
+    target : str
+        'flops' or 'params'. Note that the sparsity in other pruners always refers to parameter sparsity, but in AMC, you can choose FLOPs sparsity.
+        This parameter is used to explain what the sparsity setting in config_list refers to.
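+
+    Example::
+
+        # A minimal usage sketch (not from the original docs); `model`, `dummy_input`
+        # and `evaluator` are assumed to be defined by the user.
+        config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5}]
+        pruner = AMCPruner(400, model, config_list, dummy_input, evaluator)
+        pruner.compress()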
+ """ + + def __init__(self, total_episode: int, model: Module, config_list: List[Dict], dummy_input: Tensor, + evaluator: Callable[[Module], float], pruning_algorithm: str = 'l1', log_dir: str = '.', + keep_intermediate_result: bool = False, finetuner: Optional[Callable[[Module], None]] = None, + ddpg_params: dict = {}, pruning_params: dict = {}, target: str = 'flops'): + assert pruning_algorithm in ['l1', 'l2', 'fpgm', 'apoz', 'mean_activation', 'taylorfo'], \ + "Only support pruning_algorithm in ['l1', 'l2', 'fpgm', 'apoz', 'mean_activation', 'taylorfo']" + task_generator = AMCTaskGenerator(total_episode=total_episode, + dummy_input=dummy_input, + origin_model=model, + origin_config_list=config_list, + log_dir=log_dir, + keep_intermediate_result=keep_intermediate_result, + ddpg_params=ddpg_params, + target=target) + pruner = PRUNER_DICT[pruning_algorithm](None, None, **pruning_params) + super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=True, dummy_input=dummy_input, + evaluator=evaluator, reset_weight=False) diff --git a/nni/algorithms/compression/v2/pytorch/pruning/auto_compress_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/auto_compress_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..08f79b523f9ea57705fb88ff9b8fc54893bb9574 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/auto_compress_pruner.py @@ -0,0 +1,126 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from pathlib import Path +from typing import Dict, List, Callable, Optional + +from torch import Tensor +from torch.nn import Module + +from nni.algorithms.compression.v2.pytorch.utils import OptimizerConstructHelper + +from .basic_pruner import ADMMPruner +from .iterative_pruner import IterativePruner, SimulatedAnnealingPruner +from .tools import LotteryTicketTaskGenerator + +_logger = logging.getLogger(__name__) + + +class AutoCompressTaskGenerator(LotteryTicketTaskGenerator): + def __init__(self, total_iteration: int, origin_model: Module, origin_config_list: List[Dict], + origin_masks: Dict[str, Dict[str, Tensor]] = {}, sa_params: Dict = {}, log_dir: str = '.', + keep_intermediate_result: bool = False): + self.iterative_pruner = SimulatedAnnealingPruner(model=None, + config_list=None, + log_dir=Path(log_dir, 'SA'), + **sa_params) + super().__init__(total_iteration=total_iteration, + origin_model=origin_model, + origin_config_list=origin_config_list, + origin_masks=origin_masks, + log_dir=log_dir, + keep_intermediate_result=keep_intermediate_result) + + def reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}): + # TODO: replace with validation here + for config in config_list: + if 'sparsity' in config or 'sparsity_per_layer' in config: + _logger.warning('Only `total_sparsity` can be differentially allocated sparse ratio to each layer, `sparsity` or `sparsity_per_layer` will allocate fixed sparse ratio to layers. 
Make sure you know what this will lead to, otherwise please use `total_sparsity`.') + return super().reset(model, config_list, masks) + + def _iterative_pruner_reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}): + self.iterative_pruner.task_generator._log_dir = Path(self._log_dir_root, 'SA') + self.iterative_pruner.reset(model, config_list=config_list, masks=masks) + + def allocate_sparsity(self, new_config_list: List[Dict], model: Module, masks: Dict[str, Dict[str, Tensor]]): + self._iterative_pruner_reset(model, new_config_list, masks) + self.iterative_pruner.compress() + _, _, _, _, config_list = self.iterative_pruner.get_best_result() + return config_list + + +class AutoCompressPruner(IterativePruner): + """ + Parameters + ---------- + model : Module + The origin unwrapped pytorch model to be pruned. + config_list : List[Dict] + The origin config list provided by the user. + total_iteration : int + The total iteration number. + evaluator : Callable[[Module], float] + Evaluate the pruned model and give a score. + admm_params : Dict + The parameters passed to the ADMMPruner. + + - trainer : Callable[[Module, Optimizer, Callable]. + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. + - traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer) + The traced optimizer instance which the optimizer class is wrapped by nni.trace. + E.g. traced_optimizer = nni.trace(torch.nn.Adam)(model.parameters()). + - criterion : Callable[[Tensor, Tensor], Tensor]. + The criterion function used in trainer. Take model output and target value as input, and return the loss. + - iterations : int. + The total iteration number in admm pruning algorithm. + - training_epochs : int. + The epoch number for training model in each iteration. + + sa_params : Dict + The parameters passed to the SimulatedAnnealingPruner. + + - evaluator : Callable[[Module], float]. Required. + Evaluate the pruned model and give a score. + - start_temperature : float. Default: `100`. + Start temperature of the simulated annealing process. + - stop_temperature : float. Default: `20`. + Stop temperature of the simulated annealing process. + - cool_down_rate : float. Default: `0.9`. + Cooldown rate of the temperature. + - perturbation_magnitude : float. Default: `0.35`. + Initial perturbation magnitude to the sparsities. The magnitude decreases with current temperature. + - pruning_algorithm : str. Default: `'level'`. + Supported pruning algorithm ['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', 'mean_activation', 'taylorfo', 'admm']. + - pruning_params : Dict. Default: `{}`. + If the chosen pruning_algorithm has extra parameters, put them as a dict to pass in. + + log_dir : str + The log directory used to save the result, you can find the best result under this folder. + keep_intermediate_result : bool + If keeping the intermediate result, including intermediate model and masks during each iteration. + finetuner : Optional[Callable[[Module], None]] + The finetuner handles all finetune logic, takes a pytorch module as input. + It will be called at the end of each iteration, usually for neutralizing the accuracy loss brought by the pruning in this iteration. + speed_up : bool + If set True, speed up the model at the end of each iteration to make the pruned model compact. 
+ dummy_input : Optional[torch.Tensor] + If `speed_up` is True, `dummy_input` is required for tracing the model in speed up. + """ + + def __init__(self, model: Module, config_list: List[Dict], total_iteration: int, admm_params: Dict, + sa_params: Dict, log_dir: str = '.', keep_intermediate_result: bool = False, + finetuner: Optional[Callable[[Module], None]] = None, speed_up: bool = False, + dummy_input: Optional[Tensor] = None, evaluator: Callable[[Module], float] = None): + task_generator = AutoCompressTaskGenerator(total_iteration=total_iteration, + origin_model=model, + origin_config_list=config_list, + sa_params=sa_params, + log_dir=log_dir, + keep_intermediate_result=keep_intermediate_result) + if 'traced_optimizer' in admm_params: + admm_params['traced_optimizer'] = OptimizerConstructHelper.from_trace(model, admm_params['traced_optimizer']) + pruner = ADMMPruner(None, None, **admm_params) + super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=speed_up, dummy_input=dummy_input, + evaluator=evaluator, reset_weight=False) diff --git a/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py new file mode 100644 index 0000000000000000000000000000000000000000..977c52ab1171a213d1e27bb42817caaa058687c4 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py @@ -0,0 +1,847 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from copy import deepcopy +import logging +from typing import List, Dict, Tuple, Callable, Optional + +from schema import And, Or, Optional as SchemaOptional, SchemaError +import torch +from torch import Tensor +import torch.nn as nn +from torch.nn import Module +from torch.optim import Optimizer + +from nni.common.serializer import Traceable +from nni.algorithms.compression.v2.pytorch.base.pruner import Pruner +from nni.algorithms.compression.v2.pytorch.utils import CompressorSchema, config_list_canonical, OptimizerConstructHelper + +from .tools import ( + DataCollector, + HookCollectorInfo, + WeightDataCollector, + WeightTrainerBasedDataCollector, + SingleHookTrainerBasedDataCollector +) + +from .tools import ( + MetricsCalculator, + NormMetricsCalculator, + MultiDataNormMetricsCalculator, + DistMetricsCalculator, + APoZRankMetricsCalculator, + MeanRankMetricsCalculator +) + +from .tools import ( + SparsityAllocator, + NormalSparsityAllocator, + GlobalSparsityAllocator, + Conv2dDependencyAwareAllocator +) + +_logger = logging.getLogger(__name__) + +__all__ = ['LevelPruner', 'L1NormPruner', 'L2NormPruner', 'FPGMPruner', 'SlimPruner', 'ActivationPruner', + 'ActivationAPoZRankPruner', 'ActivationMeanRankPruner', 'TaylorFOWeightPruner', 'ADMMPruner'] + +NORMAL_SCHEMA = { + Or('sparsity', 'sparsity_per_layer'): And(float, lambda n: 0 <= n < 1), + SchemaOptional('op_types'): [str], + SchemaOptional('op_names'): [str], + SchemaOptional('op_partial_names'): [str] +} + +GLOBAL_SCHEMA = { + 'total_sparsity': And(float, lambda n: 0 <= n < 1), + SchemaOptional('max_sparsity_per_layer'): And(float, lambda n: 0 < n <= 1), + SchemaOptional('op_types'): [str], + SchemaOptional('op_names'): [str], + SchemaOptional('op_partial_names'): [str] +} + +EXCLUDE_SCHEMA = { + 'exclude': bool, + SchemaOptional('op_types'): [str], + SchemaOptional('op_names'): [str], + SchemaOptional('op_partial_names'): [str] +} + +INTERNAL_SCHEMA = { + 'total_sparsity': And(float, lambda n: 0 <= n < 1), + SchemaOptional('max_sparsity_per_layer'): {str: float}, + 
SchemaOptional('op_types'): [str],
+    SchemaOptional('op_names'): [str]
+}
+
+
+class BasicPruner(Pruner):
+    def __init__(self, model: Module, config_list: List[Dict]):
+        self.data_collector: DataCollector = None
+        self.metrics_calculator: MetricsCalculator = None
+        self.sparsity_allocator: SparsityAllocator = None
+
+        super().__init__(model, config_list)
+
+    def validate_config(self, model: Module, config_list: List[Dict]):
+        self._validate_config_before_canonical(model, config_list)
+        self.config_list = config_list_canonical(model, config_list)
+
+    def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]):
+        pass
+
+    def reset(self, model: Optional[Module], config_list: Optional[List[Dict]]):
+        super().reset(model=model, config_list=config_list)
+        self.reset_tools()
+
+    def reset_tools(self):
+        """
+        This function is used to reset `self.data_collector`, `self.metrics_calculator` and `self.sparsity_allocator`.
+        The subclass needs to implement this function to complete the pruning process.
+        See `compress()` to understand how NNI uses these three parts to generate masks for the bound model.
+        """
+        raise NotImplementedError()
+
+    def compress(self) -> Tuple[Module, Dict]:
+        """
+        Used to generate the masks. The pruning process is divided into three stages:
+        `self.data_collector` collects the data used to calculate the specified metric,
+        `self.metrics_calculator` calculates the metric, and `self.sparsity_allocator` generates the mask depending on the metric.
+
+        Returns
+        -------
+        Tuple[Module, Dict]
+            Return the wrapped model and mask.
+        """
+        data = self.data_collector.collect()
+        _logger.debug('Collected Data:\n%s', data)
+        metrics = self.metrics_calculator.calculate_metrics(data)
+        _logger.debug('Metrics Calculate:\n%s', metrics)
+        masks = self.sparsity_allocator.generate_sparsity(metrics)
+        _logger.debug('Masks:\n%s', masks)
+        self.load_masks(masks)
+        return self.bound_model, masks
+
+
+class LevelPruner(BasicPruner):
+    """
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned
+    config_list : List[Dict]
+        Supported keys:
+            - sparsity : This is to specify the sparsity for each layer in this config to be compressed.
+            - sparsity_per_layer : Equals to sparsity.
+            - op_types : Operation types to be pruned.
+            - op_names : Operation names to be pruned.
+            - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI.
+            - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning.
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict]):
+        super().__init__(model, config_list)
+
+    def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]):
+        schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)]
+        schema = CompressorSchema(schema_list, model, _logger)
+        schema.validate(config_list)
+
+    def reset_tools(self):
+        if self.data_collector is None:
+            self.data_collector = WeightDataCollector(self)
+        else:
+            self.data_collector.reset()
+        if self.metrics_calculator is None:
+            self.metrics_calculator = NormMetricsCalculator()
+        if self.sparsity_allocator is None:
+            self.sparsity_allocator = NormalSparsityAllocator(self)
+
+
+class NormPruner(BasicPruner):
+    """
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned
+    config_list : List[Dict]
+        Supported keys:
+            - sparsity : This is to specify the sparsity for each layer in this config to be compressed.
+            - sparsity_per_layer : Equals to sparsity.
+ - op_types : Conv2d and Linear are supported in NormPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + p : int + The order of norm. + mode : str + 'normal' or 'dependency_aware'. + If prune the model in a dependency-aware way, this pruner will + prune the model according to the norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if set 'dependency_aware' + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : Optional[torch.Tensor] + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model: Module, config_list: List[Dict], p: int, + mode: str = 'normal', dummy_input: Optional[Tensor] = None): + self.p = p + self.mode = mode + self.dummy_input = dummy_input + super().__init__(model, config_list) + + def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): + schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)] + for sub_shcema in schema_list: + sub_shcema[SchemaOptional('op_types')] = ['Conv2d', 'Linear'] + schema = CompressorSchema(schema_list, model, _logger) + + schema.validate(config_list) + + def reset_tools(self): + if self.data_collector is None: + self.data_collector = WeightDataCollector(self) + else: + self.data_collector.reset() + if self.metrics_calculator is None: + self.metrics_calculator = NormMetricsCalculator(p=self.p, dim=0) + if self.sparsity_allocator is None: + if self.mode == 'normal': + self.sparsity_allocator = NormalSparsityAllocator(self, dim=0) + elif self.mode == 'dependency_aware': + self.sparsity_allocator = Conv2dDependencyAwareAllocator(self, 0, self.dummy_input) + else: + raise NotImplementedError('Only support mode `normal` and `dependency_aware`') + + +class L1NormPruner(NormPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - op_types : Conv2d and Linear are supported in L1NormPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + mode : str + 'normal' or 'dependency_aware'. + If prune the model in a dependency-aware way, this pruner will + prune the model according to the l1-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if set 'dependency_aware' + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. 
+ dummy_input : Optional[torch.Tensor] + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model: Module, config_list: List[Dict], + mode: str = 'normal', dummy_input: Optional[Tensor] = None): + super().__init__(model, config_list, 1, mode, dummy_input) + + +class L2NormPruner(NormPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - op_types : Conv2d and Linear are supported in L1NormPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + mode : str + 'normal' or 'dependency_aware'. + If prune the model in a dependency-aware way, this pruner will + prune the model according to the l2-norm of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if set 'dependency_aware' + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : Optional[torch.Tensor] + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model: Module, config_list: List[Dict], + mode: str = 'normal', dummy_input: Optional[Tensor] = None): + super().__init__(model, config_list, 2, mode, dummy_input) + + +class FPGMPruner(BasicPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - op_types : Conv2d and Linear are supported in FPGMPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + mode : str + 'normal' or 'dependency_aware'. + If prune the model in a dependency-aware way, this pruner will + prune the model according to the FPGM of weights and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if set 'dependency_aware' + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + dummy_input : Optional[torch.Tensor] + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. 
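+
+    Example::
+
+        # A minimal usage sketch (assumed, not from the original docs); `model` is
+        # any pytorch model with Conv2d layers.
+        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.5}]
+        pruner = FPGMPruner(model, config_list)
+        masked_model, masks = pruner.compress()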
+ """ + + def __init__(self, model: Module, config_list: List[Dict], + mode: str = 'normal', dummy_input: Optional[Tensor] = None): + self.mode = mode + self.dummy_input = dummy_input + super().__init__(model, config_list) + + def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): + schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)] + for sub_shcema in schema_list: + sub_shcema[SchemaOptional('op_types')] = ['Conv2d', 'Linear'] + schema = CompressorSchema(schema_list, model, _logger) + + schema.validate(config_list) + + def reset_tools(self): + if self.data_collector is None: + self.data_collector = WeightDataCollector(self) + else: + self.data_collector.reset() + if self.metrics_calculator is None: + self.metrics_calculator = DistMetricsCalculator(p=2, dim=0) + if self.sparsity_allocator is None: + if self.mode == 'normal': + self.sparsity_allocator = NormalSparsityAllocator(self, dim=0) + elif self.mode == 'dependency_aware': + self.sparsity_allocator = Conv2dDependencyAwareAllocator(self, 0, self.dummy_input) + else: + raise NotImplementedError('Only support mode `normal` and `dependency_aware`') + + +class SlimPruner(BasicPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - total_sparsity : This is to specify the total sparsity for all layers in this config, each layer may have different sparsity. + - max_sparsity_per_layer : Always used with total_sparsity. Limit the max sparsity of each layer. + - op_types : Only BatchNorm2d is supported in SlimPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + trainer : Callable[[Module, Optimizer, Callable], None] + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. + + Example:: + + def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]): + training = model.training + model.train(mode=True) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + # If you don't want to update the model, you can skip `optimizer.step()`, and set train mode False. + optimizer.step() + model.train(mode=training) + traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer) + The traced optimizer instance which the optimizer class is wrapped by nni.trace. + E.g. traced_optimizer = nni.trace(torch.nn.Adam)(model.parameters()). + criterion : Callable[[Tensor, Tensor], Tensor] + The criterion function used in trainer. Take model output and target value as input, and return the loss. + training_epochs : int + The epoch number for training model to sparsify the BN weight. + scale : float + Penalty parameter for sparsification, which could reduce overfitting. + mode : str + 'normal' or 'global'. + If prune the model in a global way, all layer weights with same config will be considered uniformly. 
+ That means a single layer may not reach or exceed the sparsity setting in config, + but the total pruned weights meet the sparsity setting. + """ + + def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None], + traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], + training_epochs: int, scale: float = 0.0001, mode='global'): + self.mode = mode + self.trainer = trainer + if isinstance(traced_optimizer, OptimizerConstructHelper): + self.optimizer_helper = traced_optimizer + else: + self.optimizer_helper = OptimizerConstructHelper.from_trace(model, traced_optimizer) + self.criterion = criterion + self.training_epochs = training_epochs + self._scale = scale + super().__init__(model, config_list) + + def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): + schema_list = [deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)] + if self.mode == 'global': + schema_list.append(deepcopy(GLOBAL_SCHEMA)) + else: + schema_list.append(deepcopy(NORMAL_SCHEMA)) + for sub_shcema in schema_list: + sub_shcema[SchemaOptional('op_types')] = ['BatchNorm2d'] + schema = CompressorSchema(schema_list, model, _logger) + + try: + schema.validate(config_list) + except SchemaError as e: + if "Missing key: 'total_sparsity'" in str(e): + _logger.error('`config_list` validation failed. If global mode is set in this pruner, `sparsity_per_layer` and `sparsity` are not supported, make sure `total_sparsity` is set in config_list.') + raise e + + def criterion_patch(self, criterion: Callable[[Tensor, Tensor], Tensor]) -> Callable[[Tensor, Tensor], Tensor]: + def patched_criterion(input_tensor: Tensor, target: Tensor): + sum_l1 = 0 + for _, wrapper in self.get_modules_wrapper().items(): + sum_l1 += torch.norm(wrapper.module.weight, p=1) + return criterion(input_tensor, target) + self._scale * sum_l1 + return patched_criterion + + def reset_tools(self): + if self.data_collector is None: + self.data_collector = WeightTrainerBasedDataCollector(self, self.trainer, self.optimizer_helper, self.criterion, + self.training_epochs, criterion_patch=self.criterion_patch) + else: + self.data_collector.reset() + if self.metrics_calculator is None: + self.metrics_calculator = NormMetricsCalculator() + if self.sparsity_allocator is None: + if self.mode == 'normal': + self.sparsity_allocator = NormalSparsityAllocator(self) + elif self.mode == 'global': + self.sparsity_allocator = GlobalSparsityAllocator(self) + else: + raise NotImplementedError('Only support mode `normal` and `global`') + + +class ActivationPruner(BasicPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - op_types : Conv2d and Linear are supported in ActivationPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + trainer : Callable[[Module, Optimizer, Callable], None] + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. 
+
+        Example::
+
+            def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]):
+                training = model.training
+                model.train(mode=True)
+                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+                for batch_idx, (data, target) in enumerate(train_loader):
+                    data, target = data.to(device), target.to(device)
+                    optimizer.zero_grad()
+                    output = model(data)
+                    loss = criterion(output, target)
+                    loss.backward()
+                    # If you don't want to update the model, you can skip `optimizer.step()`, and set train mode False.
+                    optimizer.step()
+                model.train(mode=training)
+    traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
+        The traced optimizer instance, i.e., an optimizer instance whose class is wrapped by nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.nn.Adam)(model.parameters()).
+    criterion : Callable[[Tensor, Tensor], Tensor]
+        The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
+    training_batches
+        The batch number used to collect activations.
+    activation : str
+        'relu' or 'relu6'. The activation function applied to the layer output before collecting the metric. Default: 'relu'.
+    mode : str
+        'normal' or 'dependency_aware'.
+        If pruning the model in a dependency-aware way, this pruner will
+        prune the model according to the activation-based metrics and the channel-dependency or
+        group-dependency of the model. In this way, the pruner will force the conv layers
+        that have dependencies to prune the same channels, so the speedup module can better
+        harvest the speed benefit from the pruned model. Note that if 'dependency_aware' is set,
+        the dummy_input cannot be None, because the pruner needs a dummy input to trace the
+        dependency between the conv layers.
+    dummy_input : Optional[torch.Tensor]
+        The dummy input to analyze the topology constraints. Note that the dummy_input
+        should be on the same device as the model.
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
+                 traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], training_batches: int, activation: str = 'relu',
+                 mode: str = 'normal', dummy_input: Optional[Tensor] = None):
+        self.mode = mode
+        self.dummy_input = dummy_input
+        self.trainer = trainer
+        if isinstance(traced_optimizer, OptimizerConstructHelper):
+            self.optimizer_helper = traced_optimizer
+        else:
+            self.optimizer_helper = OptimizerConstructHelper.from_trace(model, traced_optimizer)
+        self.criterion = criterion
+        self.training_batches = training_batches
+        self._activation = self._choose_activation(activation)
+        super().__init__(model, config_list)
+
+    def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]):
+        schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)]
+        for sub_schema in schema_list:
+            sub_schema[SchemaOptional('op_types')] = ['Conv2d', 'Linear']
+        schema = CompressorSchema(schema_list, model, _logger)
+
+        schema.validate(config_list)
+
+    def _choose_activation(self, activation: str = 'relu') -> Callable:
+        if activation == 'relu':
+            return nn.functional.relu
+        elif activation == 'relu6':
+            return nn.functional.relu6
+        else:
+            # raising a bare string is invalid in Python 3; raise a proper exception
+            raise ValueError('Unsupported activation {}'.format(activation))
+
+    def _collector(self, buffer: List) -> Callable[[Module, Tensor, Tensor], None]:
+        assert len(buffer) == 0, 'Buffer passed to activation pruner collector is not empty.'
+        # The length of the buffer used in this pruner will always be 2.
+        # buffer[0] is the number of batches counted into buffer[1].
+ # buffer[1] is a tensor and the size of buffer[1] is same as the activation. + buffer.append(0) + + def collect_activation(_module: Module, _input: Tensor, output: Tensor): + if len(buffer) == 1: + buffer.append(torch.zeros_like(output)) + if buffer[0] < self.training_batches: + buffer[1] += self._activation_trans(output) + buffer[0] += 1 + return collect_activation + + def _activation_trans(self, output: Tensor) -> Tensor: + raise NotImplementedError() + + def reset_tools(self): + collector_info = HookCollectorInfo([layer_info for layer_info, _ in self._detect_modules_to_compress()], 'forward', self._collector) + if self.data_collector is None: + self.data_collector = SingleHookTrainerBasedDataCollector(self, self.trainer, self.optimizer_helper, self.criterion, + 1, collector_infos=[collector_info]) + else: + self.data_collector.reset(collector_infos=[collector_info]) + if self.metrics_calculator is None: + self.metrics_calculator = self._get_metrics_calculator() + if self.sparsity_allocator is None: + if self.mode == 'normal': + self.sparsity_allocator = NormalSparsityAllocator(self, dim=0) + elif self.mode == 'dependency_aware': + self.sparsity_allocator = Conv2dDependencyAwareAllocator(self, 0, self.dummy_input) + else: + raise NotImplementedError('Only support mode `normal` and `dependency_aware`') + + def _get_metrics_calculator(self) -> MetricsCalculator: + raise NotImplementedError() + + +class ActivationAPoZRankPruner(ActivationPruner): + def _activation_trans(self, output: Tensor) -> Tensor: + # return a matrix that the position of zero in `output` is one, others is zero. + return torch.eq(self._activation(output.detach()), torch.zeros_like(output)).type_as(output) + + def _get_metrics_calculator(self) -> MetricsCalculator: + return APoZRankMetricsCalculator(dim=1) + + +class ActivationMeanRankPruner(ActivationPruner): + def _activation_trans(self, output: Tensor) -> Tensor: + # return the activation of `output` directly. + return self._activation(output.detach()) + + def _get_metrics_calculator(self) -> MetricsCalculator: + return MeanRankMetricsCalculator(dim=1) + + +class TaylorFOWeightPruner(BasicPruner): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - total_sparsity : This is to specify the total sparsity for all layers in this config, each layer may have different sparsity. + - max_sparsity_per_layer : Always used with total_sparsity. Limit the max sparsity of each layer. + - op_types : Conv2d and Linear are supported in TaylorFOWeightPruner. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + trainer : Callable[[Module, Optimizer, Callable] + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. 
+ + Example:: + + def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]): + training = model.training + model.train(mode=True) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + # If you don't want to update the model, you can skip `optimizer.step()`, and set train mode False. + optimizer.step() + model.train(mode=training) + traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer) + The traced optimizer instance which the optimizer class is wrapped by nni.trace. + E.g. traced_optimizer = nni.trace(torch.nn.Adam)(model.parameters()). + criterion : Callable[[Tensor, Tensor], Tensor] + The criterion function used in trainer. Take model output and target value as input, and return the loss. + training_batches : int + The batch number used to collect activations. + mode : str + 'normal', 'dependency_aware' or 'global'. + + If prune the model in a dependency-aware way, this pruner will + prune the model according to the taylorFO and the channel-dependency or + group-dependency of the model. In this way, the pruner will force the conv layers + that have dependencies to prune the same channels, so the speedup module can better + harvest the speed benefit from the pruned model. Note that, if set 'dependency_aware' + , the dummy_input cannot be None, because the pruner needs a dummy input to trace the + dependency between the conv layers. + + If prune the model in a global way, all layer weights with same config will be considered uniformly. + That means a single layer may not reach or exceed the sparsity setting in config, + but the total pruned weights meet the sparsity setting. + dummy_input : Optional[torch.Tensor] + The dummy input to analyze the topology constraints. Note that, the dummy_input + should on the same device with the model. + """ + + def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None], + traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], training_batches: int, + mode: str = 'normal', dummy_input: Optional[Tensor] = None): + self.mode = mode + self.dummy_input = dummy_input + self.trainer = trainer + if isinstance(traced_optimizer, OptimizerConstructHelper): + self.optimizer_helper = traced_optimizer + else: + self.optimizer_helper = OptimizerConstructHelper.from_trace(model, traced_optimizer) + self.criterion = criterion + self.training_batches = training_batches + super().__init__(model, config_list) + + def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): + schema_list = [deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)] + if self.mode == 'global': + schema_list.append(deepcopy(GLOBAL_SCHEMA)) + else: + schema_list.append(deepcopy(NORMAL_SCHEMA)) + for sub_shcema in schema_list: + sub_shcema[SchemaOptional('op_types')] = ['Conv2d', 'Linear'] + schema = CompressorSchema(schema_list, model, _logger) + + try: + schema.validate(config_list) + except SchemaError as e: + if "Missing key: 'total_sparsity'" in str(e): + _logger.error('`config_list` validation failed. 
If global mode is set in this pruner, `sparsity_per_layer` and `sparsity` are not supported, make sure `total_sparsity` is set in config_list.') + raise e + + def _collector(self, buffer: List, weight_tensor: Tensor) -> Callable[[Tensor], None]: + assert len(buffer) == 0, 'Buffer pass to taylor pruner collector is not empty.' + buffer.append(0) + buffer.append(torch.zeros_like(weight_tensor)) + + def collect_taylor(grad: Tensor): + if buffer[0] < self.training_batches: + buffer[1] += self._calculate_taylor_expansion(weight_tensor, grad) + buffer[0] += 1 + return collect_taylor + + def _calculate_taylor_expansion(self, weight_tensor: Tensor, grad: Tensor) -> Tensor: + return (weight_tensor.detach() * grad.detach()).data.pow(2) + + def reset_tools(self): + hook_targets = {layer_info.name: layer_info.module.weight for layer_info, _ in self._detect_modules_to_compress()} + collector_info = HookCollectorInfo(hook_targets, 'tensor', self._collector) + if self.data_collector is None: + self.data_collector = SingleHookTrainerBasedDataCollector(self, self.trainer, self.optimizer_helper, self.criterion, + 1, collector_infos=[collector_info]) + else: + self.data_collector.reset(collector_infos=[collector_info]) + if self.metrics_calculator is None: + self.metrics_calculator = MultiDataNormMetricsCalculator(p=1, dim=0) + if self.sparsity_allocator is None: + if self.mode == 'normal': + self.sparsity_allocator = NormalSparsityAllocator(self, dim=0) + elif self.mode == 'global': + self.sparsity_allocator = GlobalSparsityAllocator(self, dim=0) + elif self.mode == 'dependency_aware': + self.sparsity_allocator = Conv2dDependencyAwareAllocator(self, 0, self.dummy_input) + else: + raise NotImplementedError('Only support mode `normal`, `global` and `dependency_aware`') + + +class ADMMPruner(BasicPruner): + """ + ADMM (Alternating Direction Method of Multipliers) Pruner is a kind of mathematical optimization technique. + The metric used in this pruner is the absolute value of the weight. + In each iteration, the weight with small magnitudes will be set to zero. + Only in the final iteration, the mask will be generated and apply to model wrapper. + + The original paper refer to: https://arxiv.org/abs/1804.03294. + + Parameters + ---------- + model : torch.nn.Module + Model to be pruned. + config_list : List[Dict] + Supported keys: + - sparsity : This is to specify the sparsity for each layer in this config to be compressed. + - sparsity_per_layer : Equals to sparsity. + - rho : Penalty parameters in ADMM algorithm. + - op_types : Operation types to be pruned. + - op_names : Operation names to be pruned. + - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI. + - exclude : Set True then the layers setting by op_types and op_names will be excluded from pruning. + trainer : Callable[[Module, Optimizer, Callable] + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. 
+ + Example:: + + def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]): + training = model.training + model.train(mode=True) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + # If you don't want to update the model, you can skip `optimizer.step()`, and set train mode False. + optimizer.step() + model.train(mode=training) + traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer) + The traced optimizer instance which the optimizer class is wrapped by nni.trace. + E.g. traced_optimizer = nni.trace(torch.nn.Adam)(model.parameters()). + criterion : Callable[[Tensor, Tensor], Tensor] + The criterion function used in trainer. Take model output and target value as input, and return the loss. + iterations : int + The total iteration number in admm pruning algorithm. + training_epochs : int + The epoch number for training model in each iteration. + """ + + def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None], + traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], iterations: int, training_epochs: int): + self.trainer = trainer + if isinstance(traced_optimizer, OptimizerConstructHelper): + self.optimizer_helper = traced_optimizer + else: + self.optimizer_helper = OptimizerConstructHelper.from_trace(model, traced_optimizer) + self.criterion = criterion + self.iterations = iterations + self.training_epochs = training_epochs + super().__init__(model, config_list) + + def reset(self, model: Optional[Module], config_list: Optional[List[Dict]]): + super().reset(model, config_list) + self.Z = {name: wrapper.module.weight.data.clone().detach() for name, wrapper in self.get_modules_wrapper().items()} + self.U = {name: torch.zeros_like(z).to(z.device) for name, z in self.Z.items()} + + def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): + schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(INTERNAL_SCHEMA)] + for schema in schema_list: + schema.update({SchemaOptional('rho'): And(float, lambda n: n > 0)}) + schema_list.append(deepcopy(EXCLUDE_SCHEMA)) + schema = CompressorSchema(schema_list, model, _logger) + schema.validate(config_list) + + def criterion_patch(self, origin_criterion: Callable[[Tensor, Tensor], Tensor]): + def patched_criterion(output: Tensor, target: Tensor): + penalty = torch.tensor(0.0).to(output.device) + for name, wrapper in self.get_modules_wrapper().items(): + rho = wrapper.config.get('rho', 1e-4) + penalty += (rho / 2) * torch.sqrt(torch.norm(wrapper.module.weight - self.Z[name] + self.U[name])) + return origin_criterion(output, target) + penalty + return patched_criterion + + def reset_tools(self): + if self.data_collector is None: + self.data_collector = WeightTrainerBasedDataCollector(self, self.trainer, self.optimizer_helper, self.criterion, + self.training_epochs, criterion_patch=self.criterion_patch) + else: + self.data_collector.reset() + if self.metrics_calculator is None: + self.metrics_calculator = NormMetricsCalculator() + if self.sparsity_allocator is None: + self.sparsity_allocator = NormalSparsityAllocator(self) + + def compress(self) -> Tuple[Module, Dict]: + """ + Returns + ------- + Tuple[Module, Dict] + Return the wrapped model and mask. 
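+
+        Note: each ADMM iteration below performs the classic splitting updates, with
+        W the model weight, Z the auxiliary pruned copy and U the scaled dual variable:
+        ``Z = prune(W + U)`` followed by ``U = U + W - Z``.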
+        """
+        for i in range(self.iterations):
+            _logger.info('======= ADMM Iteration %d Start =======', i)
+            data = self.data_collector.collect()
+
+            for name, weight in data.items():
+                self.Z[name] = weight + self.U[name]
+            metrics = self.metrics_calculator.calculate_metrics(self.Z)
+            masks = self.sparsity_allocator.generate_sparsity(metrics)
+
+            for name, mask in masks.items():
+                self.Z[name] = self.Z[name].mul(mask['weight'])
+                self.U[name] = self.U[name] + data[name] - self.Z[name]
+
+        self.Z = None
+        self.U = None
+        torch.cuda.empty_cache()
+
+        metrics = self.metrics_calculator.calculate_metrics(data)
+        masks = self.sparsity_allocator.generate_sparsity(metrics)
+
+        self.load_masks(masks)
+        return self.bound_model, masks
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py b/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a516877f339a0d8157409cd4af0964895add8aac
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py
@@ -0,0 +1,159 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from copy import deepcopy
+from typing import Dict, List, Tuple, Callable, Optional
+
+import torch
+from torch import Tensor
+from torch.nn import Module
+
+from nni.algorithms.compression.v2.pytorch.base import Pruner, BasePruningScheduler, Task, TaskResult
+from nni.compression.pytorch.speedup import ModelSpeedup
+
+from .tools import TaskGenerator
+
+
+class PruningScheduler(BasePruningScheduler):
+    """
+    Parameters
+    ----------
+    pruner
+        The pruner used in the pruning scheduler.
+        The scheduler will use `Pruner.reset(model, config_list)` to reset it in each iteration.
+    task_generator
+        Used to generate the task for each iteration.
+    finetuner
+        The finetuner handles all finetuning logic, taking a pytorch module as input.
+        It will be called at the end of each iteration if reset_weight is False, and at the beginning of each iteration otherwise.
+    speed_up
+        If set True, speed up the model at the end of each iteration to make the pruned model compact.
+    dummy_input
+        If `speed_up` is True, `dummy_input` is required for tracing the model in speed up.
+    evaluator
+        Evaluate the pruned model and give a score.
+        If evaluator is None, the best result refers to the latest result.
+    reset_weight
+        If set True, the model weight will be reset to the original model weight at the end of each iteration step.
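+
+    Example (a minimal usage sketch; `model`, `config_list` and `finetuner` are
+    user-defined placeholders, and the pruner/task-generator choice is only illustrative)::
+
+        pruner = L1NormPruner(None, None)
+        task_generator = AGPTaskGenerator(10, model, config_list, log_dir='.', keep_intermediate_result=True)
+        scheduler = PruningScheduler(pruner, task_generator, finetuner=finetuner, speed_up=False,
+                                     dummy_input=None, evaluator=None, reset_weight=False)
+        scheduler.compress()
+        _, compact_model, masks, _, _ = scheduler.get_best_result()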
+ """ + def __init__(self, pruner: Pruner, task_generator: TaskGenerator, finetuner: Callable[[Module], None] = None, + speed_up: bool = False, dummy_input: Tensor = None, evaluator: Optional[Callable[[Module], float]] = None, + reset_weight: bool = False): + self.pruner = pruner + self.task_generator = task_generator + self.finetuner = finetuner + self.speed_up = speed_up + self.dummy_input = dummy_input + self.evaluator = evaluator + self.reset_weight = reset_weight + + def reset(self, model: Module, config_list: List[Dict], masks: Dict[str, Dict[str, Tensor]] = {}): + self.task_generator.reset(model, config_list, masks) + + def generate_task(self) -> Optional[Task]: + return self.task_generator.next() + + def record_task_result(self, task_result: TaskResult): + self.task_generator.receive_task_result(task_result) + + def pruning_one_step_normal(self, task: Task) -> TaskResult: + """ + generate masks -> speed up -> finetune -> evaluate + """ + model, masks, config_list = task.load_data() + self.pruner.reset(model, config_list) + self.pruner.load_masks(masks) + + # pruning model + compact_model, pruner_generated_masks = self.pruner.compress() + compact_model_masks = deepcopy(pruner_generated_masks) + + # show the pruning effect + self.pruner.show_pruned_weights() + self.pruner._unwrap_model() + + # speed up + if self.speed_up and task.speed_up: + ModelSpeedup(compact_model, self.dummy_input, pruner_generated_masks).speedup_model() + compact_model_masks = {} + + # finetune + if self.finetuner is not None and task.finetune: + if self.speed_up: + self.finetuner(compact_model) + else: + self.pruner._wrap_model() + self.finetuner(compact_model) + self.pruner._unwrap_model() + + # evaluate + if self.evaluator is not None and task.evaluate: + if self.speed_up: + score = self.evaluator(compact_model) + else: + self.pruner._wrap_model() + score = self.evaluator(compact_model) + self.pruner._unwrap_model() + else: + score = None + + # clear model references + self.pruner.clear_model_references() + + return TaskResult(task.task_id, compact_model, compact_model_masks, pruner_generated_masks, score) + + def pruning_one_step_reset_weight(self, task: Task) -> TaskResult: + """ + finetune -> generate masks -> reset weight -> speed up -> evaluate + """ + model, masks, config_list = task.load_data() + checkpoint = deepcopy(model.state_dict()) + self.pruner.reset(model, config_list) + self.pruner.load_masks(masks) + + # finetune + if self.finetuner is not None and task.finetune: + self.finetuner(model) + + # pruning model + compact_model, pruner_generated_masks = self.pruner.compress() + compact_model_masks = deepcopy(pruner_generated_masks) + + # show the pruning effect + self.pruner.show_pruned_weights() + self.pruner._unwrap_model() + + # reset model weight + compact_model.load_state_dict(checkpoint) + + # speed up + if self.speed_up and task.speed_up: + ModelSpeedup(compact_model, self.dummy_input, pruner_generated_masks).speedup_model() + compact_model_masks = {} + + # evaluate + if self.evaluator is not None and task.evaluate: + if self.speed_up: + score = self.evaluator(compact_model) + else: + self.pruner._wrap_model() + score = self.evaluator(compact_model) + self.pruner._unwrap_model() + else: + score = None + + # clear model references + self.pruner.clear_model_references() + + return TaskResult(task.task_id, compact_model, compact_model_masks, pruner_generated_masks, score) + + def pruning_one_step(self, task: Task) -> TaskResult: + if self.reset_weight: + result = 
self.pruning_one_step_reset_weight(task)
+        else:
+            result = self.pruning_one_step_normal(task)
+        torch.cuda.empty_cache()
+        return result
+
+    def get_best_result(self) -> Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]:
+        return self.task_generator.get_best_result()
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/iterative_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/iterative_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa6e7e7030ec882db5551d6b80bdcaba7435279f
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/iterative_pruner.py
@@ -0,0 +1,267 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from typing import Dict, List, Callable, Optional
+
+from torch import Tensor
+from torch.nn import Module
+
+from nni.algorithms.compression.v2.pytorch.utils import OptimizerConstructHelper
+
+from .basic_pruner import (
+    LevelPruner,
+    L1NormPruner,
+    L2NormPruner,
+    FPGMPruner,
+    SlimPruner,
+    ActivationAPoZRankPruner,
+    ActivationMeanRankPruner,
+    TaylorFOWeightPruner,
+    ADMMPruner
+)
+from .basic_scheduler import PruningScheduler
+from .tools import (
+    LinearTaskGenerator,
+    AGPTaskGenerator,
+    LotteryTicketTaskGenerator,
+    SimulatedAnnealingTaskGenerator
+)
+
+_logger = logging.getLogger(__name__)
+
+__all__ = ['LinearPruner', 'AGPPruner', 'LotteryTicketPruner', 'SimulatedAnnealingPruner']
+
+
+PRUNER_DICT = {
+    'level': LevelPruner,
+    'l1': L1NormPruner,
+    'l2': L2NormPruner,
+    'fpgm': FPGMPruner,
+    'slim': SlimPruner,
+    'apoz': ActivationAPoZRankPruner,
+    'mean_activation': ActivationMeanRankPruner,
+    'taylorfo': TaylorFOWeightPruner,
+    'admm': ADMMPruner
+}
+
+
+class IterativePruner(PruningScheduler):
+    def _wrap_model(self):
+        """
+        Deprecated function.
+        """
+        _logger.warning('Nothing will happen when calling this function. '
+                        'This pruner is an iterative pruner and does not directly wrap the model.')
+
+    def _unwrap_model(self):
+        """
+        Deprecated function.
+        """
+        _logger.warning('Nothing will happen when calling this function. '
+                        'This pruner is an iterative pruner and does not directly wrap the model.')
+
+    def export_model(self, *args, **kwargs):
+        """
+        Deprecated function.
+        """
+        _logger.warning('Nothing will happen when calling this function. '
+                        'The best result (and intermediate result if kept) during iteration is under `log_dir` (default: `.`).')
+
+
+class LinearPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : Module
+        The origin unwrapped pytorch model to be pruned.
+    config_list : List[Dict]
+        The origin config list provided by the user.
+    pruning_algorithm : str
+        Supported pruning algorithm ['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', 'mean_activation', 'taylorfo', 'admm'].
+        This iterative pruner will use the chosen corresponding pruner to prune the model in each iteration.
+    total_iteration : int
+        The total iteration number.
+    log_dir : str
+        The log directory used to save the result; you can find the best result under this folder.
+    keep_intermediate_result : bool
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    finetuner : Optional[Callable[[Module], None]]
+        The finetuner handles all finetuning logic, taking a pytorch module as input.
+        It will be called at the end of each iteration, usually for neutralizing the accuracy loss brought by the pruning in this iteration.
+    speed_up : bool
+        If set True, speed up the model at the end of each iteration to make the pruned model compact.
+    dummy_input : Optional[torch.Tensor]
+        If `speed_up` is True, `dummy_input` is required for tracing the model in speed up.
+    evaluator : Optional[Callable[[Module], float]]
+        Evaluate the pruned model and give a score.
+        If evaluator is None, the best result refers to the latest result.
+    pruning_params : Dict
+        If the chosen pruning_algorithm has extra parameters, put them as a dict to pass in.
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict], pruning_algorithm: str,
+                 total_iteration: int, log_dir: str = '.', keep_intermediate_result: bool = False,
+                 finetuner: Optional[Callable[[Module], None]] = None, speed_up: bool = False, dummy_input: Optional[Tensor] = None,
+                 evaluator: Optional[Callable[[Module], float]] = None, pruning_params: Dict = {}):
+        task_generator = LinearTaskGenerator(total_iteration=total_iteration,
+                                             origin_model=model,
+                                             origin_config_list=config_list,
+                                             log_dir=log_dir,
+                                             keep_intermediate_result=keep_intermediate_result)
+        if 'traced_optimizer' in pruning_params:
+            pruning_params['traced_optimizer'] = OptimizerConstructHelper.from_trace(model, pruning_params['traced_optimizer'])
+        pruner = PRUNER_DICT[pruning_algorithm](None, None, **pruning_params)
+        super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=speed_up, dummy_input=dummy_input,
+                         evaluator=evaluator, reset_weight=False)
+
+
+class AGPPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : Module
+        The origin unwrapped pytorch model to be pruned.
+    config_list : List[Dict]
+        The origin config list provided by the user.
+    pruning_algorithm : str
+        Supported pruning algorithm ['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', 'mean_activation', 'taylorfo', 'admm'].
+        This iterative pruner will use the chosen corresponding pruner to prune the model in each iteration.
+    total_iteration : int
+        The total iteration number.
+    log_dir : str
+        The log directory used to save the result; you can find the best result under this folder.
+    keep_intermediate_result : bool
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    finetuner : Optional[Callable[[Module], None]]
+        The finetuner handles all finetuning logic, taking a pytorch module as input.
+        It will be called at the end of each iteration, usually for neutralizing the accuracy loss brought by the pruning in this iteration.
+    speed_up : bool
+        If set True, speed up the model at the end of each iteration to make the pruned model compact.
+    dummy_input : Optional[torch.Tensor]
+        If `speed_up` is True, `dummy_input` is required for tracing the model in speed up.
+    evaluator : Optional[Callable[[Module], float]]
+        Evaluate the pruned model and give a score.
+        If evaluator is None, the best result refers to the latest result.
+    pruning_params : Dict
+        If the chosen pruning_algorithm has extra parameters, put them as a dict to pass in.
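+
+    Example (a minimal usage sketch; `model`, `config_list` and `finetuner` are
+    user-defined placeholders)::
+
+        pruner = AGPPruner(model, config_list, pruning_algorithm='l1', total_iteration=10,
+                           finetuner=finetuner)
+        pruner.compress()
+        _, compact_model, masks, _, _ = pruner.get_best_result()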
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict], pruning_algorithm: str,
+                 total_iteration: int, log_dir: str = '.', keep_intermediate_result: bool = False,
+                 finetuner: Optional[Callable[[Module], None]] = None, speed_up: bool = False, dummy_input: Optional[Tensor] = None,
+                 evaluator: Optional[Callable[[Module], float]] = None, pruning_params: Dict = {}):
+        task_generator = AGPTaskGenerator(total_iteration=total_iteration,
+                                          origin_model=model,
+                                          origin_config_list=config_list,
+                                          log_dir=log_dir,
+                                          keep_intermediate_result=keep_intermediate_result)
+        if 'traced_optimizer' in pruning_params:
+            pruning_params['traced_optimizer'] = OptimizerConstructHelper.from_trace(model, pruning_params['traced_optimizer'])
+        pruner = PRUNER_DICT[pruning_algorithm](None, None, **pruning_params)
+        super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=speed_up, dummy_input=dummy_input,
+                         evaluator=evaluator, reset_weight=False)
+
+
+class LotteryTicketPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : Module
+        The origin unwrapped pytorch model to be pruned.
+    config_list : List[Dict]
+        The origin config list provided by the user.
+    pruning_algorithm : str
+        Supported pruning algorithm ['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', 'mean_activation', 'taylorfo', 'admm'].
+        This iterative pruner will use the chosen corresponding pruner to prune the model in each iteration.
+    total_iteration : int
+        The total iteration number.
+    log_dir : str
+        The log directory used to save the result; you can find the best result under this folder.
+    keep_intermediate_result : bool
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    finetuner : Optional[Callable[[Module], None]]
+        The finetuner handles all finetuning logic, taking a pytorch module as input.
+        It will be called at the end of each iteration if reset_weight is False, and at the beginning of each iteration otherwise.
+    speed_up : bool
+        If set True, speed up the model at the end of each iteration to make the pruned model compact.
+    dummy_input : Optional[torch.Tensor]
+        If `speed_up` is True, `dummy_input` is required for tracing the model in speed up.
+    evaluator : Optional[Callable[[Module], float]]
+        Evaluate the pruned model and give a score.
+        If evaluator is None, the best result refers to the latest result.
+    reset_weight : bool
+        If set True, the model weight will be reset to the original model weight at the end of each iteration step.
+    pruning_params : Dict
+        If the chosen pruning_algorithm has extra parameters, put them as a dict to pass in.
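+
+    Example (a minimal usage sketch; `model`, `config_list` and `finetuner` are
+    user-defined placeholders)::
+
+        pruner = LotteryTicketPruner(model, config_list, pruning_algorithm='l1', total_iteration=10,
+                                     finetuner=finetuner, reset_weight=True)
+        pruner.compress()
+        _, compact_model, masks, _, _ = pruner.get_best_result()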
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict], pruning_algorithm: str,
+                 total_iteration: int, log_dir: str = '.', keep_intermediate_result: bool = False,
+                 finetuner: Optional[Callable[[Module], None]] = None, speed_up: bool = False, dummy_input: Optional[Tensor] = None,
+                 evaluator: Optional[Callable[[Module], float]] = None, reset_weight: bool = True,
+                 pruning_params: Dict = {}):
+        task_generator = LotteryTicketTaskGenerator(total_iteration=total_iteration,
+                                                    origin_model=model,
+                                                    origin_config_list=config_list,
+                                                    log_dir=log_dir,
+                                                    keep_intermediate_result=keep_intermediate_result)
+        if 'traced_optimizer' in pruning_params:
+            pruning_params['traced_optimizer'] = OptimizerConstructHelper.from_trace(model, pruning_params['traced_optimizer'])
+        pruner = PRUNER_DICT[pruning_algorithm](None, None, **pruning_params)
+        super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=speed_up, dummy_input=dummy_input,
+                         evaluator=evaluator, reset_weight=reset_weight)
+
+
+class SimulatedAnnealingPruner(IterativePruner):
+    """
+    Parameters
+    ----------
+    model : Module
+        The origin unwrapped pytorch model to be pruned.
+    config_list : List[Dict]
+        The origin config list provided by the user.
+    evaluator : Callable[[Module], float]
+        Evaluate the pruned model and give a score.
+    start_temperature : float
+        Start temperature of the simulated annealing process.
+    stop_temperature : float
+        Stop temperature of the simulated annealing process.
+    cool_down_rate : float
+        Cool down rate of the temperature.
+    perturbation_magnitude : float
+        Initial perturbation magnitude to the sparsities. The magnitude decreases with current temperature.
+    pruning_algorithm : str
+        Supported pruning algorithm ['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz', 'mean_activation', 'taylorfo', 'admm'].
+        This iterative pruner will use the chosen corresponding pruner to prune the model in each iteration.
+    pruning_params : Dict
+        If the chosen pruning_algorithm has extra parameters, put them as a dict to pass in.
+    log_dir : str
+        The log directory used to save the result; you can find the best result under this folder.
+    keep_intermediate_result : bool
+        Whether to keep the intermediate results, including the intermediate model and masks during each iteration.
+    finetuner : Optional[Callable[[Module], None]]
+        The finetuner handles all finetuning logic, taking a pytorch module as input; it will be called in each iteration.
+    speed_up : bool
+        If set True, speed up the model at the end of each iteration to make the pruned model compact.
+    dummy_input : Optional[torch.Tensor]
+        If `speed_up` is True, `dummy_input` is required for tracing the model in speed up.
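+
+    Example (a minimal usage sketch; `model`, `config_list` and `evaluator` are
+    user-defined placeholders)::
+
+        pruner = SimulatedAnnealingPruner(model, config_list, evaluator, cool_down_rate=0.9)
+        pruner.compress()
+        _, compact_model, masks, _, _ = pruner.get_best_result()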
+    """
+
+    def __init__(self, model: Module, config_list: List[Dict], evaluator: Callable[[Module], float], start_temperature: float = 100,
+                 stop_temperature: float = 20, cool_down_rate: float = 0.9, perturbation_magnitude: float = 0.35,
+                 pruning_algorithm: str = 'level', pruning_params: Dict = {}, log_dir: str = '.', keep_intermediate_result: bool = False,
+                 finetuner: Optional[Callable[[Module], None]] = None, speed_up: bool = False, dummy_input: Optional[Tensor] = None):
+        task_generator = SimulatedAnnealingTaskGenerator(origin_model=model,
+                                                         origin_config_list=config_list,
+                                                         start_temperature=start_temperature,
+                                                         stop_temperature=stop_temperature,
+                                                         cool_down_rate=cool_down_rate,
+                                                         perturbation_magnitude=perturbation_magnitude,
+                                                         log_dir=log_dir,
+                                                         keep_intermediate_result=keep_intermediate_result)
+        if 'traced_optimizer' in pruning_params:
+            pruning_params['traced_optimizer'] = OptimizerConstructHelper.from_trace(model, pruning_params['traced_optimizer'])
+        pruner = PRUNER_DICT[pruning_algorithm](None, None, **pruning_params)
+        super().__init__(pruner, task_generator, finetuner=finetuner, speed_up=speed_up, dummy_input=dummy_input,
+                         evaluator=evaluator, reset_weight=False)
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde6b2554fd2b77b802c41723dc7ed8a1e0bffa
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
@@ -0,0 +1,291 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from copy import deepcopy
+import logging
+from typing import Dict, List, Tuple, Callable
+
+import torch
+from torch import autograd, Tensor
+from torch.nn import Module, Parameter
+from torch.optim import Optimizer, Adam
+
+from nni.algorithms.compression.v2.pytorch.base.compressor import Compressor, _setattr, LayerInfo
+from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import BasicPruner, NORMAL_SCHEMA, EXCLUDE_SCHEMA, INTERNAL_SCHEMA
+from nni.algorithms.compression.v2.pytorch.utils import CompressorSchema, OptimizerConstructHelper
+from nni.common.serializer import Traceable
+
+from .tools.base import TrainerBasedDataCollector
+
+from .tools import (
+    StraightMetricsCalculator,
+    NormalSparsityAllocator
+)
+
+_logger = logging.getLogger(__name__)
+
+
+class PrunerScoredModuleWrapper(Module):
+    """
+    Wrap a module to enable data parallelism, forward method customization and buffer registration.
+    Different from `PrunerModuleWrapper`, `PrunerScoredModuleWrapper` will record the gradient.
+
+    Parameters
+    ----------
+    module
+        The module user wants to compress.
+    config
+        The configurations that users specify for compression.
+    module_name
+        The name of the module to compress; the wrapper shares the same name.
+    pruner
+        The pruner used to calculate mask.
+    """
+    def __init__(self, module: Module, module_name: str, config: Dict, pruner: Compressor):
+        super().__init__()
+        # origin layer information
+        self.module = module
+        self.name = module_name
+        # config and pruner
+        self.config = config
+        self.pruner = pruner
+
+        self.weight = Parameter(torch.empty(self.module.weight.size()))
+        self.weight_score = Parameter(torch.empty(self.weight.size()))
+        torch.nn.init.constant_(self.weight_score, val=0.0)
+
+        # register buffer for mask
+        self.register_buffer("weight_mask", torch.ones(self.module.weight.shape))
+        if hasattr(self.module, 'bias') and self.module.bias is not None:
+            self.register_buffer("bias_mask", torch.ones(self.module.bias.shape))
+            self.bias = Parameter(torch.empty(self.module.bias.size()))
+        else:
+            self.register_buffer("bias_mask", None)
+
+    def _weight2buffer(self):
+        """
+        When using this wrapper for inference, call `_weight2buffer()` to make the original weight untrainable.
+        The best place to call this function is in `Pruner._wrap_model()`.
+        """
+        self.weight.data = self.module.weight.data
+        delattr(self.module, 'weight')
+        self.module.register_buffer('weight', self.weight.data)
+        if hasattr(self.module, 'bias') and self.module.bias is not None:
+            self.bias.data = self.module.bias.data
+            delattr(self.module, 'bias')
+            self.module.register_buffer('bias', self.bias.data)
+
+    def _weight2parameter(self):
+        """
+        When there is no need to record the score, or when exporting the model, call `_weight2parameter()` to make the original weight trainable.
+        The best place to call this function is in `Pruner._unwrap_model()`.
+        """
+        delattr(self.module, 'weight')
+        self.module.weight = Parameter(torch.empty(self.weight.size()))
+        self.module.weight.data = torch.mul(self.weight, self.weight_mask)
+        if hasattr(self.module, 'bias') and self.module.bias is not None:
+            delattr(self.module, 'bias')
+            self.module.bias = Parameter(torch.empty(self.bias.size()))
+            self.module.bias.data = torch.mul(self.bias, self.bias_mask)
+
+    def forward(self, *inputs):
+        # apply mask to weight, bias
+        self.module.weight = torch.mul(self.weight, _StraightThrough.apply(self.weight_score, self.weight_mask))
+        if hasattr(self.module, 'bias') and self.module.bias is not None:
+            self.module.bias = torch.mul(self.bias, self.bias_mask)
+        return self.module(*inputs)
+
+
+class _StraightThrough(autograd.Function):
+    """
+    Straight through the gradient to the score, then the score = initial_score + sum(-lr * grad(weight) * weight).
+    """
+    @staticmethod
+    def forward(ctx, score, masks):
+        return masks
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return grad_output, None
+
+
+class WeightScoreTrainerBasedDataCollector(TrainerBasedDataCollector):
+    """
+    Collect all weight_score in wrappers as data used to calculate metrics.
+    """
+    def collect(self) -> Dict[str, Tensor]:
+        for _ in range(self.training_epochs):
+            self.trainer(self.compressor.bound_model, self.optimizer, self.criterion)
+
+        data = {}
+        for _, wrapper in self.compressor.get_modules_wrapper().items():
+            data[wrapper.name] = wrapper.weight_score.data.clone().detach()
+        return data
+
+
+class MovementPruner(BasicPruner):
+    """
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned.
+    config_list : List[Dict]
+        Supported keys:
+            - sparsity : This is to specify the sparsity for each layer in this config to be compressed.
+            - sparsity_per_layer : Equals to sparsity.
+            - op_types : Operation types to be pruned.
+            - op_names : Operation names to be pruned.
+            - op_partial_names: Operation partial names to be pruned, will be autocompleted by NNI.
+            - exclude : Set True then the layers set by op_types and op_names will be excluded from pruning.
+    trainer : Callable[[Module, Optimizer, Callable], None]
+        A callable function used to train model or just inference. Take model, optimizer, criterion as input.
+        The model will be trained or inferenced for `training_epochs` epochs.
+
+        Example::
+
+            def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]):
+                training = model.training
+                model.train(mode=True)
+                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+                for batch_idx, (data, target) in enumerate(train_loader):
+                    data, target = data.to(device), target.to(device)
+                    optimizer.zero_grad()
+                    output = model(data)
+                    loss = criterion(output, target)
+                    loss.backward()
+                    # If you don't want to update the model, you can skip `optimizer.step()` and set train mode to False.
+                    optimizer.step()
+                model.train(mode=training)
+    traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
+        The traced optimizer instance whose optimizer class is wrapped by nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
+    criterion : Callable[[Tensor, Tensor], Tensor]
+        The criterion function used in trainer. Take model output and target value as input, and return the loss.
+    training_epochs : int
+        The total epoch number for training the model.
+        Make sure the total number of `optimizer.step()` calls in `training_epochs` is bigger than `cool_down_beginning_step`.
+    warm_up_step : int
+        The total `optimizer.step()` number before pruning starts, used for warm up.
+        Make sure `warm_up_step` is smaller than `cool_down_beginning_step`.
+    cool_down_beginning_step: int
+        The step at which the sparsity stops growing; note that the sparsity stopping growing doesn't mean the masks stop changing.
+        The sparsity after each `optimizer.step()` is:
+        total_sparsity * (1 - (1 - (current_step - warm_up_step) / (cool_down_beginning_step - warm_up_step)) ** 3).
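+
+    Example (a minimal usage sketch; `model`, `config_list`, `trainer`, `traced_optimizer`
+    and `criterion` are user-defined placeholders, and the step counts are illustrative)::
+
+        pruner = MovementPruner(model, config_list, trainer, traced_optimizer, criterion,
+                                training_epochs=10, warm_up_step=3000, cool_down_beginning_step=27000)
+        pruned_model, masks = pruner.compress()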
+    """
+    def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
+                 traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], training_epochs: int, warm_up_step: int,
+                 cool_down_beginning_step: int):
+        self.trainer = trainer
+        if isinstance(traced_optimizer, OptimizerConstructHelper):
+            self.optimizer_helper = traced_optimizer
+        else:
+            self.optimizer_helper = OptimizerConstructHelper.from_trace(model, traced_optimizer)
+        self.criterion = criterion
+        self.training_epochs = training_epochs
+        self.warm_up_step = warm_up_step
+        self.cool_down_beginning_step = cool_down_beginning_step
+        assert self.warm_up_step < self.cool_down_beginning_step, '`warm_up_step` should be smaller than `cool_down_beginning_step`'
+        super().__init__(model, config_list)
+
+    def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]):
+        schema_list = [deepcopy(NORMAL_SCHEMA), deepcopy(EXCLUDE_SCHEMA), deepcopy(INTERNAL_SCHEMA)]
+        schema = CompressorSchema(schema_list, model, _logger)
+        schema.validate(config_list)
+
+    def cubic_schedule(self, current_step: int):
+        if self.warm_up_step < current_step <= self.cool_down_beginning_step:
+            wrapper_dict = self.get_modules_wrapper()
+            for config in self.config_list:
+                current_sparsity = config['total_sparsity'] * (1 - (1 - (current_step - self.warm_up_step) / (self.cool_down_beginning_step - self.warm_up_step)) ** 3)
+                for op_name in config['op_names']:
+                    wrapper_dict[op_name].config['total_sparsity'] = current_sparsity
+
+    def reset_tools(self):
+        if self.metrics_calculator is None:
+            self.metrics_calculator = StraightMetricsCalculator()
+        if self.sparsity_allocator is None:
+            self.sparsity_allocator = NormalSparsityAllocator(self, continuous_mask=False)
+
+        # use Adam to update the weight_score
+        params = [{"params": [p for n, p in self.bound_model.named_parameters() if "weight_score" in n and p.requires_grad]}]
+        optimizer = Adam(params, 1e-2)
+        self.step_counter = 0
+
+        # update the masks after each optimizer step
+        def _optimizer_patch():
+            optimizer.step()
+            optimizer.zero_grad()
+            self.step_counter += 1
+            if self.step_counter > self.warm_up_step:
+                self.cubic_schedule(self.step_counter)
+                data = {}
+                for _, wrapper in self.get_modules_wrapper().items():
+                    data[wrapper.name] = wrapper.weight_score.data
+                metrics = self.metrics_calculator.calculate_metrics(data)
+                masks = self.sparsity_allocator.generate_sparsity(metrics)
+                self.load_masks(masks)
+
+        if self.data_collector is None:
+            self.data_collector = WeightScoreTrainerBasedDataCollector(self, self.trainer, self.optimizer_helper, self.criterion, self.training_epochs, opt_after_tasks=[_optimizer_patch])
+        else:
+            self.data_collector.reset()
+
+    def _wrap_model(self):
+        """
+        Wrap all modules that need to be compressed.
+        Different from the parent function, call `wrapper._weight2buffer()` after replacing the origin module with the wrapper.
+        """
+        if not self.is_wrapped:
+            for _, wrapper in reversed(self.get_modules_wrapper().items()):
+                _setattr(self.bound_model, wrapper.name, wrapper)
+                wrapper._weight2buffer()
+            self.is_wrapped = True
+
+    def _unwrap_model(self):
+        """
+        Unwrap all modules that need to be compressed.
+        Different from the parent function, call `wrapper._weight2parameter()` after replacing the wrapper with the origin module.
+ """ + if self.is_wrapped: + for _, wrapper in self.get_modules_wrapper().items(): + _setattr(self.bound_model, wrapper.name, wrapper.module) + wrapper._weight2parameter() + self.is_wrapped = False + + def _wrap_modules(self, layer: LayerInfo, config: Dict): + """ + Create a wrapper module to replace the original one. + Different from the parent function, use `PrunerScoredModuleWrapper` instead of `PrunerModuleWrapper`. + + Parameters + ---------- + layer + The layer to instrument the mask. + config + The configuration for generating the mask. + """ + _logger.debug("Module detected to compress : %s.", layer.name) + wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, self) + assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name + # move newly registered buffers to the same device of weight + wrapper.to(layer.module.weight.device) + return wrapper + + def get_origin2wrapped_parameter_name_map(self) -> Dict[str, str]: + if self.is_wrapped: + self._unwrap_model() + parameter_name_map = {name: name for name, _ in self.bound_model.named_parameters()} + self._wrap_model() + return parameter_name_map + else: + raise Exception('When only the model is wrapped can get the parameter_name_map.') + + def compress(self) -> Tuple[Module, Dict]: + # sparsity grow from 0 + for _, wrapper in self.get_modules_wrapper().items(): + wrapper.config['total_sparsity'] = 0 + result = super().compress() + # del weight_score + for _, wrapper in self.get_modules_wrapper().items(): + wrapper.weight_score = None + return result diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce1ca39d83456358f6f75b73959bfa3851318bf --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py @@ -0,0 +1,31 @@ +from .base import ( + HookCollectorInfo, + DataCollector, + MetricsCalculator, + SparsityAllocator, + TaskGenerator +) +from .data_collector import ( + WeightDataCollector, + WeightTrainerBasedDataCollector, + SingleHookTrainerBasedDataCollector +) +from .metrics_calculator import ( + StraightMetricsCalculator, + NormMetricsCalculator, + MultiDataNormMetricsCalculator, + DistMetricsCalculator, + APoZRankMetricsCalculator, + MeanRankMetricsCalculator +) +from .sparsity_allocator import ( + NormalSparsityAllocator, + GlobalSparsityAllocator, + Conv2dDependencyAwareAllocator +) +from .task_generator import ( + AGPTaskGenerator, + LinearTaskGenerator, + LotteryTicketTaskGenerator, + SimulatedAnnealingTaskGenerator +) diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py new file mode 100644 index 0000000000000000000000000000000000000000..e9baabb92a348123457515d1e0444c321c775a1a --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py @@ -0,0 +1,578 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from datetime import datetime +import logging +from pathlib import Path +import types +from typing import List, Dict, Tuple, Optional, Callable, Union + +import json_tricks +import torch +from torch import Tensor +from torch.nn import Module +from torch.optim import Optimizer + +from nni.algorithms.compression.v2.pytorch.base import Compressor, LayerInfo, Task, TaskResult +from nni.algorithms.compression.v2.pytorch.utils import OptimizerConstructHelper + +_logger = logging.getLogger(__name__) + + +class DataCollector: + """ + An abstract class for collect the data needed by the compressor. + """ + + def __init__(self, compressor: Compressor): + """ + Parameters + ---------- + compressor + The compressor binded with this DataCollector. + """ + self.compressor = compressor + + def reset(self): + """ + Reset the `DataCollector`. + """ + raise NotImplementedError() + + def collect(self) -> Dict: + """ + Collect the compressor needed data, i.e., module weight, the output of activation function. + + Returns + ------- + Dict + Usually has format like {module_name: tensor_type_data}. + """ + raise NotImplementedError() + + +class HookCollectorInfo: + def __init__(self, targets: Union[Dict[str, Tensor], List[LayerInfo]], hook_type: str, + collector: Union[Callable[[List, Tensor], Callable[[Tensor], None]], Callable[[List], Callable[[Module, Tensor, Tensor], None]]]): + """ + This class used to aggregate the information of what kind of hook is placed on which layers. + + Parameters + ---------- + targets + List of LayerInfo or Dict of {layer_name: weight_tensor}, the hook targets. + hook_type + 'forward' or 'backward'. + collector + A hook function generator, the input is a buffer (empty list) or a buffer (empty list) and tensor, the output is a hook function. + The buffer is used to store the data wanted to hook. + """ + self.targets = targets + self.hook_type = hook_type + self.collector = collector + + +class TrainerBasedDataCollector(DataCollector): + """ + This class includes some trainer based util functions, i.e., patch optimizer or criterion, add hooks. + """ + + def __init__(self, compressor: Compressor, trainer: Callable[[Module, Optimizer, Callable], None], optimizer_helper: OptimizerConstructHelper, + criterion: Callable[[Tensor, Tensor], Tensor], training_epochs: int, + opt_before_tasks: List = [], opt_after_tasks: List = [], + collector_infos: List[HookCollectorInfo] = [], criterion_patch: Callable[[Callable], Callable] = None): + """ + Parameters + ---------- + compressor + The compressor binded with this DataCollector. + trainer + A callable function used to train model or just inference. Take model, optimizer, criterion as input. + The model will be trained or inferenced `training_epochs` epochs. + + Example:: + + def trainer(model: Module, optimizer: Optimizer, criterion: Callable[[Tensor, Tensor], Tensor]): + training = model.training + model.train(mode=True) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + # If you don't want to update the model, you can skip `optimizer.step()`, and set train mode False. + optimizer.step() + model.train(mode=training) + optimizer + The optimizer instance used in trainer. Note that this optimizer might be patched during collect data, + so do not use this optimizer in other places. 
+ criterion + The criterion function used in trainer. Take model output and target value as input, and return the loss. + training_epochs + The total number of calling trainer. + opt_before_tasks + A list of function that will be called one by one before origin `optimizer.step()`. + Note that these functions will be patched into `optimizer.step()`. + opt_after_tasks + A list of function that will be called one by one after origin `optimizer.step()`. + Note that these functions will be patched into `optimizer.step()`. + collector_infos + A list of `HookCollectorInfo` instance. And the hooks will be registered in `__init__`. + criterion_patch + A callable function used to patch the criterion. Take a criterion function as input and return a new one. + + Example:: + + def criterion_patch(criterion: Callable[[Tensor, Tensor], Tensor]) -> Callable[[Tensor, Tensor], Tensor]: + weight = ... + def patched_criterion(output, target): + return criterion(output, target) + torch.norm(weight) + return patched_criterion + """ + super().__init__(compressor) + self.trainer = trainer + self.training_epochs = training_epochs + self.optimizer_helper = optimizer_helper + self._origin_criterion = criterion + self._opt_before_tasks = opt_before_tasks + self._opt_after_tasks = opt_after_tasks + + self._criterion_patch = criterion_patch + + self.reset(collector_infos) + + def reset(self, collector_infos: List[HookCollectorInfo] = []): + # refresh optimizer and criterion + self._reset_optimizer() + + if self._criterion_patch is not None: + self.criterion = self._criterion_patch(self._origin_criterion) + else: + self.criterion = self._origin_criterion + + # patch optimizer + self._patch_optimizer() + + # hook + self._remove_all_hook() + self._hook_id = 0 + self._hook_handles = {} + self._hook_buffer = {} + + self._collector_infos = collector_infos + self._add_all_hook() + + def _reset_optimizer(self): + parameter_name_map = self.compressor.get_origin2wrapped_parameter_name_map() + self.optimizer = self.optimizer_helper.call(self.compressor.bound_model, parameter_name_map) + + def _patch_optimizer(self): + def patch_step(old_step): + def new_step(_, *args, **kwargs): + for task in self._opt_before_tasks: + task() + # call origin optimizer step method + output = old_step(*args, **kwargs) + for task in self._opt_after_tasks: + task() + return output + return new_step + if self.optimizer is not None: + self.optimizer.step = types.MethodType(patch_step(self.optimizer.step), self.optimizer) + + def _add_hook(self, collector_info: HookCollectorInfo) -> int: + self._hook_id += 1 + self._hook_handles[self._hook_id] = {} + self._hook_buffer[self._hook_id] = {} + + if collector_info.hook_type == 'forward': + self._add_forward_hook(self._hook_id, collector_info.targets, collector_info.collector) + elif collector_info.hook_type == 'backward': + self._add_backward_hook(self._hook_id, collector_info.targets, collector_info.collector) + elif collector_info.hook_type == 'tensor': + self._add_tensor_hook(self._hook_id, collector_info.targets, collector_info.collector) + else: + _logger.warning('Skip unsupported hook type: %s', collector_info.hook_type) + + return self._hook_id + + def _add_forward_hook(self, hook_id: int, layers: List[LayerInfo], + collector: Callable[[List], Callable[[Module, Tensor, Tensor], None]]): + assert all(isinstance(layer_info, LayerInfo) for layer_info in layers) + for layer in layers: + self._hook_buffer[hook_id][layer.name] = [] + handle = 
layer.module.register_forward_hook(collector(self._hook_buffer[hook_id][layer.name])) + self._hook_handles[hook_id][layer.name] = handle + + def _add_backward_hook(self, hook_id: int, layers: List[LayerInfo], + collector: Callable[[List], Callable[[Module, Tensor, Tensor], None]]): + assert all(isinstance(layer_info, LayerInfo) for layer_info in layers) + for layer in layers: + self._hook_buffer[hook_id][layer.name] = [] + handle = layer.module.register_backward_hook(collector(self._hook_buffer[hook_id][layer.name])) + self._hook_handles[hook_id][layer.name] = handle + + def _add_tensor_hook(self, hook_id: int, tensors: Dict[str, Tensor], + collector: Callable[[List, Tensor], Callable[[Tensor], None]]): + assert all(isinstance(tensor, Tensor) for _, tensor in tensors.items()) + for layer_name, tensor in tensors.items(): + self._hook_buffer[hook_id][layer_name] = [] + handle = tensor.register_hook(collector(self._hook_buffer[hook_id][layer_name], tensor)) + self._hook_handles[hook_id][layer_name] = handle + + def _remove_hook(self, hook_id: int): + if hook_id not in self._hook_handles: + raise ValueError("%s is not a valid collector id" % str(hook_id)) + for handle in self._hook_handles[hook_id].values(): + handle.remove() + del self._hook_handles[hook_id] + + def _add_all_hook(self): + for collector_info in self._collector_infos: + self._add_hook(collector_info) + + def _remove_all_hook(self): + if hasattr(self, '_hook_handles'): + for hook_id in list(self._hook_handles.keys()): + self._remove_hook(hook_id) + + +class MetricsCalculator: + """ + An abstract class for calculate a kind of metrics of the given data. + """ + def __init__(self, dim: Optional[Union[int, List[int]]] = None, + block_sparse_size: Optional[Union[int, List[int]]] = None): + """ + Parameters + ---------- + dim + The dimensions that corresponding to the under pruning weight dimensions in collected data. + None means one-to-one correspondence between pruned dimensions and data, which equal to set `dim` as all data dimensions. + Only these `dim` will be kept and other dimensions of the data will be reduced. + + Example: + + If you want to prune the Conv2d weight in filter level, and the weight size is (32, 16, 3, 3) [out-channel, in-channel, kernal-size-1, kernal-size-2]. + Then the under pruning dimensions is [0], which means you want to prune the filter or out-channel. + + Case 1: Directly collect the conv module weight as data to calculate the metric. + Then the data has size (32, 16, 3, 3). + Mention that the dimension 0 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=0` will set in `__init__`. + + Case 2: Use the output of the conv module as data to calculate the metric. + Then the data has size (batch_num, 32, feature_map_size_1, feature_map_size_2). + Mention that the dimension 1 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=1` will set in `__init__`. + + In both of these two case, the metric of this module has size (32,). + block_sparse_size + This used to describe the block size a metric value represented. By default, None means the block size is ones(len(dim)). + Make sure len(dim) == len(block_sparse_size), and the block_sparse_size dimension position is corresponding to dim. + + Example: + + The under pruning weight size is (768, 768), and you want to apply a block sparse on dim=[0] with block size [64, 768], + then you can set block_sparse_size=[64]. The final metric size is (12,). 
+ """ + self.dim = dim if not isinstance(dim, int) else [dim] + self.block_sparse_size = block_sparse_size if not isinstance(block_sparse_size, int) else [block_sparse_size] + if self.block_sparse_size is not None: + assert all(i >= 1 for i in self.block_sparse_size) + elif self.dim is not None: + self.block_sparse_size = [1] * len(self.dim) + if self.dim is not None: + assert all(i >= 0 for i in self.dim) + self.dim, self.block_sparse_size = (list(t) for t in zip(*sorted(zip(self.dim, self.block_sparse_size)))) + + def calculate_metrics(self, data: Dict) -> Dict[str, Tensor]: + """ + Parameters + ---------- + data + A dict handle the data used to calculate metrics. Usually has format like {module_name: tensor_type_data}. + + Returns + ------- + Dict[str, Tensor] + The key is the layer_name, value is the metric. + Note that the metric has the same size with the data size on `dim`. + """ + raise NotImplementedError() + + +class SparsityAllocator: + """ + An abstract class for allocate mask based on metrics. + """ + + def __init__(self, pruner: Compressor, dim: Optional[Union[int, List[int]]] = None, + block_sparse_size: Optional[Union[int, List[int]]] = None, continuous_mask: bool = True): + """ + Parameters + ---------- + pruner + The pruner that binded with this `SparsityAllocator`. + dim + The under pruning weight dimensions, which metric size should equal to the under pruning weight size on these dimensions. + None means one-to-one correspondence between pruned dimensions and metric, which equal to set `dim` as all under pruning weight dimensions. + The mask will expand to the weight size depend on `dim`. + + Example: + + The under pruning weight has size (2, 3, 4), and `dim=1` means the under pruning weight dimension is 1. + Then the metric should have a size (3,), i.e., `metric=[0.9, 0.1, 0.8]`. + Assuming by some kind of `SparsityAllocator` get the mask on weight dimension 1 `mask=[1, 0, 1]`, + then the dimension mask will expand to the final mask `[[[1, 1, 1, 1], [0, 0, 0, 0], [1, 1, 1, 1]], [[1, 1, 1, 1], [0, 0, 0, 0], [1, 1, 1, 1]]]`. + block_sparse_size + This used to describe the block size a metric value represented. By default, None means the block size is ones(len(dim)). + Make sure len(dim) == len(block_sparse_size), and the block_sparse_size dimension position is corresponding to dim. + + Example: + + The metric size is (12,), and block_sparse_size=[64], then the mask will expand to (768,) at first before expand with `dim`. + continuous_mask + Inherit the mask already in the wrapper if set True. + """ + self.pruner = pruner + self.dim = dim if not isinstance(dim, int) else [dim] + self.block_sparse_size = block_sparse_size if not isinstance(block_sparse_size, int) else [block_sparse_size] + if self.block_sparse_size is not None: + assert all(i >= 1 for i in self.block_sparse_size) + elif self.dim is not None: + self.block_sparse_size = [1] * len(self.dim) + if self.dim is not None: + assert all(i >= 0 for i in self.dim) + self.dim, self.block_sparse_size = (list(t) for t in zip(*sorted(zip(self.dim, self.block_sparse_size)))) + self.continuous_mask = continuous_mask + + def generate_sparsity(self, metrics: Dict) -> Dict[str, Dict[str, Tensor]]: + """ + Parameters + ---------- + metrics + A metric dict. The key is the name of layer, the value is its metric. + """ + raise NotImplementedError() + + def _expand_mask(self, name: str, mask: Tensor) -> Dict[str, Tensor]: + """ + Parameters + ---------- + name + The masked module name. 
+ mask + The reduced mask with `self.dim` and `self.block_sparse_size`. + + Returns + ------- + Dict[str, Tensor] + The key is `weight` or `bias`, value is the final mask. + """ + weight_mask = mask.clone() + + if self.block_sparse_size is not None: + # expend mask with block_sparse_size + expand_size = list(weight_mask.size()) + reshape_size = list(weight_mask.size()) + for i, block_width in reversed(list(enumerate(self.block_sparse_size))): + weight_mask = weight_mask.unsqueeze(i + 1) + expand_size.insert(i + 1, block_width) + reshape_size[i] *= block_width + weight_mask = weight_mask.expand(expand_size).reshape(reshape_size) + + wrapper = self.pruner.get_modules_wrapper()[name] + weight_size = wrapper.module.weight.data.size() + + if self.dim is None: + assert weight_mask.size() == weight_size + expand_mask = {'weight': weight_mask} + else: + # expand mask to weight size with dim + assert len(weight_mask.size()) == len(self.dim) + assert all(weight_size[j] == weight_mask.size(i) for i, j in enumerate(self.dim)) + + idxs = list(range(len(weight_size))) + [idxs.pop(i) for i in reversed(self.dim)] + for i in idxs: + weight_mask = weight_mask.unsqueeze(i) + expand_mask = {'weight': weight_mask.expand(weight_size).clone()} + # NOTE: assume we only mask output, so the mask and bias have a one-to-one correspondence. + # If we support more kind of masks, this place need refactor. + if wrapper.bias_mask is not None and weight_mask.size() == wrapper.bias_mask.size(): + expand_mask['bias'] = weight_mask.clone() + return expand_mask + + def _compress_mask(self, mask: Tensor) -> Tensor: + """ + This function will reduce the mask with `self.dim` and `self.block_sparse_size`. + e.g., a mask tensor with size [50, 60, 70], self.dim is (0, 1), self.block_sparse_size is [10, 10]. + Then, the reduced mask size is [50 / 10, 60 / 10] => [5, 6]. + + Parameters + ---------- + name + The masked module name. + mask + The entire mask has the same size with weight. + + Returns + ------- + Tensor + Reduced mask. + """ + if self.dim is None or len(mask.size()) == 1: + mask = mask.clone() + else: + mask_dim = list(range(len(mask.size()))) + for dim in self.dim: + mask_dim.remove(dim) + mask = torch.sum(mask, dim=mask_dim) + + if self.block_sparse_size is not None: + # operation like pooling + lower_case_letters = 'abcdefghijklmnopqrstuvwxyz' + ein_expression = '' + for i, step in enumerate(self.block_sparse_size): + mask = mask.unfold(i, step, step) + ein_expression += lower_case_letters[i] + ein_expression = '...{},{}'.format(ein_expression, ein_expression) + mask = torch.einsum(ein_expression, mask, torch.ones(self.block_sparse_size).to(mask.device)) + + return (mask != 0).type_as(mask) + + +class TaskGenerator: + """ + This class used to generate config list for pruner in each iteration. + + Parameters + ---------- + origin_model + The origin unwrapped pytorch model to be pruned. + origin_masks + The pre masks on the origin model. This mask maybe user-defined or maybe generate by previous pruning. + origin_config_list + The origin config list provided by the user. Note that this config_list is directly config the origin model. + This means the sparsity provided by the origin_masks should also be recorded in the origin_config_list. + log_dir + The log directory use to saving the task generator log. + keep_intermediate_result + If keeping the intermediate result, including intermediate model and masks during each iteration. 
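+
+    Example (a hedged sketch of the subclass contract; `MyTaskGenerator` and its
+    scheduling logic are illustrative placeholders)::
+
+        class MyTaskGenerator(TaskGenerator):
+            def init_pending_tasks(self) -> List[Task]:
+                # Return the first batch of tasks, e.g. one task built from the origin model.
+                ...
+
+            def generate_tasks(self, task_result: TaskResult) -> List[Task]:
+                # Inspect the previous task result and return follow-up tasks, or [] to stop.
+                ...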
+ """ + def __init__(self, origin_model: Optional[Module], origin_masks: Optional[Dict[str, Dict[str, Tensor]]] = {}, + origin_config_list: Optional[List[Dict]] = [], log_dir: str = '.', keep_intermediate_result: bool = False): + self._log_dir = log_dir + self._keep_intermediate_result = keep_intermediate_result + + if origin_model is not None and origin_config_list is not None and origin_masks is not None: + self.reset(origin_model, origin_config_list, origin_masks) + + def reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}): + assert isinstance(model, Module), 'Only support pytorch module.' + + self._log_dir_root = Path(self._log_dir, datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')).absolute() + self._log_dir_root.mkdir(parents=True, exist_ok=True) + + self._intermediate_result_dir = Path(self._log_dir_root, 'intermediate_result') + self._intermediate_result_dir.mkdir(parents=True, exist_ok=True) + + # save origin data in {log_dir}/origin + self._origin_model_path = Path(self._log_dir_root, 'origin', 'model.pth') + self._origin_masks_path = Path(self._log_dir_root, 'origin', 'masks.pth') + self._origin_config_list_path = Path(self._log_dir_root, 'origin', 'config_list.json') + self._save_data('origin', model, masks, config_list) + + self._task_id_candidate = 0 + self._tasks: Dict[int, Task] = {} + self._pending_tasks: List[Task] = self.init_pending_tasks() + + self._best_score = None + self._best_task_id = None + + # dump self._tasks into {log_dir}/.tasks + self._dump_tasks_info() + + def _dump_tasks_info(self): + tasks = {task_id: task.to_dict() for task_id, task in self._tasks.items()} + with Path(self._log_dir_root, '.tasks').open('w') as f: + json_tricks.dump(tasks, f, indent=4) + + def _save_data(self, folder_name: str, model: Module, masks: Dict[str, Dict[str, Tensor]], config_list: List[Dict]): + Path(self._log_dir_root, folder_name).mkdir(parents=True, exist_ok=True) + torch.save(model, Path(self._log_dir_root, folder_name, 'model.pth')) + torch.save(masks, Path(self._log_dir_root, folder_name, 'masks.pth')) + with Path(self._log_dir_root, folder_name, 'config_list.json').open('w') as f: + json_tricks.dump(config_list, f, indent=4) + + def update_best_result(self, task_result: TaskResult): + score = task_result.score + task_id = task_result.task_id + task = self._tasks[task_id] + task.score = score + if self._best_score is None or (score is not None and score > self._best_score): + self._best_score = score + self._best_task_id = task_id + with Path(task.config_list_path).open('r') as fr: + best_config_list = json_tricks.load(fr) + self._save_data('best_result', task_result.compact_model, task_result.compact_model_masks, best_config_list) + + def init_pending_tasks(self) -> List[Task]: + raise NotImplementedError() + + def generate_tasks(self, task_result: TaskResult) -> List[Task]: + raise NotImplementedError() + + def receive_task_result(self, task_result: TaskResult): + """ + Parameters + ---------- + task_result + The result of the task. 
+ """ + task_id = task_result.task_id + assert task_id in self._tasks, 'Task {} does not exist.'.format(task_id) + self.update_best_result(task_result) + + self._tasks[task_id].status = 'Finished' + self._dump_tasks_info() + + self._pending_tasks.extend(self.generate_tasks(task_result)) + self._dump_tasks_info() + + if not self._keep_intermediate_result: + self._tasks[task_id].clean_up() + + def next(self) -> Optional[Task]: + """ + Returns + ------- + Optional[Task] + Return the next task from pending tasks. + """ + if len(self._pending_tasks) == 0: + return None + else: + task = self._pending_tasks.pop(0) + task.status = 'Running' + self._dump_tasks_info() + return task + + def get_best_result(self) -> Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]: + """ + Returns + ------- + Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]] + If self._best_task_id is not None, + return best task id, best compact model, masks on the compact model, score, config list used in this task. + """ + if self._best_task_id is not None: + compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth')) + compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth')) + with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f: + config_list = json_tricks.load(f) + return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list + return None diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/data_collector.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/data_collector.py new file mode 100644 index 0000000000000000000000000000000000000000..3b2f5265555215bab923bfe8d869634532112fe4 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/data_collector.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from typing import Dict, List + +from torch import Tensor + +from .base import DataCollector, TrainerBasedDataCollector + +_logger = logging.getLogger(__name__) + +__all__ = ['WeightDataCollector', 'WeightTrainerBasedDataCollector', 'SingleHookTrainerBasedDataCollector'] + + +class WeightDataCollector(DataCollector): + """ + Collect all wrapper weights. + """ + + def reset(self): + pass + + def collect(self) -> Dict[str, Tensor]: + data = {} + for _, wrapper in self.compressor.get_modules_wrapper().items(): + data[wrapper.name] = wrapper.module.weight.data + return data + + +class WeightTrainerBasedDataCollector(TrainerBasedDataCollector): + """ + Collect all wrapper weights after training or inference. + """ + + def collect(self) -> Dict[str, Tensor]: + for _ in range(self.training_epochs): + self.trainer(self.compressor.bound_model, self.optimizer, self.criterion) + + data = {} + for _, wrapper in self.compressor.get_modules_wrapper().items(): + data[wrapper.name] = wrapper.module.weight.data + return data + + +class SingleHookTrainerBasedDataCollector(TrainerBasedDataCollector): + """ + Add hooks and collect data during training or inference. + Single means each wrapper only has one hook to collect data. 
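+
+    Example (a hedged sketch mirroring how the taylor pruner wires its collector;
+    `pruner`, `trainer`, `optimizer_helper`, `criterion`, `hook_targets` and `collector_fn`
+    are placeholders)::
+
+        collector_info = HookCollectorInfo(hook_targets, 'tensor', collector_fn)
+        data_collector = SingleHookTrainerBasedDataCollector(pruner, trainer, optimizer_helper, criterion,
+                                                             1, collector_infos=[collector_info])
+        data = data_collector.collect()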
+ """ + + def collect(self) -> Dict[str, List[Tensor]]: + for _ in range(self.training_epochs): + self.trainer(self.compressor.bound_model, self.optimizer, self.criterion) + + data = {} + [data.update(buffer_dict) for _, buffer_dict in self._hook_buffer.items()] + return data diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..d229d2a613fc968c0c27cf58b9219e05896af34c --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py @@ -0,0 +1,192 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from typing import Dict, List, Optional, Union + +import torch +from torch import Tensor + +from .base import MetricsCalculator + +__all__ = ['NormMetricsCalculator', 'MultiDataNormMetricsCalculator', 'DistMetricsCalculator', + 'APoZRankMetricsCalculator', 'MeanRankMetricsCalculator', 'StraightMetricsCalculator'] + + +class StraightMetricsCalculator(MetricsCalculator): + """ + This metrics calculator directly returns a copy of data as metrics. + """ + def calculate_metrics(self, data: Dict[str, Tensor]) -> Dict[str, Tensor]: + metrics = {} + for name, tensor in data.items(): + metrics[name] = tensor.clone().detach() + return metrics + + +class NormMetricsCalculator(MetricsCalculator): + """ + Calculate the specify norm for each tensor in data. + L1, L2, Level, Slim pruner use this to calculate metric. + """ + + def __init__(self, dim: Optional[Union[int, List[int]]] = None, p: Optional[Union[int, float]] = None): + """ + Parameters + ---------- + dim + The dimensions that corresponding to the under pruning weight dimensions in collected data. + None means one-to-one correspondence between pruned dimensions and data, which equal to set `dim` as all data dimensions. + Only these `dim` will be kept and other dimensions of the data will be reduced. + + Example: + + If you want to prune the Conv2d weight in filter level, and the weight size is (32, 16, 3, 3) [out-channel, in-channel, kernal-size-1, kernal-size-2]. + Then the under pruning dimensions is [0], which means you want to prune the filter or out-channel. + + Case 1: Directly collect the conv module weight as data to calculate the metric. + Then the data has size (32, 16, 3, 3). + Mention that the dimension 0 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=0` will set in `__init__`. + + Case 2: Use the output of the conv module as data to calculate the metric. + Then the data has size (batch_num, 32, feature_map_size_1, feature_map_size_2). + Mention that the dimension 1 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=1` will set in `__init__`. + + In both of these two case, the metric of this module has size (32,). + p + The order of norm. None means Frobenius norm. 
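+
+        Example (a hedged sketch; `conv_weight` is an assumed tensor of size (32, 16, 3, 3))::
+
+            calculator = NormMetricsCalculator(dim=0, p=1)
+            metrics = calculator.calculate_metrics({'conv1': conv_weight})
+            # metrics['conv1'] has size (32,): the L1 norm of each filter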
+ """ + super().__init__(dim=dim) + self.p = p if p is not None else 'fro' + + def calculate_metrics(self, data: Dict[str, Tensor]) -> Dict[str, Tensor]: + metrics = {} + for name, tensor in data.items(): + keeped_dim = list(range(len(tensor.size()))) if self.dim is None else self.dim + across_dim = list(range(len(tensor.size()))) + [across_dim.pop(i) for i in reversed(keeped_dim)] + if len(across_dim) == 0: + metrics[name] = tensor.abs() + else: + metrics[name] = tensor.norm(p=self.p, dim=across_dim) + return metrics + + +class MultiDataNormMetricsCalculator(NormMetricsCalculator): + """ + The data value format is a two-element list [batch_number, cumulative_data]. + Directly use the cumulative_data as new_data to calculate norm metric. + TaylorFO pruner uses this to calculate metric. + """ + + def calculate_metrics(self, data: Dict[str, List[Tensor]]) -> Dict[str, Tensor]: + new_data = {name: buffer[1] for name, buffer in data.items()} + return super().calculate_metrics(new_data) + + +class DistMetricsCalculator(MetricsCalculator): + """ + Calculate the sum of specify distance for each element with all other elements in specify `dim` in each tensor in data. + FPGM pruner uses this to calculate metric. + """ + + def __init__(self, p: float, dim: Union[int, List[int]]): + """ + Parameters + ---------- + dim + The dimensions that corresponding to the under pruning weight dimensions in collected data. + None means one-to-one correspondence between pruned dimensions and data, which equal to set `dim` as all data dimensions. + Only these `dim` will be kept and other dimensions of the data will be reduced. + + Example: + + If you want to prune the Conv2d weight in filter level, and the weight size is (32, 16, 3, 3) [out-channel, in-channel, kernal-size-1, kernal-size-2]. + Then the under pruning dimensions is [0], which means you want to prune the filter or out-channel. + + Case 1: Directly collect the conv module weight as data to calculate the metric. + Then the data has size (32, 16, 3, 3). + Mention that the dimension 0 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=0` will set in `__init__`. + + Case 2: Use the output of the conv module as data to calculate the metric. + Then the data has size (batch_num, 32, feature_map_size_1, feature_map_size_2). + Mention that the dimension 1 of the data is corresponding to the under pruning weight dimension 0. + So in this case, `dim=1` will set in `__init__`. + + In both of these two case, the metric of this module has size (32,). + p + The order of norm. 
+ """ + super().__init__(dim=dim) + self.p = p + + def calculate_metrics(self, data: Dict[str, Tensor]) -> Dict[str, Tensor]: + metrics = {} + for name, tensor in data.items(): + keeped_dim = list(range(len(tensor.size()))) if self.dim is None else self.dim + reorder_dim = list(keeped_dim) + reorder_dim.extend([i for i in range(len(tensor.size())) if i not in keeped_dim]) + reorder_tensor = tensor.permute(*reorder_dim).clone() + + metric = torch.ones(*reorder_tensor.size()[:len(keeped_dim)], device=reorder_tensor.device) + across_dim = list(range(len(keeped_dim), len(reorder_dim))) + idxs = metric.nonzero(as_tuple=False) + for idx in idxs: + other = reorder_tensor + for i in idx: + other = other[i] + other = other.clone() + if len(across_dim) == 0: + dist_sum = torch.abs(reorder_tensor - other).sum() + else: + dist_sum = torch.norm((reorder_tensor - other), p=self.p, dim=across_dim).sum() + # NOTE: this place need refactor when support layer level pruning. + tmp_metric = metric + for i in idx[:-1]: + tmp_metric = tmp_metric[i] + tmp_metric[idx[-1]] = dist_sum + + metrics[name] = metric + return metrics + + +class APoZRankMetricsCalculator(MetricsCalculator): + """ + The data value format is a two-element list [batch_number, batch_wise_zeros_count_sum]. + This metric sum the zero number on `dim` then devide the (batch_number * across_dim_size) to calculate the non-zero rate. + Note that the metric we return is (1 - apoz), because we assume a higher metric value has higher importance. + APoZRank pruner uses this to calculate metric. + """ + def calculate_metrics(self, data: Dict[str, List]) -> Dict[str, Tensor]: + metrics = {} + for name, (num, zero_counts) in data.items(): + keeped_dim = list(range(len(zero_counts.size()))) if self.dim is None else self.dim + across_dim = list(range(len(zero_counts.size()))) + [across_dim.pop(i) for i in reversed(keeped_dim)] + # The element number on each keeped_dim in zero_counts + total_size = num + for dim, dim_size in enumerate(zero_counts.size()): + if dim not in keeped_dim: + total_size *= dim_size + _apoz = torch.sum(zero_counts, dim=across_dim).type_as(zero_counts) / total_size + # NOTE: the metric is (1 - apoz) because we assume the smaller metric value is more needed to be pruned. + metrics[name] = torch.ones_like(_apoz) - _apoz + return metrics + + +class MeanRankMetricsCalculator(MetricsCalculator): + """ + The data value format is a two-element list [batch_number, batch_wise_activation_sum]. + This metric simply calculate the average on `self.dim`, then divide by the batch_number. + MeanRank pruner uses this to calculate metric. 
+ """ + def calculate_metrics(self, data: Dict[str, List[Tensor]]) -> Dict[str, Tensor]: + metrics = {} + for name, (num, activation_sum) in data.items(): + keeped_dim = list(range(len(activation_sum.size()))) if self.dim is None else self.dim + across_dim = list(range(len(activation_sum.size()))) + [across_dim.pop(i) for i in reversed(keeped_dim)] + metrics[name] = torch.mean(activation_sum, across_dim) / num + return metrics diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/__init__.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c09ce95334b033f04854a928a17a0aa3aba2a6c6 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/__init__.py @@ -0,0 +1,2 @@ +from .agent import DDPG +from .amc_env import AMCEnv diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/agent.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..71fc2a95276e986f3fe249b048cdc1a557bf7dd4 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/agent.py @@ -0,0 +1,221 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np + +import torch +import torch.nn as nn +from torch.optim import Adam + +from .memory import SequentialMemory + +criterion = nn.MSELoss() +USE_CUDA = torch.cuda.is_available() + + +def to_numpy(var): + use_cuda = torch.cuda.is_available() + return var.cpu().data.numpy() if use_cuda else var.data.numpy() + + +def to_tensor(ndarray, requires_grad=False): # return a float tensor by default + tensor = torch.from_numpy(ndarray).float() # by default does not require grad + if requires_grad: + tensor.requires_grad_() + return tensor.cuda() if torch.cuda.is_available() else tensor + + +class Actor(nn.Module): + def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): + super(Actor, self).__init__() + self.fc1 = nn.Linear(nb_states, hidden1) + self.fc2 = nn.Linear(hidden1, hidden2) + self.fc3 = nn.Linear(hidden2, nb_actions) + self.relu = nn.ReLU() + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + out = self.fc1(x) + out = self.relu(out) + out = self.fc2(out) + out = self.relu(out) + out = self.fc3(out) + out = self.sigmoid(out) + return out + + +class Critic(nn.Module): + def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): + super(Critic, self).__init__() + self.fc11 = nn.Linear(nb_states, hidden1) + self.fc12 = nn.Linear(nb_actions, hidden1) + self.fc2 = nn.Linear(hidden1, hidden2) + self.fc3 = nn.Linear(hidden2, 1) + self.relu = nn.ReLU() + + def forward(self, xs): + x, a = xs + out = self.fc11(x) + self.fc12(a) + out = self.relu(out) + out = self.fc2(out) + out = self.relu(out) + out = self.fc3(out) + return out + + +class DDPG(nn.Module): + def __init__(self, nb_states, nb_actions, args): + super(DDPG, self).__init__() + self.ddpg_params = {'hidden1': 300, 'hidden2': 300, 'lr_c': 1e-3, 'lr_a': 1e-4, 'warmup': 100, 'discount': 1., 'bsize': 64, + 'rmsize': 100, 'window_length': 1, 'tau': 0.01, 'init_delta': 0.5, 'delta_decay': 0.99, 'max_episode_length': 1e9, 'epsilon': 50000} + for key in args: + assert key in self.ddpg_params.keys(), "Error! 
Illegal key: {}".format(key) + self.ddpg_params[key] = args[key] + + self.nb_states = nb_states + self.nb_actions = nb_actions + + # Create Actor and Critic Networks + net_cfg = { + 'hidden1': self.ddpg_params['hidden1'], + 'hidden2': self.ddpg_params['hidden2'], + # 'init_w': self.ddpg_params['init_w + } + self.actor = Actor(self.nb_states, self.nb_actions, **net_cfg) + self.actor_target = Actor(self.nb_states, self.nb_actions, **net_cfg) + self.actor_optim = Adam(self.actor.parameters(), lr=self.ddpg_params['lr_a']) + + self.critic = Critic(self.nb_states, self.nb_actions, **net_cfg) + self.critic_target = Critic(self.nb_states, self.nb_actions, **net_cfg) + self.critic_optim = Adam(self.critic.parameters(), lr=self.ddpg_params['lr_c']) + + self.hard_update(self.actor_target, self.actor) # Make sure target is with the same weight + self.hard_update(self.critic_target, self.critic) + + # Create replay buffer + self.memory = SequentialMemory(limit=self.ddpg_params['rmsize'], window_length=self.ddpg_params['window_length']) + + # Hyper-parameters + self.batch_size = self.ddpg_params['bsize'] + self.tau = self.ddpg_params['tau'] + self.discount = self.ddpg_params['discount'] + self.depsilon = 1.0 / self.ddpg_params['epsilon'] + self.lbound = 0. # self.ddpg_params['lbound'] + self.rbound = 1. # self.ddpg_params['rbound'] + + # noise + self.init_delta = self.ddpg_params['init_delta'] + self.delta_decay = self.ddpg_params['delta_decay'] + self.warmup = self.ddpg_params['warmup'] + + self.epsilon = 1.0 + # self.s_t = None # Most recent state + # self.a_t = None # Most recent action + self.is_training = True + + # + if USE_CUDA: self.cuda() + + # moving average baseline + self.moving_average = None + self.moving_alpha = 0.5 # based on batch, so small + + def update_policy(self): + # Sample batch + state_batch, action_batch, reward_batch, \ + next_state_batch, terminal_batch = self.memory.sample_and_split(self.batch_size) + + # normalize the reward + batch_mean_reward = np.mean(reward_batch) + if self.moving_average is None: + self.moving_average = batch_mean_reward + else: + self.moving_average += self.moving_alpha * (batch_mean_reward - self.moving_average) + reward_batch -= self.moving_average + + # Prepare for the target q batch + with torch.no_grad(): + next_q_values = self.critic_target([ + to_tensor(next_state_batch), + self.actor_target(to_tensor(next_state_batch)), + ]) + + target_q_batch = to_tensor(reward_batch) + \ + self.discount * to_tensor(terminal_batch.astype(np.float)) * next_q_values + + # Critic update + self.critic.zero_grad() + + q_batch = self.critic([to_tensor(state_batch), to_tensor(action_batch)]) + + value_loss = criterion(q_batch, target_q_batch) + value_loss.backward() + self.critic_optim.step() + + # Actor update + self.actor.zero_grad() + + policy_loss = -self.critic([ # pylint: disable=all + to_tensor(state_batch), + self.actor(to_tensor(state_batch)) + ]) + + policy_loss = policy_loss.mean() + policy_loss.backward() + self.actor_optim.step() + + # Target update + self.soft_update(self.actor_target, self.actor) + self.soft_update(self.critic_target, self.critic) + + def observe(self, r_t, s_t, s_t1, a_t, done): + if self.is_training: + self.memory.append(s_t, a_t, r_t, done) # save to memory + + def random_action(self): + action = np.random.uniform(self.lbound, self.rbound, self.nb_actions) + # self.a_t = action + return action + + def select_action(self, s_t, episode): + action = to_numpy(self.actor(to_tensor(np.array(s_t).reshape(1, -1)))).squeeze(0) + delta = 
self.init_delta * (self.delta_decay ** (episode - self.warmup)) + # action += self.is_training * max(self.epsilon, 0) * self.random_process.sample() + action = self.sample_from_truncated_normal_distribution(lower=self.lbound, upper=self.rbound, mu=action, sigma=delta) + action = np.clip(action, self.lbound, self.rbound) + return action + + def load_weights(self, output): + if output is None: return + + self.actor.load_state_dict( + torch.load('{}/actor.pkl'.format(output)) + ) + + self.critic.load_state_dict( + torch.load('{}/critic.pkl'.format(output)) + ) + + def save_model(self, output): + torch.save( + self.actor.state_dict(), + '{}/actor.pkl'.format(output) + ) + torch.save( + self.critic.state_dict(), + '{}/critic.pkl'.format(output) + ) + + def soft_update(self, target, source): + for target_param, param in zip(target.parameters(), source.parameters()): + target_param.data.copy_( + target_param.data * (1.0 - self.tau) + param.data * self.tau + ) + + def hard_update(self, target, source): + for target_param, param in zip(target.parameters(), source.parameters()): + target_param.data.copy_(param.data) + + def sample_from_truncated_normal_distribution(self, lower, upper, mu, sigma, size=1): + from scipy import stats + return stats.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma, size=size) diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/amc_env.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/amc_env.py new file mode 100644 index 0000000000000000000000000000000000000000..900c5f72cd4b2bc56c75089ff6731df99f6f04dd --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/amc_env.py @@ -0,0 +1,134 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
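+
+# A rough sketch of a driver loop that could combine this environment with the
+# DDPG agent above. The pruning step itself is elided, and `model`, `config_list`,
+# `dummy_input`, `max_per_layer`, `agent` and `episode` are illustrative
+# assumptions, not part of this module:
+#
+#     env = AMCEnv(model, config_list, dummy_input, total_sparsity=0.5,
+#                  max_sparsity_per_layer=max_per_layer, target='flops')
+#     observation = env.reset()
+#     done = False
+#     while not done:
+#         action = agent.select_action(observation, episode)
+#         action = env.correct_action(action, model)
+#         # ... prune the current layer of `model` to sparsity `action` ...
+#         action, reward, observation, done = env.step(action, model)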
+
+from collections import OrderedDict
+from copy import Error
+import logging
+from typing import Dict, List
+
+import numpy as np
+from torch import Tensor
+from torch.nn import Module
+
+from nni.algorithms.compression.v2.pytorch.utils import config_list_canonical
+from nni.compression.pytorch.utils.counter import count_flops_params
+
+_logger = logging.getLogger(__name__)
+
+
+class AMCEnv:
+    def __init__(self, model: Module, config_list: List[Dict], dummy_input: Tensor, total_sparsity: float, max_sparsity_per_layer: Dict[str, float], target: str = 'flops'):
+        pruning_op_names = []
+        for config in config_list_canonical(model, config_list):
+            pruning_op_names.extend(config['op_names'])
+        self.pruning_ops = OrderedDict()
+        self.pruning_types = []
+        for i, (name, layer) in enumerate(model.named_modules()):
+            if name in pruning_op_names:
+                op_type = type(layer).__name__
+                stride = np.power(np.prod(layer.stride), 1 / len(layer.stride)) if hasattr(layer, 'stride') else 0
+                kernel_size = np.power(np.prod(layer.kernel_size), 1 / len(layer.kernel_size)) if hasattr(layer, 'kernel_size') else 1
+                self.pruning_ops[name] = (i, op_type, stride, kernel_size)
+                self.pruning_types.append(op_type)
+        self.pruning_types = list(set(self.pruning_types))
+        self.pruning_op_names = list(self.pruning_ops.keys())
+        self.dummy_input = dummy_input
+
+        self.total_sparsity = total_sparsity
+        self.max_sparsity_per_layer = max_sparsity_per_layer
+        assert target in ['flops', 'params']
+        self.target = target
+
+        self.origin_target, self.origin_params_num, self.origin_statistics = count_flops_params(model, dummy_input, verbose=False)
+        self.origin_statistics = {result['name']: result for result in self.origin_statistics}
+
+        self.under_pruning_target = sum([self.origin_statistics[name][self.target] for name in self.pruning_op_names])
+        self.expected_pruning_target = self.total_sparsity * self.under_pruning_target
+
+    def reset(self):
+        self.ops_iter = iter(self.pruning_ops)
+        # build embedding (static part)
+        self._build_state_embedding(self.origin_statistics)
+        observation = self.layer_embedding[0].copy()
+        return observation
+
+    def correct_action(self, action: float, model: Module):
+        try:
+            op_name = next(self.ops_iter)
+            index = self.pruning_op_names.index(op_name)
+            _, _, current_statistics = count_flops_params(model, self.dummy_input, verbose=False)
+            current_statistics = {result['name']: result for result in current_statistics}
+
+            total_current_target = sum([current_statistics[name][self.target] for name in self.pruning_op_names])
+            previous_pruning_target = self.under_pruning_target - total_current_target
+            max_rest_pruning_target = sum([current_statistics[name][self.target] * self.max_sparsity_per_layer[name] for name in self.pruning_op_names[index + 1:]])
+            min_current_pruning_target = self.expected_pruning_target - previous_pruning_target - max_rest_pruning_target
+            max_current_pruning_target_1 = self.origin_statistics[op_name][self.target] * self.max_sparsity_per_layer[op_name] - (self.origin_statistics[op_name][self.target] - current_statistics[op_name][self.target])
+            max_current_pruning_target_2 = self.expected_pruning_target - previous_pruning_target
+            max_current_pruning_target = min(max_current_pruning_target_1, max_current_pruning_target_2)
+            min_action = min_current_pruning_target / current_statistics[op_name][self.target]
+            max_action = max_current_pruning_target / current_statistics[op_name][self.target]
+            if min_action > self.max_sparsity_per_layer[op_name]:
+                _logger.warning('[%s] min action > 
max sparsity per layer: %f > %f', op_name, min_action, self.max_sparsity_per_layer[op_name]) + action = max(0., min(max_action, max(min_action, action))) + + self.current_op_name = op_name + self.current_op_target = current_statistics[op_name][self.target] + except StopIteration: + raise Error('Something goes wrong, this should not happen.') + return action + + def step(self, action: float, model: Module): + _, _, current_statistics = count_flops_params(model, self.dummy_input, verbose=False) + current_statistics = {result['name']: result for result in current_statistics} + index = self.pruning_op_names.index(self.current_op_name) + action = 1 - current_statistics[self.current_op_name][self.target] / self.current_op_target + + total_current_target = sum([current_statistics[name][self.target] for name in self.pruning_op_names]) + previous_pruning_target = self.under_pruning_target - total_current_target + rest_target = sum([current_statistics[name][self.target] for name in self.pruning_op_names[index + 1:]]) + + self.layer_embedding[index][-3] = previous_pruning_target / self.under_pruning_target # reduced + self.layer_embedding[index][-2] = rest_target / self.under_pruning_target # rest + self.layer_embedding[index][-1] = action # last action + observation = self.layer_embedding[index, :].copy() + + return action, 0, observation, self.is_final_layer() + + def is_first_layer(self): + return self.pruning_op_names.index(self.current_op_name) == 0 + + def is_final_layer(self): + return self.pruning_op_names.index(self.current_op_name) == len(self.pruning_op_names) - 1 + + @property + def state_feature(self): + return ['index', 'layer_type', 'input_size', 'output_size', 'stride', 'kernel_size', 'params_size', 'reduced', 'rest', 'a_{t-1}'] + + def _build_state_embedding(self, statistics: Dict[str, Dict]): + _logger.info('Building state embedding...') + layer_embedding = [] + for name, (idx, op_type, stride, kernel_size) in self.pruning_ops.items(): + state = [] + state.append(idx) # index + state.append(self.pruning_types.index(op_type)) # layer type + state.append(np.prod(statistics[name]['input_size'])) # input size + state.append(np.prod(statistics[name]['output_size'])) # output size + state.append(stride) # stride + state.append(kernel_size) # kernel size + state.append(statistics[name]['params']) # params size + state.append(0.) # reduced + state.append(1.) # rest + state.append(0.) # a_{t-1} + layer_embedding.append(np.array(state)) + layer_embedding = np.array(layer_embedding, 'float') + _logger.info('=> shape of embedding (n_layer * n_dim): %s', layer_embedding.shape) + assert len(layer_embedding.shape) == 2, layer_embedding.shape + + # normalize the state + for i in range(layer_embedding.shape[1]): + fmin = min(layer_embedding[:, i]) + fmax = max(layer_embedding[:, i]) + if fmax - fmin > 0: + layer_embedding[:, i] = (layer_embedding[:, i] - fmin) / (fmax - fmin) + + self.layer_embedding = layer_embedding diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/memory.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..57bbcfceb86a20092968c9dc75a618221e119174 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/rl_env/memory.py @@ -0,0 +1,227 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
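+
+# Rough usage sketch (the DDPG agent above is the in-repo consumer of this module;
+# `observation`, `action`, `reward` and `terminal` are placeholders):
+#
+#     memory = SequentialMemory(limit=100, window_length=1)
+#     memory.append(observation, action, reward, terminal)  # record one transition
+#     s0, a, r, s1, t = memory.sample_and_split(batch_size=64)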
+
+from __future__ import absolute_import
+from collections import deque, namedtuple
+import warnings
+import random
+
+import numpy as np
+
+# [reference] https://github.com/matthiasplappert/keras-rl/blob/master/rl/memory.py
+
+# This is to be understood as a transition: Given `state0`, performing `action`
+# yields `reward` and results in `state1`, which might be `terminal`.
+Experience = namedtuple('Experience', 'state0, action, reward, state1, terminal1')
+
+
+def sample_batch_indexes(low, high, size):
+    if high - low >= size:
+        # We have enough data. Draw without replacement, that is each index is unique in the
+        # batch. We cannot use `np.random.choice` here because it is horribly inefficient as
+        # the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.
+        # `random.sample` does the same thing (drawing without replacement) and is way faster.
+        r = range(low, high)
+        batch_idxs = random.sample(r, size)
+    else:
+        # Not enough data. Sample from the range with replacement, so the same index
+        # can occur multiple times. This is not good and should be avoided by picking a
+        # large enough warm-up phase.
+        warnings.warn(
+            'Not enough entries to sample without replacement. '
+            'Consider increasing your warm-up phase to avoid oversampling!')
+        batch_idxs = np.random.randint(low, high, size=size)
+    assert len(batch_idxs) == size
+    return batch_idxs
+
+
+class RingBuffer(object):
+    def __init__(self, maxlen):
+        self.maxlen = maxlen
+        self.start = 0
+        self.length = 0
+        self.data = [None for _ in range(maxlen)]
+
+    def __len__(self):
+        return self.length
+
+    def __getitem__(self, idx):
+        if idx < 0 or idx >= self.length:
+            raise KeyError()
+        return self.data[(self.start + idx) % self.maxlen]
+
+    def append(self, v):
+        if self.length < self.maxlen:
+            # We have space, simply increase the length.
+            self.length += 1
+        elif self.length == self.maxlen:
+            # No space, "remove" the first item.
+            self.start = (self.start + 1) % self.maxlen
+        else:
+            # This should never happen.
+            raise RuntimeError()
+        self.data[(self.start + self.length - 1) % self.maxlen] = v
+
+
+def zeroed_observation(observation):
+    if hasattr(observation, 'shape'):
+        return np.zeros(observation.shape)
+    elif hasattr(observation, '__iter__'):
+        out = []
+        for x in observation:
+            out.append(zeroed_observation(x))
+        return out
+    else:
+        return 0.
+
+
+class Memory(object):
+    def __init__(self, window_length, ignore_episode_boundaries=False):
+        self.window_length = window_length
+        self.ignore_episode_boundaries = ignore_episode_boundaries
+
+        self.recent_observations = deque(maxlen=window_length)
+        self.recent_terminals = deque(maxlen=window_length)
+
+    def sample(self, batch_size, batch_idxs=None):
+        raise NotImplementedError()
+
+    def append(self, observation, action, reward, terminal, training=True):
+        self.recent_observations.append(observation)
+        self.recent_terminals.append(terminal)
+
+    def get_recent_state(self, current_observation):
+        # This code is slightly complicated by the fact that subsequent observations might be
+        # from different episodes. We ensure that an experience never spans multiple episodes.
+        # This is probably not that important in practice but it seems cleaner.
+ state = [current_observation] + idx = len(self.recent_observations) - 1 + for offset in range(0, self.window_length - 1): + current_idx = idx - offset + current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False + if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): + # The previously handled observation was terminal, don't add the current one. + # Otherwise we would leak into a different episode. + break + state.insert(0, self.recent_observations[current_idx]) + while len(state) < self.window_length: + state.insert(0, zeroed_observation(state[0])) + return state + + def get_config(self): + config = { + 'window_length': self.window_length, + 'ignore_episode_boundaries': self.ignore_episode_boundaries, + } + return config + + +class SequentialMemory(Memory): + def __init__(self, limit, **kwargs): + super(SequentialMemory, self).__init__(**kwargs) + + self.limit = limit + + # Do not use deque to implement the memory. This data structure may seem convenient but + # it is way too slow on random access. Instead, we use our own ring buffer implementation. + self.actions = RingBuffer(limit) + self.rewards = RingBuffer(limit) + self.terminals = RingBuffer(limit) + self.observations = RingBuffer(limit) + + def sample(self, batch_size, batch_idxs=None): + if batch_idxs is None: + # Draw random indexes such that we have at least a single entry before each + # index. + batch_idxs = sample_batch_indexes(0, self.nb_entries - 1, size=batch_size) + batch_idxs = np.array(batch_idxs) + 1 + assert np.min(batch_idxs) >= 1 + assert np.max(batch_idxs) < self.nb_entries + assert len(batch_idxs) == batch_size + + # Create experiences + experiences = [] + for idx in batch_idxs: + terminal0 = self.terminals[idx - 2] if idx >= 2 else False + while terminal0: + # Skip this transition because the environment was reset here. Select a new, random + # transition and use this instead. This may cause the batch to contain the same + # transition twice. + idx = sample_batch_indexes(1, self.nb_entries, size=1)[0] + terminal0 = self.terminals[idx - 2] if idx >= 2 else False + assert 1 <= idx < self.nb_entries + + # This code is slightly complicated by the fact that subsequent observations might be + # from different episodes. We ensure that an experience never spans multiple episodes. + # This is probably not that important in practice but it seems cleaner. + state0 = [self.observations[idx - 1]] + for offset in range(0, self.window_length - 1): + current_idx = idx - 2 - offset + current_terminal = self.terminals[current_idx - 1] if current_idx - 1 > 0 else False + if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): + # The previously handled observation was terminal, don't add the current one. + # Otherwise we would leak into a different episode. + break + state0.insert(0, self.observations[current_idx]) + while len(state0) < self.window_length: + state0.insert(0, zeroed_observation(state0[0])) + action = self.actions[idx - 1] + reward = self.rewards[idx - 1] + terminal1 = self.terminals[idx - 1] + + # Okay, now we need to create the follow-up state. This is state0 shifted on timestep + # to the right. Again, we need to be careful to not include an observation from the next + # episode if the last state is terminal. 
+ state1 = [np.copy(x) for x in state0[1:]] + state1.append(self.observations[idx]) + + assert len(state0) == self.window_length + assert len(state1) == len(state0) + experiences.append(Experience(state0=state0, action=action, reward=reward, + state1=state1, terminal1=terminal1)) + assert len(experiences) == batch_size + return experiences + + def sample_and_split(self, batch_size, batch_idxs=None): + experiences = self.sample(batch_size, batch_idxs) + + state0_batch = [] + reward_batch = [] + action_batch = [] + terminal1_batch = [] + state1_batch = [] + for e in experiences: + state0_batch.append(e.state0) + state1_batch.append(e.state1) + reward_batch.append(e.reward) + action_batch.append(e.action) + terminal1_batch.append(0. if e.terminal1 else 1.) + + # Prepare and validate parameters. + state0_batch = np.array(state0_batch, 'double').reshape(batch_size, -1) + state1_batch = np.array(state1_batch, 'double').reshape(batch_size, -1) + terminal1_batch = np.array(terminal1_batch, 'double').reshape(batch_size, -1) + reward_batch = np.array(reward_batch, 'double').reshape(batch_size, -1) + action_batch = np.array(action_batch, 'double').reshape(batch_size, -1) + + return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch + + def append(self, observation, action, reward, terminal, training=True): + super(SequentialMemory, self).append(observation, action, reward, terminal, training=training) + + # This needs to be understood as follows: in `observation`, take `action`, obtain `reward` + # and weather the next state is `terminal` or not. + if training: + self.observations.append(observation) + self.actions.append(action) + self.rewards.append(reward) + self.terminals.append(terminal) + + @property + def nb_entries(self): + return len(self.observations) + + def get_config(self): + config = super(SequentialMemory, self).get_config() + config['limit'] = self.limit + return config diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py new file mode 100644 index 0000000000000000000000000000000000000000..247033e5b6a761830daf67fe28edb20aa4532739 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py @@ -0,0 +1,178 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import math +from typing import Any, Dict, List, Tuple, Union + +import numpy as np +import torch +from torch import Tensor + +from nni.algorithms.compression.v2.pytorch.base import Pruner +from nni.compression.pytorch.utils.shape_dependency import ChannelDependency, GroupDependency + +from .base import SparsityAllocator + + +class NormalSparsityAllocator(SparsityAllocator): + """ + This allocator simply pruned the weight with smaller metrics in layer level. + """ + def generate_sparsity(self, metrics: Dict[str, Tensor]) -> Dict[str, Dict[str, Tensor]]: + masks = {} + for name, wrapper in self.pruner.get_modules_wrapper().items(): + sparsity_rate = wrapper.config['total_sparsity'] + + assert name in metrics, 'Metric of {} is not calculated.'.format(name) + + # We assume the metric value are all positive right now. 
+ metric = metrics[name] + if self.continuous_mask: + metric *= self._compress_mask(wrapper.weight_mask) + prune_num = int(sparsity_rate * metric.numel()) + if prune_num == 0: + threshold = metric.min() - 1 + else: + threshold = torch.topk(metric.view(-1), prune_num, largest=False)[0].max() + mask = torch.gt(metric, threshold).type_as(metric) + masks[name] = self._expand_mask(name, mask) + if self.continuous_mask: + masks[name]['weight'] *= wrapper.weight_mask + return masks + + +class GlobalSparsityAllocator(SparsityAllocator): + """ + This allocator pruned the weight with smaller metrics in group level. + This means all layers in a group will sort metrics uniformly. + The layers with the same config in config_list is a group. + """ + def generate_sparsity(self, metrics: Dict) -> Dict[str, Dict[str, Tensor]]: + masks = {} + # {group_index: {layer_name: metric}} + grouped_metrics = {idx: {name: metrics[name] for name in names} + for idx, names in self.pruner.generate_module_groups().items()} + for _, group_metric_dict in grouped_metrics.items(): + threshold, sub_thresholds = self._calculate_threshold(group_metric_dict) + for name, metric in group_metric_dict.items(): + mask = torch.gt(metric, min(threshold, sub_thresholds[name])).type_as(metric) + masks[name] = self._expand_mask(name, mask) + if self.continuous_mask: + masks[name]['weight'] *= self.pruner.get_modules_wrapper()[name].weight_mask + return masks + + def _calculate_threshold(self, group_metric_dict: Dict[str, Tensor]) -> Tuple[float, Dict[str, float]]: + metric_list = [] + sub_thresholds = {} + total_weight_num = 0 + + temp_wrapper_config = self.pruner.get_modules_wrapper()[list(group_metric_dict.keys())[0]].config + total_sparsity = temp_wrapper_config['total_sparsity'] + max_sparsity_per_layer = temp_wrapper_config.get('max_sparsity_per_layer', {}) + + for name, metric in group_metric_dict.items(): + wrapper = self.pruner.get_modules_wrapper()[name] + + # We assume the metric value are all positive right now. + if self.continuous_mask: + metric = metric * self._compress_mask(wrapper.weight_mask) + + layer_weight_num = wrapper.module.weight.data.numel() + total_weight_num += layer_weight_num + expend_times = int(layer_weight_num / metric.numel()) + + retention_ratio = 1 - max_sparsity_per_layer.get(name, 1) + retention_numel = math.ceil(retention_ratio * layer_weight_num) + removed_metric_num = math.ceil(retention_numel / (wrapper.weight_mask.numel() / metric.numel())) + stay_metric_num = metric.numel() - removed_metric_num + if stay_metric_num <= 0: + sub_thresholds[name] = metric.min().item() - 1 + continue + # Remove the weight parts that must be left + stay_metric = torch.topk(metric.view(-1), stay_metric_num, largest=False)[0] + sub_thresholds[name] = stay_metric.max() + if expend_times > 1: + stay_metric = stay_metric.expand(int(layer_weight_num / metric.numel()), stay_metric_num).contiguous().view(-1) + metric_list.append(stay_metric) + + total_prune_num = int(total_sparsity * total_weight_num) + if total_prune_num == 0: + threshold = torch.cat(metric_list).min().item() - 1 + else: + threshold = torch.topk(torch.cat(metric_list).view(-1), total_prune_num, largest=False)[0].max().item() + return threshold, sub_thresholds + + +class Conv2dDependencyAwareAllocator(SparsityAllocator): + """ + A specify allocator for Conv2d with dependency aware. + """ + + def __init__(self, pruner: Pruner, dim: int, dummy_input: Any): + assert isinstance(dim, int), 'Only support single dim in Conv2dDependencyAwareAllocator.' 
+ super().__init__(pruner, dim=dim) + self.dummy_input = dummy_input + + def _get_dependency(self): + graph = self.pruner.generate_graph(dummy_input=self.dummy_input) + self.pruner._unwrap_model() + self.channel_depen = ChannelDependency(model=self.pruner.bound_model, dummy_input=self.dummy_input, traced_model=graph.trace).dependency_sets + self.group_depen = GroupDependency(model=self.pruner.bound_model, dummy_input=self.dummy_input, traced_model=graph.trace).dependency_sets + self.pruner._wrap_model() + + def generate_sparsity(self, metrics: Dict) -> Dict[str, Dict[str, Tensor]]: + self._get_dependency() + masks = {} + grouped_metrics = {} + for idx, names in enumerate(self.channel_depen): + grouped_metric = {name: metrics[name] for name in names if name in metrics} + if self.continuous_mask: + for name, metric in grouped_metric.items(): + metric *= self._compress_mask(self.pruner.get_modules_wrapper()[name].weight_mask) + if len(grouped_metric) > 0: + grouped_metrics[idx] = grouped_metric + for _, group_metric_dict in grouped_metrics.items(): + group_metric = self._group_metric_calculate(group_metric_dict) + + sparsities = {name: self.pruner.get_modules_wrapper()[name].config['total_sparsity'] for name in group_metric_dict.keys()} + min_sparsity = min(sparsities.values()) + + conv2d_groups = [self.group_depen[name] for name in group_metric_dict.keys()] + max_conv2d_group = np.lcm.reduce(conv2d_groups) + + pruned_per_conv2d_group = int(group_metric.numel() / max_conv2d_group * min_sparsity) + conv2d_group_step = int(group_metric.numel() / max_conv2d_group) + + group_mask = [] + for gid in range(max_conv2d_group): + _start = gid * conv2d_group_step + _end = (gid + 1) * conv2d_group_step + if pruned_per_conv2d_group > 0: + threshold = torch.topk(group_metric[_start: _end], pruned_per_conv2d_group, largest=False)[0].max() + conv2d_group_mask = torch.gt(group_metric[_start:_end], threshold).type_as(group_metric) + else: + conv2d_group_mask = torch.ones(conv2d_group_step, device=group_metric.device) + group_mask.append(conv2d_group_mask) + group_mask = torch.cat(group_mask, dim=0) + + for name, metric in group_metric_dict.items(): + # We assume the metric value are all positive right now. + metric = metric * group_mask + pruned_num = int(sparsities[name] * len(metric)) + threshold = torch.topk(metric, pruned_num, largest=False)[0].max() + mask = torch.gt(metric, threshold).type_as(metric) + masks[name] = self._expand_mask(name, mask) + if self.continuous_mask: + masks[name]['weight'] *= self.pruner.get_modules_wrapper()[name].weight_mask + return masks + + def _group_metric_calculate(self, group_metrics: Union[Dict[str, Tensor], List[Tensor]]) -> Tensor: + """ + Add all metric value in the same position in one group. + """ + group_metrics = list(group_metrics.values()) if isinstance(group_metrics, dict) else group_metrics + assert all(group_metrics[0].size() == group_metric.size() for group_metric in group_metrics), 'Metrics size do not match.' 
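+        # The accumulation below is equivalent to `torch.stack(group_metrics).sum(dim=0)`.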
+        group_sum_metric = torch.zeros(group_metrics[0].size(), device=group_metrics[0].device)
+        for group_metric in group_metrics:
+            group_sum_metric += group_metric
+        return group_sum_metric
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/task_generator.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/task_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d29d14fa0a4ba9212c7cc50038b5251825841ea
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/task_generator.py
@@ -0,0 +1,361 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from copy import deepcopy
+import logging
+from pathlib import Path
+from typing import Dict, List, Tuple
+import json_tricks
+
+import numpy as np
+from torch import Tensor
+import torch
+from torch.nn import Module
+
+from nni.algorithms.compression.v2.pytorch.base import Task, TaskResult
+from nni.algorithms.compression.v2.pytorch.utils import (
+    config_list_canonical,
+    compute_sparsity,
+    get_model_weights_numel
+)
+from .base import TaskGenerator
+
+_logger = logging.getLogger(__name__)
+
+
+class FunctionBasedTaskGenerator(TaskGenerator):
+    def __init__(self, total_iteration: int, origin_model: Module, origin_config_list: List[Dict],
+                 origin_masks: Dict[str, Dict[str, Tensor]] = {}, log_dir: str = '.', keep_intermediate_result: bool = False):
+        """
+        Parameters
+        ----------
+        total_iteration
+            The total number of iterations.
+        origin_model
+            The origin unwrapped pytorch model to be pruned.
+        origin_config_list
+            The origin config list provided by the user. Note that this config_list directly configures the origin model.
+            This means the sparsity provided by the origin_masks should also be recorded in the origin_config_list.
+        origin_masks
+            The pre-applied masks on the origin model. These masks may be user-defined or generated by a previous pruning run.
+        log_dir
+            The log directory used to save the task generator log.
+        keep_intermediate_result
+            Whether to keep the intermediate results, including the intermediate model and masks produced in each iteration.
+ """ + self.total_iteration = total_iteration + super().__init__(origin_model, origin_config_list=origin_config_list, origin_masks=origin_masks, + log_dir=log_dir, keep_intermediate_result=keep_intermediate_result) + + def reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}): + self.current_iteration = 0 + self.target_sparsity = config_list_canonical(model, config_list) + super().reset(model, config_list=config_list, masks=masks) + + def init_pending_tasks(self) -> List[Task]: + origin_model = torch.load(self._origin_model_path) + origin_masks = torch.load(self._origin_masks_path) + + task_result = TaskResult('origin', origin_model, origin_masks, origin_masks, None) + + return self.generate_tasks(task_result) + + def generate_tasks(self, task_result: TaskResult) -> List[Task]: + compact_model = task_result.compact_model + compact_model_masks = task_result.compact_model_masks + + # save intermediate result + model_path = Path(self._intermediate_result_dir, '{}_compact_model.pth'.format(task_result.task_id)) + masks_path = Path(self._intermediate_result_dir, '{}_compact_model_masks.pth'.format(task_result.task_id)) + torch.save(compact_model, model_path) + torch.save(compact_model_masks, masks_path) + + # get current2origin_sparsity and compact2origin_sparsity + origin_model = torch.load(self._origin_model_path) + current2origin_sparsity, compact2origin_sparsity, _ = compute_sparsity(origin_model, compact_model, compact_model_masks, self.target_sparsity) + _logger.debug('\nTask %s total real sparsity compared with original model is:\n%s', str(task_result.task_id), json_tricks.dumps(current2origin_sparsity, indent=4)) + if task_result.task_id != 'origin': + self._tasks[task_result.task_id].state['current2origin_sparsity'] = current2origin_sparsity + + # if reach the total_iteration, no more task will be generated + if self.current_iteration > self.total_iteration: + return [] + + task_id = self._task_id_candidate + new_config_list = self.generate_config_list(self.target_sparsity, self.current_iteration, compact2origin_sparsity) + new_config_list = self.allocate_sparsity(new_config_list, compact_model, compact_model_masks) + config_list_path = Path(self._intermediate_result_dir, '{}_config_list.json'.format(task_id)) + + with Path(config_list_path).open('w') as f: + json_tricks.dump(new_config_list, f, indent=4) + task = Task(task_id, model_path, masks_path, config_list_path) + + self._tasks[task_id] = task + + self._task_id_candidate += 1 + self.current_iteration += 1 + + return [task] + + def generate_config_list(self, target_sparsity: List[Dict], iteration: int, compact2origin_sparsity: List[Dict]) -> List[Dict]: + raise NotImplementedError() + + def allocate_sparsity(self, new_config_list: List[Dict], model: Module, masks: Dict[str, Dict[str, Tensor]]): + return new_config_list + + +class AGPTaskGenerator(FunctionBasedTaskGenerator): + def generate_config_list(self, target_sparsity: List[Dict], iteration: int, compact2origin_sparsity: List[Dict]) -> List[Dict]: + config_list = [] + for target, mo in zip(target_sparsity, compact2origin_sparsity): + ori_sparsity = (1 - (1 - iteration / self.total_iteration) ** 3) * target['total_sparsity'] + sparsity = max(0.0, (ori_sparsity - mo['total_sparsity']) / (1 - mo['total_sparsity'])) + assert 0 <= sparsity <= 1, 'sparsity: {}, ori_sparsity: {}, model_sparsity: {}'.format(sparsity, ori_sparsity, mo['total_sparsity']) + config_list.append(deepcopy(target)) + config_list[-1]['total_sparsity'] = sparsity 
+        return config_list
+
+
+class LinearTaskGenerator(FunctionBasedTaskGenerator):
+    def generate_config_list(self, target_sparsity: List[Dict], iteration: int, compact2origin_sparsity: List[Dict]) -> List[Dict]:
+        config_list = []
+        for target, mo in zip(target_sparsity, compact2origin_sparsity):
+            ori_sparsity = iteration / self.total_iteration * target['total_sparsity']
+            sparsity = max(0.0, (ori_sparsity - mo['total_sparsity']) / (1 - mo['total_sparsity']))
+            assert 0 <= sparsity <= 1, 'sparsity: {}, ori_sparsity: {}, model_sparsity: {}'.format(sparsity, ori_sparsity, mo['total_sparsity'])
+            config_list.append(deepcopy(target))
+            config_list[-1]['total_sparsity'] = sparsity
+        return config_list
+
+
+class LotteryTicketTaskGenerator(FunctionBasedTaskGenerator):
+    def reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}):
+        self.current_iteration = 1
+        self.target_sparsity = config_list_canonical(model, config_list)
+        super(FunctionBasedTaskGenerator, self).reset(model, config_list=config_list, masks=masks)
+
+    def generate_config_list(self, target_sparsity: List[Dict], iteration: int, compact2origin_sparsity: List[Dict]) -> List[Dict]:
+        config_list = []
+        for target, mo in zip(target_sparsity, compact2origin_sparsity):
+            # NOTE: The ori_sparsity formula below comes from compression v1 and differs from the paper.
+            # The paper's formula causes numerical problems, so the compression v1 formula is kept.
+            ori_sparsity = 1 - (1 - target['total_sparsity']) ** (iteration / self.total_iteration)
+            # The following is the formula in the paper.
+            # ori_sparsity = (target['total_sparsity'] * 100) ** (iteration / self.total_iteration) / 100
+            sparsity = max(0.0, (ori_sparsity - mo['total_sparsity']) / (1 - mo['total_sparsity']))
+            assert 0 <= sparsity <= 1, 'sparsity: {}, ori_sparsity: {}, model_sparsity: {}'.format(sparsity, ori_sparsity, mo['total_sparsity'])
+            config_list.append(deepcopy(target))
+            config_list[-1]['total_sparsity'] = sparsity
+        return config_list
+
+
+class SimulatedAnnealingTaskGenerator(TaskGenerator):
+    def __init__(self, origin_model: Module, origin_config_list: List[Dict], origin_masks: Dict[str, Dict[str, Tensor]] = {},
+                 start_temperature: float = 100, stop_temperature: float = 20, cool_down_rate: float = 0.9,
+                 perturbation_magnitude: float = 0.35, log_dir: str = '.', keep_intermediate_result: bool = False):
+        """
+        Parameters
+        ----------
+        origin_model
+            The origin unwrapped pytorch model to be pruned.
+        origin_config_list
+            The origin config list provided by the user. Note that this config_list directly configures the origin model.
+            This means the sparsity provided by the origin_masks should also be recorded in the origin_config_list.
+        origin_masks
+            The pre-applied masks on the origin model. These masks may be user-defined or generated by a previous pruning run.
+        start_temperature
+            Start temperature of the simulated annealing process.
+        stop_temperature
+            Stop temperature of the simulated annealing process.
+        cool_down_rate
+            Cool down rate of the temperature.
+        perturbation_magnitude
+            Initial perturbation magnitude to the sparsities. The magnitude decreases with the current temperature.
+        log_dir
+            The log directory used to save the task generator log.
+        keep_intermediate_result
+            Whether to keep the intermediate results, including the intermediate model and masks produced in each iteration.
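+
+        Notes
+        -----
+        Each iteration evaluates one perturbed sparsity allocation: a better score is
+        always accepted, a worse one is accepted with probability
+        exp(-|delta_score| / current_temperature), and the temperature is multiplied
+        by cool_down_rate until it drops below stop_temperature.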
+ """ + self.start_temperature = start_temperature + self.stop_temperature = stop_temperature + self.cool_down_rate = cool_down_rate + self.perturbation_magnitude = perturbation_magnitude + + super().__init__(origin_model, origin_masks=origin_masks, origin_config_list=origin_config_list, + log_dir=log_dir, keep_intermediate_result=keep_intermediate_result) + + def reset(self, model: Module, config_list: List[Dict] = [], masks: Dict[str, Dict[str, Tensor]] = {}): + self.current_temperature = self.start_temperature + + # TODO: replace with validation here + for config in config_list: + if 'sparsity' in config or 'sparsity_per_layer' in config: + _logger.warning('Only `total_sparsity` can be differentially allocated sparse ratio to each layer, `sparsity` or `sparsity_per_layer` will allocate fixed sparse ratio to layers. Make sure you know what this will lead to, otherwise please use `total_sparsity`.') + + self.weights_numel, self.masked_rate = get_model_weights_numel(model, config_list, masks) + self.target_sparsity_list = config_list_canonical(model, config_list) + self._adjust_target_sparsity() + + self._temp_config_list = None + self._current_sparsity_list = None + self._current_score = None + + super().reset(model, config_list=config_list, masks=masks) + + def _adjust_target_sparsity(self): + """ + If origin_masks is not empty, then re-scale the target sparsity. + """ + if len(self.masked_rate) > 0: + for config in self.target_sparsity_list: + sparsity, op_names = config['total_sparsity'], config['op_names'] + remaining_weight_numel = 0 + pruned_weight_numel = 0 + for name in op_names: + remaining_weight_numel += self.weights_numel[name] + if name in self.masked_rate and self.masked_rate[name] != 0: + pruned_weight_numel += 1 / (1 / self.masked_rate[name] - 1) * self.weights_numel[name] + total_mask_rate = pruned_weight_numel / (pruned_weight_numel + remaining_weight_numel) + config['total_sparsity'] = max(0, (sparsity - total_mask_rate) / (1 - total_mask_rate)) + + def _init_temp_config_list(self): + self._temp_config_list = [] + self._temp_sparsity_list = [] + for config in self.target_sparsity_list: + sparsity_config_list, sparsity = self._init_config_sparsity(config) + self._temp_config_list.extend(sparsity_config_list) + self._temp_sparsity_list.append(sparsity) + + def _init_config_sparsity(self, config: Dict) -> Tuple[List[Dict], List]: + assert 'total_sparsity' in config, 'Sparsity must be set in config: {}'.format(config) + target_sparsity = config['total_sparsity'] + op_names = config['op_names'] + + if target_sparsity == 0: + sparsity_config_list = [deepcopy(config) for i in range(len(op_names))] + for sparsity_config, op_name in zip(sparsity_config_list, op_names): + sparsity_config.update({'total_sparsity': 0, 'op_names': [op_name]}) + return sparsity_config_list, [] + + low_limit = 0 + while True: + # This is to speed up finding the legal sparsity. 
+            low_limit = (1 - low_limit) * 0.05 + low_limit
+            random_sparsity = sorted(np.random.uniform(low_limit, 1, len(op_names)))
+            rescaled_sparsity = self._rescale_sparsity(random_sparsity, target_sparsity, op_names)
+            if rescaled_sparsity is not None and rescaled_sparsity[0] >= 0 and rescaled_sparsity[-1] < 1:
+                break
+
+        return self._sparsity_to_config_list(rescaled_sparsity, config), rescaled_sparsity
+
+    def _rescale_sparsity(self, random_sparsity: List, target_sparsity: float, op_names: List) -> List:
+        assert len(random_sparsity) == len(op_names)
+
+        num_weights = sorted([self.weights_numel[op_name] for op_name in op_names])
+        sparsity = sorted(random_sparsity)
+
+        total_weights = 0
+        total_weights_pruned = 0
+
+        # calculate the scale
+        for idx, num_weight in enumerate(num_weights):
+            total_weights += num_weight
+            total_weights_pruned += int(num_weight * sparsity[idx])
+        if total_weights_pruned == 0:
+            return None
+
+        scale = target_sparsity / (total_weights_pruned / total_weights)
+
+        # rescale the sparsity
+        sparsity = np.asarray(sparsity) * scale
+        return sparsity
+
+    def _sparsity_to_config_list(self, sparsity: List, config: Dict) -> List[Dict]:
+        sparsity = sorted(sparsity)
+        op_names = [k for k, _ in sorted(self.weights_numel.items(), key=lambda item: item[1]) if k in config['op_names']]
+        assert len(sparsity) == len(op_names)
+        sub_temp_config_list = [deepcopy(config) for i in range(len(op_names))]
+        for temp_config, sp, op_name in zip(sub_temp_config_list, sparsity, op_names):
+            temp_config.update({'total_sparsity': sp, 'op_names': [op_name]})
+        return sub_temp_config_list
+
+    def _update_with_perturbations(self):
+        self._temp_config_list = []
+        self._temp_sparsity_list = []
+        # decrease magnitude with current temperature
+        magnitude = self.current_temperature / self.start_temperature * self.perturbation_magnitude
+        for config, current_sparsity in zip(self.target_sparsity_list, self._current_sparsity_list):
+            if len(current_sparsity) == 0:
+                sub_temp_config_list = [deepcopy(config) for i in range(len(config['op_names']))]
+                for temp_config, op_name in zip(sub_temp_config_list, config['op_names']):
+                    temp_config.update({'total_sparsity': 0, 'op_names': [op_name]})
+                self._temp_config_list.extend(sub_temp_config_list)
+                self._temp_sparsity_list.append([])
+                continue
+            while True:
+                perturbation = np.random.uniform(-magnitude, magnitude, len(current_sparsity))
+                temp_sparsity = np.clip(current_sparsity + perturbation, 0, None)
+                temp_sparsity = self._rescale_sparsity(temp_sparsity, config['total_sparsity'], config['op_names'])
+                if temp_sparsity is not None and temp_sparsity[0] >= 0 and temp_sparsity[-1] < 1:
+                    self._temp_config_list.extend(self._sparsity_to_config_list(temp_sparsity, config))
+                    self._temp_sparsity_list.append(temp_sparsity)
+                    break
+
+    def _recover_real_sparsity(self, config_list: List[Dict]) -> List[Dict]:
+        """
+        If the origin masks are not empty, the sparsity in the newly generated config_list needs to be rescaled.
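+
+        For a layer with pre-masked rate m and newly generated sparsity s, the
+        recovered overall sparsity is m + s * (1 - m), which is what the loop below computes.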
+ """ + for config in config_list: + assert len(config['op_names']) == 1 + op_name = config['op_names'][0] + if op_name in self.masked_rate: + config['total_sparsity'] = self.masked_rate[op_name] + config['total_sparsity'] * (1 - self.masked_rate[op_name]) + return config_list + + def init_pending_tasks(self) -> List[Task]: + origin_model = torch.load(self._origin_model_path) + origin_masks = torch.load(self._origin_masks_path) + + self.temp_model_path = Path(self._intermediate_result_dir, 'origin_compact_model.pth') + self.temp_masks_path = Path(self._intermediate_result_dir, 'origin_compact_model_masks.pth') + torch.save(origin_model, self.temp_model_path) + torch.save(origin_masks, self.temp_masks_path) + + task_result = TaskResult('origin', origin_model, origin_masks, origin_masks, None) + + return self.generate_tasks(task_result) + + def generate_tasks(self, task_result: TaskResult) -> List[Task]: + # initial/update temp config list + if self._temp_config_list is None: + self._init_temp_config_list() + else: + score = self._tasks[task_result.task_id].score + if self._current_sparsity_list is None: + self._current_sparsity_list = deepcopy(self._temp_sparsity_list) + self._current_score = score + else: + delta_E = np.abs(score - self._current_score) + probability = np.exp(-1 * delta_E / self.current_temperature) + if self._current_score < score or np.random.uniform(0, 1) < probability: + self._current_score = score + self._current_sparsity_list = deepcopy(self._temp_sparsity_list) + self.current_temperature *= self.cool_down_rate + if self.current_temperature < self.stop_temperature: + return [] + self._update_with_perturbations() + + task_id = self._task_id_candidate + new_config_list = self._recover_real_sparsity(deepcopy(self._temp_config_list)) + config_list_path = Path(self._intermediate_result_dir, '{}_config_list.json'.format(task_id)) + + with Path(config_list_path).open('w') as f: + json_tricks.dump(new_config_list, f, indent=4) + + task = Task(task_id, self.temp_model_path, self.temp_masks_path, config_list_path) + + self._tasks[task_id] = task + + self._task_id_candidate += 1 + + return [task] diff --git a/nni/algorithms/compression/v2/pytorch/utils/__init__.py b/nni/algorithms/compression/v2/pytorch/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6876b86da4c85d7882d03a26897b09718054ae52 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/utils/__init__.py @@ -0,0 +1,12 @@ +from .config_validation import CompressorSchema +from .pruning import ( + config_list_canonical, + unfold_config_list, + dedupe_config_list, + compute_sparsity_compact2origin, + compute_sparsity_mask2compact, + compute_sparsity, + get_model_weights_numel, + get_module_by_name +) +from .constructor_helper import * diff --git a/nni/algorithms/compression/v2/pytorch/utils/config_validation.py b/nni/algorithms/compression/v2/pytorch/utils/config_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9568d4fdd3b3e540a6fd787176ef82950ef6b3 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/utils/config_validation.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from logging import Logger +from typing import Dict, List +from schema import Schema, And, SchemaError + +from torch.nn import Module + + +class CompressorSchema: + def __init__(self, data_schema: List[Dict], model: Module, logger: Logger): + assert isinstance(data_schema, list) + self.data_schema = data_schema + self.compressor_schema = Schema(self._modify_schema(data_schema, model, logger)) + + def _modify_schema(self, data_schema: List[Dict], model: Module, logger: Logger) -> List[Dict]: + if not data_schema: + return data_schema + + for i, sub_schema in enumerate(data_schema): + for k, old_schema in sub_schema.items(): + if k == 'op_types' or (isinstance(k, Schema) and k._schema == 'op_types'): + new_schema = And(old_schema, lambda n: validate_op_types(model, n, logger)) + sub_schema[k] = new_schema + if k == 'op_names' or (isinstance(k, Schema) and k._schema == 'op_names'): + new_schema = And(old_schema, lambda n: validate_op_names(model, n, logger)) + sub_schema[k] = new_schema + + data_schema[i] = And(sub_schema, lambda d: validate_op_types_op_names(d)) + + return data_schema + + def validate(self, data): + self.compressor_schema.validate(data) + + +def validate_op_names(model, op_names, logger): + found_names = set(map(lambda x: x[0], model.named_modules())) + + not_found_op_names = list(set(op_names) - found_names) + if not_found_op_names: + logger.warning('op_names %s not found in model', not_found_op_names) + + return True + + +def validate_op_types(model, op_types, logger): + found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules())) + + not_found_op_types = list(set(op_types) - found_types) + if not_found_op_types: + logger.warning('op_types %s not found in model', not_found_op_types) + + return True + + +def validate_op_types_op_names(data): + if not ('op_types' in data or 'op_names' in data or 'op_partial_names' in data): + raise SchemaError('At least one of the followings must be specified: op_types, op_names or op_partial_names.') + return True diff --git a/nni/algorithms/compression/v2/pytorch/utils/constructor_helper.py b/nni/algorithms/compression/v2/pytorch/utils/constructor_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..6e2cdd1b759452ddb96e5e572c5468a376ec7591 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/utils/constructor_helper.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from copy import deepcopy +from typing import Callable, Dict, List, Type + +from torch import Tensor +from torch.nn import Module +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler + +from nni.common.serializer import _trace_cls +from nni.common.serializer import Traceable + +__all__ = ['OptimizerConstructHelper', 'LRSchedulerConstructHelper'] + + +class ConstructHelper: + def __init__(self, callable_obj: Callable, *args, **kwargs): + assert callable(callable_obj), '`callable_obj` must be a callable object.' + self.callable_obj = callable_obj + self.args = deepcopy(args) + self.kwargs = deepcopy(kwargs) + + def call(self): + args = deepcopy(self.args) + kwargs = deepcopy(self.kwargs) + return self.callable_obj(*args, **kwargs) + + +class OptimizerConstructHelper(ConstructHelper): + def __init__(self, model: Module, optimizer_class: Type[Optimizer], *args, **kwargs): + assert isinstance(model, Module), 'Only support pytorch module.' 
+        assert issubclass(optimizer_class, Optimizer), 'Only PyTorch optimizers are supported.'
+
+        args = list(args)
+        if 'params' in kwargs:
+            kwargs['params'] = self.params2names(model, kwargs['params'])
+        else:
+            args[0] = self.params2names(model, args[0])
+        super().__init__(optimizer_class, *args, **kwargs)
+
+    def params2names(self, model: Module, params: List) -> List[Dict]:
+        param_groups = list(params)
+        assert len(param_groups) > 0
+        if not isinstance(param_groups[0], dict):
+            param_groups = [{'params': param_groups}]
+
+        for param_group in param_groups:
+            params = param_group['params']
+            if isinstance(params, Tensor):
+                params = [params]
+            elif isinstance(params, set):
+                raise TypeError('optimizer parameters need to be organized in ordered collections, but '
+                                'the ordering of tensors in sets will change between runs. Please use a list instead.')
+            else:
+                params = list(params)
+            param_ids = [id(p) for p in params]
+            param_group['params'] = [name for name, p in model.named_parameters() if id(p) in param_ids]
+
+        return param_groups
+
+    def names2params(self, wrapped_model: Module, origin2wrapped_name_map: Dict, params: List[Dict]) -> List[Dict]:
+        param_groups = deepcopy(params)
+        for param_group in param_groups:
+            wrapped_names = [origin2wrapped_name_map.get(name, name) for name in param_group['params']]
+            param_group['params'] = [p for name, p in wrapped_model.named_parameters() if name in wrapped_names]
+        return param_groups
+
+    def call(self, wrapped_model: Module, origin2wrapped_name_map: Dict) -> Optimizer:
+        args = deepcopy(self.args)
+        kwargs = deepcopy(self.kwargs)
+
+        if 'params' in kwargs:
+            kwargs['params'] = self.names2params(wrapped_model, origin2wrapped_name_map, kwargs['params'])
+        else:
+            args[0] = self.names2params(wrapped_model, origin2wrapped_name_map, args[0])
+
+        return self.callable_obj(*args, **kwargs)
+
+    @staticmethod
+    def from_trace(model: Module, optimizer_trace: Traceable):
+        assert isinstance(optimizer_trace, Traceable), \
+            'Please use nni.trace to wrap the optimizer class before initializing the optimizer.'
+        assert isinstance(optimizer_trace, Optimizer), \
+            'It is not an instance of torch.optim.Optimizer.'
+        return OptimizerConstructHelper(model,
+                                        optimizer_trace._get_nni_attr('symbol'),
+                                        *optimizer_trace._get_nni_attr('args'),
+                                        **optimizer_trace._get_nni_attr('kwargs'))
+
+
+class LRSchedulerConstructHelper(ConstructHelper):
+    def __init__(self, lr_scheduler_class: Type[_LRScheduler], *args, **kwargs):
+        args = list(args)
+        if 'optimizer' in kwargs:
+            kwargs['optimizer'] = None
+        else:
+            args[0] = None
+        super().__init__(lr_scheduler_class, *args, **kwargs)
+
+    def call(self, optimizer: Optimizer) -> _LRScheduler:
+        args = deepcopy(self.args)
+        kwargs = deepcopy(self.kwargs)
+
+        if 'optimizer' in kwargs:
+            kwargs['optimizer'] = optimizer
+        else:
+            args[0] = optimizer
+
+        return self.callable_obj(*args, **kwargs)
+
+    @staticmethod
+    def from_trace(lr_scheduler_trace: Traceable):
+        assert isinstance(lr_scheduler_trace, Traceable), \
+            'Please use nni.trace to wrap the lr scheduler class before initializing the scheduler.'
+        assert isinstance(lr_scheduler_trace, _LRScheduler), \
+            'It is not an instance of torch.optim.lr_scheduler._LRScheduler.'
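+        # trace_symbol / trace_args / trace_kwargs below are recorded by nni.trace
+        # when the scheduler is constructed. A hedged usage sketch (StepLR is only
+        # an illustrative choice, not required by this helper):
+        #   scheduler = nni.trace(torch.optim.lr_scheduler.StepLR)(optimizer, step_size=10)
+        #   helper = LRSchedulerConstructHelper.from_trace(scheduler)
+        #   rebuilt = helper.call(new_optimizer)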
+ return LRSchedulerConstructHelper(lr_scheduler_trace.trace_symbol, + *lr_scheduler_trace.trace_args, + **lr_scheduler_trace.trace_kwargs) diff --git a/nni/algorithms/compression/v2/pytorch/utils/pruning.py b/nni/algorithms/compression/v2/pytorch/utils/pruning.py new file mode 100644 index 0000000000000000000000000000000000000000..59a4852a58773de3e53287298b49d5db849a08b5 --- /dev/null +++ b/nni/algorithms/compression/v2/pytorch/utils/pruning.py @@ -0,0 +1,269 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from copy import deepcopy +from typing import Dict, List, Tuple + +import torch +from torch import Tensor +from torch.nn import Module + +weighted_modules = [ + 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d', + 'Linear', 'Bilinear', + 'PReLU', + 'Embedding', 'EmbeddingBag', +] + + +def config_list_canonical(model: Module, config_list: List[Dict]) -> List[Dict]: + ''' + Split the config by op_names if 'sparsity' or 'sparsity_per_layer' in config, + and set the sub_config['total_sparsity'] = config['sparsity_per_layer']. + And every item in 'op_partial_names' will match corresponding 'op_names' in model, + then convert 'op_partial_names' to 'op_names' in config. + + Example:: + model = models.resnet18() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8, 'op_partial_names': ['conv1']}] + pruner = L1NormPruner(model, config_list) + pruner.compress() + pruner.show_pruned_weights() + + In this process, the config_list will implicitly convert to the following: + + [{'op_types': ['Conv2d'], 'sparsity_per_layer': 0.8, + 'op_names': ['conv1', 'layer1.0.conv1', 'layer1.1.conv1', + 'layer2.0.conv1', 'layer2.1.conv1', 'layer3.0.conv1', 'layer3.1.conv1', + 'layer4.0.conv1', 'layer4.1.conv1']}] + ''' + config_list = deepcopy(config_list) + + for config in config_list: + if 'sparsity' in config: + if 'sparsity_per_layer' in config: + raise ValueError("'sparsity' and 'sparsity_per_layer' have the same semantics, can not set both in one config.") + else: + config['sparsity_per_layer'] = config.pop('sparsity') + + for config in config_list: + if 'op_types' in config: + if 'default' in config['op_types']: + config['op_types'].remove('default') + config['op_types'].extend(weighted_modules) + + for config in config_list: + if 'op_partial_names' in config: + op_names = [] + for partial_name in config['op_partial_names']: + for name, _ in model.named_modules(): + if partial_name in name: + op_names.append(name) + if 'op_names' in config: + config['op_names'].extend(op_names) + config['op_names'] = list(set(config['op_names'])) + else: + config['op_names'] = op_names + config.pop('op_partial_names') + + config_list = dedupe_config_list(unfold_config_list(model, config_list)) + new_config_list = [] + + for config in config_list: + if 'sparsity_per_layer' in config: + sparsity_per_layer = config.pop('sparsity_per_layer') + op_names = config.pop('op_names') + for op_name in op_names: + sub_config = deepcopy(config) + sub_config['op_names'] = [op_name] + sub_config['total_sparsity'] = sparsity_per_layer + new_config_list.append(sub_config) + elif 'max_sparsity_per_layer' in config and isinstance(config['max_sparsity_per_layer'], float): + op_names = config.get('op_names', []) + max_sparsity_per_layer = {} + max_sparsity = config['max_sparsity_per_layer'] + for op_name in op_names: + max_sparsity_per_layer[op_name] = max_sparsity + config['max_sparsity_per_layer'] = max_sparsity_per_layer + new_config_list.append(config) + else: + 
new_config_list.append(config)
+
+    return new_config_list
+
+
+def unfold_config_list(model: Module, config_list: List[Dict]) -> List[Dict]:
+    """
+    Unfold config_list to op_names level.
+    """
+    unfolded_config_list = []
+    for config in config_list:
+        op_names = []
+        for module_name, module in model.named_modules():
+            module_type = type(module).__name__
+            if 'op_types' in config and module_type not in config['op_types']:
+                continue
+            if 'op_names' in config and module_name not in config['op_names']:
+                continue
+            op_names.append(module_name)
+        unfolded_config = deepcopy(config)
+        unfolded_config['op_names'] = op_names
+        unfolded_config_list.append(unfolded_config)
+    return unfolded_config_list
+
+
+def dedupe_config_list(config_list: List[Dict]) -> List[Dict]:
+    """
+    Dedupe the op_names in the unfolded config_list.
+    """
+    exclude = set()
+    exclude_idxes = []
+    config_list = deepcopy(config_list)
+    for idx, config in reversed(list(enumerate(config_list))):
+        if 'exclude' in config:
+            exclude.update(config['op_names'])
+            exclude_idxes.append(idx)
+            continue
+        config['op_names'] = sorted(list(set(config['op_names']).difference(exclude)))
+        exclude.update(config['op_names'])
+    for idx in sorted(exclude_idxes, reverse=True):
+        config_list.pop(idx)
+    return config_list
+
+
+def compute_sparsity_compact2origin(origin_model: Module, compact_model: Module, config_list: List[Dict]) -> List[Dict]:
+    """
+    Compare the origin model and the compact model, and return the sparsity of each group mentioned in the config list.
+    A group means all layers mentioned in one config.
+    e.g., a linear layer named 'linear1' has a weight of size [100, 100] in the origin model; if the layer with the
+    same name has a weight of size [100, 50] in the compact model,
+    this function will return [{'op_names': 'linear1', 'total_sparsity': 0.5}].
+    """
+    compact2origin_sparsity = []
+    for config in config_list:
+        left_weight_num = 0
+        total_weight_num = 0
+        for module_name, module in origin_model.named_modules():
+            module_type = type(module).__name__
+            if 'op_types' in config and module_type not in config['op_types']:
+                continue
+            if 'op_names' in config and module_name not in config['op_names']:
+                continue
+            total_weight_num += module.weight.data.numel()
+        for module_name, module in compact_model.named_modules():
+            module_type = type(module).__name__
+            if 'op_types' in config and module_type not in config['op_types']:
+                continue
+            if 'op_names' in config and module_name not in config['op_names']:
+                continue
+            left_weight_num += module.weight.data.numel()
+        compact2origin_sparsity.append(deepcopy(config))
+        compact2origin_sparsity[-1]['total_sparsity'] = 1 - left_weight_num / total_weight_num
+    return compact2origin_sparsity
+
+
+def compute_sparsity_mask2compact(compact_model: Module, compact_model_masks: Dict[str, Dict[str, Tensor]],
+                                  config_list: List[Dict]) -> List[Dict]:
+    """
+    Apply the masks on the compact model and return the sparsity of each group mentioned in the config list.
+    A group means all layers mentioned in one config.
+    This function counts all zero elements of the masks in one group,
+    then divides by the number of weight elements in this group to compute the sparsity.
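+    e.g., if a config group covers two layers with 60 and 40 weight elements and the masks
+    zero out 30 of those 100 elements in total, the reported 'total_sparsity' of the group
+    is 30 / 100 = 0.3.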
+ """ + mask2compact_sparsity = [] + for config in config_list: + left_weight_num = 0 + total_weight_num = 0 + for module_name, module in compact_model.named_modules(): + module_type = type(module).__name__ + if 'op_types' in config and module_type not in config['op_types']: + continue + if 'op_names' in config and module_name not in config['op_names']: + continue + module_weight_num = module.weight.data.numel() + total_weight_num += module_weight_num + if module_name in compact_model_masks: + weight_mask = compact_model_masks[module_name]['weight'] + left_weight_num += len(torch.nonzero(weight_mask, as_tuple=False)) + else: + left_weight_num += module_weight_num + mask2compact_sparsity.append(deepcopy(config)) + mask2compact_sparsity[-1]['total_sparsity'] = 1 - left_weight_num / total_weight_num + return mask2compact_sparsity + + +def compute_sparsity(origin_model: Module, compact_model: Module, compact_model_masks: Dict[str, Dict[str, Tensor]], + config_list: List[Dict]) -> Tuple[List[Dict], List[Dict], List[Dict]]: + """ + This function computes how much the origin model has been compressed in the current state. + The current state means `compact_model` + `compact_model_masks` + (i.e., `compact_model_masks` applied on `compact_model`). + The compact model is the origin model after pruning, + and it may have different structure with origin_model cause of speed up. + + Returns + ------- + Tuple[List[Dict], List[Dict], List[Dict]] + (current2origin_sparsity, compact2origin_sparsity, mask2compact_sparsity). + current2origin_sparsity is how much the origin model has been compressed in the current state. + compact2origin_sparsity is the sparsity obtained by comparing the structure of origin model and compact model. + mask2compact_sparsity is the sparsity computed by count the zero value in the mask. + """ + compact2origin_sparsity = compute_sparsity_compact2origin(origin_model, compact_model, config_list) + mask2compact_sparsity = compute_sparsity_mask2compact(compact_model, compact_model_masks, config_list) + assert len(compact2origin_sparsity) == len(mask2compact_sparsity), 'Length mismatch.' + current2origin_sparsity = [] + for c2o_sparsity, m2c_sparsity, config in zip(compact2origin_sparsity, mask2compact_sparsity, config_list): + current2origin_sparsity.append(deepcopy(config)) + current2origin_sparsity[-1]['total_sparsity'] = 1 - (1 - c2o_sparsity['total_sparsity']) * (1 - m2c_sparsity['total_sparsity']) + return current2origin_sparsity, compact2origin_sparsity, mask2compact_sparsity + + +def get_model_weights_numel(model: Module, config_list: List[Dict], masks: Dict[str, Dict[str, Tensor]] = {}) -> Dict: + """ + Count the layer weight elements number in config_list. + If masks is not empty, the masked weight will not be counted. 
+ """ + model_weights_numel = {} + masked_rate = {} + for config in config_list: + for module_name, module in model.named_modules(): + module_type = type(module).__name__ + if 'op_types' in config and module_type not in config['op_types']: + continue + if 'op_names' in config and module_name not in config['op_names']: + continue + if module_name in masks and isinstance(masks[module_name]['weight'], Tensor): + weight_mask = masks[module_name]['weight'] + masked_rate[module_name] = 1 - (weight_mask.sum().item() / weight_mask.numel()) + model_weights_numel[module_name] = round(weight_mask.sum().item()) + else: + model_weights_numel[module_name] = module.weight.data.numel() + return model_weights_numel, masked_rate + + +# FIXME: to avoid circular import, copy this function in this place +def get_module_by_name(model, module_name): + """ + Get a module specified by its module name + Parameters + ---------- + model : pytorch model + the pytorch model from which to get its module + module_name : str + the name of the required module + Returns + ------- + module, module + the parent module of the required module, the required module + """ + name_list = module_name.split(".") + for name in name_list[:-1]: + if hasattr(model, name): + model = getattr(model, name) + else: + return None, None + if hasattr(model, name_list[-1]): + leaf_module = getattr(model, name_list[-1]) + return model, leaf_module + else: + return None, None diff --git a/nni/algorithms/feature_engineering/__init__.py b/nni/algorithms/feature_engineering/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/feature_engineering/gbdt_selector/__init__.py b/nni/algorithms/feature_engineering/gbdt_selector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72970ab8565e11e62bae1242a4a8a00c06993cf7 --- /dev/null +++ b/nni/algorithms/feature_engineering/gbdt_selector/__init__.py @@ -0,0 +1 @@ +from .gbdt_selector import GBDTSelector \ No newline at end of file diff --git a/nni/algorithms/feature_engineering/gbdt_selector/gbdt_selector.py b/nni/algorithms/feature_engineering/gbdt_selector/gbdt_selector.py new file mode 100644 index 0000000000000000000000000000000000000000..9ee09c25a38f1dca27fb4ab068bbceeaec69a3bd --- /dev/null +++ b/nni/algorithms/feature_engineering/gbdt_selector/gbdt_selector.py @@ -0,0 +1,115 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# ==================================================================================================
+
+"""
+gbdt_selector.py including:
+    class GBDTSelector
+"""
+
+from sklearn.model_selection import train_test_split
+
+# pylint: disable=E0401
+import lightgbm as lgb
+
+from nni.feature_engineering.feature_selector import FeatureSelector
+
+
+class GBDTSelector(FeatureSelector):
+
+    def __init__(self, **kwargs):
+        self.selected_features_ = None
+        self.X = None
+        self.y = None
+        self.feature_importance = None
+        self.lgb_params = None
+        self.eval_ratio = None
+        self.early_stopping_rounds = None
+        self.importance_type = None
+        self.num_boost_round = None
+        self.model = None
+
+    def fit(self, X, y, **kwargs):
+        """
+        Fit the training data to FeatureSelector
+
+        Parameters
+        ----------
+        X : array-like numpy matrix
+            The training input samples, with shape [n_samples, n_features].
+        y : array-like numpy matrix
+            The target values (class labels in classification, real numbers in
+            regression), with shape [n_samples].
+        lgb_params : dict
+            Parameters of lightgbm.
+        eval_ratio : float
+            The ratio of data size. It is used to split the eval data and train data from self.X.
+        early_stopping_rounds : int
+            The early stopping setting in lightgbm.
+        importance_type : str
+            Supported types are 'gain' and 'split'.
+        num_boost_round : int
+            num_boost_round in lightgbm.
+        """
+        assert kwargs['lgb_params']
+        assert kwargs['eval_ratio']
+        assert kwargs['early_stopping_rounds']
+        assert kwargs['importance_type']
+        assert kwargs['num_boost_round']
+
+        self.X = X
+        self.y = y
+        self.lgb_params = kwargs['lgb_params']
+        self.eval_ratio = kwargs['eval_ratio']
+        self.early_stopping_rounds = kwargs['early_stopping_rounds']
+        self.importance_type = kwargs['importance_type']
+        self.num_boost_round = kwargs['num_boost_round']
+
+        X_train, X_test, y_train, y_test = train_test_split(self.X,
+                                                            self.y,
+                                                            test_size=self.eval_ratio,
+                                                            random_state=41)
+        lgb_train = lgb.Dataset(X_train, y_train)
+        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
+
+        self.model = lgb.train(self.lgb_params,
+                               lgb_train,
+                               num_boost_round=self.num_boost_round,
+                               valid_sets=lgb_eval,
+                               early_stopping_rounds=self.early_stopping_rounds)
+
+        self.feature_importance = self.model.feature_importance(self.importance_type)
+
+    def get_selected_features(self, topk):
+        """
+        Get the topk most important features after fitting.
+
+        Returns
+        -------
+        numpy.ndarray :
+            Indices of the topk important features, ordered by descending importance.
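+        Example::
+            # a minimal usage sketch with illustrative parameter values
+            selector = GBDTSelector()
+            selector.fit(X, y,
+                         lgb_params={'objective': 'binary', 'verbose': -1},
+                         eval_ratio=0.2,
+                         early_stopping_rounds=10,
+                         importance_type='gain',
+                         num_boost_round=100)
+            top10 = selector.get_selected_features(topk=10)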
+ """ + assert topk > 0 + + self.selected_features_ = self.feature_importance.argsort()[-topk:][::-1] + + return self.selected_features_ diff --git a/nni/algorithms/feature_engineering/gbdt_selector/requirements.txt b/nni/algorithms/feature_engineering/gbdt_selector/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b73c05e89d156249906203b67c2e9f786d613738 --- /dev/null +++ b/nni/algorithms/feature_engineering/gbdt_selector/requirements.txt @@ -0,0 +1 @@ +lightgbm \ No newline at end of file diff --git a/nni/algorithms/feature_engineering/gradient_selector/__init__.py b/nni/algorithms/feature_engineering/gradient_selector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a43cb7578dcb38cbccc33fa11ab6bafc0a71b1fd --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/__init__.py @@ -0,0 +1 @@ +from .gradient_selector import FeatureGradientSelector \ No newline at end of file diff --git a/nni/algorithms/feature_engineering/gradient_selector/constants.py b/nni/algorithms/feature_engineering/gradient_selector/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..0f70e2043af51bcf35c7514a81889203a9017ccb --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/constants.py @@ -0,0 +1,100 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ================================================================================================== + + +import numpy as np + + +class StorageLevel: + DISK = 'disk' + SPARSE = 'sparse' + DENSE = 'dense' + + +class DataFormat: + SVM = 'svm' + NUMPY = 'numpy' + ALL_FORMATS = [SVM, NUMPY] + + +class Preprocess: + """ + center the data to mean 0 and create unit variance + center the data to mean 0 + """ + ZSCORE = 'zscore' + CENTER = 'center' + + +class Device: + CUDA = 'cuda' + CPU = 'cpu' + + +class Checkpoint: + MODEL = 'model_state_dict' + OPT = 'optimizer_state_dict' + RNG = 'torch_rng_state' + + +class NanError(ValueError): + pass + + +class Initialization: + ZERO = 'zero' + ON = 'on' + OFF = 'off' + ON_HIGH = 'onhigh' + OFF_HIGH = 'offhigh' + SKLEARN = 'sklearn' + RANDOM = 'random' + VALUE_DICT = {ZERO: 0, + ON: 1, + OFF: -1, + ON_HIGH: 5, + OFF_HIGH: -1, + SKLEARN: None, + RANDOM: None} + + +class Coefficients: + """" + coefficients for sublinear estimator were computed running the sublinear + paper's authors' code + """ + SLE = {1: np.array([0.60355337]), + 2: np.array([1.52705001, -0.34841729]), + 3: np.array([2.90254224, -1.87216745, 0.]), + 4: np.array([4.63445685, -5.19936195, 0., 1.50391676]), + 5: np.array([6.92948049, -14.12216211, 9.4475009, 0., -1.21093546]), + 6: np.array([9.54431082, -28.09414643, 31.84703652, -11.18763791, -1.14175281, 0.]), + 7: np.array([12.54505041, -49.64891525, 79.78828031, -46.72250909, 0., 0., 5.02973646]), + 8: np.array([16.03550163, -84.286182, 196.86078756, -215.36747071, 92.63961263, 0., 0., -4.86280869]), + 9: np.array([19.86409184, -130.76801006, 390.95349861, -570.09210416, 354.77764899, 0., -73.84234865, 0., 10.09148767]), + 10: np.array([2.41117752e+01, -1.94946061e+02, 7.34214614e+02, -1.42851995e+03, 1.41567410e+03, \ + -5.81738134e+02, 0., 0., 3.11664751e+01, 1.05018365e+00]), + 11: np.array([28.75280839, -279.22576729, 1280.46325445, -3104.47148101, 3990.6092248, -2300.29413333, \ + 0., 427.35289033, 0., 0., -42.17587475]), + 12: np.array([33.85141912, -391.4229382, 2184.97827882, -6716.28280208, 11879.75233977, -11739.97267239, \ + 5384.94542245, 0., -674.23291712, 0., 0., 39.37456439])} + + +EPSILON = 1e-8 diff --git a/nni/algorithms/feature_engineering/gradient_selector/fginitialize.py b/nni/algorithms/feature_engineering/gradient_selector/fginitialize.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe28ea5ee5494bc63a1dede2867ed3d2ec9f9a6 --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/fginitialize.py @@ -0,0 +1,611 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# ================================================================================================== + + +import os +import pickle +import sys +import time + +import numpy as np +import scipy.sparse +from sklearn.datasets import load_svmlight_file + +import torch +from torch.utils.data import DataLoader, Dataset +# pylint: disable=E0611 +from torch.utils.data.dataloader import _SingleProcessDataLoaderIter, _MultiProcessingDataLoaderIter, _utils + +from . import constants +from . import syssettings + +torch.set_default_tensor_type(syssettings.torch.tensortype) +sparsetensor = syssettings.torch.sparse.tensortype + +BYTESPERREAL = 8. +BYTESPERGB = 1024. ** 3 + + +class PrepareData(Dataset): + + def __init__(self, + path_data=None, + data_format=constants.DataFormat.NUMPY, + D=None, N=None, + classification=True, + ordinal=False, + balanced=True, + preprocess=None, + n_to_estimate=None, + MAXMEMGB=syssettings.MAXMEMGB, + set_params=True, + path_mappings=None, + X=None, + y=None, + verbose=0, + n_classes=None, + device=constants.Device.CPU): + """ + Dataset class with helpful features and functions for being included in a dataloader + and managing memory usage. + can read following formats: + svm: svm light format (sklearn.datasets.load_svmlight_file) + numpy: Pass X and y as numpy or sparse arrays + + assumes + 1. if classification, y is in {-1, 1} or continuous and 0 indexed + 2. y can fit into memory + 3. consecutive calls to __getitem__() have consecutive idx values + + notes: + 1. this implementation is not careful wrt/ precise memory reqts. for + example, being able to store one dense row in memory is necessary, + but not sufficient. + 2. for y with 4.2 billion elements, 31.3 GB of memory is necessary + @ 8 bytes/scalar. Use partial fit to avoid loading the entire dataset + at once + 3. disk_size always refer to size of complete data file, even after + a split(). + + + Parameters + ---------- + path_data : str + Path to load data from + data_format : str + File ending for path data. + "numpy" is the default when passing in X and y + D : int + Number of features. + N : int + Number of rows. + classification : bool + If True, problem is classification, else regression. + ordinal: bool + If True, problem is ordinal classification. Requires classification to be True. + balanced : bool + If true, each class is weighted equally in optimization, otherwise + weighted is done via support of each class. Requires classification to be True. + prerocess : str + 'zscore' which refers to centering and normalizing data to unit variance or + 'center' which only centers the data to 0 mean + n_to_estimate : int + Number of rows of data to estimate + MAXMEMGB : float + Maximum allowable size for a minibatch + set_params : bool + Whether or not to determine the statistics of the dataset + path_mappings : str + Used when streaming from disk + X : array-like + Shape = [n_samples, n_features] + The training input samples. + y : array-like + Shape = [n_samples] + The target values (class labels in classification, real numbers in + regression). + verbose : int + Controls the verbosity when fitting. Set to 0 for no printing + 1 or higher for printing every verbose number of gradient steps. + device : str + 'cpu' to run on CPU and 'cuda' to run on GPU. 
Runs much faster on GPU + n_classes : int + number of classes + """ + + self.path_data = path_data + if self.path_data: + self.disk_size = os.path.getsize(path_data) + else: + assert X is not None, 'X must be specified if no path data' + self.disk_size = X.nbytes if not scipy.sparse.issparse( + X) else X.data.nbytes + assert data_format in constants.DataFormat.ALL_FORMATS, 'Format must in {0}.'.format( + ", ".join(constants.DataFormat.ALL_FORMATS)) + self.format = data_format + self.classification = classification + self.ordinal = ordinal + self.balanced = balanced + self.MAXMEMGB = MAXMEMGB + self.preprocess = preprocess + self.set_params = set_params + self.verbose = verbose + self.n_classes = n_classes + self.device = device + + self.path_data_stats = None + + if D is None: + assert self.disk_size / BYTESPERGB <= self.MAXMEMGB, \ + 'Cannot load data into memory. Supply D.' + + if self.format == constants.DataFormat.SVM: + self.X, self.y = load_svmlight_file(path_data) + elif self.format == constants.DataFormat.NUMPY: + assert X is not None, 'X must be specified in numpy mode' + assert y is not None, 'y must be specified in numpy mode' + self.X = X + self.y = y + if self.n_classes is None: + self.n_classes = np.unique(y).shape[0] + elif self.classification: + assert self.n_classes >= np.unique(y).shape[0], \ + 'n_classes given must be greater than or equal to the number of classes in y' + else: + raise NotImplementedError + self.y = torch.as_tensor(self.y, dtype=torch.get_default_dtype()) + + self.N, self.D = self.X.shape + + # assumes X was returned as a sparse array + self.storage_level = (constants.StorageLevel.SPARSE + if scipy.sparse.issparse(self.X) + else constants.StorageLevel.DENSE) + + else: + assert N is not None, 'Supply N.' + self.N, self.D = N, D + + # assume sparse matrix cannot fit into memory + self.storage_level = constants.StorageLevel.DISK + + self.dense_size_gb = self.get_dense_size() + + # check dense size + self.set_dense_X() + + self.max_rows = int(self.MAXMEMGB * BYTESPERGB / BYTESPERREAL / self.D) + assert self.max_rows, \ + 'Cannot fit one dense row into %d GB memory.' % self.MAXMEMGB + self.max_rows = self.max_batch_size() + sys.stdout.flush() + + if n_to_estimate is None: + self.n_to_estimate = self.max_batch_size() + else: + assert n_to_estimate <= self.N, 'n_to_estimate must be <= N.' 
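+            # n_to_estimate rows are later sampled into ix_statistics and used by
+            # compute_data_stats() to estimate feature means/stds and the top
+            # singular value of the data matrix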
+ self.n_to_estimate = n_to_estimate + + # initialize disk loader + if self.storage_level == constants.StorageLevel.DISK and self.set_params: + if self.format == constants.DataFormat.SVM: + raise NotImplementedError( + 'Please use partial fit to train on datasets that do not fit in memory') + else: + raise NotImplementedError + + # TODO: use a passed-in RNG here + self.ix_statistics = np.random.permutation(self.N)[:self.n_to_estimate] + self.n_features = self.D + if self.set_params: + if self.verbose: + print('Finding data statistics...', end='') + sys.stdout.flush() + Xmn, sv1, Xsd, ymn, ysd = self.compute_data_stats() + self.set_data_stats(Xmn, sv1, Xsd, ymn, ysd) + if self.verbose: + print() + self.set_return_raw(False) + else: + self.set_return_raw(True) + + self.set_return_np(False) + + # this needs to occur after setting preprocessing params + if (self.storage_level == constants.StorageLevel.DISK and + self.format == constants.DataFormat.SVM and self.set_params): + self.loader.batchsize = 1 + + def get_dense_size(self): + return self.N * self.D * BYTESPERREAL / BYTESPERGB + + def set_dense_X(self): + if self.storage_level != constants.StorageLevel.DISK: + if self.dense_size_gb <= self.MAXMEMGB: + if self.storage_level == constants.StorageLevel.SPARSE: + self.X = self.X.toarray() + self.X = torch.as_tensor( + self.X, dtype=torch.get_default_dtype()) + self.storage_level = constants.StorageLevel.DENSE + + def set_return_np(self, boolean): + + self.return_np = boolean + + def set_return_raw(self, boolean): + + self.return_raw = boolean + + def save_data_stats(self, path_data_stats): + """ + Dumps dataset statistics to pickle file. + """ + + data_stats = { + 'Xmn': self.Xmn, + 'sv1': self.sv1, + 'Xsd': self.Xsd, + 'ymn': self.ymn, + 'ysd': self.ysd, + 'ix_statistics': self.ix_statistics, + } + pickle.dump(data_stats, open(path_data_stats, 'wb')) + + def load_data_stats(self, path_data_stats): + + stats = pickle.load(open(path_data_stats, 'rb')) + self.path_data_stats = path_data_stats + + self.set_data_stats(np.asarray(stats['Xmn']), stats['sv1'], + stats['Xsd'], stats['ymn'], stats['ysd']) + + if self.storage_level == constants.StorageLevel.DISK and hasattr( + self, 'path_mappings'): + if 'ix_statistics' in stats: + self.ix_statistics = stats['ix_statistics'] + else: + self.ix_statistics = range(self.N) + + self.set_return_raw(False) + + def reset(self): + """ + Resets the dataloader. Only implemented for disk StorageLevel. + """ + + if self.storage_level == constants.StorageLevel.DENSE: + pass + elif self.storage_level == constants.StorageLevel.SPARSE: + pass + elif self.storage_level == constants.StorageLevel.DISK: + if self.format == constants.DataFormat.SVM: + self.loader.reset() + else: + raise NotImplementedError + + def todense(self): + + assert hasattr(self, 'Xmn'), 'Set preprocess params first.' + assert len(self) <= self.max_batch_size( + ), 'N must be <= max_batch_size().' + + with torch.no_grad(): + dense, _ = self.split(range(len(self))) + Braw = self.return_raw + Bnp = self.return_np + self.set_return_raw(True) + self.set_return_np(True) + dense.X, dense.y = [], [] + + def f_Xy(X, y): + dense.X.append(X) + dense.y.append(y) + self.apply(f_Xy=f_Xy) + dense.X = dense.X[-1] + dense.y = dense.y[-1] + self.set_return_raw(Braw) + self.set_return_np(Bnp) + dense.storage_level = constants.StorageLevel.DENSE + + return dense + + def split(self, ix): + + assert hasattr(self, 'Xmn'), 'Run set_preprocess_params() first.' 
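+        # split() returns two PrepareData views over disjoint row subsets that reuse
+        # the preprocessing statistics (Xmn, sv1, and, where applicable, Xsd/ymn/ysd)
+        # computed on the full dataset, e.g. for carving out a validation split.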
+ + first = type(self)( + self.path_data, + self.format, + self.D, + N=len(ix), + classification=self.classification, + preprocess=self.preprocess, + n_to_estimate=None, + MAXMEMGB=self.MAXMEMGB, + set_params=False) + second = type(self)( + self.path_data, + self.format, + self.D, + N=self.N - len(ix), + classification=self.classification, + preprocess=self.preprocess, + n_to_estimate=None, + MAXMEMGB=self.MAXMEMGB, + set_params=False) + + first.storage_level = self.storage_level + second.storage_level = self.storage_level + + # copy preprocess params + if not self.classification: + first.ymn = self.ymn + second.ymn = self.ymn + first.ysd = self.ysd + second.ysd = self.ysd + + first.Xmn = self.Xmn + second.Xmn = self.Xmn + first.sv1 = self.sv1 + second.sv1 = self.sv1 + + if self.storage_level == constants.StorageLevel.DISK: + if self.format == constants.DataFormat.SVM: + first.Xsd = self.Xsd + second.Xsd = self.Xsd + else: + raise NotImplementedError + + # initialize data structures + if self.storage_level == constants.StorageLevel.DISK: + if self.format == constants.DataFormat.SVM: + raise NotImplementedError + raise NotImplementedError + elif self.storage_level in [constants.StorageLevel.SPARSE, + constants.StorageLevel.DENSE]: + first.X, first.y = self.X[ix], self.y[ix] + ixsec = list(set(range(self.N)).difference(set(ix))) + second.X, second.y = self.X[ixsec], self.y[ixsec] + + return first, second + + @staticmethod + def sparse_std(X, X_mean): + """ + Calculate the column wise standard deviations of a sparse matrix. + """ + X_copy = X.copy() + X_copy.data **= 2 # square non zero elements + E_x_squared = np.array(X_copy.mean(axis=0)).ravel() + Xsd = np.sqrt(E_x_squared - X_mean**2) + return Xsd + + def compute_data_stats(self): + """ + 1. computes/estimates feature means + 2. if preprocess == 'zscore', computes/estimates feature standard devs + 3. if not classification, computes/estimates target mean/standard dev + 4. estimates largest singular value of data matrix + """ + t = time.time() + X, y = self.X[self.ix_statistics], self.y[self.ix_statistics] + preprocess = self.preprocess + classification = self.classification + + Xmn = (X.mean(dim=0) + if not scipy.sparse.issparse(X) + else np.array(X.mean(axis=0)).ravel()) + + if preprocess == constants.Preprocess.ZSCORE: + Xsd = (X.std(dim=0) + if not scipy.sparse.issparse(X) + else PrepareData.sparse_std(X, Xmn)) + Xsd[Xsd == 0] = 1. + else: + Xsd = 1. + + if preprocess is not None and preprocess: + if preprocess == constants.Preprocess.ZSCORE: + Xc = (X - Xmn) / Xsd + else: + Xc = X - Xmn + else: + Xc = X - Xmn + + sv1 = scipy.sparse.linalg.svds(Xc / ( + torch.sqrt(torch.prod(torch.as_tensor(y.size(), dtype=torch.get_default_dtype()))) + if not scipy.sparse.issparse(X) else y.numpy().size), + k=1, + which='LM', + return_singular_vectors=False) + # avoid runaway sv1 + sv1 = np.array([min(np.finfo(np.float32).max, + sv1[0])]) + + if not classification: + ymn = y.mean() + ysd = y.std() + else: + # TODO: set these, for each class? + ymn = 0. + ysd = 1. + if self.verbose: + print(" computing data statistics took: ", time.time() - t) + + return Xmn, sv1, Xsd, ymn, ysd + + + def set_data_stats(self, Xmn, sv1, Xsd=1., ymn=0., ysd=1.): + """ + Saves dataset stats to self to be used for preprocessing. 
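+        apply_preprocess() later uses them as X <- (X - Xmn) / sv1 (additionally divided
+        by Xsd when preprocess is 'zscore'), and y <- (y - ymn) / ysd for regression targets.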
+ """ + + self.Xmn = torch.as_tensor( + Xmn, dtype=torch.get_default_dtype()).to(self.device) + self.sv1 = torch.as_tensor( + sv1, dtype=torch.get_default_dtype()).to(self.device) + self.Xsd = torch.as_tensor( + Xsd, dtype=torch.get_default_dtype()).to(self.device) + self.ymn = torch.as_tensor( + ymn, dtype=torch.get_default_dtype()).to(self.device) + self.ysd = torch.as_tensor( + ysd, dtype=torch.get_default_dtype()).to(self.device) + + + def apply_preprocess(self, X, y): + """ + Faster on gpu device, while dataloading takes up a large portion of the time. + """ + + with torch.no_grad(): + if not self.classification: + y = (y.reshape((-1, 1)) - self.ymn) / self.ysd + else: + y = y.reshape((-1, 1)) + X = (X - self.Xmn) / self.sv1 + + if self.preprocess == constants.Preprocess.ZSCORE: + X /= self.Xsd + + return X, y + + + def max_batch_size(self): + """ + Return the maximum batchsize for the dataset. + """ + + return int(np.min([self.max_rows, self.N])) + + + def apply(self, ix_rows=None, ix_cols=None, f_Xy=None): + + if f_Xy is None: + return + + if ix_rows is None: + ix_rows = range(self.N) + + if ix_cols is None: + ix_cols = range(self.n_features) + + f_Xy((self.X[ix_rows, ix_cols] + if not self.storage_level == constants.StorageLevel.SPARSE + else self.X[ix_rows, ix_cols].toarray()), self.y[ix_rows]) + + + def get_dense_data(self, ix_cols=None, ix_rows=None): + + if ix_cols is None: + ix_cols = range(self.n_features) + + X = [np.zeros((0, len(ix_cols)))] + y = [np.zeros((0, 1))] + Bnp = self.return_np + + def f_Xy(Xb, yb, n): + X[-1] = np.concatenate((X[-1], Xb), axis=0) + y[-1] = np.concatenate((y[-1], yb), axis=0) + self.apply(f_Xy=f_Xy, ix_rows=ix_rows, ix_cols=ix_cols) + self.set_return_np(Bnp) + + return X[-1], y[-1] + + + def __len__(self): + + return self.N + + + def getXy(self, idx): + + if self.storage_level == constants.StorageLevel.DENSE: + X, y = self.X[idx], self.y[idx] + elif self.storage_level == constants.StorageLevel.SPARSE: + # assume subset can fit into memory even if whole matrix cant + X, y = self.X[idx].toarray(), self.y[idx] + else: + raise NotImplementedError + + return X, y + + + def __getitem__(self, idx): + + with torch.no_grad(): + X, y = self.getXy(idx) + X = X.toarray() if scipy.sparse.issparse(X) else X + + X = torch.as_tensor( + X, dtype=torch.get_default_dtype()).to(self.device) + y = torch.as_tensor( + y, dtype=torch.get_default_dtype()).to(self.device) + + if not self.return_raw: + X, y = self.apply_preprocess(X, y) + + if self.classification and ( + self.n_classes is None or self.n_classes == 2): + y[y == 0] = -1 + + if self.return_np: + if constants.Device.CPU not in self.device: + X = X.cpu() + y = y.cpu() + X = X.numpy() + y = y.numpy() + return X, y + + return X, y + + +class ChunkDataLoader(DataLoader): + """ + DataLoader class used to more quickly load a batch of indices at once. + """ + + def __iter__(self): + return _ChunkDataLoaderIter(self) + + +class _ChunkDataLoaderIter: + """ + DataLoaderIter class used to more quickly load a batch of indices at once. 
+ """ + def __init__(self, dataloader): + if dataloader.num_workers == 0: + self.iter = _SingleProcessDataLoaderIter(dataloader) + else: + self.iter = _MultiProcessingDataLoaderIter(dataloader) + + def __next__(self): + # only chunk that is edited from base + if self.iter._num_workers == 0: # same-process loading + indices = next(self.iter._sampler_iter) # may raise StopIteration + if len(indices) > 1: + batch = self.iter._dataset[np.array(indices)] + else: + batch = self.iter._collate_fn([self.iter._dataset[i] for i in indices]) + + if self.iter._pin_memory: + batch = _utils.pin_memory.pin_memory_batch(batch) + return batch + else: + return next(self.iter) diff --git a/nni/algorithms/feature_engineering/gradient_selector/fgtrain.py b/nni/algorithms/feature_engineering/gradient_selector/fgtrain.py new file mode 100644 index 0000000000000000000000000000000000000000..377d72691613b93911e2bbc164e3c677ac31f574 --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/fgtrain.py @@ -0,0 +1,228 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# ================================================================================================== + + +import time + +import numpy as np +import torch +from sklearn.feature_selection import SelectKBest, \ + f_classif, mutual_info_classif, f_regression, mutual_info_regression + +from . import constants +from . import syssettings +from .learnability import Solver +from .utils import EMA + +torch.set_default_tensor_type(syssettings.torch.tensortype) + + +def get_optim_f_stop(maxiter, maxtime, dftol_stop, freltol_stop, + minibatch=True): + """ + Check stopping conditions. + """ + + discount_factor = 1. / 3 + + total_t = [0.] 
+ df_store = [np.nan] + it_store = [0] + relchange_store = [np.nan] + f_ma = EMA(discount_factor=discount_factor) + df_ma = EMA(discount_factor=discount_factor) + + def f_stop(f0, v0, it, t): + + flag_stop = False + + total_t[-1] += t + g = f0.x.grad.clone().cpu().detach() + df = g.abs().max().numpy().squeeze() + v = v0.clone().cpu().detach() + f = v.numpy().squeeze() + + if it >= maxiter: + flag_stop = True + + elif total_t[-1] >= maxtime: + flag_stop = True + + f_ma.update(f) + df_ma.update(df) + rel_change = f_ma.relchange() + + if ((not minibatch) and (df < dftol_stop)) \ + or (minibatch and (df_ma() < dftol_stop)): + flag_stop = True + + if rel_change < freltol_stop: + flag_stop = True + + if not minibatch: + df_store[-1] = df + else: + df_store[-1] = df_ma() + relchange_store[-1] = rel_change + it_store[-1] = it + + return flag_stop + + return f_stop, {'t': total_t, 'it': it_store, 'df': df_store, + 'relchange': relchange_store} + + +def get_init(data_train, init_type='on', rng=np.random.RandomState(0), prev_score=None): + """ + Initialize the 'x' variable with different settings + """ + + D = data_train.n_features + value_off = constants.Initialization.VALUE_DICT[ + constants.Initialization.OFF] + value_on = constants.Initialization.VALUE_DICT[ + constants.Initialization.ON] + + if prev_score is not None: + x0 = prev_score + elif not isinstance(init_type, str): + x0 = value_off * np.ones(D) + x0[init_type] = value_on + elif init_type.startswith(constants.Initialization.RANDOM): + d = int(init_type.replace(constants.Initialization.RANDOM, '')) + x0 = value_off * np.ones(D) + x0[rng.permutation(D)[:d]] = value_on + elif init_type == constants.Initialization.SKLEARN: + B = data_train.return_raw + X, y = data_train.get_dense_data() + data_train.set_return_raw(B) + ix = train_sk_dense(init_type, X, y, data_train.classification) + x0 = value_off * np.ones(D) + x0[ix] = value_on + elif init_type in constants.Initialization.VALUE_DICT: + x0 = constants.Initialization.VALUE_DICT[init_type] * np.ones(D) + else: + raise NotImplementedError( + 'init_type {0} not supported yet'.format(init_type)) + # pylint: disable=E1102 + return torch.tensor(x0.reshape((-1, 1)), + dtype=torch.get_default_dtype()) + + +def get_checkpoint(S, stop_conds, rng=None, get_state=True): + """ + Save the necessary information into a dictionary + """ + + m = {} + m['ninitfeats'] = S.ninitfeats + m['x0'] = S.x0 + x = S.x.clone().cpu().detach() + m['feats'] = np.where(x.numpy() >= 0)[0] + m.update({k: v[0] for k, v in stop_conds.items()}) + if get_state: + m.update({constants.Checkpoint.MODEL: S.state_dict(), + constants.Checkpoint.OPT: S.opt_train.state_dict(), + constants.Checkpoint.RNG: torch.get_rng_state(), + }) + if rng: + m.update({'rng_state': rng.get_state()}) + + return m + + +def _train(data_train, Nminibatch, order, C, rng, lr_train, debug, maxiter, + maxtime, init, dftol_stop, freltol_stop, dn_log, accum_steps, + path_save, shuffle, device=constants.Device.CPU, + verbose=1, + prev_checkpoint=None, + groups=None, + soft_groups=None): + """ + Main training loop. 
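+    Feature scores are optimized through ftransform = sigmoid(2 * x), so each
+    feature's relevance lies in (0, 1); features with x >= 0 (score >= 0.5) are
+    reported as 'feats' in the returned checkpoint.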
+ """ + + t_init = time.time() + + x0 = get_init(data_train, init, rng) + if isinstance(init, str) and init == constants.Initialization.ZERO: + ninitfeats = -1 + else: + ninitfeats = np.where(x0.detach().numpy() > 0)[0].size + + S = Solver(data_train, order, + Nminibatch=Nminibatch, x0=x0, C=C, + ftransform=lambda x: torch.sigmoid(2 * x), + get_train_opt=lambda p: torch.optim.Adam(p, lr_train), + rng=rng, + accum_steps=accum_steps, + shuffle=shuffle, + groups=groups, + soft_groups=soft_groups, + device=device, + verbose=verbose) + S = S.to(device) + + S.ninitfeats = ninitfeats + S.x0 = x0 + + if prev_checkpoint: + S.load_state_dict(prev_checkpoint[constants.Checkpoint.MODEL]) + S.opt_train.load_state_dict(prev_checkpoint[constants.Checkpoint.OPT]) + torch.set_rng_state(prev_checkpoint[constants.Checkpoint.RNG]) + + minibatch = S.Ntrain != S.Nminibatch + + f_stop, stop_conds = get_optim_f_stop(maxiter, maxtime, dftol_stop, + freltol_stop, minibatch=minibatch) + + if debug: + pass + else: + f_callback = None + stop_conds['t'][-1] = time.time() - t_init + + S.train(f_stop=f_stop, f_callback=f_callback) + + return get_checkpoint(S, stop_conds, rng), S + + +def train_sk_dense(ty, X, y, classification): + if classification: + if ty.startswith('skf'): + d = int(ty.replace('skf', '')) + f_sk = f_classif + elif ty.startswith('skmi'): + d = int(ty.replace('skmi', '')) + f_sk = mutual_info_classif + else: + if ty.startswith('skf'): + d = int(ty.replace('skf', '')) + f_sk = f_regression + elif ty.startswith('skmi'): + d = int(ty.replace('skmi', '')) + f_sk = mutual_info_regression + t = time.time() + clf = SelectKBest(f_sk, k=d) + clf.fit_transform(X, y.squeeze()) + ix = np.argsort(-clf.scores_) + ix = ix[np.where(np.invert(np.isnan(clf.scores_[ix])))[0]][:d] + t = time.time() - t + return {'feats': ix, 't': t} diff --git a/nni/algorithms/feature_engineering/gradient_selector/gradient_selector.py b/nni/algorithms/feature_engineering/gradient_selector/gradient_selector.py new file mode 100644 index 0000000000000000000000000000000000000000..f7cb69f627442cb8b749f792cecfa0fea7526e1b --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/gradient_selector.py @@ -0,0 +1,631 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ==================================================================================================
+
+import time
+
+import numpy as np
+import pandas as pd
+
+from sklearn.base import BaseEstimator
+from sklearn.feature_selection import SelectorMixin
+from sklearn.utils.validation import check_is_fitted
+
+import torch
+
+from nni.feature_engineering.feature_selector import FeatureSelector
+from . import constants
+from .fginitialize import PrepareData
+from .fgtrain import _train
+
+
+class FeatureGradientSelector(FeatureSelector, BaseEstimator, SelectorMixin):
+    def __init__(self,
+                 order=4,
+                 penalty=1,
+                 n_features=None,
+                 max_features=None,
+                 learning_rate=1e-1,
+                 init='zero',
+                 n_epochs=1,
+                 shuffle=True,
+                 batch_size=1000,
+                 target_batch_size=1000,
+                 max_time=np.inf,
+                 classification=True,
+                 ordinal=False,
+                 balanced=True,
+                 preprocess='zscore',
+                 soft_grouping=False,
+                 verbose=0,
+                 device='cpu'):
+        """
+        FeatureGradientSelector is a class that selects features for a machine
+        learning model using a gradient based search.
+
+        Parameters
+        ----------
+        order : int
+            What order of interactions to include. Higher orders
+            may be more accurate but increase the run time. 12 is the maximum allowed order.
+        penalty : int
+            Constant that multiplies the regularization term.
+        n_features : int
+            If None, will automatically choose number of features based on search.
+            Otherwise, number of top features to select.
+        max_features : int
+            If not None, will use the 'elbow method' to determine the number of features
+            with max_features as the upper limit.
+        learning_rate : float
+            Learning rate of the gradient-based search.
+        init : str
+            How to initialize the vector of scores. 'zero' is the default.
+            Options: {'zero', 'on', 'off', 'onhigh', 'offhigh', 'sklearn'}
+        n_epochs : int
+            Number of epochs to run.
+        shuffle : bool
+            Shuffle "rows" prior to an epoch.
+        batch_size : int
+            Number of "rows" to process at a time.
+        target_batch_size : int
+            Number of "rows" to accumulate gradients over.
+            Useful when many rows will not fit into memory but are needed for accurate estimation.
+        max_time : float
+            Maximum allowed run time, in seconds.
+        classification : bool
+            If True, problem is classification, else regression.
+        ordinal : bool
+            If True, problem is ordinal classification. Requires classification to be True.
+        balanced : bool
+            If True, each class is weighted equally in optimization, otherwise
+            weighting is done via the support of each class. Requires classification to be True.
+        preprocess : str
+            'zscore' which refers to centering and normalizing data to unit variance or
+            'center' which only centers the data to 0 mean.
+        soft_grouping : bool
+            If True, groups represent features that come from the same source.
+            Used to encourage sparsity of groups and features within groups.
+        verbose : int
+            Controls the verbosity when fitting. Set to 0 for no printing
+            1 or higher for printing every verbose number of gradient steps.
+        device : str
+            'cpu' to run on CPU and 'cuda' to run on GPU.
Runs much faster on GPU + """ + assert order <= 12 and order >= 1, 'order must be an integer between 1 and 12, inclusive' + assert n_features is None or max_features is None, \ + 'only specify one of n_features and max_features at a time' + + self.order = order + self.penalty = penalty + self.n_features = n_features + self.max_features = max_features + self.learning_rate = learning_rate + self.init = init + self.n_epochs = n_epochs + self.shuffle = shuffle + self.batch_size = batch_size + self.target_batch_size = target_batch_size + self.max_time = max_time + self.dftol_stop = -1 + self.freltol_stop = -1 + self.classification = classification + self.ordinal = ordinal + self.balanced = balanced + self.preprocess = preprocess + self.soft_grouping = soft_grouping + self.verbose = verbose + self.device = device + + self.model_ = None + self.scores_ = None + self._prev_checkpoint = None + self._data_train = None + + def partial_fit(self, X, y, + n_classes=None, + groups=None): + """ + Select Features via a gradient based search on (X, y) on the given samples. + Can be called repeatedly with different X and y to handle streaming datasets. + + Parameters + ---------- + X : array-like + Shape = [n_samples, n_features] + The training input samples. + y : array-like + Shape = [n_samples] + The target values (class labels in classification, real numbers in + regression). + n_classes : int + Number of classes + Classes across all calls to partial_fit. + Can be obtained by via `np.unique(y_all).shape[0]`, where y_all is the + target vector of the entire dataset. + This argument is expected for the first call to partial_fit, + otherwise will assume all classes are present in the batch of y given. + It will be ignored in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + groups : array-like + Optional, shape = [n_features] + Groups of columns that must be selected as a unit + e.g. [0, 0, 1, 2] specifies the first two columns are part of a group. + This argument is expected for the first call to partial_fit, + otherwise will assume all classes are present in the batch of y given. + It will be ignored in the subsequent calls. + """ + try: + self._partial_fit(X, y, n_classes=n_classes, groups=groups) + except constants.NanError: + if hasattr(self, '_prev_checkpoint'): + # if it's already done some batches successfully just ignore it + print('failed fitting this batch, loss was nan') + else: + # if this is the first batch, reset and try with doubles + if self.verbose: + print('Loss was nan, trying with Doubles') + self._reset() + torch.set_default_tensor_type(torch.DoubleTensor) + self._partial_fit(X, y, n_classes=n_classes, groups=groups) + + return self + + def _partial_fit(self, X, y, n_classes=None, groups=None): + """ + Private function for partial_fit to enable trying floats before doubles. 
+ """ + # pass in X and y in chunks + if hasattr(self, '_data_train'): + # just overwrite the X and y from the new chunk but make them tensors + # keep dataset stats from previous + self._data_train.X = X.values if isinstance(X, pd.DataFrame) else X + self._data_train.N, self._data_train.D = self._data_train.X.shape + self._data_train.dense_size_gb = self._data_train.get_dense_size() + self._data_train.set_dense_X() + + self._data_train.y = y.values if isinstance(y, pd.Series) else y + self._data_train.y = torch.as_tensor( + y, dtype=torch.get_default_dtype()) + else: + data_train = self._prepare_data(X, y, n_classes=n_classes) + self._data_train = data_train + + batch_size, _, accum_steps, max_iter = self._set_batch_size( + self._data_train) + + rng = None # not used + debug = 0 # {0,1} print messages and do other stuff? + dn_logs = None # tensorboard logs; only specify if debug=1 + path_save = None # intermediate models saves; only specify if debug=1 + m, solver = _train(self._data_train, + batch_size, + self.order, + self.penalty, + rng, + self.learning_rate, + debug, + max_iter, + self.max_time, + self.init, + self.dftol_stop, + self.freltol_stop, + dn_logs, + accum_steps, + path_save, + self.shuffle, + device=self.device, + verbose=self.verbose, + prev_checkpoint=self._prev_checkpoint if hasattr( + self, '_prev_checkpoint') else None, + groups=groups if not self.soft_grouping else None, + soft_groups=groups if self.soft_grouping else None) + + self._prev_checkpoint = m + self._process_results(m, solver, X, groups=groups) + return self + + def fit(self, X, y, + groups=None): + """ + Select Features via a gradient based search on (X, y). + + Parameters + ---------- + X : array-like + Shape = [n_samples, n_features] + The training input samples. + y : array-like + Shape = [n_samples] + The target values (class labels in classification, real numbers in + regression). + groups : array-like + Optional, shape = [n_features] + Groups of columns that must be selected as a unit + e.g. [0, 0, 1, 2] specifies the first two columns are part of a group. + """ + try: + self._fit(X, y, groups=groups) + except constants.NanError: + if self.verbose: + print('Loss was nan, trying with Doubles') + torch.set_default_tensor_type(torch.DoubleTensor) + self._fit(X, y, groups=groups) + return self + + def get_selected_features(self): + return self.selected_features_ + + def _prepare_data(self, X, y, n_classes=None): + """ + Returns a PrepareData object. + """ + return PrepareData(X=X.values if isinstance(X, pd.DataFrame) else X, + y=y.values if isinstance(y, pd.Series) else y, + data_format=constants.DataFormat.NUMPY, + classification=int(self.classification), + ordinal=self.ordinal, + balanced=self.balanced, + preprocess=self.preprocess, + verbose=self.verbose, + device=self.device, + n_classes=n_classes) + + def _fit(self, X, y, groups=None): + """ + Private function for fit to enable trying floats before doubles. 
+        """
+        data_train = self._prepare_data(X, y)
+
+        batch_size, _, accum_steps, max_iter = self._set_batch_size(
+            data_train)
+
+        rng = None  # not used
+        debug = 0  # {0,1} print messages and log to tensorboard
+        dn_logs = None  # tensorboard logs; only specify if debug=1
+        path_save = None  # intermediate model saves; only specify if debug=1
+        m, solver = _train(data_train,
+                           batch_size,
+                           self.order,
+                           self.penalty,
+                           rng,
+                           self.learning_rate,
+                           debug,
+                           max_iter,
+                           self.max_time,
+                           self.init,
+                           self.dftol_stop,
+                           self.freltol_stop,
+                           dn_logs,
+                           accum_steps,
+                           path_save,
+                           self.shuffle,
+                           device=self.device,
+                           verbose=self.verbose,
+                           groups=groups if not self.soft_grouping else None,
+                           soft_groups=groups if self.soft_grouping else None)
+
+        self._process_results(m, solver, X, groups=groups)
+        return self
+
+    def _process_torch_scores(self, scores):
+        """
+        Convert scores into flat numpy arrays.
+        """
+        if constants.Device.CUDA in scores.device.type:
+            scores = scores.cpu()
+        return scores.numpy().ravel()
+
+    def _set_batch_size(self, data_train):
+        """
+        Ensures that batch_size is less than the number of rows.
+        """
+        batch_size = min(self.batch_size, data_train.N)
+        target_batch_size = min(max(
+            self.batch_size, self.target_batch_size), data_train.N)
+        accum_steps = max(int(np.ceil(target_batch_size / self.batch_size)), 1)
+        max_iter = self.n_epochs * (data_train.N // batch_size)
+        return batch_size, target_batch_size, accum_steps, max_iter
+
+    def _process_results(self, m, solver, X, groups=None):
+        """
+        Process the results of a run into something suitable for transform().
+        """
+        self.scores_ = self._process_torch_scores(
+            torch.sigmoid(m[constants.Checkpoint.MODEL]['x'] * 2))
+        if self.max_features:
+            self.max_features = min([self.max_features, self.scores_.shape[0]])
+            n_features = self._recommend_number_features(solver)
+            self.set_n_features(n_features, groups=groups)
+        elif self.n_features:
+            self.set_n_features(self.n_features, groups=groups)
+        else:
+            self.selected_features_ = m['feats']
+
+        # subtract elapsed time from max_time
+        self.max_time -= m['t']
+
+        self.model_ = m
+
+        return self
+
+    def transform(self, X):
+        """
+        Returns selected features from X.
+
+        Parameters
+        ----------
+        X: array-like
+            Shape = [n_samples, n_features]
+            The training input samples.
+        """
+
+        self._get_support_mask()
+        if self.selected_features_.shape[0] == 0:
+            raise ValueError(
+                'No features selected; consider lowering the penalty or specifying n_features')
+        return (X.iloc[:, self.selected_features_]
+                if isinstance(X, pd.DataFrame)
+                else X[:, self.selected_features_])
+
+    def get_support(self, indices=False):
+        """
+        Get a mask, or integer index, of the features selected.
+
+        Parameters
+        ----------
+        indices : bool
+            Default False
+            If True, the return value will be an array of integers, rather than a boolean mask.
+
+        Returns
+        -------
+        list :
+            returns support: An index that selects the retained features from a feature vector.
+            If indices is False, this is a boolean array of shape [# input features],
+            in which an element is True iff its corresponding feature is selected for retention.
+            If indices is True, this is an integer array of shape [# output features] whose values
+            are indices into the input feature vector.
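+
+            For example (an illustrative sketch, not a stored doctest): after
+            fitting on a 4-column X with n_features=2, where columns 1 and 3
+            score highest, ``get_support()`` returns
+            ``[False, True, False, True]`` and ``get_support(indices=True)``
+            returns ``[1, 3]``.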
+        """
+        self._get_support_mask()
+        if indices:
+            return self.selected_features_
+
+        mask = np.zeros_like(self.scores_, dtype=bool)
+        # pylint: disable=E1137
+        mask[self.selected_features_] = True
+        return mask
+
+    def inverse_transform(self, X):
+        """
+        Returns transformed X to the original number of columns.
+        This operation is lossy and all columns not in the transformed data
+        will be returned as columns of 0s.
+        """
+        self._get_support_mask()
+        X_new = np.zeros((X.shape[0], self.scores_.shape[0]))
+        # place the retained columns back at their original column indices
+        X_new[:, self.selected_features_] = X
+        return X_new
+
+    def get_params(self, deep=True):
+        """
+        Get parameters for this estimator.
+        """
+        params = self.__dict__
+        params = {key: val for (key, val) in params.items()
+                  if not key.endswith('_')}
+        return params
+
+    def set_params(self, **params):
+        """
+        Set the parameters of this estimator.
+        """
+        for param in params:
+            if hasattr(self, param):
+                setattr(self, param, params[param])
+        return self
+
+    def fit_transform(self, X, y):
+        """
+        Select features and then return X with the selected features.
+
+        Parameters
+        ----------
+        X : array-like
+            Shape = [n_samples, n_features]
+            The training input samples.
+        y : array-like
+            Shape = [n_samples]
+            The target values (class labels in classification, real numbers in
+            regression).
+        """
+        self.fit(X, y)
+        return self.transform(X)
+
+    def _get_support_mask(self):
+        """
+        Check that the estimator is fitted.
+        """
+        check_is_fitted(self, 'scores_')
+
+    def _generate_scores(self, solver, xsub, ysub, step_size, feature_order):
+        """
+        Generate forward passes to determine the number of features when max_features is set.
+        """
+        scores = []
+        for i in np.arange(1, self.max_features + 1, step_size):
+            # optimization possible since xsub is growing?
+            i = int(np.ceil(i))
+            # pylint: disable=E1102
+            score = solver.f_train(torch.tensor(np.ones(i),
+                                                dtype=torch.get_default_dtype()
+                                                ).unsqueeze(1).to(self.device),
+                                   xsub[:, feature_order[:i]],
+                                   ysub)
+            if constants.Device.CUDA in score.device.type:
+                score = score.cpu()
+            # score.numpy()[0][0]
+            scores.append(score)
+        return scores
+
+    def set_n_features(self, n, groups=None):
+        """
+        Set the number of features to return after fitting.
+        """
+        self._get_support_mask()
+        self.n_features = n
+        return self._set_top_features(groups=groups)
+
+    def _set_top_features(self, groups=None):
+        """
+        Set the selected features after a run.
+
+        With groups, ensures that if any member of a group is selected, all members are selected
+        """
+        self._get_support_mask()
+        assert self.n_features <= self.scores_.shape[0], \
+            'n_features must be less than or equal to the number of columns in X'
+        # pylint: disable=E1130
+        self.selected_features_ = np.argpartition(
+            self.scores_, -self.n_features)[-self.n_features:]
+        if groups is not None and not self.soft_grouping:
+            selected_feature_set = set(self.selected_features_.tolist())
+            # pull in every member of any group that has a selected member
+            for group in np.unique(groups):
+                group_members = np.where(groups == group)[0].tolist()
+                if selected_feature_set.intersection(group_members):
+                    selected_feature_set.update(group_members)
+            self.selected_features_ = np.array(list(selected_feature_set))
+        self.selected_features_ = np.sort(self.selected_features_)
+        return self
+
+    def set_top_percentile(self, percentile, groups=None):
+        """
+        Set the percentile of features to return after fitting.
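+
+        For example, ``set_top_percentile(0.25)`` on a fitted selector with 20
+        scored columns sets ``n_features = int(20 * 0.25) = 5`` and then reuses
+        the same top-feature logic as ``set_n_features``.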
+        """
+        self._get_support_mask()
+        assert percentile <= 1 and percentile >= 0, \
+            'percentile must be between 0 and 1 inclusive'
+        self.n_features = int(self.scores_.shape[0] * percentile)
+        return self._set_top_features(groups=groups)
+
+    def _recommend_number_features(self, solver, max_time=None):
+        """
+        Get the recommended number of features by doing forward passes when max_features is set.
+        """
+        max_time = max_time if max_time else self.max_time
+        if max_time < 0:
+            max_time = 60  # allow 1 minute extra if we already spent max_time
+        MAX_FORWARD_PASS = 200
+        MAX_FULL_BATCHES = 3  # the forward passes can take longer than the fitting
+        # if we allow a full epoch of data to be included. By only doing 3 full batches at most
+        # we get enough accuracy without increasing the time too much. This
+        # constant may not be optimal
+        accum_steps = solver.accum_steps
+        step_size = max(self.max_features / MAX_FORWARD_PASS, 1)
+        # pylint: disable=E1130
+        feature_order = np.argsort(-self.scores_)  # note the negative
+        t = time.time()
+
+        dataloader_iterator = iter(solver.ds_train)
+        full_scores = []
+        # keep_going = True
+        with torch.no_grad():
+            # might want to only consider a batch valid if there are at least
+            # two classes
+            for _ in range(accum_steps * MAX_FULL_BATCHES):
+                scores = []
+                try:
+                    xsub, ysub = next(dataloader_iterator)
+                except StopIteration:
+                    # done with epoch, don't do more than one epoch
+                    break
+                except Exception as e:
+                    print(e)
+                    break
+                if max_time and time.time() - t > max_time:
+                    if self.verbose:
+                        print(
+                            "Stopping forward passes because they reached max_time: ",
+                            max_time)
+                    if not full_scores:
+                        # no forward passes worked, return half of max_features
+                        return self.max_features // 2
+                    break
+                if solver.multiclass:
+                    for target_class in range(solver.n_classes):
+                        ysub_binary = solver.transform_y_into_binary(
+                            ysub, target_class)
+                        scaling_value = solver._get_scaling_value(
+                            ysub, target_class)
+                        if not solver._skip_y_forward(ysub_binary):
+                            scores = self._generate_scores(
+                                solver, xsub, ysub_binary, step_size, feature_order)
+                            # one row will represent one class that is present in the data
+                            # all classes are weighted equally
+                            full_scores.append(
+                                [score * scaling_value for score in scores])
+                else:
+                    if not solver._skip_y_forward(ysub):
+                        scores = self._generate_scores(
+                            solver, xsub, ysub, step_size, feature_order)
+                        full_scores.append(scores)
+        best_index = FeatureGradientSelector._find_best_index_elbow(
+            full_scores)
+        if self.verbose:
+            print("Forward passes took: ", time.time() - t)
+        # account for the step size and the off-by-one (n_features is 1-indexed, not 0-indexed)
+        return int(
+            np.ceil(
+                np.arange(
+                    1,
+                    self.max_features +
+                    1,
+                    step_size))[best_index])
+
+    @staticmethod
+    def _find_best_index_elbow(full_scores):
+        """
+        Finds the point on the curve that maximizes distance from the line determined by the endpoints.
+        """
+        scores = pd.DataFrame(full_scores).mean(0).values.tolist()
+        first_point = np.array([0, scores[0]])
+        last_point = np.array([len(scores) - 1, scores[-1]])
+        elbow_metric = []
+        for i in range(len(scores)):
+            elbow_metric.append(
+                FeatureGradientSelector._distance_to_line(
+                    first_point, last_point, np.array([i, scores[i]])))
+        return np.argmax(elbow_metric)
+
+    @staticmethod
+    def _distance_to_line(start_point, end_point, new_point):
+        """
+        Calculates the shortest distance from new_point to the line determined by start_point and end_point.
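+
+        Implements the 2-D cross-product identity
+        ``d = cross(p - a, b - a) / ||b - a||`` with endpoints ``a``, ``b`` and
+        query point ``p``. The result is signed, so the elbow search above
+        implicitly assumes the score curve stays on one side of the chord.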
+        """
+        # for calculating elbow method
+        return np.cross(new_point - start_point,
+                        end_point - start_point) / np.linalg.norm(
+                            end_point - start_point)
+
+    def _reset(self):
+        """
+        Reset the estimator by deleting all private and fit parameters.
+        """
+        # copy the keys first so attributes can be deleted while iterating
+        for key in list(self.__dict__.keys()):
+            if key.endswith('_') or key.startswith('_'):
+                delattr(self, key)
+        return self
diff --git a/nni/algorithms/feature_engineering/gradient_selector/learnability.py b/nni/algorithms/feature_engineering/gradient_selector/learnability.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a0ab4b39e7bef15101c793f3ec522395c05cd94
--- /dev/null
+++ b/nni/algorithms/feature_engineering/gradient_selector/learnability.py
@@ -0,0 +1,534 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish, distribute,
+# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or
+# substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# ==================================================================================================
+
+import time
+
+import numpy as np
+import scipy.special
+import torch
+import torch.nn as nn
+
+from . import constants
+from . import syssettings
+from .fginitialize import ChunkDataLoader
+
+torch.set_default_tensor_type(syssettings.torch.tensortype)
+sparsetensor = syssettings.torch.sparse.tensortype
+
+
+def def_train_opt(p):
+    """
+    Return the default optimizer.
+    """
+    return torch.optim.Adam(p, 1e-1, amsgrad=False)
+
+
+def revcumsum(U):
+    """
+    Reverse cumulative sum, implemented with flips for speed.
+    """
+    return U.flip(dims=[0]).cumsum(dim=0).flip(dims=[0])
+
+
+def triudr(X, r):
+
+    Zr = torch.zeros_like(X, requires_grad=False)
+    U = X * r
+    Zr[:-1] = X[:-1] * revcumsum(U)[1:]
+
+    return Zr
+
+
+def triudl(X, l):
+
+    Zl = torch.zeros_like(X, requires_grad=False)
+    U = X * l
+    Zl[1:] = X[1:] * (U.cumsum(dim=0)[:-1])
+
+    return Zl
+
+
+class ramp(torch.autograd.Function):
+    """
+    Ensures input is between 0 and 1.
+    """
+
+    @staticmethod
+    def forward(ctx, input_data):
+        ctx.save_for_backward(input_data)
+        return input_data.clamp(min=0, max=1)
+
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        input_data, = ctx.saved_tensors
+        grad_input = grad_output.clone()
+        grad_input[input_data < 0] = 1e-2
+        grad_input[input_data > 1] = -1e-2
+        return grad_input
+
+
+class safesqrt(torch.autograd.Function):
+    """
+    Square root without dividing by 0.
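+
+    The backward pass below applies d/dx sqrt(x) = 1 / (2 * sqrt(x)), with
+    constants.EPSILON added to the denominator so the gradient stays finite
+    when the forward output is exactly 0.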
+    """
+    @staticmethod
+    def forward(ctx, input_data):
+        o = input_data.sqrt()
+        ctx.save_for_backward(input_data, o)
+        return o
+
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        _, o = ctx.saved_tensors
+        grad_input = grad_output.clone()
+        grad_input *= 0.5 / (o + constants.EPSILON)
+        return grad_input
+
+
+class LearnabilityMB(nn.Module):
+    """
+    Calculates the learnability of a set of features.
+    mini-batch version w/ "left" and "right" multiplies
+    """
+
+
+    def __init__(self, Nminibatch, D, coeff, groups=None, binary=False,
+                 device=constants.Device.CPU):
+        super(LearnabilityMB, self).__init__()
+
+        a = coeff / scipy.special.binom(Nminibatch, np.arange(coeff.size) + 2)
+        self.order = a.size
+        # pylint: disable=E1102
+        self.a = torch.tensor(a, dtype=torch.get_default_dtype(), requires_grad=False)
+        self.binary = binary
+
+        self.a = self.a.to(device)
+
+
+    def ret_val(self, z):
+        """
+        Get the return value based on z.
+        """
+
+        if not self.binary:
+            return 1 - z
+
+        else:
+            return 0.5 * (1 - safesqrt.apply(ramp.apply(z)))
+
+
+    def forward(self, s, X, y):
+
+        l = y.clone()
+        r = y.clone()
+        z = 0
+
+        for i in range(self.order):
+            if i % 2 == 0:
+                Z = triudr(X, r)
+                r = torch.mm(Z, s)
+            else:
+                Z = triudl(X, l)
+                l = torch.mm(Z, s)
+            if self.a[i] != 0:
+                # save the computation when a[i] is 0
+                p = torch.mm(l.t(), r)
+                z += self.a[i] * p
+        return self.ret_val(z)
+
+
+class Solver(nn.Module):
+    """
+    Class that performs the main optimization.
+    Keeps track of the current x and iterates through data to learn x given the penalty and order.
+    """
+
+    def __init__(self,
+                 PreparedData,
+                 order,
+                 Nminibatch=None,
+                 groups=None,
+                 soft_groups=None,
+                 x0=None,
+                 C=1,
+                 ftransform=torch.sigmoid,
+                 get_train_opt=def_train_opt,
+                 accum_steps=1,
+                 rng=np.random.RandomState(0),
+                 max_norm_clip=1.,
+                 shuffle=True,
+                 device=constants.Device.CPU,
+                 verbose=1):
+        """
+
+        Parameters
+        ----------
+        PreparedData : Dataset of PrepareData class
+        order : int
+            What order of interactions to include. Higher orders
+            may be more accurate but increase the run time. 12 is the maximum allowed order.
+        Nminibatch : int
+            Number of rows in a mini batch
+        groups : array-like
+            Optional, shape = [n_features]
+            Groups of columns that must be selected as a unit
+            e.g. [0, 0, 1, 2] specifies the first two columns are part of a group.
+        soft_groups : array-like
+            Optional, shape = [n_features]
+            Groups of columns that come from the same source
+            Used to encourage sparsity in the number of sources selected
+            e.g. [0, 0, 1, 2] specifies the first two columns are part of a group.
+        x0 : torch.tensor
+            Optional, initialization of x.
+        C : float
+            Penalty parameter; constant that weights the regularization term.
+        ftransform : function
+            Function to transform the x. sigmoid is the default.
+        get_train_opt : function
+            Function that returns a pytorch optimizer, Adam is the default
+        accum_steps : int
+            Number of minibatches over which gradients are accumulated before an optimizer step
+        rng : random state
+        max_norm_clip : float
+            Maximum allowable size of the gradient
+        shuffle : bool
+            Whether or not to shuffle data within the dataloader
+        device : str
+            'cpu' to run on CPU and 'cuda' to run on GPU. Runs much faster on GPU
+        verbose : int
+            Controls the verbosity when fitting. Set to 0 for no printing
+            1 or higher for printing every verbose number of gradient steps.
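+
+        A hedged construction sketch (``prepared`` is assumed to be an already
+        initialized ``PrepareData`` instance; this is internal API, not a
+        documented usage pattern):
+
+        >>> solver = Solver(prepared, order=4, Nminibatch=256, C=1)
+        >>> solver.train(f_stop=lambda slvr, h, it, t: it >= 100)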
+ """ + super(Solver, self).__init__() + + self.Ntrain, self.D = PreparedData.N, PreparedData.n_features + if groups is not None: + # pylint: disable=E1102 + groups = torch.tensor(groups, dtype=torch.long) + self.groups = groups + else: + self.groups = None + if soft_groups is not None: + # pylint: disable=E1102 + soft_groups = torch.tensor(soft_groups, dtype=torch.long) + self.soft_D = torch.unique(soft_groups).size()[0] + else: + self.soft_D = None + self.soft_groups = soft_groups + + if Nminibatch is None: + Nminibatch = self.Ntrain + else: + if Nminibatch > self.Ntrain: + print('Minibatch larger than sample size.' + + (' Reducing from %d to %d.' + % (Nminibatch, self.Ntrain))) + Nminibatch = self.Ntrain + if Nminibatch > PreparedData.max_rows: + print('Minibatch larger than mem-allowed.' + + (' Reducing from %d to %d.' % (Nminibatch, + PreparedData.max_rows))) + Nminibatch = int(np.min([Nminibatch, PreparedData.max_rows])) + self.Nminibatch = Nminibatch + self.accum_steps = accum_steps + + if x0 is None: + x0 = torch.zeros(self.D, 1, dtype=torch.get_default_dtype()) + self.ftransform = ftransform + self.x = nn.Parameter(x0) + self.max_norm = max_norm_clip + + self.device = device + self.verbose = verbose + + self.multiclass = PreparedData.classification and PreparedData.n_classes and PreparedData.n_classes > 2 + if self.multiclass: + self.n_classes = PreparedData.n_classes + else: + self.n_classes = None + # whether to treat all classes equally + self.balanced = PreparedData.balanced + self.ordinal = PreparedData.ordinal + + if (hasattr(PreparedData, 'mappings') + or PreparedData.storage_level == 'disk'): + num_workers = PreparedData.num_workers + elif PreparedData.storage_level == constants.StorageLevel.DENSE: + num_workers = 0 + else: + num_workers = 0 + + if constants.Device.CUDA in device: + pin_memory = False + else: + pin_memory = False + + if num_workers == 0: + timeout = 0 + else: + timeout = 60 + + self.ds_train = ChunkDataLoader( + PreparedData, + batch_size=self.Nminibatch, + shuffle=shuffle, + drop_last=True, + num_workers=num_workers, + pin_memory=pin_memory, + timeout=timeout) + self.f_train = LearnabilityMB(self.Nminibatch, self.D, + constants.Coefficients.SLE[order], + self.groups, + binary=PreparedData.classification, + device=self.device) + self.opt_train = get_train_opt(torch.nn.ParameterList([self.x])) + self.it = 0 + self.iters_per_epoch = int(np.ceil(len(self.ds_train.dataset) + / self.ds_train.batch_size)) + self.f_train = self.f_train.to(device) + # pylint: disable=E1102 + self.w = torch.tensor( + C / (C + 1), + dtype=torch.get_default_dtype(), requires_grad=False) + self.w = self.w.to(device) + + + def penalty(self, s): + """ + Calculate L1 Penalty. + """ + to_return = torch.sum(s) / self.D + if self.soft_groups is not None: + # if soft_groups, there is an additional penalty for using more + # groups + s_grouped = torch.zeros(self.soft_D, 1, + dtype=torch.get_default_dtype(), + device=self.device) + for group in torch.unique(self.soft_groups): + # groups should be indexed 0 to n_group - 1 + # TODO: consider other functions here + s_grouped[group] = s[self.soft_groups == group].max() + # each component of the penalty contributes .5 + # TODO: could make this a user given parameter + to_return = (to_return + torch.sum(s_grouped) / self.soft_D) * .5 + return to_return + + + def forward_and_backward(self, s, xsub, ysub, retain_graph=False): + """ + Completes the forward operation and computes gradients for learnability and penalty. 
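+
+        The two gradients returned here are later blended by ``combine_gradient``
+        as ``((1 - w) * g1 + w * g2) / accum_steps`` with ``w = C / (C + 1)``,
+        so C trades off the learnability term against the L1 penalty.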
+        """
+        f_train = self.f_train(s, xsub, ysub)
+        pen = self.penalty(s).unsqueeze(0).unsqueeze(0)
+        # pylint: disable=E1102
+        grad_outputs = torch.tensor([[1]], dtype=torch.get_default_dtype(),
+                                    device=self.device)
+        g1, = torch.autograd.grad([f_train], [self.x], grad_outputs,
+                                  retain_graph=True)
+        # pylint: disable=E1102
+        grad_outputs = torch.tensor([[1]], dtype=torch.get_default_dtype(),
+                                    device=self.device)
+        g2, = torch.autograd.grad([pen], [self.x], grad_outputs,
+                                  retain_graph=retain_graph)
+        return f_train, pen, g1, g2
+
+
+    def combine_gradient(self, g1, g2):
+        """
+        Combine gradients from learnability and penalty
+
+        Parameters
+        ----------
+        g1 : array-like
+            gradient from learnability
+        g2 : array-like
+            gradient from penalty
+        """
+        to_return = ((1 - self.w) * g1 + self.w * g2) / self.accum_steps
+        if self.groups is not None:
+            # each column will get a gradient,
+            # but a group can only move up or down as a unit, so the gradient
+            # for the group should be the average of the gradients of its columns
+            to_return_grouped = torch.zeros_like(self.x)
+            for group in torch.unique(self.groups):
+                to_return_grouped[self.groups ==
+                                  group] = to_return[self.groups == group].mean()
+            to_return = to_return_grouped
+        return to_return
+
+
+    def combine_loss(self, f_train, pen):
+        """
+        Combine the learnability and L1 penalty.
+        """
+        return ((1 - self.w) * f_train.detach() + self.w * pen.detach()) \
+            / self.accum_steps
+
+
+    def transform_y_into_binary(self, ysub, target_class):
+        """
+        Transforms multiclass classification problems into a binary classification problem.
+        """
+        with torch.no_grad():
+            ysub_binary = torch.zeros_like(ysub)
+            if self.ordinal:
+                # turn ordinal problems into n-1 classifications of is this
+                # example less than rank k
+                if target_class == 0:
+                    return None
+
+                ysub_binary[ysub >= target_class] = 1
+                ysub_binary[ysub < target_class] = -1
+            else:
+                # turn multiclass problems into n binary classifications
+                ysub_binary[ysub == target_class] = 1
+                ysub_binary[ysub != target_class] = -1
+        return ysub_binary
+
+
+    def _get_scaling_value(self, ysub, target_class):
+        """
+        Returns the weight given to a class for multiclass classification.
+        """
+        if self.balanced:
+            if self.ordinal:
+                return 1 / (torch.unique(ysub).size()[0] - 1)
+
+            return 1 / torch.unique(ysub).size()[0]
+        else:
+            if self.ordinal:
+                this_class_proportion = torch.mean(ysub >= target_class)
+                normalizing_constant = 0
+                for i in range(1, self.n_classes):
+                    normalizing_constant += torch.mean(ysub >= i)
+                return this_class_proportion / normalizing_constant
+            else:
+                return torch.mean(ysub == target_class)
+
+
+    def _skip_y_forward(self, y):
+        """
+        Returns a boolean of whether to skip the current y if there is nothing to be learned from it.
+        """
+        if y is None:
+            return True
+        elif torch.unique(y).size()[0] < 2:
+            return True
+        else:
+            return False
+
+
+    def train(self, f_callback=None, f_stop=None):
+        """
+        Trains the estimator to determine which features to include.
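+
+        Both callbacks receive ``(solver, loss, iteration, elapsed_seconds)``.
+        For example, ``f_stop=lambda slvr, h, it, t: t > 300`` is a hedged
+        sketch that stops training after roughly five minutes.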
+
+        Parameters
+        ----------
+        f_callback : function
+            Function that performs a callback
+        f_stop: function
+            Function that tells you when to stop
+        """
+
+        t = time.time()
+        h = torch.zeros([1, 1], dtype=torch.get_default_dtype())
+        h = h.to(self.device)
+        # h_complete exists so that, when we divide by the number of classes,
+        # we only do it for that minibatch when accumulating
+        h_complete = h.clone()
+        flag_stop = False
+        dataloader_iterator = iter(self.ds_train)
+        self.x.grad = torch.zeros_like(self.x)
+        while not flag_stop:
+            try:
+                xsub, ysub = next(dataloader_iterator)
+            except StopIteration:
+                dataloader_iterator = iter(self.ds_train)
+                xsub, ysub = next(dataloader_iterator)
+            try:
+                s = self.ftransform(self.x)
+                s = s.to(self.device)
+                if self.multiclass:
+                    # accumulate gradients over each class, classes range from
+                    # 0 to n_classes - 1
+                    #num_classes_batch = torch.unique(ysub).size()[0]
+                    for target_class in range(self.n_classes):
+                        ysub_binary = self.transform_y_into_binary(
+                            ysub, target_class)
+                        if self._skip_y_forward(ysub_binary):
+                            continue
+                        # should skip if the target class is not included,
+                        # but that changes what we divide by
+                        scaling_value = self._get_scaling_value(
+                            ysub, target_class)
+                        f_train, pen, g1, g2 = self.forward_and_backward(
+                            s, xsub, ysub_binary, retain_graph=True)
+                        self.x.grad += self.combine_gradient(
+                            g1, g2) * scaling_value
+                        h += self.combine_loss(f_train,
+                                               pen) * scaling_value
+                else:
+                    if not self._skip_y_forward(ysub):
+                        f_train, pen, g1, g2 = self.forward_and_backward(
+                            s, xsub, ysub)
+                        self.x.grad += self.combine_gradient(g1, g2)
+                        h += self.combine_loss(f_train, pen)
+                    else:
+                        continue
+                h_complete += h
+                self.it += 1
+                if torch.isnan(h):
+                    raise constants.NanError(
+                        'Loss is nan, something may be misconfigured')
+                if self.it % self.accum_steps == 0:
+                    torch.nn.utils.clip_grad_norm_(
+                        torch.nn.ParameterList([self.x]),
+                        max_norm=self.max_norm)
+                    self.opt_train.step()
+
+                    t = time.time() - t
+                    if f_stop is not None:
+                        flag_stop = f_stop(self, h, self.it, t)
+
+                    if f_callback is not None:
+                        f_callback(self, h, self.it, t)
+                    elif self.verbose and (self.it // self.accum_steps) % self.verbose == 0:
+                        epoch = int(self.it / self.iters_per_epoch)
+                        print(
+                            '[Minibatch: %6d/ Epoch: %3d/ t: %3.3f s] Loss: %0.3f' %
+                            (self.it, epoch, t, h_complete / self.accum_steps))
+
+                    if flag_stop:
+                        break
+
+                    self.opt_train.zero_grad()
+                    h = 0
+                    h_complete = 0
+                    t = time.time()
+            except KeyboardInterrupt:
+                flag_stop = True
+                break
diff --git a/nni/algorithms/feature_engineering/gradient_selector/requirements.txt b/nni/algorithms/feature_engineering/gradient_selector/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2aafc0c86f47ba4d296377d289acad7170321720
--- /dev/null
+++ b/nni/algorithms/feature_engineering/gradient_selector/requirements.txt
@@ -0,0 +1,4 @@
+numpy==1.14.3
+scikit-learn>=0.23.2
+scipy==1.1.0
+torch==1.1.0
diff --git a/nni/algorithms/feature_engineering/gradient_selector/syssettings.py b/nni/algorithms/feature_engineering/gradient_selector/syssettings.py
new file mode 100644
index 0000000000000000000000000000000000000000..df864b316601464a9f35a7b463933b1f05a9fe3f
--- /dev/null
+++ b/nni/algorithms/feature_engineering/gradient_selector/syssettings.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# ================================================================================================== + + +import torch + +# pytorch +torch.tensortype = torch.FloatTensor +torch.sparse.tensortype = torch.sparse.FloatTensor + +# mem +MAXMEMGB = 10 diff --git a/nni/algorithms/feature_engineering/gradient_selector/utils.py b/nni/algorithms/feature_engineering/gradient_selector/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab9b09a25cfb328586c39f00ab9546388b4f8d4 --- /dev/null +++ b/nni/algorithms/feature_engineering/gradient_selector/utils.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ==================================================================================================
+
+
+import numpy as np
+
+class EMA():
+    """
+    Maintains an exponential moving average.
+    """
+
+    def __init__(self, f=np.nan, discount_factor=0.1, valid_after=None,
+                 n_iters_relchange=3):
+
+        self.f_ma = [f]
+        self.fs = [f]
+        self.gamma = discount_factor
+        self.rel_change = [np.nan]
+        if valid_after is None:
+            self.valid_after = int(1/discount_factor)
+        else:
+            self.valid_after = valid_after
+        self.n_iters_relchange = n_iters_relchange
+        self.initialized = False
+
+    def reset(self, f):
+
+        self.f_ma = [f]
+        self.fs = [f]
+        self.rel_change = [np.nan]
+        self.initialized = True
+
+    def relchange(self):
+
+        if self.num_updates() > np.max([self.valid_after,
+                                        self.n_iters_relchange]):
+            return np.max(self.rel_change[-self.n_iters_relchange:])
+        else:
+            return np.nan
+
+    def update(self, f_new):
+
+        if not self.initialized:
+            self.reset(f_new)
+        else:
+            self.fs.append(f_new)
+            self.f_ma.append(self.f_ma[-1]*(1-self.gamma) + self.gamma*f_new)
+            if self.num_updates() > self.valid_after:
+                self.rel_change.append(np.abs((self.f_ma[-1]-self.f_ma[-2])
+                                              / self.f_ma[-2]))
+
+    def num_updates(self):
+
+        return len(self.f_ma)
+
+    def __call__(self):
+
+        if self.num_updates() > self.valid_after:
+            return self.f_ma[-1]
+        else:
+            return np.nan
diff --git a/nni/algorithms/hpo/__init__.py b/nni/algorithms/hpo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nni/algorithms/hpo/batch_tuner.py b/nni/algorithms/hpo/batch_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2df2617cb88ff6b065fcfa96fb9959b190d1e02
--- /dev/null
+++ b/nni/algorithms/hpo/batch_tuner.py
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+batch_tuner.py including:
+    class BatchTuner
+"""
+
+import logging
+
+import nni
+from nni.common.hpo_utils import validate_search_space
+from nni.tuner import Tuner
+
+TYPE = '_type'
+CHOICE = 'choice'
+VALUE = '_value'
+
+LOGGER = logging.getLogger('batch_tuner_AutoML')
+
+class BatchTuner(Tuner):
+    """
+    BatchTuner is a tuner that runs all of the configurations that the user wants to run, as a batch.
+
+    Examples
+    --------
+    Only a search space of the following form is accepted:
+
+    ::
+
+        {'combine_params':
+            { '_type': 'choice',
+              '_value': '[{...}, {...}, {...}]',
+            }
+        }
+
+    """
+
+    def __init__(self):
+        self._count = -1
+        self._values = []
+
+    def is_valid(self, search_space):
+        """
+        Check that the search space is valid: it may only contain the 'choice' type.
+
+        Parameters
+        ----------
+        search_space : dict
+
+        Returns
+        -------
+        None or list
+            If valid, return candidate values; else return None.
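+
+        For example, given
+        ``{'combine_params': {'_type': 'choice', '_value': [{'lr': 0.1}, {'lr': 0.01}]}}``
+        this returns ``[{'lr': 0.1}, {'lr': 0.01}]``.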
+        """
+        if not len(search_space) == 1:
+            raise RuntimeError('BatchTuner only supports one combined-parameters key.')
+
+        for param in search_space:
+            param_type = search_space[param][TYPE]
+            if not param_type == CHOICE:
+                raise RuntimeError('BatchTuner only supports the choice type '
+                                   'for the combined-parameters value.')
+
+            if isinstance(search_space[param][VALUE], list):
+                return search_space[param][VALUE]
+
+            raise RuntimeError('The combined-parameters value in BatchTuner is not a list.')
+        return None
+
+    def update_search_space(self, search_space):
+        """Update the search space.
+
+        Parameters
+        ----------
+        search_space : dict
+        """
+        validate_search_space(search_space, ['choice'])
+        self._values = self.is_valid(search_space)
+
+    def generate_parameters(self, parameter_id, **kwargs):
+        """Returns a dict of trial (hyper-)parameters, as a serializable object.
+
+        Parameters
+        ----------
+        parameter_id : int
+
+        Returns
+        -------
+        dict
+            A candidate parameter group.
+        """
+        self._count += 1
+        if self._count > len(self._values) - 1:
+            raise nni.NoMoreTrialError('no more parameters now.')
+        return self._values[self._count]
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        pass
+
+    def import_data(self, data):
+        """Import additional data for tuning.
+
+        Parameters
+        ----------
+        data:
+            a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
+        """
+        if not self._values:
+            LOGGER.info("Search space has not been initialized, skip this data import")
+            return
+
+        self._values = self._values[(self._count+1):]
+        self._count = -1
+
+        _completed_num = 0
+        for trial_info in data:
+            LOGGER.info("Importing data, current processing progress %s / %s",
+                        _completed_num, len(data))
+            # simply validate data format
+            assert "parameter" in trial_info
+            _params = trial_info["parameter"]
+            assert "value" in trial_info
+            _value = trial_info['value']
+            if not _value:
+                LOGGER.info("Useless trial data, value is %s, skip this trial data.", _value)
+                continue
+            _completed_num += 1
+            if _params in self._values:
+                self._values.remove(_params)
+        LOGGER.info("Successfully imported data to batch tuner, "
+                    "total data: %d, imported data: %d.", len(data), _completed_num)
diff --git a/nni/algorithms/hpo/bohb_advisor/__init__.py b/nni/algorithms/hpo/bohb_advisor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ebb442e5cbe24103750f94c0ed931dc93b5803a
--- /dev/null
+++ b/nni/algorithms/hpo/bohb_advisor/__init__.py
@@ -0,0 +1 @@
+from .bohb_advisor import BOHB, BOHBClassArgsValidator
diff --git a/nni/algorithms/hpo/bohb_advisor/bohb_advisor.py b/nni/algorithms/hpo/bohb_advisor/bohb_advisor.py
new file mode 100644
index 0000000000000000000000000000000000000000..40dbbaeb772d19fc8abb4b8c764cdf8ccc318cc6
--- /dev/null
+++ b/nni/algorithms/hpo/bohb_advisor/bohb_advisor.py
@@ -0,0 +1,687 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
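+#
+# A worked example of the bracket geometry implemented below (illustrative
+# numbers, not defaults): with min_budget=1, max_budget=27 and eta=3,
+# s_max = floor(log_3(27)) = 3, and bracket s=3 runs the successive-halving
+# rounds (n, r) = (27, 1) -> (9, 3) -> (3, 9) -> (1, 27), i.e. 27 configs at
+# budget 1, with the best third promoted at triple the budget each round.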
+
+'''
+bohb_advisor.py
+'''
+import sys
+import math
+import logging
+from schema import Schema, Optional
+import ConfigSpace as CS
+import ConfigSpace.hyperparameters as CSH
+from ConfigSpace.read_and_write import pcs_new
+
+import nni
+from nni import ClassArgsValidator
+from nni.runtime.protocol import CommandType, send
+from nni.runtime.msg_dispatcher_base import MsgDispatcherBase
+from nni.utils import OptimizeMode, MetricType, extract_scalar_reward
+from nni.runtime.common import multi_phase_enabled
+
+from .config_generator import CG_BOHB
+
+logger = logging.getLogger('BOHB_Advisor')
+
+_next_parameter_id = 0
+_KEY = 'TRIAL_BUDGET'
+_epsilon = 1e-6
+
+
+def create_parameter_id():
+    """Create an id
+
+    Returns
+    -------
+    int
+        parameter id
+    """
+    global _next_parameter_id
+    _next_parameter_id += 1
+    return _next_parameter_id - 1
+
+
+def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-1):
+    """Create a full id for a specific bracket's hyperparameter configuration
+
+    Parameters
+    ----------
+    brackets_id: int
+        brackets id
+    brackets_curr_decay: int
+        brackets curr decay
+    increased_id: int
+        increased id
+    Returns
+    -------
+    int
+        params id
+    """
+    if increased_id == -1:
+        increased_id = str(create_parameter_id())
+    params_id = '_'.join([str(brackets_id),
+                          str(brackets_curr_decay),
+                          increased_id])
+    return params_id
+
+
+class Bracket:
+    """
+    A bracket in BOHB, all the information of a bracket is managed by
+    an instance of this class.
+
+    Parameters
+    ----------
+    s: int
+        The current Successive Halving iteration index.
+    s_max: int
+        total number of Successive Halving iterations
+    eta: float
+        In each iteration, a complete run of sequential halving is executed. In it,
+        after evaluating each configuration on the same subset size, only a fraction of
+        1/eta of them 'advances' to the next round.
+    max_budget : float
+        The largest budget to consider. Needs to be larger than min_budget!
+        The budgets will be geometrically distributed
+        :math:`\\sim \\eta^k` for :math:`k\\in [0, 1, ... , num\\_subsets - 1]`.
+    optimize_mode: str
+        optimize mode, 'maximize' or 'minimize'
+    """
+    def __init__(self, s, s_max, eta, max_budget, optimize_mode):
+        self.s = s
+        self.s_max = s_max
+        self.eta = eta
+        self.max_budget = max_budget
+        self.optimize_mode = OptimizeMode(optimize_mode)
+
+        self.n = math.ceil((s_max + 1) * eta**s / (s + 1) - _epsilon)
+        self.r = max_budget / eta**s
+        self.i = 0
+        self.hyper_configs = []         # [ {id: params}, {}, ... ]
+        self.configs_perf = []          # [ {id: [seq, acc]}, {}, ... ]
+        self.num_configs_to_run = []    # [ n, n, n, ... ]
+        self.num_finished_configs = []  # [ n, n, n, ... ]
+        self.no_more_trial = False
+
+    def is_completed(self):
+        """check whether this bracket has sent out all the hyperparameter configurations"""
+        return self.no_more_trial
+
+    def get_n_r(self):
+        """return the values of n and r for the next round"""
+        return math.floor(self.n / self.eta**self.i + _epsilon), math.floor(self.r * self.eta**self.i + _epsilon)
+
+    def increase_i(self):
+        """i means the ith round. 
Increase i by 1""" + self.i += 1 + + def set_config_perf(self, i, parameter_id, seq, value): + """update trial's latest result with its sequence number, e.g., epoch number or batch number + + Parameters + ---------- + i: int + the ith round + parameter_id: int + the id of the trial/parameter + seq: int + sequence number, e.g., epoch number or batch number + value: int + latest result with sequence number seq + + Returns + ------- + None + """ + if parameter_id in self.configs_perf[i]: + if self.configs_perf[i][parameter_id][0] < seq: + self.configs_perf[i][parameter_id] = [seq, value] + else: + self.configs_perf[i][parameter_id] = [seq, value] + + def inform_trial_end(self, i): + """If the trial is finished and the corresponding round (i.e., i) has all its trials finished, + it will choose the top k trials for the next round (i.e., i+1) + + Parameters + ---------- + i: int + the ith round + + Returns + ------- + new trial or None: + If we have generated new trials after this trial end, we will return a new trial parameters. + Otherwise, we will return None. + """ + global _KEY + self.num_finished_configs[i] += 1 + logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d', + self.s, self.i, i, self.num_finished_configs[i], self.num_configs_to_run[i]) + if self.num_finished_configs[i] >= self.num_configs_to_run[i] and self.no_more_trial is False: + # choose candidate configs from finished configs to run in the next round + assert self.i == i + 1 + # finish this bracket + if self.i > self.s: + self.no_more_trial = True + return None + this_round_perf = self.configs_perf[i] + if self.optimize_mode is OptimizeMode.Maximize: + sorted_perf = sorted(this_round_perf.items( + ), key=lambda kv: kv[1][1], reverse=True) # reverse + else: + sorted_perf = sorted( + this_round_perf.items(), key=lambda kv: kv[1][1]) + logger.debug( + 'bracket %s next round %s, sorted hyper configs: %s', self.s, self.i, sorted_perf) + next_n, next_r = self.get_n_r() + logger.debug('bracket %s next round %s, next_n=%d, next_r=%d', + self.s, self.i, next_n, next_r) + hyper_configs = dict() + for k in range(next_n): + params_id = sorted_perf[k][0] + params = self.hyper_configs[i][params_id] + params[_KEY] = next_r # modify r + # generate new id + increased_id = params_id.split('_')[-1] + new_id = create_bracket_parameter_id( + self.s, self.i, increased_id) + hyper_configs[new_id] = params + self._record_hyper_configs(hyper_configs) + return [[key, value] for key, value in hyper_configs.items()] + return None + + def get_hyperparameter_configurations(self, num, r, config_generator): + """generate num hyperparameter configurations from search space using Bayesian optimization + + Parameters + ---------- + num: int + the number of hyperparameter configurations + + Returns + ------- + list + a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...] 
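+            e.g. ``[['3_0_7', {'lr': 0.01, 'TRIAL_BUDGET': 1}], ...]`` (a hedged
+            illustration: the id encodes bracket s, round i and a running counter,
+            and the budget is attached under the ``TRIAL_BUDGET`` key).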
+        """
+        global _KEY
+        assert self.i == 0
+        hyperparameter_configs = dict()
+        for _ in range(num):
+            params_id = create_bracket_parameter_id(self.s, self.i)
+            params = config_generator.get_config(r)
+            params[_KEY] = r
+            hyperparameter_configs[params_id] = params
+        self._record_hyper_configs(hyperparameter_configs)
+        return [[key, value] for key, value in hyperparameter_configs.items()]
+
+    def _record_hyper_configs(self, hyper_configs):
+        """after generating one round of hyperconfigs, this function records the generated hyperconfigs,
+        creates a dict to record the performance while those hyperconfigs are running, sets the number
+        of finished configs in this round to 0, and increases the round number.
+
+        Parameters
+        ----------
+        hyper_configs: list
+            the generated hyperconfigs
+        """
+        self.hyper_configs.append(hyper_configs)
+        self.configs_perf.append(dict())
+        self.num_finished_configs.append(0)
+        self.num_configs_to_run.append(len(hyper_configs))
+        self.increase_i()
+
+class BOHBClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'),
+            Optional('min_budget'): self.range('min_budget', int, 0, 9999),
+            Optional('max_budget'): self.range('max_budget', int, 0, 9999),
+            Optional('eta'): self.range('eta', int, 0, 9999),
+            Optional('min_points_in_model'): self.range('min_points_in_model', int, 0, 9999),
+            Optional('top_n_percent'): self.range('top_n_percent', int, 1, 99),
+            Optional('num_samples'): self.range('num_samples', int, 1, 9999),
+            Optional('random_fraction'): self.range('random_fraction', float, 0, 9999),
+            Optional('bandwidth_factor'): self.range('bandwidth_factor', float, 0, 9999),
+            Optional('min_bandwidth'): self.range('min_bandwidth', float, 0, 9999),
+            Optional('config_space'): self.path('config_space')
+        }).validate(kwargs)
+
+class BOHB(MsgDispatcherBase):
+    """
+    BOHB performs robust and efficient hyperparameter optimization
+    at scale by combining the speed of Hyperband searches with the
+    guidance and guarantees of convergence of Bayesian Optimization.
+    Instead of sampling new configurations at random, BOHB uses
+    kernel density estimators to select promising candidates.
+
+    Parameters
+    ----------
+    optimize_mode: str
+        optimize mode, 'maximize' or 'minimize'
+    min_budget: float
+        The smallest budget to consider. Needs to be positive!
+    max_budget: float
+        The largest budget to consider. Needs to be larger than min_budget!
+        The budgets will be geometrically distributed
+        :math:`\\sim \\eta^k` for :math:`k\\in [0, 1, ... , num\\_subsets - 1]`.
+    eta: int
+        In each iteration, a complete run of sequential halving is executed. In it,
+        after evaluating each configuration on the same subset size, only a fraction of
+        1/eta of them 'advances' to the next round.
+        Must be greater or equal to 2.
+    min_points_in_model: int
+        number of observations to start building a KDE. Default 'None' means
+        dim+1, the bare minimum.
+    top_n_percent: int
+        percentage (between 1 and 99, default 15) of the observations that are considered good.
+    num_samples: int
+        number of samples to optimize EI (default 64)
+    random_fraction: float
+        fraction of purely random configurations that are sampled from the
+        prior without the model. 
+    bandwidth_factor: float
+        to encourage diversity, the points proposed to optimize EI are sampled
+        from a 'widened' KDE where the bandwidth is multiplied by this factor (default: 3)
+    min_bandwidth: float
+        to keep diversity, even when all (good) samples have the same value for one of the parameters,
+        a minimum bandwidth (Default: 1e-3) is used instead of zero.
+    """
+
+    def __init__(self,
+                 optimize_mode='maximize',
+                 min_budget=1,
+                 max_budget=3,
+                 eta=3,
+                 min_points_in_model=None,
+                 top_n_percent=15,
+                 num_samples=64,
+                 random_fraction=1/3,
+                 bandwidth_factor=3,
+                 min_bandwidth=1e-3,
+                 config_space=None):
+        super(BOHB, self).__init__()
+        self.optimize_mode = OptimizeMode(optimize_mode)
+        self.min_budget = min_budget
+        self.max_budget = max_budget
+        self.eta = eta
+        self.min_points_in_model = min_points_in_model
+        self.top_n_percent = top_n_percent
+        self.num_samples = num_samples
+        self.random_fraction = random_fraction
+        self.bandwidth_factor = bandwidth_factor
+        self.min_bandwidth = min_bandwidth
+        self.config_space = config_space
+
+        # all the configs waiting for run
+        self.generated_hyper_configs = []
+        # all the completed configs
+        self.completed_hyper_configs = []
+
+        self.s_max = math.floor(
+            math.log(self.max_budget / self.min_budget, self.eta) + _epsilon)
+        # current bracket(s) number
+        self.curr_s = self.s_max
+        # In this case, tuner increases self.credit to issue a trial config sometime later.
+        self.credit = 0
+        self.brackets = dict()
+        self.search_space = None
+        # [key, value] = [parameter_id, parameter]
+        self.parameters = dict()
+
+        # config generator
+        self.cg = None
+
+        # record the latest parameter_id of the trial job trial_job_id.
+        # if there is no running parameter_id, self.job_id_para_id_map[trial_job_id] == None
+        # new trial job is added to this dict and finished trial job is removed from it.
+        self.job_id_para_id_map = dict()
+        # record the unsatisfied parameter request from trial jobs
+        self.unsatisfied_jobs = []
+
+    def handle_initialize(self, data):
+        """Initialize the tuner, including creating the Bayesian-optimization-based parametric models
+        and the search space formations
+
+        Parameters
+        ----------
+        data: search space
+            search space of this experiment
+
+        Raises
+        ------
+        ValueError
+            Error: Search space is None
+        """
+        logger.info('start to handle_initialize')
+        # convert search space JSON to ConfigSpace
+        self.handle_update_search_space(data)
+
+        # generate BOHB config_generator using Bayesian optimization
+        if self.search_space:
+            self.cg = CG_BOHB(configspace=self.search_space,
+                              min_points_in_model=self.min_points_in_model,
+                              top_n_percent=self.top_n_percent,
+                              num_samples=self.num_samples,
+                              random_fraction=self.random_fraction,
+                              bandwidth_factor=self.bandwidth_factor,
+                              min_bandwidth=self.min_bandwidth)
+        else:
+            raise ValueError('Error: Search space is None')
+        # generate the first bracket
+        self.generate_new_bracket()
+        send(CommandType.Initialized, '')
+
+    def generate_new_bracket(self):
+        """generate a new bracket"""
+        logger.debug(
+            'start to create a new SuccessiveHalving iteration, self.curr_s=%d', self.curr_s)
+        if self.curr_s < 0:
+            logger.info("s < 0, Finish this round of Hyperband in BOHB. 
Generate new round")
+            self.curr_s = self.s_max
+        self.brackets[self.curr_s] = Bracket(
+            s=self.curr_s, s_max=self.s_max, eta=self.eta,
+            max_budget=self.max_budget, optimize_mode=self.optimize_mode
+        )
+        next_n, next_r = self.brackets[self.curr_s].get_n_r()
+        logger.debug(
+            'new SuccessiveHalving iteration, next_n=%d, next_r=%d', next_n, next_r)
+        # rewrite with TPE
+        generated_hyper_configs = self.brackets[self.curr_s].get_hyperparameter_configurations(
+            next_n, next_r, self.cg)
+        self.generated_hyper_configs = generated_hyper_configs.copy()
+
+    def handle_request_trial_jobs(self, data):
+        """receive the number of requested trial jobs and generate trials
+
+        Parameters
+        ----------
+        data: int
+            number of trial jobs that nni manager asks to generate
+        """
+        # Receive new request
+        self.credit += data
+
+        for _ in range(self.credit):
+            self._request_one_trial_job()
+
+    def _get_one_trial_job(self):
+        """get one trial job, i.e., one hyperparameter configuration.
+
+        If this function is called, a command will be sent by BOHB:
+        a. If there is a parameter that needs to run, it will return "NewTrialJob" with a dict:
+        {
+            'parameter_id': id of new hyperparameter
+            'parameter_source': 'algorithm'
+            'parameters': value of new hyperparameter
+        }
+        b. If BOHB has no parameters waiting, it will return "NoMoreTrialJobs" with
+        {
+            'parameter_id': '-1_0_0',
+            'parameter_source': 'algorithm',
+            'parameters': ''
+        }
+        """
+        if not self.generated_hyper_configs:
+            ret = {
+                'parameter_id': '-1_0_0',
+                'parameter_source': 'algorithm',
+                'parameters': ''
+            }
+            send(CommandType.NoMoreTrialJobs, nni.dump(ret))
+            return None
+        assert self.generated_hyper_configs
+        params = self.generated_hyper_configs.pop(0)
+        ret = {
+            'parameter_id': params[0],
+            'parameter_source': 'algorithm',
+            'parameters': params[1]
+        }
+        self.parameters[params[0]] = params[1]
+        return ret
+
+    def _request_one_trial_job(self):
+        """get one trial job, i.e., one hyperparameter configuration.
+
+        If this function is called, a command will be sent by BOHB:
+        a. If there is a parameter that needs to run, it will return "NewTrialJob" with a dict:
+        {
+            'parameter_id': id of new hyperparameter
+            'parameter_source': 'algorithm'
+            'parameters': value of new hyperparameter
+        }
+        b. If BOHB has no parameters waiting, it will return "NoMoreTrialJobs" with
+        {
+            'parameter_id': '-1_0_0',
+            'parameter_source': 'algorithm',
+            'parameters': ''
+        }
+        """
+        ret = self._get_one_trial_job()
+        if ret is not None:
+            send(CommandType.NewTrialJob, nni.dump(ret))
+            self.credit -= 1
+
+    def handle_update_search_space(self, data):
+        """convert the JSON search space to a ConfigSpace object: dict -> configspace
+
+        Parameters
+        ----------
+        data: JSON object
+            search space of this experiment
+        """
+        search_space = data
+        cs = None
+        logger.debug(f'Received data: {data}')
+        if self.config_space:
+            logger.info(f'Got a ConfigSpace file path, parsing the search space directly from {self.config_space}. 
'
+                        'The NNI search space is ignored.')
+            with open(self.config_space, 'r') as fh:
+                cs = pcs_new.read(fh)
+        else:
+            cs = CS.ConfigurationSpace()
+            for var in search_space:
+                _type = str(search_space[var]["_type"])
+                if _type == 'choice':
+                    cs.add_hyperparameter(CSH.CategoricalHyperparameter(
+                        var, choices=search_space[var]["_value"]))
+                elif _type == 'randint':
+                    cs.add_hyperparameter(CSH.UniformIntegerHyperparameter(
+                        var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1] - 1))
+                elif _type == 'uniform':
+                    cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
+                        var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1]))
+                elif _type == 'quniform':
+                    cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
+                        var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
+                        q=search_space[var]["_value"][2]))
+                elif _type == 'loguniform':
+                    cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
+                        var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
+                        log=True))
+                elif _type == 'qloguniform':
+                    cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
+                        var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
+                        q=search_space[var]["_value"][2], log=True))
+                elif _type == 'normal':
+                    cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
+                        var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2]))
+                elif _type == 'qnormal':
+                    cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
+                        var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
+                        q=search_space[var]["_value"][3]))
+                elif _type == 'lognormal':
+                    cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
+                        var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
+                        log=True))
+                elif _type == 'qlognormal':
+                    cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
+                        var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
+                        q=search_space[var]["_value"][3], log=True))
+                else:
+                    raise ValueError(
+                        'unrecognized type in search_space, type is {}'.format(_type))
+
+        self.search_space = cs
+
+    def handle_trial_end(self, data):
+        """receive the information of a trial end and generate the next configuration.
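+
+        A representative payload (hypothetical values, for illustration only):
+        ``{'trial_job_id': 'GrCmm', 'event': 'SUCCEEDED',
+        'hyper_params': '{"parameter_id": "3_0_7", ...}'}``.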
+
+        Parameters
+        ----------
+        data: dict()
+            it has three keys: trial_job_id, event, hyper_params
+            trial_job_id: the id generated by training service
+            event: the job's state
+            hyper_params: the hyperparameters (a string) generated and returned by tuner
+        """
+        logger.debug('Tuner handle trial end, result is %s', data)
+        hyper_params = nni.load(data['hyper_params'])
+        self._handle_trial_end(hyper_params['parameter_id'])
+        if data['trial_job_id'] in self.job_id_para_id_map:
+            del self.job_id_para_id_map[data['trial_job_id']]
+
+    def _send_new_trial(self):
+        while self.unsatisfied_jobs:
+            ret = self._get_one_trial_job()
+            if ret is None:
+                break
+            one_unsatisfied = self.unsatisfied_jobs.pop(0)
+            ret['trial_job_id'] = one_unsatisfied['trial_job_id']
+            ret['parameter_index'] = one_unsatisfied['parameter_index']
+            # update parameter_id in self.job_id_para_id_map
+            self.job_id_para_id_map[ret['trial_job_id']] = ret['parameter_id']
+            send(CommandType.SendTrialJobParameter, nni.dump(ret))
+        for _ in range(self.credit):
+            self._request_one_trial_job()
+
+    def _handle_trial_end(self, parameter_id):
+        s, i, _ = parameter_id.split('_')
+        hyper_configs = self.brackets[int(s)].inform_trial_end(int(i))
+
+        if hyper_configs is not None:
+            logger.debug(
+                'bracket %s next round %s, hyper_configs: %s', s, i, hyper_configs)
+            self.generated_hyper_configs = self.generated_hyper_configs + hyper_configs
+        # Finish this bracket and generate a new bracket
+        elif self.brackets[int(s)].no_more_trial:
+            self.curr_s -= 1
+            self.generate_new_bracket()
+        self._send_new_trial()
+
+    def handle_report_metric_data(self, data):
+        """receive the metric data and update the Bayesian optimization model with the final result
+
+        Parameters
+        ----------
+        data:
+            it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
+
+        Raises
+        ------
+        ValueError
+            Data type not supported
+        """
+        logger.debug('handle report metric data = %s', data)
+        if 'value' in data:
+            data['value'] = nni.load(data['value'])
+        if data['type'] == MetricType.REQUEST_PARAMETER:
+            assert multi_phase_enabled()
+            assert data['trial_job_id'] is not None
+            assert data['parameter_index'] is not None
+            assert data['trial_job_id'] in self.job_id_para_id_map
+            self._handle_trial_end(self.job_id_para_id_map[data['trial_job_id']])
+            ret = self._get_one_trial_job()
+            if ret is None:
+                self.unsatisfied_jobs.append({'trial_job_id': data['trial_job_id'], 'parameter_index': data['parameter_index']})
+            else:
+                ret['trial_job_id'] = data['trial_job_id']
+                ret['parameter_index'] = data['parameter_index']
+                # update parameter_id in self.job_id_para_id_map
+                self.job_id_para_id_map[data['trial_job_id']] = ret['parameter_id']
+                send(CommandType.SendTrialJobParameter, nni.dump(ret))
+        else:
+            assert 'value' in data
+            value = extract_scalar_reward(data['value'])
+            if self.optimize_mode is OptimizeMode.Maximize:
+                reward = -value
+            else:
+                reward = value
+            assert 'parameter_id' in data
+            s, i, _ = data['parameter_id'].split('_')
+            logger.debug('bracket id = %s, metrics value = %s, type = %s', s, value, data['type'])
+            s = int(s)
+
+            # add to self.job_id_para_id_map here,
+            # because when the first parameter_id is created, trial_job_id is not known yet. 
+            if data['trial_job_id'] in self.job_id_para_id_map:
+                assert self.job_id_para_id_map[data['trial_job_id']] == data['parameter_id']
+            else:
+                self.job_id_para_id_map[data['trial_job_id']] = data['parameter_id']
+
+            assert 'type' in data
+            if data['type'] == MetricType.FINAL:
+                # sys.maxsize indicates this value is from FINAL metric data; sequence numbers of FINAL
+                # and PERIODICAL metric are independent, thus, not comparable.
+                assert 'sequence' in data
+                self.brackets[s].set_config_perf(
+                    int(i), data['parameter_id'], sys.maxsize, value)
+                self.completed_hyper_configs.append(data)
+
+                _parameters = self.parameters[data['parameter_id']]
+                _parameters.pop(_KEY)
+                # update BO with loss, max_s budget, hyperparameters
+                self.cg.new_result(loss=reward, budget=data['sequence'], parameters=_parameters, update_model=True)
+            elif data['type'] == MetricType.PERIODICAL:
+                self.brackets[s].set_config_perf(
+                    int(i), data['parameter_id'], data['sequence'], value)
+            else:
+                raise ValueError(
+                    'Data type not supported: {}'.format(data['type']))
+
+    def handle_add_customized_trial(self, data):
+        pass
+
+    def handle_import_data(self, data):
+        """Import additional data for tuning
+
+        Parameters
+        ----------
+        data:
+            a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
+
+        Raises
+        ------
+        AssertionError
+            data doesn't have the required keys 'parameter' and 'value'
+        """
+        for entry in data:
+            entry['value'] = nni.load(entry['value'])
+        _completed_num = 0
+        for trial_info in data:
+            logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
+            _completed_num += 1
+            assert "parameter" in trial_info
+            _params = trial_info["parameter"]
+            assert "value" in trial_info
+            _value = trial_info['value']
+            if not _value:
+                logger.info("Useless trial data, value is %s, skip this trial data.", _value)
+                continue
+            _value = extract_scalar_reward(_value)
+            budget_exist_flag = False
+            barely_params = dict()
+            for keys in _params:
+                if keys == _KEY:
+                    _budget = _params[keys]
+                    budget_exist_flag = True
+                else:
+                    barely_params[keys] = _params[keys]
+            if not budget_exist_flag:
+                _budget = self.max_budget
+                logger.info("Set \"TRIAL_BUDGET\" value to %s (max budget)", self.max_budget)
+            if self.optimize_mode is OptimizeMode.Maximize:
+                reward = -_value
+            else:
+                reward = _value
+            self.cg.new_result(loss=reward, budget=_budget, parameters=barely_params, update_model=True)
+        logger.info("Successfully imported tuning data to the BOHB advisor.")
diff --git a/nni/algorithms/hpo/bohb_advisor/config_generator.py b/nni/algorithms/hpo/bohb_advisor/config_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e013f406ee8e513672aad73b19e19f7826e128
--- /dev/null
+++ b/nni/algorithms/hpo/bohb_advisor/config_generator.py
@@ -0,0 +1,344 @@
+# BSD 3-Clause License
+# Copyright (c) 2017-2018, ML4AAD
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission. 
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging + +import ConfigSpace +import ConfigSpace.hyperparameters +import ConfigSpace.util +import numpy as np +import scipy.stats as sps +import statsmodels.api as sm + +logger = logging.getLogger('BOHB_Advisor') + +class CG_BOHB: + def __init__(self, configspace, min_points_in_model=None, + top_n_percent=15, num_samples=64, random_fraction=1/3, + bandwidth_factor=3, min_bandwidth=1e-3): + """Fits for each given budget a kernel density estimator on the best N percent of the + evaluated configurations on this budget. + + + Parameters: + ----------- + configspace: ConfigSpace + Configuration space object + top_n_percent: int + Determines the percentile of configurations that will be used as training data + for the kernel density estimator, e.g if set to 10 the 10% best configurations will be considered + for training. + min_points_in_model: int + minimum number of datapoints needed to fit a model + num_samples: int + number of samples drawn to optimize EI via sampling + random_fraction: float + fraction of random configurations returned + bandwidth_factor: float + widens the bandwidth for contiuous parameters for proposed points to optimize EI + min_bandwidth: float + to keep diversity, even when all (good) samples have the same value for one of the parameters, + a minimum bandwidth (Default: 1e-3) is used instead of zero. + """ + self.top_n_percent = top_n_percent + self.configspace = configspace + self.bw_factor = bandwidth_factor + self.min_bandwidth = min_bandwidth + + self.min_points_in_model = min_points_in_model + if min_points_in_model is None: + self.min_points_in_model = len(self.configspace.get_hyperparameters())+1 + + if self.min_points_in_model < len(self.configspace.get_hyperparameters())+1: + logger.warning('Invalid min_points_in_model value. 
Setting it to %i', len(self.configspace.get_hyperparameters()) + 1) + self.min_points_in_model = len(self.configspace.get_hyperparameters()) + 1 + + self.num_samples = num_samples + self.random_fraction = random_fraction + + hps = self.configspace.get_hyperparameters() + + self.kde_vartypes = "" + self.vartypes = [] + + for h in hps: + if hasattr(h, 'choices'): + self.kde_vartypes += 'u' + self.vartypes += [len(h.choices)] + else: + self.kde_vartypes += 'c' + self.vartypes += [0] + + self.vartypes = np.array(self.vartypes, dtype=int) + + # store precomputed probs for the categorical parameters + self.cat_probs = [] + + self.configs = dict() + self.losses = dict() + self.good_config_rankings = dict() + self.kde_models = dict() + + def largest_budget_with_model(self): + if not self.kde_models: + return -float('inf') + return max(self.kde_models.keys()) + + def sample_from_largest_budget(self, info_dict): + """We opted for a single multidimensional KDE compared to the + hierarchy of one-dimensional KDEs used in TPE. The dimensional is + seperated by budget. This function sample a configuration from + largest budget. Firstly we sample "num_samples" configurations, + then prefer one with the largest l(x)/g(x). + + Parameters: + ----------- + info_dict: dict + record the information of this configuration + + Returns + ------- + dict: + new configuration named sample + dict: + info_dict, record the information of this configuration + """ + best = np.inf + best_vector = None + + budget = max(self.kde_models.keys()) + + l = self.kde_models[budget]['good'].pdf + g = self.kde_models[budget]['bad'].pdf + + minimize_me = lambda x: max(1e-32, g(x))/max(l(x), 1e-32) + + kde_good = self.kde_models[budget]['good'] + kde_bad = self.kde_models[budget]['bad'] + + for i in range(self.num_samples): + idx = np.random.randint(0, len(kde_good.data)) + datum = kde_good.data[idx] + vector = [] + + for m, bw, t in zip(datum, kde_good.bw, self.vartypes): + + bw = max(bw, self.min_bandwidth) + if t == 0: + bw = self.bw_factor*bw + vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw)) + else: + if np.random.rand() < (1-bw): + vector.append(int(m)) + else: + vector.append(np.random.randint(t)) + val = minimize_me(vector) + + if not np.isfinite(val): + logger.warning('sampled vector: %s has EI value %s', vector, val) + logger.warning("data in the KDEs:\n%s\n%s", kde_good.data, kde_bad.data) + logger.warning("bandwidth of the KDEs:\n%s\n%s", kde_good.bw, kde_bad.bw) + logger.warning("l(x) = %s", l(vector)) + logger.warning("g(x) = %s", g(vector)) + + # right now, this happens because a KDE does not contain all values for a categorical parameter + # this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one + # if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, + # so it shouldn't be terrible. 
+ if np.isfinite(l(vector)): + best_vector = vector + break + + if val < best: + best = val + best_vector = vector + + if best_vector is None: + logger.debug("Sampling based optimization with %i samples failed -> using random configuration", self.num_samples) + sample = self.configspace.sample_configuration().get_dictionary() + info_dict['model_based_pick'] = False + + else: + logger.debug('best_vector: %s, %s, %s, %s', best_vector, best, l(best_vector), g(best_vector)) + for i, _ in enumerate(best_vector): + hp = self.configspace.get_hyperparameter(self.configspace.get_hyperparameter_by_idx(i)) + if isinstance(hp, ConfigSpace.hyperparameters.CategoricalHyperparameter): + best_vector[i] = int(np.rint(best_vector[i])) + sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary() + + sample = ConfigSpace.util.deactivate_inactive_hyperparameters( + configuration_space=self.configspace, + configuration=sample) + info_dict['model_based_pick'] = True + + return sample, info_dict + + def get_config(self, budget): + """Function to sample a new configuration + This function is called inside BOHB to query a new configuration + + Parameters: + ----------- + budget: float + the budget for which this configuration is scheduled + + Returns + ------- + config + return a valid configuration with parameters and budget + """ + logger.debug('start sampling a new configuration.') + sample = None + info_dict = {} + + # If no model is available, sample from prior + # also mix in a fraction of random configs + if not self.kde_models.keys() or np.random.rand() < self.random_fraction: + sample = self.configspace.sample_configuration() + info_dict['model_based_pick'] = False + + if sample is None: + sample, info_dict = self.sample_from_largest_budget(info_dict) + + sample = ConfigSpace.util.deactivate_inactive_hyperparameters( + configuration_space=self.configspace, + configuration=sample.get_dictionary() + ).get_dictionary() + + logger.debug('done sampling a new configuration.') + sample['TRIAL_BUDGET'] = budget + return sample + + def impute_conditional_data(self, array): + return_array = np.zeros(array.shape) + for i in range(array.shape[0]): + datum = np.copy(array[i]) + nan_indices = np.argwhere(np.isnan(datum)).flatten() + while np.any(nan_indices): + nan_idx = nan_indices[0] + valid_indices = np.argwhere(np.isfinite(array[:, nan_idx])).flatten() + if len(valid_indices) > 0: + # pick one of them at random and overwrite all NaN values + row_idx = np.random.choice(valid_indices) + datum[nan_indices] = array[row_idx, nan_indices] + else: + # no good point in the data has this value activated, so fill it with a valid but random value + t = self.vartypes[nan_idx] + if t == 0: + datum[nan_idx] = np.random.rand() + else: + datum[nan_idx] = np.random.randint(t) + nan_indices = np.argwhere(np.isnan(datum)).flatten() + return_array[i, :] = datum + return return_array + + def new_result(self, loss, budget, parameters, update_model=True): + """ + Function to register finished runs. Every time a run has finished, this function should be called + to register it with the loss. 
+ + Parameters: + ----------- + loss: float + the loss of the parameters + budget: float + the budget of the parameters + parameters: dict + the parameters of this trial + update_model: bool + whether use this parameter to update BP model + + Returns + ------- + None + """ + if loss is None: + # One could skip crashed results, but we decided + # assign a +inf loss and count them as bad configurations + loss = np.inf + + if budget not in self.configs.keys(): + self.configs[budget] = [] + self.losses[budget] = [] + + # skip model building if we already have a bigger model + if max(list(self.kde_models.keys()) + [-np.inf]) > budget: + return + + # We want to get a numerical representation of the configuration in the original space + conf = ConfigSpace.Configuration(self.configspace, parameters) + self.configs[budget].append(conf.get_array()) + self.losses[budget].append(loss) + + # skip model building: + # a) if not enough points are available + if len(self.configs[budget]) <= self.min_points_in_model - 1: + logger.debug("Only %i run(s) for budget %f available, need more than %s \ + -> can't build model!", len(self.configs[budget]), budget, self.min_points_in_model+1) + return + # b) during warnm starting when we feed previous results in and only update once + if not update_model: + return + + train_configs = np.array(self.configs[budget]) + train_losses = np.array(self.losses[budget]) + + n_good = max(self.min_points_in_model, (self.top_n_percent * train_configs.shape[0])//100) + n_bad = max(self.min_points_in_model, ((100-self.top_n_percent)*train_configs.shape[0])//100) + + # Refit KDE for the current budget + idx = np.argsort(train_losses) + + train_data_good = self.impute_conditional_data(train_configs[idx[:n_good]]) + train_data_bad = self.impute_conditional_data(train_configs[idx[n_good:n_good+n_bad]]) + + if train_data_good.shape[0] <= train_data_good.shape[1]: + return + if train_data_bad.shape[0] <= train_data_bad.shape[1]: + return + + #more expensive crossvalidation method + #bw_estimation = 'cv_ls' + # quick rule of thumb + bw_estimation = 'normal_reference' + + bad_kde = sm.nonparametric.KDEMultivariate(data=train_data_bad, var_type=self.kde_vartypes, bw=bw_estimation) + good_kde = sm.nonparametric.KDEMultivariate(data=train_data_good, var_type=self.kde_vartypes, bw=bw_estimation) + + bad_kde.bw = np.clip(bad_kde.bw, self.min_bandwidth, None) + good_kde.bw = np.clip(good_kde.bw, self.min_bandwidth, None) + + self.kde_models[budget] = { + 'good': good_kde, + 'bad' : bad_kde + } + + # update probs for the categorical parameters for later sampling + logger.debug('done building a new model for budget %f based on %i/%i split\nBest loss for this budget:%f\n', + budget, n_good, n_bad, np.min(train_losses)) diff --git a/nni/algorithms/hpo/curvefitting_assessor/__init__.py b/nni/algorithms/hpo/curvefitting_assessor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ed4d7a513b181e43e8767d28ead3cbc20af139 --- /dev/null +++ b/nni/algorithms/hpo/curvefitting_assessor/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
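The CG_BOHB generator above ranks candidates by the ratio of two kernel density estimates: one fitted on the best `top_n_percent` of observed configurations, one on the rest. Below is a minimal sketch of that density-ratio scoring on invented toy data (`observations` and `scores` are assumptions for the demo, not part of the patch):

```python
# Sketch of CG_BOHB's density-ratio idea on toy 1-D data; not patch code.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
observations = rng.uniform(0, 1, size=(50, 1))    # 50 configs, one continuous param
scores = (observations[:, 0] - 0.3) ** 2          # lower loss is better

# Split into "good" (top 15%) and "bad" (rest), as top_n_percent=15 does.
order = np.argsort(scores)
n_good = max(2, 15 * len(observations) // 100)
good_kde = sm.nonparametric.KDEMultivariate(
    data=observations[order[:n_good]], var_type='c', bw='normal_reference')
bad_kde = sm.nonparametric.KDEMultivariate(
    data=observations[order[n_good:]], var_type='c', bw='normal_reference')

# Candidates are scored by g(x)/l(x); the sampler keeps the minimizer, i.e. the
# point where the "good" density most dominates the "bad" one (cf. minimize_me).
candidates = rng.uniform(0, 1, size=(64, 1))
ratio = np.maximum(bad_kde.pdf(candidates), 1e-32) / np.maximum(good_kde.pdf(candidates), 1e-32)
print('most promising candidate:', candidates[np.argmin(ratio)])
```

This is the same l(x)/g(x) preference that `sample_from_largest_budget` applies, restricted to the largest budget for which a KDE model exists.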
+
+from .curvefitting_assessor import CurvefittingAssessor, CurvefittingClassArgsValidator
diff --git a/nni/algorithms/hpo/curvefitting_assessor/curvefitting_assessor.py b/nni/algorithms/hpo/curvefitting_assessor/curvefitting_assessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b91583bb3902305095aaefff4f55c85f922f7f2a
--- /dev/null
+++ b/nni/algorithms/hpo/curvefitting_assessor/curvefitting_assessor.py
@@ -0,0 +1,137 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import datetime
+from schema import Schema, Optional
+
+from nni import ClassArgsValidator
+from nni.assessor import Assessor, AssessResult
+from nni.utils import extract_scalar_history
+from .model_factory import CurveModel
+
+logger = logging.getLogger('curvefitting_Assessor')
+
+class CurvefittingClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            'epoch_num': self.range('epoch_num', int, 0, 9999),
+            Optional('start_step'): self.range('start_step', int, 0, 9999),
+            Optional('threshold'): self.range('threshold', float, 0, 9999),
+            Optional('gap'): self.range('gap', int, 1, 9999),
+        }).validate(kwargs)
+
+class CurvefittingAssessor(Assessor):
+    """CurvefittingAssessor uses a learning curve fitting algorithm to predict future learning curve performance.
+    It stops a pending trial X at step S if the trial's forecast at the target step has converged and is lower than the
+    best performance in history.
+
+    Parameters
+    ----------
+    epoch_num : int
+        The total number of epochs
+    start_step : int
+        A trial is assessed only after it has reported start_step intermediate results
+    threshold : float
+        The threshold used to decide whether to early-stop a worse-performing curve
+    gap : int
+        The number of intermediate results between two assessments
+    """
+
+    def __init__(self, epoch_num=20, start_step=6, threshold=0.95, gap=1):
+        if start_step <= 0:
+            logger.warning('It\'s recommended to set start_step to a positive number')
+        # Record the target position we predict
+        self.target_pos = epoch_num
+        # Start forecasting when historical data reaches start step
+        self.start_step = start_step
+        # Record the comparison threshold
+        self.threshold = threshold
+        # Record the assessment gap
+        self.gap = gap
+        # Record the number of intermediate results at the latest assessment
+        self.last_judgment_num = dict()
+        # Record the best performance
+        self.set_best_performance = False
+        self.completed_best_performance = None
+        self.trial_history = []
+        logger.info('Successfully initialized the curve-fitting assessor')
+
+    def trial_end(self, trial_job_id, success):
+        """Update the best performance of completed trial jobs
+
+        Parameters
+        ----------
+        trial_job_id : int
+            trial job id
+        success : bool
+            True if the trial successfully completed, False otherwise
+        """
+        if success:
+            if self.set_best_performance:
+                self.completed_best_performance = max(self.completed_best_performance, self.trial_history[-1])
+            else:
+                self.set_best_performance = True
+                self.completed_best_performance = self.trial_history[-1]
+            logger.info('Updated completed best performance, trial job id: %s', trial_job_id)
+        else:
+            logger.info('No need to update, trial job id: %s', trial_job_id)
+
+    def assess_trial(self, trial_job_id, trial_history):
+        """Assess whether a trial should be early-stopped by the curve fitting algorithm
+
+        Parameters
+        ----------
+        trial_job_id : int
+            trial job id
+        trial_history : list
+            The history performance of the trial
+
+        Returns
+        -------
+        AssessResult
+            AssessResult.Good or AssessResult.Bad
+
+        Raises
+        ------
+        Exception
+            unrecognized exception in curvefitting_assessor
+        """
+        scalar_trial_history = extract_scalar_history(trial_history)
+        self.trial_history = scalar_trial_history
+        if not self.set_best_performance:
+            return AssessResult.Good
+        curr_step = len(scalar_trial_history)
+        if curr_step < self.start_step:
+            return AssessResult.Good
+
+        if trial_job_id in self.last_judgment_num.keys() and curr_step - self.last_judgment_num[trial_job_id] < self.gap:
+            return AssessResult.Good
+        self.last_judgment_num[trial_job_id] = curr_step
+
+        try:
+            start_time = datetime.datetime.now()
+            # Predict the final result
+            curvemodel = CurveModel(self.target_pos)
+            predict_y = curvemodel.predict(scalar_trial_history)
+            log_message = "Prediction done. Trial job id = {}, Predict value = {}".format(trial_job_id, predict_y)
+            if predict_y is None:
+                logger.info('%s, wait for more information to predict precisely', log_message)
+                return AssessResult.Good
+            else:
+                logger.info(log_message)
+                standard_performance = self.completed_best_performance * self.threshold
+
+                end_time = datetime.datetime.now()
+                if (end_time - start_time).seconds > 60:
+                    logger.warning(
+                        'Curve Fitting Assessor Runtime Exceeds 60s, Trial Id = %s Trial History = %s',
+                        trial_job_id, self.trial_history
+                    )
+
+                if predict_y > standard_performance:
+                    return AssessResult.Good
+                return AssessResult.Bad
+
+        except Exception as exception:
+            logger.exception('unrecognized exception in curvefitting_assessor %s', exception)
diff --git a/nni/algorithms/hpo/curvefitting_assessor/curvefunctions.py b/nni/algorithms/hpo/curvefitting_assessor/curvefunctions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7bfc3b9d817df38689d24ff317e1543ac0f88e9
--- /dev/null
+++ b/nni/algorithms/hpo/curvefitting_assessor/curvefunctions.py
@@ -0,0 +1,296 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
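The assessor's verdict above reduces to one comparison: the curve model's forecast at `epoch_num` against `threshold` times the best completed result. A small worked example with made-up metric values (`threshold=0.95` is the real default; the other numbers are invented):

```python
# Sketch of the early-stop rule in CurvefittingAssessor.assess_trial; toy numbers.
threshold = 0.95          # default class argument: tolerate a 5% shortfall
best_completed = 0.92     # best final metric among completed trials
predicted_final = 0.85    # curve model's forecast at epoch_num

standard_performance = best_completed * threshold   # 0.874
# Good -> keep running, Bad -> early stop
verdict = 'Good' if predicted_final > standard_performance else 'Bad'
print(verdict)  # 'Bad': the forecast cannot reach 95% of the current best
```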
+ +""" +A family of functions used by CurvefittingAssessor +""" + +import numpy as np + +all_models = {} +model_para = {} +model_para_num = {} + +curve_combination_models = ['vap', 'pow3', 'linear', 'logx_linear', 'dr_hill_zero_background', 'log_power', 'pow4', 'mmf', + 'exp4', 'ilog2', 'weibull', 'janoschek'] + + +def vap(x, a, b, c): + """Vapor pressure model + + Parameters + ---------- + x : int + a : float + b : float + c : float + + Returns + ------- + float + np.exp(a+b/x+c*np.log(x)) + """ + return np.exp(a+b/x+c*np.log(x)) + + +all_models['vap'] = vap +model_para['vap'] = [-0.622028, -0.470050, 0.042322] +model_para_num['vap'] = 3 + + +def pow3(x, c, a, alpha): + """pow3 + + Parameters + ---------- + x : int + c : float + a : float + alpha : float + + Returns + ------- + float + c - a * x**(-alpha) + """ + return c - a * x**(-alpha) + + +all_models['pow3'] = pow3 +model_para['pow3'] = [0.84, 0.52, 0.01] +model_para_num['pow3'] = 3 + + +def linear(x, a, b): + """linear + + Parameters + ---------- + x : int + a : float + b : float + + Returns + ------- + float + a*x + b + """ + return a*x + b + + +all_models['linear'] = linear +model_para['linear'] = [1., 0] +model_para_num['linear'] = 2 + + +def logx_linear(x, a, b): + """logx linear + + Parameters + ---------- + x : int + a : float + b : float + + Returns + ------- + float + a * np.log(x) + b + """ + x = np.log(x) + return a*x + b + + +all_models['logx_linear'] = logx_linear +model_para['logx_linear'] = [0.378106, 0.046506] +model_para_num['logx_linear'] = 2 + + +def dr_hill_zero_background(x, theta, eta, kappa): + """dr hill zero background + + Parameters + ---------- + x : int + theta : float + eta : float + kappa : float + + Returns + ------- + float + (theta* x**eta) / (kappa**eta + x**eta) + """ + return (theta * x**eta) / (kappa**eta + x**eta) + + +all_models['dr_hill_zero_background'] = dr_hill_zero_background +model_para['dr_hill_zero_background'] = [0.772320, 0.586449, 2.460843] +model_para_num['dr_hill_zero_background'] = 3 + + +def log_power(x, a, b, c): + """"logistic power + + Parameters + ---------- + x : int + a : float + b : float + c : float + + Returns + ------- + float + a/(1.+(x/np.exp(b))**c) + """ + return a/(1.+(x/np.exp(b))**c) + + +all_models['log_power'] = log_power +model_para['log_power'] = [0.77, 2.98, -0.51] +model_para_num['log_power'] = 3 + + +def pow4(x, alpha, a, b, c): + """pow4 + + Parameters + ---------- + x : int + alpha : float + a : float + b : float + c : float + + Returns + ------- + float + c - (a*x+b)**-alpha + """ + return c - (a*x+b)**-alpha + + +all_models['pow4'] = pow4 +model_para['pow4'] = [0.1, 200, 0., 0.8] +model_para_num['pow4'] = 4 + + +def mmf(x, alpha, beta, kappa, delta): + """Morgan-Mercer-Flodin + http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm + + Parameters + ---------- + x : int + alpha : float + beta : float + kappa : float + delta : float + + Returns + ------- + float + alpha - (alpha - beta) / (1. + (kappa * x)**delta) + """ + return alpha - (alpha - beta) / (1. 
+ (kappa * x)**delta) + + +all_models['mmf'] = mmf +model_para['mmf'] = [0.7, 0.1, 0.01, 5] +model_para_num['mmf'] = 4 + + +def exp4(x, c, a, b, alpha): + """exp4 + + Parameters + ---------- + x : int + c : float + a : float + b : float + alpha : float + + Returns + ------- + float + c - np.exp(-a*(x**alpha)+b) + """ + return c - np.exp(-a*(x**alpha)+b) + + +all_models['exp4'] = exp4 +model_para['exp4'] = [0.7, 0.8, -0.8, 0.3] +model_para_num['exp4'] = 4 + + +def ilog2(x, c, a): + """ilog2 + + Parameters + ---------- + x : int + c : float + a : float + + Returns + ------- + float + c - a / np.log(x) + """ + return c - a / np.log(x) + + +all_models['ilog2'] = ilog2 +model_para['ilog2'] = [0.78, 0.43] +model_para_num['ilog2'] = 2 + + +def weibull(x, alpha, beta, kappa, delta): + """Weibull model + http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm + + Parameters + ---------- + x : int + alpha : float + beta : float + kappa : float + delta : float + + Returns + ------- + float + alpha - (alpha - beta) * np.exp(-(kappa * x)**delta) + """ + return alpha - (alpha - beta) * np.exp(-(kappa * x)**delta) + + +all_models['weibull'] = weibull +model_para['weibull'] = [0.7, 0.1, 0.01, 1] +model_para_num['weibull'] = 4 + + +def janoschek(x, a, beta, k, delta): + """http://www.pisces-conservation.com/growthhelp/janoschek.htm + + Parameters + ---------- + x : int + a : float + beta : float + k : float + delta : float + + Returns + ------- + float + a - (a - beta) * np.exp(-k*x**delta) + """ + return a - (a - beta) * np.exp(-k*x**delta) + + +all_models['janoschek'] = janoschek +model_para['janoschek'] = [0.73, 0.07, 0.355, 0.46] +model_para_num['janoschek'] = 4 diff --git a/nni/algorithms/hpo/curvefitting_assessor/model_factory.py b/nni/algorithms/hpo/curvefitting_assessor/model_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a6ada9976293d98a44d1d763f787a7c7c9bea1 --- /dev/null +++ b/nni/algorithms/hpo/curvefitting_assessor/model_factory.py @@ -0,0 +1,330 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
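`model_factory.py` below fits each member of this curve family to the observed partial learning curve. Here is a sketch of one such fit with `scipy.optimize.curve_fit`, mirroring what `fit_theta` does for the `pow3` model; the synthetic learning-curve values are invented for the demo:

```python
# Sketch: fit one parametric curve (pow3) and forecast a later epoch; toy data.
import numpy as np
from scipy import optimize

def pow3(x, c, a, alpha):
    return c - a * x ** (-alpha)

x = np.arange(1, 11)    # epochs 1..10
# Synthetic noisy accuracy curve that roughly follows a pow3 shape.
y = 0.9 - 0.5 * x ** -0.8 + 0.01 * np.random.default_rng(1).standard_normal(10)

(c, a, alpha), _ = optimize.curve_fit(pow3, x, y)
print('forecast at epoch 20:', pow3(20, c, a, alpha))
```

The real `fit_theta` repeats this for all twelve curves in `curve_combination_models`, then `filter_curve` discards fits whose in-sample error or extrapolation is an outlier.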
+ +import logging +import numpy as np +from scipy import optimize +from .curvefunctions import * # pylint: disable=wildcard-import,unused-wildcard-import + +# Number of curve functions we prepared, more details can be found in "curvefunctions.py" +NUM_OF_FUNCTIONS = 12 +# Number of simulation time when we do MCMC sampling +NUM_OF_SIMULATION_TIME = 20 +# Number of samples we select when we do MCMC sampling +NUM_OF_INSTANCE = 10 +# The step size of each noise when we do MCMC sampling +STEP_SIZE = 0.0005 +# Number of least fitting function, if effective function is lower than this number, we will ask for more information +LEAST_FITTED_FUNCTION = 4 + +logger = logging.getLogger('curvefitting_Assessor') + +class CurveModel: + """Build a Curve Model to predict the performance + + Algorithm: https://github.com/Microsoft/nni/blob/master/src/sdk/pynni/nni/curvefitting_assessor/README.md + + Parameters + ---------- + target_pos : int + The point we need to predict + """ + def __init__(self, target_pos): + self.target_pos = target_pos + self.trial_history = [] + self.point_num = 0 + self.effective_model = [] + self.effective_model_num = 0 + self.weight_samples = [] + + def fit_theta(self): + """use least squares to fit all default curves parameter seperately + + Returns + ------- + None + """ + x = range(1, self.point_num + 1) + y = self.trial_history + for i in range(NUM_OF_FUNCTIONS): + model = curve_combination_models[i] + try: + # The maximum number of iterations to fit is 100*(N+1), where N is the number of elements in `x0`. + if model_para_num[model] == 2: + a, b = optimize.curve_fit(all_models[model], x, y)[0] + model_para[model][0] = a + model_para[model][1] = b + elif model_para_num[model] == 3: + a, b, c = optimize.curve_fit(all_models[model], x, y)[0] + model_para[model][0] = a + model_para[model][1] = b + model_para[model][2] = c + elif model_para_num[model] == 4: + a, b, c, d = optimize.curve_fit(all_models[model], x, y)[0] + model_para[model][0] = a + model_para[model][1] = b + model_para[model][2] = c + model_para[model][3] = d + except (RuntimeError, FloatingPointError, OverflowError, ZeroDivisionError): + # Ignore exceptions caused by numerical calculations + pass + except Exception as exception: + logger.critical("Exceptions in fit_theta: %s", exception) + + def filter_curve(self): + """filter the poor performing curve + + Returns + ------- + None + """ + avg = np.sum(self.trial_history) / self.point_num + standard = avg * avg * self.point_num + predict_data = [] + tmp_model = [] + for i in range(NUM_OF_FUNCTIONS): + var = 0 + model = curve_combination_models[i] + for j in range(1, self.point_num + 1): + y = self.predict_y(model, j) + var += (y - self.trial_history[j - 1]) * (y - self.trial_history[j - 1]) + if var < standard: + predict_data.append(y) + tmp_model.append(curve_combination_models[i]) + median = np.median(predict_data) + std = np.std(predict_data) + for model in tmp_model: + y = self.predict_y(model, self.target_pos) + epsilon = self.point_num / 10 * std + if y < median + epsilon and y > median - epsilon: + self.effective_model.append(model) + self.effective_model_num = len(self.effective_model) + logger.info('List of effective model: %s', self.effective_model) + + def predict_y(self, model, pos): + """return the predict y of 'model' when epoch = pos + + Parameters + ---------- + model : string + name of the curve function model + pos : int + the epoch number of the position you want to predict + + Returns + ------- + int + The expected matrix at pos + """ + if 
model_para_num[model] == 2: + y = all_models[model](pos, model_para[model][0], model_para[model][1]) + elif model_para_num[model] == 3: + y = all_models[model](pos, model_para[model][0], model_para[model][1], model_para[model][2]) + elif model_para_num[model] == 4: + y = all_models[model](pos, model_para[model][0], model_para[model][1], model_para[model][2], model_para[model][3]) + return y + + def f_comb(self, pos, sample): + """return the value of the f_comb when epoch = pos + + Parameters + ---------- + pos : int + the epoch number of the position you want to predict + sample : list + sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} + + Returns + ------- + int + The expected matrix at pos with all the active function's prediction + """ + ret = 0 + for i in range(self.effective_model_num): + model = self.effective_model[i] + y = self.predict_y(model, pos) + ret += sample[i] * y + return ret + + def normalize_weight(self, samples): + """normalize weight + + Parameters + ---------- + samples : list + a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix, + representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}} + + Returns + ------- + list + samples after normalize weight + """ + for i in range(NUM_OF_INSTANCE): + total = 0 + for j in range(self.effective_model_num): + total += samples[i][j] + for j in range(self.effective_model_num): + samples[i][j] /= total + return samples + + def sigma_sq(self, sample): + """returns the value of sigma square, given the weight's sample + + Parameters + ---------- + sample : list + sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} + + Returns + ------- + float + the value of sigma square, given the weight's sample + """ + ret = 0 + for i in range(1, self.point_num + 1): + temp = self.trial_history[i - 1] - self.f_comb(i, sample) + ret += temp * temp + return 1.0 * ret / self.point_num + + def normal_distribution(self, pos, sample): + """returns the value of normal distribution, given the weight's sample and target position + + Parameters + ---------- + pos : int + the epoch number of the position you want to predict + sample : list + sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} + + Returns + ------- + float + the value of normal distribution + """ + curr_sigma_sq = self.sigma_sq(sample) + delta = self.trial_history[pos - 1] - self.f_comb(pos, sample) + return np.exp(np.square(delta) / (-2.0 * curr_sigma_sq)) / np.sqrt(2 * np.pi * np.sqrt(curr_sigma_sq)) + + def likelihood(self, samples): + """likelihood + + Parameters + ---------- + sample : list + sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} + + Returns + ------- + float + likelihood + """ + ret = np.ones(NUM_OF_INSTANCE) + for i in range(NUM_OF_INSTANCE): + for j in range(1, self.point_num + 1): + ret[i] *= self.normal_distribution(j, samples[i]) + return ret + + def prior(self, samples): + """priori distribution + + Parameters + ---------- + samples : list + a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix, + representing{{w11, w12, ..., w1k}, {w21, w22, ... 
w2k}, ...{wk1, wk2,..., wkk}} + + Returns + ------- + float + priori distribution + """ + ret = np.ones(NUM_OF_INSTANCE) + for i in range(NUM_OF_INSTANCE): + for j in range(self.effective_model_num): + if not samples[i][j] > 0: + ret[i] = 0 + if self.f_comb(1, samples[i]) >= self.f_comb(self.target_pos, samples[i]): + ret[i] = 0 + return ret + + def target_distribution(self, samples): + """posterior probability + + Parameters + ---------- + samples : list + a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix, + representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}} + + Returns + ------- + float + posterior probability + """ + curr_likelihood = self.likelihood(samples) + curr_prior = self.prior(samples) + ret = np.ones(NUM_OF_INSTANCE) + for i in range(NUM_OF_INSTANCE): + ret[i] = curr_likelihood[i] * curr_prior[i] + return ret + + def mcmc_sampling(self): + """Adjust the weight of each function using mcmc sampling. + The initial value of each weight is evenly distribute. + Brief introduction: + (1)Definition of sample: + Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} + (2)Definition of samples: + Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix, + representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}} + (3)Definition of model: + Model is the function we chose right now. Such as: 'wap', 'weibull'. + (4)Definition of pos: + Pos is the position we want to predict, corresponds to the value of epoch. + + Returns + ------- + None + """ + init_weight = np.ones((self.effective_model_num), dtype=np.float) / self.effective_model_num + self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num)) + for _ in range(NUM_OF_SIMULATION_TIME): + # sample new value from Q(i, j) + new_values = np.random.randn(NUM_OF_INSTANCE, self.effective_model_num) * STEP_SIZE + self.weight_samples + new_values = self.normalize_weight(new_values) + # compute alpha(i, j) = min{1, P(j)Q(j, i)/P(i)Q(i, j)} + alpha = np.minimum(1, self.target_distribution(new_values) / self.target_distribution(self.weight_samples)) + # sample u + u = np.random.rand(NUM_OF_INSTANCE) + # new value + change_value_flag = (u < alpha).astype(np.int) + for j in range(NUM_OF_INSTANCE): + new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j] + self.weight_samples = new_values + + def predict(self, trial_history): + """predict the value of target position + + Parameters + ---------- + trial_history : list + The history performance matrix of each trial. 
+ + Returns + ------- + float + expected final result performance of this hyperparameter config + """ + self.trial_history = trial_history + self.point_num = len(trial_history) + self.fit_theta() + self.filter_curve() + if self.effective_model_num < LEAST_FITTED_FUNCTION: + # different curve's predictions are too scattered, requires more information + return None + self.mcmc_sampling() + ret = 0 + for i in range(NUM_OF_INSTANCE): + ret += self.f_comb(self.target_pos, self.weight_samples[i]) + return ret / NUM_OF_INSTANCE diff --git a/nni/algorithms/hpo/dngo_tuner.py b/nni/algorithms/hpo/dngo_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..e71a158433b516895bf41abe9eed8606d26abede --- /dev/null +++ b/nni/algorithms/hpo/dngo_tuner.py @@ -0,0 +1,134 @@ +import logging +import warnings + +import numpy as np +import torch +from pybnn import DNGO +from torch.distributions import Normal + +import nni.parameter_expressions as parameter_expressions +from nni import ClassArgsValidator +from nni.common.hpo_utils import validate_search_space +from nni.tuner import Tuner + +_logger = logging.getLogger(__name__) + + +def _random_config(search_space, random_state): + chosen_config = {} + for key, val in search_space.items(): + if val['_type'] == 'choice': + choices = val['_value'] + index = random_state.randint(len(choices)) + if all([isinstance(c, (int, float)) for c in choices]): + chosen_config[key] = choices[index] + else: + raise ValueError('Choices with type other than int and float is not supported.') + elif val['_type'] == 'uniform': + chosen_config[key] = random_state.uniform(val['_value'][0], val['_value'][1]) + elif val['_type'] == 'randint': + chosen_config[key] = random_state.randint( + val['_value'][0], val['_value'][1]) + elif val['_type'] == 'quniform': + chosen_config[key] = parameter_expressions.quniform( + val['_value'][0], val['_value'][1], val['_value'][2], random_state) + elif val['_type'] == 'loguniform': + chosen_config[key] = parameter_expressions.loguniform( + val['_value'][0], val['_value'][1], random_state) + elif val['_type'] == 'qloguniform': + chosen_config[key] = parameter_expressions.qloguniform( + val['_value'][0], val['_value'][1], val['_value'][2], random_state) + else: + raise ValueError('Unknown key %s and value %s' % (key, val)) + return chosen_config + + +class DNGOTuner(Tuner): + + def __init__(self, optimize_mode='maximize', sample_size=1000, trials_per_update=20, num_epochs_per_training=500): + self.searchspace_json = None + self.random_state = None + self.model = DNGO(do_mcmc=False, num_epochs=num_epochs_per_training) + self._model_initialized = False + self.sample_size = sample_size + self.trials_per_update = trials_per_update + self.optimize_mode = optimize_mode + + self.x = [] + self.y = [] + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + self.x.append(parameters) + self.y.append(self._get_default_value(value)) + if len(self.y) % self.trials_per_update == 0: + self._update_model() + + def generate_parameters(self, parameter_id, **kwargs): + if not self._model_initialized: + return _random_config(self.searchspace_json, self.random_state) + else: + # random samples and pick best with model + candidate_x = [_random_config(self.searchspace_json, self.random_state) for _ in range(self.sample_size)] + + # The model has NaN issue when all the candidates are same + # Also we can save the predict time when this happens + if all(x == candidate_x[0] for x in candidate_x): + return candidate_x[0] + + x_test = 
np.array([np.array(list(xi.values())) for xi in candidate_x]) + m, v = self.model.predict(x_test) + + # The model has NaN issue when all the candidates are very close + if np.isnan(m).any() or np.isnan(v).any(): + return candidate_x[0] + + mean = torch.Tensor(m) + sigma = torch.Tensor(v) + u = (mean - torch.Tensor([0.95]).expand_as(mean)) / sigma + normal = Normal(torch.zeros_like(u), torch.ones_like(u)) + ucdf = normal.cdf(u) + updf = torch.exp(normal.log_prob(u)) + ei = sigma * (updf + u * ucdf) + + if self.optimize_mode == 'maximize': + ind = torch.argmax(ei) + else: + ind = torch.argmin(ei) + new_x = candidate_x[ind] + return new_x + + def update_search_space(self, search_space): + validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform', 'qloguniform']) + self.searchspace_json = search_space + self.random_state = np.random.RandomState() + + def import_data(self, data): + for d in data: + self.x.append(d['parameter']) + self.y.append(self._get_default_value(d['value'])) + self._update_model() + + def _update_model(self): + _logger.info('Updating model on %d samples', len(self.x)) + x_arr = [] + for x in self.x: + x_arr.append([x[k] for k in sorted(x.keys())]) + try: + self.model.train(np.array(x_arr), np.array(self.y), do_optimize=True) + except np.linalg.LinAlgError as e: + warnings.warn(f'numpy linalg error encountered in DNGO model training: {e}') + self._model_initialized = True + + def _get_default_value(self, value): + if isinstance(value, dict) and 'default' in value: + return value['default'] + elif isinstance(value, float): + return value + else: + raise ValueError(f'Unsupported value: {value}') + + +class DNGOClassArgsValidator(ClassArgsValidator): + # DNGO tuner do not have much input arg, so the validation is actually hardly used + def validate_class_args(self, **kwargs): + pass diff --git a/nni/algorithms/hpo/evolution_tuner.py b/nni/algorithms/hpo/evolution_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..9277dcca3caac30134a0b787e4ed23e48295c855 --- /dev/null +++ b/nni/algorithms/hpo/evolution_tuner.py @@ -0,0 +1,283 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +evolution_tuner.py +""" + +import copy +import random +import logging + +from collections import deque +import numpy as np +from schema import Schema, Optional + +import nni +from nni import ClassArgsValidator +from nni.tuner import Tuner +from nni.utils import OptimizeMode, extract_scalar_reward, split_index, json2parameter, json2space + +logger = logging.getLogger(__name__) + +class Individual: + """ + Indicidual class to store the indv info. + + Attributes + ---------- + config : str + Search space. + info : str + The str to save information of individual. + result : float + The final metric of a individual. + """ + + def __init__(self, config=None, info=None, result=None): + """ + Parameters + ---------- + config : str + A config to represent a group of parameters. 
+ info : str + result : float + save_dir : str + """ + self.config = config + self.result = result + self.info = info + + def __str__(self): + return "info: " + str(self.info) + \ + ", config :" + str(self.config) + ", result: " + str(self.result) + +class EvolutionClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('population_size'): self.range('population_size', int, 0, 99999), + }).validate(kwargs) + +class EvolutionTuner(Tuner): + """ + EvolutionTuner is tuner using navie evolution algorithm. + """ + + def __init__(self, optimize_mode="maximize", population_size=32): + """ + Parameters + ---------- + optimize_mode : str, default 'maximize' + population_size : int + initial population size. The larger population size, + the better evolution performance. + """ + self.optimize_mode = OptimizeMode(optimize_mode) + self.population_size = population_size + + self.searchspace_json = None + self.running_trials = {} + self.num_running_trials = 0 + self.random_state = None + self.population = None + self.space = None + self.credit = 0 # record the unsatisfied trial requests + self.send_trial_callback = None + self.param_ids = deque() + + def update_search_space(self, search_space): + """ + Update search space. + + Search_space contains the information that user pre-defined. + + Parameters + ---------- + search_space : dict + """ + self.searchspace_json = search_space + self.space = json2space(self.searchspace_json) + + self.random_state = np.random.RandomState() + self.population = [] + + for _ in range(self.population_size): + self._random_generate_individual() + + def trial_end(self, parameter_id, success, **kwargs): + """ + To deal with trial failure. If a trial fails, + random generate the parameters and add into the population. + Parameters + ---------- + parameter_id : int + Unique identifier for hyper-parameters used by this trial. + success : bool + True if the trial successfully completed; False if failed or terminated. + **kwargs + Not used + """ + self.num_running_trials -= 1 + logger.info('trial (%d) end', parameter_id) + + if not success: + self.running_trials.pop(parameter_id) + self._random_generate_individual() + + if self.credit > 1: + param_id = self.param_ids.popleft() + config = self._generate_individual(param_id) + logger.debug('Send new trial (%d, %s) for reducing credit', param_id, config) + self.send_trial_callback(param_id, config) + self.credit -= 1 + self.num_running_trials += 1 + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """ + Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects. + Parameters + ---------- + parameter_id_list : list of int + Unique identifiers for each set of requested hyper-parameters. + **kwargs + Not used + Returns + ------- + list + A list of newly generated configurations + """ + + result = [] + if 'st_callback' in kwargs: + self.send_trial_callback = kwargs['st_callback'] + else: + logger.warning('Send trial callback is not found in kwargs. 
Evolution tuner might not work properly.') + for parameter_id in parameter_id_list: + had_exception = False + try: + logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + self.num_running_trials += 1 + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def _random_generate_individual(self): + is_rand = dict() + for item in self.space: + is_rand[item] = True + + config = json2parameter(self.searchspace_json, is_rand, self.random_state) + self.population.append(Individual(config=config)) + + def _generate_individual(self, parameter_id): + """ + This function will generate the config for a trial. + If at the first generation, randomly generates individuals to satisfy self.population_size. + Otherwise, random choose a pair of individuals and compare their fitnesses. + The worst of the pair will be removed. Copy the best of the pair and mutate it to generate a new individual. + + Parameters + ---------- + parameter_id : int + + Returns + ------- + dict + A group of candaidte parameters that evolution tuner generated. + """ + pos = -1 + + for i in range(len(self.population)): + if self.population[i].result is None: + pos = i + break + + if pos != -1: + indiv = copy.deepcopy(self.population[pos]) + self.population.pop(pos) + else: + random.shuffle(self.population) + # avoid only 1 individual has result + if len(self.population) > 1 and self.population[0].result < self.population[1].result: + self.population[0] = self.population[1] + + # mutation on the worse individual + space = json2space(self.searchspace_json, + self.population[0].config) + is_rand = dict() + mutation_pos = space[random.randint(0, len(space)-1)] + + for i in range(len(self.space)): + is_rand[self.space[i]] = (self.space[i] == mutation_pos) + config = json2parameter( + self.searchspace_json, is_rand, self.random_state, self.population[0].config) + + if len(self.population) > 1: + self.population.pop(1) + + indiv = Individual(config=config) + + # remove "_index" from config and save params-id + self.running_trials[parameter_id] = indiv + config = split_index(indiv.config) + return config + + + def generate_parameters(self, parameter_id, **kwargs): + """ + This function will returns a dict of trial (hyper-)parameters. + If no trial configration for now, self.credit plus 1 to send the config later + + Parameters + ---------- + parameter_id : int + + Returns + ------- + dict + One newly generated configuration. + """ + if not self.population: + raise RuntimeError('The population is empty') + + if self.num_running_trials >= self.population_size: + logger.warning("No enough trial config, population_size is suggested to be larger than trialConcurrency") + self.credit += 1 + self.param_ids.append(parameter_id) + raise nni.NoMoreTrialError('no more parameters now.') + + return self._generate_individual(parameter_id) + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Record the result from a trial + + Parameters + ---------- + parameter_id : int + parameters : dict + value : dict/float + if value is dict, it should have "default" key. + value is final metrics of the trial. 
+
+        """
+        reward = extract_scalar_reward(value)
+
+        if parameter_id not in self.running_trials:
+            raise RuntimeError('Received parameter_id %s not in running_trials.' % parameter_id)
+
+        # restore the parameters that contain "_index"
+        config = self.running_trials[parameter_id].config
+        self.running_trials.pop(parameter_id)
+
+        if self.optimize_mode == OptimizeMode.Minimize:
+            reward = -reward
+
+        indiv = Individual(config=config, result=reward)
+        self.population.append(indiv)
+
+    def import_data(self, data):
+        pass
diff --git a/nni/algorithms/hpo/gp_tuner/__init__.py b/nni/algorithms/hpo/gp_tuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..17bedd38f4ee1fefea0a3d73ab1b5d3fb7ef2aed
--- /dev/null
+++ b/nni/algorithms/hpo/gp_tuner/__init__.py
@@ -0,0 +1 @@
+from .gp_tuner import GPTuner, GPClassArgsValidator
diff --git a/nni/algorithms/hpo/gp_tuner/gp_tuner.py b/nni/algorithms/hpo/gp_tuner/gp_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..6beaf2c3a9ebab6c6bae686fe2086bb4fbc8b726
--- /dev/null
+++ b/nni/algorithms/hpo/gp_tuner/gp_tuner.py
@@ -0,0 +1,183 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+GPTuner is a Bayesian Optimization method where Gaussian Process is used for modeling loss functions.
+
+See :class:`GPTuner` for details.
+"""
+
+import warnings
+import logging
+import numpy as np
+from schema import Schema, Optional
+
+from sklearn.gaussian_process.kernels import Matern
+from sklearn.gaussian_process import GaussianProcessRegressor
+
+from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
+from nni.tuner import Tuner
+from nni.utils import OptimizeMode, extract_scalar_reward
+
+from .target_space import TargetSpace
+from .util import UtilityFunction, acq_max
+
+logger = logging.getLogger("GP_Tuner_AutoML")
+
+class GPClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
+            Optional('utility'): self.choices('utility', 'ei', 'ucb', 'poi'),
+            Optional('kappa'): float,
+            Optional('xi'): float,
+            Optional('nu'): float,
+            Optional('alpha'): float,
+            Optional('cold_start_num'): int,
+            Optional('selection_num_warm_up'): int,
+            Optional('selection_num_starting_points'): int,
+        }).validate(kwargs)
+
+class GPTuner(Tuner):
+    """
+    GPTuner is a Bayesian Optimization method where Gaussian Process is used for modeling loss functions.
+
+    Parameters
+    ----------
+    optimize_mode : str
+        optimize mode, 'maximize' or 'minimize', by default 'maximize'
+    utility : str
+        utility function (also called 'acquisition function') to use, which can be 'ei', 'ucb' or 'poi'. By default 'ei'.
+    kappa : float
+        value used by utility function 'ucb'. The bigger kappa is, the more exploratory the tuner will be. By default 5.
+    xi : float
+        used by utility functions 'ei' and 'poi'. The bigger xi is, the more exploratory the tuner will be. By default 0.
+    nu : float
+        used to specify the Matern kernel. The smaller nu, the less smooth the approximated function is. By default 2.5.
+    alpha : float
+        Used to specify the Gaussian Process Regressor. Larger values correspond to an increased noise level in the observations.
+        By default 1e-6.
+    cold_start_num : int
+        Number of random explorations to perform before the Gaussian Process. By default 10.
+    selection_num_warm_up : int
+        Number of random points to evaluate for getting the point which maximizes the acquisition function. By default 100000.
+    selection_num_starting_points : int
+        Number of times to run L-BFGS-B from a random starting point after the warmup. By default 250.
+    """
+
+    def __init__(self, optimize_mode="maximize", utility='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6, cold_start_num=10,
+                 selection_num_warm_up=100000, selection_num_starting_points=250):
+        self._optimize_mode = OptimizeMode(optimize_mode)
+
+        # utility function related
+        self._utility = utility
+        self._kappa = kappa
+        self._xi = xi
+
+        # target space
+        self._space = None
+
+        self._random_state = np.random.RandomState()
+
+        # nu, alpha are GPR related params
+        self._gp = GaussianProcessRegressor(
+            kernel=Matern(nu=nu),
+            alpha=alpha,
+            normalize_y=True,
+            n_restarts_optimizer=25,
+            random_state=self._random_state
+        )
+        # num of random evaluations before GPR
+        self._cold_start_num = cold_start_num
+
+        # params for acq_max
+        self._selection_num_warm_up = selection_num_warm_up
+        self._selection_num_starting_points = selection_num_starting_points
+
+        # num of imported data
+        self._supplement_data_num = 0
+
+    def update_search_space(self, search_space):
+        """
+        Update self.bounds and self.types from the search_space.json file.
+
+        Override of the abstract method in :class:`~nni.tuner.Tuner`.
+        """
+        validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform', 'qloguniform'])
+        self._space = TargetSpace(search_space, self._random_state)
+
+    def generate_parameters(self, parameter_id, **kwargs):
+        """
+        Method which provides one set of hyper-parameters.
+        If the number of trial results is lower than cold_start_num, GPTuner will first randomly generate some parameters.
+        Otherwise, it chooses the parameters via the Gaussian Process model.
+
+        Override of the abstract method in :class:`~nni.tuner.Tuner`.
+        """
+        if self._space.len() < self._cold_start_num:
+            results = self._space.random_sample()
+        else:
+            # Sklearn's GP throws a large number of warnings at times, but
+            # we don't really need to see them here.
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore")
+                self._gp.fit(self._space.params, self._space.target)
+
+            util = UtilityFunction(
+                kind=self._utility, kappa=self._kappa, xi=self._xi)
+
+            results = acq_max(
+                f_acq=util.utility,
+                gp=self._gp,
+                y_max=self._space.target.max(),
+                bounds=self._space.bounds,
+                space=self._space,
+                num_warmup=self._selection_num_warm_up,
+                num_starting_points=self._selection_num_starting_points
+            )
+
+        results = self._space.array_to_params(results)
+        logger.info("Generated parameters:\n %s", results)
+        return results
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        """
+        Method invoked when a trial reports its final result.
+
+        Override of the abstract method in :class:`~nni.tuner.Tuner`.
+        """
+        value = extract_scalar_reward(value)
+        if self._optimize_mode == OptimizeMode.Minimize:
+            value = -value
+
+        logger.info("Received trial result.")
+        logger.info("value: %s", value)
+        logger.info("parameter: %s", parameters)
+        self._space.register(parameters, value)
+
+    def import_data(self, data):
+        """
+        Import additional data for tuning.
+
+        Override of the abstract method in :class:`~nni.tuner.Tuner`.
+ """ + _completed_num = 0 + for trial_info in data: + logger.info( + "Importing data, current processing progress %s / %s", _completed_num, len(data)) + _completed_num += 1 + assert "parameter" in trial_info + _params = trial_info["parameter"] + assert "value" in trial_info + _value = trial_info['value'] + if not _value: + logger.info( + "Useless trial data, value is %s, skip this trial data.", _value) + continue + self._supplement_data_num += 1 + _parameter_id = '_'.join( + ["ImportData", str(self._supplement_data_num)]) + self.receive_trial_result( + parameter_id=_parameter_id, parameters=_params, value=_value) + logger.info("Successfully import data to GP tuner.") diff --git a/nni/algorithms/hpo/gp_tuner/target_space.py b/nni/algorithms/hpo/gp_tuner/target_space.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee52c0e9969a90f931bd9a9e43b194f354d81c4 --- /dev/null +++ b/nni/algorithms/hpo/gp_tuner/target_space.py @@ -0,0 +1,295 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Tool class to hold the param-space coordinates (X) and target values (Y). +""" + +import numpy as np +import nni.parameter_expressions as parameter_expressions + + +def _hashable(params): + """ + Transform list params to tuple format. Ensure that an point is hashable by a python dict. + + Parameters + ---------- + params : numpy array + array format of parameters + + Returns + ------- + tuple + tuple format of parameters + """ + return tuple(map(float, params)) + + +class TargetSpace(): + """ + Holds the param-space coordinates (X) and target values (Y) + + Parameters + ---------- + pbounds : dict + Dictionary with parameters names and legal values. + + random_state : int, RandomState, or None + optionally specify a seed for a random number generator, by default None. 
+ """ + + def __init__(self, pbounds, random_state=None): + self._random_state = random_state + + # Get the name of the parameters + self._keys = sorted(pbounds) + + # Create an array with parameters bounds + self._bounds = np.array( + [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])] + ) + + # check values type + for _bound in self._bounds: + if _bound['_type'] == 'choice': + try: + [float(val) for val in _bound['_value']] + except ValueError: + raise ValueError("GP Tuner supports only numerical values") + + # preallocated memory for X and Y points + self._params = np.empty(shape=(0, self.dim)) + self._target = np.empty(shape=(0)) + + # keep track of unique points we have seen so far + self._cache = {} + + def __contains__(self, params): + """ + check if a parameter is already registered + + Parameters + ---------- + params : numpy array + + Returns + ------- + bool + True if the parameter is already registered, else false + """ + return _hashable(params) in self._cache + + def len(self): + """ + length of registered params and targets + + Returns + ------- + int + """ + assert len(self._params) == len(self._target) + return len(self._target) + + @property + def params(self): + """ + registered parameters + + Returns + ------- + numpy array + """ + return self._params + + @property + def target(self): + """ + registered target values + + Returns + ------- + numpy array + """ + return self._target + + @property + def dim(self): + """ + dimension of parameters + + Returns + ------- + int + """ + return len(self._keys) + + @property + def keys(self): + """ + keys of parameters + + Returns + ------- + numpy array + """ + return self._keys + + @property + def bounds(self): + """ + bounds of parameters + + Returns + ------- + numpy array + """ + return self._bounds + + def params_to_array(self, params): + """ + dict to array + + Parameters + ---------- + params : dict + dict format of parameters + + Returns + ------- + numpy array + array format of parameters + """ + try: + assert set(params) == set(self.keys) + except AssertionError: + raise ValueError( + "Parameters' keys ({}) do ".format(sorted(params)) + + "not match the expected set of keys ({}).".format(self.keys) + ) + return np.asarray([params[key] for key in self.keys]) + + def array_to_params(self, x): + """ + array to dict + + maintain int type if the paramters is defined as int in search_space.json + Parameters + ---------- + x : numpy array + array format of parameters + + Returns + ------- + dict + dict format of parameters + """ + try: + assert len(x) == len(self.keys) + except AssertionError: + raise ValueError( + "Size of array ({}) is different than the ".format(len(x)) + + "expected number of parameters ({}).".format(self.dim) + ) + + params = {} + for i, _bound in enumerate(self._bounds): + if _bound['_type'] == 'choice' and all(isinstance(val, int) for val in _bound['_value']): + params.update({self.keys[i]: int(x[i])}) + elif _bound['_type'] in ['randint']: + params.update({self.keys[i]: int(x[i])}) + else: + params.update({self.keys[i]: x[i]}) + + return params + + def register(self, params, target): + """ + Append a point and its target value to the known data. 
+ + Parameters + ---------- + params : dict + parameters + + target : float + target function value + """ + + x = self.params_to_array(params) + if x in self: + print('Data point {} is not unique'.format(x)) + + # Insert data into unique dictionary + self._cache[_hashable(x.ravel())] = target + + self._params = np.concatenate([self._params, x.reshape(1, -1)]) + self._target = np.concatenate([self._target, [target]]) + + def random_sample(self): + """ + Creates a random point within the bounds of the space. + + Returns + ------- + numpy array + one groupe of parameter + """ + params = np.empty(self.dim) + for col, _bound in enumerate(self._bounds): + if _bound['_type'] == 'choice': + params[col] = parameter_expressions.choice( + _bound['_value'], self._random_state) + elif _bound['_type'] == 'randint': + params[col] = self._random_state.randint( + _bound['_value'][0], _bound['_value'][1], size=1) + elif _bound['_type'] == 'uniform': + params[col] = parameter_expressions.uniform( + _bound['_value'][0], _bound['_value'][1], self._random_state) + elif _bound['_type'] == 'quniform': + params[col] = parameter_expressions.quniform( + _bound['_value'][0], _bound['_value'][1], _bound['_value'][2], self._random_state) + elif _bound['_type'] == 'loguniform': + params[col] = parameter_expressions.loguniform( + _bound['_value'][0], _bound['_value'][1], self._random_state) + elif _bound['_type'] == 'qloguniform': + params[col] = parameter_expressions.qloguniform( + _bound['_value'][0], _bound['_value'][1], _bound['_value'][2], self._random_state) + + return params + + def max(self): + """ + Get maximum target value found and its corresponding parameters. + + Returns + ------- + dict + target value and parameters, empty dict if nothing registered + """ + try: + res = { + 'target': self.target.max(), + 'params': dict( + zip(self.keys, self.params[self.target.argmax()]) + ) + } + except ValueError: + res = {} + return res + + def res(self): + """ + Get all target values found and corresponding parameters. + + Returns + ------- + list + a list of target values and their corresponding parameters + """ + params = [dict(zip(self.keys, p)) for p in self.params] + + return [ + {"target": target, "params": param} + for target, param in zip(self.target, params) + ] diff --git a/nni/algorithms/hpo/gp_tuner/util.py b/nni/algorithms/hpo/gp_tuner/util.py new file mode 100644 index 0000000000000000000000000000000000000000..6926f988997a1e0b1d0bd6286dd7f091c38fc8da --- /dev/null +++ b/nni/algorithms/hpo/gp_tuner/util.py @@ -0,0 +1,235 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +utility functions and classes for GPTuner +""" + +import warnings +import numpy as np +from scipy.stats import norm +from scipy.optimize import minimize + + +def _match_val_type(vals, bounds): + """ + Update values in the array, to match their corresponding type, make sure the value is legal. + + Parameters + ---------- + vals : numpy array + values of parameters + bounds : numpy array + list of dictionary which stores parameters names and legal values. 
+
+def _match_val_type(vals, bounds):
+    """
+    Update values in the array to match their corresponding types, making sure each value is legal.
+
+    Parameters
+    ----------
+    vals : numpy array
+        values of parameters
+    bounds : numpy array
+        list of dictionaries storing parameter names and legal values
+
+    Returns
+    -------
+    vals_new : list
+        The closest legal values to the original values
+    """
+    vals_new = []
+
+    for i, bound in enumerate(bounds):
+        _type = bound['_type']
+        if _type == "choice":
+            # Find the closest integer in the array, vals_bounds
+            # pylint: disable=cell-var-from-loop
+            vals_new.append(min(bound['_value'], key=lambda x: abs(x - vals[i])))
+        elif _type in ['quniform', 'randint']:
+            vals_new.append(np.around(vals[i]))
+        else:
+            vals_new.append(vals[i])
+
+    return vals_new
+
+
+def acq_max(f_acq, gp, y_max, bounds, space, num_warmup, num_starting_points):
+    """
+    A function to find the maximum of the acquisition function.
+
+    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
+    optimization method: first by sampling ``num_warmup`` points at random,
+    and then running L-BFGS-B from ``num_starting_points`` random starting points.
+
+    Parameters
+    ----------
+    f_acq : UtilityFunction.utility
+        The acquisition function, which returns its point-wise value.
+
+    gp : GaussianProcessRegressor
+        A Gaussian process fitted to the relevant data.
+
+    y_max : float
+        The current maximum known value of the target function.
+
+    bounds : numpy array
+        The variable bounds that limit the search for the acquisition maximum.
+
+    num_warmup : int
+        number of times to randomly sample the acquisition function
+
+    num_starting_points : int
+        number of times to run scipy.minimize
+
+    Returns
+    -------
+    numpy array
+        The parameter which achieves the maximum of the acquisition function.
+    """
+
+    # Warm up with random points
+    x_tries = [space.random_sample()
+               for _ in range(int(num_warmup))]
+    ys = f_acq(x_tries, gp=gp, y_max=y_max)
+    x_max = x_tries[ys.argmax()]
+    max_acq = ys.max()
+
+    # Explore the parameter space more thoroughly
+    x_seeds = [space.random_sample() for _ in range(int(num_starting_points))]
+
+    bounds_minmax = np.array(
+        [[bound['_value'][0], bound['_value'][-1]] for bound in bounds])
+
+    for x_try in x_seeds:
+        # Find the minimum of minus the acquisition function
+        res = minimize(lambda x: -f_acq(x.reshape(1, -1), gp=gp, y_max=y_max),
+                       x_try.reshape(1, -1),
+                       bounds=bounds_minmax,
+                       method="L-BFGS-B")
+
+        # Skip this seed if the optimization was not successful
+        if not res.success:
+            continue
+
+        # Store it if better than the previous maximum.
+        if max_acq is None or -res.fun[0] >= max_acq:
+            x_max = _match_val_type(res.x, bounds)
+            max_acq = -res.fun[0]
+
+    # Clip output to make sure it lies within the bounds. Due to floating
+    # point technicalities this is not always the case.
+    return np.clip(x_max, bounds_minmax[:, 0], bounds_minmax[:, 1])
+
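The two-stage strategy above (cheap random warm-up, then multi-start L-BFGS-B on the negated objective) is generic; here is a minimal, self-contained sketch of the same pattern, where the quadratic objective is only a stand-in for a real acquisition function:

```python
# Generic sketch of the maximization scheme used by acq_max above.
import numpy as np
from scipy.optimize import minimize

def f(x):
    return -((x - 0.3) ** 2).sum()           # toy objective, maximum at 0.3

rng = np.random.RandomState(0)
lo, hi = np.zeros(2), np.ones(2)

# Stage 1: random warm-up.
tries = rng.uniform(lo, hi, size=(1000, 2))
best_x = tries[np.argmax([f(x) for x in tries])]
best_y = f(best_x)

# Stage 2: L-BFGS-B from a few random seeds, minimizing -f.
for seed in rng.uniform(lo, hi, size=(10, 2)):
    res = minimize(lambda x: -f(x), seed,
                   bounds=list(zip(lo, hi)), method='L-BFGS-B')
    if res.success and -res.fun > best_y:
        best_x, best_y = res.x, -res.fun

print(np.round(best_x, 3))                   # approximately [0.3, 0.3]
```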
+
+class UtilityFunction():
+    """
+    A class to compute different acquisition function values.
+
+    Parameters
+    ----------
+    kind : string
+        specification of the utility function to use
+    kappa : float
+        parameter used by the 'ucb' acquisition function
+    xi : float
+        parameter used by the 'ei' and 'poi' acquisition functions
+    """
+
+    def __init__(self, kind, kappa, xi):
+        self._kappa = kappa
+        self._xi = xi
+
+        if kind not in ['ucb', 'ei', 'poi']:
+            err = "The utility function " \
+                  "{} has not been implemented, " \
+                  "please choose one of ucb, ei, or poi.".format(kind)
+            raise NotImplementedError(err)
+        self._kind = kind
+
+    def utility(self, x, gp, y_max):
+        """
+        Compute the utility function value at x.
+
+        Parameters
+        ----------
+        x : numpy array
+            parameters
+        gp : GaussianProcessRegressor
+        y_max : float
+            maximum target value observed so far
+
+        Returns
+        -------
+        numpy array
+            the corresponding utility values; None if ``kind`` is illegal
+        """
+        if self._kind == 'ucb':
+            return self._ucb(x, gp, self._kappa)
+        if self._kind == 'ei':
+            return self._ei(x, gp, y_max, self._xi)
+        if self._kind == 'poi':
+            return self._poi(x, gp, y_max, self._xi)
+        return None
+
+    @staticmethod
+    def _ucb(x, gp, kappa):
+        """
+        Upper Confidence Bound (UCB) utility function
+
+        Parameters
+        ----------
+        x : numpy array
+            parameters
+        gp : GaussianProcessRegressor
+        kappa : float
+
+        Returns
+        -------
+        numpy array
+        """
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            mean, std = gp.predict(x, return_std=True)
+
+        return mean + kappa * std
+
+    @staticmethod
+    def _ei(x, gp, y_max, xi):
+        """
+        Expected Improvement (EI) utility function
+
+        Parameters
+        ----------
+        x : numpy array
+            parameters
+        gp : GaussianProcessRegressor
+        y_max : float
+            maximum target value observed so far
+        xi : float
+
+        Returns
+        -------
+        numpy array
+        """
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            mean, std = gp.predict(x, return_std=True)
+
+        z = (mean - y_max - xi) / std
+        return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
+
+    @staticmethod
+    def _poi(x, gp, y_max, xi):
+        """
+        Probability Of Improvement (POI) utility function
+
+        Parameters
+        ----------
+        x : numpy array
+            parameters
+        gp : GaussianProcessRegressor
+        y_max : float
+            maximum target value observed so far
+        xi : float
+
+        Returns
+        -------
+        numpy array
+        """
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            mean, std = gp.predict(x, return_std=True)
+
+        z = (mean - y_max - xi) / std
+        return norm.cdf(z)
diff --git a/nni/algorithms/hpo/gridsearch_tuner.py b/nni/algorithms/hpo/gridsearch_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..37c73d16377fe22b1f81c763f43e237d9f473b11
--- /dev/null
+++ b/nni/algorithms/hpo/gridsearch_tuner.py
@@ -0,0 +1,253 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Grid search tuner for hyper-parameter optimization.
+
+For categorical parameters this tuner fully explores all combinations.
+For numerical parameters it samples them at progressively decreasing intervals.
+
+Use this tuner if you have abundant resources and want to find the strictly optimal parameters.
+
+Grid search tuner has no arguments.
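The "progressively decreasing intervals" mentioned above can be sketched on the unit interval. This toy version only shows the midpoint-insertion idea; it ignores the q/log handling and the duplicate elimination that the real tuner performs via `_less`:

```python
# Sketch of the epoch-by-epoch grid subdivision on [0, 1].
def next_epoch(grid, divisions):
    new_points, new_divisions = [], []
    for lo, hi in divisions:
        mid = (lo + hi) / 2            # insert midpoint of each interval
        new_points.append(mid)
        new_divisions += [(lo, mid), (mid, hi)]
    return grid + new_points, new_divisions

grid, divisions = [0.5], [(0, 0.5), (0.5, 1)]        # first epoch
grid, divisions = next_epoch(grid, divisions)        # adds 1/4, 3/4
grid, divisions = next_epoch(grid, divisions)        # adds 1/8, 3/8, 5/8, 7/8
print(sorted(grid))
```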
+""" + +__all__ = ['GridSearchTuner'] + +import logging +import math + +import numpy as np +from scipy.special import erfinv # pylint: disable=no-name-in-module + +import nni +from nni.common.hpo_utils import ParameterSpec, deformat_parameters, format_search_space +from nni.tuner import Tuner + +_logger = logging.getLogger('nni.tuner.gridsearch') + +## +# Grid search is a simple algorithm if only categorical parameters are considered. +# But to support continuous space, things get tricky. +# +# To support continuous space, we divide search process into "epochs". +# The first epoch only explores middle point of uniform and normal parameters. +# When first epoch is fully explored, the algorithm starts second epoch, +# where it divides non-categorical spaces by adding quartile points into the grid. +# Then in third epoch it adds [1/8, 3/8, 5/8, 7/8], and so on. +# +# We divide normal distributed spaces using inverse function of CDF. +# For example the 1/4 point of a normal distribution is defined as X where `normal_cdf(X) = 1/4`. +# +# Here is an example: +# +# search space: +# x: choices(5, 7) +# y: normal(0, 1) +# z: quniform(2, 3, 1) +# +# grid of first epoch: +# x: [5, 7] +# y: [1/2] +# z: [1/2] (results in [2], because round(2.5) == 2) +# generated parameters: +# (5,0,2) (7,0,2) +# +# grid of second epoch: +# x: [5, 7] +# y: [1/2, 1/4, 3/4] (results in [0, -0.67, 0.67]) +# z: [1/2, 3/4] (results in [2, 3], 1/4 is eliminated due to duplication) +# generated parameters: +# (5,0,3) (5,-0.67,2) (5,-0.67,3) (5,0.67,2) (5,0.67,3) +# (7,0,3) (7,-0.67,2) (7,-0.67,3) (7,0.67,2) (7,0.67,3) +## + +class GridSearchTuner(Tuner): + def __init__(self): + self.space = None + + # the grid to search in this epoch + # when the space is fully explored, grid is set to None + self.grid = None # list[int | float] + + # a paremter set is internally expressed as a vector + # for each dimension i, self.vector[i] is the parameter's index in self.grid[i] + # in second epoch of above example, vector [1, 2, 0] means parameters {x: 7, y: 0.67, z: 2} + self.vector = None # list[int] + + # this tells which parameters are derived from previous epoch + # in second epoch of above example, epoch_bar is [2, 1, 1] + self.epoch_bar = None # list[int] + + # this stores which intervals are possibly divisible (low < high after log and q) + # in first epoch of above example, divisions are: + # {1: [(0,1/2), (1/2,1)], 2: [(1/2,1)]} + # in second epoch: + # {1: [(0,1/4), (1/4,1/2), (1/2,3/4), (3/4,1)], 2: [(1/2,3/4)]} + # and in third epoch: + # {1: [(0,1/8), ..., (7/8,1)], 2: []} + self.divisions = {} # dict[int, list[tuple[float, float]]] + + # dumped JSON string of all tried parameters + self.history = set() + + def update_search_space(self, space): + self.space = format_search_space(space) + if not self.space: # the tuner will crash in this case, report it explicitly + raise ValueError('Search space is empty') + self._init_grid() + + def generate_parameters(self, *args, **kwargs): + while True: + params = self._suggest() + if params is None: + raise nni.NoMoreTrialError('Search space fully explored') + params = deformat_parameters(params, self.space) + + params_str = nni.dump(params, sort_keys=True) + if params_str not in self.history: + self.history.add(params_str) + return params + + def receive_trial_result(self, *args, **kwargs): + pass + + def import_data(self, data): + # TODO + # use tuple to dedup in case of order/precision issue causes matching failed + # and remove `epoch_bar` to use uniform dedup mechanism + for trial 
in data: + params_str = nni.dump(trial['parameter'], sort_keys=True) + self.history.add(params_str) + + def _suggest(self): + # returns next parameter set, or None if the space is already fully explored + while True: + if self.grid is None: # search space fully explored + return None + + self._next_vector() + + if self.vector is None: # epoch end, update grid and retry + self._next_grid() + continue + + old = all((self.vector[i] < self.epoch_bar[i]) for i in range(len(self.space))) + if old: # already explored in past epochs + continue + + # this vector is valid, stop + _logger.debug(f'vector: {self.vector}') + return self._current_parameters() + + def _next_vector(self): + # iterate to next vector of this epoch, set vector to None if epoch end + if self.vector is None: # first vector in this epoch + self.vector = [0] * len(self.space) + return + + # deal with nested choice, don't touch nested spaces that are not chosen by current vector + activated_dims = [] + params = self._current_parameters() + for i, spec in enumerate(self.space.values()): + if spec.is_activated_in(params): + activated_dims.append(i) + + for i in reversed(activated_dims): + if self.vector[i] + 1 < len(self.grid[i]): + self.vector[i] += 1 + return + else: + self.vector[i] = 0 + + self.vector = None # the loop ends without returning, no more vector in this epoch + + def _next_grid(self): + # update grid information (grid, epoch_bar, divisions) for next epoch + updated = False + for i, spec in enumerate(self.space.values()): + self.epoch_bar[i] = len(self.grid[i]) + if not spec.categorical: + # further divide intervals + new_vals = [] # values to append to grid + new_divs = [] # sub-intervals + for l, r in self.divisions[i]: + mid = (l + r) / 2 + diff_l = _less(l, mid, spec) + diff_r = _less(mid, r, spec) + if diff_l and diff_r: # we can skip these for non-q, but it will complicate the code + new_vals.append(mid) + updated = True + if diff_l: + new_divs.append((l, mid)) + if diff_r: + new_divs.append((mid, r)) + self.grid[i] += new_vals + self.divisions[i] = new_divs + + if not updated: # fully explored + _logger.info('Search space has been fully explored') + self.grid = None + else: + size = _grid_size_info(self.grid) + _logger.info(f'Grid subdivided, new size: {size}') + + def _init_grid(self): + self.epoch_bar = [0 for _ in self.space] + self.grid = [None for _ in self.space] + for i, spec in enumerate(self.space.values()): + if spec.categorical: + self.grid[i] = list(range(spec.size)) + else: + self.grid[i] = [0.5] + self.divisions[i] = [] + if _less(0, 0.5, spec): + self.divisions[i].append((0, 0.5)) + if _less(0.5, 1, spec): + self.divisions[i].append((0.5, 1)) + + size = _grid_size_info(self.grid) + _logger.info(f'Grid initialized, size: {size}') + + def _current_parameters(self): + # convert self.vector to "formatted" parameters + params = {} + for i, spec in enumerate(self.space.values()): + if spec.is_activated_in(params): + x = self.grid[i][self.vector[i]] + if spec.categorical: + params[spec.key] = x + else: + params[spec.key] = _cdf_inverse(x, spec) + return params + +def _less(x, y, spec): + #if spec.q is None: # TODO: comment out because of edge case UT uniform(99.9, 99.9) + # return x < y + real_x = _deformat_single_parameter(_cdf_inverse(x, spec), spec) + real_y = _deformat_single_parameter(_cdf_inverse(y, spec), spec) + return real_x < real_y + +def _cdf_inverse(x, spec): + # inverse function of spec's cumulative distribution function + if spec.normal_distributed: + return spec.mu + spec.sigma * 
math.sqrt(2) * erfinv(2 * x - 1) + else: + return spec.low + (spec.high - spec.low) * x + +def _deformat_single_parameter(x, spec): + if math.isinf(x): + return x + spec_dict = spec._asdict() + spec_dict['key'] = (spec.name,) + spec = ParameterSpec(**spec_dict) + params = deformat_parameters({spec.key: x}, {spec.key: spec}) + return params[spec.name] + +def _grid_size_info(grid): + if len(grid) == 1: + return str(len(grid[0])) + sizes = [len(candidates) for candidates in grid] + mul = '×'.join(str(s) for s in sizes) + total = np.prod(sizes) + return f'({mul}) = {total}' diff --git a/nni/algorithms/hpo/hyperband_advisor.py b/nni/algorithms/hpo/hyperband_advisor.py new file mode 100644 index 0000000000000000000000000000000000000000..f42d5936921087e3efe946853099e4fef1c33235 --- /dev/null +++ b/nni/algorithms/hpo/hyperband_advisor.py @@ -0,0 +1,469 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +hyperband_advisor.py +""" + +import copy +import logging +import math +import sys + +import numpy as np +from schema import Schema, Optional + +import nni +from nni import ClassArgsValidator +from nni.common.hpo_utils import validate_search_space +from nni.runtime.common import multi_phase_enabled +from nni.runtime.msg_dispatcher_base import MsgDispatcherBase +from nni.runtime.protocol import CommandType, send +from nni.utils import NodeType, OptimizeMode, MetricType, extract_scalar_reward +from nni import parameter_expressions + +_logger = logging.getLogger(__name__) + +_next_parameter_id = 0 +_KEY = 'TRIAL_BUDGET' +_epsilon = 1e-6 + + +def create_parameter_id(): + """Create an id + + Returns + ------- + int + parameter id + """ + global _next_parameter_id + _next_parameter_id += 1 + return _next_parameter_id - 1 + + +def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-1): + """Create a full id for a specific bracket's hyperparameter configuration + + Parameters + ---------- + brackets_id: string + brackets id + brackets_curr_decay: + brackets curr decay + increased_id: int + increased id + + Returns + ------- + int + params id + """ + if increased_id == -1: + increased_id = str(create_parameter_id()) + params_id = '_'.join([brackets_id, + str(brackets_curr_decay), + increased_id]) + return params_id + + +def json2parameter(ss_spec, random_state): + """Randomly generate values for hyperparameters from hyperparameter space i.e., x. 
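The normal-distribution branch of `_cdf_inverse` in the grid search tuner above relies on the identity that the x-quantile of N(mu, sigma) equals mu + sigma * sqrt(2) * erfinv(2x - 1). A quick numerical check (the values are arbitrary):

```python
# Verifying the inverse-CDF identity used by _cdf_inverse above.
import math
from scipy.special import erfinv
from scipy.stats import norm

mu, sigma, x = 0.0, 1.0, 0.25
via_erfinv = mu + sigma * math.sqrt(2) * erfinv(2 * x - 1)
assert abs(via_erfinv - norm.ppf(x, mu, sigma)) < 1e-9
print(via_erfinv)    # about -0.674, the first quartile of N(0, 1)
```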
+ + Parameters + ---------- + ss_spec: + hyperparameter space + random_state: + random operator to generate random values + + Returns + ------- + Parameter: + Parameters in this experiment + """ + if isinstance(ss_spec, dict): + if NodeType.TYPE in ss_spec.keys(): + _type = ss_spec[NodeType.TYPE] + _value = ss_spec[NodeType.VALUE] + if _type == 'choice': + _index = random_state.randint(len(_value)) + chosen_params = json2parameter(ss_spec[NodeType.VALUE][_index], random_state) + else: + chosen_params = getattr(parameter_expressions, _type)(*(_value + [random_state])) + else: + chosen_params = dict() + for key in ss_spec.keys(): + chosen_params[key] = json2parameter(ss_spec[key], random_state) + elif isinstance(ss_spec, list): + chosen_params = list() + for _, subspec in enumerate(ss_spec): + chosen_params.append(json2parameter(subspec, random_state)) + else: + chosen_params = copy.deepcopy(ss_spec) + return chosen_params + + +class Bracket(): + """A bracket in Hyperband, all the information of a bracket is managed by an instance of this class + + Parameters + ---------- + bracket_id: string + The id of this bracket, usually be set as '{Hyperband index}-{SH iteration index}' + s: int + The current SH iteration index. + s_max: int + total number of SH iterations + eta: float + In each iteration, a complete run of sequential halving is executed. In it, + after evaluating each configuration on the same subset size, only a fraction of + 1/eta of them 'advances' to the next round. + R: + the budget associated with each stage + optimize_mode: str + optimize mode, 'maximize' or 'minimize' + """ + + def __init__(self, bracket_id, s, s_max, eta, R, optimize_mode): + self.bracket_id = bracket_id + self.s = s + self.s_max = s_max + self.eta = eta + self.n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon) + self.r = R / eta ** s + self.i = 0 + self.hyper_configs = [] # [ {id: params}, {}, ... ] + self.configs_perf = [] # [ {id: [seq, acc]}, {}, ... ] + self.num_configs_to_run = [] # [ n, n, n, ... ] + self.num_finished_configs = [] # [ n, n, n, ... ] + self.optimize_mode = OptimizeMode(optimize_mode) + self.no_more_trial = False + + def is_completed(self): + """check whether this bracket has sent out all the hyperparameter configurations""" + return self.no_more_trial + + def get_n_r(self): + """return the values of n and r for the next round""" + return math.floor(self.n / self.eta ** self.i + _epsilon), math.floor(self.r * self.eta ** self.i + _epsilon) + + def increase_i(self): + """i means the ith round. 
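For intuition, the bracket sizes implied by the formulas for ``n`` and ``r`` above can be tabulated. R = 81 and eta = 3 are example values only, and the ``_epsilon`` guard used in the code is omitted:

```python
# Worked example of the Hyperband bracket arithmetic (hypothetical R, eta).
import math

R, eta = 81, 3
s_max = math.floor(math.log(R, eta))     # 4
for s in reversed(range(s_max + 1)):
    n = math.ceil((s_max + 1) * eta ** s / (s + 1))
    r = R / eta ** s
    print(f's={s}: start with n={n} configs at budget r={r:g}')
# s=4: n=81 at r=1, s=3: n=34 at r=3, s=2: n=15 at r=9,
# s=1: n=8 at r=27, s=0: n=5 at r=81
```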
Increase i by 1""" + self.i += 1 + if self.i > self.s: + self.no_more_trial = True + + def set_config_perf(self, i, parameter_id, seq, value): + """update trial's latest result with its sequence number, e.g., epoch number or batch number + + Parameters + ---------- + i: int + the ith round + parameter_id: int + the id of the trial/parameter + seq: int + sequence number, e.g., epoch number or batch number + value: int + latest result with sequence number seq + + Returns + ------- + None + """ + if parameter_id in self.configs_perf[i]: + if self.configs_perf[i][parameter_id][0] < seq: + self.configs_perf[i][parameter_id] = [seq, value] + else: + self.configs_perf[i][parameter_id] = [seq, value] + + def inform_trial_end(self, i): + """If the trial is finished and the corresponding round (i.e., i) has all its trials finished, + it will choose the top k trials for the next round (i.e., i+1) + + Parameters + ---------- + i: int + the ith round + """ + global _KEY + self.num_finished_configs[i] += 1 + _logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d', self.bracket_id, self.i, i, + self.num_finished_configs[i], self.num_configs_to_run[i]) + if self.num_finished_configs[i] >= self.num_configs_to_run[i] \ + and self.no_more_trial is False: + # choose candidate configs from finished configs to run in the next round + assert self.i == i + 1 + this_round_perf = self.configs_perf[i] + if self.optimize_mode is OptimizeMode.Maximize: + sorted_perf = sorted(this_round_perf.items(), key=lambda kv: kv[1][1], reverse=True) # reverse + else: + sorted_perf = sorted(this_round_perf.items(), key=lambda kv: kv[1][1]) + _logger.debug('bracket %s next round %s, sorted hyper configs: %s', self.bracket_id, self.i, sorted_perf) + next_n, next_r = self.get_n_r() + _logger.debug('bracket %s next round %s, next_n=%d, next_r=%d', self.bracket_id, self.i, next_n, next_r) + hyper_configs = dict() + for k in range(next_n): + params_id = sorted_perf[k][0] + params = self.hyper_configs[i][params_id] + params[_KEY] = next_r # modify r + # generate new id + increased_id = params_id.split('_')[-1] + new_id = create_bracket_parameter_id(self.bracket_id, self.i, increased_id) + hyper_configs[new_id] = params + self._record_hyper_configs(hyper_configs) + return [[key, value] for key, value in hyper_configs.items()] + return None + + def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state): + """Randomly generate num hyperparameter configurations from search space + + Parameters + ---------- + num: int + the number of hyperparameter configurations + + Returns + ------- + list + a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...] + """ + global _KEY + assert self.i == 0 + hyperparameter_configs = dict() + for _ in range(num): + params_id = create_bracket_parameter_id(self.bracket_id, self.i) + params = json2parameter(searchspace_json, random_state) + params[_KEY] = r + hyperparameter_configs[params_id] = params + self._record_hyper_configs(hyperparameter_configs) + return [[key, value] for key, value in hyperparameter_configs.items()] + + def _record_hyper_configs(self, hyper_configs): + """after generating one round of hyperconfigs, this function records the generated hyperconfigs, + creates a dict to record the performance when those hyperconifgs are running, set the number of finished configs + in this round to be 0, and increase the round number. 
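A stripped-down sketch of the promotion step performed by ``inform_trial_end`` above: once a round finishes, configurations are ranked by their latest metric and only the top ``next_n`` advance with the larger budget ``next_r``. The ids and metrics here are made up:

```python
# Toy successive-halving promotion, mirroring inform_trial_end above.
perf = {'0-2_0_a': [10, 0.71], '0-2_0_b': [10, 0.93], '0-2_0_c': [10, 0.85]}

next_n, next_r = 1, 27          # hypothetical values from get_n_r()
ranked = sorted(perf.items(), key=lambda kv: kv[1][1], reverse=True)  # maximize
promoted = [params_id for params_id, _ in ranked[:next_n]]
print(promoted)                 # ['0-2_0_b'] runs next with budget 27
```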
+ + Parameters + ---------- + hyper_configs: list + the generated hyperconfigs + """ + self.hyper_configs.append(hyper_configs) + self.configs_perf.append(dict()) + self.num_finished_configs.append(0) + self.num_configs_to_run.append(len(hyper_configs)) + self.increase_i() + +class HyperbandClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('exec_mode'): self.choices('exec_mode', 'serial', 'parallelism'), + Optional('R'): int, + Optional('eta'): int + }).validate(kwargs) + +class Hyperband(MsgDispatcherBase): + """ + Hyperband inherit from MsgDispatcherBase rather than Tuner, because it integrates both tuner's functions and assessor's functions. + This is an implementation that could fully leverage available resources or follow the algorithm process, + i.e., high parallelism or serial. + A single execution of Hyperband takes a finite budget of (s_max + 1)B. + + Parameters + ---------- + R: int + the maximum amount of resource that can be allocated to a single configuration + eta: int + the variable that controls the proportion of configurations discarded in each round of SuccessiveHalving + optimize_mode: str + optimize mode, 'maximize' or 'minimize' + exec_mode: str + execution mode, 'serial' or 'parallelism' + """ + + def __init__(self, R=60, eta=3, optimize_mode='maximize', exec_mode='parallelism'): + """B = (s_max + 1)R""" + super(Hyperband, self).__init__() + self.R = R + self.eta = eta + self.brackets = dict() # dict of Bracket + self.generated_hyper_configs = [] # all the configs waiting for run + self.completed_hyper_configs = [] # all the completed configs + self.s_max = math.floor(math.log(self.R, self.eta) + _epsilon) + self.curr_s = self.s_max + self.curr_hb = 0 + self.exec_mode = exec_mode + self.curr_bracket_id = None + + self.searchspace_json = None + self.random_state = None + self.optimize_mode = OptimizeMode(optimize_mode) + + # This is for the case that nnimanager requests trial config, but tuner cannot provide immediately. + # In this case, tuner increases self.credit to issue a trial config sometime later. + self.credit = 0 + + # record the latest parameter_id of the trial job trial_job_id. + # if there is no running parameter_id, self.job_id_para_id_map[trial_job_id] == None + # new trial job is added to this dict and finished trial job is removed from it. 
+ self.job_id_para_id_map = dict() + + def handle_initialize(self, data): + """callback for initializing the advisor + Parameters + ---------- + data: dict + search space + """ + self.handle_update_search_space(data) + send(CommandType.Initialized, '') + + def handle_request_trial_jobs(self, data): + """ + Parameters + ---------- + data: int + number of trial jobs + """ + self.credit += data + + for _ in range(self.credit): + self._request_one_trial_job() + + def _request_one_trial_job(self): + ret = self._get_one_trial_job() + if ret is not None: + send(CommandType.NewTrialJob, nni.dump(ret)) + self.credit -= 1 + + def _get_one_trial_job(self): + """get one trial job, i.e., one hyperparameter configuration.""" + if not self.generated_hyper_configs: + if self.exec_mode == 'parallelism' or \ + (self.exec_mode == 'serial' and (self.curr_bracket_id is None or self.brackets[self.curr_bracket_id].is_completed())): + if self.curr_s < 0: + self.curr_s = self.s_max + self.curr_hb += 1 + _logger.debug('create a new bracket, self.curr_hb=%d, self.curr_s=%d', self.curr_hb, self.curr_s) + self.curr_bracket_id = '{}-{}'.format(self.curr_hb, self.curr_s) + self.brackets[self.curr_bracket_id] = Bracket( + self.curr_bracket_id, self.curr_s, self.s_max, self.eta, self.R, self.optimize_mode) + next_n, next_r = self.brackets[self.curr_bracket_id].get_n_r() + _logger.debug('new bracket, next_n=%d, next_r=%d', next_n, next_r) + assert self.searchspace_json is not None and self.random_state is not None + generated_hyper_configs = self.brackets[self.curr_bracket_id].get_hyperparameter_configurations(next_n, next_r, + self.searchspace_json, + self.random_state) + self.generated_hyper_configs = generated_hyper_configs.copy() + self.curr_s -= 1 + else: + ret = { + 'parameter_id': '-1_0_0', + 'parameter_source': 'algorithm', + 'parameters': '' + } + send(CommandType.NoMoreTrialJobs, nni.dump(ret)) + return None + + assert self.generated_hyper_configs + params = self.generated_hyper_configs.pop(0) + ret = { + 'parameter_id': params[0], + 'parameter_source': 'algorithm', + 'parameters': params[1] + } + return ret + + def handle_update_search_space(self, data): + """data: JSON object, which is search space + """ + validate_search_space(data) + self.searchspace_json = data + self.random_state = np.random.RandomState() + + def _handle_trial_end(self, parameter_id): + """ + Parameters + ---------- + parameter_id: parameter id of the finished config + """ + bracket_id, i, _ = parameter_id.split('_') + hyper_configs = self.brackets[bracket_id].inform_trial_end(int(i)) + if hyper_configs is not None: + _logger.debug('bracket %s next round %s, hyper_configs: %s', bracket_id, i, hyper_configs) + self.generated_hyper_configs = self.generated_hyper_configs + hyper_configs + for _ in range(self.credit): + self._request_one_trial_job() + + def handle_trial_end(self, data): + """ + Parameters + ---------- + data: dict() + it has three keys: trial_job_id, event, hyper_params + trial_job_id: the id generated by training service + event: the job's state + hyper_params: the hyperparameters (a string) generated and returned by tuner + """ + hyper_params = nni.load(data['hyper_params']) + self._handle_trial_end(hyper_params['parameter_id']) + if data['trial_job_id'] in self.job_id_para_id_map: + del self.job_id_para_id_map[data['trial_job_id']] + + def handle_report_metric_data(self, data): + """ + Parameters + ---------- + data: + it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'. 
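The credit mechanism described above can be summarized in a toy class (not NNI's API): surplus requests are remembered as credit and spent as soon as new configurations become available.

```python
# Simplified sketch of the credit bookkeeping used by the advisor above.
class CreditDemo:
    def __init__(self):
        self.credit = 0
        self.queue = []                     # stands in for generated_hyper_configs

    def handle_request_trial_jobs(self, n):
        self.credit += n
        self._drain()

    def add_configs(self, configs):
        self.queue.extend(configs)
        self._drain()

    def _drain(self):
        # Spend credit while configurations are available.
        while self.credit and self.queue:
            print('dispatch', self.queue.pop(0))
            self.credit -= 1

d = CreditDemo()
d.handle_request_trial_jobs(2)              # nothing ready yet, credit = 2
d.add_configs(['cfg1', 'cfg2', 'cfg3'])     # dispatches cfg1 and cfg2
```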
+ + Raises + ------ + ValueError + Data type not supported + """ + if 'value' in data: + data['value'] = nni.load(data['value']) + # multiphase? need to check + if data['type'] == MetricType.REQUEST_PARAMETER: + assert multi_phase_enabled() + assert data['trial_job_id'] is not None + assert data['parameter_index'] is not None + assert data['trial_job_id'] in self.job_id_para_id_map + self._handle_trial_end(self.job_id_para_id_map[data['trial_job_id']]) + ret = self._get_one_trial_job() + if data['trial_job_id'] is not None: + ret['trial_job_id'] = data['trial_job_id'] + if data['parameter_index'] is not None: + ret['parameter_index'] = data['parameter_index'] + self.job_id_para_id_map[data['trial_job_id']] = ret['parameter_id'] + send(CommandType.SendTrialJobParameter, nni.dump(ret)) + else: + value = extract_scalar_reward(data['value']) + bracket_id, i, _ = data['parameter_id'].split('_') + + # add to self.job_id_para_id_map here, + # because when the first parameter_id is created, trial_job_id is not known yet. + if data['trial_job_id'] in self.job_id_para_id_map: + assert self.job_id_para_id_map[data['trial_job_id']] == data['parameter_id'] + else: + self.job_id_para_id_map[data['trial_job_id']] = data['parameter_id'] + + if data['type'] == MetricType.FINAL: + # sys.maxsize indicates this value is from FINAL metric data, because data['sequence'] from FINAL metric + # and PERIODICAL metric are independent, thus, not comparable. + self.brackets[bracket_id].set_config_perf(int(i), data['parameter_id'], sys.maxsize, value) + self.completed_hyper_configs.append(data) + elif data['type'] == MetricType.PERIODICAL: + self.brackets[bracket_id].set_config_perf(int(i), data['parameter_id'], data['sequence'], value) + else: + raise ValueError('Data type not supported: {}'.format(data['type'])) + + def handle_add_customized_trial(self, data): + pass + + def handle_import_data(self, data): + pass diff --git a/nni/algorithms/hpo/hyperopt_tuner.py b/nni/algorithms/hpo/hyperopt_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..07aaa1246f986465004721fe03b9e310a76cce44 --- /dev/null +++ b/nni/algorithms/hpo/hyperopt_tuner.py @@ -0,0 +1,501 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +hyperopt_tuner.py +""" + +import copy +import logging + +import hyperopt as hp +import numpy as np +from schema import Optional, Schema +from nni import ClassArgsValidator +from nni.common.hpo_utils import validate_search_space +from nni.tuner import Tuner +from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, split_index + +logger = logging.getLogger('hyperopt_AutoML') + + +def json2space(in_x, name=NodeType.ROOT): + """ + Change json to search space in hyperopt. + + Parameters + ---------- + in_x : dict/list/str/int/float + The part of json. + name : str + name could be NodeType.ROOT, NodeType.TYPE, NodeType.VALUE or NodeType.INDEX, NodeType.NAME. 
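For reference, a sketch of what ``json2space`` produces: each NNI search-space entry becomes a hyperopt expression whose label encodes the nesting path and type (and loguniform bounds are log-transformed, as in the code below). The space here is hypothetical and assumes hyperopt is installed:

```python
# Hand-built equivalent of a json2space result for a tiny space.
import numpy as np
import hyperopt as hp
import hyperopt.pyll.stochastic

space = {
    'lr': hp.hp.loguniform('root[lr]-loguniform', np.log(1e-4), np.log(1e-1)),
    'act': hp.hp.choice('root[act]-choice', ['relu', 'tanh']),
}
print(hp.pyll.stochastic.sample(space))   # e.g. {'act': 'tanh', 'lr': 0.012}
```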
+ """ + out_y = copy.deepcopy(in_x) + if isinstance(in_x, dict): + if NodeType.TYPE in in_x.keys(): + _type = in_x[NodeType.TYPE] + name = name + '-' + _type + _value = json2space(in_x[NodeType.VALUE], name=name) + if _type == 'choice': + out_y = hp.hp.choice(name, _value) + elif _type == 'randint': + out_y = hp.hp.randint(name, _value[1] - _value[0]) + else: + if _type in ['loguniform', 'qloguniform']: + _value[:2] = np.log(_value[:2]) + out_y = getattr(hp.hp, _type)(name, *_value) + else: + out_y = dict() + for key in in_x.keys(): + out_y[key] = json2space(in_x[key], name + '[%s]' % str(key)) + elif isinstance(in_x, list): + out_y = list() + for i, x_i in enumerate(in_x): + if isinstance(x_i, dict): + if NodeType.NAME not in x_i.keys(): + raise RuntimeError( + '\'_name\' key is not found in this nested search space.' + ) + out_y.append(json2space(x_i, name + '[%d]' % i)) + return out_y + + +def json2parameter(in_x, parameter, name=NodeType.ROOT): + """ + Change json to parameters. + """ + out_y = copy.deepcopy(in_x) + if isinstance(in_x, dict): + if NodeType.TYPE in in_x.keys(): + _type = in_x[NodeType.TYPE] + name = name + '-' + _type + if _type == 'choice': + _index = parameter[name] + out_y = { + NodeType.INDEX: + _index, + NodeType.VALUE: + json2parameter(in_x[NodeType.VALUE][_index], + parameter, + name=name + '[%d]' % _index) + } + else: + if _type in ['quniform', 'qloguniform']: + out_y = np.clip(parameter[name], in_x[NodeType.VALUE][0], in_x[NodeType.VALUE][1]) + elif _type == 'randint': + out_y = parameter[name] + in_x[NodeType.VALUE][0] + else: + out_y = parameter[name] + else: + out_y = dict() + for key in in_x.keys(): + out_y[key] = json2parameter(in_x[key], parameter, + name + '[%s]' % str(key)) + elif isinstance(in_x, list): + out_y = list() + for i, x_i in enumerate(in_x): + if isinstance(x_i, dict): + if NodeType.NAME not in x_i.keys(): + raise RuntimeError( + '\'_name\' key is not found in this nested search space.' + ) + out_y.append(json2parameter(x_i, parameter, name + '[%d]' % i)) + return out_y + + +def json2vals(in_x, vals, out_y, name=NodeType.ROOT): + if isinstance(in_x, dict): + if NodeType.TYPE in in_x.keys(): + _type = in_x[NodeType.TYPE] + name = name + '-' + _type + + try: + out_y[name] = vals[NodeType.INDEX] + # TODO - catch exact Exception + except Exception: + out_y[name] = vals + + if _type == 'choice': + _index = vals[NodeType.INDEX] + json2vals(in_x[NodeType.VALUE][_index], + vals[NodeType.VALUE], + out_y, + name=name + '[%d]' % _index) + if _type == 'randint': + out_y[name] -= in_x[NodeType.VALUE][0] + else: + for key in in_x.keys(): + json2vals(in_x[key], vals[key], out_y, + name + '[%s]' % str(key)) + elif isinstance(in_x, list): + for i, temp in enumerate(in_x): + # nested json + if isinstance(temp, dict): + if NodeType.NAME not in temp.keys(): + raise RuntimeError( + '\'_name\' key is not found in this nested search space.' + ) + else: + json2vals(temp, vals[i], out_y, name + '[%d]' % i) + else: + json2vals(temp, vals[i], out_y, name + '[%d]' % i) + + +def _add_index(in_x, parameter): + """ + change parameters in NNI format to parameters in hyperopt format(This function also support nested dict.). 
+    For example, it receives parameters like:
+        {'dropout_rate': 0.8, 'conv_size': 3, 'hidden_size': 512}
+    and changes them to the hyperopt format:
+        {'dropout_rate': 0.8, 'conv_size': {'_index': 1, '_value': 3}, 'hidden_size': {'_index': 1, '_value': 512}}
+    """
+    if NodeType.TYPE not in in_x:  # if at the top level
+        out_y = dict()
+        for key, value in parameter.items():
+            out_y[key] = _add_index(in_x[key], value)
+        return out_y
+    elif isinstance(in_x, dict):
+        value_type = in_x[NodeType.TYPE]
+        value_format = in_x[NodeType.VALUE]
+        if value_type == "choice":
+            choice_name = parameter[0] if isinstance(parameter,
+                                                     list) else parameter
+            for pos, item in enumerate(
+                    value_format):  # here value_format is a list
+                if isinstance(
+                        item,
+                        list):  # this format is ["choice_key", format_dict]
+                    choice_key = item[0]
+                    choice_value_format = item[1]
+                    if choice_key == choice_name:
+                        return {
+                            NodeType.INDEX: pos,
+                            NodeType.VALUE: [
+                                choice_name,
+                                _add_index(choice_value_format, parameter[1])
+                            ]
+                        }
+                elif choice_name == item:
+                    return {NodeType.INDEX: pos, NodeType.VALUE: item}
+        else:
+            return parameter
+    return None  # note: this fallback was not written by the original author; feel free to modify it if you think it's incorrect
+
+class HyperoptClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
+            Optional('parallel_optimize'): bool,
+            Optional('constant_liar_type'): self.choices('constant_liar_type', 'min', 'max', 'mean')
+        }).validate(kwargs)
+
+class HyperoptTuner(Tuner):
+    """
+    HyperoptTuner is a tuner that uses the hyperopt algorithms.
+    """
+
+    def __init__(self, algorithm_name, optimize_mode='minimize',
+                 parallel_optimize=False, constant_liar_type='min'):
+        """
+        Parameters
+        ----------
+        algorithm_name : str
+            algorithm_name is one of "tpe", "random_search" and "anneal".
+        optimize_mode : str
+        parallel_optimize : bool
+            For more details, see docs/en_US/Tuner/HyperoptTuner.md
+        constant_liar_type : str
+            constant_liar_type is one of "min", "max" and "mean"
+            For more details, see docs/en_US/Tuner/HyperoptTuner.md
+        """
+        self.algorithm_name = algorithm_name
+        self.optimize_mode = OptimizeMode(optimize_mode)
+        self.json = None
+        self.total_data = {}
+        self.rval = None
+        self.supplement_data_num = 0
+
+        self.parallel = parallel_optimize
+        if self.parallel:
+            self.CL_rval = None
+            self.constant_liar_type = constant_liar_type
+            self.running_data = []
+            self.optimal_y = None
+
+    def _choose_tuner(self, algorithm_name):
+        """
+        Parameters
+        ----------
+        algorithm_name : str
+            algorithm_name is one of "tpe", "random_search" and "anneal"
+        """
+        if algorithm_name == 'tpe':
+            return hp.tpe.suggest
+        if algorithm_name == 'random_search':
+            return hp.rand.suggest
+        if algorithm_name == 'anneal':
+            return hp.anneal.suggest
+        raise RuntimeError('Unsupported tuner algorithm in hyperopt.')
+
+    def update_search_space(self, search_space):
+        """
+        Update the search space definition in the tuner from the search_space parameter.
+
+        Called when the experiment is first set up, and when the search space is updated in the WebUI.
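A sketch of the "constant liar" bookkeeping enabled by ``parallel_optimize`` above: unfinished trials are temporarily assigned a fixed stand-in value (the min, max, or mean of observed results) so the optimizer avoids re-proposing their points. All numbers here are made up.

```python
# Toy version of the constant-liar stand-in value computation.
observed = [0.92, 0.88, 0.95]                 # finished trial metrics
running = ['trial-4', 'trial-5']              # still in flight

liar_type = 'min'
liar = {'min': min(observed),
        'max': max(observed),
        'mean': sum(observed) / len(observed)}[liar_type]

fake_history = observed + [liar] * len(running)
print(fake_history)           # history fed to a copy of the optimizer state
```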
+ + Parameters + ---------- + search_space : dict + """ + validate_search_space(search_space) + self.json = search_space + + search_space_instance = json2space(self.json) + rstate = np.random.RandomState() + trials = hp.Trials() + domain = hp.Domain(None, + search_space_instance, + pass_expr_memo_ctrl=None) + algorithm = self._choose_tuner(self.algorithm_name) + self.rval = hp.FMinIter(algorithm, + domain, + trials, + max_evals=-1, + rstate=rstate, + verbose=0) + self.rval.catch_eval_exceptions = False + + def generate_parameters(self, parameter_id, **kwargs): + """ + Returns a set of trial (hyper-)parameters, as a serializable object. + + Parameters + ---------- + parameter_id : int + + Returns + ------- + params : dict + """ + total_params = self.get_suggestion(random_search=False) + # avoid generating same parameter with concurrent trials because hyperopt doesn't support parallel mode + if total_params in self.total_data.values(): + # but it can cause duplicate parameter rarely + total_params = self.get_suggestion(random_search=True) + self.total_data[parameter_id] = total_params + + if self.parallel: + self.running_data.append(parameter_id) + + params = split_index(total_params) + return params + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Record an observation of the objective function + + Parameters + ---------- + parameter_id : int + parameters : dict + value : dict/float + if value is dict, it should have "default" key. + value is final metrics of the trial. + """ + reward = extract_scalar_reward(value) + # restore the paramsters contains '_index' + if parameter_id not in self.total_data: + raise RuntimeError('Received parameter_id not in total_data.') + params = self.total_data[parameter_id] + + # code for parallel + if self.parallel: + constant_liar = kwargs.get('constant_liar', False) + + if constant_liar: + rval = self.CL_rval + else: + rval = self.rval + # ignore duplicated reported final result (due to aware of intermedate result) + if parameter_id not in self.running_data: + logger.info("Received duplicated final result with parameter id: %s", parameter_id) + return + self.running_data.remove(parameter_id) + + # update the reward of optimal_y + if self.optimal_y is None: + if self.constant_liar_type == 'mean': + self.optimal_y = [reward, 1] + else: + self.optimal_y = reward + else: + if self.constant_liar_type == 'mean': + _sum = self.optimal_y[0] + reward + _number = self.optimal_y[1] + 1 + self.optimal_y = [_sum, _number] + elif self.constant_liar_type == 'min': + self.optimal_y = min(self.optimal_y, reward) + elif self.constant_liar_type == 'max': + self.optimal_y = max(self.optimal_y, reward) + logger.debug("Update optimal_y with reward, optimal_y = %s", self.optimal_y) + else: + rval = self.rval + + + if self.optimize_mode is OptimizeMode.Maximize: + reward = -reward + + domain = rval.domain + trials = rval.trials + + new_id = len(trials) + + rval_specs = [None] + rval_results = [domain.new_result()] + rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)] + + vals = params + idxs = dict() + + out_y = dict() + json2vals(self.json, vals, out_y) + vals = out_y + for key in domain.params: + if key in [NodeType.VALUE, NodeType.INDEX]: + continue + if key not in vals or vals[key] is None or vals[key] == []: + idxs[key] = vals[key] = [] + else: + idxs[key] = [new_id] + vals[key] = [vals[key]] + + self.miscs_update_idxs_vals(rval_miscs, + idxs, + vals, + idxs_map={new_id: new_id}, + assert_all_vals_used=False) + + trial 
= trials.new_trial_docs([new_id], rval_specs, rval_results, + rval_miscs)[0] + trial['result'] = {'loss': reward, 'status': 'ok'} + trial['state'] = hp.JOB_STATE_DONE + trials.insert_trial_docs([trial]) + trials.refresh() + + def miscs_update_idxs_vals(self, + miscs, + idxs, + vals, + assert_all_vals_used=True, + idxs_map=None): + """ + Unpack the idxs-vals format into the list of dictionaries that is + `misc`. + + Parameters + ---------- + idxs_map : dict + idxs_map is a dictionary of id->id mappings so that the misc['idxs'] can + contain different numbers than the idxs argument. + """ + if idxs_map is None: + idxs_map = {} + + assert set(idxs.keys()) == set(vals.keys()) + + misc_by_id = {m['tid']: m for m in miscs} + for m in miscs: + m['idxs'] = {key: [] for key in idxs} + m['vals'] = {key: [] for key in idxs} + + for key in idxs: + assert len(idxs[key]) == len(vals[key]) + for tid, val in zip(idxs[key], vals[key]): + tid = idxs_map.get(tid, tid) + if assert_all_vals_used or tid in misc_by_id: + misc_by_id[tid]['idxs'][key] = [tid] + misc_by_id[tid]['vals'][key] = [val] + + def get_suggestion(self, random_search=False): + """ + get suggestion from hyperopt + + Parameters + ---------- + random_search : bool + flag to indicate random search or not (default: {False}) + + Returns + ---------- + total_params : dict + parameter suggestion + """ + if self.parallel and len(self.total_data) > 20 and self.running_data and self.optimal_y is not None: + self.CL_rval = copy.deepcopy(self.rval) + if self.constant_liar_type == 'mean': + _constant_liar_y = self.optimal_y[0] / self.optimal_y[1] + else: + _constant_liar_y = self.optimal_y + for _parameter_id in self.running_data: + self.receive_trial_result(parameter_id=_parameter_id, parameters=None, value=_constant_liar_y, constant_liar=True) + rval = self.CL_rval + + random_state = np.random.randint(2**31 - 1) + else: + rval = self.rval + random_state = rval.rstate.randint(2**31 - 1) + + trials = rval.trials + algorithm = rval.algo + new_ids = rval.trials.new_trial_ids(1) + rval.trials.refresh() + + if random_search: + new_trials = hp.rand.suggest(new_ids, rval.domain, trials, + random_state) + else: + new_trials = algorithm(new_ids, rval.domain, trials, random_state) + rval.trials.refresh() + vals = new_trials[0]['misc']['vals'] + parameter = dict() + for key in vals: + try: + parameter[key] = vals[key][0].item() + except (KeyError, IndexError): + parameter[key] = None + + # remove '_index' from json2parameter and save params-id + total_params = json2parameter(self.json, parameter) + return total_params + + def import_data(self, data): + """ + Import additional data for tuning + + Parameters + ---------- + data: + a list of dictionarys, each of which has at least two keys, 'parameter' and 'value' + """ + _completed_num = 0 + for trial_info in data: + logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data)) + _completed_num += 1 + if self.algorithm_name == 'random_search': + return + assert "parameter" in trial_info + _params = trial_info["parameter"] + assert "value" in trial_info + _value = trial_info['value'] + if not _value: + logger.info("Useless trial data, value is %s, skip this trial data.", _value) + continue + self.supplement_data_num += 1 + _parameter_id = '_'.join( + ["ImportData", str(self.supplement_data_num)]) + self.total_data[_parameter_id] = _add_index(in_x=self.json, + parameter=_params) + self.receive_trial_result(parameter_id=_parameter_id, + parameters=_params, + value=_value) + 
logger.info("Successfully imported data to TPE/Anneal tuner.")
diff --git a/nni/algorithms/hpo/medianstop_assessor.py b/nni/algorithms/hpo/medianstop_assessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..56eb82a3c98e0ce65efaa794fa6ebea8ee989566
--- /dev/null
+++ b/nni/algorithms/hpo/medianstop_assessor.py
@@ -0,0 +1,125 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from schema import Schema, Optional
+
+from nni import ClassArgsValidator
+from nni.assessor import Assessor, AssessResult
+from nni.utils import extract_scalar_history
+
+logger = logging.getLogger('medianstop_Assessor')
+
+class MedianstopClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
+            Optional('start_step'): self.range('start_step', int, 0, 9999),
+        }).validate(kwargs)
+
+class MedianstopAssessor(Assessor):
+    """MedianstopAssessor implements the median stopping rule: it stops a pending trial X at step S
+    if the trial's best objective value by step S is strictly worse than the median value
+    of the running averages of all completed trials' objectives reported up to step S.
+
+    Parameters
+    ----------
+    optimize_mode : str
+        optimize mode, 'maximize' or 'minimize'
+    start_step : int
+        a trial is assessed only after it has reported at least start_step intermediate results
+    """
+    def __init__(self, optimize_mode='maximize', start_step=0):
+        self._start_step = start_step
+        self._running_history = dict()
+        self._completed_avg_history = dict()
+        if optimize_mode == 'maximize':
+            self._high_better = True
+        elif optimize_mode == 'minimize':
+            self._high_better = False
+        else:
+            self._high_better = True
+            logger.warning('unrecognized optimize_mode %s', optimize_mode)
+
+    def _update_data(self, trial_job_id, trial_history):
+        """update data
+
+        Parameters
+        ----------
+        trial_job_id : int
+            trial job id
+        trial_history : list
+            The history performance matrix of each trial
+        """
+        if trial_job_id not in self._running_history:
+            self._running_history[trial_job_id] = []
+        self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])
+
+    def trial_end(self, trial_job_id, success):
+        """trial_end
+
+        Parameters
+        ----------
+        trial_job_id : int
+            trial job id
+        success : bool
+            True if the trial finished successfully, False otherwise
+        """
+        if trial_job_id in self._running_history:
+            if success:
+                cnt = 0
+                history_sum = 0
+                self._completed_avg_history[trial_job_id] = []
+                for each in self._running_history[trial_job_id]:
+                    cnt += 1
+                    history_sum += each
+                    self._completed_avg_history[trial_job_id].append(history_sum / cnt)
+            self._running_history.pop(trial_job_id)
+        else:
+            logger.warning('trial_end: trial_job_id does not exist in running_history')
+
+    def assess_trial(self, trial_job_id, trial_history):
+        """assess_trial
+
+        Parameters
+        ----------
+        trial_job_id : int
+            trial job id
+        trial_history : list
+            The history performance matrix of each trial
+
+        Returns
+        -------
+        AssessResult
+            AssessResult.Good or AssessResult.Bad
+
+        Raises
+        ------
+        Exception
+            unrecognized exception in medianstop_assessor
+        """
+        curr_step = len(trial_history)
+        if curr_step < self._start_step:
+            return AssessResult.Good
+
+        scalar_trial_history = extract_scalar_history(trial_history)
+        self._update_data(trial_job_id, scalar_trial_history)
+        if self._high_better:
+            best_history = max(scalar_trial_history)
+        else:
+            best_history = 
min(scalar_trial_history) + + avg_array = [] + for id_ in self._completed_avg_history: + if len(self._completed_avg_history[id_]) >= curr_step: + avg_array.append(self._completed_avg_history[id_][curr_step - 1]) + if avg_array: + avg_array.sort() + if self._high_better: + median = avg_array[(len(avg_array)-1) // 2] + return AssessResult.Bad if best_history < median else AssessResult.Good + else: + median = avg_array[len(avg_array) // 2] + return AssessResult.Bad if best_history > median else AssessResult.Good + else: + return AssessResult.Good diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GMM/CreateModel.py b/nni/algorithms/hpo/metis_tuner/Regression_GMM/CreateModel.py new file mode 100644 index 0000000000000000000000000000000000000000..4846c9a67d1d4579456e5c28ce416b9e274566b4 --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GMM/CreateModel.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import sys +from operator import itemgetter + +import sklearn.mixture as mm + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def create_model(samples_x, samples_y_aggregation, percentage_goodbatch=0.34): + ''' + Create the Gaussian Mixture Model + ''' + samples = [samples_x[i] + [samples_y_aggregation[i]] + for i in range(0, len(samples_x))] + + # Sorts so that we can get the top samples + samples = sorted(samples, key=itemgetter(-1)) + samples_goodbatch_size = int(len(samples) * percentage_goodbatch) + samples_goodbatch = samples[0:samples_goodbatch_size] + samples_badbatch = samples[samples_goodbatch_size:] + + samples_x_goodbatch = [sample_goodbatch[0:-1] + for sample_goodbatch in samples_goodbatch] + #samples_y_goodbatch = [sample_goodbatch[-1] for sample_goodbatch in samples_goodbatch] + samples_x_badbatch = [sample_badbatch[0:-1] + for sample_badbatch in samples_badbatch] + + # === Trains GMM clustering models === # + #sys.stderr.write("[%s] Train GMM's GMM model\n" % (os.path.basename(__file__))) + bgmm_goodbatch = mm.BayesianGaussianMixture( + n_components=max(1, samples_goodbatch_size - 1)) + bad_n_components = max(1, len(samples_x) - samples_goodbatch_size - 1) + bgmm_badbatch = mm.BayesianGaussianMixture(n_components=bad_n_components) + bgmm_goodbatch.fit(samples_x_goodbatch) + bgmm_badbatch.fit(samples_x_badbatch) + + model = {} + model['clusteringmodel_good'] = bgmm_goodbatch + model['clusteringmodel_bad'] = bgmm_badbatch + return model diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GMM/Selection.py b/nni/algorithms/hpo/metis_tuner/Regression_GMM/Selection.py new file mode 100644 index 0000000000000000000000000000000000000000..0bca96647d4642afc7d46c8a259f1634b4c41d81 --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GMM/Selection.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import random +import sys + +from .. import lib_acquisition_function +from .. 
import lib_constraint_summation + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +CONSTRAINT_LOWERBOUND = None +CONSTRAINT_UPPERBOUND = None +CONSTRAINT_PARAMS_IDX = [] + + +def _ratio_scores(parameters_value, clusteringmodel_gmm_good, + clusteringmodel_gmm_bad): + ''' + The ratio is smaller the better + ''' + ratio = clusteringmodel_gmm_good.score( + [parameters_value]) / clusteringmodel_gmm_bad.score([parameters_value]) + sigma = 0 + return ratio, sigma + + +def selection_r(x_bounds, + x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + num_starting_points=100, + minimize_constraints_fun=None): + ''' + Select using different types. + ''' + minimize_starting_points = clusteringmodel_gmm_good.sample(n_samples=num_starting_points) + + outputs = selection(x_bounds, x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + minimize_starting_points[0], + minimize_constraints_fun) + + return outputs + + +def selection(x_bounds, + x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + Select the lowest mu value + ''' + results = lib_acquisition_function.next_hyperparameter_lowest_mu( + _ratio_scores, [clusteringmodel_gmm_good, clusteringmodel_gmm_bad], + x_bounds, x_types, minimize_starting_points, + minimize_constraints_fun=minimize_constraints_fun) + + return results + + +def _rand_with_constraints(x_bounds, x_types): + ''' + Random generate the variable with constraints + ''' + outputs = None + x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX] + x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX] + x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints, + x_types_withconstraints, + CONSTRAINT_LOWERBOUND, + CONSTRAINT_UPPERBOUND) + if x_val_withconstraints is not None: + outputs = [None] * len(x_bounds) + for i, _ in enumerate(CONSTRAINT_PARAMS_IDX): + outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i] + for i, _ in enumerate(outputs): + if outputs[i] is None: + outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1]) + return outputs + + +def _minimize_constraints_fun_summation(x): + ''' + Minimize constraints fun summation + ''' + summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX]) + return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GMM/__init__.py b/nni/algorithms/hpo/metis_tuner/Regression_GMM/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GP/CreateModel.py b/nni/algorithms/hpo/metis_tuner/Regression_GP/CreateModel.py new file mode 100644 index 0000000000000000000000000000000000000000..98d1f22ce3c62c0491f91138e8c1df7ebd5e65ed --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GP/CreateModel.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
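Before the GP code below, a toy version of the density-ratio idea used by ``Regression_GMM`` above: one mixture model is fit to the best samples and one to the rest, and candidates are ranked by the ratio of their log-likelihood scores (smaller is better, matching ``_ratio_scores``). All data and component counts here are made up.

```python
# Toy good/bad density ratio in the spirit of Regression_GMM above.
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(0)
good = rng.normal(0.0, 0.5, size=(20, 2))     # params of low-loss trials
bad = rng.normal(3.0, 0.5, size=(40, 2))      # params of high-loss trials

gmm_good = BayesianGaussianMixture(n_components=2).fit(good)
gmm_bad = BayesianGaussianMixture(n_components=2).fit(bad)

candidate = np.array([[0.1, -0.2]])
# score() returns the average log-likelihood of the candidate under each model.
ratio = gmm_good.score(candidate) / gmm_bad.score(candidate)
print(ratio)                                  # small for promising candidates
```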
+ +import os +import sys +import numpy + +import sklearn.gaussian_process as gp + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def create_model(samples_x, samples_y_aggregation, + n_restarts_optimizer=250, is_white_kernel=False): + ''' + Trains GP regression model + ''' + kernel = gp.kernels.ConstantKernel(constant_value=1, + constant_value_bounds=(1e-12, 1e12)) * \ + gp.kernels.Matern(nu=1.5) + if is_white_kernel is True: + kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12)) + regressor = gp.GaussianProcessRegressor(kernel=kernel, + n_restarts_optimizer=n_restarts_optimizer, + normalize_y=True, + alpha=1e-10) + regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation)) + + model = {} + model['model'] = regressor + model['kernel_prior'] = str(kernel) + model['kernel_posterior'] = str(regressor.kernel_) + model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta) + + return model diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GP/OutlierDetection.py b/nni/algorithms/hpo/metis_tuner/Regression_GP/OutlierDetection.py new file mode 100644 index 0000000000000000000000000000000000000000..f07a93dd3e80a8fd6f9b0e8ceb3e87a5338e915d --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GP/OutlierDetection.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +OutlierDectection.py +""" + +import os +import sys +from multiprocessing.dummy import Pool as ThreadPool + +from . import CreateModel as gp_create_model +from . import Prediction as gp_prediction + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def _outlierDetection_threaded(inputs): + """ + Detect the outlier + """ + [samples_idx, samples_x, samples_y_aggregation] = inputs + sys.stderr.write("[%s] DEBUG: Evaluating %dth of %d samples\n" + % (os.path.basename(__file__), samples_idx + 1, len(samples_x))) + outlier = None + + # Create a diagnostic regression model which removes the sample that we + # want to evaluate + diagnostic_regressor_gp = gp_create_model.create_model( + samples_x[0:samples_idx] + samples_x[samples_idx + 1:], + samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:]) + mu, sigma = gp_prediction.predict( + samples_x[samples_idx], diagnostic_regressor_gp['model']) + + # 2.33 is the z-score for 98% confidence level + if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma): + outlier = {"samples_idx": samples_idx, + "expected_mu": mu, + "expected_sigma": sigma, + "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)} + return outlier + + +def outlierDetection_threaded(samples_x, samples_y_aggregation): + """ + Use Multi-thread to detect the outlier + """ + outliers = [] + + threads_inputs = [[samples_idx, samples_x, samples_y_aggregation] + for samples_idx in range(0, len(samples_x))] + threads_pool = ThreadPool(min(4, len(threads_inputs))) + threads_results = threads_pool.map( + _outlierDetection_threaded, threads_inputs) + threads_pool.close() + threads_pool.join() + + for threads_result in threads_results: + if threads_result is not None: + outliers.append(threads_result) + else: + print("Error: threads_result is None.") + + outliers = outliers if outliers else None + return outliers + + +def outlierDetection(samples_x, samples_y_aggregation): + outliers = [] + for samples_idx, _ in enumerate(samples_x): + #sys.stderr.write("[%s] DEBUG: Evaluating %d of %d samples\n" + # \ % (os.path.basename(__file__), 
samples_idx + 1, len(samples_x))) + diagnostic_regressor_gp = gp_create_model.create_model(\ + samples_x[0:samples_idx] + samples_x[samples_idx + 1:],\ + samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:]) + mu, sigma = gp_prediction.predict(samples_x[samples_idx], + diagnostic_regressor_gp['model']) + # 2.33 is the z-score for 98% confidence level + if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma): + outliers.append({"samples_idx": samples_idx, + "expected_mu": mu, + "expected_sigma": sigma, + "difference": \ + abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}) + + outliers = outliers if outliers else None + return outliers diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GP/Prediction.py b/nni/algorithms/hpo/metis_tuner/Regression_GP/Prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..08a32f0e63c86a6fd7dddad558f96c708916e41e --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GP/Prediction.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import sys + +import numpy + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def predict(parameters_value, regressor_gp): + ''' + Predict by Gaussian Process Model + ''' + parameters_value = numpy.array(parameters_value).reshape(-1, len(parameters_value)) + mu, sigma = regressor_gp.predict(parameters_value, return_std=True) + + return mu[0], sigma[0] diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GP/Selection.py b/nni/algorithms/hpo/metis_tuner/Regression_GP/Selection.py new file mode 100644 index 0000000000000000000000000000000000000000..68383ed0f2e039d4fd77c64730d06932f8b85b77 --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/Regression_GP/Selection.py @@ -0,0 +1,97 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import random +import sys + +from .. import lib_acquisition_function +from .. import lib_constraint_summation +from .. import lib_data +from . 
import Prediction as gp_prediction
+
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+
+CONSTRAINT_LOWERBOUND = None
+CONSTRAINT_UPPERBOUND = None
+CONSTRAINT_PARAMS_IDX = []
+
+
+def selection_r(acquisition_function,
+                samples_y_aggregation,
+                x_bounds,
+                x_types,
+                regressor_gp,
+                num_starting_points=100,
+                minimize_constraints_fun=None):
+    '''
+    Run `selection` with randomly sampled starting points.
+    '''
+    minimize_starting_points = [lib_data.rand(x_bounds, x_types)
+                                for _ in range(0, num_starting_points)]
+    outputs = selection(acquisition_function, samples_y_aggregation,
+                        x_bounds, x_types, regressor_gp,
+                        minimize_starting_points,
+                        minimize_constraints_fun=minimize_constraints_fun)
+
+    return outputs
+
+def selection(acquisition_function,
+              samples_y_aggregation,
+              x_bounds, x_types,
+              regressor_gp,
+              minimize_starting_points,
+              minimize_constraints_fun=None):
+    '''
+    Select the next hyperparameter with the given acquisition function
+    ("ei", "lc" or "lm").
+    '''
+    outputs = None
+
+    sys.stderr.write("[%s] Exercise \"%s\" acquisition function\n"
+                     % (os.path.basename(__file__), acquisition_function))
+
+    if acquisition_function == "ei":
+        outputs = lib_acquisition_function.next_hyperparameter_expected_improvement(
+            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
+            samples_y_aggregation, minimize_starting_points,
+            minimize_constraints_fun=minimize_constraints_fun)
+    elif acquisition_function == "lc":
+        outputs = lib_acquisition_function.next_hyperparameter_lowest_confidence(
+            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
+            minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun)
+    elif acquisition_function == "lm":
+        outputs = lib_acquisition_function.next_hyperparameter_lowest_mu(
+            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
+            minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun)
+    return outputs
+
+def _rand_with_constraints(x_bounds, x_types):
+    '''
+    Randomly generate a value per parameter, subject to the summation constraints.
+    '''
+    outputs = None
+
+    x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX]
+    x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX]
+    x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints,
+                                                          x_types_withconstraints,
+                                                          CONSTRAINT_LOWERBOUND,
+                                                          CONSTRAINT_UPPERBOUND)
+    if x_val_withconstraints is not None:
+        outputs = [None] * len(x_bounds)
+
+        for i, _ in enumerate(CONSTRAINT_PARAMS_IDX):
+            outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i]
+
+        for i, _ in enumerate(outputs):
+            if outputs[i] is None:
+                outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1])
+    return outputs
+
+
+def _minimize_constraints_fun_summation(x):
+    '''
+    Check whether the summation constraint holds for x.
+    '''
+    summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX])
+    return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND
diff --git a/nni/algorithms/hpo/metis_tuner/Regression_GP/__init__.py b/nni/algorithms/hpo/metis_tuner/Regression_GP/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nni/algorithms/hpo/metis_tuner/__init__.py b/nni/algorithms/hpo/metis_tuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4f9ceba61960bab8b741d760448815a93b3ae0b
--- /dev/null
+++ b/nni/algorithms/hpo/metis_tuner/__init__.py
@@ -0,0 +1 @@
+from .metis_tuner import MetisTuner, MetisClassArgsValidator
diff --git a/nni/algorithms/hpo/metis_tuner/lib_acquisition_function.py b/nni/algorithms/hpo/metis_tuner/lib_acquisition_function.py
new file mode 100644
index 
0000000000000000000000000000000000000000..f1b1edfe0165b61c18753dfa823bc2bc10aa8d05 --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/lib_acquisition_function.py @@ -0,0 +1,197 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +lib_acquisition_function.py +""" + +import sys +import numpy + +from scipy.stats import norm +from scipy.optimize import minimize + +from . import lib_data + + +def next_hyperparameter_expected_improvement(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + samples_y_aggregation, + minimize_starting_points, + minimize_constraints_fun=None): + """ + "Expected Improvement" acquisition function + """ + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_expected_improvement, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, + fun_prediction_args, + x_bounds, + x_types, + samples_y_aggregation, + minimize_constraints_fun)) + + if (best_acquisition_value is None) or \ + (res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is None) or \ + (minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "ei"} + + return outputs + + +def _expected_improvement(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, samples_y_aggregation, + minimize_constraints_fun): + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + expected_improvement = sys.maxsize + if (minimize_constraints_fun is None) or ( + minimize_constraints_fun(x) is True): + mu, sigma = fun_prediction(x, *fun_prediction_args) + + loss_optimum = min(samples_y_aggregation) + scaling_factor = -1 + + # In case sigma equals zero + with numpy.errstate(divide="ignore"): + Z = scaling_factor * (mu - loss_optimum) / sigma + expected_improvement = scaling_factor * (mu - loss_optimum) * \ + norm.cdf(Z) + sigma * norm.pdf(Z) + expected_improvement = 0.0 if sigma == 0.0 else expected_improvement + + # We want expected_improvement to be as large as possible + # (i.e., as small as possible for minimize(...)) + expected_improvement = -1 * expected_improvement + return expected_improvement + + +def next_hyperparameter_lowest_confidence(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + minimize_starting_points, + minimize_constraints_fun=None): + """ + "Lowest Confidence" acquisition function + """ + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_lowest_confidence, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, + fun_prediction_args, + x_bounds, + x_types, + minimize_constraints_fun)) + + if (best_acquisition_value) is None or ( + res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is 
None) or ( + minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "lc"} + return outputs + + +def _lowest_confidence(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, minimize_constraints_fun): + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + ci = sys.maxsize + if (minimize_constraints_fun is None) or ( + minimize_constraints_fun(x) is True): + mu, sigma = fun_prediction(x, *fun_prediction_args) + ci = (sigma * 1.96 * 2) / mu + # We want ci to be as large as possible + # (i.e., as small as possible for minimize(...), + # because this would mean lowest confidence + ci = -1 * ci + + return ci + + +def next_hyperparameter_lowest_mu(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + minimize_starting_points, + minimize_constraints_fun=None): + """ + "Lowest Mu" acquisition function + """ + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_lowest_mu, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, fun_prediction_args, + x_bounds, x_types, minimize_constraints_fun)) + + if (best_acquisition_value is None) or ( + res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is None) or ( + minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "lm"} + return outputs + + +def _lowest_mu(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, minimize_constraints_fun): + """ + Calculate the lowest mu + """ + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + mu = sys.maxsize + if (minimize_constraints_fun is None) or ( + minimize_constraints_fun(x) is True): + mu, _ = fun_prediction(x, *fun_prediction_args) + return mu diff --git a/nni/algorithms/hpo/metis_tuner/lib_constraint_summation.py b/nni/algorithms/hpo/metis_tuner/lib_constraint_summation.py new file mode 100644 index 0000000000000000000000000000000000000000..948be5b3ca3dcace3ae673ce9e12667c093ca7a4 --- /dev/null +++ b/nni/algorithms/hpo/metis_tuner/lib_constraint_summation.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +lib_constraint_summation.py +""" + +import math +import random + +from operator import itemgetter + + +def check_feasibility(x_bounds, lowerbound, upperbound): + ''' + This can have false positives. + For examples, parameters can only be 0 or 5, and the summation constraint is between 6 and 7. 
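+
+    For instance, with two parameters whose only values are 0 and 5, the
+    possible sums are 0, 5 and 10, yet the bounds check still passes:
+
+    >>> check_feasibility([[0, 5], [0, 5]], 6, 7)
+    True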
+ ''' + # x_bounds should be sorted, so even for "discrete_int" type, + # the smallest and the largest number should the first and the last element + x_bounds_lowerbound = sum([x_bound[0] for x_bound in x_bounds]) + x_bounds_upperbound = sum([x_bound[-1] for x_bound in x_bounds]) + + # return ((x_bounds_lowerbound <= lowerbound) and (x_bounds_upperbound >= lowerbound)) or \ + # ((x_bounds_lowerbound <= upperbound) and (x_bounds_upperbound >= upperbound)) + return (x_bounds_lowerbound <= lowerbound <= x_bounds_upperbound) or \ + (x_bounds_lowerbound <= upperbound <= x_bounds_upperbound) + + +def rand(x_bounds, x_types, lowerbound, upperbound, max_retries=100): + ''' + Key idea is that we try to move towards upperbound, by randomly choose one + value for each parameter. However, for the last parameter, + we need to make sure that its value can help us get above lowerbound + ''' + outputs = None + + if check_feasibility(x_bounds, lowerbound, upperbound) is True: + # Order parameters by their range size. We want the smallest range first, + # because the corresponding parameter has less numbers to choose from + x_idx_sorted = [] + for i, _ in enumerate(x_bounds): + if x_types[i] == "discrete_int": + x_idx_sorted.append([i, len(x_bounds[i])]) + elif (x_types[i] == "range_int") or (x_types[i] == "range_continuous"): + x_idx_sorted.append( + [i, math.floor(x_bounds[i][1] - x_bounds[i][0])]) + x_idx_sorted = sorted(x_idx_sorted, key=itemgetter(1)) + + for _ in range(max_retries): + budget_allocated = 0 + outputs = [None] * len(x_bounds) + + for i, _ in enumerate(x_idx_sorted): + x_idx = x_idx_sorted[i][0] + # The amount of unallocated space that we have + budget_max = upperbound - budget_allocated + # NOT the Last x that we need to assign a random number + if i < (len(x_idx_sorted) - 1): + if x_bounds[x_idx][0] <= budget_max: + if x_types[x_idx] == "discrete_int": + # Note the valid integer + temp = [] + for j in x_bounds[x_idx]: + if j <= budget_max: + temp.append(j) + # Randomly pick a number from the integer array + if temp: + outputs[x_idx] = temp[random.randint( + 0, len(temp) - 1)] + + elif (x_types[x_idx] == "range_int") or \ + (x_types[x_idx] == "range_continuous"): + outputs[x_idx] = random.randint( + x_bounds[x_idx][0], min(x_bounds[x_idx][-1], budget_max)) + + else: + # The last x that we need to assign a random number + randint_lowerbound = lowerbound - budget_allocated + randint_lowerbound = 0 if randint_lowerbound < 0 else randint_lowerbound + + # This check: + # is our smallest possible value going to overflow the available budget space, + # and is our largest possible value going to underflow the + # lower bound + if (x_bounds[x_idx][0] <= budget_max) and \ + (x_bounds[x_idx][-1] >= randint_lowerbound): + if x_types[x_idx] == "discrete_int": + temp = [] + for j in x_bounds[x_idx]: + # if (j <= budget_max) and (j >= + # randint_lowerbound): + if randint_lowerbound <= j <= budget_max: + temp.append(j) + if temp: + outputs[x_idx] = temp[random.randint( + 0, len(temp) - 1)] + elif (x_types[x_idx] == "range_int") or \ + (x_types[x_idx] == "range_continuous"): + outputs[x_idx] = random.randint( + randint_lowerbound, min( + x_bounds[x_idx][1], budget_max)) + if outputs[x_idx] is None: + break + budget_allocated += outputs[x_idx] + if None not in outputs: + break + return outputs diff --git a/nni/algorithms/hpo/metis_tuner/lib_data.py b/nni/algorithms/hpo/metis_tuner/lib_data.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3f059514c8c30312acbd0c18ec000803cbe0ad --- 
/dev/null
+++ b/nni/algorithms/hpo/metis_tuner/lib_data.py
@@ -0,0 +1,50 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import math
+import random
+
+
+def match_val_type(vals, vals_bounds, vals_types):
+    '''
+    Update values in the array to match their corresponding types.
+    '''
+    vals_new = []
+
+    for i, _ in enumerate(vals_types):
+        if vals_types[i] == "discrete_int":
+            # Find the closest integer in the array, vals_bounds
+            # pylint: disable=cell-var-from-loop
+            vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i])))
+        elif vals_types[i] == "range_int":
+            # Round down to the nearest integer
+            vals_new.append(math.floor(vals[i]))
+        elif vals_types[i] == "range_continuous":
+            # Don't do any processing for continuous numbers
+            vals_new.append(vals[i])
+        else:
+            return None
+
+    return vals_new
+
+
+def rand(x_bounds, x_types):
+    '''
+    Randomly generate a value for each variable within its bounds.
+    '''
+    outputs = []
+
+    for i, _ in enumerate(x_bounds):
+        if x_types[i] == "discrete_int":
+            temp = x_bounds[i][random.randint(0, len(x_bounds[i]) - 1)]
+            outputs.append(temp)
+        elif x_types[i] == "range_int":
+            temp = random.randint(x_bounds[i][0], x_bounds[i][1] - 1)
+            outputs.append(temp)
+        elif x_types[i] == "range_continuous":
+            temp = random.uniform(x_bounds[i][0], x_bounds[i][1])
+            outputs.append(temp)
+        else:
+            return None
+
+    return outputs
diff --git a/nni/algorithms/hpo/metis_tuner/metis_tuner.py b/nni/algorithms/hpo/metis_tuner/metis_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..0575f463e6995f6f9967dd1e78029171254eb61c
--- /dev/null
+++ b/nni/algorithms/hpo/metis_tuner/metis_tuner.py
@@ -0,0 +1,653 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+metis_tuner.py
+"""
+
+import copy
+import logging
+import random
+import statistics
+import warnings
+from multiprocessing.dummy import Pool as ThreadPool
+import numpy as np
+from schema import Schema, Optional
+
+from nni import ClassArgsValidator
+from nni.tuner import Tuner
+from nni.common.hpo_utils import validate_search_space
+from nni.utils import OptimizeMode, extract_scalar_reward
+from . import lib_constraint_summation
+from . import lib_data
+from .Regression_GMM import CreateModel as gmm_create_model
+from .Regression_GMM import Selection as gmm_selection
+from .Regression_GP import CreateModel as gp_create_model
+from .Regression_GP import OutlierDetection as gp_outlier_detection
+from .Regression_GP import Prediction as gp_prediction
+from .Regression_GP import Selection as gp_selection
+
+logger = logging.getLogger("Metis_Tuner_AutoML")
+
+NONE_TYPE = ''
+CONSTRAINT_LOWERBOUND = None
+CONSTRAINT_UPPERBOUND = None
+CONSTRAINT_PARAMS_IDX = []
+
+class MetisClassArgsValidator(ClassArgsValidator):
+    def validate_class_args(self, **kwargs):
+        Schema({
+            Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
+            Optional('no_resampling'): bool,
+            Optional('no_candidates'): bool,
+            Optional('selection_num_starting_points'): int,
+            Optional('cold_start_num'): int,
+        }).validate(kwargs)
+
+class MetisTuner(Tuner):
+    """
+    Metis Tuner
+
+    For more information about the algorithm, see:
+    https://www.microsoft.com/en-us/research/publication/metis-robustly-tuning-tail-latencies-cloud-systems/
+
+    Attributes
+    ----------
+    optimize_mode : str
+        Either "maximize" or "minimize".
+
+    no_resampling : bool
+        True or False.
+        Should Metis consider re-sampling as part of the search strategy?
+        If you are confident that the training dataset is noise-free,
+        then you do not need re-sampling.
+
+    no_candidates : bool
+        True or False.
+        Should Metis suggest parameters for the next benchmark?
+        If you do not plan to do more benchmarks,
+        Metis can skip this step.
+
+    selection_num_starting_points : int
+        The number of starting points Metis uses when searching for the
+        global optimum in the search space.
+        The higher the number, the longer it takes to output a solution.
+
+    cold_start_num : int
+        Metis needs some trial results for a cold start. While the number
+        of trial results is less than cold_start_num, Metis randomly
+        samples hyper-parameters for the trials.
+
+    exploration_probability : float
+        The probability that Metis selects parameters by exploration
+        instead of exploitation.
+    """
+
+    def __init__(
+            self,
+            optimize_mode="maximize",
+            no_resampling=True,
+            no_candidates=False,
+            selection_num_starting_points=600,
+            cold_start_num=10,
+            exploration_probability=0.9):
+        """
+        Parameters
+        ----------
+        optimize_mode : str
+            Either "maximize" or "minimize".
+
+        no_resampling : bool
+            True or False.
+            Should Metis consider re-sampling as part of the search strategy?
+            If you are confident that the training dataset is noise-free,
+            then you do not need re-sampling.
+
+        no_candidates : bool
+            True or False.
+            Should Metis suggest parameters for the next benchmark?
+            If you do not plan to do more benchmarks,
+            Metis can skip this step.
+
+        selection_num_starting_points : int
+            The number of starting points Metis uses when searching for the
+            global optimum in the search space.
+            The higher the number, the longer it takes to output a solution.
+
+        cold_start_num : int
+            Metis needs some trial results for a cold start. While the number
+            of trial results is less than cold_start_num, Metis randomly
+            samples hyper-parameters for the trials.
+
+        exploration_probability : float
+            The probability that Metis selects parameters by exploration
+            instead of exploitation.
+
+        x_bounds : list
+            The value bounds of the parameters.
+
+        x_types : list
+            The types of the parameters.
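+
+            For example, a search space with one `uniform` parameter and one
+            numerical `choice` parameter is encoded as
+            x_bounds = [[0.0, 0.1], [16, 32, 64]] and
+            x_types = ['range_continuous', 'discrete_int'].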
+ """ + + self.samples_x = [] + self.samples_y = [] + self.samples_y_aggregation = [] + self.total_data = [] + self.space = None + self.no_resampling = no_resampling + self.no_candidates = no_candidates + self.optimize_mode = OptimizeMode(optimize_mode) + self.key_order = [] + self.cold_start_num = cold_start_num + self.selection_num_starting_points = selection_num_starting_points + self.exploration_probability = exploration_probability + self.minimize_constraints_fun = None + self.minimize_starting_points = None + self.supplement_data_num = 0 + self.x_bounds = [] + self.x_types = [] + + + def update_search_space(self, search_space): + """ + Update the self.x_bounds and self.x_types by the search_space.json + + Parameters + ---------- + search_space : dict + """ + validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform']) + + self.x_bounds = [[] for i in range(len(search_space))] + self.x_types = [NONE_TYPE for i in range(len(search_space))] + + for key in search_space: + self.key_order.append(key) + + key_type = {} + if isinstance(search_space, dict): + for key in search_space: + key_type = search_space[key]['_type'] + key_range = search_space[key]['_value'] + idx = self.key_order.index(key) + if key_type == 'quniform': + if key_range[2] == 1 and key_range[0].is_integer( + ) and key_range[1].is_integer(): + self.x_bounds[idx] = [key_range[0], key_range[1] + 1] + self.x_types[idx] = 'range_int' + else: + low, high, q = key_range + bounds = np.clip( + np.arange( + np.round( + low / q), + np.round( + high / q) + 1) * q, + low, + high) + self.x_bounds[idx] = bounds + self.x_types[idx] = 'discrete_int' + elif key_type == 'randint': + self.x_bounds[idx] = [key_range[0], key_range[1]] + self.x_types[idx] = 'range_int' + elif key_type == 'uniform': + self.x_bounds[idx] = [key_range[0], key_range[1]] + self.x_types[idx] = 'range_continuous' + elif key_type == 'choice': + self.x_bounds[idx] = key_range + + for key_value in key_range: + if not isinstance(key_value, (int, float)): + raise RuntimeError( + "Metis Tuner only support numerical choice.") + + self.x_types[idx] = 'discrete_int' + else: + logger.info( + "Metis Tuner doesn't support this kind of variable: %s", + str(key_type)) + raise RuntimeError( + "Metis Tuner doesn't support this kind of variable: %s" % + str(key_type)) + else: + logger.info("The format of search space is not a dict.") + raise RuntimeError("The format of search space is not a dict.") + + self.minimize_starting_points = _rand_init( + self.x_bounds, self.x_types, self.selection_num_starting_points) + + + def _pack_output(self, init_parameter): + """ + Pack the output + + Parameters + ---------- + init_parameter : dict + + Returns + ------- + output : dict + """ + output = {} + for i, param in enumerate(init_parameter): + output[self.key_order[i]] = param + + return output + + + def generate_parameters(self, parameter_id, **kwargs): + """ + Generate next parameter for trial + + If the number of trial result is lower than cold start number, + metis will first random generate some parameters. + Otherwise, metis will choose the parameters by + the Gussian Process Model and the Gussian Mixture Model. 
+
+        Parameters
+        ----------
+        parameter_id : int
+
+        Returns
+        -------
+        result : dict
+        """
+        if len(self.samples_x) < self.cold_start_num:
+            init_parameter = _rand_init(self.x_bounds, self.x_types, 1)[0]
+            results = self._pack_output(init_parameter)
+        else:
+            self.minimize_starting_points = _rand_init(
+                self.x_bounds, self.x_types, self.selection_num_starting_points)
+            results = self._selection(
+                self.samples_x,
+                self.samples_y_aggregation,
+                self.samples_y,
+                self.x_bounds,
+                self.x_types,
+                threshold_samplessize_resampling=(
+                    None if self.no_resampling is True else 50),
+                no_candidates=self.no_candidates,
+                minimize_starting_points=self.minimize_starting_points,
+                minimize_constraints_fun=self.minimize_constraints_fun)
+
+        logger.info("Generated parameters: \n%s", str(results))
+        return results
+
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        """
+        The tuner receives a result from a trial.
+
+        Parameters
+        ----------
+        parameter_id : int
+            The id of the parameters, generated by the nni manager.
+        parameters : dict
+            A group of parameters that the trial has tried.
+        value : dict/float
+            If value is a dict, it should contain a "default" key.
+        """
+        value = extract_scalar_reward(value)
+        if self.optimize_mode == OptimizeMode.Maximize:
+            value = -value
+
+        logger.info("Received trial result.")
+        logger.info("value is : %s", str(value))
+        logger.info("parameter is : %s", str(parameters))
+
+        # parse parameter to sample_x
+        sample_x = [0 for i in range(len(self.key_order))]
+        for key in parameters:
+            idx = self.key_order.index(key)
+            sample_x[idx] = parameters[key]
+
+        # parse value to sample_y
+        temp_y = []
+        if sample_x in self.samples_x:
+            idx = self.samples_x.index(sample_x)
+            temp_y = self.samples_y[idx]
+            temp_y.append(value)
+            self.samples_y[idx] = temp_y
+
+            # calculate y aggregation
+            median = get_median(temp_y)
+            self.samples_y_aggregation[idx] = [median]
+        else:
+            self.samples_x.append(sample_x)
+            self.samples_y.append([value])
+
+            # calculate y aggregation
+            self.samples_y_aggregation.append([value])
+
+
+    def _selection(
+            self,
+            samples_x,
+            samples_y_aggregation,
+            samples_y,
+            x_bounds,
+            x_types,
+            max_resampling_per_x=3,
+            threshold_samplessize_exploitation=12,
+            threshold_samplessize_resampling=50,
+            no_candidates=False,
+            minimize_starting_points=None,
+            minimize_constraints_fun=None):
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+
+            next_candidate = None
+            candidates = []
+            samples_size_all = sum([len(i) for i in samples_y])
+            samples_size_unique = len(samples_y)
+
+            # ===== STEP 1: Compute the current optimum =====
+            gp_model = gp_create_model.create_model(
+                samples_x, samples_y_aggregation)
+            lm_current = gp_selection.selection(
+                "lm",
+                samples_y_aggregation,
+                x_bounds,
+                x_types,
+                gp_model['model'],
+                minimize_starting_points,
+                minimize_constraints_fun=minimize_constraints_fun)
+            if not lm_current:
+                return None
+            logger.info({
+                'hyperparameter': lm_current['hyperparameter'],
+                'expected_mu': lm_current['expected_mu'],
+                'expected_sigma': lm_current['expected_sigma'],
+                'reason': "exploitation_gp"
+            })
+
+            if no_candidates is False:
+                # ===== STEP 2: Get recommended configurations for exploration ====
+                results_exploration = gp_selection.selection(
+                    "lc",
+                    samples_y_aggregation,
+                    x_bounds,
+                    x_types,
+                    gp_model['model'],
+                    minimize_starting_points,
+                    minimize_constraints_fun=minimize_constraints_fun)
+
+                if results_exploration is not None:
+                    if _num_past_samples(results_exploration['hyperparameter'],
+                                         samples_x, samples_y) == 0:
+                        temp_candidate = {
+                            'hyperparameter': results_exploration['hyperparameter'],
+                            'expected_mu': results_exploration['expected_mu'],
+                            'expected_sigma': results_exploration['expected_sigma'],
+                            'reason': "exploration"
+                        }
+                        candidates.append(temp_candidate)
+
+                        logger.info("DEBUG: 1 exploration candidate selected\n")
+                        logger.info(temp_candidate)
+                else:
+                    logger.info("DEBUG: No suitable exploration candidates were found\n")
+
+                # ===== STEP 3: Get recommended configurations for exploitation ===
+                if samples_size_all >= threshold_samplessize_exploitation:
+                    logger.info("Getting candidates for exploitation...\n")
+                    try:
+                        gmm = gmm_create_model.create_model(
+                            samples_x, samples_y_aggregation)
+
+                        if ("discrete_int" in x_types) or ("range_int" in x_types):
+                            results_exploitation = gmm_selection.selection(
+                                x_bounds,
+                                x_types,
+                                gmm['clusteringmodel_good'],
+                                gmm['clusteringmodel_bad'],
+                                minimize_starting_points,
+                                minimize_constraints_fun=minimize_constraints_fun)
+                        else:
+                            # If all parameters are of "range_continuous",
+                            # let's use GMM to generate random starting points
+                            results_exploitation = gmm_selection.selection_r(
+                                x_bounds,
+                                x_types,
+                                gmm['clusteringmodel_good'],
+                                gmm['clusteringmodel_bad'],
+                                num_starting_points=self.selection_num_starting_points,
+                                minimize_constraints_fun=minimize_constraints_fun)
+
+                        if results_exploitation is not None:
+                            if _num_past_samples(results_exploitation['hyperparameter'],
+                                                 samples_x, samples_y) == 0:
+                                temp_expected_mu, temp_expected_sigma = \
+                                    gp_prediction.predict(
+                                        results_exploitation['hyperparameter'],
+                                        gp_model['model'])
+                                temp_candidate = {
+                                    'hyperparameter': results_exploitation['hyperparameter'],
+                                    'expected_mu': temp_expected_mu,
+                                    'expected_sigma': temp_expected_sigma,
+                                    'reason': "exploitation_gmm"
+                                }
+                                candidates.append(temp_candidate)
+
+                                logger.info(
+                                    "DEBUG: 1 exploitation_gmm candidate selected\n")
+                                logger.info(temp_candidate)
+                        else:
+                            logger.info(
+                                "DEBUG: No suitable exploitation_gmm candidates were found\n")
+
+                    except ValueError as exception:
+                        # The exception: ValueError: Fitting the mixture model failed
+                        # because some components have ill-defined empirical covariance
+                        # (for instance caused by singleton or collapsed samples).
+                        # Try to decrease the number of components, or increase
+                        # reg_covar.
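+                        # (scikit-learn's GaussianMixture accepts a `reg_covar`
+                        # argument that regularizes the empirical covariances;
+                        # raising it is the usual remedy, but here the failure
+                        # is only logged and exploitation is skipped.)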
+                        logger.info(
+                            "DEBUG: No suitable exploitation_gmm "
+                            "candidates were found due to exception.")
+                        logger.info(exception)
+
+                # ===== STEP 4: Get a list of outliers =====
+                if (threshold_samplessize_resampling is not None) and \
+                        (samples_size_unique >= threshold_samplessize_resampling):
+                    logger.info("Getting candidates for re-sampling...\n")
+                    results_outliers = gp_outlier_detection.outlierDetection_threaded(
+                        samples_x, samples_y_aggregation)
+
+                    if results_outliers is not None:
+                        for results_outlier in results_outliers:  # pylint: disable=not-an-iterable
+                            if _num_past_samples(samples_x[results_outlier['samples_idx']],
+                                                 samples_x, samples_y) < max_resampling_per_x:
+                                temp_candidate = {
+                                    'hyperparameter': samples_x[results_outlier['samples_idx']],
+                                    'expected_mu': results_outlier['expected_mu'],
+                                    'expected_sigma': results_outlier['expected_sigma'],
+                                    'reason': "resampling"}
+                                candidates.append(temp_candidate)
+                                logger.info("DEBUG: 1 re-sampling candidate selected\n")
+                                logger.info(temp_candidate)
+                    else:
+                        logger.info(
+                            "DEBUG: No suitable resampling candidates were found\n")
+
+                if candidates:
+                    # ===== STEP 5: Compute the information gain of each candidate
+                    logger.info(
+                        "Evaluating information gain of %d candidates...\n",
+                        len(candidates))
+                    next_improvement = 0
+
+                    threads_inputs = [[
+                        candidate, samples_x, samples_y, x_bounds, x_types,
+                        minimize_constraints_fun, minimize_starting_points
+                    ] for candidate in candidates]
+                    threads_pool = ThreadPool(4)
+                    # Evaluate what would happen if we actually sample each
+                    # candidate
+                    threads_results = threads_pool.map(
+                        _calculate_lowest_mu_threaded, threads_inputs)
+                    threads_pool.close()
+                    threads_pool.join()
+
+                    for threads_result in threads_results:
+                        if threads_result['expected_lowest_mu'] < lm_current['expected_mu']:
+                            # Information gain
+                            temp_improvement = threads_result['expected_lowest_mu'] - \
+                                lm_current['expected_mu']
+
+                            if next_improvement > temp_improvement:
+                                next_improvement = temp_improvement
+                                next_candidate = threads_result['candidate']
+                else:
+                    # ===== STEP 6: If we have no candidates, randomly pick one ===
+                    logger.info(
+                        "DEBUG: No candidates from exploration, exploitation "
+                        "and resampling. We will randomly pick a candidate "
+                        "for next_candidate\n")
+
+                    next_candidate = _rand_with_constraints(
+                        x_bounds,
+                        x_types) if minimize_starting_points is None else minimize_starting_points[0]
+                    next_candidate = lib_data.match_val_type(
+                        next_candidate, x_bounds, x_types)
+                    expected_mu, expected_sigma = gp_prediction.predict(
+                        next_candidate, gp_model['model'])
+                    next_candidate = {
+                        'hyperparameter': next_candidate,
+                        'reason': "random",
+                        'expected_mu': expected_mu,
+                        'expected_sigma': expected_sigma}
+
+            # STEP 7: If the current optimal hyperparameter occurs in the
+            # history, or the exploration probability fires, take the next
+            # config as an exploration step
+            outputs = self._pack_output(lm_current['hyperparameter'])
+            ap = random.uniform(0, 1)
+            if outputs in self.total_data or ap <= self.exploration_probability:
+                if next_candidate is not None:
+                    outputs = self._pack_output(next_candidate['hyperparameter'])
+                else:
+                    random_parameter = _rand_init(x_bounds, x_types, 1)[0]
+                    outputs = self._pack_output(random_parameter)
+            self.total_data.append(outputs)
+            return outputs
+
+    def import_data(self, data):
+        """
+        Import additional data for tuning.
+
+        Parameters
+        ----------
+        data : a list of dict
+            Each dict has at least two keys: 'parameter' and 'value'.
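+
+        For example (with a hypothetical search-space key 'x')::
+
+            tuner.import_data([
+                {'parameter': {'x': 0.5}, 'value': 0.93},
+                {'parameter': {'x': 1.5}, 'value': {'default': 0.88}},
+            ])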
+ """ + _completed_num = 0 + for trial_info in data: + logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data)) + _completed_num += 1 + assert "parameter" in trial_info + _params = trial_info["parameter"] + assert "value" in trial_info + _value = trial_info['value'] + if not _value: + logger.info("Useless trial data, value is %s, skip this trial data.", _value) + continue + self.supplement_data_num += 1 + _parameter_id = '_'.join( + ["ImportData", str(self.supplement_data_num)]) + self.total_data.append(_params) + self.receive_trial_result( + parameter_id=_parameter_id, + parameters=_params, + value=_value) + logger.info("Successfully import data to metis tuner.") + + +def _rand_with_constraints(x_bounds, x_types): + outputs = None + x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX] + x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX] + + x_val_withconstraints = lib_constraint_summation.rand( + x_bounds_withconstraints, + x_types_withconstraints, + CONSTRAINT_LOWERBOUND, + CONSTRAINT_UPPERBOUND) + if not x_val_withconstraints: + outputs = [None] * len(x_bounds) + + for i, _ in enumerate(CONSTRAINT_PARAMS_IDX): + outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i] + + for i, output in enumerate(outputs): + if not output: + outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1]) + return outputs + + +def _calculate_lowest_mu_threaded(inputs): + [candidate, samples_x, samples_y, x_bounds, x_types, + minimize_constraints_fun, minimize_starting_points] = inputs + + outputs = {"candidate": candidate, "expected_lowest_mu": None} + + for expected_mu in [ + candidate['expected_mu'] + + 1.96 * + candidate['expected_sigma'], + candidate['expected_mu'] - + 1.96 * + candidate['expected_sigma']]: + temp_samples_x = copy.deepcopy(samples_x) + temp_samples_y = copy.deepcopy(samples_y) + + try: + idx = temp_samples_x.index(candidate['hyperparameter']) + # This handles the case of re-sampling a potential outlier + temp_samples_y[idx].append(expected_mu) + except ValueError: + temp_samples_x.append(candidate['hyperparameter']) + temp_samples_y.append([expected_mu]) + + # Aggregates multiple observation of the sample sampling points + temp_y_aggregation = [statistics.median( + temp_sample_y) for temp_sample_y in temp_samples_y] + temp_gp = gp_create_model.create_model( + temp_samples_x, temp_y_aggregation) + temp_results = gp_selection.selection( + "lm", + temp_y_aggregation, + x_bounds, + x_types, + temp_gp['model'], + minimize_starting_points, + minimize_constraints_fun=minimize_constraints_fun) + + if outputs["expected_lowest_mu"] is None \ + or outputs["expected_lowest_mu"] > temp_results['expected_mu']: + outputs["expected_lowest_mu"] = temp_results['expected_mu'] + + return outputs + + +def _num_past_samples(x, samples_x, samples_y): + try: + idx = samples_x.index(x) + return len(samples_y[idx]) + except ValueError: + logger.info("x not in sample_x") + return 0 + + +def _rand_init(x_bounds, x_types, selection_num_starting_points): + ''' + Random sample some init seed within bounds. 
+    '''
+    return [lib_data.rand(x_bounds, x_types) for i
+            in range(0, selection_num_starting_points)]
+
+
+def get_median(temp_list):
+    """
+    Return the median of a list.
+    """
+    num = len(temp_list)
+    temp_list.sort()
+    if num % 2 == 0:
+        median = (temp_list[int(num / 2)] + temp_list[int(num / 2) - 1]) / 2
+    else:
+        median = temp_list[int(num / 2)]
+    return median
diff --git a/nni/algorithms/hpo/metis_tuner/requirments.txt b/nni/algorithms/hpo/metis_tuner/requirments.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3dfc2232a18c19a544eb3fb5b2f132e185958f98
--- /dev/null
+++ b/nni/algorithms/hpo/metis_tuner/requirments.txt
@@ -0,0 +1 @@
+scikit-learn>=0.23.2
diff --git a/nni/algorithms/hpo/networkmorphism_tuner/__init__.py b/nni/algorithms/hpo/networkmorphism_tuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b60da9c3892a06392397b7b8549de6615f4569cf
--- /dev/null
+++ b/nni/algorithms/hpo/networkmorphism_tuner/__init__.py
@@ -0,0 +1 @@
+from .networkmorphism_tuner import NetworkMorphismTuner, NetworkMorphismClassArgsValidator
diff --git a/nni/algorithms/hpo/networkmorphism_tuner/bayesian.py b/nni/algorithms/hpo/networkmorphism_tuner/bayesian.py
new file mode 100644
index 0000000000000000000000000000000000000000..54c1996dc7708568a7065325d24a8b83fdb5a40f
--- /dev/null
+++ b/nni/algorithms/hpo/networkmorphism_tuner/bayesian.py
@@ -0,0 +1,482 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import math
+import random
+from copy import deepcopy
+from functools import total_ordering
+from queue import PriorityQueue
+
+import numpy as np
+from scipy.linalg import LinAlgError, cho_solve, cholesky, solve_triangular
+from scipy.optimize import linear_sum_assignment
+from sklearn.metrics.pairwise import rbf_kernel
+
+from nni.utils import OptimizeMode
+from .graph_transformer import transform
+from .utils import Constant
+from .layers import is_layer
+
+
+def layer_distance(a, b):
+    """The distance between two layers."""
+    # pylint: disable=unidiomatic-typecheck
+    if not isinstance(a, type(b)):
+        return 1.0
+    if is_layer(a, "Conv"):
+        att_diff = [
+            (a.filters, b.filters),
+            (a.kernel_size, b.kernel_size),
+            (a.stride, b.stride),
+        ]
+        return attribute_difference(att_diff)
+    if is_layer(a, "Pooling"):
+        att_diff = [
+            (a.padding, b.padding),
+            (a.kernel_size, b.kernel_size),
+            (a.stride, b.stride),
+        ]
+        return attribute_difference(att_diff)
+    return 0.0
+
+
+def attribute_difference(att_diff):
+    ''' The averaged relative distance between matched layer attributes.
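+
+    For example, attribute_difference([(32, 64), (3, 3)]) computes
+    |32 - 64| / 64 = 0.5 and 0 for the two pairs, and returns
+    (0.5 + 0) / 2 = 0.25.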
+ ''' + + ret = 0 + for a_value, b_value in att_diff: + if max(a_value, b_value) == 0: + ret += 0 + else: + ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value) + return ret * 1.0 / len(att_diff) + + +def layers_distance(list_a, list_b): + """The distance between the layers of two neural networks.""" + len_a = len(list_a) + len_b = len(list_b) + f = np.zeros((len_a + 1, len_b + 1)) + f[-1][-1] = 0 + for i in range(-1, len_a): + f[i][-1] = i + 1 + for j in range(-1, len_b): + f[-1][j] = j + 1 + for i in range(len_a): + for j in range(len_b): + f[i][j] = min( + f[i][j - 1] + 1, + f[i - 1][j] + 1, + f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]), + ) + return f[len_a - 1][len_b - 1] + + +def skip_connection_distance(a, b): + """The distance between two skip-connections.""" + if a[2] != b[2]: + return 1.0 + len_a = abs(a[1] - a[0]) + len_b = abs(b[1] - b[0]) + return (abs(a[0] - b[0]) + abs(len_a - len_b)) / \ + (max(a[0], b[0]) + max(len_a, len_b)) + + +def skip_connections_distance(list_a, list_b): + """The distance between the skip-connections of two neural networks.""" + distance_matrix = np.zeros((len(list_a), len(list_b))) + for i, a in enumerate(list_a): + for j, b in enumerate(list_b): + distance_matrix[i][j] = skip_connection_distance(a, b) + return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs( + len(list_a) - len(list_b) + ) + + +def edit_distance(x, y): + """The distance between two neural networks. + Args: + x: An instance of NetworkDescriptor. + y: An instance of NetworkDescriptor + Returns: + The edit-distance between x and y. + """ + + ret = layers_distance(x.layers, y.layers) + ret += Constant.KERNEL_LAMBDA * skip_connections_distance( + x.skip_connections, y.skip_connections + ) + return ret + + +class IncrementalGaussianProcess: + """Gaussian process regressor. + Attributes: + alpha: A hyperparameter. + """ + + def __init__(self): + self.alpha = 1e-10 + self._distance_matrix = None + self._x = None + self._y = None + self._first_fitted = False + self._l_matrix = None + self._alpha_vector = None + + @property + def kernel_matrix(self): + ''' Kernel matric. + ''' + return self._distance_matrix + + def fit(self, train_x, train_y): + """ Fit the regressor with more data. + Args: + train_x: A list of NetworkDescriptor. + train_y: A list of metric values. + """ + if self.first_fitted: + self.incremental_fit(train_x, train_y) + else: + self.first_fit(train_x, train_y) + + def incremental_fit(self, train_x, train_y): + """ Incrementally fit the regressor. 
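+        The stored distance matrix is extended block-wise with the
+        cross-distances to the new samples, embedded into a kernel matrix,
+        and re-factorized with a fresh Cholesky decomposition (a full
+        re-factorization, not a rank-one update).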
""" + if not self._first_fitted: + raise ValueError( + "The first_fit function needs to be called first.") + + train_x, train_y = np.array(train_x), np.array(train_y) + + # Incrementally compute K + up_right_k = edit_distance_matrix(self._x, train_x) + down_left_k = np.transpose(up_right_k) + down_right_k = edit_distance_matrix(train_x) + up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1) + down_k = np.concatenate((down_left_k, down_right_k), axis=1) + temp_distance_matrix = np.concatenate((up_k, down_k), axis=0) + k_matrix = bourgain_embedding_matrix(temp_distance_matrix) + diagonal = np.diag_indices_from(k_matrix) + diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):]) + k_matrix[diagonal] += self.alpha + + try: + self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 + except LinAlgError: + return self + + self._x = np.concatenate((self._x, train_x), axis=0) + self._y = np.concatenate((self._y, train_y), axis=0) + self._distance_matrix = temp_distance_matrix + + self._alpha_vector = cho_solve( + (self._l_matrix, True), self._y) # Line 3 + + return self + + @property + def first_fitted(self): + ''' if it is firsr fitted + ''' + return self._first_fitted + + def first_fit(self, train_x, train_y): + """ Fit the regressor for the first time. """ + train_x, train_y = np.array(train_x), np.array(train_y) + + self._x = np.copy(train_x) + self._y = np.copy(train_y) + + self._distance_matrix = edit_distance_matrix(self._x) + k_matrix = bourgain_embedding_matrix(self._distance_matrix) + k_matrix[np.diag_indices_from(k_matrix)] += self.alpha + + self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 + + self._alpha_vector = cho_solve( + (self._l_matrix, True), self._y) # Line 3 + + self._first_fitted = True + return self + + def predict(self, train_x): + """Predict the result. + Args: + train_x: A list of NetworkDescriptor. + Returns: + y_mean: The predicted mean. + y_std: The predicted standard deviation. + """ + k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2)) + y_mean = k_trans.dot(self._alpha_vector) # Line 4 (y_mean = f_star) + + # compute inverse K_inv of K based on its Cholesky + # decomposition L and its inverse L_inv + l_inv = solve_triangular( + self._l_matrix.T, np.eye( + self._l_matrix.shape[0])) + k_inv = l_inv.dot(l_inv.T) + # Compute variance of predictive distribution + y_var = np.ones(len(train_x), dtype=np.float) + y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans) + + # Check if any of the variances is negative because of + # numerical issues. If yes: set the variance to 0. + y_var_negative = y_var < 0 + if np.any(y_var_negative): + y_var[y_var_negative] = 0.0 + return y_mean, np.sqrt(y_var) + + +def edit_distance_matrix(train_x, train_y=None): + """Calculate the edit distance. + Args: + train_x: A list of neural architectures. + train_y: A list of neural architectures. + Returns: + An edit-distance matrix. 
+ """ + if train_y is None: + ret = np.zeros((train_x.shape[0], train_x.shape[0])) + for x_index, x in enumerate(train_x): + for y_index, y in enumerate(train_x): + if x_index == y_index: + ret[x_index][y_index] = 0 + elif x_index < y_index: + ret[x_index][y_index] = edit_distance(x, y) + else: + ret[x_index][y_index] = ret[y_index][x_index] + return ret + ret = np.zeros((train_x.shape[0], train_y.shape[0])) + for x_index, x in enumerate(train_x): + for y_index, y in enumerate(train_y): + ret[x_index][y_index] = edit_distance(x, y) + return ret + + +def vector_distance(a, b): + """The Euclidean distance between two vectors.""" + a = np.array(a) + b = np.array(b) + return np.linalg.norm(a - b) + + +def bourgain_embedding_matrix(distance_matrix): + """Use Bourgain algorithm to embed the neural architectures based on their edit-distance. + Args: + distance_matrix: A matrix of edit-distances. + Returns: + A matrix of distances after embedding. + """ + distance_matrix = np.array(distance_matrix) + n = len(distance_matrix) + if n == 1: + return distance_matrix + np.random.seed(123) + distort_elements = [] + r = range(n) + k = int(math.ceil(math.log(n) / math.log(2) - 1)) + t = int(math.ceil(math.log(n))) + counter = 0 + for i in range(0, k + 1): + for t in range(t): + s = np.random.choice(r, 2 ** i) + for j in r: + d = min([distance_matrix[j][s] for s in s]) + counter += len(s) + if i == 0 and t == 0: + distort_elements.append([d]) + else: + distort_elements[j].append(d) + return rbf_kernel(distort_elements, distort_elements) + + +class BayesianOptimizer: + """ A Bayesian optimizer for neural architectures. + Attributes: + searcher: The Searcher which is calling the Bayesian optimizer. + t_min: The minimum temperature for simulated annealing. + metric: An instance of the Metric subclasses. + gpr: A GaussianProcessRegressor for bayesian optimization. + beta: The beta in acquisition function. (refer to our paper) + search_tree: The network morphism search tree. + """ + + def __init__(self, searcher, t_min, optimizemode, beta=None): + self.searcher = searcher + self.t_min = t_min + self.optimizemode = optimizemode + self.gpr = IncrementalGaussianProcess() + self.beta = beta if beta is not None else Constant.BETA + self.search_tree = SearchTree() + + def fit(self, x_queue, y_queue): + """ Fit the optimizer with new architectures and performances. + Args: + x_queue: A list of NetworkDescriptor. + y_queue: A list of metric values. + """ + self.gpr.fit(x_queue, y_queue) + + def generate(self, descriptors): + """Generate new architecture. + Args: + descriptors: All the searched neural architectures. + Returns: + graph: An instance of Graph. A morphed neural network with weights. + father_id: The father node ID in the search tree. + """ + model_ids = self.search_tree.adj_list.keys() + + target_graph = None + father_id = None + descriptors = deepcopy(descriptors) + elem_class = Elem + if self.optimizemode is OptimizeMode.Maximize: + elem_class = ReverseElem + + # Initialize the priority queue. 
+        pq = PriorityQueue()
+        temp_list = []
+        for model_id in model_ids:
+            metric_value = self.searcher.get_metric_value_by_id(model_id)
+            temp_list.append((metric_value, model_id))
+        temp_list = sorted(temp_list)
+        for metric_value, model_id in temp_list:
+            graph = self.searcher.load_model_by_id(model_id)
+            graph.clear_operation_history()
+            graph.clear_weights()
+            pq.put(elem_class(metric_value, model_id, graph))
+
+        t = 1.0
+        t_min = self.t_min
+        alpha = 0.9
+        opt_acq = self._get_init_opt_acq_value()
+        while not pq.empty() and t > t_min:
+            elem = pq.get()
+            if self.optimizemode is OptimizeMode.Maximize:
+                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
+            else:
+                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
+            ap = math.exp(temp_exp)
+            if ap >= random.uniform(0, 1):
+                for temp_graph in transform(elem.graph):
+                    if contain(descriptors, temp_graph.extract_descriptor()):
+                        continue
+
+                    temp_acq_value = self.acq(temp_graph)
+                    pq.put(
+                        elem_class(
+                            temp_acq_value,
+                            elem.father_id,
+                            temp_graph))
+                    descriptors.append(temp_graph.extract_descriptor())
+                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
+                        opt_acq = temp_acq_value
+                        father_id = elem.father_id
+                        target_graph = deepcopy(temp_graph)
+            t *= alpha
+
+        # No non-duplicate architecture was found
+        if father_id is None:
+            return None, None
+        nm_graph = self.searcher.load_model_by_id(father_id)
+        for args in target_graph.operation_history:
+            getattr(nm_graph, args[0])(*list(args[1:]))
+        return nm_graph, father_id
+
+    def acq(self, graph):
+        ''' Estimate the acquisition value of a generated graph.
+        '''
+        mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
+        if self.optimizemode is OptimizeMode.Maximize:
+            return mean + self.beta * std
+        return mean - self.beta * std
+
+    def _get_init_opt_acq_value(self):
+        if self.optimizemode is OptimizeMode.Maximize:
+            return -np.inf
+        return np.inf
+
+    def _accept_new_acq_value(self, opt_acq, temp_acq_value):
+        if temp_acq_value > opt_acq and self.optimizemode is OptimizeMode.Maximize:
+            return True
+        if temp_acq_value < opt_acq and self.optimizemode is not OptimizeMode.Maximize:
+            return True
+        return False
+
+    def add_child(self, father_id, model_id):
+        ''' Add a child to the search tree.
+        Arguments:
+            father_id {int} -- father id
+            model_id {int} -- model id
+        '''
+
+        self.search_tree.add_child(father_id, model_id)
+
+
+@total_ordering
+class Elem:
+    """Elements to be sorted according to metric value."""
+
+    def __init__(self, metric_value, father_id, graph):
+        self.father_id = father_id
+        self.graph = graph
+        self.metric_value = metric_value
+
+    def __eq__(self, other):
+        return self.metric_value == other.metric_value
+
+    def __lt__(self, other):
+        return self.metric_value < other.metric_value
+
+
+class ReverseElem(Elem):
+    """Elements to be reversely sorted according to metric value."""
+
+    def __lt__(self, other):
+        return self.metric_value > other.metric_value
+
+
+def contain(descriptors, target_descriptor):
+    """Check if the target descriptor is in the descriptors."""
+    for descriptor in descriptors:
+        if edit_distance(descriptor, target_descriptor) < 1e-5:
+            return True
+    return False
+
+
+class SearchTree:
+    """The network morphism search tree."""
+
+    def __init__(self):
+        self.root = None
+        self.adj_list = {}
+
+    def add_child(self, u, v):
+        ''' Add a child to the search tree itself.
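+        For example, add_child(-1, 0) makes model 0 the root, and
+        add_child(0, 1) then attaches model 1 under model 0.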
+ Arguments: + u {int} -- father id + v {int} -- child id + ''' + + if u == -1: + self.root = v + self.adj_list[v] = [] + return + if v not in self.adj_list[u]: + self.adj_list[u].append(v) + if v not in self.adj_list: + self.adj_list[v] = [] + + def get_dict(self, u=None): + """ A recursive function to return the content of the tree in a dict.""" + if u is None: + return self.get_dict(self.root) + children = [] + for v in self.adj_list[u]: + children.append(self.get_dict(v)) + ret = {"name": u, "children": children} + return ret diff --git a/nni/algorithms/hpo/networkmorphism_tuner/graph.py b/nni/algorithms/hpo/networkmorphism_tuner/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..9c96b6c2f07088aceb0cb029de02cb4b2f16ae32 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/graph.py @@ -0,0 +1,995 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +from collections.abc import Iterable +from copy import deepcopy, copy +from queue import Queue + +import numpy as np +import torch + +from .layer_transformer import ( + add_noise, + wider_bn, + wider_next_conv, + wider_next_dense, + wider_pre_conv, + wider_pre_dense, + init_dense_weight, + init_conv_weight, + init_bn_weight, +) +from .layers import ( + StubAdd, + StubConcatenate, + StubReLU, + get_batch_norm_class, + get_conv_class, + is_layer, + layer_width, + set_keras_weight_to_stub, + set_stub_weight_to_keras, + set_stub_weight_to_torch, + set_torch_weight_to_stub, + to_real_keras_layer, + layer_description_extractor, + layer_description_builder, +) +from .utils import Constant + + +class NetworkDescriptor: + """A class describing the neural architecture for neural network kernel. + It only record the width of convolutional and dense layers, and the skip-connection types and positions. + """ + + CONCAT_CONNECT = "concat" + ADD_CONNECT = "add" + + def __init__(self): + self.skip_connections = [] + self.layers = [] + + @property + def n_layers(self): + return len(self.layers) + + def add_skip_connection(self, u, v, connection_type): + """ Add a skip-connection to the descriptor. + Args: + u: Number of convolutional layers before the starting point. + v: Number of convolutional layers before the ending point. + connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT. + """ + if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]: + raise ValueError( + "connection_type should be NetworkDescriptor.CONCAT_CONNECT " + "or NetworkDescriptor.ADD_CONNECT." + ) + self.skip_connections.append((u, v, connection_type)) + + def to_json(self): + ''' NetworkDescriptor to json representation + ''' + + skip_list = [] + for u, v, connection_type in self.skip_connections: + skip_list.append({"from": u, "to": v, "type": connection_type}) + return {"node_list": self.layers, "skip_list": skip_list} + + def add_layer(self, layer): + ''' add one layer + ''' + + self.layers.append(layer) + + +class Node: + """A class for intermediate output tensor (node) in the Graph. + Attributes: + shape: A tuple describing the shape of the tensor. + """ + + def __init__(self, shape): + self.shape = shape + + +class Graph: + """A class representing the neural architecture graph of a model. + Graph extracts the neural architecture graph from a model. + Each node in the graph is a intermediate tensor between layers. + Each layer is an edge in the graph. + Notably, multiple edges may refer to the same layer. + (e.g. Add layer is adding two tensor into one tensor. 
So it is related to two edges.) + Attributes: + weighted: A boolean of whether the weights and biases in the neural network + should be included in the graph. + input_shape: A tuple of integers, which does not include the batch axis. + node_list: A list of integers. The indices of the list are the identifiers. + layer_list: A list of stub layers. The indices of the list are the identifiers. + node_to_id: A dict instance mapping from node integers to their identifiers. + layer_to_id: A dict instance mapping from stub layers to their identifiers. + layer_id_to_input_node_ids: A dict instance mapping from layer identifiers + to their input nodes identifiers. + layer_id_to_output_node_ids: A dict instance mapping from layer identifiers + to their output nodes identifiers. + adj_list: A two dimensional list. The adjacency list of the graph. The first dimension is + identified by tensor identifiers. In each edge list, the elements are two-element tuples + of (tensor identifier, layer identifier). + reverse_adj_list: A reverse adjacent list in the same format as adj_list. + operation_history: A list saving all the network morphism operations. + vis: A dictionary of temporary storage for whether an local operation has been done + during the network morphism. + """ + + def __init__(self, input_shape, weighted=True): + """Initializer for Graph. + """ + self.input_shape = input_shape + self.weighted = weighted + self.node_list = [] + self.layer_list = [] + # node id start with 0 + self.node_to_id = {} + self.layer_to_id = {} + self.layer_id_to_input_node_ids = {} + self.layer_id_to_output_node_ids = {} + self.adj_list = {} + self.reverse_adj_list = {} + self.operation_history = [] + self.n_dim = len(input_shape) - 1 + self.conv = get_conv_class(self.n_dim) + self.batch_norm = get_batch_norm_class(self.n_dim) + + self.vis = None + self._add_node(Node(input_shape)) + + def add_layer(self, layer, input_node_id): + """Add a layer to the Graph. + Args: + layer: An instance of the subclasses of StubLayer in layers.py. + input_node_id: An integer. The ID of the input node of the layer. + Returns: + output_node_id: An integer. The ID of the output node of the layer. + """ + if isinstance(input_node_id, Iterable): + layer.input = list(map(lambda x: self.node_list[x], input_node_id)) + output_node_id = self._add_node(Node(layer.output_shape)) + for node_id in input_node_id: + self._add_edge(layer, node_id, output_node_id) + + else: + layer.input = self.node_list[input_node_id] + output_node_id = self._add_node(Node(layer.output_shape)) + self._add_edge(layer, input_node_id, output_node_id) + + layer.output = self.node_list[output_node_id] + return output_node_id + + def clear_operation_history(self): + self.operation_history = [] + + @property + def n_nodes(self): + """Return the number of nodes in the model.""" + return len(self.node_list) + + @property + def n_layers(self): + """Return the number of layers in the model.""" + return len(self.layer_list) + + def _add_node(self, node): + """Add a new node to node_list and give the node an ID. + Args: + node: An instance of Node. + Returns: + node_id: An integer. + """ + node_id = len(self.node_list) + self.node_to_id[node] = node_id + self.node_list.append(node) + self.adj_list[node_id] = [] + self.reverse_adj_list[node_id] = [] + return node_id + + def _add_edge(self, layer, input_id, output_id): + """Add a new layer to the graph. 
The nodes should be created in advance.""" + + if layer in self.layer_to_id: + layer_id = self.layer_to_id[layer] + if input_id not in self.layer_id_to_input_node_ids[layer_id]: + self.layer_id_to_input_node_ids[layer_id].append(input_id) + if output_id not in self.layer_id_to_output_node_ids[layer_id]: + self.layer_id_to_output_node_ids[layer_id].append(output_id) + else: + layer_id = len(self.layer_list) + self.layer_list.append(layer) + self.layer_to_id[layer] = layer_id + self.layer_id_to_input_node_ids[layer_id] = [input_id] + self.layer_id_to_output_node_ids[layer_id] = [output_id] + + self.adj_list[input_id].append((output_id, layer_id)) + self.reverse_adj_list[output_id].append((input_id, layer_id)) + + def _redirect_edge(self, u_id, v_id, new_v_id): + """Redirect the layer to a new node. + Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id` + while keeping all other property of the edge the same. + """ + layer_id = None + for index, edge_tuple in enumerate(self.adj_list[u_id]): + if edge_tuple[0] == v_id: + layer_id = edge_tuple[1] + self.adj_list[u_id][index] = (new_v_id, layer_id) + self.layer_list[layer_id].output = self.node_list[new_v_id] + break + + for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]): + if edge_tuple[0] == u_id: + layer_id = edge_tuple[1] + self.reverse_adj_list[v_id].remove(edge_tuple) + break + self.reverse_adj_list[new_v_id].append((u_id, layer_id)) + for index, value in enumerate( + self.layer_id_to_output_node_ids[layer_id]): + if value == v_id: + self.layer_id_to_output_node_ids[layer_id][index] = new_v_id + break + + def _replace_layer(self, layer_id, new_layer): + """Replace the layer with a new layer.""" + old_layer = self.layer_list[layer_id] + new_layer.input = old_layer.input + new_layer.output = old_layer.output + new_layer.output.shape = new_layer.output_shape + self.layer_list[layer_id] = new_layer + self.layer_to_id[new_layer] = layer_id + self.layer_to_id.pop(old_layer) + + @property + def topological_order(self): + """Return the topological order of the node IDs from the input node to the output node.""" + q = Queue() + in_degree = {} + for i in range(self.n_nodes): + in_degree[i] = 0 + for u in range(self.n_nodes): + for v, _ in self.adj_list[u]: + in_degree[v] += 1 + for i in range(self.n_nodes): + if in_degree[i] == 0: + q.put(i) + + order_list = [] + while not q.empty(): + u = q.get() + order_list.append(u) + for v, _ in self.adj_list[u]: + in_degree[v] -= 1 + if in_degree[v] == 0: + q.put(v) + return order_list + + def _get_pooling_layers(self, start_node_id, end_node_id): + """Given two node IDs, return all the pooling layers between them.""" + layer_list = [] + node_list = [start_node_id] + assert self._depth_first_search(end_node_id, layer_list, node_list) + ret = [] + for layer_id in layer_list: + layer = self.layer_list[layer_id] + if is_layer(layer, "Pooling"): + ret.append(layer) + elif is_layer(layer, "Conv") and layer.stride != 1: + ret.append(layer) + return ret + + def _depth_first_search(self, target_id, layer_id_list, node_list): + """Search for all the layers and nodes down the path. 
+ A recursive function to search all the layers and nodes between the node in the node_list + and the node with target_id.""" + assert len(node_list) <= self.n_nodes + u = node_list[-1] + if u == target_id: + return True + + for v, layer_id in self.adj_list[u]: + layer_id_list.append(layer_id) + node_list.append(v) + if self._depth_first_search(target_id, layer_id_list, node_list): + return True + layer_id_list.pop() + node_list.pop() + + return False + + def _search(self, u, start_dim, total_dim, n_add): + """Search the graph for all the layers to be widened caused by an operation. + It is an recursive function with duplication check to avoid deadlock. + It searches from a starting node u until the corresponding layers has been widened. + Args: + u: The starting node ID. + start_dim: The position to insert the additional dimensions. + total_dim: The total number of dimensions the layer has before widening. + n_add: The number of dimensions to add. + """ + if (u, start_dim, total_dim, n_add) in self.vis: + return + self.vis[(u, start_dim, total_dim, n_add)] = True + for v, layer_id in self.adj_list[u]: + layer = self.layer_list[layer_id] + + if is_layer(layer, "Conv"): + new_layer = wider_next_conv( + layer, start_dim, total_dim, n_add, self.weighted + ) + self._replace_layer(layer_id, new_layer) + + elif is_layer(layer, "Dense"): + new_layer = wider_next_dense( + layer, start_dim, total_dim, n_add, self.weighted + ) + self._replace_layer(layer_id, new_layer) + + elif is_layer(layer, "BatchNormalization"): + new_layer = wider_bn( + layer, start_dim, total_dim, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + self._search(v, start_dim, total_dim, n_add) + + elif is_layer(layer, "Concatenate"): + if self.layer_id_to_input_node_ids[layer_id][1] == u: + # u is on the right of the concat + # next_start_dim += next_total_dim - total_dim + left_dim = self._upper_layer_width( + self.layer_id_to_input_node_ids[layer_id][0] + ) + next_start_dim = start_dim + left_dim + next_total_dim = total_dim + left_dim + else: + next_start_dim = start_dim + next_total_dim = total_dim + self._upper_layer_width( + self.layer_id_to_input_node_ids[layer_id][1] + ) + self._search(v, next_start_dim, next_total_dim, n_add) + + else: + self._search(v, start_dim, total_dim, n_add) + + for v, layer_id in self.reverse_adj_list[u]: + layer = self.layer_list[layer_id] + if is_layer(layer, "Conv"): + new_layer = wider_pre_conv(layer, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + elif is_layer(layer, "Dense"): + new_layer = wider_pre_dense(layer, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + elif is_layer(layer, "Concatenate"): + continue + else: + self._search(v, start_dim, total_dim, n_add) + + def _upper_layer_width(self, u): + for v, layer_id in self.reverse_adj_list[u]: + layer = self.layer_list[layer_id] + if is_layer(layer, "Conv") or is_layer(layer, "Dense"): + return layer_width(layer) + elif is_layer(layer, "Concatenate"): + a = self.layer_id_to_input_node_ids[layer_id][0] + b = self.layer_id_to_input_node_ids[layer_id][1] + return self._upper_layer_width(a) + self._upper_layer_width(b) + else: + return self._upper_layer_width(v) + return self.node_list[0].shape[-1] + + def to_deeper_model(self, target_id, new_layer): + """Insert a relu-conv-bn block after the target block. + Args: + target_id: A convolutional layer ID. The new block should be inserted after the block. + new_layer: An instance of StubLayer subclasses. 
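The `Concatenate` branch of `_search` above is the subtle case; a worked example with illustrative widths (not taken from the patch):

```python
# A concat joins a 32-wide left input and a 64-wide right input into a
# 96-wide tensor. Arriving from the right input with start_dim=0 and
# total_dim=64, the left width is prepended so the inserted channels
# stay aligned inside the concatenated tensor:
left_dim, start_dim, total_dim = 32, 0, 64
next_start_dim = start_dim + left_dim   # 32
next_total_dim = total_dim + left_dim   # 96
```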
+ """ + self.operation_history.append( + ("to_deeper_model", target_id, new_layer)) + input_id = self.layer_id_to_input_node_ids[target_id][0] + output_id = self.layer_id_to_output_node_ids[target_id][0] + if self.weighted: + if is_layer(new_layer, "Dense"): + init_dense_weight(new_layer) + elif is_layer(new_layer, "Conv"): + init_conv_weight(new_layer) + elif is_layer(new_layer, "BatchNormalization"): + init_bn_weight(new_layer) + + self._insert_new_layers([new_layer], input_id, output_id) + + def to_wider_model(self, pre_layer_id, n_add): + """Widen the last dimension of the output of the pre_layer. + Args: + pre_layer_id: The ID of a convolutional layer or dense layer. + n_add: The number of dimensions to add. + """ + self.operation_history.append(("to_wider_model", pre_layer_id, n_add)) + pre_layer = self.layer_list[pre_layer_id] + output_id = self.layer_id_to_output_node_ids[pre_layer_id][0] + dim = layer_width(pre_layer) + self.vis = {} + self._search(output_id, dim, dim, n_add) + # Update the tensor shapes. + for u in self.topological_order: + for v, layer_id in self.adj_list[u]: + self.node_list[v].shape = self.layer_list[layer_id].output_shape + + def _insert_new_layers(self, new_layers, start_node_id, end_node_id): + """Insert the new_layers after the node with start_node_id.""" + new_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) + temp_output_id = new_node_id + for layer in new_layers[:-1]: + temp_output_id = self.add_layer(layer, temp_output_id) + + self._add_edge(new_layers[-1], temp_output_id, end_node_id) + new_layers[-1].input = self.node_list[temp_output_id] + new_layers[-1].output = self.node_list[end_node_id] + self._redirect_edge(start_node_id, end_node_id, new_node_id) + + def _block_end_node(self, layer_id, block_size): + ret = self.layer_id_to_output_node_ids[layer_id][0] + for _ in range(block_size - 2): + ret = self.adj_list[ret][0][0] + return ret + + def _dense_block_end_node(self, layer_id): + return self.layer_id_to_input_node_ids[layer_id][0] + + def _conv_block_end_node(self, layer_id): + """Get the input node ID of the last layer in the block by layer ID. + Return the input node ID of the last layer in the convolutional block. + Args: + layer_id: the convolutional layer ID. + """ + return self._block_end_node(layer_id, Constant.CONV_BLOCK_DISTANCE) + + def to_add_skip_model(self, start_id, end_id): + """Add a weighted add skip-connection from after start node to end node. + Args: + start_id: The convolutional layer ID, after which to start the skip-connection. + end_id: The convolutional layer ID, after which to end the skip-connection. + """ + self.operation_history.append(("to_add_skip_model", start_id, end_id)) + filters_end = self.layer_list[end_id].output.shape[-1] + filters_start = self.layer_list[start_id].output.shape[-1] + start_node_id = self.layer_id_to_output_node_ids[start_id][0] + + pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] + end_node_id = self.layer_id_to_output_node_ids[end_id][0] + + skip_output_id = self._insert_pooling_layer_chain( + start_node_id, end_node_id) + + # Add the conv layer + new_conv_layer = get_conv_class( + self.n_dim)( + filters_start, + filters_end, + 1) + skip_output_id = self.add_layer(new_conv_layer, skip_output_id) + + # Add the add layer. 
+ add_input_node_id = self._add_node( + deepcopy(self.node_list[end_node_id])) + add_layer = StubAdd() + + self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id) + self._add_edge(add_layer, add_input_node_id, end_node_id) + self._add_edge(add_layer, skip_output_id, end_node_id) + add_layer.input = [ + self.node_list[add_input_node_id], + self.node_list[skip_output_id], + ] + add_layer.output = self.node_list[end_node_id] + self.node_list[end_node_id].shape = add_layer.output_shape + + # Set weights to the additional conv layer. + if self.weighted: + filter_shape = (1,) * self.n_dim + weights = np.zeros((filters_end, filters_start) + filter_shape) + bias = np.zeros(filters_end) + new_conv_layer.set_weights( + (add_noise(weights, np.array([0, 1])), add_noise( + bias, np.array([0, 1]))) + ) + + def to_concat_skip_model(self, start_id, end_id): + """Add a weighted add concatenate connection from after start node to end node. + Args: + start_id: The convolutional layer ID, after which to start the skip-connection. + end_id: The convolutional layer ID, after which to end the skip-connection. + """ + self.operation_history.append( + ("to_concat_skip_model", start_id, end_id)) + filters_end = self.layer_list[end_id].output.shape[-1] + filters_start = self.layer_list[start_id].output.shape[-1] + start_node_id = self.layer_id_to_output_node_ids[start_id][0] + + pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] + end_node_id = self.layer_id_to_output_node_ids[end_id][0] + + skip_output_id = self._insert_pooling_layer_chain( + start_node_id, end_node_id) + + concat_input_node_id = self._add_node( + deepcopy(self.node_list[end_node_id])) + self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id) + + concat_layer = StubConcatenate() + concat_layer.input = [ + self.node_list[concat_input_node_id], + self.node_list[skip_output_id], + ] + concat_output_node_id = self._add_node(Node(concat_layer.output_shape)) + self._add_edge( + concat_layer, + concat_input_node_id, + concat_output_node_id) + self._add_edge(concat_layer, skip_output_id, concat_output_node_id) + concat_layer.output = self.node_list[concat_output_node_id] + self.node_list[concat_output_node_id].shape = concat_layer.output_shape + + # Add the concatenate layer. + new_conv_layer = get_conv_class(self.n_dim)( + filters_start + filters_end, filters_end, 1 + ) + self._add_edge(new_conv_layer, concat_output_node_id, end_node_id) + new_conv_layer.input = self.node_list[concat_output_node_id] + new_conv_layer.output = self.node_list[end_node_id] + self.node_list[end_node_id].shape = new_conv_layer.output_shape + + if self.weighted: + filter_shape = (1,) * self.n_dim + weights = np.zeros((filters_end, filters_end) + filter_shape) + for i in range(filters_end): + filter_weight = np.zeros((filters_end,) + filter_shape) + center_index = (i,) + (0,) * self.n_dim + filter_weight[center_index] = 1 + weights[i, ...] 
= filter_weight + weights = np.concatenate( + (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1 + ) + bias = np.zeros(filters_end) + new_conv_layer.set_weights( + (add_noise(weights, np.array([0, 1])), add_noise( + bias, np.array([0, 1]))) + ) + + def _insert_pooling_layer_chain(self, start_node_id, end_node_id): + skip_output_id = start_node_id + for layer in self._get_pooling_layers(start_node_id, end_node_id): + new_layer = deepcopy(layer) + if is_layer(new_layer, "Conv"): + filters = self.node_list[start_node_id].shape[-1] + new_layer = get_conv_class(self.n_dim)( + filters, filters, 1, layer.stride) + if self.weighted: + init_conv_weight(new_layer) + else: + new_layer = deepcopy(layer) + skip_output_id = self.add_layer(new_layer, skip_output_id) + skip_output_id = self.add_layer(StubReLU(), skip_output_id) + return skip_output_id + + def extract_descriptor(self): + """Extract the the description of the Graph as an instance of NetworkDescriptor.""" + main_chain = self.get_main_chain() + index_in_main_chain = {} + for index, u in enumerate(main_chain): + index_in_main_chain[u] = index + + ret = NetworkDescriptor() + for u in main_chain: + for v, layer_id in self.adj_list[u]: + if v not in index_in_main_chain: + continue + layer = self.layer_list[layer_id] + copied_layer = copy(layer) + copied_layer.weights = None + ret.add_layer(deepcopy(copied_layer)) + + for u in index_in_main_chain: + for v, layer_id in self.adj_list[u]: + if v not in index_in_main_chain: + temp_u = u + temp_v = v + temp_layer_id = layer_id + skip_type = None + while not ( + temp_v in index_in_main_chain and temp_u in index_in_main_chain): + if is_layer( + self.layer_list[temp_layer_id], "Concatenate"): + skip_type = NetworkDescriptor.CONCAT_CONNECT + if is_layer(self.layer_list[temp_layer_id], "Add"): + skip_type = NetworkDescriptor.ADD_CONNECT + temp_u = temp_v + temp_v, temp_layer_id = self.adj_list[temp_v][0] + ret.add_skip_connection( + index_in_main_chain[u], index_in_main_chain[temp_u], skip_type + ) + + elif index_in_main_chain[v] - index_in_main_chain[u] != 1: + skip_type = None + if is_layer(self.layer_list[layer_id], "Concatenate"): + skip_type = NetworkDescriptor.CONCAT_CONNECT + if is_layer(self.layer_list[layer_id], "Add"): + skip_type = NetworkDescriptor.ADD_CONNECT + ret.add_skip_connection( + index_in_main_chain[u], index_in_main_chain[v], skip_type + ) + + return ret + + def clear_weights(self): + ''' clear weights of the graph + ''' + self.weighted = False + for layer in self.layer_list: + layer.weights = None + + def produce_torch_model(self): + """Build a new Torch model based on the current graph.""" + return TorchModel(self) + + def produce_keras_model(self): + """Build a new keras model based on the current graph.""" + return KerasModel(self).model + + def produce_onnx_model(self): + """Build a new ONNX model based on the current graph.""" + return ONNXModel(self) + + def parsing_onnx_model(self, onnx_model): + '''to do in the future to use the onnx model + ''' + return ONNXModel(onnx_model) + + def produce_json_model(self): + """Build a new Json model based on the current graph.""" + return JSONModel(self).data + + @classmethod + def parsing_json_model(cls, json_model): + '''build a graph from json + ''' + return json_to_graph(json_model) + + def _layer_ids_in_order(self, layer_ids): + node_id_to_order_index = {} + for index, node_id in enumerate(self.topological_order): + node_id_to_order_index[node_id] = index + return sorted( + layer_ids, + key=lambda layer_id: 
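The weight block just above is what makes `to_concat_skip_model` function-preserving at insertion time: the new 1x1 conv starts as an identity over the original `filters_end` channels and zeros over the `filters_start` skipped channels, before `add_noise` perturbs it. A standalone check with made-up sizes:

```python
import numpy as np

filters_end, filters_start, n_dim = 4, 2, 2
filter_shape = (1,) * n_dim
eye = np.eye(filters_end).reshape((filters_end, filters_end) + filter_shape)
weights = np.concatenate(
    (eye, np.zeros((filters_end, filters_start) + filter_shape)), axis=1)
assert weights.shape == (4, 6, 1, 1)   # identity block next to a zero block
```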
node_id_to_order_index[ + self.layer_id_to_output_node_ids[layer_id][0] + ], + ) + + def _layer_ids_by_type(self, type_str): + return list( + filter( + lambda layer_id: is_layer(self.layer_list[layer_id], type_str), + range(self.n_layers), + ) + ) + + def get_main_chain_layers(self): + """Return a list of layer IDs in the main chain.""" + main_chain = self.get_main_chain() + ret = [] + for u in main_chain: + for v, layer_id in self.adj_list[u]: + if v in main_chain and u in main_chain: + ret.append(layer_id) + return ret + + def _conv_layer_ids_in_order(self): + return list( + filter( + lambda layer_id: is_layer(self.layer_list[layer_id], "Conv"), + self.get_main_chain_layers(), + ) + ) + + def _dense_layer_ids_in_order(self): + return self._layer_ids_in_order(self._layer_ids_by_type("Dense")) + + def deep_layer_ids(self): + ret = [] + for layer_id in self.get_main_chain_layers(): + layer = self.layer_list[layer_id] + if is_layer(layer, "GlobalAveragePooling"): + break + if is_layer(layer, "Add") or is_layer(layer, "Concatenate"): + continue + ret.append(layer_id) + return ret + + def wide_layer_ids(self): + return ( + self._conv_layer_ids_in_order( + )[:-1] + self._dense_layer_ids_in_order()[:-1] + ) + + def skip_connection_layer_ids(self): + return self.deep_layer_ids()[:-1] + + def size(self): + return sum(list(map(lambda x: x.size(), self.layer_list))) + + def get_main_chain(self): + """Returns the main chain node ID list.""" + pre_node = {} + distance = {} + for i in range(self.n_nodes): + distance[i] = 0 + pre_node[i] = i + for i in range(self.n_nodes - 1): + for u in range(self.n_nodes): + for v, _ in self.adj_list[u]: + if distance[u] + 1 > distance[v]: + distance[v] = distance[u] + 1 + pre_node[v] = u + temp_id = 0 + for i in range(self.n_nodes): + if distance[i] > distance[temp_id]: + temp_id = i + ret = [] + for i in range(self.n_nodes + 5): + ret.append(temp_id) + if pre_node[temp_id] == temp_id: + break + temp_id = pre_node[temp_id] + assert temp_id == pre_node[temp_id] + ret.reverse() + return ret + + +class TorchModel(torch.nn.Module): + """A neural network class using pytorch constructed from an instance of Graph.""" + + def __init__(self, graph): + super(TorchModel, self).__init__() + self.graph = graph + self.layers = [] + for layer in graph.layer_list: + self.layers.append(layer.to_real_layer()) + if graph.weighted: + for index, layer in enumerate(self.layers): + set_stub_weight_to_torch(self.graph.layer_list[index], layer) + for index, layer in enumerate(self.layers): + self.add_module(str(index), layer) + + def forward(self, input_tensor): + topo_node_list = self.graph.topological_order + output_id = topo_node_list[-1] + input_id = topo_node_list[0] + + node_list = deepcopy(self.graph.node_list) + node_list[input_id] = input_tensor + + for v in topo_node_list: + for u, layer_id in self.graph.reverse_adj_list[v]: + layer = self.graph.layer_list[layer_id] + torch_layer = self.layers[layer_id] + + if isinstance(layer, (StubAdd, StubConcatenate)): + edge_input_tensor = list( + map( + lambda x: node_list[x], + self.graph.layer_id_to_input_node_ids[layer_id], + ) + ) + else: + edge_input_tensor = node_list[u] + + temp_tensor = torch_layer(edge_input_tensor) + node_list[v] = temp_tensor + return node_list[output_id] + + def set_weight_to_graph(self): + self.graph.weighted = True + for index, layer in enumerate(self.layers): + set_torch_weight_to_stub(layer, self.graph.layer_list[index]) + + +class KerasModel: + def __init__(self, graph): + import keras + + self.graph = graph 
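A hedged usage sketch for `TorchModel` above, assuming a CNN graph built over a `(32, 32, 3)` input; PyTorch tensors are channels-first, so the batch is shaped NCHW:

```python
import torch

model = graph.produce_torch_model()   # stub layers lowered to nn.Modules
x = torch.randn(1, 3, 32, 32)         # N, C, H, W
y = model(x)                          # forward pass in topological order
```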
+ self.layers = [] + for layer in graph.layer_list: + self.layers.append(to_real_keras_layer(layer)) + + # Construct the keras graph. + # Input + topo_node_list = self.graph.topological_order + output_id = topo_node_list[-1] + input_id = topo_node_list[0] + input_tensor = keras.layers.Input( + shape=graph.node_list[input_id].shape) + + node_list = deepcopy(self.graph.node_list) + node_list[input_id] = input_tensor + + # Output + for v in topo_node_list: + for u, layer_id in self.graph.reverse_adj_list[v]: + layer = self.graph.layer_list[layer_id] + keras_layer = self.layers[layer_id] + + if isinstance(layer, (StubAdd, StubConcatenate)): + edge_input_tensor = list( + map( + lambda x: node_list[x], + self.graph.layer_id_to_input_node_ids[layer_id], + ) + ) + else: + edge_input_tensor = node_list[u] + + temp_tensor = keras_layer(edge_input_tensor) + node_list[v] = temp_tensor + + output_tensor = node_list[output_id] + output_tensor = keras.layers.Activation("softmax", name="activation_add")( + output_tensor + ) + self.model = keras.models.Model( + inputs=input_tensor, outputs=output_tensor) + + if graph.weighted: + for index, layer in enumerate(self.layers): + set_stub_weight_to_keras(self.graph.layer_list[index], layer) + + def set_weight_to_graph(self): + self.graph.weighted = True + for index, layer in enumerate(self.layers): + set_keras_weight_to_stub(layer, self.graph.layer_list[index]) + + +class ONNXModel: + # to do in the future using onnx ir + def __init__(self, graph): + pass + + +class JSONModel: + def __init__(self, graph): + data = dict() + node_list = list() + layer_list = list() + operation_history = list() + + data["input_shape"] = graph.input_shape + vis = graph.vis + data["vis"] = list(vis.keys()) if vis is not None else None + data["weighted"] = graph.weighted + + for item in graph.operation_history: + if item[0] == "to_deeper_model": + operation_history.append( + [ + item[0], + item[1], + layer_description_extractor(item[2], graph.node_to_id), + ] + ) + else: + operation_history.append(item) + data["operation_history"] = operation_history + data["layer_id_to_input_node_ids"] = graph.layer_id_to_input_node_ids + data["layer_id_to_output_node_ids"] = graph.layer_id_to_output_node_ids + data["adj_list"] = graph.adj_list + data["reverse_adj_list"] = graph.reverse_adj_list + + for node in graph.node_list: + node_id = graph.node_to_id[node] + node_information = node.shape + node_list.append((node_id, node_information)) + + for layer_id, item in enumerate(graph.layer_list): + layer = graph.layer_list[layer_id] + layer_information = layer_description_extractor( + layer, graph.node_to_id) + layer_list.append((layer_id, layer_information)) + + data["node_list"] = node_list + data["layer_list"] = layer_list + + self.data = data + + +def graph_to_onnx(graph, onnx_model_path): + import onnx + # to do in the future using onnx ir + onnx_out = graph.produce_onnx_model() + onnx.save(onnx_out, onnx_model_path) + return onnx_out + + +def onnx_to_graph(onnx_model, input_shape): + # to do in the future using onnx ir + graph = Graph(input_shape, False) + graph.parsing_onnx_model(onnx_model) + return graph + + +def graph_to_json(graph, json_model_path): + json_out = graph.produce_json_model() + with open(json_model_path, "w") as fout: + json.dump(json_out, fout) + json_out = json.dumps(json_out) + return json_out + + +def json_to_graph(json_model: str): + json_model = json.loads(json_model) + # restore graph data from json data + input_shape = tuple(json_model["input_shape"]) + node_list = 
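A round-trip sketch for the JSON serialisation defined in this file (the file path is hypothetical):

```python
json_str = graph_to_json(graph, "model_0.json")  # writes the file, returns the dump
restored = json_to_graph(json_str)               # rebuilds an equivalent Graph
assert restored.input_shape == graph.input_shape
```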
list() + node_to_id = dict() + id_to_node = dict() + layer_list = list() + layer_to_id = dict() + operation_history = list() + graph = Graph(input_shape, False) + + graph.input_shape = input_shape + vis = json_model["vis"] + graph.vis = { + tuple(item): True for item in vis} if vis is not None else None + graph.weighted = json_model["weighted"] + layer_id_to_input_node_ids = json_model["layer_id_to_input_node_ids"] + graph.layer_id_to_input_node_ids = { + int(k): v for k, v in layer_id_to_input_node_ids.items() + } + layer_id_to_output_node_ids = json_model["layer_id_to_output_node_ids"] + graph.layer_id_to_output_node_ids = { + int(k): v for k, v in layer_id_to_output_node_ids.items() + } + adj_list = {} + for k, v in json_model["adj_list"].items(): + adj_list[int(k)] = [tuple(i) for i in v] + graph.adj_list = adj_list + reverse_adj_list = {} + for k, v in json_model["reverse_adj_list"].items(): + reverse_adj_list[int(k)] = [tuple(i) for i in v] + graph.reverse_adj_list = reverse_adj_list + + for item in json_model["node_list"]: + new_node = Node(tuple(item[1])) + node_id = item[0] + node_list.append(new_node) + node_to_id[new_node] = node_id + id_to_node[node_id] = new_node + + for item in json_model["operation_history"]: + if item[0] == "to_deeper_model": + operation_history.append( + (item[0], item[1], layer_description_builder(item[2], id_to_node)) + ) + else: + operation_history.append(item) + graph.operation_history = operation_history + + for item in json_model["layer_list"]: + new_layer = layer_description_builder(item[1], id_to_node) + layer_id = int(item[0]) + layer_list.append(new_layer) + layer_to_id[new_layer] = layer_id + + graph.node_list = node_list + graph.node_to_id = node_to_id + graph.layer_list = layer_list + graph.layer_to_id = layer_to_id + + return graph diff --git a/nni/algorithms/hpo/networkmorphism_tuner/graph_transformer.py b/nni/algorithms/hpo/networkmorphism_tuner/graph_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..28fda7acb9444d45b4881a6cdeab39a9414b5fdc --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/graph_transformer.py @@ -0,0 +1,167 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from copy import deepcopy + +from random import randrange, sample + +from .graph import NetworkDescriptor +from .layers import ( + StubDense, + StubReLU, + get_batch_norm_class, + get_conv_class, + get_dropout_class, + get_pooling_class, + is_layer, +) +from .utils import Constant + + +def to_wider_graph(graph): + ''' wider graph + ''' + weighted_layer_ids = graph.wide_layer_ids() + weighted_layer_ids = list( + filter( + lambda x: graph.layer_list[x].output.shape[-1], weighted_layer_ids) + ) + wider_layers = sample(weighted_layer_ids, 1) + + for layer_id in wider_layers: + layer = graph.layer_list[layer_id] + if is_layer(layer, "Conv"): + n_add = layer.filters + else: + n_add = layer.units + + graph.to_wider_model(layer_id, n_add) + return graph + + +def to_skip_connection_graph(graph): + ''' skip connection graph + ''' + # The last conv layer cannot be widen since wider operator cannot be done + # over the two sides of flatten. 
+ weighted_layer_ids = graph.skip_connection_layer_ids() + valid_connection = [] + for skip_type in sorted( + [NetworkDescriptor.ADD_CONNECT, NetworkDescriptor.CONCAT_CONNECT]): + for index_a in range(len(weighted_layer_ids)): + for index_b in range(len(weighted_layer_ids))[index_a + 1:]: + valid_connection.append((index_a, index_b, skip_type)) + + if not valid_connection: + return graph + for index_a, index_b, skip_type in sample(valid_connection, 1): + a_id = weighted_layer_ids[index_a] + b_id = weighted_layer_ids[index_b] + if skip_type == NetworkDescriptor.ADD_CONNECT: + graph.to_add_skip_model(a_id, b_id) + else: + graph.to_concat_skip_model(a_id, b_id) + return graph + + +def create_new_layer(layer, n_dim): + ''' create new layer for the graph + ''' + + input_shape = layer.output.shape + dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU] + conv_deeper_classes = [ + get_conv_class(n_dim), + get_batch_norm_class(n_dim), + StubReLU] + if is_layer(layer, "ReLU"): + conv_deeper_classes = [ + get_conv_class(n_dim), + get_batch_norm_class(n_dim)] + dense_deeper_classes = [StubDense, get_dropout_class(n_dim)] + elif is_layer(layer, "Dropout"): + dense_deeper_classes = [StubDense, StubReLU] + elif is_layer(layer, "BatchNormalization"): + conv_deeper_classes = [get_conv_class(n_dim), StubReLU] + + layer_class = None + if len(input_shape) == 1: + # It is in the dense layer part. + layer_class = sample(dense_deeper_classes, 1)[0] + else: + # It is in the conv layer part. + layer_class = sample(conv_deeper_classes, 1)[0] + + if layer_class == StubDense: + new_layer = StubDense(input_shape[0], input_shape[0]) + + elif layer_class == get_dropout_class(n_dim): + new_layer = layer_class(Constant.DENSE_DROPOUT_RATE) # pylint: disable=not-callable + + elif layer_class == get_conv_class(n_dim): + new_layer = layer_class( # pylint: disable=not-callable + input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1 + ) + + elif layer_class == get_batch_norm_class(n_dim): + new_layer = layer_class(input_shape[-1]) # pylint: disable=not-callable + + elif layer_class == get_pooling_class(n_dim): + new_layer = layer_class(sample((1, 3, 5), 1)[0]) # pylint: disable=not-callable + + else: + new_layer = layer_class() # pylint: disable=not-callable + + return new_layer + + +def to_deeper_graph(graph): + ''' deeper graph + ''' + + weighted_layer_ids = graph.deep_layer_ids() + if len(weighted_layer_ids) >= Constant.MAX_LAYERS: + return None + + deeper_layer_ids = sample(weighted_layer_ids, 1) + + for layer_id in deeper_layer_ids: + layer = graph.layer_list[layer_id] + new_layer = create_new_layer(layer, graph.n_dim) + graph.to_deeper_model(layer_id, new_layer) + return graph + + +def legal_graph(graph): + '''judge if a graph is legal or not. + ''' + + descriptor = graph.extract_descriptor() + skips = descriptor.skip_connections + if len(skips) != len(set(skips)): + return False + return True + + +def transform(graph): + '''core transform function for graph. 
+ ''' + + graphs = [] + for _ in range(Constant.N_NEIGHBOURS * 2): + random_num = randrange(3) + temp_graph = None + if random_num == 0: + temp_graph = to_deeper_graph(deepcopy(graph)) + elif random_num == 1: + temp_graph = to_wider_graph(deepcopy(graph)) + elif random_num == 2: + temp_graph = to_skip_connection_graph(deepcopy(graph)) + + if temp_graph is not None and temp_graph.size() <= Constant.MAX_MODEL_SIZE: + graphs.append(temp_graph) + + if len(graphs) >= Constant.N_NEIGHBOURS: + break + + return graphs diff --git a/nni/algorithms/hpo/networkmorphism_tuner/layer_transformer.py b/nni/algorithms/hpo/networkmorphism_tuner/layer_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..6ffd1b20fb0e3b4ef0a6d2e54a02fb7cfa7cfc42 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/layer_transformer.py @@ -0,0 +1,264 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np + +from .layers import ( + StubDense, + StubReLU, + get_batch_norm_class, + get_conv_class, + get_n_dim, +) + +NOISE_RATIO = 1e-4 + + +def deeper_conv_block(conv_layer, kernel_size, weighted=True): + '''deeper conv layer. + ''' + n_dim = get_n_dim(conv_layer) + filter_shape = (kernel_size,) * 2 + n_filters = conv_layer.filters + weight = np.zeros((n_filters, n_filters) + filter_shape) + center = tuple(map(lambda x: int((x - 1) / 2), filter_shape)) + for i in range(n_filters): + filter_weight = np.zeros((n_filters,) + filter_shape) + index = (i,) + center + filter_weight[index] = 1 + weight[i, ...] = filter_weight + bias = np.zeros(n_filters) + new_conv_layer = get_conv_class(n_dim)( + conv_layer.filters, n_filters, kernel_size=kernel_size + ) + bn = get_batch_norm_class(n_dim)(n_filters) + + if weighted: + new_conv_layer.set_weights( + (add_noise(weight, np.array([0, 1])), + add_noise(bias, np.array([0, 1]))) + ) + new_weights = [ + add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])), + ] + bn.set_weights(new_weights) + + return [StubReLU(), new_conv_layer, bn] + + +def dense_to_deeper_block(dense_layer, weighted=True): + '''deeper dense layer. + ''' + units = dense_layer.units + weight = np.eye(units) + bias = np.zeros(units) + new_dense_layer = StubDense(units, units) + if weighted: + new_dense_layer.set_weights( + (add_noise(weight, np.array([0, 1])), + add_noise(bias, np.array([0, 1]))) + ) + return [StubReLU(), new_dense_layer] + + +def wider_pre_dense(layer, n_add, weighted=True): + '''wider previous dense layer. 
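A hedged sketch of how `transform` above is typically consumed: it proposes up to `Constant.N_NEIGHBOURS` mutated copies of a parent graph, which a caller may still screen with `legal_graph` (the screening step is an assumption here, not shown in this file):

```python
candidates = transform(parent_graph)   # parent_graph: an existing Graph instance
candidates = [g for g in candidates if legal_graph(g)]
```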
+ ''' + if not weighted: + return StubDense(layer.input_units, layer.units + n_add) + + n_units2 = layer.units + + teacher_w, teacher_b = layer.get_weights() + rand = np.random.randint(n_units2, size=n_add) + student_w = teacher_w.copy() + student_b = teacher_b.copy() + + # target layer update (i) + for i in range(n_add): + teacher_index = rand[i] + new_weight = teacher_w[teacher_index, :] + new_weight = new_weight[np.newaxis, :] + student_w = np.concatenate( + (student_w, add_noise(new_weight, student_w)), axis=0) + student_b = np.append( + student_b, add_noise( + teacher_b[teacher_index], student_b)) + + new_pre_layer = StubDense(layer.input_units, n_units2 + n_add) + new_pre_layer.set_weights((student_w, student_b)) + + return new_pre_layer + + +def wider_pre_conv(layer, n_add_filters, weighted=True): + '''wider previous conv layer. + ''' + n_dim = get_n_dim(layer) + if not weighted: + return get_conv_class(n_dim)( + layer.input_channel, + layer.filters + n_add_filters, + kernel_size=layer.kernel_size, + ) + + n_pre_filters = layer.filters + rand = np.random.randint(n_pre_filters, size=n_add_filters) + teacher_w, teacher_b = layer.get_weights() + + student_w = teacher_w.copy() + student_b = teacher_b.copy() + # target layer update (i) + for i, _ in enumerate(rand): + teacher_index = rand[i] + new_weight = teacher_w[teacher_index, ...] + new_weight = new_weight[np.newaxis, ...] + student_w = np.concatenate((student_w, new_weight), axis=0) + student_b = np.append(student_b, teacher_b[teacher_index]) + new_pre_layer = get_conv_class(n_dim)( + layer.input_channel, n_pre_filters + n_add_filters, layer.kernel_size + ) + new_pre_layer.set_weights( + (add_noise(student_w, teacher_w), add_noise(student_b, teacher_b)) + ) + return new_pre_layer + + +def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True): + '''wider next conv layer. + ''' + n_dim = get_n_dim(layer) + if not weighted: + return get_conv_class(n_dim)(layer.input_channel + n_add, + layer.filters, + kernel_size=layer.kernel_size, + stride=layer.stride) + n_filters = layer.filters + teacher_w, teacher_b = layer.get_weights() + + new_weight_shape = list(teacher_w.shape) + new_weight_shape[1] = n_add + new_weight = np.zeros(tuple(new_weight_shape)) + + student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(), + add_noise(new_weight, teacher_w), + teacher_w[:, start_dim:total_dim, ...].copy()), axis=1) + new_layer = get_conv_class(n_dim)(layer.input_channel + n_add, + n_filters, + kernel_size=layer.kernel_size, + stride=layer.stride) + new_layer.set_weights((student_w, teacher_b)) + return new_layer + + +def wider_bn(layer, start_dim, total_dim, n_add, weighted=True): + '''wider batch norm layer. 
+ ''' + n_dim = get_n_dim(layer) + if not weighted: + return get_batch_norm_class(n_dim)(layer.num_features + n_add) + + weights = layer.get_weights() + + new_weights = [ + add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])), + add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])), + ] + + student_w = tuple() + for weight, new_weight in zip(weights, new_weights): + temp_w = weight.copy() + temp_w = np.concatenate( + (temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim]) + ) + student_w += (temp_w,) + new_layer = get_batch_norm_class(n_dim)(layer.num_features + n_add) + new_layer.set_weights(student_w) + return new_layer + + +def wider_next_dense(layer, start_dim, total_dim, n_add, weighted=True): + '''wider next dense layer. + ''' + if not weighted: + return StubDense(layer.input_units + n_add, layer.units) + teacher_w, teacher_b = layer.get_weights() + student_w = teacher_w.copy() + n_units_each_channel = int(teacher_w.shape[1] / total_dim) + + new_weight = np.zeros((teacher_w.shape[0], n_add * n_units_each_channel)) + student_w = np.concatenate( + ( + student_w[:, : start_dim * n_units_each_channel], + add_noise(new_weight, student_w), + student_w[ + :, start_dim * n_units_each_channel: total_dim * n_units_each_channel + ], + ), + axis=1, + ) + + new_layer = StubDense(layer.input_units + n_add, layer.units) + new_layer.set_weights((student_w, teacher_b)) + return new_layer + + +def add_noise(weights, other_weights): + '''add noise to the layer. + ''' + w_range = np.ptp(other_weights.flatten()) + noise_range = NOISE_RATIO * w_range + noise = np.random.uniform(-noise_range / 2.0, + noise_range / 2.0, weights.shape) + return np.add(noise, weights) + + +def init_dense_weight(layer): + '''initilize dense layer weight. + ''' + units = layer.units + weight = np.eye(units) + bias = np.zeros(units) + layer.set_weights( + (add_noise(weight, np.array([0, 1])), + add_noise(bias, np.array([0, 1]))) + ) + + +def init_conv_weight(layer): + '''initilize conv layer weight. + ''' + n_filters = layer.filters + filter_shape = (layer.kernel_size,) * get_n_dim(layer) + weight = np.zeros((n_filters, n_filters) + filter_shape) + + center = tuple(map(lambda x: int((x - 1) / 2), filter_shape)) + for i in range(n_filters): + filter_weight = np.zeros((n_filters,) + filter_shape) + index = (i,) + center + filter_weight[index] = 1 + weight[i, ...] = filter_weight + bias = np.zeros(n_filters) + + layer.set_weights( + (add_noise(weight, np.array([0, 1])), + add_noise(bias, np.array([0, 1]))) + ) + + +def init_bn_weight(layer): + '''initilize batch norm layer weight. + ''' + n_filters = layer.num_features + new_weights = [ + add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])), + add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])), + ] + layer.set_weights(new_weights) diff --git a/nni/algorithms/hpo/networkmorphism_tuner/layers.py b/nni/algorithms/hpo/networkmorphism_tuner/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..a96c87b7801fe2687f61dfd62fdd13019ecd2ee0 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/layers.py @@ -0,0 +1,862 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
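An aside on `add_noise` above: the perturbation scale is the peak-to-peak range of the reference weights times `NOISE_RATIO`, so the common `np.array([0, 1])` reference yields uniform noise within about ±5e-5. A quick check:

```python
import numpy as np

w = np.ones((4, 4))
w_noisy = add_noise(w, np.array([0, 1]))  # ptp = 1 -> noise in [-5e-5, 5e-5)
assert np.allclose(w, w_noisy, atol=1e-4)
```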
+ +from abc import abstractmethod +from collections.abc import Iterable + +import torch +from torch import nn +from torch.nn import functional +from .utils import Constant + + +class AvgPool(nn.Module): + """ + AvgPool Module. + """ + + def __init__(self): + super().__init__() + + @abstractmethod + def forward(self, input_tensor): + pass + + +class GlobalAvgPool1d(AvgPool): + """ + GlobalAvgPool1d Module. + """ + + def forward(self, input_tensor): + return functional.avg_pool1d(input_tensor, input_tensor.size()[2:]).view( + input_tensor.size()[:2] + ) + + +class GlobalAvgPool2d(AvgPool): + """ + GlobalAvgPool2d Module. + """ + + def forward(self, input_tensor): + return functional.avg_pool2d(input_tensor, input_tensor.size()[2:]).view( + input_tensor.size()[:2] + ) + + +class GlobalAvgPool3d(AvgPool): + """ + GlobalAvgPool3d Module. + """ + + def forward(self, input_tensor): + return functional.avg_pool3d(input_tensor, input_tensor.size()[2:]).view( + input_tensor.size()[:2] + ) + + +class StubLayer: + """ + StubLayer Module. Base Module. + """ + + def __init__(self, input_node=None, output_node=None): + self.input = input_node + self.output = output_node + self.weights = None + + def build(self, shape): + """ + build shape. + """ + + def set_weights(self, weights): + """ + set weights. + """ + self.weights = weights + + def import_weights(self, torch_layer): + """ + import weights. + """ + + def import_weights_keras(self, keras_layer): + """ + import weights from keras layer. + """ + + def export_weights(self, torch_layer): + """ + export weights. + """ + + def export_weights_keras(self, keras_layer): + """ + export weights to keras layer. + """ + + def get_weights(self): + """ + get weights. + """ + return self.weights + + def size(self): + """ + size(). + """ + return 0 + + @property + def output_shape(self): + """ + output shape. + """ + return self.input.shape + + def to_real_layer(self): + """ + to real layer. + """ + + def __str__(self): + """ + str() function to print. + """ + return type(self).__name__[4:] + + +class StubWeightBiasLayer(StubLayer): + """ + StubWeightBiasLayer Module to set the bias. + """ + + def import_weights(self, torch_layer): + self.set_weights( + (torch_layer.weight.data.cpu().numpy(), + torch_layer.bias.data.cpu().numpy()) + ) + + def import_weights_keras(self, keras_layer): + self.set_weights(keras_layer.get_weights()) + + def export_weights(self, torch_layer): + torch_layer.weight.data = torch.Tensor(self.weights[0]) + torch_layer.bias.data = torch.Tensor(self.weights[1]) + + def export_weights_keras(self, keras_layer): + keras_layer.set_weights(self.weights) + + +class StubBatchNormalization(StubWeightBiasLayer): + """ + StubBatchNormalization Module. Batch Norm. 
+ """ + + def __init__(self, num_features, input_node=None, output_node=None): + super().__init__(input_node, output_node) + self.num_features = num_features + + def import_weights(self, torch_layer): + self.set_weights( + ( + torch_layer.weight.data.cpu().numpy(), + torch_layer.bias.data.cpu().numpy(), + torch_layer.running_mean.cpu().numpy(), + torch_layer.running_var.cpu().numpy(), + ) + ) + + def export_weights(self, torch_layer): + torch_layer.weight.data = torch.Tensor(self.weights[0]) + torch_layer.bias.data = torch.Tensor(self.weights[1]) + torch_layer.running_mean = torch.Tensor(self.weights[2]) + torch_layer.running_var = torch.Tensor(self.weights[3]) + + def size(self): + return self.num_features * 4 + + @abstractmethod + def to_real_layer(self): + pass + + +class StubBatchNormalization1d(StubBatchNormalization): + """ + StubBatchNormalization1d Module. + """ + + def to_real_layer(self): + return torch.nn.BatchNorm1d(self.num_features) + + +class StubBatchNormalization2d(StubBatchNormalization): + """ + StubBatchNormalization2d Module. + """ + + def to_real_layer(self): + return torch.nn.BatchNorm2d(self.num_features) + + +class StubBatchNormalization3d(StubBatchNormalization): + """ + StubBatchNormalization3d Module. + """ + + def to_real_layer(self): + return torch.nn.BatchNorm3d(self.num_features) + + +class StubDense(StubWeightBiasLayer): + """ + StubDense Module. Linear. + """ + + def __init__(self, input_units, units, input_node=None, output_node=None): + super().__init__(input_node, output_node) + self.input_units = input_units + self.units = units + + @property + def output_shape(self): + return (self.units,) + + def import_weights_keras(self, keras_layer): + self.set_weights( + (keras_layer.get_weights()[0].T, + keras_layer.get_weights()[1])) + + def export_weights_keras(self, keras_layer): + keras_layer.set_weights((self.weights[0].T, self.weights[1])) + + def size(self): + return self.input_units * self.units + self.units + + def to_real_layer(self): + return torch.nn.Linear(self.input_units, self.units) + + +class StubConv(StubWeightBiasLayer): + """ + StubConv Module. Conv. + """ + + def __init__(self, input_channel, filters, kernel_size, + stride=1, input_node=None, output_node=None): + super().__init__(input_node, output_node) + self.input_channel = input_channel + self.filters = filters + self.kernel_size = kernel_size + self.stride = stride + self.padding = int(self.kernel_size / 2) + + @property + def output_shape(self): + ret = list(self.input.shape[:-1]) + for index, dim in enumerate(ret): + ret[index] = ( + int((dim + 2 * self.padding - self.kernel_size) / self.stride) + 1 + ) + ret = ret + [self.filters] + return tuple(ret) + + def import_weights_keras(self, keras_layer): + self.set_weights( + (keras_layer.get_weights()[0].T, + keras_layer.get_weights()[1])) + + def export_weights_keras(self, keras_layer): + keras_layer.set_weights((self.weights[0].T, self.weights[1])) + + def size(self): + return (self.input_channel * self.kernel_size * + self.kernel_size + 1) * self.filters + + @abstractmethod + def to_real_layer(self): + pass + + def __str__(self): + return ( + super().__str__() + + "(" + + ", ".join( + str(item) + for item in [ + self.input_channel, + self.filters, + self.kernel_size, + self.stride, + ] + ) + + ")" + ) + + +class StubConv1d(StubConv): + """ + StubConv1d Module. 
+ """ + + def to_real_layer(self): + return torch.nn.Conv1d( + self.input_channel, + self.filters, + self.kernel_size, + stride=self.stride, + padding=self.padding, + ) + + +class StubConv2d(StubConv): + """ + StubConv2d Module. + """ + + def to_real_layer(self): + return torch.nn.Conv2d( + self.input_channel, + self.filters, + self.kernel_size, + stride=self.stride, + padding=self.padding, + ) + + +class StubConv3d(StubConv): + """ + StubConv3d Module. + """ + + def to_real_layer(self): + return torch.nn.Conv3d( + self.input_channel, + self.filters, + self.kernel_size, + stride=self.stride, + padding=self.padding, + ) + + +class StubAggregateLayer(StubLayer): + """ + StubAggregateLayer Module. + """ + + def __init__(self, input_nodes=None, output_node=None): + if input_nodes is None: + input_nodes = [] + super().__init__(input_nodes, output_node) + + +class StubConcatenate(StubAggregateLayer): + """StubConcatenate Module. + """ + @property + def output_shape(self): + ret = 0 + for current_input in self.input: + ret += current_input.shape[-1] + ret = self.input[0].shape[:-1] + (ret,) + return ret + + def to_real_layer(self): + return TorchConcatenate() + + +class StubAdd(StubAggregateLayer): + """ + StubAdd Module. + """ + @property + def output_shape(self): + return self.input[0].shape + + def to_real_layer(self): + return TorchAdd() + + +class StubFlatten(StubLayer): + """ + StubFlatten Module. + """ + @property + def output_shape(self): + ret = 1 + for dim in self.input.shape: + ret *= dim + return (ret,) + + def to_real_layer(self): + return TorchFlatten() + + +class StubReLU(StubLayer): + """ + StubReLU Module. + """ + + def to_real_layer(self): + return torch.nn.ReLU() + + +class StubSoftmax(StubLayer): + """ + StubSoftmax Module. + """ + + def to_real_layer(self): + return torch.nn.LogSoftmax(dim=1) + + +class StubDropout(StubLayer): + """ + StubDropout Module. + """ + + def __init__(self, rate, input_node=None, output_node=None): + super().__init__(input_node, output_node) + self.rate = rate + + @abstractmethod + def to_real_layer(self): + pass + + +class StubDropout1d(StubDropout): + """ + StubDropout1d Module. + """ + + def to_real_layer(self): + return torch.nn.Dropout(self.rate) + + +class StubDropout2d(StubDropout): + """ + StubDropout2d Module. + """ + + def to_real_layer(self): + return torch.nn.Dropout2d(self.rate) + + +class StubDropout3d(StubDropout): + """ + StubDropout3d Module. + """ + + def to_real_layer(self): + return torch.nn.Dropout3d(self.rate) + + +class StubInput(StubLayer): + """ + StubInput Module. + """ + + def __init__(self, input_node=None, output_node=None): + super().__init__(input_node, output_node) + + +class StubPooling(StubLayer): + """ + StubPooling Module. + """ + + def __init__(self, + kernel_size=None, + stride=None, + padding=0, + input_node=None, + output_node=None): + super().__init__(input_node, output_node) + self.kernel_size = ( + kernel_size if kernel_size is not None else Constant.POOLING_KERNEL_SIZE + ) + self.stride = stride if stride is not None else self.kernel_size + self.padding = padding + + @property + def output_shape(self): + ret = tuple() + for dim in self.input.shape[:-1]: + ret = ret + (max(int(dim / self.kernel_size), 1),) + ret = ret + (self.input.shape[-1],) + return ret + + @abstractmethod + def to_real_layer(self): + pass + + +class StubPooling1d(StubPooling): + """ + StubPooling1d Module. 
+ """ + + def to_real_layer(self): + return torch.nn.MaxPool1d(self.kernel_size, stride=self.stride) + + +class StubPooling2d(StubPooling): + """ + StubPooling2d Module. + """ + + def to_real_layer(self): + return torch.nn.MaxPool2d(self.kernel_size, stride=self.stride) + + +class StubPooling3d(StubPooling): + """ + StubPooling3d Module. + """ + + def to_real_layer(self): + return torch.nn.MaxPool3d(self.kernel_size, stride=self.stride) + + +class StubGlobalPooling(StubLayer): + """ + StubGlobalPooling Module. + """ + + def __init__(self, input_node=None, output_node=None): + super().__init__(input_node, output_node) + + @property + def output_shape(self): + return (self.input.shape[-1],) + + @abstractmethod + def to_real_layer(self): + pass + + +class StubGlobalPooling1d(StubGlobalPooling): + """ + StubGlobalPooling1d Module. + """ + + def to_real_layer(self): + return GlobalAvgPool1d() + + +class StubGlobalPooling2d(StubGlobalPooling): + """ + StubGlobalPooling2d Module. + """ + + def to_real_layer(self): + return GlobalAvgPool2d() + + +class StubGlobalPooling3d(StubGlobalPooling): + """ + StubGlobalPooling3d Module. + """ + + def to_real_layer(self): + return GlobalAvgPool3d() + + +class TorchConcatenate(nn.Module): + """ + TorchConcatenate Module. + """ + + def forward(self, input_list): + return torch.cat(input_list, dim=1) + + +class TorchAdd(nn.Module): + """ + TorchAdd Module. + """ + + def forward(self, input_list): + return input_list[0] + input_list[1] + + +class TorchFlatten(nn.Module): + """ + TorchFlatten Module. + """ + + def forward(self, input_tensor): + return input_tensor.view(input_tensor.size(0), -1) + + +def keras_dropout(layer, rate): + """ + Keras dropout layer. + """ + + from keras import layers + + input_dim = len(layer.input.shape) + if input_dim == 2: + return layers.SpatialDropout1D(rate) + elif input_dim == 3: + return layers.SpatialDropout2D(rate) + elif input_dim == 4: + return layers.SpatialDropout3D(rate) + else: + return layers.Dropout(rate) + + +def to_real_keras_layer(layer): + """ + Real keras layer. + """ + from keras import layers + + if is_layer(layer, "Dense"): + return layers.Dense(layer.units, input_shape=(layer.input_units,)) + if is_layer(layer, "Conv"): + return layers.Conv2D( + layer.filters, + layer.kernel_size, + input_shape=layer.input.shape, + padding="same", + ) # padding + if is_layer(layer, "Pooling"): + return layers.MaxPool2D(2) + if is_layer(layer, "BatchNormalization"): + return layers.BatchNormalization(input_shape=layer.input.shape) + if is_layer(layer, "Concatenate"): + return layers.Concatenate() + if is_layer(layer, "Add"): + return layers.Add() + if is_layer(layer, "Dropout"): + return keras_dropout(layer, layer.rate) + if is_layer(layer, "ReLU"): + return layers.Activation("relu") + if is_layer(layer, "Softmax"): + return layers.Activation("softmax") + if is_layer(layer, "Flatten"): + return layers.Flatten() + if is_layer(layer, "GlobalAveragePooling"): + return layers.GlobalAveragePooling2D() + return None # note: this is not written by original author, feel free to modify if you think it's incorrect + + +def is_layer(layer, layer_type): + """ + Judge the layer type. 
+ + Returns + ------- + bool + boolean -- True or False + """ + + if layer_type == "Input": + return isinstance(layer, StubInput) + elif layer_type == "Conv": + return isinstance(layer, StubConv) + elif layer_type == "Dense": + return isinstance(layer, (StubDense,)) + elif layer_type == "BatchNormalization": + return isinstance(layer, (StubBatchNormalization,)) + elif layer_type == "Concatenate": + return isinstance(layer, (StubConcatenate,)) + elif layer_type == "Add": + return isinstance(layer, (StubAdd,)) + elif layer_type == "Pooling": + return isinstance(layer, StubPooling) + elif layer_type == "Dropout": + return isinstance(layer, (StubDropout,)) + elif layer_type == "Softmax": + return isinstance(layer, (StubSoftmax,)) + elif layer_type == "ReLU": + return isinstance(layer, (StubReLU,)) + elif layer_type == "Flatten": + return isinstance(layer, (StubFlatten,)) + elif layer_type == "GlobalAveragePooling": + return isinstance(layer, StubGlobalPooling) + return None # note: this is not written by original author, feel free to modify if you think it's incorrect + + +def layer_description_extractor(layer, node_to_id): + """ + Get layer description. + """ + + layer_input = layer.input + layer_output = layer.output + if layer_input is not None: + if isinstance(layer_input, Iterable): + layer_input = list(map(lambda x: node_to_id[x], layer_input)) + else: + layer_input = node_to_id[layer_input] + + if layer_output is not None: + layer_output = node_to_id[layer_output] + + if isinstance(layer, StubConv): + return ( + type(layer).__name__, + layer_input, + layer_output, + layer.input_channel, + layer.filters, + layer.kernel_size, + layer.stride, + layer.padding, + ) + elif isinstance(layer, (StubDense,)): + return [ + type(layer).__name__, + layer_input, + layer_output, + layer.input_units, + layer.units, + ] + elif isinstance(layer, (StubBatchNormalization,)): + return (type(layer).__name__, layer_input, + layer_output, layer.num_features) + elif isinstance(layer, (StubDropout,)): + return (type(layer).__name__, layer_input, layer_output, layer.rate) + elif isinstance(layer, StubPooling): + return ( + type(layer).__name__, + layer_input, + layer_output, + layer.kernel_size, + layer.stride, + layer.padding, + ) + else: + return (type(layer).__name__, layer_input, layer_output) + + +def layer_description_builder(layer_information, id_to_node): + """build layer from description. 
+ """ + layer_type = layer_information[0] + + layer_input_ids = layer_information[1] + if isinstance(layer_input_ids, Iterable): + layer_input = list(map(lambda x: id_to_node[x], layer_input_ids)) + else: + layer_input = id_to_node[layer_input_ids] + layer_output = id_to_node[layer_information[2]] + if layer_type.startswith("StubConv"): + input_channel = layer_information[3] + filters = layer_information[4] + kernel_size = layer_information[5] + stride = layer_information[6] + return globals()[layer_type]( + input_channel, filters, kernel_size, stride, layer_input, layer_output + ) + elif layer_type.startswith("StubDense"): + input_units = layer_information[3] + units = layer_information[4] + return globals()[layer_type](input_units, units, layer_input, layer_output) + elif layer_type.startswith("StubBatchNormalization"): + num_features = layer_information[3] + return globals()[layer_type](num_features, layer_input, layer_output) + elif layer_type.startswith("StubDropout"): + rate = layer_information[3] + return globals()[layer_type](rate, layer_input, layer_output) + elif layer_type.startswith("StubPooling"): + kernel_size = layer_information[3] + stride = layer_information[4] + padding = layer_information[5] + return globals()[layer_type](kernel_size, stride, padding, layer_input, layer_output) + else: + return globals()[layer_type](layer_input, layer_output) + + +def layer_width(layer): + """ + Get layer width. + """ + + if is_layer(layer, "Dense"): + return layer.units + if is_layer(layer, "Conv"): + return layer.filters + raise TypeError("The layer should be either Dense or Conv layer.") + + +def set_torch_weight_to_stub(torch_layer, stub_layer): + stub_layer.import_weights(torch_layer) + + +def set_keras_weight_to_stub(keras_layer, stub_layer): + stub_layer.import_weights_keras(keras_layer) + + +def set_stub_weight_to_torch(stub_layer, torch_layer): + stub_layer.export_weights(torch_layer) + + +def set_stub_weight_to_keras(stub_layer, keras_layer): + stub_layer.export_weights_keras(keras_layer) + + +def get_conv_class(n_dim): + conv_class_list = [StubConv1d, StubConv2d, StubConv3d] + return conv_class_list[n_dim - 1] + + +def get_dropout_class(n_dim): + dropout_class_list = [StubDropout1d, StubDropout2d, StubDropout3d] + return dropout_class_list[n_dim - 1] + + +def get_global_avg_pooling_class(n_dim): + global_avg_pooling_class_list = [ + StubGlobalPooling1d, + StubGlobalPooling2d, + StubGlobalPooling3d, + ] + return global_avg_pooling_class_list[n_dim - 1] + + +def get_pooling_class(n_dim): + pooling_class_list = [StubPooling1d, StubPooling2d, StubPooling3d] + return pooling_class_list[n_dim - 1] + + +def get_batch_norm_class(n_dim): + batch_norm_class_list = [ + StubBatchNormalization1d, + StubBatchNormalization2d, + StubBatchNormalization3d, + ] + return batch_norm_class_list[n_dim - 1] + + +def get_n_dim(layer): + if isinstance(layer, ( + StubConv1d, + StubDropout1d, + StubGlobalPooling1d, + StubPooling1d, + StubBatchNormalization1d, + )): + return 1 + if isinstance(layer, ( + StubConv2d, + StubDropout2d, + StubGlobalPooling2d, + StubPooling2d, + StubBatchNormalization2d, + )): + return 2 + if isinstance(layer, ( + StubConv3d, + StubDropout3d, + StubGlobalPooling3d, + StubPooling3d, + StubBatchNormalization3d, + )): + return 3 + return -1 diff --git a/nni/algorithms/hpo/networkmorphism_tuner/networkmorphism_tuner.py b/nni/algorithms/hpo/networkmorphism_tuner/networkmorphism_tuner.py new file mode 100644 index 
0000000000000000000000000000000000000000..a61cefa666880208870fdb3046be94e02c7fd1d1 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/networkmorphism_tuner.py @@ -0,0 +1,328 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +networkmorphsim_tuner.py +""" + +import logging +import os +from schema import Optional, Schema +from nni import ClassArgsValidator +from nni.tuner import Tuner +from nni.utils import OptimizeMode, extract_scalar_reward +from .bayesian import BayesianOptimizer +from .nn import CnnGenerator, MlpGenerator +from .utils import Constant +from .graph import graph_to_json, json_to_graph + +logger = logging.getLogger("NetworkMorphism_AutoML") + +class NetworkMorphismClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('task'): self.choices('task', 'cv', 'nlp', 'common'), + Optional('input_width'): int, + Optional('input_channel'): int, + Optional('n_output_node'): int + }).validate(kwargs) + +class NetworkMorphismTuner(Tuner): + """ + NetworkMorphismTuner is a tuner which using network morphism techniques. + + Attributes + ---------- + n_classes : int + The class number or output node number (default: ``10``) + input_shape : tuple + A tuple including: (input_width, input_width, input_channel) + t_min : float + The minimum temperature for simulated annealing. (default: ``Constant.T_MIN``) + beta : float + The beta in acquisition function. (default: ``Constant.BETA``) + algorithm_name : str + algorithm name used in the network morphism (default: ``"Bayesian"``) + optimize_mode : str + optimize mode "minimize" or "maximize" (default: ``"minimize"``) + verbose : bool + verbose to print the log (default: ``True``) + bo : BayesianOptimizer + The optimizer used in networkmorphsim tuner. + max_model_size : int + max model size to the graph (default: ``Constant.MAX_MODEL_SIZE``) + default_model_len : int + default model length (default: ``Constant.MODEL_LEN``) + default_model_width : int + default model width (default: ``Constant.MODEL_WIDTH``) + search_space : dict + """ + + def __init__( + self, + task="cv", + input_width=32, + input_channel=3, + n_output_node=10, + algorithm_name="Bayesian", + optimize_mode="maximize", + path="model_path", + verbose=True, + beta=Constant.BETA, + t_min=Constant.T_MIN, + max_model_size=Constant.MAX_MODEL_SIZE, + default_model_len=Constant.MODEL_LEN, + default_model_width=Constant.MODEL_WIDTH, + ): + """ + initilizer of the NetworkMorphismTuner. 
+ """ + + if not os.path.exists(path): + os.makedirs(path) + self.path = os.path.join(os.getcwd(), path) + if task == "cv": + self.generators = [CnnGenerator] + elif task == "common": + self.generators = [MlpGenerator] + else: + raise NotImplementedError( + '{} task not supported in List ["cv","common"]') + + self.n_classes = n_output_node + self.input_shape = (input_width, input_width, input_channel) + + self.t_min = t_min + self.beta = beta + self.algorithm_name = algorithm_name + self.optimize_mode = OptimizeMode(optimize_mode) + self.json = None + self.total_data = {} + self.verbose = verbose + self.model_count = 0 + + self.bo = BayesianOptimizer( + self, self.t_min, self.optimize_mode, self.beta) + self.training_queue = [] + self.descriptors = [] + self.history = [] + + self.max_model_size = max_model_size + self.default_model_len = default_model_len + self.default_model_width = default_model_width + + self.search_space = dict() + + + def update_search_space(self, search_space): + """ + Update search space definition in tuner by search_space in neural architecture. + """ + self.search_space = search_space + + def generate_parameters(self, parameter_id, **kwargs): + """ + Returns a set of trial neural architecture, as a serializable object. + + Parameters + ---------- + parameter_id : int + """ + if not self.history: + self.init_search() + + new_father_id = None + generated_graph = None + if not self.training_queue: + new_father_id, generated_graph = self.generate() + new_model_id = self.model_count + self.model_count += 1 + self.training_queue.append( + (generated_graph, new_father_id, new_model_id)) + self.descriptors.append(generated_graph.extract_descriptor()) + + graph, father_id, model_id = self.training_queue.pop(0) + + # from graph to json + json_model_path = os.path.join(self.path, str(model_id) + ".json") + json_out = graph_to_json(graph, json_model_path) + self.total_data[parameter_id] = (json_out, father_id, model_id) + + return json_out + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Record an observation of the objective function. + + Parameters + ---------- + parameter_id : int + the id of a group of paramters that generated by nni manager. + parameters : dict + A group of parameters. + value : dict/float + if value is dict, it should have "default" key. + """ + reward = extract_scalar_reward(value) + + if parameter_id not in self.total_data: + raise RuntimeError("Received parameter_id not in total_data.") + + (_, father_id, model_id) = self.total_data[parameter_id] + + graph = self.bo.searcher.load_model_by_id(model_id) + + # to use the value and graph + self.add_model(reward, model_id) + self.update(father_id, graph, reward, model_id) + + + def init_search(self): + """ + Call the generators to generate the initial architectures for the search. + """ + if self.verbose: + logger.info("Initializing search.") + for generator in self.generators: + graph = generator(self.n_classes, self.input_shape).generate( + self.default_model_len, self.default_model_width + ) + model_id = self.model_count + self.model_count += 1 + self.training_queue.append((graph, -1, model_id)) + self.descriptors.append(graph.extract_descriptor()) + + if self.verbose: + logger.info("Initialization finished.") + + + def generate(self): + """ + Generate the next neural architecture. + + Returns + ------- + other_info : any object + Anything to be saved in the training queue together with the architecture. + generated_graph : Graph + An instance of Graph. 
+ """ + generated_graph, new_father_id = self.bo.generate(self.descriptors) + if new_father_id is None: + new_father_id = 0 + generated_graph = self.generators[0]( + self.n_classes, self.input_shape + ).generate(self.default_model_len, self.default_model_width) + + return new_father_id, generated_graph + + def update(self, other_info, graph, metric_value, model_id): + """ + Update the controller with the evaluation result of a neural architecture. + + Parameters + ---------- + other_info: any object + In our case it is the father ID in the search tree. + graph: graph.Graph + An instance of Graph. The trained neural architecture. + metric_value: float + The final evaluated metric value. + model_id: int + """ + father_id = other_info + self.bo.fit([graph.extract_descriptor()], [metric_value]) + self.bo.add_child(father_id, model_id) + + def add_model(self, metric_value, model_id): + """ + Add a model to the history, x_queue and y_queue + + Parameters + ---------- + metric_value : float + model_id : int + + Returns + ------- + ret : dict + """ + if self.verbose: + logger.info("Saving model.") + + # Update best_model text file + ret = {"model_id": model_id, "metric_value": metric_value} + self.history.append(ret) + if model_id == self.get_best_model_id(): + with open(os.path.join(self.path, "best_model.txt"), "w") as file: + file.write("best model: " + str(model_id)) + return ret + + + def get_best_model_id(self): + """ + Get the best model_id from history using the metric value + """ + + if self.optimize_mode is OptimizeMode.Maximize: + return max(self.history, key=lambda x: x["metric_value"])[ + "model_id"] + return min(self.history, key=lambda x: x["metric_value"])["model_id"] + + + def load_model_by_id(self, model_id): + """ + Get the model by model_id + + Parameters + ---------- + model_id : int + model index + + Returns + ------- + load_model : graph.Graph + the model graph representation + """ + + with open(os.path.join(self.path, str(model_id) + ".json")) as fin: + json_str = fin.read().replace("\n", "") + + load_model = json_to_graph(json_str) + return load_model + + def load_best_model(self): + """ + Get the best model by model id + + Returns + ------- + load_model : graph.Graph + the model graph representation + """ + return self.load_model_by_id(self.get_best_model_id()) + + def get_metric_value_by_id(self, model_id): + """ + Get the model metric value by its model_id + + Parameters + ---------- + model_id : int + model index + + Returns + ------- + float + the model metric + """ + for item in self.history: + if item["model_id"] == model_id: + return item["metric_value"] + return None + + def import_data(self, data): + pass diff --git a/nni/algorithms/hpo/networkmorphism_tuner/nn.py b/nni/algorithms/hpo/networkmorphism_tuner/nn.py new file mode 100644 index 0000000000000000000000000000000000000000..9e0072f9b39e3a68b865ba850157fe1080791983 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/nn.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from abc import abstractmethod + +from .graph import Graph +from .layers import (StubDense, StubDropout1d, + StubReLU, get_batch_norm_class, + get_conv_class, + get_dropout_class, + get_global_avg_pooling_class, + get_pooling_class) +from .utils import Constant + + +class NetworkGenerator: + """The base class for generating a network. + It can be used to generate a CNN or Multi-Layer Perceptron. + Attributes: + n_output_node: Number of output nodes in the network.
+ input_shape: A tuple to represent the input shape. + """ + + def __init__(self, n_output_node, input_shape): + self.n_output_node = n_output_node + self.input_shape = input_shape + + @abstractmethod + def generate(self, model_len, model_width): + pass + + +class CnnGenerator(NetworkGenerator): + """A class to generate CNN. + Attributes: + n_dim: `len(self.input_shape) - 1` + conv: A class that represents `(n_dim-1)` dimensional convolution. + dropout: A class that represents `(n_dim-1)` dimensional dropout. + global_avg_pooling: A class that represents `(n_dim-1)` dimensional Global Average Pooling. + pooling: A class that represents `(n_dim-1)` dimensional pooling. + batch_norm: A class that represents `(n_dim-1)` dimensional batch normalization. + """ + + def __init__(self, n_output_node, input_shape): + super(CnnGenerator, self).__init__(n_output_node, input_shape) + self.n_dim = len(self.input_shape) - 1 + if len(self.input_shape) > 4: + raise ValueError("The input dimension is too high.") + if len(self.input_shape) < 2: + raise ValueError("The input dimension is too low.") + self.conv = get_conv_class(self.n_dim) + self.dropout = get_dropout_class(self.n_dim) + self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim) + self.pooling = get_pooling_class(self.n_dim) + self.batch_norm = get_batch_norm_class(self.n_dim) + + def generate(self, model_len=None, model_width=None): + """Generates a CNN. + Args: + model_len: An integer. Number of convolutional layers. + model_width: An integer. Number of filters for the convolutional layers. + Returns: + An instance of the class Graph. Represents the neural architecture graph of the generated model. + """ + + if model_len is None: + model_len = Constant.MODEL_LEN + if model_width is None: + model_width = Constant.MODEL_WIDTH + pooling_len = int(model_len / 4) + graph = Graph(self.input_shape, False) + temp_input_channel = self.input_shape[-1] + output_node_id = 0 + stride = 1 + for i in range(model_len): + output_node_id = graph.add_layer(StubReLU(), output_node_id) + output_node_id = graph.add_layer( + self.batch_norm( + graph.node_list[output_node_id].shape[-1]), output_node_id + ) + output_node_id = graph.add_layer( + self.conv( + temp_input_channel, + model_width, + kernel_size=3, + stride=stride), + output_node_id, + ) + temp_input_channel = model_width + if pooling_len == 0 or ( + (i + 1) % pooling_len == 0 and i != model_len - 1): + output_node_id = graph.add_layer( + self.pooling(), output_node_id) + + output_node_id = graph.add_layer( + self.global_avg_pooling(), output_node_id) + output_node_id = graph.add_layer( + self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id + ) + output_node_id = graph.add_layer( + StubDense(graph.node_list[output_node_id].shape[0], model_width), + output_node_id, + ) + output_node_id = graph.add_layer(StubReLU(), output_node_id) + graph.add_layer( + StubDense( + model_width, + self.n_output_node), + output_node_id) + return graph + + +class MlpGenerator(NetworkGenerator): + """A class to generate Multi-Layer Perceptron. + """ + + def __init__(self, n_output_node, input_shape): + """Initialize the instance. + Args: + n_output_node: An integer. Number of output nodes in the network. + input_shape: A tuple. Input shape of the network. If it is 1D, ensure the value is appended by a comma + in the tuple. 
+ """ + super(MlpGenerator, self).__init__(n_output_node, input_shape) + if len(self.input_shape) > 1: + raise ValueError("The input dimension is too high.") + + def generate(self, model_len=None, model_width=None): + """Generates a Multi-Layer Perceptron. + Args: + model_len: An integer. Number of hidden layers. + model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the + number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this + value. + Returns: + An instance of the class Graph. Represents the neural architecture graph of the generated model. + """ + if model_len is None: + model_len = Constant.MODEL_LEN + if model_width is None: + model_width = Constant.MODEL_WIDTH + if isinstance(model_width, list) and not len(model_width) == model_len: + raise ValueError( + "The length of 'model_width' does not match 'model_len'") + elif isinstance(model_width, int): + model_width = [model_width] * model_len + + graph = Graph(self.input_shape, False) + output_node_id = 0 + n_nodes_prev_layer = self.input_shape[0] + for width in model_width: + output_node_id = graph.add_layer( + StubDense(n_nodes_prev_layer, width), output_node_id + ) + output_node_id = graph.add_layer( + StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id + ) + output_node_id = graph.add_layer(StubReLU(), output_node_id) + n_nodes_prev_layer = width + + graph.add_layer( + StubDense( + n_nodes_prev_layer, + self.n_output_node), + output_node_id) + return graph diff --git a/nni/algorithms/hpo/networkmorphism_tuner/utils.py b/nni/algorithms/hpo/networkmorphism_tuner/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0634e7f578bdeb505ce984be971beb3978c78a44 --- /dev/null +++ b/nni/algorithms/hpo/networkmorphism_tuner/utils.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + + +class Constant: + '''Constant for the Tuner. + ''' + MAX_LAYERS = 100 + N_NEIGHBOURS = 8 + MAX_MODEL_SIZE = 1 << 24 + KERNEL_LAMBDA = 1.0 + BETA = 2.576 + MLP_MODEL_LEN = 3 + MLP_MODEL_WIDTH = 5 + MODEL_LEN = 3 + MODEL_WIDTH = 64 + POOLING_KERNEL_SIZE = 2 + DENSE_DROPOUT_RATE = 0.5 + CONV_DROPOUT_RATE = 0.25 + MLP_DROPOUT_RATE = 0.25 + CONV_BLOCK_DISTANCE = 2 + BATCH_SIZE = 128 + T_MIN = 0.0001 diff --git a/nni/algorithms/hpo/pbt_tuner.py b/nni/algorithms/hpo/pbt_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..507c519a2a880ca99833433a6f61278e308a469f --- /dev/null +++ b/nni/algorithms/hpo/pbt_tuner.py @@ -0,0 +1,456 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import copy +import logging +import os +import random +import numpy as np +from schema import Schema, Optional + +import nni +from nni import ClassArgsValidator +import nni.parameter_expressions +from nni.tuner import Tuner +from nni.utils import OptimizeMode, extract_scalar_reward, split_index, json2parameter, json2space + + +logger = logging.getLogger('pbt_tuner_AutoML') + + +def perturbation(hyperparameter_type, value, resample_probability, uv, ub, lv, lb, random_state): + """ + Perturbation for hyperparameters + + Parameters + ---------- + hyperparameter_type : str + type of hyperparameter + value : list + parameters for sampling hyperparameter + resample_probability : float + probability for resampling + uv : float/int + upper value after perturbation + ub : float/int + upper bound + lv : float/int + lower value after perturbation + lb : float/int + lower bound + random_state : RandomState + random state + """ + if random.random() < resample_probability: + if hyperparameter_type == "choice": + return value.index(nni.parameter_expressions.choice(value, random_state)) + else: + return getattr(nni.parameter_expressions, hyperparameter_type)(*(value + [random_state])) + else: + if random.random() > 0.5: + return min(uv, ub) + else: + return max(lv, lb) + + +def exploit_and_explore(bot_trial_info, top_trial_info, factor, resample_probability, epoch, search_space): + """ + Replace the checkpoint of bot_trial with top's, and perturb hyperparameters + + Parameters + ---------- + bot_trial_info : TrialInfo + bottom model whose parameters should be replaced + top_trial_info : TrialInfo + better model + factor : float + factor for perturbation + resample_probability : float + probability for resampling + epoch : int + step of PBTTuner + search_space : dict + search_space to keep perturbed hyperparameters in range + """ + bot_checkpoint_dir = bot_trial_info.checkpoint_dir + top_hyper_parameters = top_trial_info.hyper_parameters + hyper_parameters = copy.deepcopy(top_hyper_parameters) + random_state = np.random.RandomState() + hyper_parameters['load_checkpoint_dir'] = hyper_parameters['save_checkpoint_dir'] + hyper_parameters['save_checkpoint_dir'] = os.path.join(bot_checkpoint_dir, str(epoch)) + for key in hyper_parameters.keys(): + hyper_parameter = hyper_parameters[key] + if key == 'load_checkpoint_dir' or key == 'save_checkpoint_dir': + continue + elif search_space[key]["_type"] == "choice": + choices = search_space[key]["_value"] + ub, uv = len(choices) - 1, choices.index(hyper_parameter) + 1 + lb, lv = 0, choices.index(hyper_parameter) - 1 + elif search_space[key]["_type"] == "randint": + lb, ub = search_space[key]["_value"][:2] + ub -= 1 + uv = hyper_parameter + 1 + lv = hyper_parameter - 1 + elif search_space[key]["_type"] == "uniform": + lb, ub = search_space[key]["_value"][:2] + perturb = (ub - lb) * factor + uv = hyper_parameter + perturb + lv = hyper_parameter - perturb + elif search_space[key]["_type"] == "quniform": + lb, ub, q = search_space[key]["_value"][:3] + multi = round(hyper_parameter / q) + uv = (multi + 1) * q + lv = (multi - 1) * q + elif search_space[key]["_type"] == "loguniform": + lb, ub = search_space[key]["_value"][:2] + perturb = (np.log(ub) - np.log(lb)) * factor + uv = np.exp(min(np.log(hyper_parameter) + perturb, np.log(ub))) + lv = np.exp(max(np.log(hyper_parameter) - perturb, np.log(lb))) + elif search_space[key]["_type"] == "qloguniform": + lb, ub, q = search_space[key]["_value"][:3] + multi = round(hyper_parameter / q) + uv = (multi + 1) * q + lv = (multi - 1) * q
+ elif search_space[key]["_type"] == "normal": + sigma = search_space[key]["_value"][1] + perturb = sigma * factor + uv = ub = hyper_parameter + perturb + lv = lb = hyper_parameter - perturb + elif search_space[key]["_type"] == "qnormal": + q = search_space[key]["_value"][2] + uv = ub = hyper_parameter + q + lv = lb = hyper_parameter - q + elif search_space[key]["_type"] == "lognormal": + sigma = search_space[key]["_value"][1] + perturb = sigma * factor + uv = ub = np.exp(np.log(hyper_parameter) + perturb) + lv = lb = np.exp(np.log(hyper_parameter) - perturb) + elif search_space[key]["_type"] == "qlognormal": + q = search_space[key]["_value"][2] + uv = ub = hyper_parameter + q + lv, lb = hyper_parameter - q, 1E-10 + else: + logger.warning("Illegal type to perturb: %s", search_space[key]["_type"]) + continue + + if search_space[key]["_type"] == "choice": + idx = perturbation(search_space[key]["_type"], search_space[key]["_value"], + resample_probability, uv, ub, lv, lb, random_state) + hyper_parameters[key] = choices[idx] + else: + hyper_parameters[key] = perturbation(search_space[key]["_type"], search_space[key]["_value"], + resample_probability, uv, ub, lv, lb, random_state) + bot_trial_info.hyper_parameters = hyper_parameters + bot_trial_info.clean_id() + + +class TrialInfo: + """ + Information of each trial, refresh for each epoch + + """ + + def __init__(self, checkpoint_dir=None, hyper_parameters=None, parameter_id=None, score=None): + self.checkpoint_dir = checkpoint_dir + self.hyper_parameters = hyper_parameters + self.parameter_id = parameter_id + self.score = score + + def clean_id(self): + self.parameter_id = None + +class PBTClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('all_checkpoint_dir'): str, + Optional('population_size'): self.range('population_size', int, 0, 99999), + Optional('factors'): float, + Optional('fraction'): float, + }).validate(kwargs) + +class PBTTuner(Tuner): + def __init__(self, optimize_mode="maximize", all_checkpoint_dir=None, population_size=10, factor=0.2, + resample_probability=0.25, fraction=0.2): + """ + Initialization + + Parameters + ---------- + optimize_mode : str + maximize or minimize + all_checkpoint_dir : str + directory to store training model checkpoint + population_size : int + number of trials for each epoch + factor : float + factor for perturbation + resample_probability : float + probability for resampling + fraction : float + fraction for selecting bottom and top trials + """ + self.optimize_mode = OptimizeMode(optimize_mode) + if all_checkpoint_dir is None: + all_checkpoint_dir = os.getenv('NNI_CHECKPOINT_DIRECTORY') + logger.info("Checkpoint dir is set to %s by default.", all_checkpoint_dir) + self.all_checkpoint_dir = all_checkpoint_dir + self.population_size = population_size + self.factor = factor + self.resample_probability = resample_probability + self.fraction = fraction + # defined in trial code + #self.perturbation_interval = perturbation_interval + + self.population = None + self.pos = -1 + self.param_ids = [] + self.running = {} + self.finished = [] + self.credit = 0 + self.finished_trials = 0 + self.epoch = 0 + + self.searchspace_json = None + self.space = None + + self.send_trial_callback = None + + logger.info('PBT tuner initialization') + + def update_search_space(self, search_space): + """ + Get search space + + Parameters + ---------- + search_space : dict + Search space + """ + 
logger.info('Update search space %s', search_space) + self.searchspace_json = search_space + self.space = json2space(self.searchspace_json) + + self.random_state = np.random.RandomState() + self.population = [] + is_rand = dict() + + for item in self.space: + is_rand[item] = True + + for i in range(self.population_size): + hyper_parameters = json2parameter( + self.searchspace_json, is_rand, self.random_state) + hyper_parameters = split_index(hyper_parameters) + checkpoint_dir = os.path.join(self.all_checkpoint_dir, str(i)) + hyper_parameters['load_checkpoint_dir'] = os.path.join(checkpoint_dir, str(self.epoch)) + hyper_parameters['save_checkpoint_dir'] = os.path.join(checkpoint_dir, str(self.epoch)) + self.population.append(TrialInfo(checkpoint_dir=checkpoint_dir, hyper_parameters=hyper_parameters)) + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """ + Returns multiple sets of trial (hyper-)parameters, as an iterable of serializable objects. + + Parameters + ---------- + parameter_id_list : list of int + Unique identifiers for each set of requested hyper-parameters. + These will later be used in :meth:`receive_trial_result`. + **kwargs + Used for send_trial_callback. + + Returns + ------- + list + A list of newly generated configurations + """ + result = [] + self.send_trial_callback = kwargs['st_callback'] + for parameter_id in parameter_id_list: + had_exception = False + try: + logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def generate_parameters(self, parameter_id, **kwargs): + """ + Generate parameters; if no trial configuration is available for now, increase ``self.credit`` by one so that the configuration can be sent later + + Parameters + ---------- + parameter_id : int + Unique identifier for requested hyper-parameters. + This will later be used in :meth:`receive_trial_result`.
+ **kwargs + Not used + + Returns + ------- + dict + One newly generated configuration + + """ + if self.pos == self.population_size - 1: + logger.debug('Credit added by one in parameters request') + self.credit += 1 + self.param_ids.append(parameter_id) + raise nni.NoMoreTrialError('No more parameters now.') + self.pos += 1 + trial_info = self.population[self.pos] + trial_info.parameter_id = parameter_id + self.running[parameter_id] = trial_info + logger.info('Generate parameter : %s', trial_info.hyper_parameters) + return trial_info.hyper_parameters + + def _proceed_next_epoch(self): + """ + Sort the finished trials by score, let the bottom fraction exploit and explore from the top fraction, then start the next epoch. + """ + logger.info('Proceeding to next epoch') + self.epoch += 1 + self.population = [] + self.pos = -1 + self.running = {} + #exploit and explore + reverse = self.optimize_mode == OptimizeMode.Maximize + self.finished = sorted(self.finished, key=lambda x: x.score, reverse=reverse) + cutoff = int(np.ceil(self.fraction * len(self.finished))) + tops = self.finished[:cutoff] + bottoms = self.finished[self.finished_trials - cutoff:] + for bottom in bottoms: + top = np.random.choice(tops) + exploit_and_explore(bottom, top, self.factor, self.resample_probability, self.epoch, self.searchspace_json) + for trial in self.finished: + if trial not in bottoms: + trial.clean_id() + trial.hyper_parameters['load_checkpoint_dir'] = trial.hyper_parameters['save_checkpoint_dir'] + trial.hyper_parameters['save_checkpoint_dir'] = os.path.join(trial.checkpoint_dir, str(self.epoch)) + self.finished_trials = 0 + for _ in range(self.population_size): + trial_info = self.finished.pop() + self.population.append(trial_info) + while self.credit > 0 and self.pos + 1 < len(self.population): + self.credit -= 1 + self.pos += 1 + parameter_id = self.param_ids.pop() + trial_info = self.population[self.pos] + trial_info.parameter_id = parameter_id + self.running[parameter_id] = trial_info + self.send_trial_callback(parameter_id, trial_info.hyper_parameters) + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Receive a trial's result. If the number of finished trials equals ``self.population_size``, start the next epoch to + train the model. + + Parameters + ---------- + parameter_id : int + Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`. + parameters : dict + Hyper-parameters generated by :meth:`generate_parameters`. + value : dict + Result from trial (the return value of :func:`nni.report_final_result`). + """ + logger.info('Get one trial result, id = %d, value = %s', parameter_id, value) + value = extract_scalar_reward(value) + trial_info = self.running.pop(parameter_id, None) + trial_info.score = value + self.finished.append(trial_info) + self.finished_trials += 1 + if self.finished_trials == self.population_size: + self._proceed_next_epoch() + + def trial_end(self, parameter_id, success, **kwargs): + """ + Deal with trial failure + + Parameters + ---------- + parameter_id : int + Unique identifier for hyper-parameters used by this trial. + success : bool + True if the trial successfully completed; False if failed or terminated. + **kwargs + Unstable parameters which should be ignored by normal users.
+ """ + if success: + return + if self.optimize_mode == OptimizeMode.Minimize: + value = float('inf') + else: + value = float('-inf') + trial_info = self.running.pop(parameter_id, None) + trial_info.score = value + self.finished.append(trial_info) + self.finished_trials += 1 + if self.finished_trials == self.population_size: + self._proceed_next_epoch() + + def import_data(self, data): + """ + Parameters + ---------- + data : json obj + imported data records + + Returns + ------- + int + the start epoch number after data imported, only used for unittest + """ + if self.running: + logger.warning("Do not support importing data in the middle of experiment") + return + # the following is for experiment resume + _completed_num = 0 + epoch_data_dict = {} + for trial_info in data: + logger.info("Process data record %s / %s", _completed_num, len(data)) + _completed_num += 1 + # simply validate data format + _params = trial_info["parameter"] + _value = trial_info['value'] + # assign fake value for failed trials + if not _value: + logger.info("Useless trial data, value is %s, skip this trial data.", _value) + _value = float('inf') if self.optimize_mode == OptimizeMode.Minimize else float('-inf') + _value = extract_scalar_reward(_value) + if 'save_checkpoint_dir' not in _params: + logger.warning("Invalid data record: save_checkpoint_dir is missing, abandon data import.") + return + epoch_num = int(os.path.basename(_params['save_checkpoint_dir'])) + if epoch_num not in epoch_data_dict: + epoch_data_dict[epoch_num] = [] + epoch_data_dict[epoch_num].append((_params, _value)) + if not epoch_data_dict: + logger.warning("No valid epochs, abandon data import.") + return + # figure out start epoch for resume + max_epoch_num = max(epoch_data_dict, key=int) + if len(epoch_data_dict[max_epoch_num]) < self.population_size: + max_epoch_num -= 1 + # If there is no a single complete round, no data to import, start from scratch + if max_epoch_num < 0: + logger.warning("No completed epoch, abandon data import.") + return + assert len(epoch_data_dict[max_epoch_num]) == self.population_size + # check existence of trial save checkpoint dir + for params, _ in epoch_data_dict[max_epoch_num]: + if not os.path.isdir(params['save_checkpoint_dir']): + logger.warning("save_checkpoint_dir %s does not exist, data will not be resumed", params['save_checkpoint_dir']) + return + # resume data + self.epoch = max_epoch_num + self.finished_trials = self.population_size + for params, value in epoch_data_dict[max_epoch_num]: + checkpoint_dir = os.path.dirname(params['save_checkpoint_dir']) + self.finished.append(TrialInfo(checkpoint_dir=checkpoint_dir, hyper_parameters=params, score=value)) + self._proceed_next_epoch() + logger.info("Successfully import data to PBT tuner, total data: %d, imported data: %d.", len(data), self.population_size) + logger.info("Start from epoch %d ...", self.epoch) + return self.epoch # return for test diff --git a/nni/algorithms/hpo/ppo_tuner/__init__.py b/nni/algorithms/hpo/ppo_tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..854090c93e71c710dec79c570953ac1fb920dcdc --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/__init__.py @@ -0,0 +1 @@ +from .ppo_tuner import PPOTuner, PPOClassArgsValidator diff --git a/nni/algorithms/hpo/ppo_tuner/distri.py b/nni/algorithms/hpo/ppo_tuner/distri.py new file mode 100644 index 0000000000000000000000000000000000000000..8a2a5ed20c3db6086c74339428fd25e0cf806f57 --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/distri.py @@ -0,0 +1,183 @@ 
+# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +functions for sampling from hidden state +""" + +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +from .util import fc + + +class Pd: + """ + A particular probability distribution + """ + def flatparam(self): + raise NotImplementedError + def mode(self): + raise NotImplementedError + def neglogp(self, x): + # Usually it's easier to define the negative logprob + raise NotImplementedError + def kl(self, other): + raise NotImplementedError + def entropy(self): + raise NotImplementedError + def sample(self): + raise NotImplementedError + def logp(self, x): + return - self.neglogp(x) + def get_shape(self): + return self.flatparam().shape + @property + def shape(self): + return self.get_shape() + def __getitem__(self, idx): + return self.__class__(self.flatparam()[idx]) + +class PdType: + """ + Parametrized family of probability distributions + """ + def pdclass(self): + raise NotImplementedError + def pdfromflat(self, flat, mask, nsteps, size, is_act_model): + return self.pdclass()(flat, mask, nsteps, size, is_act_model) + def pdfromlatent(self, latent_vector, init_scale, init_bias): + raise NotImplementedError + def param_shape(self): + raise NotImplementedError + def sample_shape(self): + raise NotImplementedError + def sample_dtype(self): + raise NotImplementedError + + def param_placeholder(self, prepend_shape, name=None): + return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name) + def sample_placeholder(self, prepend_shape, name=None): + return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name) + +class CategoricalPd(Pd): + """ + Categorical probability distribution + """ + def __init__(self, logits, mask_npinf, nsteps, size, is_act_model): + self.logits = logits + self.mask_npinf = mask_npinf + self.nsteps = nsteps + self.size = size + self.is_act_model = is_act_model + def flatparam(self): + return self.logits + def mode(self): + return tf.argmax(self.logits, axis=-1) + + @property + def mean(self): + return tf.nn.softmax(self.logits) + def neglogp(self, x): + """ + return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x) + Note: we can't use sparse_softmax_cross_entropy_with_logits because + the implementation does not allow second-order derivatives... 
+ """ + if x.dtype in {tf.uint8, tf.int32, tf.int64}: + # one-hot encoding + x_shape_list = x.shape.as_list() + logits_shape_list = self.logits.get_shape().as_list()[:-1] + for xs, ls in zip(x_shape_list, logits_shape_list): + if xs is not None and ls is not None: + assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls) + + x = tf.one_hot(x, self.logits.get_shape().as_list()[-1]) + else: + # already encoded + assert x.shape.as_list() == self.logits.shape.as_list() + + return tf.nn.softmax_cross_entropy_with_logits_v2( + logits=self.logits, + labels=x) + + def kl(self, other): + """kl""" + a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) + a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True) + ea0 = tf.exp(a0) + ea1 = tf.exp(a1) + z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) + z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True) + p0 = ea0 / z0 + return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1) + + def entropy(self): + """compute entropy""" + a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) + ea0 = tf.exp(a0) + z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) + p0 = ea0 / z0 + return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1) + + def sample(self): + """sample from logits""" + if not self.is_act_model: + re_res = tf.reshape(self.logits, [-1, self.nsteps, self.size]) + masked_res = tf.math.add(re_res, self.mask_npinf) + re_masked_res = tf.reshape(masked_res, [-1, self.size]) + + u = tf.random_uniform(tf.shape(re_masked_res), dtype=self.logits.dtype) + return tf.argmax(re_masked_res - tf.log(-1*tf.log(u)), axis=-1) + else: + u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype) + return tf.argmax(self.logits - tf.log(-1*tf.log(u)), axis=-1) + + @classmethod + def fromflat(cls, flat): + return cls(flat) # pylint: disable=no-value-for-parameter + +class CategoricalPdType(PdType): + """ + To create CategoricalPd + """ + def __init__(self, ncat, nsteps, np_mask, is_act_model): + self.ncat = ncat + self.nsteps = nsteps + self.np_mask = np_mask + self.is_act_model = is_act_model + def pdclass(self): + return CategoricalPd + + def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): + """add fc and create CategoricalPd""" + pdparam, mask, mask_npinf = _matching_fc(latent_vector, 'pi', self.ncat, self.nsteps, + init_scale=init_scale, init_bias=init_bias, + np_mask=self.np_mask, is_act_model=self.is_act_model) + return self.pdfromflat(pdparam, mask_npinf, self.nsteps, self.ncat, self.is_act_model), pdparam, mask, mask_npinf + + def param_shape(self): + return [self.ncat] + def sample_shape(self): + return [] + def sample_dtype(self): + return tf.int32 + +def _matching_fc(tensor, name, size, nsteps, init_scale, init_bias, np_mask, is_act_model): + """ + Add fc op, and add mask op when not in action mode + """ + if tensor.shape[-1] == size: + assert False + return tensor + else: + mask = tf.get_variable("act_mask", dtype=tf.float32, initializer=np_mask[0], trainable=False) + mask_npinf = tf.get_variable("act_mask_npinf", dtype=tf.float32, initializer=np_mask[1], trainable=False) + res = fc(tensor, name, size, init_scale=init_scale, init_bias=init_bias) + if not is_act_model: + re_res = tf.reshape(res, [-1, nsteps, size]) + masked_res = tf.math.multiply(re_res, mask) + re_masked_res = tf.reshape(masked_res, [-1, size]) + return re_masked_res, mask, mask_npinf + else: + return res, mask, mask_npinf diff --git a/nni/algorithms/hpo/ppo_tuner/model.py 
b/nni/algorithms/hpo/ppo_tuner/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c6a8479c6d0d06fcf0f7c4b8a64a62ab968988aa --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/model.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +the main model of policy/value network +""" + +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +from .util import initialize, get_session + +class Model: + """ + We use this object to : + __init__: + - Creates the step_model + - Creates the train_model + + train(): + - Make the training part (feedforward and retropropagation of gradients) + + save/load(): + - Save load the model + """ + def __init__(self, *, policy, nbatch_act, nbatch_train, + nsteps, ent_coef, vf_coef, max_grad_norm, microbatch_size=None, np_mask=None): + self.sess = sess = get_session() + + with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE): + # CREATE OUR TWO MODELS + # act_model that is used for sampling + act_model = policy(nbatch_act, 1, sess, np_mask=np_mask, is_act_model=True) + + # Train model for training + if microbatch_size is None: + train_model = policy(nbatch_train, nsteps, sess, np_mask=np_mask, is_act_model=False) + else: + train_model = policy(microbatch_size, nsteps, sess, np_mask=np_mask, is_act_model=False) + + # CREATE THE PLACEHOLDERS + self.A = A = train_model.pdtype.sample_placeholder([None]) + self.ADV = ADV = tf.placeholder(tf.float32, [None]) + self.R = R = tf.placeholder(tf.float32, [None]) + # Keep track of old actor + self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None]) + # Keep track of old critic + self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None]) + self.LR = LR = tf.placeholder(tf.float32, []) + # Cliprange + self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, []) + + neglogpac = train_model.pd.neglogp(A) + + # Calculate the entropy + # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy. + entropy = tf.reduce_mean(train_model.pd.entropy()) + + # CALCULATE THE LOSS + # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss + + # Clip the value to reduce variability during Critic training + # Get the predicted value + vpred = train_model.vf + vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) + # Unclipped value + vf_losses1 = tf.square(vpred - R) + # Clipped value + vf_losses2 = tf.square(vpredclipped - R) + + vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2)) + + # Calculate ratio (pi current policy / pi old policy) + ratio = tf.exp(OLDNEGLOGPAC - neglogpac) + + # Defining Loss = - J is equivalent to max J + pg_losses = -ADV * ratio + + pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE) + + # Final PG loss + pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2)) + approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC)) + clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE))) + + # Total loss + loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + + # UPDATE THE PARAMETERS USING LOSS + # 1. Get the model parameters + params = tf.trainable_variables('ppo2_model') + # 2. Build our trainer + self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5) + # 3. 
Calculate the gradients + grads_and_var = self.trainer.compute_gradients(loss, params) + grads, var = zip(*grads_and_var) + + if max_grad_norm is not None: + # Clip the gradients (normalize) + grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) + grads_and_var = list(zip(grads, var)) + # zip aggregate each gradient with parameters associated + # For instance zip(ABCD, xyza) => Ax, By, Cz, Da + + self.grads = grads + self.var = var + self._train_op = self.trainer.apply_gradients(grads_and_var) + self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac'] + self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac] + + + self.train_model = train_model + self.act_model = act_model + self.step = act_model.step + self.value = act_model.value + self.initial_state = act_model.initial_state + + initialize() + + def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None): + """ + Train the model. + Here we calculate advantage A(s,a) = R + yV(s') - V(s) + + Returns + ------- + obj + = R + yV(s') + """ + advs = returns - values + + # Normalize the advantages + advs = (advs - advs.mean()) / (advs.std() + 1e-8) + + td_map = { + self.train_model.X : obs, + self.A : actions, + self.ADV : advs, + self.R : returns, + self.LR : lr, + self.CLIPRANGE : cliprange, + self.OLDNEGLOGPAC : neglogpacs, + self.OLDVPRED : values + } + if states is not None: + td_map[self.train_model.S] = states + td_map[self.train_model.M] = masks + + return self.sess.run( + self.stats_list + [self._train_op], + td_map + )[:-1] diff --git a/nni/algorithms/hpo/ppo_tuner/policy.py b/nni/algorithms/hpo/ppo_tuner/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..a35e514eaef36562c26c6a99e8224b4339ae768c --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/policy.py @@ -0,0 +1,230 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
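The clipped surrogate and value losses assembled in `Model.__init__` in model.py above can be sanity-checked outside TensorFlow. A standalone NumPy sketch of the same computation, with made-up illustrative values:

```python
# Standalone NumPy sketch of the PPO losses built in model.py above;
# all tensors here are made-up values for illustration.
import numpy as np

cliprange = 0.2
adv = np.array([1.0, -0.5])             # normalized advantages (ADV)
neglogpac_old = np.array([1.2, 0.7])    # OLDNEGLOGPAC, from the old policy
neglogpac_new = np.array([1.0, 0.9])    # neglogp under the current policy

ratio = np.exp(neglogpac_old - neglogpac_new)          # pi_new / pi_old
pg_losses1 = -adv * ratio
pg_losses2 = -adv * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
pg_loss = np.mean(np.maximum(pg_losses1, pg_losses2))  # clipped surrogate

returns = np.array([2.0, 1.0])          # R
vpred_old = np.array([1.5, 1.2])        # OLDVPRED
vpred = np.array([1.8, 0.8])            # current value predictions
vpred_clipped = vpred_old + np.clip(vpred - vpred_old, -cliprange, cliprange)
vf_loss = 0.5 * np.mean(np.maximum((vpred - returns) ** 2,
                                   (vpred_clipped - returns) ** 2))
```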
+ +""" +build policy/value network from model +""" + +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +from .distri import CategoricalPdType +from .util import lstm_model, fc, observation_placeholder, adjust_shape + + +class PolicyWithValue: + """ + Encapsulates fields and methods for RL policy and value function estimation with shared parameters + """ + + def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, np_mask=None, is_act_model=False, **tensors): + """ + Parameters + ---------- + env : obj + RL environment + observations : tensorflow placeholder + Tensorflow placeholder in which the observations will be fed + latent : tensor + Latent state from which policy distribution parameters should be inferred + vf_latent : tensor + Latent state from which value function should be inferred (if None, then latent is used) + sess : tensorflow session + Tensorflow session to run calculations in (if None, default session is used) + **tensors + Tensorflow tensors for additional attributes such as state or mask + """ + + self.X = observations + self.state = tf.constant([]) + self.initial_state = None + self.__dict__.update(tensors) + + vf_latent = vf_latent if vf_latent is not None else latent + + vf_latent = tf.layers.flatten(vf_latent) + latent = tf.layers.flatten(latent) + + # Based on the action space, will select what probability distribution type + self.np_mask = np_mask + self.pdtype = CategoricalPdType(env.action_space.n, env.nsteps, np_mask, is_act_model) + + self.act_latent = latent + self.nh = env.action_space.n + + self.pd, self.pi, self.mask, self.mask_npinf = self.pdtype.pdfromlatent(latent, init_scale=0.01) + + # Take an action + self.action = self.pd.sample() + + # Calculate the neg log of our probability + self.neglogp = self.pd.neglogp(self.action) + self.sess = sess or tf.get_default_session() + + assert estimate_q is False + self.vf = fc(vf_latent, 'vf', 1) + self.vf = self.vf[:, 0] + + if is_act_model: + self._build_model_for_step() + + def _evaluate(self, variables, observation, **extra_feed): + sess = self.sess + feed_dict = {self.X: adjust_shape(self.X, observation)} + for inpt_name, data in extra_feed.items(): + if inpt_name in self.__dict__.keys(): + inpt = self.__dict__[inpt_name] + if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder': + feed_dict[inpt] = adjust_shape(inpt, data) + + return sess.run(variables, feed_dict) + + def _build_model_for_step(self): + # multiply with weight and apply mask on self.act_latent to generate + self.act_step = step = tf.placeholder(shape=(), dtype=tf.int64, name='act_step') + with tf.variable_scope('pi', reuse=tf.AUTO_REUSE): + from .util import ortho_init + nin = self.act_latent.get_shape()[1].value + w = tf.get_variable("w", [nin, self.nh], initializer=ortho_init(0.01)) + b = tf.get_variable("b", [self.nh], initializer=tf.constant_initializer(0.0)) + logits = tf.matmul(self.act_latent, w)+b + piece = tf.slice(self.mask, [step, 0], [1, self.nh]) + re_piece = tf.reshape(piece, [-1]) + masked_logits = tf.math.multiply(logits, re_piece) + + npinf_piece = tf.slice(self.mask_npinf, [step, 0], [1, self.nh]) + re_npinf_piece = tf.reshape(npinf_piece, [-1]) + + def sample(logits, mask_npinf): + new_logits = tf.math.add(logits, mask_npinf) + u = tf.random_uniform(tf.shape(new_logits), dtype=logits.dtype) + return tf.argmax(new_logits - tf.log(-1*tf.log(u)), axis=-1) + + def neglogp(logits, x): + # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x) + # 
Note: we can't use sparse_softmax_cross_entropy_with_logits because + # the implementation does not allow second-order derivatives... + if x.dtype in {tf.uint8, tf.int32, tf.int64}: + # one-hot encoding + x_shape_list = x.shape.as_list() + logits_shape_list = logits.get_shape().as_list()[:-1] + for xs, ls in zip(x_shape_list, logits_shape_list): + if xs is not None and ls is not None: + assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls) + + x = tf.one_hot(x, logits.get_shape().as_list()[-1]) + else: + # already encoded + assert x.shape.as_list() == logits.shape.as_list() + + return tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, + labels=x) + + self.act_action = sample(masked_logits, re_npinf_piece) + self.act_neglogp = neglogp(masked_logits, self.act_action) + + + def step(self, step, observation, **extra_feed): + """ + Compute next action(s) given the observation(s) + + Parameters + ---------- + observation : np array + Observation data (either single or a batch) + **extra_feed + Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) + + Returns + ------- + (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple + """ + extra_feed['act_step'] = step + a, v, state, neglogp = self._evaluate([self.act_action, self.vf, self.state, self.act_neglogp], observation, **extra_feed) + if state.size == 0: + state = None + return a, v, state, neglogp + + def value(self, ob, *args, **kwargs): + """ + Compute value estimate(s) given the observation(s) + + Parameters + ---------- + observation : np array + Observation data (either single or a batch) + **extra_feed + Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) + + Returns + ------- + Value estimate + """ + return self._evaluate(self.vf, ob, *args, **kwargs) + + +def build_lstm_policy(model_config, value_network=None, estimate_q=False, **policy_kwargs): + """ + Build lstm policy and value network, they share the same lstm network. + the parameters all use their default values. 
+ + Parameter + --------- + model_config : obj + Configurations of the model + value_network : obj + The network for value function + estimate_q : bool + Whether to estimate ``q`` + **policy_kwargs + The kwargs for policy network, i.e., lstm model + + Returns + ------- + func + The policy network + """ + policy_network = lstm_model(**policy_kwargs) + + def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None, np_mask=None, is_act_model=False): + ob_space = model_config.observation_space + + X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch) + + extra_tensors = {} + + # encode_observation is not necessary anymore as we use embedding_lookup + encoded_x = X + + with tf.variable_scope('pi', reuse=tf.AUTO_REUSE): + policy_latent = policy_network(encoded_x, 1, model_config.observation_space.n) + if isinstance(policy_latent, tuple): + policy_latent, recurrent_tensors = policy_latent + + if recurrent_tensors is not None: + # recurrent architecture, need a few more steps + nenv = nbatch // nsteps + assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps) + policy_latent, recurrent_tensors = policy_network(encoded_x, nenv, model_config.observation_space.n) + extra_tensors.update(recurrent_tensors) + + _v_net = value_network + + assert _v_net is None or _v_net == 'shared' + vf_latent = policy_latent + + policy = PolicyWithValue( + env=model_config, + observations=X, + latent=policy_latent, + vf_latent=vf_latent, + sess=sess, + estimate_q=estimate_q, + np_mask=np_mask, + is_act_model=is_act_model, + **extra_tensors + ) + return policy + + return policy_fn diff --git a/nni/algorithms/hpo/ppo_tuner/ppo_tuner.py b/nni/algorithms/hpo/ppo_tuner/ppo_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..33b62d600e3b6bb788edab7db53a1223a846b2eb --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/ppo_tuner.py @@ -0,0 +1,654 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +ppo_tuner.py including: + class PPOTuner +""" + +import copy +import logging +import numpy as np +from gym import spaces +from schema import Schema, Optional + +import nni +from nni import ClassArgsValidator +from nni.tuner import Tuner +from nni.utils import OptimizeMode, extract_scalar_reward + +from .model import Model +from .util import set_global_seeds +from .policy import build_lstm_policy + + +logger = logging.getLogger('ppo_tuner_AutoML') + +def _constfn(val): + """ + Wrap as function + """ + def f(_): + return val + return f + + +class ModelConfig: + """ + Configurations of the PPO model + """ + def __init__(self): + self.observation_space = None + self.action_space = None + self.num_envs = 0 + self.nsteps = 0 + + self.ent_coef = 0.0 + self.lr = 3e-4 + self.vf_coef = 0.5 + self.max_grad_norm = 0.5 + self.gamma = 0.99 + self.lam = 0.95 + self.cliprange = 0.2 + self.embedding_size = None # the embedding is for each action + + self.noptepochs = 4 # number of training epochs per update + self.total_timesteps = 5000 # number of timesteps (i.e. number of actions taken in the environment) + self.nminibatches = 4 # number of training minibatches per update. For recurrent policies, + # should be smaller or equal than number of environments run in parallel. 
+ +class TrialsInfo: + """ + Informations of each trial from one model inference + """ + def __init__(self, obs, actions, values, neglogpacs, dones, last_value, inf_batch_size): + self.iter = 0 + self.obs = obs + self.actions = actions + self.values = values + self.neglogpacs = neglogpacs + self.dones = dones + self.last_value = last_value + + self.rewards = None + self.returns = None + + self.inf_batch_size = inf_batch_size + #self.states = None + + def get_next(self): + """ + Get actions of the next trial + """ + if self.iter >= self.inf_batch_size: + return None, None + actions = [] + for step in self.actions: + actions.append(step[self.iter]) + self.iter += 1 + return self.iter - 1, actions + + def update_rewards(self, rewards, returns): + """ + After the trial is finished, reward and return of this trial is updated + """ + self.rewards = rewards + self.returns = returns + + def convert_shape(self): + """ + Convert shape + """ + def sf01(arr): + """ + swap and then flatten axes 0 and 1 + """ + s = arr.shape + return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:]) + self.obs = sf01(self.obs) + self.returns = sf01(self.returns) + self.dones = sf01(self.dones) + self.actions = sf01(self.actions) + self.values = sf01(self.values) + self.neglogpacs = sf01(self.neglogpacs) + + +class PPOModel: + """ + PPO Model + """ + def __init__(self, model_config, mask): + self.model_config = model_config + self.states = None # initial state of lstm in policy/value network + self.nupdates = None # the number of func train is invoked, used to tune lr and cliprange + self.cur_update = 1 # record the current update + self.np_mask = mask # record the mask of each action within one trial + + set_global_seeds(None) + assert isinstance(self.model_config.lr, float) + self.lr = _constfn(self.model_config.lr) + assert isinstance(self.model_config.cliprange, float) + self.cliprange = _constfn(self.model_config.cliprange) + + # build lstm policy network, value share the same network + policy = build_lstm_policy(model_config) + + # Get the nb of env + nenvs = model_config.num_envs + + # Calculate the batch_size + self.nbatch = nbatch = nenvs * model_config.nsteps # num of record per update + nbatch_train = nbatch // model_config.nminibatches # get batch size + # self.nupdates is used to tune lr and cliprange + self.nupdates = self.model_config.total_timesteps // self.nbatch + + # Instantiate the model object (that creates act_model and train_model) + self.model = Model(policy=policy, nbatch_act=nenvs, nbatch_train=nbatch_train, + nsteps=model_config.nsteps, ent_coef=model_config.ent_coef, vf_coef=model_config.vf_coef, + max_grad_norm=model_config.max_grad_norm, np_mask=self.np_mask) + + self.states = self.model.initial_state + + logger.info('=== finished PPOModel initialization') + + def inference(self, num): + """ + Generate actions along with related info from policy network. + observation is the action of the last step. 
+ + Parameters + ---------- + num: int + The number of trials to generate + + Returns + ------- + mb_obs : list + Observation of the ``num`` configurations + mb_actions : list + Actions of the ``num`` configurations + mb_values : list + Values from the value function of the ``num`` configurations + mb_neglogpacs : list + ``neglogp`` of the ``num`` configurations + mb_dones : list + To show whether the play is done, always ``True`` + last_values : tensorflow tensor + The last values of the ``num`` configurations, got with session run + """ + # Here, we init the lists that will contain the mb of experiences + mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [] + # initial observation + # use the (n+1)th embedding to represent the first step action + first_step_ob = self.model_config.action_space.n + obs = [first_step_ob for _ in range(num)] + dones = [True for _ in range(num)] + states = self.states + # For n in range number of steps + for cur_step in range(self.model_config.nsteps): + # Given observations, get action value and neglopacs + # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init + actions, values, states, neglogpacs = self.model.step(cur_step, obs, S=states, M=dones) + mb_obs.append(obs.copy()) + mb_actions.append(actions) + mb_values.append(values) + mb_neglogpacs.append(neglogpacs) + mb_dones.append(dones) + + # Take actions in env and look the results + # Infos contains a ton of useful informations + obs[:] = actions + if cur_step == self.model_config.nsteps - 1: + dones = [True for _ in range(num)] + else: + dones = [False for _ in range(num)] + + #batch of steps to batch of rollouts + np_obs = np.asarray(obs) + mb_obs = np.asarray(mb_obs, dtype=np_obs.dtype) + mb_actions = np.asarray(mb_actions) + mb_values = np.asarray(mb_values, dtype=np.float32) + mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) + mb_dones = np.asarray(mb_dones, dtype=np.bool) + last_values = self.model.value(np_obs, S=states, M=dones) + + return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values + + def compute_rewards(self, trials_info, trials_result): + """ + Compute the rewards of the trials in trials_info based on trials_result, + and update the rewards in trials_info + + Parameters + ---------- + trials_info : TrialsInfo + Info of the generated trials + trials_result : list + Final results (e.g., acc) of the generated trials + """ + mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32) + # discount/bootstrap off value fn + mb_returns = np.zeros_like(mb_rewards) + mb_advs = np.zeros_like(mb_rewards) + lastgaelam = 0 + last_dones = np.asarray([True for _ in trials_result], dtype=np.bool) # ugly + for t in reversed(range(self.model_config.nsteps)): + if t == self.model_config.nsteps - 1: + nextnonterminal = 1.0 - last_dones + nextvalues = trials_info.last_value + else: + nextnonterminal = 1.0 - trials_info.dones[t+1] + nextvalues = trials_info.values[t+1] + delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t] + lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam + mb_advs[t] = lastgaelam # pylint: disable=unsupported-assignment-operation + mb_returns = mb_advs + trials_info.values + + trials_info.update_rewards(mb_rewards, mb_returns) + trials_info.convert_shape() + + def train(self, trials_info, nenvs): + """ + Train the policy/value network using trials_info + + Parameters + 
---------- + trials_info : TrialsInfo + Complete info of the generated trials from the previous inference + nenvs : int + The batch size of the (previous) inference + """ + # keep frac decay for future optimization + if self.cur_update <= self.nupdates: + frac = 1.0 - (self.cur_update - 1.0) / self.nupdates + else: + logger.warning('current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d', + self.cur_update, self.nupdates) + frac = 1.0 - (self.nupdates - 1.0) / self.nupdates + lrnow = self.lr(frac) + cliprangenow = self.cliprange(frac) + self.cur_update += 1 + + states = self.states + + assert states is not None # recurrent version + assert nenvs % self.model_config.nminibatches == 0 + envsperbatch = nenvs // self.model_config.nminibatches + envinds = np.arange(nenvs) + flatinds = np.arange(nenvs * self.model_config.nsteps).reshape(nenvs, self.model_config.nsteps) + for _ in range(self.model_config.noptepochs): + np.random.shuffle(envinds) + for start in range(0, nenvs, envsperbatch): + end = start + envsperbatch + mbenvinds = envinds[start:end] + mbflatinds = flatinds[mbenvinds].ravel() + slices = (arr[mbflatinds] for arr in (trials_info.obs, trials_info.returns, trials_info.dones, + trials_info.actions, trials_info.values, trials_info.neglogpacs)) + mbstates = states[mbenvinds] + self.model.train(lrnow, cliprangenow, *slices, mbstates) + +class PPOClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('trials_per_update'): self.range('trials_per_update', int, 0, 99999), + Optional('epochs_per_update'): self.range('epochs_per_update', int, 0, 99999), + Optional('minibatch_size'): self.range('minibatch_size', int, 0, 99999), + Optional('ent_coef'): float, + Optional('lr'): float, + Optional('vf_coef'): float, + Optional('max_grad_norm'): float, + Optional('gamma'): float, + Optional('lam'): float, + Optional('cliprange'): float, + }).validate(kwargs) + +class PPOTuner(Tuner): + """ + PPOTuner, the implementation inherits the main logic of the implementation + [ppo2 from openai](https://github.com/openai/baselines/tree/master/baselines/ppo2), and is adapted for NAS scenario. + It uses ``lstm`` for its policy network and value network, policy and value share the same network. + """ + + def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4, + ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2): + """ + Initialization, PPO model is not initialized here as search space is not received yet. 
+ + Parameters + ---------- + optimize_mode : str + maximize or minimize + trials_per_update : int + Number of trials to have for each model update + epochs_per_update : int + Number of epochs to run for each model update + minibatch_size : int + Minibatch size (number of trials) for the update + ent_coef : float + Policy entropy coefficient in the optimization objective + lr : float + Learning rate of the model (lstm network), constant + vf_coef : float + Value function loss coefficient in the optimization objective + max_grad_norm : float + Gradient norm clipping coefficient + gamma : float + Discounting factor + lam : float + Advantage estimation discounting factor (lambda in the paper) + cliprange : float + Cliprange in the PPO algorithm, constant + """ + self.optimize_mode = OptimizeMode(optimize_mode) + self.model_config = ModelConfig() + self.model = None + self.search_space = None + self.running_trials = {} # key: parameter_id, value: actions/states/etc. + self.inf_batch_size = trials_per_update # number of trials to generate in one inference + self.first_inf = True # indicate whether it is the first time to infer new trials + self.trials_result = [None for _ in range(self.inf_batch_size)] # results of finished trials + + self.credit = 0 # record the unsatisfied trial requests + self.param_ids = [] + self.finished_trials = 0 + self.chosen_arch_template = {} + + self.actions_spaces = None + self.actions_to_config = None + self.full_act_space = None + self.trials_info = None + + self.all_trials = {} # used to dedup the same trial, key: config, value: final result + + self.model_config.num_envs = self.inf_batch_size + self.model_config.noptepochs = epochs_per_update + self.model_config.nminibatches = minibatch_size + + self.send_trial_callback = None + logger.info('Finished PPOTuner initialization') + + def _process_nas_space(self, search_space): + actions_spaces = [] + actions_to_config = [] + for key, val in search_space.items(): + if val['_type'] == 'layer_choice': + actions_to_config.append((key, 'layer_choice')) + actions_spaces.append(val['_value']) + self.chosen_arch_template[key] = None + elif val['_type'] == 'input_choice': + candidates = val['_value']['candidates'] + n_chosen = val['_value']['n_chosen'] + if n_chosen not in [0, 1, [0, 1]]: + raise ValueError('Optional_input_size can only be 0, 1, or [0, 1], but the specified one is %s' + % (n_chosen)) + if isinstance(n_chosen, list): + actions_to_config.append((key, 'input_choice')) + # FIXME: risk, candidates might also have None + actions_spaces.append(['None', *candidates]) + self.chosen_arch_template[key] = None + elif n_chosen == 1: + actions_to_config.append((key, 'input_choice')) + actions_spaces.append(candidates) + self.chosen_arch_template[key] = None + elif n_chosen == 0: + self.chosen_arch_template[key] = [] + else: + raise ValueError('Unsupported search space type: %s' % (val['_type'])) + + # calculate observation space + dedup = {} + for step in actions_spaces: + for action in step: + dedup[action] = 1 + full_act_space = [act for act, _ in dedup.items()] + assert len(full_act_space) == len(dedup) + observation_space = len(full_act_space) + nsteps = len(actions_spaces) + + return actions_spaces, actions_to_config, full_act_space, observation_space, nsteps + + def _generate_action_mask(self): + """ + Different steps could have different action spaces;
To deal with this case, we merge all the + possible actions into one action space, and use a mask to indicate the available actions at each step + """ + two_masks = [] + + mask = [] + for acts in self.actions_spaces: + one_mask = [0 for _ in range(len(self.full_act_space))] + for act in acts: + idx = self.full_act_space.index(act) + one_mask[idx] = 1 + mask.append(one_mask) + two_masks.append(mask) + + mask = [] + for acts in self.actions_spaces: + one_mask = [-np.inf for _ in range(len(self.full_act_space))] + for act in acts: + idx = self.full_act_space.index(act) + one_mask[idx] = 0 + mask.append(one_mask) + two_masks.append(mask) + + return np.asarray(two_masks, dtype=np.float32) + + def update_search_space(self, search_space): + """ + Get the search space; currently the space only includes that for NAS + + Parameters + ---------- + search_space : dict + Search space for NAS + the format is described in the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html). + """ + logger.info('update search space %s', search_space) + assert self.search_space is None + self.search_space = search_space + + assert self.model_config.observation_space is None + assert self.model_config.action_space is None + + self.actions_spaces, self.actions_to_config, self.full_act_space, obs_space, nsteps = self._process_nas_space(search_space) + + self.model_config.observation_space = spaces.Discrete(obs_space) + self.model_config.action_space = spaces.Discrete(obs_space) + self.model_config.nsteps = nsteps + + # generate mask in numpy + mask = self._generate_action_mask() + + assert self.model is None + self.model = PPOModel(self.model_config, mask) + + def _actions_to_config(self, actions): + """ + Given actions, generate the corresponding trial configuration + """ + chosen_arch = copy.deepcopy(self.chosen_arch_template) + for cnt, act in enumerate(actions): + act_name = self.full_act_space[act] + (_key, _type) = self.actions_to_config[cnt] + if _type == 'input_choice': + if act_name == 'None': + chosen_arch[_key] = {'_value': [], '_idx': []} + else: + candidates = self.search_space[_key]['_value']['candidates'] + idx = candidates.index(act_name) + chosen_arch[_key] = {'_value': [act_name], '_idx': [idx]} + elif _type == 'layer_choice': + idx = self.search_space[_key]['_value'].index(act_name) + chosen_arch[_key] = {'_value': act_name, '_idx': idx} + else: + raise ValueError('unrecognized type: {0}'.format(_type)) + return chosen_arch + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """ + Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects. + + Parameters + ---------- + parameter_id_list : list of int + Unique identifiers for each set of requested hyper-parameters. + These will later be used in :meth:`receive_trial_result`.
+ **kwargs + Not used + + Returns + ------- + list + A list of newly generated configurations + """ + result = [] + self.send_trial_callback = kwargs['st_callback'] + for parameter_id in parameter_id_list: + had_exception = False + try: + logger.debug("generating param for %s", parameter_id) + res = self.generate_parameters(parameter_id, **kwargs) + except nni.NoMoreTrialError: + had_exception = True + if not had_exception: + result.append(res) + return result + + def generate_parameters(self, parameter_id, **kwargs): + """ + Generate parameters. If no trial configuration is available for now, increase self.credit by 1 and send the config later + + Parameters + ---------- + parameter_id : int + Unique identifier for requested hyper-parameters. + This will later be used in :meth:`receive_trial_result`. + **kwargs + Not used + + Returns + ------- + dict + One newly generated configuration + + """ + if self.first_inf: + self.trials_result = [None for _ in range(self.inf_batch_size)] + mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size) + self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs, + mb_dones, last_values, self.inf_batch_size) + self.first_inf = False + + trial_info_idx, actions = self.trials_info.get_next() + if trial_info_idx is None: + logger.debug('Credit increased by one in parameters request') + self.credit += 1 + self.param_ids.append(parameter_id) + raise nni.NoMoreTrialError('no more parameters now.') + + self.running_trials[parameter_id] = trial_info_idx + new_config = self._actions_to_config(actions) + return new_config + + def _next_round_inference(self): + """ + Run an inference to generate the next batch of configurations + """ + logger.debug('Start next round inference...') + self.finished_trials = 0 + self.model.compute_rewards(self.trials_info, self.trials_result) + self.model.train(self.trials_info, self.inf_batch_size) + self.running_trials = {} + # generate new trials + self.trials_result = [None for _ in range(self.inf_batch_size)] + mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size) + self.trials_info = TrialsInfo(mb_obs, mb_actions, + mb_values, mb_neglogpacs, + mb_dones, last_values, + self.inf_batch_size) + logger.debug('Next round inference complete.') + # check credit and submit new trials + for _ in range(self.credit): + trial_info_idx, actions = self.trials_info.get_next() + if trial_info_idx is None: + logger.warning('Not enough trial configs; trials_per_update is suggested to be larger than trialConcurrency') + break + assert self.param_ids + param_id = self.param_ids.pop() + self.running_trials[param_id] = trial_info_idx + new_config = self._actions_to_config(actions) + self.send_trial_callback(param_id, new_config) + self.credit -= 1 + logger.debug('Sent new trial (%d, %s) to reduce credit', param_id, new_config) + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Receive a trial's result. If the number of finished trials equals self.inf_batch_size, start the next update to + train the model. + + Parameters + ---------- + parameter_id : int + Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`. + parameters : dict + Hyper-parameters generated by :meth:`generate_parameters`. + value : dict + Result from trial (the return value of :func:`nni.report_final_result`).
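+ + Note (descriptive, mirroring the body below): for ``optimize_mode='minimize'`` the scalar reward is negated + before being stored, i.e. ``value = -extract_scalar_reward(value)``.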
+ """ + trial_info_idx = self.running_trials.pop(parameter_id, None) + assert trial_info_idx is not None + + value = extract_scalar_reward(value) + if self.optimize_mode == OptimizeMode.Minimize: + value = -value + + self.trials_result[trial_info_idx] = value + self.finished_trials += 1 + + logger.debug('receive_trial_result, parameter_id %d, trial_info_idx %d, finished_trials %d, inf_batch_size %d', + parameter_id, trial_info_idx, self.finished_trials, self.inf_batch_size) + if self.finished_trials == self.inf_batch_size: + logger.debug('Start next round inference in receive_trial_result') + self._next_round_inference() + + def trial_end(self, parameter_id, success, **kwargs): + """ + To deal with trial failure. If a trial fails, it is popped out from ``self.running_trials``, + and the final result of this trial is assigned with the average of the finished trials. + + Parameters + ---------- + parameter_id : int + Unique identifier for hyper-parameters used by this trial. + success : bool + True if the trial successfully completed; False if failed or terminated. + **kwargs + Not used + """ + if not success: + if parameter_id not in self.running_trials: + logger.warning('The trial is failed, but self.running_trial does not have this trial') + return + trial_info_idx = self.running_trials.pop(parameter_id, None) + assert trial_info_idx is not None + # use mean of finished trials as the result of this failed trial + values = [val for val in self.trials_result if val is not None] + logger.warning('In trial_end, values: %s', values) + self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0 + self.finished_trials += 1 + if self.finished_trials == self.inf_batch_size: + logger.debug('Start next round inference in trial_end') + self._next_round_inference() + + def import_data(self, data): + """ + Import additional data for tuning, not supported yet. + + Parameters + ---------- + data : list + A list of dictionarys, each of which has at least two keys, ``parameter`` and ``value`` + """ + logger.warning('PPOTuner cannot leverage imported data.') diff --git a/nni/algorithms/hpo/ppo_tuner/util.py b/nni/algorithms/hpo/ppo_tuner/util.py new file mode 100644 index 0000000000000000000000000000000000000000..605292de4002f114e935cb24a87c96921940e959 --- /dev/null +++ b/nni/algorithms/hpo/ppo_tuner/util.py @@ -0,0 +1,260 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +""" +util functions +""" + +import os +import random +import multiprocessing +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() +from gym.spaces import Discrete, Box, MultiDiscrete + +def set_global_seeds(i): + """set global seeds""" + rank = 0 + myseed = i + 1000 * rank if i is not None else None + tf.set_random_seed(myseed) + np.random.seed(myseed) + random.seed(myseed) + +def batch_to_seq(h, nbatch, nsteps, flat=False): + """convert from batch to sequence""" + if flat: + h = tf.reshape(h, [nbatch, nsteps]) + else: + h = tf.reshape(h, [nbatch, nsteps, -1]) + return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)] + +def seq_to_batch(h, flat=False): + """convert from sequence to batch""" + shape = h[0].get_shape().as_list() + if not flat: + assert len(shape) > 1 + nh = h[0].get_shape()[-1].value + return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) + else: + return tf.reshape(tf.stack(values=h, axis=1), [-1]) + +def lstm(xs, ms, s, scope, nh, init_scale=1.0): + """lstm cell""" + _, nin = [v.value for v in xs[0].get_shape()] # the first is nbatch + with tf.variable_scope(scope): + wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) + wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) + b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) + + c, h = tf.split(axis=1, num_or_size_splits=2, value=s) + for idx, (x, m) in enumerate(zip(xs, ms)): + c = c*(1-m) + h = h*(1-m) + z = tf.matmul(x, wx) + tf.matmul(h, wh) + b + i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) + i = tf.nn.sigmoid(i) + f = tf.nn.sigmoid(f) + o = tf.nn.sigmoid(o) + u = tf.tanh(u) + c = f*c + i*u + h = o*tf.tanh(c) + xs[idx] = h + s = tf.concat(axis=1, values=[c, h]) + return xs, s + +def lstm_model(nlstm=128, layer_norm=False): + """ + Builds LSTM (Long-Short Term Memory) network to be used in a policy. + Note that the resulting function returns not only the output of the LSTM + (i.e. hidden state of lstm for each step in the sequence), but also a dictionary + with auxiliary tensors to be set as policy attributes. 
+ + Specifically, + S is a placeholder to feed current state (LSTM state has to be managed outside policy) + M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too) + initial_state is a numpy array containing initial lstm state (usually zeros) + state is the output LSTM state (to be fed into S at the next call) + + + An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example + + Parameters + ---------- + nlstm : int + LSTM hidden state size + layer_norm : bool + if True, layer-normalized version of LSTM is used + + Returns + ------- + function that builds LSTM with a given input tensor / placeholder + """ + + def network_fn(X, nenv=1, obs_size=-1): + with tf.variable_scope("emb", reuse=tf.AUTO_REUSE): + w_emb = tf.get_variable("w_emb", [obs_size+1, 32]) + X = tf.nn.embedding_lookup(w_emb, X) + + nbatch = X.shape[0] + nsteps = nbatch // nenv + + h = tf.layers.flatten(X) + + M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) + S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states + + xs = batch_to_seq(h, nenv, nsteps) + ms = batch_to_seq(M, nenv, nsteps) + + assert not layer_norm + h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm) + + h = seq_to_batch(h5) + initial_state = np.zeros(S.shape.as_list(), dtype=float) + + return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} + + return network_fn + +def ortho_init(scale=1.0): + """orthogonal initializer for tf variables""" + def _ortho_init(shape, dtype, partition_info=None): + #lasagne ortho init for tf + shape = tuple(shape) + if len(shape) == 2: + flat_shape = shape + elif len(shape) == 4: # assumes NHWC + flat_shape = (np.prod(shape[:-1]), shape[-1]) + else: + raise NotImplementedError + a = np.random.normal(0.0, 1.0, flat_shape) + u, _, v = np.linalg.svd(a, full_matrices=False) + q = u if u.shape == flat_shape else v # pick the one with the correct shape + q = q.reshape(shape) + return (scale * q[:shape[0], :shape[1]]).astype(np.float32) + return _ortho_init + +def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): + """fully connected op""" + with tf.variable_scope(scope): + nin = x.get_shape()[1].value + w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) + b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias)) + return tf.matmul(x, w)+b + +def _check_shape(placeholder_shape, data_shape): + """ + check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension) + """ + # NOTE: the check is currently a no-op and always reports the shapes as compatible + return True + +# ================================================================ +# Shape adjustment for feeding into tf placeholders +# ================================================================ +def adjust_shape(placeholder, data): + """ + adjust shape of the data to the shape of the placeholder if possible.
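+ For example (an illustrative doctest, not from the original file): + + >>> ph = tf.placeholder(tf.float32, [None, 2]) + >>> adjust_shape(ph, [1, 2, 3, 4]).shape + (2, 2)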
+ + If shape is incompatible, AssertionError is thrown + + Parameters + ---------- + placeholder + tensorflow input placeholder + data + input data to be (potentially) reshaped to be fed into placeholder + + Returns + ------- + reshaped data + """ + if not isinstance(data, np.ndarray) and not isinstance(data, list): + return data + if isinstance(data, list): + data = np.array(data) + + placeholder_shape = [x or -1 for x in placeholder.shape.as_list()] + + assert _check_shape(placeholder_shape, data.shape), \ + 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape) + + return np.reshape(data, placeholder_shape) + +# ================================================================ +# Global session +# ================================================================ + +def get_session(config=None): + """Get default session or create one with a given config""" + sess = tf.get_default_session() + if sess is None: + sess = make_session(config=config, make_default=True) + return sess + +def make_session(config=None, num_cpu=None, make_default=False, graph=None): + """Returns a session that will use ``num_cpu`` CPUs only""" + if num_cpu is None: + num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count())) + if config is None: + config = tf.ConfigProto( + allow_soft_placement=True, + inter_op_parallelism_threads=num_cpu, + intra_op_parallelism_threads=num_cpu) + config.gpu_options.allow_growth = True + + if make_default: + return tf.InteractiveSession(config=config, graph=graph) + else: + return tf.Session(config=config, graph=graph) + +ALREADY_INITIALIZED = set() + +def initialize(): + """Initialize all the uninitialized variables in the global scope.""" + new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED + get_session().run(tf.variables_initializer(new_variables)) + + ALREADY_INITIALIZED.update(new_variables) + +def observation_placeholder(ob_space, batch_size=None, name='Ob'): + """ + Create placeholder to feed observations into of the size appropriate to the observation space + + Parameters + ---------- + ob_space : gym.Space + observation space + batch_size : int + size of the batch to be fed into input. Can be left None in most cases. + name : str + name of the placeholder + + Returns + ------- + tensorflow placeholder tensor + """ + + assert isinstance(ob_space, (Discrete, Box, MultiDiscrete)), \ + 'Can only deal with Discrete, Box and MultiDiscrete observation spaces for now' + + dtype = ob_space.dtype + if dtype == np.int8: + dtype = np.uint8 + + return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) + +def explained_variance(ypred, y): + """ + Computes fraction of variance that ypred explains about y. + Returns 1 - Var[y-ypred] / Var[y] + + interpretation: + ev=0 => might as well have predicted zero + ev=1 => perfect prediction + ev<0 => worse than just predicting zero + + """ + assert y.ndim == 1 and ypred.ndim == 1 + vary = np.var(y) + return np.nan if vary == 0 else 1 - np.var(y-ypred)/vary diff --git a/nni/algorithms/hpo/random_tuner.py b/nni/algorithms/hpo/random_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..741311f288ff20636d3980092d2d654c91a80b9c --- /dev/null +++ b/nni/algorithms/hpo/random_tuner.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Naive random tuner for hyper-parameter optimization. + +You can specify an integer seed to make the random results reproducible.
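+ + A minimal sketch of the exported helpers (the search space dict below is a hypothetical example): + + >>> rng = np.random.default_rng(42) + >>> space = format_search_space({'lr': {'_type': 'loguniform', '_value': [1e-5, 1e-1]}}) + >>> internal = suggest(rng, space) + >>> config = deformat_parameters(internal, space)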
+""" + +__all__ = ['RandomTuner', 'suggest', 'suggest_parameter'] + +import logging + +import numpy as np +import schema + +from nni import ClassArgsValidator +from nni.common.hpo_utils import format_search_space, deformat_parameters +from nni.tuner import Tuner + +_logger = logging.getLogger('nni.tuner.random') + +class RandomTuner(Tuner): + def __init__(self, seed=None): + self.space = None + if seed is None: # explicitly generate a seed to make the experiment reproducible + seed = np.random.default_rng().integers(2 ** 31) + self.rng = np.random.default_rng(seed) + _logger.info(f'Using random seed {seed}') + + def update_search_space(self, space): + self.space = format_search_space(space) + + def generate_parameters(self, *args, **kwargs): + params = suggest(self.rng, self.space) + return deformat_parameters(params, self.space) + + def receive_trial_result(self, *args, **kwargs): + pass + +class RandomClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + schema.Schema({schema.Optional('seed'): int}).validate(kwargs) + +def suggest(rng, space): + params = {} + for key, spec in space.items(): + if spec.is_activated_in(params): + params[key] = suggest_parameter(rng, spec) + return params + +def suggest_parameter(rng, spec): + if spec.categorical: + return rng.integers(spec.size) + if spec.normal_distributed: + return rng.normal(spec.mu, spec.sigma) + else: + return rng.uniform(spec.low, spec.high) diff --git a/nni/algorithms/hpo/regularized_evolution_tuner.py b/nni/algorithms/hpo/regularized_evolution_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..ac756c33885b8ad150e862c6eb315136fb0d4b71 --- /dev/null +++ b/nni/algorithms/hpo/regularized_evolution_tuner.py @@ -0,0 +1,172 @@ +import copy +import logging +import random +from collections import deque + +from schema import Schema, Optional +import nni +from nni.tuner import Tuner +from nni import ClassArgsValidator +from nni.utils import OptimizeMode, extract_scalar_reward + +logger = logging.getLogger(__name__) + + +class FinishedIndividual: + def __init__(self, parameter_id, parameters, result): + """ + Parameters + ---------- + parameter_id: int + the index of the parameter + parameters : dict + chosen architecture and parameters + result : float + final metric of the chosen one + """ + self.parameter_id = parameter_id + self.parameters = parameters + self.result = result + + +class EvolutionClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('population_size'): self.range('population_size', int, 0, 99999), + Optional('sample_size'): self.range('sample_size', int, 0, 9999), + }).validate(kwargs) + + +class RegularizedEvolutionTuner(Tuner): + """ + RegularizedEvolutionTuner is tuner using Evolution NAS Tuner. + See ``Regularized Evolution for Image Classifier Architecture Search`` for details. + + Parameters + --- + optimize_mode: str + whether to maximize metric or not. 
default: 'maximize' + population_size: int + the maximum number of kept models + sample_size: int + the number of models sampled from the population in each evolution step + """ + def __init__(self, optimize_mode="maximize", population_size=100, sample_size=25): + super(RegularizedEvolutionTuner, self).__init__() + self.optimize_mode = OptimizeMode(optimize_mode) + self.population_size = population_size + self.sample_size = sample_size + self.initial_population = deque() + self.population = deque() + self.history = {} + self.search_space = None + self._from_initial = {} # whether the parameter is from initial population + + def generate_parameters(self, parameter_id, **kwargs): + """ + This function returns a dict of trial (hyper-)parameters, as a serializable object. + + Parameters + ---------- + parameter_id: int + the unique identifier of the current set of parameters + """ + if self.initial_population: + arch = self.initial_population.popleft() + self.history[parameter_id] = arch + self._from_initial[parameter_id] = True + return arch + elif self.population: + sample = [] + while len(sample) < self.sample_size: + sample.append(random.choice(list(self.population))) + + candidate = max(sample, key=lambda x: x.result) + arch = self._mutate_model(candidate) + self.history[parameter_id] = arch + self._from_initial[parameter_id] = False + return arch + else: + raise nni.NoMoreTrialError + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Record the result from a trial + + Parameters + ---------- + parameter_id : int + parameters : dict + value : dict/float + if value is dict, it should have "default" key. + value is the final metric of the trial. + """ + reward = extract_scalar_reward(value) + if parameter_id not in self.history: + raise RuntimeError('Received parameter_id not in history.') + params = self.history[parameter_id] + + if self.optimize_mode == OptimizeMode.Minimize: + reward = -reward + + self.population.append(FinishedIndividual(parameter_id, params, reward)) + if len(self.population) > self.population_size: + self.population.popleft() + + def update_search_space(self, search_space): + """ + Update search space. + The search space contains the information pre-defined by the user.
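+ Only ``layer_choice`` and ``input_choice`` entries are accepted (enforced below); a hypothetical valid space: + ``{'op1': {'_type': 'layer_choice', '_value': ['conv3x3', 'conv5x5']}}``.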
+ + Parameters + ---------- + search_space : dict + """ + logger.info('update search space %s', search_space) + assert self.search_space is None + self.search_space = search_space + + for _, val in search_space.items(): + if val['_type'] != 'layer_choice' and val['_type'] != 'input_choice': + raise ValueError('Unsupported search space type: %s' % (val['_type'])) + + self._generate_initial_population() + + def trial_end(self, parameter_id, success, **kwargs): + if not success: + del self.history[parameter_id] + if self._from_initial[parameter_id]: + self.initial_population.append(self._random_model()) + del self._from_initial[parameter_id] + + def _mutate(self, key, individual): + mutate_val = self.search_space[key] + if mutate_val['_type'] == 'layer_choice': + idx = random.randint(0, len(mutate_val['_value']) - 1) + individual[key] = {'_value': mutate_val['_value'][idx], '_idx': idx} + elif mutate_val['_type'] == 'input_choice': + candidates = mutate_val['_value']['candidates'] + n_chosen = mutate_val['_value']['n_chosen'] + idxs = [random.randint(0, len(candidates) - 1) for _ in range(n_chosen)] + vals = [candidates[k] for k in idxs] + individual[key] = {'_value': vals, '_idx': idxs} + else: + raise KeyError + + def _random_model(self): + individual = {} + for key in self.search_space.keys(): + self._mutate(key, individual) + return individual + + def _mutate_model(self, model): + new_individual = copy.deepcopy(model.parameters) + mutate_key = random.choice(list(new_individual.keys())) + self._mutate(mutate_key, new_individual) + return new_individual + + def _generate_initial_population(self): + while len(self.initial_population) < self.population_size: + self.initial_population.append(self._random_model()) + logger.info('Initial population generated.') diff --git a/nni/algorithms/hpo/smac_tuner/__init__.py b/nni/algorithms/hpo/smac_tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ac4c4bc4809aa0d04d67127a25ebfefa577be3c --- /dev/null +++ b/nni/algorithms/hpo/smac_tuner/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .smac_tuner import SMACTuner, SMACClassArgsValidator diff --git a/nni/algorithms/hpo/smac_tuner/convert_ss_to_scenario.py b/nni/algorithms/hpo/smac_tuner/convert_ss_to_scenario.py new file mode 100644 index 0000000000000000000000000000000000000000..ef817c8cf4e10ad08185910b95bd4adc909b2dd3 --- /dev/null +++ b/nni/algorithms/hpo/smac_tuner/convert_ss_to_scenario.py @@ -0,0 +1,202 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json + +import numpy as np + + +def get_json_content(file_path): + """ + Load json file content + + Parameters + ---------- + file_path: + path to the file + + Returns + ------- + dict or None + the loaded json content, or None on a bad file path (the ``TypeError`` is caught and logged) + """ + try: + with open(file_path, 'r') as file: + return json.load(file) + except TypeError as err: + print('Error: ', err) + return None + + +def generate_pcs(nni_search_space_content): + """ + Generate the Parameter Configuration Space (PCS) which defines the + legal ranges of the parameters to be optimized and their default values.
+ Generally, the format is: + # parameter_name categorical {value_1, ..., value_N} [default value] + # parameter_name ordinal {value_1, ..., value_N} [default value] + # parameter_name integer [min_value, max_value] [default value] + # parameter_name integer [min_value, max_value] [default value] log + # parameter_name real [min_value, max_value] [default value] + # parameter_name real [min_value, max_value] [default value] log + Reference: https://automl.github.io/SMAC3/stable/options.html + + Parameters + ---------- + nni_search_space_content: search_space + The search space of this experiment in NNI + + Returns + ------- + Parameter Configuration Space (PCS) + the legal ranges of the parameters to be optimized and their default values + + Raises + ------ + RuntimeError + unsupported type or value error or incorrect search space + """ + categorical_dict = {} + search_space = nni_search_space_content + + def dump_categorical(fd, key, categories): + choice_len = len(categories) + if key in categorical_dict: + raise RuntimeError( + '%s already exists, please make sure the search space has no duplicate keys.' % key) + categorical_dict[key] = search_space[key]['_value'] + fd.write('%s categorical {%s} [0]\n' % (key, ','.join(map(str, range(choice_len))))) + + with open('param_config_space.pcs', 'w') as pcs_fd: + if isinstance(search_space, dict): + for key in search_space.keys(): + if isinstance(search_space[key], dict): + try: + if search_space[key]['_type'] == 'choice': + dump_categorical(pcs_fd, key, search_space[key]['_value']) + elif search_space[key]['_type'] == 'randint': + lower, upper = search_space[key]['_value'] + if lower + 1 == upper: + dump_categorical(pcs_fd, key, [lower]) + else: + pcs_fd.write('%s integer [%d, %d] [%d]\n' % (key, lower, upper - 1, lower)) + elif search_space[key]['_type'] == 'uniform': + low, high = search_space[key]['_value'] + if low == high: + dump_categorical(pcs_fd, key, [low]) + else: + pcs_fd.write('%s real [%s, %s] [%s]\n' % (key, low, high, low)) + elif search_space[key]['_type'] == 'loguniform': + # use np.round here to ensure that the rounded default value is in the range, + # which will be rounded in configure_space package + low, high = list(np.round(np.log(search_space[key]['_value']), 10)) + if low == high: + dump_categorical(pcs_fd, key, [search_space[key]['_value'][0]]) + else: + pcs_fd.write('%s real [%s, %s] [%s]\n' % (key, low, high, low)) + elif search_space[key]['_type'] == 'quniform': + low, high, q = search_space[key]['_value'][0:3] + vals = np.clip(np.arange(np.round(low / q), np.round(high / q) + 1) * q, low, high).tolist() + pcs_fd.write('%s ordinal {%s} [%s]\n' % ( + key, + json.dumps(vals)[1:-1], + json.dumps(vals[0]))) + else: + raise RuntimeError('unsupported _type %s' % search_space[key]['_type']) + except Exception: + raise RuntimeError('_type or _value error.') + else: + raise RuntimeError('incorrect search space.') + return categorical_dict + return None + + +def generate_scenario(ss_content): + """ + Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and + can be constructed either by providing an actual scenario-object, or by specifying the options in a scenario file. + Reference: https://automl.github.io/SMAC3/stable/options.html + The format of the scenario file is one option per line: + OPTION1 = VALUE1 + OPTION2 = VALUE2 + ... + Parameters + ---------- + abort_on_first_run_crash: bool + If true, SMAC will abort if the first run of the target algorithm crashes.
Default: True, + because trials reported to the nni tuner would always be in success state + algo: function + Specifies the target algorithm call that SMAC will optimize. Interpreted as a bash-command. + Not required by tuner, but required by nni's training service for running trials + always_race_default: + Race new incumbents always against default configuration + cost_for_crash: + Defines the cost-value for crashed runs on scenarios with quality as run-obj. Default: 2147483647.0. + Trials reported to the nni tuner would always be in success state + cutoff_time: + Maximum runtime, after which the target algorithm is cancelled. `Required if *run_obj* is runtime` + deterministic: bool + If true, the optimization process will be repeatable. + execdir: + Specifies the path to the execution-directory. Default: . + Trials are executed by nni's training service + feature_file: + Specifies the file with the instance-features. + Features and feature files are not supported + initial_incumbent: + DEFAULT is the default from the PCS. Default: DEFAULT. Must be from: ['DEFAULT', 'RANDOM']. + input_psmac_dirs: + For parallel SMAC, multiple output-directories are used. + Parallelism is supported by nni + instance_file: + Specifies the file with the training-instances. Not supported + intensification_percentage: + The fraction of time to be used on intensification (versus choice of next Configurations). Default: 0.5. + Not supported; trials are controlled by nni's training service and killed by the assessor + maxR: int + Maximum number of calls per configuration. Default: 2000. + memory_limit: + Maximum available memory the target algorithm can occupy before being cancelled. + minR: int + Minimum number of calls per configuration. Default: 1. + output_dir: + Specifies the output-directory for all emerging files, such as logging and results. + Default: smac3-output_2018-01-22_15:05:56_807070. + overall_obj: + PARX, where X is an integer defining the penalty imposed on timeouts (i.e. runtimes that exceed the cutoff-time). + Timeout is not supported + paramfile: + Specifies the path to the PCS-file. + run_obj: + Defines what metric to optimize. When optimizing runtime, cutoff_time is required as well. + Must be from: ['runtime', 'quality']. + runcount_limit: int + Maximum number of algorithm-calls during optimization. Default: inf. + Use default because this is controlled by nni + shared_model: + Whether to run SMAC in parallel mode. Parallelism is supported by nni + test_instance_file: + Specifies the file with the test-instances. Instances are not supported + tuner-timeout: + Maximum amount of CPU-time used for optimization. Not supported + wallclock_limit: int + Maximum amount of wallclock-time used for optimization. Default: inf.
+ Use default because this is controlled by nni + + Returns + ------- + Scenario: + The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and can be constructed + either by providing an actual scenario-object, or by specifying the options in a scenario file + """ + with open('scenario.txt', 'w') as sce_fd: + sce_fd.write('deterministic = 0\n') + # sce_fd.write('output_dir = \n') + sce_fd.write('paramfile = param_config_space.pcs\n') + sce_fd.write('run_obj = quality\n') + + return generate_pcs(ss_content) + + +if __name__ == '__main__': + generate_scenario(get_json_content('search_space.json')) diff --git a/nni/algorithms/hpo/smac_tuner/smac_tuner.py b/nni/algorithms/hpo/smac_tuner/smac_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..68f1d07ff5bd6438156f4c05b1114668498cbfd3 --- /dev/null +++ b/nni/algorithms/hpo/smac_tuner/smac_tuner.py @@ -0,0 +1,348 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +smac_tuner.py +""" + +import logging +import sys + +import numpy as np +from schema import Schema, Optional + +from smac.facade.epils_facade import EPILS +from smac.facade.roar_facade import ROAR +from smac.facade.smac_facade import SMAC +from smac.scenario.scenario import Scenario +from smac.utils.io.cmd_reader import CMDReader + +from ConfigSpaceNNI import Configuration + +import nni +from nni import ClassArgsValidator
from nni.common.hpo_utils import validate_search_space +from nni.tuner import Tuner +from nni.utils import OptimizeMode, extract_scalar_reward + +from .convert_ss_to_scenario import generate_scenario + +logger = logging.getLogger('smac_AutoML') + +class SMACClassArgsValidator(ClassArgsValidator): + def validate_class_args(self, **kwargs): + Schema({ + 'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize'), + Optional('config_dedup'): bool + }).validate(kwargs) + +class SMACTuner(Tuner): + """ + This is a wrapper of [SMAC](https://github.com/automl/SMAC3) following the NNI tuner interface. + It only supports ``SMAC`` mode, and does not support multiple instances of SMAC3 (i.e., + running the same configuration multiple times). + """ + def __init__(self, optimize_mode="maximize", config_dedup=False): + """ + Parameters + ---------- + optimize_mode : str + Optimize mode, 'maximize' or 'minimize', by default 'maximize' + config_dedup : bool + If True, the tuner will not generate a configuration that has already been generated. + If False, a configuration may be generated twice, but this is rare for a relatively large search space. + """ + self.logger = logger + self.optimize_mode = OptimizeMode(optimize_mode) + self.total_data = {} + self.optimizer = None + self.smbo_solver = None + self.first_one = True + self.update_ss_done = False + self.loguniform_key = set() + self.categorical_dict = {} + self.cs = None + self.dedup = config_dedup + + def _main_cli(self): + """ + Main function of SMAC for CLI interface. Some initializations of the wrapped SMAC are done + in this function.
+ + Returns + ------- + obj + The object of the SMAC optimizer + """ + self.logger.info("SMAC call: %s", " ".join(sys.argv)) + + cmd_reader = CMDReader() + args, _ = cmd_reader.read_cmd() + + root_logger = logging.getLogger() + root_logger.setLevel(args.verbose_level) + logger_handler = logging.StreamHandler(stream=sys.stdout) + if root_logger.level >= logging.INFO: + formatter = logging.Formatter("%(levelname)s:\t%(message)s") + else: + formatter = logging.Formatter( + "%(asctime)s:%(levelname)s:%(name)s:%(message)s", + "%Y-%m-%d %H:%M:%S") + logger_handler.setFormatter(formatter) + root_logger.addHandler(logger_handler) + # remove default handler + root_logger.removeHandler(root_logger.handlers[0]) + + # Create defaults + rh = None + initial_configs = None + stats = None + incumbent = None + + # Create scenario-object + scen = Scenario(args.scenario_file, []) + self.cs = scen.cs + + if args.mode == "SMAC": + optimizer = SMAC( + scenario=scen, + rng=np.random.RandomState(args.seed), + runhistory=rh, + initial_configurations=initial_configs, + stats=stats, + restore_incumbent=incumbent, + run_id=args.seed) + elif args.mode == "ROAR": + optimizer = ROAR( + scenario=scen, + rng=np.random.RandomState(args.seed), + runhistory=rh, + initial_configurations=initial_configs, + run_id=args.seed) + elif args.mode == "EPILS": + optimizer = EPILS( + scenario=scen, + rng=np.random.RandomState(args.seed), + runhistory=rh, + initial_configurations=initial_configs, + run_id=args.seed) + else: + optimizer = None + + return optimizer + + def update_search_space(self, search_space): + """ + Convert search_space to the format that ``SMAC3`` can recognize; thus, not all search space types + are supported. In this function, we also do the initialization of `SMAC3`, i.e., calling ``self._main_cli``. + + NOTE: updating search space during experiment running is not supported. + + Parameters + ---------- + search_space : dict + The format is described in the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html). + """ + self.logger.info('update search space in SMAC.') + validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform']) + if not self.update_ss_done: + self.categorical_dict = generate_scenario(search_space) + if self.categorical_dict is None: + raise RuntimeError('categorical dict is not correctly returned after parsing search space.') + # TODO: this is ugly, we put all the initialization work in this method, because initialization relies + # on search space, also because update_search_space is called at the beginning. + self.optimizer = self._main_cli() + self.smbo_solver = self.optimizer.solver + self.loguniform_key = {key for key in search_space.keys() if search_space[key]['_type'] == 'loguniform'} + self.update_ss_done = True + else: + self.logger.warning('update search space is not supported.') + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + """ + Receive a trial's final performance result reported through :func:`nni.report_final_result` by the trial. + + Parameters + ---------- + parameter_id : int + Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`. + parameters : dict + Hyper-parameters generated by :meth:`generate_parameters`. + value : dict + Result from trial (the return value of :func:`nni.report_final_result`).
+ + Raises + ------ + RuntimeError + Received parameter id not in ``self.total_data`` + """ + reward = extract_scalar_reward(value) + if self.optimize_mode is OptimizeMode.Maximize: + reward = -reward + + if parameter_id not in self.total_data: + raise RuntimeError('Received parameter_id not in total_data.') + if self.first_one: + self.smbo_solver.nni_smac_receive_first_run(self.total_data[parameter_id], reward) + self.first_one = False + else: + self.smbo_solver.nni_smac_receive_runs(self.total_data[parameter_id], reward) + + def param_postprocess(self, challenger_dict): + """ + Postprocessing for a set of hyperparameters includes: + 1. Convert the values of type ``loguniform`` back to their initial range. + 2. Convert ``categorical``: categorical values in the search space were changed to lists of numbers earlier; + the original values are restored in this function. + + Parameters + ---------- + challenger_dict : dict + challenger dict + + Returns + ------- + dict + a converted copy of the challenger dict + """ + converted_dict = {} + for key, value in challenger_dict.items(): + # convert loguniform values back to their original scale + if key in self.loguniform_key: + converted_dict[key] = np.exp(challenger_dict[key]) + # convert categorical back to original value + elif key in self.categorical_dict: + idx = challenger_dict[key] + converted_dict[key] = self.categorical_dict[key][idx] + else: + converted_dict[key] = value + return converted_dict + + def generate_parameters(self, parameter_id, **kwargs): + """ + Generate one instance of hyperparameters (i.e., one configuration). + Get one from SMAC3's ``challengers``. + + Parameters + ---------- + parameter_id : int + Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`. + **kwargs + Not used + + Returns + ------- + dict + One newly generated configuration + """ + if self.first_one: + init_challenger = self.smbo_solver.nni_smac_start() + self.total_data[parameter_id] = init_challenger + return self.param_postprocess(init_challenger.get_dictionary()) + else: + challengers = self.smbo_solver.nni_smac_request_challengers() + challengers_empty = True + for challenger in challengers: + challengers_empty = False + if self.dedup: + match = [v for k, v in self.total_data.items() \ + if v.get_dictionary() == challenger.get_dictionary()] + if match: + continue + self.total_data[parameter_id] = challenger + return self.param_postprocess(challenger.get_dictionary()) + assert challengers_empty is False, 'The case that challengers is empty is not handled.' + self.logger.info('In generate_parameters: No more new parameters.') + raise nni.NoMoreTrialError('No more new parameters.') + + def generate_multiple_parameters(self, parameter_id_list, **kwargs): + """ + Generate multiple instances of hyperparameters. If it is the first request, + retrieve the instances from initial challengers. Otherwise, request + new challengers and retrieve instances from the requested challengers. + + Parameters + ---------- + parameter_id_list: list of int + Unique identifiers for each set of requested hyper-parameters. + These will later be used in :meth:`receive_trial_result`.
+ **kwargs + Not used + + Returns + ------- + list + A list of newly generated configurations + """ + if self.first_one: + params = [] + for one_id in parameter_id_list: + init_challenger = self.smbo_solver.nni_smac_start() + self.total_data[one_id] = init_challenger + params.append(self.param_postprocess(init_challenger.get_dictionary())) + else: + challengers = self.smbo_solver.nni_smac_request_challengers() + cnt = 0 + params = [] + for challenger in challengers: + if cnt >= len(parameter_id_list): + break + if self.dedup: + match = [v for k, v in self.total_data.items() \ + if v.get_dictionary() == challenger.get_dictionary()] + if match: + continue + self.total_data[parameter_id_list[cnt]] = challenger + params.append(self.param_postprocess(challenger.get_dictionary())) + cnt += 1 + if self.dedup and not params: + self.logger.info('In generate_multiple_parameters: No more new parameters.') + return params + + def import_data(self, data): + """ + Import additional data for tuning. + + Parameters + ---------- + data : list of dict + Each of which has at least two keys, ``parameter`` and ``value``. + """ + _completed_num = 0 + for trial_info in data: + self.logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data)) + # simply validate data format + assert "parameter" in trial_info + _params = trial_info["parameter"] + assert "value" in trial_info + _value = trial_info['value'] + if not _value: + self.logger.info("Useless trial data, value is %s, skipping this trial data.", _value) + continue + _value = extract_scalar_reward(_value) + # convert the values of loguniform and categorical keys + valid_entry = True + for key, value in _params.items(): + if key in self.loguniform_key: + _params[key] = np.log(value) + elif key in self.categorical_dict: + if value in self.categorical_dict[key]: + _params[key] = self.categorical_dict[key].index(value) + else: + self.logger.info("The value %s of key %s is not in search space.", str(value), key) + valid_entry = False + break + if not valid_entry: + continue + # start importing this data entry + _completed_num += 1 + config = Configuration(self.cs, values=_params) + if self.optimize_mode is OptimizeMode.Maximize: + _value = -_value + if self.first_one: + self.smbo_solver.nni_smac_receive_first_run(config, _value) + self.first_one = False + else: + self.smbo_solver.nni_smac_receive_runs(config, _value) + self.logger.info("Successfully imported data to SMAC tuner, total data: %d, imported data: %d.", len(data), _completed_num) diff --git a/nni/algorithms/hpo/tpe_tuner.py b/nni/algorithms/hpo/tpe_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..c233e97b230ce60e5de3bf0371f8e3b2479b2b1d --- /dev/null +++ b/nni/algorithms/hpo/tpe_tuner.py @@ -0,0 +1,392 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Tree-structured Parzen Estimator (TPE) tuner for hyper-parameter optimization. + +Paper: https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf +Official code: https://github.com/hyperopt/hyperopt/blob/master/hyperopt/tpe.py + +This is a slightly modified re-implementation of the algorithm.
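+ + A standalone sketch of the exported helpers (hypothetical search space; real experiments go through ``TpeTuner``): + + >>> from collections import defaultdict + >>> from nni.common.hpo_utils import format_search_space + >>> space = format_search_space({'x': {'_type': 'uniform', '_value': [0, 1]}}) + >>> params = suggest(TpeArguments(), np.random.default_rng(0), space, defaultdict(list))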
+""" + +__all__ = ['TpeTuner', 'TpeArguments', 'suggest', 'suggest_parameter'] + +from collections import defaultdict +import logging +import math +from typing import NamedTuple, Optional, Union + +import numpy as np +from scipy.special import erf # pylint: disable=no-name-in-module + +from nni.tuner import Tuner +from nni.common.hpo_utils import OptimizeMode, format_search_space, deformat_parameters, format_parameters +from nni.utils import extract_scalar_reward +from . import random_tuner + +_logger = logging.getLogger('nni.tuner.tpe') + +## Public API part ## + +class TpeArguments(NamedTuple): + """ + These are the hyper-parameters of TPE algorithm itself. + To avoid confusing with trials' hyper-parameters, they are called "arguments" in this code. + + Parameters + ========== + constant_liar_type: 'best' | 'worst' | 'mean' | None (default: 'best') + TPE algorithm itself does not support parallel tuning. + This parameter specifies how to optimize for trial_concurrency > 1. + + None (or "null" in YAML) means do not optimize. This is the default behavior in legacy version. + + How each liar works is explained in paper's section 6.1. + In general "best" suit for small trial number and "worst" suit for large trial number. + + n_startup_jobs: int (default: 20) + The first N hyper-parameters are generated fully randomly for warming up. + If the search space is large, you can increase this value. + Or if max_trial_number is small, you may want to decrease it. + + n_ei_candidates: int (default: 24) + For each iteration TPE samples EI for N sets of parameters and choose the best one. (loosely speaking) + + linear_forgetting: int (default: 25) + TPE will lower the weights of old trials. + This controls how many iterations it takes for a trial to start decay. + + prior_weight: float (default: 1.0) + TPE treats user provided search space as prior. + When generating new trials, it also incorporates the prior in trial history by transforming the search space to + one trial configuration (i.e., each parameter of this configuration chooses the mean of its candidate range). + Here, prior_weight determines the weight of this trial configuration in the history trial configurations. + + With prior weight 1.0, the search space is treated as one good trial. + For example, "normal(0, 1)" effectly equals to a trial with x = 0 which has yielded good result. + + gamma: float (default: 0.25) + Controls how many trials are considered "good". + The number is calculated as "min(gamma * sqrt(N), linear_forgetting)". + """ + constant_liar_type: Optional[str] = 'best' + n_startup_jobs: int = 20 + n_ei_candidates: int = 24 + linear_forgetting: int = 25 + prior_weight: float = 1.0 + gamma: float = 0.25 + +class TpeTuner(Tuner): + """ + Parameters + ========== + optimze_mode: 'minimize' | 'maximize' (default: 'minimize') + Whether optimize to minimize or maximize trial result. + seed: int | None + The random seed. + tpe_args: dict[string, Any] | None + Advanced users can use this to customize TPE tuner. + See `TpeArguments` for details. 
+ """ + + def __init__(self, optimize_mode='minimize', seed=None, tpe_args=None): + self.optimize_mode = OptimizeMode(optimize_mode) + self.args = TpeArguments(**(tpe_args or {})) + self.space = None + # concurrent generate_parameters() calls are likely to yield similar result, because they use same history + # the liar solves this problem by adding fake results to history + self.liar = create_liar(self.args.constant_liar_type) + + if seed is None: # explicitly generate a seed to make the experiment reproducible + seed = np.random.default_rng().integers(2 ** 31) + self.rng = np.random.default_rng(seed) + _logger.info(f'Using random seed {seed}') + + self._params = {} # parameter_id -> parameters (in internal format) + self._running_params = {} # subset of above, that has been submitted but has not yet received loss + self._history = defaultdict(list) # parameter key -> list of Record + + def update_search_space(self, space): + self.space = format_search_space(space) + + def generate_parameters(self, parameter_id, **kwargs): + if self.liar and self._running_params: + # give a fake loss for each concurrently running paramater set + history = {key: records.copy() for key, records in self._history.items()} # copy history + lie = self.liar.lie() + for param in self._running_params.values(): + for key, value in param.items(): + history[key].append(Record(value, lie)) + else: + history = self._history + + params = suggest(self.args, self.rng, self.space, history) + + self._params[parameter_id] = params + self._running_params[parameter_id] = params + return deformat_parameters(params, self.space) + + def receive_trial_result(self, parameter_id, _parameters, value, **kwargs): + if self.optimize_mode is OptimizeMode.Minimize: + loss = extract_scalar_reward(value) + else: + loss = -extract_scalar_reward(value) + if self.liar: + self.liar.update(loss) + params = self._running_params.pop(parameter_id) + for key, value in params.items(): + self._history[key].append(Record(value, loss)) + + def trial_end(self, parameter_id, _success, **kwargs): + self._running_params.pop(parameter_id, None) + + def import_data(self, data): # for resuming experiment + for trial in data: + param = format_parameters(trial['parameter'], self.space) + loss = trial['value'] + if self.optimize_mode is OptimizeMode.Maximize: + loss = -trial['value'] + for key, value in param.items(): + self._history[key].append(Record(value, loss)) + _logger.info(f'Replayed {len(data)} trials') + +def suggest(args, rng, space, history): + params = {} + for key, spec in space.items(): + if spec.is_activated_in(params): # nested search space is chosen + params[key] = suggest_parameter(args, rng, spec, history[key]) + return params + +def suggest_parameter(args, rng, spec, parameter_history): + if len(parameter_history) < args.n_startup_jobs: # not enough history, still warming up + return random_tuner.suggest_parameter(rng, spec) + + if spec.categorical: + return suggest_categorical(args, rng, parameter_history, spec.size) + + if spec.normal_distributed: + mu = spec.mu + sigma = spec.sigma + clip = None + else: + # TPE does not support uniform distribution natively + # they are converted to normal((low + high) / 2, high - low) + mu = (spec.low + spec.high) * 0.5 + sigma = spec.high - spec.low + clip = (spec.low, spec.high) + + return suggest_normal(args, rng, parameter_history, mu, sigma, clip) + +## Public API part end ## + +## Utilities part ## + +class Record(NamedTuple): + param: Union[int, float] + loss: float + +class BestLiar: # assume 
running parameters have the best result; it accelerates convergence + def __init__(self): + self._best = None + + def update(self, loss): + if self._best is None or loss < self._best: + self._best = loss + + def lie(self): + # when there is no real result, all of history is the same lie, so the value does not matter + # in this case, return 0 instead of infinity to prevent potential calculation error + return 0.0 if self._best is None else self._best + +class WorstLiar: # assume running parameters have the worst result; it helps to jump out of a local minimum + def __init__(self): + self._worst = None + + def update(self, loss): + if self._worst is None or loss > self._worst: + self._worst = loss + + def lie(self): + return 0.0 if self._worst is None else self._worst + +class MeanLiar: # assume running parameters have the average result + def __init__(self): + self._sum = 0.0 + self._n = 0 + + def update(self, loss): + self._sum += loss + self._n += 1 + + def lie(self): + return 0.0 if self._n == 0 else (self._sum / self._n) + +def create_liar(liar_type): + if liar_type is None or liar_type.lower() == 'none': + return None + liar_classes = { + 'best': BestLiar, + 'worst': WorstLiar, + 'mean': MeanLiar, + } + return liar_classes[liar_type.lower()]() + +## Utilities part end ## + +## Algorithm part ## + +# the algorithm is implemented in process-oriented style because I find it easier to understand this way, +# you know exactly what data each step is processing. + +def suggest_categorical(args, rng, param_history, size): + """ + Suggest a categorical ("choice" or "randint") parameter. + """ + below, above = split_history(args, param_history) # split history into good ones and bad ones + + weights = linear_forgetting_weights(args, len(below)) + counts = np.bincount(below, weights, size) + p = (counts + args.prior_weight) / sum(counts + args.prior_weight) # calculate weight of good choices + samples = rng.choice(size, args.n_ei_candidates, p=p) # sample N EIs using the weights + below_llik = np.log(p[samples]) # the probability of these samples being good (llik means log-likelihood) + + weights = linear_forgetting_weights(args, len(above)) + counts = np.bincount(above, weights, size) + p = (counts + args.prior_weight) / sum(counts + args.prior_weight) # calculate weight of bad choices + above_llik = np.log(p[samples]) # the probability of the above samples being bad + + return samples[np.argmax(below_llik - above_llik)] # the one with the best probability of being good + +def suggest_normal(args, rng, param_history, prior_mu, prior_sigma, clip): + """ + Suggest a normally distributed parameter. + Uniform has been converted to normal in the caller function; log and q will be handled by "deformat_parameters".
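+ + In sketch form (mirroring the body below): fit one Parzen mixture on the good half of the history and one on + the bad half via ``adaptive_parzen_normal``, draw candidates from the good mixture with ``gmm1``, and return + the candidate maximizing ``below_llik - above_llik``.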
+ """ + below, above = split_history(args, param_history) # split history into good ones and bad ones + + weights, mus, sigmas = adaptive_parzen_normal(args, below, prior_mu, prior_sigma) # calculate weight of good segments + samples = gmm1(args, rng, weights, mus, sigmas, clip) # sample N EIs using the weights + below_llik = gmm1_lpdf(args, samples, weights, mus, sigmas, clip) # the probability of these samples to be good + + weights, mus, sigmas = adaptive_parzen_normal(args, above, prior_mu, prior_sigma) # calculate weight of bad segments + above_llik = gmm1_lpdf(args, samples, weights, mus, sigmas, clip) # the probability of above samples to be bad + + return samples[np.argmax(below_llik - above_llik)] # which one has best probability to be good + +def split_history(args, param_history): + """ + Divide trials into good ones (below) and bad ones (above). + """ + n_below = math.ceil(args.gamma * math.sqrt(len(param_history))) + n_below = min(n_below, args.linear_forgetting) + order = sorted(range(len(param_history)), key=(lambda i: param_history[i].loss)) # argsort by loss + below = [param_history[i].param for i in order[:n_below]] + above = [param_history[i].param for i in order[n_below:]] + return np.asarray(below), np.asarray(above) + +def linear_forgetting_weights(args, n): + """ + Calculate decayed weights of N trials. + """ + lf = args.linear_forgetting + if n < lf: + return np.ones(n) + else: + ramp = np.linspace(1.0 / n, 1.0, n - lf) + flat = np.ones(lf) + return np.concatenate([ramp, flat]) + +def adaptive_parzen_normal(args, history_mus, prior_mu, prior_sigma): + """ + The "Adaptive Parzen Estimator" described in paper section 4.2, for normal distribution. + + Because TPE internally only supports categorical and normal distributed space (domain), + this function is used for everything other than "choice" and "randint". + + Parameters + ========== + args: TpeArguments + Algorithm arguments. + history_mus: 1-d array of float + Parameter values evaluated in history. + These are the "observations" in paper section 4.2. ("placing density in the vicinity of K observations") + prior_mu: float + µ value of normal search space. + piror_sigma: float + σ value of normal search space. + + Returns + ======= + Tuple of three 1-d float arrays: (weight, µ, σ). + + The tuple represents N+1 "vicinity of observations" and each one's weight, + calculated from "N" history and "1" user provided prior. + + The result is sorted by µ. + """ + mus = np.append(history_mus, prior_mu) + order = np.argsort(mus) + mus = mus[order] + prior_index = np.searchsorted(mus, prior_mu) + + if len(mus) == 1: + sigmas = np.asarray([prior_sigma]) + elif len(mus) == 2: + sigmas = np.asarray([prior_sigma * 0.5, prior_sigma * 0.5]) + sigmas[prior_index] = prior_sigma + else: + l_delta = mus[1:-1] - mus[:-2] + r_delta = mus[2:] - mus[1:-1] + sigmas_mid = np.maximum(l_delta, r_delta) + sigmas = np.concatenate([[mus[1] - mus[0]], sigmas_mid, [mus[-1] - mus[-2]]]) + sigmas[prior_index] = prior_sigma + # "magic formula" in official implementation + n = min(100, len(mus) + 1) + sigmas = np.clip(sigmas, prior_sigma / n, prior_sigma) + + weights = np.append(linear_forgetting_weights(args, len(mus)), args.prior_weight) + weights = weights[order] + + return weights / np.sum(weights), mus, sigmas + +def gmm1(args, rng, weights, mus, sigmas, clip=None): + """ + Gaussian Mixture Model 1D. 
+ """ + ret = np.asarray([]) + while len(ret) < args.n_ei_candidates: + n = args.n_ei_candidates - len(ret) + active = np.argmax(rng.multinomial(1, weights, n), axis=1) + samples = rng.normal(mus[active], sigmas[active]) + if clip: + samples = samples[(clip[0] <= samples) & (samples <= clip[1])] + ret = np.concatenate([ret, samples]) + return ret + +def gmm1_lpdf(_args, samples, weights, mus, sigmas, clip=None): + """ + Gaussian Mixture Model 1D's log probability distribution function. + """ + eps = 1e-12 + + if clip: + normal_cdf_low = erf((clip[0] - mus) / np.maximum(np.sqrt(2) * sigmas, eps)) * 0.5 + 0.5 + normal_cdf_high = erf((clip[1] - mus) / np.maximum(np.sqrt(2) * sigmas, eps)) * 0.5 + 0.5 + p_accept = np.sum(weights * (normal_cdf_high - normal_cdf_low)) + else: + p_accept = 1 + + # normal lpdf + dist = samples.reshape(-1, 1) - mus + mahal = (dist / np.maximum(sigmas, eps)) ** 2 + z = np.sqrt(2 * np.pi) * sigmas + coef = weights / z / p_accept + normal_lpdf = -0.5 * mahal + np.log(coef) + + # log sum rows + m = normal_lpdf.max(axis=1) + e = np.exp(normal_lpdf - m.reshape(-1, 1)) + return np.log(e.sum(axis=1)) + m + +## Algorithm part end ## diff --git a/nni/algorithms/nas/__init__.py b/nni/algorithms/nas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/nas/pytorch/__init__.py b/nni/algorithms/nas/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/algorithms/nas/pytorch/cdarts/__init__.py b/nni/algorithms/nas/pytorch/cdarts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ab34902e0ec5d4f7cbbf943f2dcaf03d0a7bc97d --- /dev/null +++ b/nni/algorithms/nas/pytorch/cdarts/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator +from .trainer import CdartsTrainer diff --git a/nni/algorithms/nas/pytorch/cdarts/mutator.py b/nni/algorithms/nas/pytorch/cdarts/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..a0bf79040eafb86c336467246521fb9767f60144 --- /dev/null +++ b/nni/algorithms/nas/pytorch/cdarts/mutator.py @@ -0,0 +1,143 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch + +from apex.parallel import DistributedDataParallel # pylint: disable=import-error +from nni.algorithms.nas.pytorch.darts import DartsMutator # pylint: disable=wrong-import-order +from nni.nas.pytorch.mutables import LayerChoice # pylint: disable=wrong-import-order +from nni.nas.pytorch.mutator import Mutator # pylint: disable=wrong-import-order + + +class RegularizedDartsMutator(DartsMutator): + """ + This is :class:`~nni.algorithms.nas.pytorch.darts.DartsMutator` basically, with two differences. + + 1. Choices can be cut (bypassed). This is done by ``cut_choices``. Cutted choices will not be used in + forward pass and thus consumes no memory. + + 2. Regularization on choices, to prevent the mutator from overfitting on some choices. + """ + + def reset(self): + """ + Warnings + -------- + Renamed :func:`~reset_with_loss` to return regularization loss on reset. + """ + raise ValueError("You should probably call `reset_with_loss`.") + + def cut_choices(self, cut_num=2): + """ + Cut the choices with the smallest weights. 
+ ``cut_num`` should be the accumulative number of cutting, e.g., if first time cutting + is 2, the second time should be 4 to cut another two. + + Parameters + ---------- + cut_num : int + Number of choices to cut, so far. + + Warnings + -------- + Though the parameters are set to :math:`-\infty` to be bypassed, they will still receive gradient of 0, + which introduced ``nan`` problem when calling ``optimizer.step()``. To solve this issue, a simple way is to + reset nan to :math:`-\infty` each time after the parameters are updated. + """ + # `cut_choices` is implemented but not used in current implementation of CdartsTrainer + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + _, idx = torch.topk(-self.choices[mutable.key], cut_num) + with torch.no_grad(): + for i in idx: + self.choices[mutable.key][i] = -float("inf") + + def reset_with_loss(self): + """ + Resample and return loss. If loss is 0, to avoid device issue, it will return ``None``. + + Currently loss penalty are proportional to the L1-norm of parameters corresponding + to modules if their type name contains certain substrings. These substrings include: ``poolwithoutbn``, + ``identity``, ``dilconv``. + """ + self._cache, reg_loss = self.sample_search() + return reg_loss + + def sample_search(self): + result = super().sample_search() + loss = [] + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + def need_reg(choice): + return any(t in str(type(choice)).lower() for t in ["poolwithoutbn", "identity", "dilconv"]) + + for i, choice in enumerate(mutable.choices): + if need_reg(choice): + norm = torch.abs(self.choices[mutable.key][i]) + if norm < 1E10: + loss.append(norm) + if not loss: + return result, None + return result, sum(loss) + + def export(self, logger=None): + """ + Export an architecture with logger. Genotype will be printed with logger. + + Returns + ------- + dict + A mapping from mutable keys to decisions. + """ + result = self.sample_final() + if hasattr(self.model, "plot_genotype") and logger is not None: + genotypes = self.model.plot_genotype(result, logger) + return result, genotypes + + +class RegularizedMutatorParallel(DistributedDataParallel): + """ + Parallelize :class:`~RegularizedDartsMutator`. + + This makes :func:`~RegularizedDartsMutator.reset_with_loss` method parallelized, + also allowing :func:`~RegularizedDartsMutator.cut_choices` and :func:`~RegularizedDartsMutator.export` + to be easily accessible. + """ + def reset_with_loss(self): + """ + Parallelized :func:`~RegularizedDartsMutator.reset_with_loss`. + """ + result = self.module.reset_with_loss() + self.callback_queued = False + return result + + def cut_choices(self, *args, **kwargs): + """ + Parallelized :func:`~RegularizedDartsMutator.cut_choices`. + """ + self.module.cut_choices(*args, **kwargs) + + def export(self, logger): + """ + Parallelized :func:`~RegularizedDartsMutator.export`. + """ + return self.module.export(logger) + + +class DartsDiscreteMutator(Mutator): + """ + A mutator that applies the final sampling result of a parent mutator on another model to train. + + Parameters + ---------- + model : nn.Module + The model to apply the mutator. + parent_mutator : nni.nas.pytorch.mutator.Mutator + The mutator that provides ``sample_final`` method, that will be called to get the architecture. 
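+
+    A minimal usage sketch (the variable names are illustrative)::
+
+        search_mutator = RegularizedDartsMutator(search_model)
+        eval_mutator = DartsDiscreteMutator(eval_model, search_mutator)
+        eval_mutator.reset()  # applies search_mutator.sample_final() to eval_model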
+    """
+    def __init__(self, model, parent_mutator):
+        super().__init__(model)
+        self.__dict__["parent_mutator"] = parent_mutator  # assign via __dict__ so the parent's parameters are not registered on this module
+
+    def sample_search(self):
+        return self.parent_mutator.sample_final()
diff --git a/nni/algorithms/nas/pytorch/cdarts/trainer.py b/nni/algorithms/nas/pytorch/cdarts/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a5174216fc4ffdc224e9cd6bd1483322005fe01
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/cdarts/trainer.py
@@ -0,0 +1,275 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import json
+import logging
+import os
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import apex  # pylint: disable=import-error
+from apex.parallel import DistributedDataParallel  # pylint: disable=import-error
+from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator  # pylint: disable=wrong-import-order
+from nni.nas.pytorch.utils import AverageMeterGroup  # pylint: disable=wrong-import-order
+
+from .utils import CyclicIterator, TorchTensorEncoder, accuracy, reduce_metrics
+
+PHASE_SMALL = "small"
+PHASE_LARGE = "large"
+
+
+class InteractiveKLLoss(nn.Module):
+    def __init__(self, temperature):
+        super().__init__()
+        self.temperature = temperature
+        # self.kl_loss = nn.KLDivLoss(reduction = 'batchmean')
+        self.kl_loss = nn.KLDivLoss()
+
+    def forward(self, student, teacher):
+        return self.kl_loss(F.log_softmax(student / self.temperature, dim=1),
+                            F.softmax(teacher / self.temperature, dim=1))
+
+
+class CdartsTrainer(object):
+    """
+    CDARTS trainer.
+
+    Parameters
+    ----------
+    model_small : nn.Module
+        PyTorch model to be trained. This is the search network of CDARTS.
+    model_large : nn.Module
+        PyTorch model to be trained. This is the evaluation network of CDARTS.
+    criterion : callable
+        Receives logits and ground truth label, returns a loss tensor, e.g., ``nn.CrossEntropyLoss()``.
+    loaders : list of torch.utils.data.DataLoader
+        List of train data and valid data loaders, for training weights and architecture weights respectively.
+    samplers : list of torch.utils.data.Sampler
+        List of train data and valid data samplers. These can be PyTorch standard samplers if not distributed.
+        In distributed mode, the sampler needs to have a ``set_epoch`` method. Refer to data utils in the CDARTS example for details.
+    logger : logging.Logger
+        The logger for logging. Will use the nni logger by default (if logger is ``None``).
+    regular_coeff : float
+        The coefficient of the regular loss.
+    regular_ratio : float
+        The ratio of the regular loss.
+    warmup_epochs : int
+        Number of epochs to warm up the search network.
+    fix_head : bool
+        ``True`` to freeze the parameters of the auxiliary heads, ``False`` to train them.
+    epochs : int
+        Number of epochs planned for training.
+    steps_per_epoch : int
+        Steps of one epoch.
+    loss_alpha : float
+        Coefficient that weights the interactive (distillation) loss against the classification loss.
+    loss_T : float
+        Temperature of the interactive (KL) loss.
+    distributed : bool
+        ``True`` if using distributed training, else non-distributed training.
+    log_frequency : int
+        Step count per logging.
+    grad_clip : float
+        Gradient clipping for weights.
+    interactive_type : string
+        ``kl`` or ``smoothl1``.
+    output_path : string
+        Log storage path.
+    w_lr : float
+        Learning rate of the search network parameters.
+    w_momentum : float
+        Momentum of the search and the evaluation network.
+    w_weight_decay : float
+        Weight decay of the search and the evaluation network parameters.
+    alpha_lr : float
+        Learning rate of the architecture parameters.
+    alpha_weight_decay : float
+        Weight decay of the architecture parameters.
+    nasnet_lr : float
+        Learning rate of the evaluation network parameters.
+    local_rank : int
+        Rank of the current process in distributed training.
+    share_module : bool
+        ``True`` to share the stem and auxiliary heads between the search and the evaluation network.
+    """
+    def __init__(self, model_small, model_large, criterion, loaders, samplers, logger=None,
+                 regular_coeff=5, regular_ratio=0.2, warmup_epochs=2, fix_head=True,
+                 epochs=32, steps_per_epoch=None, loss_alpha=2, loss_T=2, distributed=True,
+                 log_frequency=10, grad_clip=5.0, interactive_type='kl', output_path='./outputs',
+                 w_lr=0.2, w_momentum=0.9, w_weight_decay=3e-4, alpha_lr=0.2, alpha_weight_decay=1e-4,
+                 nasnet_lr=0.2, local_rank=0, share_module=True):
+        if logger is None:
+            logger = logging.getLogger(__name__)
+        train_loader, valid_loader = loaders
+        train_sampler, valid_sampler = samplers
+        self.train_loader = CyclicIterator(train_loader, train_sampler, distributed)
+        self.valid_loader = CyclicIterator(valid_loader, valid_sampler, distributed)
+
+        self.regular_coeff = regular_coeff
+        self.regular_ratio = regular_ratio
+        self.warmup_epochs = warmup_epochs
+        self.fix_head = fix_head
+        self.epochs = epochs
+        self.steps_per_epoch = steps_per_epoch
+        if self.steps_per_epoch is None:
+            self.steps_per_epoch = min(len(self.train_loader), len(self.valid_loader))
+        self.loss_alpha = loss_alpha
+        self.grad_clip = grad_clip
+        if interactive_type == "kl":
+            self.interactive_loss = InteractiveKLLoss(loss_T)
+        elif interactive_type == "smoothl1":
+            self.interactive_loss = nn.SmoothL1Loss()
+        self.loss_T = loss_T
+        self.distributed = distributed
+        self.log_frequency = log_frequency
+        self.main_proc = not distributed or local_rank == 0
+
+        self.logger = logger
+        self.checkpoint_dir = output_path
+        if self.main_proc:
+            os.makedirs(self.checkpoint_dir, exist_ok=True)
+        if distributed:
+            torch.distributed.barrier()
+
+        self.model_small = model_small
+        self.model_large = model_large
+        if self.fix_head:
+            for param in self.model_small.aux_head.parameters():
+                param.requires_grad = False
+            for param in self.model_large.aux_head.parameters():
+                param.requires_grad = False
+
+        self.mutator_small = RegularizedDartsMutator(self.model_small).cuda()
+        self.mutator_large = DartsDiscreteMutator(self.model_large, self.mutator_small).cuda()
+        self.criterion = criterion
+
+        self.optimizer_small = torch.optim.SGD(self.model_small.parameters(), w_lr,
+                                               momentum=w_momentum, weight_decay=w_weight_decay)
+        self.optimizer_large = torch.optim.SGD(self.model_large.parameters(), nasnet_lr,
+                                               momentum=w_momentum, weight_decay=w_weight_decay)
+        self.optimizer_alpha = torch.optim.Adam(self.mutator_small.parameters(), alpha_lr,
+                                                betas=(0.5, 0.999), weight_decay=alpha_weight_decay)
+
+        if distributed:
+            apex.parallel.convert_syncbn_model(self.model_small)
+            apex.parallel.convert_syncbn_model(self.model_large)
+            self.model_small = DistributedDataParallel(self.model_small, delay_allreduce=True)
+            self.model_large = DistributedDataParallel(self.model_large, delay_allreduce=True)
+            self.mutator_small = RegularizedMutatorParallel(self.mutator_small, delay_allreduce=True)
+            if share_module:
+                self.model_small.callback_queued = True
+                self.model_large.callback_queued = True
+            # mutator_large is never optimized, so it does not need to be parallelized
+
+    def _warmup(self, phase, epoch):
+        assert phase in [PHASE_SMALL, PHASE_LARGE]
+        if phase == PHASE_SMALL:
+            model, optimizer =
self.model_small, self.optimizer_small + elif phase == PHASE_LARGE: + model, optimizer = self.model_large, self.optimizer_large + model.train() + meters = AverageMeterGroup() + for step in range(self.steps_per_epoch): + x, y = next(self.train_loader) + x, y = x.cuda(), y.cuda() + + optimizer.zero_grad() + logits_main, _ = model(x) + loss = self.criterion(logits_main, y) + loss.backward() + + self._clip_grad_norm(model) + optimizer.step() + prec1, prec5 = accuracy(logits_main, y, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} + metrics = reduce_metrics(metrics, self.distributed) + meters.update(metrics) + if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): + self.logger.info("Epoch [%d/%d] Step [%d/%d] (%s) %s", epoch + 1, self.epochs, + step + 1, self.steps_per_epoch, phase, meters) + + def _clip_grad_norm(self, model): + if isinstance(model, DistributedDataParallel): + nn.utils.clip_grad_norm_(model.module.parameters(), self.grad_clip) + else: + nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip) + + def _reset_nan(self, parameters): + with torch.no_grad(): + for param in parameters: + for i, p in enumerate(param): + if p != p: # equivalent to `isnan(p)` + param[i] = float("-inf") + + def _joint_train(self, epoch): + self.model_large.train() + self.model_small.train() + meters = AverageMeterGroup() + for step in range(self.steps_per_epoch): + trn_x, trn_y = next(self.train_loader) + val_x, val_y = next(self.valid_loader) + trn_x, trn_y = trn_x.cuda(), trn_y.cuda() + val_x, val_y = val_x.cuda(), val_y.cuda() + + # step 1. optimize architecture + self.optimizer_alpha.zero_grad() + self.optimizer_large.zero_grad() + reg_decay = max(self.regular_coeff * (1 - float(epoch - self.warmup_epochs) / ( + (self.epochs - self.warmup_epochs) * self.regular_ratio)), 0) + loss_regular = self.mutator_small.reset_with_loss() + if loss_regular: + loss_regular *= reg_decay + logits_search, emsemble_logits_search = self.model_small(val_x) + logits_main, emsemble_logits_main = self.model_large(val_x) + loss_cls = (self.criterion(logits_search, val_y) + self.criterion(logits_main, val_y)) / self.loss_alpha + loss_interactive = self.interactive_loss(emsemble_logits_search, emsemble_logits_main) * (self.loss_T ** 2) * self.loss_alpha + loss = loss_cls + loss_interactive + loss_regular + loss.backward() + self._clip_grad_norm(self.model_large) + self.optimizer_large.step() + self.optimizer_alpha.step() + # NOTE: need to call here `self._reset_nan(self.mutator_small.parameters())` if `cut_choices` + + # step 2. 
optimize op weights + self.optimizer_small.zero_grad() + with torch.no_grad(): + # resample architecture since parameters have been changed + self.mutator_small.reset_with_loss() + logits_search_train, _ = self.model_small(trn_x) + loss_weight = self.criterion(logits_search_train, trn_y) + loss_weight.backward() + self._clip_grad_norm(self.model_small) + self.optimizer_small.step() + + metrics = {"loss_cls": loss_cls, "loss_interactive": loss_interactive, + "loss_regular": loss_regular, "loss_weight": loss_weight} + metrics = reduce_metrics(metrics, self.distributed) + meters.update(metrics) + + if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): + self.logger.info("Epoch [%d/%d] Step [%d/%d] (joint) %s", epoch + 1, self.epochs, + step + 1, self.steps_per_epoch, meters) + + def train(self): + for epoch in range(self.epochs): + if epoch < self.warmup_epochs: + with torch.no_grad(): # otherwise grads will be retained on the architecture params + self.mutator_small.reset_with_loss() + self._warmup(PHASE_SMALL, epoch) + else: + with torch.no_grad(): + self.mutator_large.reset() + self._warmup(PHASE_LARGE, epoch) + self._joint_train(epoch) + + self.export(os.path.join(self.checkpoint_dir, "epoch_{:02d}.json".format(epoch)), + os.path.join(self.checkpoint_dir, "epoch_{:02d}.genotypes".format(epoch))) + + def export(self, file, genotype_file): + if self.main_proc: + mutator_export, genotypes = self.mutator_small.export(self.logger) + with open(file, "w") as f: + json.dump(mutator_export, f, indent=2, sort_keys=True, cls=TorchTensorEncoder) + with open(genotype_file, "w") as f: + f.write(str(genotypes)) diff --git a/nni/algorithms/nas/pytorch/cdarts/utils.py b/nni/algorithms/nas/pytorch/cdarts/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96afa9425633811327c158f54b8c63be95775455 --- /dev/null +++ b/nni/algorithms/nas/pytorch/cdarts/utils.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
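+
+# CyclicIterator (below) wraps a DataLoader so that ``next()`` never raises StopIteration:
+# when the underlying iterator is exhausted, a new epoch starts, and in distributed mode
+# ``sampler.set_epoch(epoch)`` is called first so that all ranks reshuffle consistently.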
+ +import json +import os + +import torch +import torch.distributed as dist + + +class CyclicIterator: + def __init__(self, loader, sampler, distributed): + self.loader = loader + self.sampler = sampler + self.epoch = 0 + self.distributed = distributed + self._next_epoch() + + def _next_epoch(self): + if self.distributed: + self.sampler.set_epoch(self.epoch) + self.iterator = iter(self.loader) + self.epoch += 1 + + def __len__(self): + return len(self.loader) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self.iterator) + except StopIteration: + self._next_epoch() + return next(self.iterator) + + +class TorchTensorEncoder(json.JSONEncoder): + def default(self, o): # pylint: disable=method-hidden + if isinstance(o, torch.Tensor): + return o.tolist() + return super().default(o) + + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res.append(correct_k.mul_(1.0 / batch_size)) + return res + + +def reduce_tensor(tensor): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= float(os.environ["WORLD_SIZE"]) + return rt + + +def reduce_metrics(metrics, distributed=False): + if distributed: + return {k: reduce_tensor(v).item() for k, v in metrics.items()} + return {k: v.item() for k, v in metrics.items()} diff --git a/nni/algorithms/nas/pytorch/classic_nas/__init__.py b/nni/algorithms/nas/pytorch/classic_nas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec3f5a4894a0460d4a0582a0d6cd43af9bed77e2 --- /dev/null +++ b/nni/algorithms/nas/pytorch/classic_nas/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .mutator import get_and_apply_next_architecture diff --git a/nni/algorithms/nas/pytorch/classic_nas/mutator.py b/nni/algorithms/nas/pytorch/classic_nas/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..7254a8b0b4a34b5913dfc71d11f7364b86fc04b8 --- /dev/null +++ b/nni/algorithms/nas/pytorch/classic_nas/mutator.py @@ -0,0 +1,221 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import logging +import os +import sys + +import torch + +import nni +from nni.runtime.env_vars import trial_env_vars +from nni.nas.pytorch.mutables import LayerChoice, InputChoice, MutableScope +from nni.nas.pytorch.mutator import Mutator + +logger = logging.getLogger(__name__) + +NNI_GEN_SEARCH_SPACE = "NNI_GEN_SEARCH_SPACE" +LAYER_CHOICE = "layer_choice" +INPUT_CHOICE = "input_choice" + + +def get_and_apply_next_architecture(model): + """ + Wrapper of :class:`~nni.nas.pytorch.classic_nas.mutator.ClassicMutator` to make it more meaningful, + similar to ``get_next_parameter`` for HPO. + + It will generate search space based on ``model``. + If env ``NNI_GEN_SEARCH_SPACE`` exists, this is in dry run mode for + generating search space for the experiment. + If not, there are still two mode, one is nni experiment mode where users + use ``nnictl`` to start an experiment. The other is standalone mode + where users directly run the trial command, this mode chooses the first + one(s) for each LayerChoice and InputChoice. 
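+
+    A typical trial script uses it like this (a sketch; ``build_model`` and
+    ``train_and_report`` are illustrative placeholders)::
+
+        model = build_model()                   # contains LayerChoice / InputChoice
+        get_and_apply_next_architecture(model)  # fixes the architecture chosen by the tuner
+        train_and_report(model)                 # train it as an ordinary nni trial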
+ + Parameters + ---------- + model : nn.Module + User's model with search space (e.g., LayerChoice, InputChoice) embedded in it. + """ + ClassicMutator(model) + + +class ClassicMutator(Mutator): + """ + This mutator is to apply the architecture chosen from tuner. + It implements the forward function of LayerChoice and InputChoice, + to only activate the chosen ones. + + Parameters + ---------- + model : nn.Module + User's model with search space (e.g., LayerChoice, InputChoice) embedded in it. + """ + + def __init__(self, model): + super(ClassicMutator, self).__init__(model) + self._chosen_arch = {} + self._search_space = self._generate_search_space() + if NNI_GEN_SEARCH_SPACE in os.environ: + # dry run for only generating search space + self._dump_search_space(os.environ[NNI_GEN_SEARCH_SPACE]) + sys.exit(0) + + if trial_env_vars.NNI_PLATFORM is None: + logger.warning("This is in standalone mode, the chosen are the first one(s).") + self._chosen_arch = self._standalone_generate_chosen() + else: + # get chosen arch from tuner + self._chosen_arch = nni.get_next_parameter() + if self._chosen_arch is None: + if trial_env_vars.NNI_PLATFORM == "unittest": + # happens if NNI_PLATFORM is intentionally set, e.g., in UT + logger.warning("`NNI_PLATFORM` is set but `param` is None. Falling back to standalone mode.") + self._chosen_arch = self._standalone_generate_chosen() + else: + raise RuntimeError("Chosen architecture is None. This may be a platform error.") + self.reset() + + def _sample_layer_choice(self, mutable, idx, value, search_space_item): + """ + Convert layer choice to tensor representation. + + Parameters + ---------- + mutable : Mutable + idx : int + Number `idx` of list will be selected. + value : str + The verbose representation of the selected value. + search_space_item : list + The list for corresponding search space. + """ + # doesn't support multihot for layer choice yet + onehot_list = [False] * len(mutable) + assert 0 <= idx < len(mutable) and search_space_item[idx] == value, \ + "Index '{}' in search space '{}' is not '{}'".format(idx, search_space_item, value) + onehot_list[idx] = True + return torch.tensor(onehot_list, dtype=torch.bool) # pylint: disable=not-callable + + def _sample_input_choice(self, mutable, idx, value, search_space_item): + """ + Convert input choice to tensor representation. + + Parameters + ---------- + mutable : Mutable + idx : int + Number `idx` of list will be selected. + value : str + The verbose representation of the selected value. + search_space_item : list + The list for corresponding search space. + """ + candidate_repr = search_space_item["candidates"] + multihot_list = [False] * mutable.n_candidates + for i, v in zip(idx, value): + assert 0 <= i < mutable.n_candidates and candidate_repr[i] == v, \ + "Index '{}' in search space '{}' is not '{}'".format(i, candidate_repr, v) + assert not multihot_list[i], "'{}' is selected twice in '{}', which is not allowed.".format(i, idx) + multihot_list[i] = True + return torch.tensor(multihot_list, dtype=torch.bool) # pylint: disable=not-callable + + def sample_search(self): + """ + See :meth:`sample_final`. + """ + return self.sample_final() + + def sample_final(self): + """ + Convert the chosen arch and apply it on model. 
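+
+        For example (the key name is illustrative), a tuner decision such as::
+
+            {"conv_key": {"_value": "conv2", "_idx": 1}}
+
+        for a three-way LayerChoice becomes the mask ``tensor([False, True, False])``.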
+ """ + assert set(self._chosen_arch.keys()) == set(self._search_space.keys()), \ + "Unmatched keys, expected keys '{}' from search space, found '{}'.".format(self._search_space.keys(), + self._chosen_arch.keys()) + result = dict() + for mutable in self.mutables: + if isinstance(mutable, (LayerChoice, InputChoice)): + assert mutable.key in self._chosen_arch, \ + "Expected '{}' in chosen arch, but not found.".format(mutable.key) + data = self._chosen_arch[mutable.key] + assert isinstance(data, dict) and "_value" in data and "_idx" in data, \ + "'{}' is not a valid choice.".format(data) + if isinstance(mutable, LayerChoice): + result[mutable.key] = self._sample_layer_choice(mutable, data["_idx"], data["_value"], + self._search_space[mutable.key]["_value"]) + elif isinstance(mutable, InputChoice): + result[mutable.key] = self._sample_input_choice(mutable, data["_idx"], data["_value"], + self._search_space[mutable.key]["_value"]) + elif isinstance(mutable, MutableScope): + logger.info("Mutable scope '%s' is skipped during parsing choices.", mutable.key) + else: + raise TypeError("Unsupported mutable type: '%s'." % type(mutable)) + return result + + def _standalone_generate_chosen(self): + """ + Generate the chosen architecture for standalone mode, + i.e., choose the first one(s) for LayerChoice and InputChoice. + :: + { key_name: {"_value": "conv1", + "_idx": 0} } + { key_name: {"_value": ["in1"], + "_idx": [0]} } + Returns + ------- + dict + the chosen architecture + """ + chosen_arch = {} + for key, val in self._search_space.items(): + if val["_type"] == LAYER_CHOICE: + choices = val["_value"] + chosen_arch[key] = {"_value": choices[0], "_idx": 0} + elif val["_type"] == INPUT_CHOICE: + choices = val["_value"]["candidates"] + n_chosen = val["_value"]["n_chosen"] + if n_chosen is None: + n_chosen = len(choices) + chosen_arch[key] = {"_value": choices[:n_chosen], "_idx": list(range(n_chosen))} + else: + raise ValueError("Unknown key '%s' and value '%s'." % (key, val)) + return chosen_arch + + def _generate_search_space(self): + """ + Generate search space from mutables. + Here is the search space format: + :: + { key_name: {"_type": "layer_choice", + "_value": ["conv1", "conv2"]} } + { key_name: {"_type": "input_choice", + "_value": {"candidates": ["in1", "in2"], + "n_chosen": 1}} } + Returns + ------- + dict + the generated search space + """ + search_space = {} + for mutable in self.mutables: + # for now we only generate flattened search space + if isinstance(mutable, LayerChoice): + key = mutable.key + val = mutable.names + search_space[key] = {"_type": LAYER_CHOICE, "_value": val} + elif isinstance(mutable, InputChoice): + key = mutable.key + search_space[key] = {"_type": INPUT_CHOICE, + "_value": {"candidates": mutable.choose_from, + "n_chosen": mutable.n_chosen}} + elif isinstance(mutable, MutableScope): + logger.info("Mutable scope '%s' is skipped during generating search space.", mutable.key) + else: + raise TypeError("Unsupported mutable type: '%s'." % type(mutable)) + return search_space + + def _dump_search_space(self, file_path): + with open(file_path, "w") as ss_file: + json.dump(self._search_space, ss_file, sort_keys=True, indent=2) diff --git a/nni/algorithms/nas/pytorch/cream/__init__.py b/nni/algorithms/nas/pytorch/cream/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..43a038b4670c0e3e5adeb1d34a4ee025970a8b19 --- /dev/null +++ b/nni/algorithms/nas/pytorch/cream/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT license.
+
+from .trainer import CreamSupernetTrainer
diff --git a/nni/algorithms/nas/pytorch/cream/trainer.py b/nni/algorithms/nas/pytorch/cream/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b44f404669cbea6c37aa7aef562de8bee3df3257
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/cream/trainer.py
@@ -0,0 +1,403 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from copy import deepcopy
+
+import torch
+from nni.nas.pytorch.trainer import Trainer
+from nni.nas.pytorch.utils import AverageMeterGroup
+
+from .utils import accuracy, reduce_metrics
+
+logger = logging.getLogger(__name__)
+
+
+class CreamSupernetTrainer(Trainer):
+    """
+    This trainer trains a supernet and outputs prioritized architectures that can be used for other tasks.
+
+    Parameters
+    ----------
+    model : nn.Module
+        Model with mutables.
+    loss : callable
+        Called with logits and targets. Returns a loss tensor.
+    val_loss : callable
+        Called with logits and targets for validation only. Returns a loss tensor.
+    optimizer : Optimizer
+        Optimizer that optimizes the model.
+    num_epochs : int
+        Number of epochs of training.
+    train_loader : iterable
+        Data loader of training. Raises ``StopIteration`` when one epoch is exhausted.
+    valid_loader : iterable
+        Data loader of validation. Raises ``StopIteration`` when one epoch is exhausted.
+    mutator : Mutator
+        A mutator object that has been initialized with the model.
+    batch_size : int
+        Batch size.
+    log_frequency : int
+        Number of mini-batches between logging of metrics.
+    meta_sta_epoch : int
+        Epoch from which the meta matching network starts to be used to pick the teacher architecture.
+    update_iter : int
+        Interval (in steps) between updates of the meta matching network.
+    slices : int
+        Batch size of the mini training data used to train the meta matching network.
+    pool_size : int
+        Size of the prioritized board.
+    pick_method : str
+        How to pick the teacher network, either ``'top1'`` or ``'meta'``.
+    choice_num : int
+        Number of operations in the supernet.
+    sta_num : tuple of int
+        Number of layers in each stage of the supernet (5 stages in the supernet).
+    acc_gap : int
+        Accuracy improvement beyond which the FLOPs limitation is waived.
+    flops_dict : dict
+        Dictionary of the FLOPs of each layer's operations in the supernet.
+    flops_fixed : int
+        FLOPs of the fixed part of the supernet.
+    local_rank : int
+        Index of the current rank.
+    callbacks : list of Callback
+        Callbacks to plug into the trainer. See Callbacks.
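+
+    A construction sketch (all variables are illustrative; see the Cream example in the
+    NNI repository for a complete setup)::
+
+        trainer = CreamSupernetTrainer(model, loss, val_loss, optimizer, 120,
+                                       train_loader, valid_loader, mutator=mutator,
+                                       flops_dict=flops_dict, local_rank=local_rank)
+        trainer.train()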
+ """ + + def __init__(self, model, loss, val_loss, + optimizer, num_epochs, train_loader, valid_loader, + mutator=None, batch_size=64, log_frequency=None, + meta_sta_epoch=20, update_iter=200, slices=2, + pool_size=10, pick_method='meta', choice_num=6, + sta_num=(4, 4, 4, 4, 4), acc_gap=5, + flops_dict=None, flops_fixed=0, local_rank=0, callbacks=None): + assert torch.cuda.is_available() + super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None, + optimizer, num_epochs, None, None, + batch_size, None, None, log_frequency, callbacks) + self.model = model + self.loss = loss + self.val_loss = val_loss + self.train_loader = train_loader + self.valid_loader = valid_loader + self.log_frequency = log_frequency + self.batch_size = batch_size + self.optimizer = optimizer + self.model = model + self.loss = loss + self.num_epochs = num_epochs + self.meta_sta_epoch = meta_sta_epoch + self.update_iter = update_iter + self.slices = slices + self.pick_method = pick_method + self.pool_size = pool_size + self.local_rank = local_rank + self.choice_num = choice_num + self.sta_num = sta_num + self.acc_gap = acc_gap + self.flops_dict = flops_dict + self.flops_fixed = flops_fixed + + self.current_student_arch = None + self.current_teacher_arch = None + self.main_proc = (local_rank == 0) + self.current_epoch = 0 + + self.prioritized_board = [] + + # size of prioritized board + def _board_size(self): + return len(self.prioritized_board) + + # select teacher architecture according to the logit difference + def _select_teacher(self): + self._replace_mutator_cand(self.current_student_arch) + + if self.pick_method == 'top1': + meta_value, teacher_cand = 0.5, sorted( + self.prioritized_board, reverse=True)[0][3] + elif self.pick_method == 'meta': + meta_value, cand_idx, teacher_cand = -1000000000, -1, None + for now_idx, item in enumerate(self.prioritized_board): + inputx = item[4] + output = torch.nn.functional.softmax(self.model(inputx), dim=1) + weight = self.model.module.forward_meta(output - item[5]) + if weight > meta_value: + meta_value = weight + cand_idx = now_idx + teacher_cand = self.prioritized_board[cand_idx][3] + assert teacher_cand is not None + meta_value = torch.nn.functional.sigmoid(-weight) + else: + raise ValueError('Method Not supported') + + return meta_value, teacher_cand + + # check whether to update prioritized board + def _isUpdateBoard(self, prec1, flops): + if self.current_epoch <= self.meta_sta_epoch: + return False + + if len(self.prioritized_board) < self.pool_size: + return True + + if prec1 > self.prioritized_board[-1][1] + self.acc_gap: + return True + + if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]: + return True + + return False + + # update prioritized board + def _update_prioritized_board(self, inputs, teacher_output, outputs, prec1, flops): + if self._isUpdateBoard(prec1, flops): + val_prec1 = prec1 + training_data = deepcopy(inputs[:self.slices].detach()) + if len(self.prioritized_board) == 0: + features = deepcopy(outputs[:self.slices].detach()) + else: + features = deepcopy( + teacher_output[:self.slices].detach()) + self.prioritized_board.append( + (val_prec1, + prec1, + flops, + self.current_student_arch, + training_data, + torch.nn.functional.softmax( + features, + dim=1))) + self.prioritized_board = sorted( + self.prioritized_board, reverse=True) + + if len(self.prioritized_board) > self.pool_size: + del self.prioritized_board[-1] + + # only update student network weights + def _update_student_weights_only(self, grad_1): 
+ for weight, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1): + weight.grad = grad_item + torch.nn.utils.clip_grad_norm_( + self.model.module.rand_parameters(self.current_student_arch), 1) + self.optimizer.step() + for weight, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1): + del weight.grad + + # only update meta networks weights + def _update_meta_weights_only(self, teacher_cand, grad_teacher): + for weight, grad_item in zip(self.model.module.rand_parameters( + teacher_cand, self.pick_method == 'meta'), grad_teacher): + weight.grad = grad_item + + # clip gradients + torch.nn.utils.clip_grad_norm_( + self.model.module.rand_parameters( + self.current_student_arch, self.pick_method == 'meta'), 1) + + self.optimizer.step() + for weight, grad_item in zip(self.model.module.rand_parameters( + teacher_cand, self.pick_method == 'meta'), grad_teacher): + del weight.grad + + # simulate sgd updating + def _simulate_sgd_update(self, w, g, optimizer): + return g * optimizer.param_groups[-1]['lr'] + w + + # split training images into several slices + def _get_minibatch_input(self, input): # pylint: disable=redefined-builtin + slice = self.slices # pylint: disable=redefined-builtin + x = deepcopy(input[:slice].clone().detach()) + return x + + # calculate 1st gradient of student architectures + def _calculate_1st_gradient(self, kd_loss): + self.optimizer.zero_grad() + grad = torch.autograd.grad( + kd_loss, + self.model.module.rand_parameters(self.current_student_arch), + create_graph=True) + return grad + + # calculate 2nd gradient of meta networks + def _calculate_2nd_gradient(self, validation_loss, teacher_cand, students_weight): + self.optimizer.zero_grad() + grad_student_val = torch.autograd.grad( + validation_loss, + self.model.module.rand_parameters(self.current_student_arch), + retain_graph=True) + + grad_teacher = torch.autograd.grad( + students_weight[0], + self.model.module.rand_parameters( + teacher_cand, + self.pick_method == 'meta'), + grad_outputs=grad_student_val) + return grad_teacher + + # forward training data + def _forward_training(self, x, meta_value): + self._replace_mutator_cand(self.current_student_arch) + output = self.model(x) + + with torch.no_grad(): + self._replace_mutator_cand(self.current_teacher_arch) + teacher_output = self.model(x) + soft_label = torch.nn.functional.softmax(teacher_output, dim=1) + + kd_loss = meta_value * \ + self._cross_entropy_loss_with_soft_target(output, soft_label) + return kd_loss + + # calculate soft target loss + def _cross_entropy_loss_with_soft_target(self, pred, soft_target): + logsoftmax = torch.nn.LogSoftmax() + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + + # forward validation data + def _forward_validation(self, input, target): # pylint: disable=redefined-builtin + slice = self.slices # pylint: disable=redefined-builtin + x = input[slice:slice * 2].clone() + + self._replace_mutator_cand(self.current_student_arch) + output_2 = self.model(x) + + validation_loss = self.loss(output_2, target[slice:slice * 2]) + return validation_loss + + def _isUpdateMeta(self, batch_idx): + isUpdate = True + isUpdate &= (self.current_epoch > self.meta_sta_epoch) + isUpdate &= (batch_idx > 0) + isUpdate &= (batch_idx % self.update_iter == 0) + isUpdate &= (self._board_size() > 0) + return isUpdate + + def _replace_mutator_cand(self, cand): + self.mutator._cache = cand + + # update meta matching networks + def _run_update(self, input, target, 
batch_idx): # pylint: disable=redefined-builtin + if self._isUpdateMeta(batch_idx): + x = self._get_minibatch_input(input) + + meta_value, teacher_cand = self._select_teacher() + + kd_loss = self._forward_training(x, meta_value) + + # calculate 1st gradient + grad_1st = self._calculate_1st_gradient(kd_loss) + + # simulate updated student weights + students_weight = [ + self._simulate_sgd_update( + p, grad_item, self.optimizer) for p, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1st)] + + # update student weights + self._update_student_weights_only(grad_1st) + + validation_loss = self._forward_validation(input, target) + + # calculate 2nd gradient + grad_teacher = self._calculate_2nd_gradient(validation_loss, teacher_cand, students_weight) + + # update meta matching networks + self._update_meta_weights_only(teacher_cand, grad_teacher) + + # delete internal variants + del grad_teacher, grad_1st, x, validation_loss, kd_loss, students_weight + + def _get_cand_flops(self, cand): + flops = 0 + for block_id, block in enumerate(cand): + if block == 'LayerChoice1' or block_id == 'LayerChoice23': + continue + for idx, choice in enumerate(cand[block]): + flops += self.flops_dict[block_id][idx] * (1 if choice else 0) + return flops + self.flops_fixed + + def train_one_epoch(self, epoch): + self.current_epoch = epoch + meters = AverageMeterGroup() + self.steps_per_epoch = len(self.train_loader) + for step, (input_data, target) in enumerate(self.train_loader): + self.mutator.reset() + self.current_student_arch = self.mutator._cache + + input_data, target = input_data.cuda(), target.cuda() + + # calculate flops of current architecture + cand_flops = self._get_cand_flops(self.mutator._cache) + + # update meta matching network + self._run_update(input_data, target, step) + + if self._board_size() > 0: + # select teacher architecture + meta_value, teacher_cand = self._select_teacher() + self.current_teacher_arch = teacher_cand + + # forward supernet + if self._board_size() == 0 or epoch <= self.meta_sta_epoch: + self._replace_mutator_cand(self.current_student_arch) + output = self.model(input_data) + + loss = self.loss(output, target) + kd_loss, teacher_output, teacher_cand = None, None, None + else: + self._replace_mutator_cand(self.current_student_arch) + output = self.model(input_data) + + gt_loss = self.loss(output, target) + + with torch.no_grad(): + self._replace_mutator_cand(self.current_teacher_arch) + teacher_output = self.model(input_data).detach() + + soft_label = torch.nn.functional.softmax(teacher_output, dim=1) + kd_loss = self._cross_entropy_loss_with_soft_target(output, soft_label) + + loss = (meta_value * kd_loss + (2 - meta_value) * gt_loss) / 2 + + # update network + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + # update metrics + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} + metrics = reduce_metrics(metrics) + meters.update(metrics) + + # update prioritized board + self._update_prioritized_board(input_data, teacher_output, output, metrics['prec1'], cand_flops) + + if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): + logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs, + step + 1, len(self.train_loader), meters) + + if self.main_proc and self.num_epochs == epoch + 1: + for idx, i in enumerate(self.prioritized_board): + logger.info("No.%s %s", idx, i[:4]) + + def validate_one_epoch(self, epoch): 
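+        # Each validation step resamples an architecture via mutator.reset(), so the
+        # metrics below are averaged over randomly sampled subnets of the supernet.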
+ self.model.eval() + meters = AverageMeterGroup() + with torch.no_grad(): + for step, (x, y) in enumerate(self.valid_loader): + self.mutator.reset() + logits = self.model(x) + loss = self.val_loss(logits, y) + prec1, prec5 = accuracy(logits, y, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} + metrics = reduce_metrics(metrics) + meters.update(metrics) + + if self.log_frequency is not None and step % self.log_frequency == 0: + logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1, + self.num_epochs, step + 1, len(self.valid_loader), meters) diff --git a/nni/algorithms/nas/pytorch/cream/utils.py b/nni/algorithms/nas/pytorch/cream/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d71faa715b379616c1affa6aaca319977435e65 --- /dev/null +++ b/nni/algorithms/nas/pytorch/cream/utils.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + + +import os +import torch.distributed as dist + + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(1.0 / batch_size)) + return res + + +def reduce_metrics(metrics): + return {k: reduce_tensor(v).item() for k, v in metrics.items()} + + +def reduce_tensor(tensor): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= float(os.environ["WORLD_SIZE"]) + return rt diff --git a/nni/algorithms/nas/pytorch/darts/__init__.py b/nni/algorithms/nas/pytorch/darts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1a22790fb90b37591dd58f590a3b528b78b8257e --- /dev/null +++ b/nni/algorithms/nas/pytorch/darts/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .mutator import DartsMutator +from .trainer import DartsTrainer diff --git a/nni/algorithms/nas/pytorch/darts/mutator.py b/nni/algorithms/nas/pytorch/darts/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c3898a9b531ea160ea64a17d2e345f6b67d040 --- /dev/null +++ b/nni/algorithms/nas/pytorch/darts/mutator.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nni.nas.pytorch.mutator import Mutator +from nni.nas.pytorch.mutables import LayerChoice, InputChoice + +_logger = logging.getLogger(__name__) + + +class DartsMutator(Mutator): + """ + Connects the model in a DARTS (differentiable) way. + + An extra connection is automatically inserted for each LayerChoice, when this connection is selected, there is no + op on this LayerChoice (namely a ``ZeroOp``), in which case, every element in the exported choice list is ``false`` + (not chosen). + + All input choice will be fully connected in the search phase. On exporting, the input choice will choose inputs based + on keys in ``choose_from``. If the keys were to be keys of LayerChoices, the top logit of the corresponding LayerChoice + will join the competition of input choice to compete against other logits. Otherwise, the logit will be assumed 0. 
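+
+    For example (key and numbers illustrative), a LayerChoice with 3 candidate ops owns
+    4 logits internally (the extra slot is the implicit zero op), and the search-phase
+    export drops that slot after softmax::
+
+        mutator = DartsMutator(model)
+        mutator.sample_search()["conv_key"]  # e.g. tensor([0.31, 0.27, 0.22])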
+ + It's possible to cut branches by setting parameter ``choices`` in a particular position to ``-inf``. After softmax, the + value would be 0. Framework will ignore 0 values and not connect. Note that the gradient on the ``-inf`` location will + be 0. Since manipulations with ``-inf`` will be ``nan``, you need to handle the gradient update phase carefully. + + Attributes + ---------- + choices: ParameterDict + dict that maps keys of LayerChoices to weighted-connection float tensors. + """ + def __init__(self, model): + super().__init__(model) + self.choices = nn.ParameterDict() + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + self.choices[mutable.key] = nn.Parameter(1.0E-3 * torch.randn(mutable.length + 1)) + + def device(self): + for v in self.choices.values(): + return v.device + + def sample_search(self): + result = dict() + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + result[mutable.key] = F.softmax(self.choices[mutable.key], dim=-1)[:-1] + elif isinstance(mutable, InputChoice): + result[mutable.key] = torch.ones(mutable.n_candidates, dtype=torch.bool, device=self.device()) + return result + + def sample_final(self): + result = dict() + edges_max = dict() + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + max_val, index = torch.max(F.softmax(self.choices[mutable.key], dim=-1)[:-1], 0) + edges_max[mutable.key] = max_val + result[mutable.key] = F.one_hot(index, num_classes=len(mutable)).view(-1).bool() + for mutable in self.mutables: + if isinstance(mutable, InputChoice): + if mutable.n_chosen is not None: + weights = [] + for src_key in mutable.choose_from: + if src_key not in edges_max: + _logger.warning("InputChoice.NO_KEY in '%s' is weighted 0 when selecting inputs.", mutable.key) + weights.append(edges_max.get(src_key, 0.)) + weights = torch.tensor(weights) # pylint: disable=not-callable + _, topk_edge_indices = torch.topk(weights, mutable.n_chosen) + selected_multihot = [] + for i, src_key in enumerate(mutable.choose_from): + if i not in topk_edge_indices and src_key in result: + # If an edge is never selected, there is no need to calculate any op on this edge. + # This is to eliminate redundant calculation. + result[src_key] = torch.zeros_like(result[src_key]) + selected_multihot.append(i in topk_edge_indices) + result[mutable.key] = torch.tensor(selected_multihot, dtype=torch.bool, device=self.device()) # pylint: disable=not-callable + else: + result[mutable.key] = torch.ones(mutable.n_candidates, dtype=torch.bool, device=self.device()) # pylint: disable=not-callable + return result diff --git a/nni/algorithms/nas/pytorch/darts/trainer.py b/nni/algorithms/nas/pytorch/darts/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d8e1866b68ad259771140ab0a7b86c43185f67 --- /dev/null +++ b/nni/algorithms/nas/pytorch/darts/trainer.py @@ -0,0 +1,214 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import logging + +import torch +import torch.nn as nn +from nni.nas.pytorch.trainer import Trainer +from nni.nas.pytorch.utils import AverageMeterGroup + +from .mutator import DartsMutator + +logger = logging.getLogger(__name__) + + +class DartsTrainer(Trainer): + """ + DARTS trainer. + + Parameters + ---------- + model : nn.Module + PyTorch model to be trained. + loss : callable + Receives logits and ground truth label, return a loss tensor. + metrics : callable + Receives logits and ground truth label, return a dict of metrics. 
+ optimizer : Optimizer + The optimizer used for optimizing the model. + num_epochs : int + Number of epochs planned for training. + dataset_train : Dataset + Dataset for training. Will be split for training weights and architecture weights. + dataset_valid : Dataset + Dataset for testing. + mutator : DartsMutator + Use in case of customizing your own DartsMutator. By default will instantiate a DartsMutator. + batch_size : int + Batch size. + workers : int + Workers for data loading. + device : torch.device + ``torch.device("cpu")`` or ``torch.device("cuda")``. + log_frequency : int + Step count per logging. + callbacks : list of Callback + list of callbacks to trigger at events. + arc_learning_rate : float + Learning rate of architecture parameters. + unrolled : float + ``True`` if using second order optimization, else first order optimization. + """ + def __init__(self, model, loss, metrics, + optimizer, num_epochs, dataset_train, dataset_valid, + mutator=None, batch_size=64, workers=4, device=None, log_frequency=None, + callbacks=None, arc_learning_rate=3.0E-4, unrolled=False): + super().__init__(model, mutator if mutator is not None else DartsMutator(model), + loss, metrics, optimizer, num_epochs, dataset_train, dataset_valid, + batch_size, workers, device, log_frequency, callbacks) + + self.ctrl_optim = torch.optim.Adam(self.mutator.parameters(), arc_learning_rate, betas=(0.5, 0.999), + weight_decay=1.0E-3) + self.unrolled = unrolled + + n_train = len(self.dataset_train) + split = n_train // 2 + indices = list(range(n_train)) + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) + self.train_loader = torch.utils.data.DataLoader(self.dataset_train, + batch_size=batch_size, + sampler=train_sampler, + num_workers=workers) + self.valid_loader = torch.utils.data.DataLoader(self.dataset_train, + batch_size=batch_size, + sampler=valid_sampler, + num_workers=workers) + self.test_loader = torch.utils.data.DataLoader(self.dataset_valid, + batch_size=batch_size, + num_workers=workers) + + def train_one_epoch(self, epoch): + self.model.train() + self.mutator.train() + meters = AverageMeterGroup() + for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(self.train_loader, self.valid_loader)): + trn_X, trn_y = trn_X.to(self.device), trn_y.to(self.device) + val_X, val_y = val_X.to(self.device), val_y.to(self.device) + + # phase 1. architecture step + self.ctrl_optim.zero_grad() + if self.unrolled: + self._unrolled_backward(trn_X, trn_y, val_X, val_y) + else: + self._backward(val_X, val_y) + self.ctrl_optim.step() + + # phase 2: child network step + self.optimizer.zero_grad() + logits, loss = self._logits_and_loss(trn_X, trn_y) + loss.backward() + nn.utils.clip_grad_norm_(self.model.parameters(), 5.) 
# gradient clipping
+            self.optimizer.step()
+
+            metrics = self.metrics(logits, trn_y)
+            metrics["loss"] = loss.item()
+            meters.update(metrics)
+            if self.log_frequency is not None and step % self.log_frequency == 0:
+                logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
+                            self.num_epochs, step + 1, len(self.train_loader), meters)
+
+    def validate_one_epoch(self, epoch):
+        self.model.eval()
+        self.mutator.eval()
+        meters = AverageMeterGroup()
+        with torch.no_grad():
+            self.mutator.reset()
+            for step, (X, y) in enumerate(self.test_loader):
+                X, y = X.to(self.device), y.to(self.device)
+                logits = self.model(X)
+                metrics = self.metrics(logits, y)
+                meters.update(metrics)
+                if self.log_frequency is not None and step % self.log_frequency == 0:
+                    logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
+                                self.num_epochs, step + 1, len(self.test_loader), meters)
+
+    def _logits_and_loss(self, X, y):
+        self.mutator.reset()
+        logits = self.model(X)
+        loss = self.loss(logits, y)
+        self._write_graph_status()
+        return logits, loss
+
+    def _backward(self, val_X, val_y):
+        """
+        Simple backward with gradient descent (first-order approximation).
+        """
+        _, loss = self._logits_and_loss(val_X, val_y)
+        loss.backward()
+
+    def _unrolled_backward(self, trn_X, trn_y, val_X, val_y):
+        """
+        Compute the unrolled loss and backpropagate its gradients.
+        """
+        backup_params = copy.deepcopy(tuple(self.model.parameters()))
+
+        # do a virtual step on training data
+        lr = self.optimizer.param_groups[0]["lr"]
+        momentum = self.optimizer.param_groups[0]["momentum"]
+        weight_decay = self.optimizer.param_groups[0]["weight_decay"]
+        self._compute_virtual_model(trn_X, trn_y, lr, momentum, weight_decay)
+
+        # calculate the unrolled loss on validation data
+        # keep gradients for the model here to compute the hessian
+        _, loss = self._logits_and_loss(val_X, val_y)
+        w_model, w_ctrl = tuple(self.model.parameters()), tuple(self.mutator.parameters())
+        w_grads = torch.autograd.grad(loss, w_model + w_ctrl)
+        d_model, d_ctrl = w_grads[:len(w_model)], w_grads[len(w_model):]
+
+        # compute the hessian and final gradients
+        hessian = self._compute_hessian(backup_params, d_model, trn_X, trn_y)
+        with torch.no_grad():
+            for param, d, h in zip(w_ctrl, d_ctrl, hessian):
+                # gradient = dalpha - lr * hessian
+                param.grad = d - lr * h
+
+        # restore weights
+        self._restore_weights(backup_params)
+
+    def _compute_virtual_model(self, X, y, lr, momentum, weight_decay):
+        """
+        Compute unrolled weights w`
+        """
+        # don't need zero_grad; using autograd to calculate gradients
+        _, loss = self._logits_and_loss(X, y)
+        gradients = torch.autograd.grad(loss, self.model.parameters())
+        with torch.no_grad():
+            for w, g in zip(self.model.parameters(), gradients):
+                m = self.optimizer.state[w].get("momentum_buffer", 0.)
+                # update in place: rebinding `w` (`w = w - ...`) would leave the parameter unchanged
+                w -= lr * (momentum * m + g + weight_decay * w)
+
+    def _restore_weights(self, backup_params):
+        with torch.no_grad():
+            for param, backup in zip(self.model.parameters(), backup_params):
+                param.copy_(backup)
+
+    def _compute_hessian(self, backup_params, dw, trn_X, trn_y):
+        """
+        dw = dw` { L_val(w`, alpha) }
+        w+ = w + eps * dw
+        w- = w - eps * dw
+        hessian = (dalpha { L_trn(w+, alpha) } - dalpha { L_trn(w-, alpha) }) / (2*eps)
+        eps = 0.01 / ||dw||
+        """
+        self._restore_weights(backup_params)
+        norm = torch.cat([w.view(-1) for w in dw]).norm()
+        eps = 0.01 / norm
+        if norm < 1E-8:
+            logger.warning("In computing hessian, norm is smaller than 1E-8, causing eps to be large (norm = %.6f).", norm.item())
+
+        dalphas = []
+        for e in [eps, -2.
* eps]: + # w+ = w + eps*dw`, w- = w - eps*dw` + with torch.no_grad(): + for p, d in zip(self.model.parameters(), dw): + p += e * d + + _, loss = self._logits_and_loss(trn_X, trn_y) + dalphas.append(torch.autograd.grad(loss, self.mutator.parameters())) + + dalpha_pos, dalpha_neg = dalphas # dalpha { L_trn(w+) }, # dalpha { L_trn(w-) } + hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)] + return hessian diff --git a/nni/algorithms/nas/pytorch/enas/__init__.py b/nni/algorithms/nas/pytorch/enas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3372836ebba2387ade58161218aad731433e46b --- /dev/null +++ b/nni/algorithms/nas/pytorch/enas/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .mutator import EnasMutator +from .trainer import EnasTrainer diff --git a/nni/algorithms/nas/pytorch/enas/mutator.py b/nni/algorithms/nas/pytorch/enas/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdba26b99bf72551293fdb77ff61beb974bba8f --- /dev/null +++ b/nni/algorithms/nas/pytorch/enas/mutator.py @@ -0,0 +1,197 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nni.nas.pytorch.mutator import Mutator +from nni.nas.pytorch.mutables import LayerChoice, InputChoice, MutableScope + + +class StackedLSTMCell(nn.Module): + def __init__(self, layers, size, bias): + super().__init__() + self.lstm_num_layers = layers + self.lstm_modules = nn.ModuleList([nn.LSTMCell(size, size, bias=bias) + for _ in range(self.lstm_num_layers)]) + + def forward(self, inputs, hidden): + prev_h, prev_c = hidden + next_h, next_c = [], [] + for i, m in enumerate(self.lstm_modules): + curr_h, curr_c = m(inputs, (prev_h[i], prev_c[i])) + next_c.append(curr_c) + next_h.append(curr_h) + # current implementation only supports batch size equals 1, + # but the algorithm does not necessarily have this limitation + inputs = curr_h[-1].view(1, -1) + return next_h, next_c + + +class EnasMutator(Mutator): + """ + A mutator that mutates the graph with RL. + + Parameters + ---------- + model : nn.Module + PyTorch model. + lstm_size : int + Controller LSTM hidden units. + lstm_num_layers : int + Number of layers for stacked LSTM. + tanh_constant : float + Logits will be equal to ``tanh_constant * tanh(logits)``. Don't use ``tanh`` if this value is ``None``. + cell_exit_extra_step : bool + If true, RL controller will perform an extra step at the exit of each MutableScope, dump the hidden state + and mark it as the hidden state of this MutableScope. This is to align with the original implementation of paper. + skip_target : float + Target probability that skipconnect will appear. + temperature : float + Temperature constant that divides the logits. + branch_bias : float + Manual bias applied to make some operations more likely to be chosen. + Currently this is implemented with a hardcoded match rule that aligns with original repo. + If a mutable has a ``reduce`` in its key, all its op choices + that contains `conv` in their typename will receive a bias of ``+self.branch_bias`` initially; while others + receive a bias of ``-self.branch_bias``. + entropy_reduction : str + Can be one of ``sum`` and ``mean``. How the entropy of multi-input-choice is reduced. 
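+
+    A minimal construction sketch (the values shown are the defaults)::
+
+        mutator = EnasMutator(model, lstm_size=64, tanh_constant=1.5, entropy_reduction="sum")
+        mutator.reset()                     # samples an architecture with the RL controller
+        log_prob = mutator.sample_log_prob  # REINFORCE log-probability of the sampled choices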
+ """ + + def __init__(self, model, lstm_size=64, lstm_num_layers=1, tanh_constant=1.5, cell_exit_extra_step=False, + skip_target=0.4, temperature=None, branch_bias=0.25, entropy_reduction="sum"): + super().__init__(model) + self.lstm_size = lstm_size + self.lstm_num_layers = lstm_num_layers + self.tanh_constant = tanh_constant + self.temperature = temperature + self.cell_exit_extra_step = cell_exit_extra_step + self.skip_target = skip_target + self.branch_bias = branch_bias + + self.lstm = StackedLSTMCell(self.lstm_num_layers, self.lstm_size, False) + self.attn_anchor = nn.Linear(self.lstm_size, self.lstm_size, bias=False) + self.attn_query = nn.Linear(self.lstm_size, self.lstm_size, bias=False) + self.v_attn = nn.Linear(self.lstm_size, 1, bias=False) + self.g_emb = nn.Parameter(torch.randn(1, self.lstm_size) * 0.1) + self.skip_targets = nn.Parameter(torch.tensor([1.0 - self.skip_target, self.skip_target]), requires_grad=False) # pylint: disable=not-callable + assert entropy_reduction in ["sum", "mean"], "Entropy reduction must be one of sum and mean." + self.entropy_reduction = torch.sum if entropy_reduction == "sum" else torch.mean + self.cross_entropy_loss = nn.CrossEntropyLoss(reduction="none") + self.bias_dict = nn.ParameterDict() + + self.max_layer_choice = 0 + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + if self.max_layer_choice == 0: + self.max_layer_choice = len(mutable) + assert self.max_layer_choice == len(mutable), \ + "ENAS mutator requires all layer choice have the same number of candidates." + # We are judging by keys and module types to add biases to layer choices. Needs refactor. + if "reduce" in mutable.key: + def is_conv(choice): + return "conv" in str(type(choice)).lower() + bias = torch.tensor([self.branch_bias if is_conv(choice) else -self.branch_bias # pylint: disable=not-callable + for choice in mutable]) + self.bias_dict[mutable.key] = nn.Parameter(bias, requires_grad=False) + + self.embedding = nn.Embedding(self.max_layer_choice + 1, self.lstm_size) + self.soft = nn.Linear(self.lstm_size, self.max_layer_choice, bias=False) + + def sample_search(self): + self._initialize() + self._sample(self.mutables) + return self._choices + + def sample_final(self): + return self.sample_search() + + def _sample(self, tree): + mutable = tree.mutable + if isinstance(mutable, LayerChoice) and mutable.key not in self._choices: + self._choices[mutable.key] = self._sample_layer_choice(mutable) + elif isinstance(mutable, InputChoice) and mutable.key not in self._choices: + self._choices[mutable.key] = self._sample_input_choice(mutable) + for child in tree.children: + self._sample(child) + if isinstance(mutable, MutableScope) and mutable.key not in self._anchors_hid: + if self.cell_exit_extra_step: + self._lstm_next_step() + self._mark_anchor(mutable.key) + + def _initialize(self): + self._choices = dict() + self._anchors_hid = dict() + self._inputs = self.g_emb.data + self._c = [torch.zeros((1, self.lstm_size), + dtype=self._inputs.dtype, + device=self._inputs.device) for _ in range(self.lstm_num_layers)] + self._h = [torch.zeros((1, self.lstm_size), + dtype=self._inputs.dtype, + device=self._inputs.device) for _ in range(self.lstm_num_layers)] + self.sample_log_prob = 0 + self.sample_entropy = 0 + self.sample_skip_penalty = 0 + + def _lstm_next_step(self): + self._h, self._c = self.lstm(self._inputs, (self._h, self._c)) + + def _mark_anchor(self, key): + self._anchors_hid[key] = self._h[-1] + + def _sample_layer_choice(self, mutable): + 
self._lstm_next_step()
+        logit = self.soft(self._h[-1])
+        if self.temperature is not None:
+            logit /= self.temperature
+        if self.tanh_constant is not None:
+            logit = self.tanh_constant * torch.tanh(logit)
+        if mutable.key in self.bias_dict:
+            logit += self.bias_dict[mutable.key]
+        branch_id = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
+        log_prob = self.cross_entropy_loss(logit, branch_id)
+        self.sample_log_prob += self.entropy_reduction(log_prob)
+        entropy = (log_prob * torch.exp(-log_prob)).detach()  # pylint: disable=invalid-unary-operand-type
+        self.sample_entropy += self.entropy_reduction(entropy)
+        self._inputs = self.embedding(branch_id)
+        return F.one_hot(branch_id, num_classes=self.max_layer_choice).bool().view(-1)
+
+    def _sample_input_choice(self, mutable):
+        query, anchors = [], []
+        for label in mutable.choose_from:
+            if label not in self._anchors_hid:
+                # anchor not marked yet (the scope was never entered):
+                # take an extra LSTM step and mark it now
+                self._lstm_next_step()
+                self._mark_anchor(label)
+            query.append(self.attn_anchor(self._anchors_hid[label]))
+            anchors.append(self._anchors_hid[label])
+        query = torch.cat(query, 0)
+        query = torch.tanh(query + self.attn_query(self._h[-1]))
+        query = self.v_attn(query)
+        if self.temperature is not None:
+            query /= self.temperature
+        if self.tanh_constant is not None:
+            query = self.tanh_constant * torch.tanh(query)
+
+        if mutable.n_chosen is None:
+            logit = torch.cat([-query, query], 1)  # pylint: disable=invalid-unary-operand-type
+
+            skip = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
+            skip_prob = torch.sigmoid(logit)
+            kl = torch.sum(skip_prob * torch.log(skip_prob / self.skip_targets))
+            self.sample_skip_penalty += kl
+            log_prob = self.cross_entropy_loss(logit, skip)
+            self._inputs = (torch.matmul(skip.float(), torch.cat(anchors, 0)) / (1. + torch.sum(skip))).unsqueeze(0)
+        else:
+            assert mutable.n_chosen == 1, "ENAS supports input choices that select any number of inputs, or exactly one."
+            logit = query.view(1, -1)
+            index = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
+            skip = F.one_hot(index, num_classes=mutable.n_candidates).view(-1)
+            log_prob = self.cross_entropy_loss(logit, index)
+            self._inputs = anchors[index.item()]
+
+        self.sample_log_prob += self.entropy_reduction(log_prob)
+        entropy = (log_prob * torch.exp(-log_prob)).detach()  # pylint: disable=invalid-unary-operand-type
+        self.sample_entropy += self.entropy_reduction(entropy)
+        return skip.bool()
diff --git a/nni/algorithms/nas/pytorch/enas/trainer.py b/nni/algorithms/nas/pytorch/enas/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e7a966580a107ceb6b9fe88f888dafefb8b69e7
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/enas/trainer.py
@@ -0,0 +1,209 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from itertools import cycle
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+from nni.nas.pytorch.trainer import Trainer
+from nni.nas.pytorch.utils import AverageMeterGroup, to_device
+from .mutator import EnasMutator
+
+logger = logging.getLogger(__name__)
+
+
+class EnasTrainer(Trainer):
+    """
+    ENAS trainer.
+
+    Parameters
+    ----------
+    model : nn.Module
+        PyTorch model to be trained.
+    loss : callable
+        Receives logits and ground truth label, returns a loss tensor.
+    metrics : callable
+        Receives logits and ground truth label, returns a dict of metrics.
+    reward_function : callable
+        Receives logits and ground truth label, returns a tensor, which will be fed to the RL controller as reward.
+ optimizer : Optimizer + The optimizer used for optimizing the model. + num_epochs : int + Number of epochs planned for training. + dataset_train : Dataset + Dataset for training. Will be split for training weights and architecture weights. + dataset_valid : Dataset + Dataset for testing. + mutator : EnasMutator + Use when customizing your own mutator or a mutator with customized parameters. + batch_size : int + Batch size. + workers : int + Workers for data loading. + device : torch.device + ``torch.device("cpu")`` or ``torch.device("cuda")``. + log_frequency : int + Step count per logging. + callbacks : list of Callback + list of callbacks to trigger at events. + entropy_weight : float + Weight of sample entropy loss. + skip_weight : float + Weight of skip penalty loss. + baseline_decay : float + Decay factor of baseline. New baseline will be equal to ``baseline_decay * baseline_old + reward * (1 - baseline_decay)``. + child_steps : int + How many mini-batches for model training per epoch. + mutator_lr : float + Learning rate for RL controller. + mutator_steps_aggregate : int + Number of steps that will be aggregated into one mini-batch for RL controller. + mutator_steps : int + Number of mini-batches for each epoch of RL controller learning. + aux_weight : float + Weight of auxiliary head loss. ``aux_weight * aux_loss`` will be added to total loss. + test_arc_per_epoch : int + How many architectures are chosen for direct test after each epoch. + """ + def __init__(self, model, loss, metrics, reward_function, + optimizer, num_epochs, dataset_train, dataset_valid, + mutator=None, batch_size=64, workers=4, device=None, log_frequency=None, callbacks=None, + entropy_weight=0.0001, skip_weight=0.8, baseline_decay=0.999, child_steps=500, + mutator_lr=0.00035, mutator_steps_aggregate=20, mutator_steps=50, aux_weight=0.4, + test_arc_per_epoch=1): + super().__init__(model, mutator if mutator is not None else EnasMutator(model), + loss, metrics, optimizer, num_epochs, dataset_train, dataset_valid, + batch_size, workers, device, log_frequency, callbacks) + self.reward_function = reward_function + self.mutator_optim = optim.Adam(self.mutator.parameters(), lr=mutator_lr) + self.batch_size = batch_size + self.workers = workers + + self.entropy_weight = entropy_weight + self.skip_weight = skip_weight + self.baseline_decay = baseline_decay + self.baseline = 0. 
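+        # Moving-average reward baseline for REINFORCE, updated each controller step as
+        # baseline = baseline * baseline_decay + reward * (1 - baseline_decay);
+        # subtracting it from the reward reduces the variance of the policy gradient.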
+ self.mutator_steps_aggregate = mutator_steps_aggregate + self.mutator_steps = mutator_steps + self.child_steps = child_steps + self.aux_weight = aux_weight + self.test_arc_per_epoch = test_arc_per_epoch + + self.init_dataloader() + + def init_dataloader(self): + n_train = len(self.dataset_train) + split = n_train // 10 + indices = list(range(n_train)) + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:-split]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[-split:]) + self.train_loader = torch.utils.data.DataLoader(self.dataset_train, + batch_size=self.batch_size, + sampler=train_sampler, + num_workers=self.workers) + self.valid_loader = torch.utils.data.DataLoader(self.dataset_train, + batch_size=self.batch_size, + sampler=valid_sampler, + num_workers=self.workers) + self.test_loader = torch.utils.data.DataLoader(self.dataset_valid, + batch_size=self.batch_size, + num_workers=self.workers) + self.train_loader = cycle(self.train_loader) + self.valid_loader = cycle(self.valid_loader) + + def train_one_epoch(self, epoch): + # Sample model and train + self.model.train() + self.mutator.eval() + meters = AverageMeterGroup() + for step in range(1, self.child_steps + 1): + x, y = next(self.train_loader) + x, y = to_device(x, self.device), to_device(y, self.device) + self.optimizer.zero_grad() + + with torch.no_grad(): + self.mutator.reset() + self._write_graph_status() + logits = self.model(x) + + if isinstance(logits, tuple): + logits, aux_logits = logits + aux_loss = self.loss(aux_logits, y) + else: + aux_loss = 0. + metrics = self.metrics(logits, y) + loss = self.loss(logits, y) + loss = loss + self.aux_weight * aux_loss + loss.backward() + nn.utils.clip_grad_norm_(self.model.parameters(), 5.) + self.optimizer.step() + metrics["loss"] = loss.item() + meters.update(metrics) + + if self.log_frequency is not None and step % self.log_frequency == 0: + logger.info("Model Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, + self.num_epochs, step, self.child_steps, meters) + + # Train sampler (mutator) + self.model.eval() + self.mutator.train() + meters = AverageMeterGroup() + for mutator_step in range(1, self.mutator_steps + 1): + self.mutator_optim.zero_grad() + for step in range(1, self.mutator_steps_aggregate + 1): + x, y = next(self.valid_loader) + x, y = to_device(x, self.device), to_device(y, self.device) + + self.mutator.reset() + with torch.no_grad(): + logits = self.model(x) + self._write_graph_status() + metrics = self.metrics(logits, y) + reward = self.reward_function(logits, y) + if self.entropy_weight: + reward += self.entropy_weight * self.mutator.sample_entropy.item() + self.baseline = self.baseline * self.baseline_decay + reward * (1 - self.baseline_decay) + loss = self.mutator.sample_log_prob * (reward - self.baseline) + if self.skip_weight: + loss += self.skip_weight * self.mutator.sample_skip_penalty + metrics["reward"] = reward + metrics["loss"] = loss.item() + metrics["ent"] = self.mutator.sample_entropy.item() + metrics["log_prob"] = self.mutator.sample_log_prob.item() + metrics["baseline"] = self.baseline + metrics["skip"] = self.mutator.sample_skip_penalty + + loss /= self.mutator_steps_aggregate + loss.backward() + meters.update(metrics) + + cur_step = step + (mutator_step - 1) * self.mutator_steps_aggregate + if self.log_frequency is not None and cur_step % self.log_frequency == 0: + logger.info("RL Epoch [%d/%d] Step [%d/%d] [%d/%d] %s", epoch + 1, self.num_epochs, + mutator_step, self.mutator_steps, step, 
self.mutator_steps_aggregate, + meters) + + nn.utils.clip_grad_norm_(self.mutator.parameters(), 5.) + self.mutator_optim.step() + + def validate_one_epoch(self, epoch): + with torch.no_grad(): + for arc_id in range(self.test_arc_per_epoch): + meters = AverageMeterGroup() + for x, y in self.test_loader: + x, y = to_device(x, self.device), to_device(y, self.device) + self.mutator.reset() + logits = self.model(x) + if isinstance(logits, tuple): + logits, _ = logits + metrics = self.metrics(logits, y) + loss = self.loss(logits, y) + metrics["loss"] = loss.item() + meters.update(metrics) + + logger.info("Test Epoch [%d/%d] Arc [%d/%d] Summary %s", + epoch + 1, self.num_epochs, arc_id + 1, self.test_arc_per_epoch, + meters.summary()) diff --git a/nni/algorithms/nas/pytorch/fbnet/__init__.py b/nni/algorithms/nas/pytorch/fbnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa15cc64a77a6b7b4573d5a3bb03b9d6895d1f08 --- /dev/null +++ b/nni/algorithms/nas/pytorch/fbnet/__init__.py @@ -0,0 +1,11 @@ +from __future__ import absolute_import + +from .mutator import FBNetMutator # noqa: F401 +from .trainer import FBNetTrainer # noqa: F401 +from .utils import ( # noqa: F401 + LookUpTable, + NASConfig, + RegularizerLoss, + model_init, + supernet_sample, +) diff --git a/nni/algorithms/nas/pytorch/fbnet/mutator.py b/nni/algorithms/nas/pytorch/fbnet/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..42b46afcabbcefde53af840e8ddbf822d088ccc9 --- /dev/null +++ b/nni/algorithms/nas/pytorch/fbnet/mutator.py @@ -0,0 +1,268 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import torch +from torch import nn as nn +from torch.nn import functional as F +import numpy as np + +from nni.nas.pytorch.base_mutator import BaseMutator +from nni.nas.pytorch.mutables import LayerChoice + + +class MixedOp(nn.Module): + """ + This class is to instantiate and manage info of one LayerChoice. + It includes architecture weights and member functions for the weights. + """ + + def __init__(self, mutable, latency): + """ + Parameters + ---------- + mutable : LayerChoice + A LayerChoice in user model + latency : List + performance cost for each op in mutable + """ + super(MixedOp, self).__init__() + self.latency = latency + n_choices = len(mutable) + self.path_alpha = nn.Parameter( + torch.FloatTensor([1.0 / n_choices for i in range(n_choices)]) + ) + self.path_alpha.requires_grad = False + self.temperature = 1.0 + + def get_path_alpha(self): + """Return the architecture parameter.""" + return self.path_alpha + + def get_weighted_latency(self): + """Return the weighted perf_cost of current mutable.""" + soft_masks = self.probs_over_ops() + weighted_latency = sum(m * l for m, l in zip(soft_masks, self.latency)) + return weighted_latency + + def set_temperature(self, temperature): + """ + Set the annealed temperature for gumbel softmax. 
+
+        Parameters
+        ----------
+        temperature : float
+            The annealed temperature for gumbel softmax
+        """
+        self.temperature = temperature
+
+    def to_requires_grad(self):
+        """Enable gradient calculation."""
+        self.path_alpha.requires_grad = True
+
+    def to_disable_grad(self):
+        """Disable gradient calculation."""
+        self.path_alpha.requires_grad = False
+
+    def probs_over_ops(self):
+        """Apply gumbel softmax to generate probability distribution."""
+        return F.gumbel_softmax(self.path_alpha, self.temperature)
+
+    def forward(self, mutable, x):
+        """
+        Define forward of LayerChoice.
+
+        Parameters
+        ----------
+        mutable : LayerChoice
+            this layer's mutable
+        x : tensor
+            inputs of this layer, only support one input
+
+        Returns
+        -------
+        output: tensor
+            output of this layer
+        """
+        candidate_ops = list(mutable)
+        soft_masks = self.probs_over_ops()
+        output = sum(m * op(x) for m, op in zip(soft_masks, candidate_ops))
+
+        return output
+
+    @property
+    def chosen_index(self):
+        """
+        Choose the op with max prob.
+
+        Returns
+        -------
+        int
+            index of the chosen one
+        """
+        alphas = self.path_alpha.data.detach().cpu().numpy()
+        index = int(np.argmax(alphas))
+        return index
+
+
+class FBNetMutator(BaseMutator):
+    """
+    This mutator initializes and operates all the LayerChoices of the supernet.
+    It is for the related trainer to control the training flow of LayerChoices,
+    coordinating with the whole training process.
+    """
+
+    def __init__(self, model, lookup_table):
+        """
+        Init a MixedOp instance for each mutable, i.e., LayerChoice.
+        And register the instantiated MixedOp in the corresponding LayerChoice.
+        If it were not registered in the LayerChoice, DataParallel would not work,
+        because the architecture weights would not be included in the DataParallel model.
+        When MixedOps are registered, we use ```requires_grad``` to control
+        whether to calculate gradients of architecture weights.
+
+        Parameters
+        ----------
+        model : pytorch model
+            The model that users want to tune,
+            it includes search space defined with nni nas apis
+        lookup_table : class
+            lookup table object to manage model space information,
+            including candidate ops for each stage as the model space,
+            input channels/output channels/stride/fm_size as the layer config,
+            and the performance information for perf_cost accumulation.
+
+        """
+        super(FBNetMutator, self).__init__(model)
+        self.mutable_list = []
+
+        # Collect the op names of the candidate ops within each mutable
+        ops_names_mutable = dict()
+        left = 0
+        right = 1
+        for stage_name in lookup_table.layer_num:
+            right = lookup_table.layer_num[stage_name]
+            stage_ops = lookup_table.lut_ops[stage_name]
+            ops_names = [op_name for op_name in stage_ops]
+
+            for i in range(left, left + right):
+                ops_names_mutable[i] = ops_names
+            # advance the cursor by the number of layers in this stage;
+            # a plain ``left = right`` would misalign every stage after the second
+            left += right
+
+        # Create the mixed op
+        for i, mutable in enumerate(self.undedup_mutables):
+            ops_names = ops_names_mutable[i]
+            latency_mutable = lookup_table.lut_perf[i]
+            latency = [latency_mutable[op_name] for op_name in ops_names]
+            self.mutable_list.append(mutable)
+            mutable.registered_module = MixedOp(mutable, latency)
+
+    def on_forward_layer_choice(self, mutable, *args, **kwargs):
+        """
+        Callback of layer choice forward. This function defines the forward
+        logic of the input mutable. The mutable is only an interface; its real
+        implementation is defined in the mutator.
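+        The returned output is the Gumbel-softmax weighted sum over all candidate ops,
+        while the returned index is simply the current argmax of the architecture weights.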
+ + Parameters + ---------- + mutable: LayerChoice + forward logic of this input mutable + args: list of torch.Tensor + inputs of this mutable + kwargs: dict + inputs of this mutable + + Returns + ------- + torch.Tensor + output of this mutable, i.e., LayerChoice + int + index of the chosen op + """ + # FIXME: return mask, to be consistent with other algorithms + idx = mutable.registered_module.chosen_index + return mutable.registered_module(mutable, *args, **kwargs), idx + + def num_arch_params(self): + """ + The number of mutables, i.e., LayerChoice + + Returns + ------- + int + the number of LayerChoice in user model + """ + return len(self.mutable_list) + + def get_architecture_parameters(self): + """ + Get all the architecture parameters. + + yield + ----- + PyTorch Parameter + Return path_alpha of the traversed mutable + """ + for mutable in self.undedup_mutables: + yield mutable.registered_module.get_path_alpha() + + def get_weighted_latency(self): + """ + Get the latency weighted by gumbel softmax coefficients. + + yield + ----- + Tuple + Return the weighted_latency of the traversed mutable + """ + for mutable in self.undedup_mutables: + yield mutable.registered_module.get_weighted_latency() + + def set_temperature(self, temperature): + """ + Set the annealed temperature of the op for gumbel softmax. + + Parameters + ---------- + temperature : float + The annealed temperature for gumbel softmax + """ + for mutable in self.undedup_mutables: + mutable.registered_module.set_temperature(temperature) + + def arch_requires_grad(self): + """ + Make architecture weights require gradient + """ + for mutable in self.undedup_mutables: + mutable.registered_module.to_requires_grad() + + def arch_disable_grad(self): + """ + Disable gradient of architecture weights, i.e., does not + calculate gradient for them. + """ + for mutable in self.undedup_mutables: + mutable.registered_module.to_disable_grad() + + def sample_final(self): + """ + Generate the final chosen architecture. + + Returns + ------- + dict + the choice of each mutable, i.e., LayerChoice + """ + result = dict() + for mutable in self.undedup_mutables: + assert isinstance(mutable, LayerChoice) + index = mutable.registered_module.chosen_index + # pylint: disable=not-callable + result[mutable.key] = ( + F.one_hot(torch.tensor(index), num_classes=len(mutable)) + .view(-1) + .bool(), + ) + return result diff --git a/nni/algorithms/nas/pytorch/fbnet/trainer.py b/nni/algorithms/nas/pytorch/fbnet/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..1eaababef2490afabfc9bdb2c70757b38c48782c --- /dev/null +++ b/nni/algorithms/nas/pytorch/fbnet/trainer.py @@ -0,0 +1,413 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
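Before the trainer itself, a minimal self-contained sketch of the mechanism the mutator above implements: a layer choice relaxed with Gumbel-softmax, yielding both a differentiable mixed output and a differentiable expected latency. The candidate ops and latency numbers here are hypothetical placeholders, not NNI primitives.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# three hypothetical candidate ops for a single layer choice
candidate_ops = [nn.Conv2d(8, 8, 3, padding=1),
                 nn.Conv2d(8, 8, 5, padding=2),
                 nn.Identity()]
latency = torch.tensor([1.7, 3.2, 0.1])      # assumed per-op costs (e.g. FLOPs or ms)

# architecture weights, initialized uniformly as in MixedOp.path_alpha
alpha = torch.full((3,), 1.0 / 3, requires_grad=True)
temperature = 5.0                            # annealed toward 0 during the search

x = torch.randn(2, 8, 16, 16)
soft_mask = F.gumbel_softmax(alpha, tau=temperature)          # stochastic relaxation
output = sum(m * op(x) for m, op in zip(soft_mask, candidate_ops))
weighted_latency = (soft_mask * latency).sum()                # feeds the hardware-aware loss

# both the task loss and the latency term are differentiable w.r.t. alpha
(output.mean() + 0.25 * weighted_latency).backward()
print(alpha.grad)
```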
+ +from __future__ import absolute_import, division, print_function + +import json +import os +import time +import torch + +import numpy as np + +from torch.autograd import Variable +from nni.nas.pytorch.base_trainer import BaseTrainer +from nni.nas.pytorch.trainer import TorchTensorEncoder +from nni.nas.pytorch.utils import AverageMeter +from .mutator import FBNetMutator +from .utils import RegularizerLoss, accuracy + + +class FBNetTrainer(BaseTrainer): + def __init__( + self, + model, + model_optim, + criterion, + device, + device_ids, + lookup_table, + train_loader, + valid_loader, + n_epochs=120, + load_ckpt=False, + arch_path=None, + logger=None, + ): + """ + Parameters + ---------- + model : pytorch model + the user model, which has mutables + model_optim : pytorch optimizer + the user defined optimizer + criterion : pytorch loss + the main task loss, nn.CrossEntropyLoss() is for classification + device : pytorch device + the devices to train/search the model + device_ids : list of int + the indexes of devices used for training + lookup_table : class + lookup table object for fbnet training + train_loader : pytorch data loader + data loader for the training set + valid_loader : pytorch data loader + data loader for the validation set + n_epochs : int + number of epochs to train/search + load_ckpt : bool + whether load checkpoint + arch_path : str + the path to store chosen architecture + logger : logger + the logger + """ + self.model = model + self.model_optim = model_optim + self.train_loader = train_loader + self.valid_loader = valid_loader + self.device = device + self.dev_num = len(device_ids) + self.n_epochs = n_epochs + self.lookup_table = lookup_table + self.config = lookup_table.config + self.start_epoch = self.config.start_epoch + self.temp = self.config.init_temperature + self.exp_anneal_rate = self.config.exp_anneal_rate + self.mode = self.config.mode + + self.load_ckpt = load_ckpt + self.arch_path = arch_path + self.logger = logger + + # scheduler of learning rate + self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + model_optim, T_max=n_epochs, last_epoch=-1 + ) + + # init mutator + self.mutator = FBNetMutator(model, lookup_table) + self.mutator.set_temperature(self.temp) + + # DataParallel should be put behind the init of mutator + self.model = torch.nn.DataParallel(self.model, device_ids=device_ids) + self.model.to(device) + + # build architecture optimizer + self.arch_optimizer = torch.optim.AdamW( + self.mutator.get_architecture_parameters(), + self.config.nas_lr, + weight_decay=self.config.nas_weight_decay, + ) + self.reg_loss = RegularizerLoss(config=self.config) + + self.criterion = criterion + self.epoch = 0 + + def _layer_choice_sample(self): + """ + Sample the index of network within layer choice + """ + stages = [stage_name for stage_name in self.lookup_table.layer_num] + stage_lnum = [self.lookup_table.layer_num[stage] for stage in stages] + + # get the choice idx in each layer + choice_ids = list() + layer_id = 0 + for param in self.mutator.get_architecture_parameters(): + param_np = param.cpu().detach().numpy() + op_idx = np.argmax(param_np) + choice_ids.append(op_idx) + self.logger.info( + "layer {}: {}, index: {}".format(layer_id, param_np, op_idx) + ) + layer_id += 1 + + # get the arch_sample + choice_names = list() + layer_id = 0 + for i, stage_name in enumerate(stages): + ops_names = [op for op in self.lookup_table.lut_ops[stage_name]] + for _ in range(stage_lnum[i]): + searched_op = ops_names[choice_ids[layer_id]] + 
choice_names.append(searched_op) + layer_id += 1 + + self.logger.info(choice_names) + return choice_names + + def _get_perf_cost(self, requires_grad=True): + """ + Get the accumulated performance cost. + """ + perf_cost = Variable( + torch.zeros(1), requires_grad=requires_grad + ).to(self.device, non_blocking=True) + + for latency in self.mutator.get_weighted_latency(): + perf_cost = perf_cost + latency + + return perf_cost + + def _validate(self): + """ + Do validation. During validation, LayerChoices use the mixed-op. + + Returns + ------- + float, float, float + average loss, average top1 accuracy, average top5 accuracy + """ + self.valid_loader.batch_sampler.drop_last = False + batch_time = AverageMeter("batch_time") + losses = AverageMeter("losses") + top1 = AverageMeter("top1") + top5 = AverageMeter("top5") + + # test on validation set under eval mode + self.model.eval() + + end = time.time() + with torch.no_grad(): + for i, (images, labels) in enumerate(self.valid_loader): + images = images.to(self.device, non_blocking=True) + labels = labels.to(self.device, non_blocking=True) + + output = self.model(images) + + loss = self.criterion(output, labels) + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss, images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0 or i + 1 == len(self.valid_loader): + test_log = ( + "Valid" + ": [{0}/{1}]\t" + "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" + "Loss {loss.val:.4f} ({loss.avg:.4f})\t" + "Top-1 acc {top1.val:.3f} ({top1.avg:.3f})\t" + "Top-5 acc {top5.val:.3f} ({top5.avg:.3f})".format( + i, + len(self.valid_loader) - 1, + batch_time=batch_time, + loss=losses, + top1=top1, + top5=top5, + ) + ) + self.logger.info(test_log) + + return losses.avg, top1.avg, top5.avg + + def _train_epoch(self, epoch, optimizer, arch_train=False): + """ + Train one epoch. 
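+        When ``arch_train`` is true, the architecture parameters are optimized on the
+        validation loader; otherwise the model weights are optimized on the training
+        loader. In both cases the hardware-aware regularizer is folded into the loss
+        according to ``self.mode``.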
+ """ + batch_time = AverageMeter("batch_time") + data_time = AverageMeter("data_time") + losses = AverageMeter("losses") + top1 = AverageMeter("top1") + top5 = AverageMeter("top5") + + # switch to train mode + self.model.train() + + data_loader = self.valid_loader if arch_train else self.train_loader + end = time.time() + for i, (images, labels) in enumerate(data_loader): + data_time.update(time.time() - end) + images = images.to(self.device, non_blocking=True) + labels = labels.to(self.device, non_blocking=True) + + output = self.model(images) + loss = self.criterion(output, labels) + + # hardware-aware loss + perf_cost = self._get_perf_cost(requires_grad=True) + regu_loss = self.reg_loss(perf_cost) + if self.mode.startswith("mul"): + loss = loss * regu_loss + elif self.mode.startswith("add"): + loss = loss + regu_loss + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss.item(), images.size(0)) + top1.update(acc1[0].item(), images.size(0)) + top5.update(acc5[0].item(), images.size(0)) + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0: + batch_log = ( + "Warmup Train [{0}][{1}]\t" + "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" + "Data {data_time.val:.3f} ({data_time.avg:.3f})\t" + "Loss {losses.val:.4f} ({losses.avg:.4f})\t" + "Top-1 acc {top1.val:.3f} ({top1.avg:.3f})\t" + "Top-5 acc {top5.val:.3f} ({top5.avg:.3f})\t".format( + epoch + 1, + i, + batch_time=batch_time, + data_time=data_time, + losses=losses, + top1=top1, + top5=top5, + ) + ) + self.logger.info(batch_log) + + def _warm_up(self): + """ + Warm up the model, while the architecture weights are not trained. + """ + for epoch in range(self.epoch, self.start_epoch): + self.logger.info("\n--------Warmup epoch: %d--------\n", epoch + 1) + self._train_epoch(epoch, self.model_optim) + # adjust learning rate + self.scheduler.step() + + # validation + val_loss, val_top1, val_top5 = self._validate() + val_log = ( + "Warmup Valid [{0}/{1}]\t" + "loss {2:.3f}\ttop-1 acc {3:.3f}\ttop-5 acc {4:.3f}".format( + epoch + 1, self.warmup_epochs, val_loss, val_top1, val_top5 + ) + ) + self.logger.info(val_log) + + if epoch % 10 == 0: + filename = os.path.join( + self.config.model_dir, "checkpoint_%s.pth" % epoch + ) + self.save_checkpoint(epoch, filename) + + def _train(self): + """ + Train the model, it trains model weights and architecute weights. + Architecture weights are trained according to the schedule. + Before updating architecture weights, ```requires_grad``` is enabled. + Then, it is disabled after the updating, in order not to update + architecture weights when training model weights. 
+ """ + arch_param_num = self.mutator.num_arch_params() + self.logger.info("#arch_params: {}".format(arch_param_num)) + self.epoch = max(self.start_epoch, self.epoch) + + ckpt_path = self.config.model_dir + choice_names = None + top1_best = 0.0 + + for epoch in range(self.epoch, self.n_epochs): + self.logger.info("\n--------Train epoch: %d--------\n", epoch + 1) + # update the weight parameters + self._train_epoch(epoch, self.model_optim) + # adjust learning rate + self.scheduler.step() + + self.logger.info("Update architecture parameters") + # update the architecture parameters + self.mutator.arch_requires_grad() + self._train_epoch(epoch, self.arch_optimizer, True) + self.mutator.arch_disable_grad() + # temperature annealing + self.temp = self.temp * self.exp_anneal_rate + self.mutator.set_temperature(self.temp) + # sample the architecture of sub-network + choice_names = self._layer_choice_sample() + + # validate + val_loss, val_top1, val_top5 = self._validate() + val_log = ( + "Valid [{0}]\t" + "loss {1:.3f}\ttop-1 acc {2:.3f} \ttop-5 acc {3:.3f}".format( + epoch + 1, val_loss, val_top1, val_top5 + ) + ) + self.logger.info(val_log) + + if epoch % 10 == 0: + filename = os.path.join(ckpt_path, "checkpoint_%s.pth" % epoch) + self.save_checkpoint(epoch, filename, choice_names) + + val_top1 = val_top1.cpu().as_numpy() + if val_top1 > top1_best: + filename = os.path.join(ckpt_path, "checkpoint_best.pth") + self.save_checkpoint(epoch, filename, choice_names) + top1_best = val_top1 + + def save_checkpoint(self, epoch, filename, choice_names=None): + """ + Save checkpoint of the whole model. + Saving model weights and architecture weights as ```filename```, + and saving currently chosen architecture in ```arch_path```. + """ + state = { + "model": self.model.state_dict(), + "optim": self.model_optim.state_dict(), + "epoch": epoch, + "arch_sample": choice_names, + } + torch.save(state, filename) + self.logger.info("Save checkpoint to {0:}".format(filename)) + + if self.arch_path: + self.export(self.arch_path) + + def load_checkpoint(self, filename): + """ + Load the checkpoint from ```ckpt_path```. + """ + ckpt = torch.load(filename) + self.epoch = ckpt["epoch"] + self.model.load_state_dict(ckpt["model"]) + self.model_optim.load_state_dict(ckpt["optim"]) + + def train(self): + """ + Train the whole model. + """ + if self.load_ckpt: + ckpt_path = self.config.model_dir + filename = os.path.join(ckpt_path, "checkpoint_best.pth") + if os.path.exists(filename): + self.load_checkpoint(filename) + + if self.epoch < self.start_epoch: + self._warm_up() + self._train() + + def export(self, file_name): + """ + Export the chosen architecture into a file + + Parameters + ---------- + file_name : str + the file that stores exported chosen architecture + """ + exported_arch = self.mutator.sample_final() + with open(file_name, "w") as f: + json.dump( + exported_arch, + f, + indent=2, + sort_keys=True, + cls=TorchTensorEncoder, + ) + + def validate(self): + raise NotImplementedError + + def checkpoint(self): + raise NotImplementedError diff --git a/nni/algorithms/nas/pytorch/fbnet/utils.py b/nni/algorithms/nas/pytorch/fbnet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..925b612a507b7d723e85f888307e22a5d605f720 --- /dev/null +++ b/nni/algorithms/nas/pytorch/fbnet/utils.py @@ -0,0 +1,351 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import timeit
+import torch
+
+import numpy as np
+import torch.nn as nn
+
+from nni.compression.pytorch.utils.counter import count_flops_params
+
+LUT_FILE = "lut.npy"
+LUT_PATH = "lut"
+
+
+class NASConfig:
+    def __init__(
+        self,
+        perf_metric="flops",
+        lut_load=False,
+        model_dir=None,
+        nas_lr=0.01,
+        nas_weight_decay=5e-4,
+        mode="mul",
+        alpha=0.25,
+        beta=0.6,
+        start_epoch=50,
+        init_temperature=5.0,
+        exp_anneal_rate=np.exp(-0.045),
+        search_space=None,
+    ):
+        # LUT of performance metric
+        # "flops" counts multiply operations; "latency" is the measured time cost on the platform
+        self.perf_metric = perf_metric
+        assert perf_metric in [
+            "flops",
+            "latency",
+        ], "perf_metric should be ['flops', 'latency']"
+        # whether to load or create the lut file
+        self.lut_load = lut_load
+        # necessary dirs
+        self.lut_en = model_dir is not None
+        if self.lut_en:
+            self.model_dir = model_dir
+            os.makedirs(model_dir, exist_ok=True)
+            self.lut_path = os.path.join(model_dir, LUT_PATH)
+            os.makedirs(self.lut_path, exist_ok=True)
+        # NAS learning setting
+        self.nas_lr = nas_lr
+        self.nas_weight_decay = nas_weight_decay
+        # hardware-aware loss setting
+        self.mode = mode
+        assert mode in ["mul", "add"], "mode should be ['mul', 'add']"
+        self.alpha = alpha
+        self.beta = beta
+        # NAS training setting
+        self.start_epoch = start_epoch
+        self.init_temperature = init_temperature
+        self.exp_anneal_rate = exp_anneal_rate
+        # definition of search blocks and space
+        self.search_space = search_space
+
+
+class RegularizerLoss(nn.Module):
+    """Auxiliary loss for hardware-aware NAS."""
+
+    def __init__(self, config):
+        """
+        Parameters
+        ----------
+        config : class
+            to manage the configuration for NAS training, and search space etc.
+        """
+        super(RegularizerLoss, self).__init__()
+        self.mode = config.mode
+        self.alpha = config.alpha
+        self.beta = config.beta
+
+    def forward(self, perf_cost, batch_size=1):
+        """
+        Parameters
+        ----------
+        perf_cost : tensor
+            the accumulated performance cost
+        batch_size : int
+            batch size for normalization
+
+        Returns
+        -------
+        output: tensor
+            the hardware-aware constraint loss
+        """
+        if self.mode == "mul":
+            log_loss = torch.log(perf_cost / batch_size) ** self.beta
+            return self.alpha * log_loss
+        elif self.mode == "add":
+            linear_loss = (perf_cost / batch_size) ** self.beta
+            return self.alpha * linear_loss
+        else:
+            raise NotImplementedError
+
+
+def accuracy(output, target, topk=(1,)):
+    """
+    Computes the precision@k for the specified values of k.
+
+    Parameters
+    ----------
+    output : pytorch tensor
+        output, e.g., predicted value
+    target : pytorch tensor
+        label
+    topk : tuple
+        specify top1 and top5
+
+    Returns
+    -------
+    list
+        accuracy of top1 and top5
+    """
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
+
+
+def supernet_sample(model, state_dict, sampled_arch=[], lookup_table=None):
+    """
+    Initialize the searched sub-model from the supernet.
+ + Parameters + ---------- + model : pytorch model + the created subnet + state_dict : checkpoint + the checkpoint of supernet, including the pre-trained params + sampled_arch : list of str + the searched layer names of the subnet + lookup_table : class + to manage the candidate ops, layer information and layer performance + """ + replace = list() + stages = [stage for stage in lookup_table.layer_num] + stage_lnum = [lookup_table.layer_num[stage] for stage in stages] + + if sampled_arch: + layer_id = 0 + for i, stage in enumerate(stages): + ops_names = [op_name for op_name in lookup_table.lut_ops[stage]] + for _ in range(stage_lnum[i]): + searched_op = sampled_arch[layer_id] + op_i = ops_names.index(searched_op) + replace.append( + [ + "blocks.{}.".format(layer_id), + "blocks.{}.op.".format(layer_id), + "blocks.{}.{}.".format(layer_id, op_i), + ] + ) + layer_id += 1 + model_init(model, state_dict, replace=replace) + + +def model_init(model, state_dict, replace=[]): + """Initialize the model from state_dict.""" + prefix = "module." + param_dict = dict() + for k, v in state_dict.items(): + if k.startswith(prefix): + k = k[7:] + param_dict[k] = v + + for k, (name, m) in enumerate(model.named_modules()): + if replace: + for layer_replace in replace: + assert len(layer_replace) == 3, "The elements should be three." + pre_scope, key, replace_key = layer_replace + if pre_scope in name: + name = name.replace(key, replace_key) + + # Copy the state_dict to current model + if (name + ".weight" in param_dict) or ( + name + ".running_mean" in param_dict + ): + if isinstance(m, nn.BatchNorm2d): + shape = m.running_mean.shape + if shape == param_dict[name + ".running_mean"].shape: + if m.weight is not None: + m.weight.data = param_dict[name + ".weight"] + m.bias.data = param_dict[name + ".bias"] + m.running_mean = param_dict[name + ".running_mean"] + m.running_var = param_dict[name + ".running_var"] + + elif isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + shape = m.weight.data.shape + if shape == param_dict[name + ".weight"].shape: + m.weight.data = param_dict[name + ".weight"] + if m.bias is not None: + m.bias.data = param_dict[name + ".bias"] + + elif isinstance(m, nn.ConvTranspose2d): + m.weight.data = param_dict[name + ".weight"] + if m.bias is not None: + m.bias.data = param_dict[name + ".bias"] + + +class LookUpTable: + """Build look-up table for NAS.""" + + def __init__(self, config, primitives): + """ + Parameters + ---------- + config : class + to manage the configuration for NAS training, and search space etc. 
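+        primitives : dict
+            constructors of the candidate ops, keyed by op name; used together
+            with ``config.search_space`` to build ```lut_ops```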
+ """ + self.config = config + # definition of search blocks and space + self.search_space = config.search_space + # layers for NAS + self.cnt_layers = len(self.search_space["input_shape"]) + # constructors for each operation + self.lut_ops = { + stage_name: { + op_name: primitives[op_name] + for op_name in self.search_space["stages"][stage_name]["ops"] + } + for stage_name in self.search_space["stages"] + } + self.layer_num = { + stage_name: self.search_space["stages"][stage_name]["layer_num"] + for stage_name in self.search_space["stages"] + } + + # arguments for the ops constructors, input_shapes just for convinience + self.layer_configs, self.layer_in_shapes = self._layer_configs() + + # lookup_table + self.perf_metric = config.perf_metric + + if config.lut_en: + self.lut_perf = None + self.lut_file = os.path.join(config.lut_path, LUT_FILE) + if config.lut_load: + self._load_from_file() + else: + self._create_perfs() + + def _layer_configs(self): + """Generate basic params for different layers.""" + # layer_configs are : c_in, c_out, stride, fm_size + layer_configs = [ + [ + self.search_space["input_shape"][layer_id][0], + self.search_space["channel_size"][layer_id], + self.search_space["strides"][layer_id], + self.search_space["fm_size"][layer_id], + ] + for layer_id in range(self.cnt_layers) + ] + + # layer_in_shapes are (C_in, input_w, input_h) + layer_in_shapes = self.search_space["input_shape"] + + return layer_configs, layer_in_shapes + + def _create_perfs(self, cnt_of_runs=200): + """Create performance cost for each op.""" + if self.perf_metric == "latency": + self.lut_perf = self._calculate_latency(cnt_of_runs) + elif self.perf_metric == "flops": + self.lut_perf = self._calculate_flops() + + self._write_lut_to_file() + + def _calculate_flops(self, eps=0.001): + """FLOPs cost.""" + flops_lut = [{} for i in range(self.cnt_layers)] + layer_id = 0 + + for stage_name in self.lut_ops: + stage_ops = self.lut_ops[stage_name] + ops_num = self.layer_num[stage_name] + + for _ in range(ops_num): + for op_name in stage_ops: + layer_config = self.layer_configs[layer_id] + key_params = {"fm_size": layer_config[3]} + op = stage_ops[op_name](*layer_config[0:3], **key_params) + + # measured in Flops + in_shape = self.layer_in_shapes[layer_id] + x = (1, in_shape[0], in_shape[1], in_shape[2]) + flops, _, _ = count_flops_params(op, x, verbose=False) + flops = eps if flops == 0.0 else flops + flops_lut[layer_id][op_name] = float(flops) + layer_id += 1 + + return flops_lut + + def _calculate_latency(self, cnt_of_runs): + """Latency cost.""" + LATENCY_BATCH_SIZE = 1 + latency_lut = [{} for i in range(self.cnt_layers)] + layer_id = 0 + + for stage_name in self.lut_ops: + stage_ops = self.lut_ops[stage_name] + ops_num = self.layer_num[stage_name] + + for _ in range(ops_num): + for op_name in stage_ops: + layer_config = self.layer_configs[layer_id] + key_params = {"fm_size": layer_config[3]} + op = stage_ops[op_name](*layer_config[0:3], **key_params) + input_data = torch.randn( + (LATENCY_BATCH_SIZE, *self.layer_in_shapes[layer_id]) + ) + globals()["op"], globals()["input_data"] = op, input_data + total_time = timeit.timeit( + "output = op(input_data)", + setup="gc.enable()", + globals=globals(), + number=cnt_of_runs, + ) + # measured in micro-second + latency_lut[layer_id][op_name] = ( + total_time / cnt_of_runs / LATENCY_BATCH_SIZE * 1e6 + ) + layer_id += 1 + + return latency_lut + + def _write_lut_to_file(self): + """Save lut as numpy file.""" + np.save(self.lut_file, self.lut_perf) + + def 
_load_from_file(self):
+        """Load numpy file."""
+        self.lut_perf = np.load(self.lut_file, allow_pickle=True)
diff --git a/nni/algorithms/nas/pytorch/pdarts/__init__.py b/nni/algorithms/nas/pytorch/pdarts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1d17764ba159b35bcc38efa82a2a30dc2366b76
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/pdarts/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .trainer import PdartsTrainer
diff --git a/nni/algorithms/nas/pytorch/pdarts/mutator.py b/nni/algorithms/nas/pytorch/pdarts/mutator.py
new file mode 100644
index 0000000000000000000000000000000000000000..09ad51c5e471b4acae51d513335682328924b90a
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/pdarts/mutator.py
@@ -0,0 +1,93 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import copy
+
+import numpy as np
+import torch
+from torch import nn
+
+from nni.algorithms.nas.pytorch.darts import DartsMutator
+from nni.nas.pytorch.mutables import LayerChoice
+
+
+class PdartsMutator(DartsMutator):
+    """
+    It works with PdartsTrainer to calculate the op weights,
+    and to drop the lowest-weighted ops between P-DARTS epochs.
+    """
+
+    def __init__(self, model, pdarts_epoch_index, pdarts_num_to_drop, switches={}):
+        self.pdarts_epoch_index = pdarts_epoch_index
+        self.pdarts_num_to_drop = pdarts_num_to_drop
+        if switches is None:
+            self.switches = {}
+        else:
+            self.switches = switches
+
+        super(PdartsMutator, self).__init__(model)
+
+        # this loop goes through mutables with different keys,
+        # mainly to update the length of choices.
+        for mutable in self.mutables:
+            if isinstance(mutable, LayerChoice):
+
+                switches = self.switches.get(mutable.key, [True for j in range(len(mutable))])
+                choices = self.choices[mutable.key]
+
+                operations_count = np.sum(switches)
+                # +1 and -1 are caused by the zero operation in the darts network:
+                # the zero operation is not in the network's choices list, but its weight is,
+                # so one extra weight and one extra switch are needed for zero.
+                self.choices[mutable.key] = nn.Parameter(1.0E-3 * torch.randn(operations_count + 1))
+                self.switches[mutable.key] = switches
+
+        # update LayerChoice instances in the model;
+        # this physically removes the dropped choice operations.
+        for module in self.model.modules():
+            if isinstance(module, LayerChoice):
+                switches = self.switches.get(module.key)
+                choices = self.choices[module.key]
+                if len(module) > len(choices):
+                    # iterate from last to first, so that a removal does not
+                    # affect the indices of entries not yet visited.
+                    for index in range(len(switches)-1, -1, -1):
+                        if not switches[index]:
+                            del module[index]
+                    assert len(module) <= len(choices), "Failed to remove dropped choices."
+
+    def export(self):
+        # Cannot rely on super().export() because P-DARTS has deleted some of the choices and has misaligned length.
+        results = super().sample_final()
+        for mutable in self.mutables:
+            if isinstance(mutable, LayerChoice):
+                # As some operations have been physically dropped,
+                # fill False back in to keep track of the dropped operations.
+                trained_result = results[mutable.key]
+                trained_index = 0
+                switches = self.switches[mutable.key]
+                result = torch.Tensor(switches).bool()
+                for index in range(len(result)):
+                    if result[index]:
+                        result[index] = trained_result[trained_index]
+                        trained_index += 1
+                results[mutable.key] = result
+        return results
+
+    def drop_paths(self):
+        """
+        This method is called when a P-DARTS epoch is finished.
+        It prepares the switches for the next epoch.
+        Candidate operations whose switch is False will be dropped in the next epoch.
+        The weights of the surviving candidates (excluding the trailing zero-op weight)
+        are ranked, and the lowest-ranked ones are switched off.
+        """
+        all_switches = copy.deepcopy(self.switches)
+        for key in all_switches:
+            switches = all_switches[key]
+            idxs = []
+            for j in range(len(switches)):
+                if switches[j]:
+                    idxs.append(j)
+            # exclude the zero-op weight (the last entry) before ranking
+            kept_weights = self.choices[key].data.cpu().numpy()[:-1]
+            drop = np.argsort(kept_weights)[:self.pdarts_num_to_drop[self.pdarts_epoch_index]]
+            for idx in drop:
+                switches[idxs[idx]] = False
+        return all_switches
diff --git a/nni/algorithms/nas/pytorch/pdarts/trainer.py b/nni/algorithms/nas/pytorch/pdarts/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f23a6e222731ae6edc5c034f77eafaaf71b4c5e
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/pdarts/trainer.py
@@ -0,0 +1,86 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import json
+import logging
+
+from nni.nas.pytorch.callbacks import LRSchedulerCallback
+from nni.algorithms.nas.pytorch.darts import DartsTrainer
+from nni.nas.pytorch.trainer import BaseTrainer, TorchTensorEncoder
+
+from .mutator import PdartsMutator
+
+logger = logging.getLogger(__name__)
+
+
+class PdartsTrainer(BaseTrainer):
+    """
+    This trainer implements the P-DARTS algorithm.
+    P-DARTS builds on the DARTS algorithm, and provides a network-growth approach to find a deeper and better network.
+    This class relies on the pdarts_num_layers and pdarts_num_to_drop parameters to control how the network grows.
+    pdarts_num_layers is how many layers are added on top of the first epoch's network in each P-DARTS epoch.
+    pdarts_num_to_drop is how many candidate operations are dropped in each P-DARTS epoch,
+    so that the grown network stays of similar size.
+    """
+
+    def __init__(self, model_creator, init_layers, metrics,
+                 num_epochs, dataset_train, dataset_valid,
+                 pdarts_num_layers=[0, 6, 12], pdarts_num_to_drop=[3, 2, 1],
+                 mutator=None, batch_size=64, workers=4, device=None, log_frequency=None, callbacks=None, unrolled=False):
+        super(PdartsTrainer, self).__init__()
+        self.model_creator = model_creator
+        self.init_layers = init_layers
+        self.pdarts_num_layers = pdarts_num_layers
+        self.pdarts_num_to_drop = pdarts_num_to_drop
+        self.pdarts_epoch = len(pdarts_num_to_drop)
+        self.darts_parameters = {
+            "metrics": metrics,
+            "num_epochs": num_epochs,
+            "dataset_train": dataset_train,
+            "dataset_valid": dataset_valid,
+            "batch_size": batch_size,
+            "workers": workers,
+            "device": device,
+            "log_frequency": log_frequency,
+            "unrolled": unrolled
+        }
+        self.callbacks = callbacks if callbacks is not None else []
+
+    def train(self):
+
+        switches = None
+        for epoch in range(self.pdarts_epoch):
+
+            layers = self.init_layers + self.pdarts_num_layers[epoch]
+            model, criterion, optim, lr_scheduler = self.model_creator(layers)
+            self.mutator = PdartsMutator(model, epoch, self.pdarts_num_to_drop, switches)
+
+            for callback in self.callbacks:
+                callback.build(model, self.mutator, self)
+                callback.on_epoch_begin(epoch)
+
+            darts_callbacks = []
+            if lr_scheduler is not None:
+                darts_callbacks.append(LRSchedulerCallback(lr_scheduler))
+
+            self.trainer = DartsTrainer(model, mutator=self.mutator, loss=criterion, optimizer=optim,
+                                        callbacks=darts_callbacks, **self.darts_parameters)
+            logger.info("start pdarts training epoch %s...", epoch)
+
+            self.trainer.train()
+
+            switches = self.mutator.drop_paths()
+
+            for callback in self.callbacks:
+                callback.on_epoch_end(epoch)
+
+    def validate(self):
+        self.trainer.validate()
+
+    def export(self, file):
+        mutator_export = self.mutator.export()
+        with open(file, "w") as f:
json.dump(mutator_export, f, indent=2, sort_keys=True, cls=TorchTensorEncoder) + + def checkpoint(self): + raise NotImplementedError("Not implemented yet") diff --git a/nni/algorithms/nas/pytorch/proxylessnas/__init__.py b/nni/algorithms/nas/pytorch/proxylessnas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..26feedba7d553c32d61ea9139620a68fca7c12d0 --- /dev/null +++ b/nni/algorithms/nas/pytorch/proxylessnas/__init__.py @@ -0,0 +1,2 @@ +from .mutator import ProxylessNasMutator +from .trainer import ProxylessNasTrainer diff --git a/nni/algorithms/nas/pytorch/proxylessnas/mutator.py b/nni/algorithms/nas/pytorch/proxylessnas/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..881a6b44038d5d6f2c97b2b106036954401a9cd3 --- /dev/null +++ b/nni/algorithms/nas/pytorch/proxylessnas/mutator.py @@ -0,0 +1,478 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import math +import torch +from torch import nn as nn +from torch.nn import functional as F +import numpy as np + +from nni.nas.pytorch.base_mutator import BaseMutator +from nni.nas.pytorch.mutables import LayerChoice +from .utils import detach_variable + +class ArchGradientFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, x, binary_gates, run_func, backward_func): + ctx.run_func = run_func + ctx.backward_func = backward_func + + detached_x = detach_variable(x) + with torch.enable_grad(): + output = run_func(detached_x) + ctx.save_for_backward(detached_x, output) + return output.data + + @staticmethod + def backward(ctx, grad_output): + detached_x, output = ctx.saved_tensors + + grad_x = torch.autograd.grad(output, detached_x, grad_output, only_inputs=True) + # compute gradients w.r.t. binary_gates + binary_grads = ctx.backward_func(detached_x.data, output.data, grad_output.data) + + return grad_x[0], binary_grads, None, None + +class MixedOp(nn.Module): + """ + This class is to instantiate and manage info of one LayerChoice. + It includes architecture weights, binary weights, and member functions + operating the weights. + + forward_mode: + forward/backward mode for LayerChoice: None, two, full, and full_v2. + For training architecture weights, we use full_v2 by default, and for training + model weights, we use None. + """ + forward_mode = None + def __init__(self, mutable): + """ + Parameters + ---------- + mutable : LayerChoice + A LayerChoice in user model + """ + super(MixedOp, self).__init__() + self.ap_path_alpha = nn.Parameter(torch.Tensor(len(mutable))) + self.ap_path_wb = nn.Parameter(torch.Tensor(len(mutable))) + self.ap_path_alpha.requires_grad = False + self.ap_path_wb.requires_grad = False + self.active_index = [0] + self.inactive_index = None + self.log_prob = None + self.current_prob_over_ops = None + self.n_choices = len(mutable) + + def get_ap_path_alpha(self): + return self.ap_path_alpha + + def to_requires_grad(self): + self.ap_path_alpha.requires_grad = True + self.ap_path_wb.requires_grad = True + + def to_disable_grad(self): + self.ap_path_alpha.requires_grad = False + self.ap_path_wb.requires_grad = False + + def forward(self, mutable, x): + """ + Define forward of LayerChoice. For 'full_v2', backward is also defined. + The 'two' mode is explained in section 3.2.1 in the paper. + The 'full_v2' mode is explained in Appendix D in the paper. 
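+        In the default (None) mode only the single active op is executed, which is how
+        model weights are trained. In 'full_v2', the inactive ops are re-run inside the
+        custom backward to obtain gradients for the binary gates without keeping every
+        op's forward activations alive.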
+ + Parameters + ---------- + mutable : LayerChoice + this layer's mutable + x : tensor + inputs of this layer, only support one input + + Returns + ------- + output: tensor + output of this layer + """ + if MixedOp.forward_mode == 'full' or MixedOp.forward_mode == 'two': + output = 0 + for _i in self.active_index: + oi = self.candidate_ops[_i](x) + output = output + self.ap_path_wb[_i] * oi + for _i in self.inactive_index: + oi = self.candidate_ops[_i](x) + output = output + self.ap_path_wb[_i] * oi.detach() + elif MixedOp.forward_mode == 'full_v2': + def run_function(key, candidate_ops, active_id): + def forward(_x): + return candidate_ops[active_id](_x) + return forward + + def backward_function(key, candidate_ops, active_id, binary_gates): + def backward(_x, _output, grad_output): + binary_grads = torch.zeros_like(binary_gates.data) + with torch.no_grad(): + for k in range(len(candidate_ops)): + if k != active_id: + out_k = candidate_ops[k](_x.data) + else: + out_k = _output.data + grad_k = torch.sum(out_k * grad_output) + binary_grads[k] = grad_k + return binary_grads + return backward + output = ArchGradientFunction.apply( + x, self.ap_path_wb, run_function(mutable.key, list(mutable), self.active_index[0]), + backward_function(mutable.key, list(mutable), self.active_index[0], self.ap_path_wb)) + else: + output = self.active_op(mutable)(x) + return output + + @property + def probs_over_ops(self): + """ + Apply softmax on alpha to generate probability distribution + + Returns + ------- + pytorch tensor + probability distribution + """ + probs = F.softmax(self.ap_path_alpha, dim=0) # softmax to probability + return probs + + @property + def chosen_index(self): + """ + choose the op with max prob + + Returns + ------- + int + index of the chosen one + numpy.float32 + prob of the chosen one + """ + probs = self.probs_over_ops.data.cpu().numpy() + index = int(np.argmax(probs)) + return index, probs[index] + + def active_op(self, mutable): + """ + assume only one path is active + + Returns + ------- + PyTorch module + the chosen operation + """ + return mutable[self.active_index[0]] + + @property + def active_op_index(self): + """ + return active op's index, the active op is sampled + + Returns + ------- + int + index of the active op + """ + return self.active_index[0] + + def set_chosen_op_active(self): + """ + set chosen index, active and inactive indexes + """ + chosen_idx, _ = self.chosen_index + self.active_index = [chosen_idx] + self.inactive_index = [_i for _i in range(0, chosen_idx)] + \ + [_i for _i in range(chosen_idx + 1, self.n_choices)] + + def binarize(self, mutable): + """ + Sample based on alpha, and set binary weights accordingly. + ap_path_wb is set in this function, which is called binarize. 
+ + Parameters + ---------- + mutable : LayerChoice + this layer's mutable + """ + self.log_prob = None + # reset binary gates + self.ap_path_wb.data.zero_() + probs = self.probs_over_ops + if MixedOp.forward_mode == 'two': + # sample two ops according to probs + sample_op = torch.multinomial(probs.data, 2, replacement=False) + probs_slice = F.softmax(torch.stack([ + self.ap_path_alpha[idx] for idx in sample_op + ]), dim=0) + self.current_prob_over_ops = torch.zeros_like(probs) + for i, idx in enumerate(sample_op): + self.current_prob_over_ops[idx] = probs_slice[i] + # choose one to be active and the other to be inactive according to probs_slice + c = torch.multinomial(probs_slice.data, 1)[0] # 0 or 1 + active_op = sample_op[c].item() + inactive_op = sample_op[1-c].item() + self.active_index = [active_op] + self.inactive_index = [inactive_op] + # set binary gate + self.ap_path_wb.data[active_op] = 1.0 + else: + sample = torch.multinomial(probs, 1)[0].item() + self.active_index = [sample] + self.inactive_index = [_i for _i in range(0, sample)] + \ + [_i for _i in range(sample + 1, len(mutable))] + self.log_prob = torch.log(probs[sample]) + self.current_prob_over_ops = probs + self.ap_path_wb.data[sample] = 1.0 + # avoid over-regularization + for choice in mutable: + for _, param in choice.named_parameters(): + param.grad = None + + @staticmethod + def delta_ij(i, j): + if i == j: + return 1 + else: + return 0 + + def set_arch_param_grad(self, mutable): + """ + Calculate alpha gradient for this LayerChoice. + It is calculated using gradient of binary gate, probs of ops. + """ + binary_grads = self.ap_path_wb.grad.data + if self.active_op(mutable).is_zero_layer(): + self.ap_path_alpha.grad = None + return + if self.ap_path_alpha.grad is None: + self.ap_path_alpha.grad = torch.zeros_like(self.ap_path_alpha.data) + if MixedOp.forward_mode == 'two': + involved_idx = self.active_index + self.inactive_index + probs_slice = F.softmax(torch.stack([ + self.ap_path_alpha[idx] for idx in involved_idx + ]), dim=0).data + for i in range(2): + for j in range(2): + origin_i = involved_idx[i] + origin_j = involved_idx[j] + self.ap_path_alpha.grad.data[origin_i] += \ + binary_grads[origin_j] * probs_slice[j] * (MixedOp.delta_ij(i, j) - probs_slice[i]) + for _i, idx in enumerate(self.active_index): + self.active_index[_i] = (idx, self.ap_path_alpha.data[idx].item()) + for _i, idx in enumerate(self.inactive_index): + self.inactive_index[_i] = (idx, self.ap_path_alpha.data[idx].item()) + else: + probs = self.probs_over_ops.data + for i in range(self.n_choices): + for j in range(self.n_choices): + self.ap_path_alpha.grad.data[i] += binary_grads[j] * probs[j] * (MixedOp.delta_ij(i, j) - probs[i]) + return + + def rescale_updated_arch_param(self): + """ + rescale architecture weights for the 'two' mode. + """ + if not isinstance(self.active_index[0], tuple): + assert self.active_op.is_zero_layer() + return + involved_idx = [idx for idx, _ in (self.active_index + self.inactive_index)] + old_alphas = [alpha for _, alpha in (self.active_index + self.inactive_index)] + new_alphas = [self.ap_path_alpha.data[idx] for idx in involved_idx] + + offset = math.log( + sum([math.exp(alpha) for alpha in new_alphas]) / sum([math.exp(alpha) for alpha in old_alphas]) + ) + + for idx in involved_idx: + self.ap_path_alpha.data[idx] -= offset + + +class ProxylessNasMutator(BaseMutator): + """ + This mutator initializes and operates all the LayerChoices of the input model. 
+    It is for the corresponding trainer to control the training process of the LayerChoices,
+    coordinating with the overall training process.
+    """
+    def __init__(self, model):
+        """
+        Init a MixedOp instance for each mutable, i.e., LayerChoice, and register the
+        instantiated MixedOp in the corresponding LayerChoice. If it were not registered
+        in the LayerChoice, DataParallel would not work, because the architecture weights
+        would not be included in the DataParallel model.
+        Once MixedOps are registered, we use ```requires_grad``` to control whether to
+        calculate gradients of the architecture weights.
+
+        Parameters
+        ----------
+        model : pytorch model
+            The model that users want to tune, it includes search space defined with nni nas apis
+        """
+        super(ProxylessNasMutator, self).__init__(model)
+        self._unused_modules = None
+        self.mutable_list = []
+        for mutable in self.undedup_mutables:
+            self.mutable_list.append(mutable)
+            mutable.registered_module = MixedOp(mutable)
+
+    def on_forward_layer_choice(self, mutable, *args, **kwargs):
+        """
+        Callback of layer choice forward. This function defines the forward
+        logic of the input mutable, so the mutable is only an interface whose
+        real implementation is defined in the mutator.
+
+        Parameters
+        ----------
+        mutable: LayerChoice
+            the LayerChoice whose forward logic is being executed
+        args: list of torch.Tensor
+            inputs of this mutable
+        kwargs: dict
+            inputs of this mutable
+
+        Returns
+        -------
+        torch.Tensor
+            output of this mutable, i.e., LayerChoice
+        int
+            index of the chosen op
+        """
+        # FIXME: return mask, to be consistent with other algorithms
+        idx = mutable.registered_module.active_op_index
+        return mutable.registered_module(mutable, *args, **kwargs), idx
+
+    def reset_binary_gates(self):
+        """
+        For each LayerChoice, binarize binary weights
+        based on alpha to only activate one op.
+        It traverses all the mutables in the model to do this.
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.binarize(mutable)
+
+    def set_chosen_op_active(self):
+        """
+        For each LayerChoice, set the op with the highest alpha as the chosen op.
+        Usually used for validation.
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.set_chosen_op_active()
+
+    def num_arch_params(self):
+        """
+        The number of mutables, i.e., LayerChoices.
+
+        Returns
+        -------
+        int
+            the number of LayerChoice in user model
+        """
+        return len(self.mutable_list)
+
+    def set_arch_param_grad(self):
+        """
+        For each LayerChoice, calculate gradients for architecture weights, i.e., alpha
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.set_arch_param_grad(mutable)
+
+    def get_architecture_parameters(self):
+        """
+        Get all the architecture parameters.
+
+        Yields
+        ------
+        PyTorch Parameter
+            ap_path_alpha of each traversed mutable
+        """
+        for mutable in self.undedup_mutables:
+            yield mutable.registered_module.get_ap_path_alpha()
+
+    def change_forward_mode(self, mode):
+        """
+        Update forward mode of MixedOps, as training architecture weights and
+        model weights use different forward modes.
+        """
+        MixedOp.forward_mode = mode
+
+    def get_forward_mode(self):
+        """
+        Get forward mode of MixedOp
+
+        Returns
+        -------
+        string
+            the current forward mode of MixedOp
+        """
+        return MixedOp.forward_mode
+
+    def rescale_updated_arch_param(self):
+        """
+        Rescale architecture weights in the 'two' mode.
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.rescale_updated_arch_param(mutable)
+
+    def unused_modules_off(self):
+        """
+        Remove unused modules for each mutable.
+        The removed modules are kept in ```self._unused_modules``` so that they can be restored later.
+        """
+        self._unused_modules = []
+        for mutable in self.undedup_mutables:
+            mixed_op = mutable.registered_module
+            unused = {}
+            if self.get_forward_mode() in ['full', 'two', 'full_v2']:
+                involved_index = mixed_op.active_index + mixed_op.inactive_index
+            else:
+                involved_index = mixed_op.active_index
+            for i in range(mixed_op.n_choices):
+                if i not in involved_index:
+                    unused[i] = mutable[i]
+                    mutable[i] = None
+            self._unused_modules.append(unused)
+
+    def unused_modules_back(self):
+        """
+        Restore the removed modules.
+        """
+        if self._unused_modules is None:
+            return
+        for m, unused in zip(self.mutable_list, self._unused_modules):
+            for i in unused:
+                m[i] = unused[i]
+        self._unused_modules = None
+
+    def arch_requires_grad(self):
+        """
+        Make architecture weights require gradient.
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.to_requires_grad()
+
+    def arch_disable_grad(self):
+        """
+        Disable gradient of architecture weights, i.e., do not
+        calculate gradients for them.
+        """
+        for mutable in self.undedup_mutables:
+            mutable.registered_module.to_disable_grad()
+
+    def sample_final(self):
+        """
+        Generate the final chosen architecture.
+
+        Returns
+        -------
+        dict
+            the choice of each mutable, i.e., LayerChoice
+        """
+        result = dict()
+        for mutable in self.undedup_mutables:
+            assert isinstance(mutable, LayerChoice)
+            index, _ = mutable.registered_module.chosen_index
+            # pylint: disable=not-callable
+            result[mutable.key] = F.one_hot(torch.tensor(index), num_classes=len(mutable)).view(-1).bool()
+        return result
diff --git a/nni/algorithms/nas/pytorch/proxylessnas/trainer.py b/nni/algorithms/nas/pytorch/proxylessnas/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9c86a6a9f098792a4731db32dd140ce3708ea8f
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/proxylessnas/trainer.py
@@ -0,0 +1,500 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
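+
+# A minimal usage sketch (illustrative only; ``Net`` and the two data loaders
+# are hypothetical placeholders, not part of this module):
+#
+#     model = Net()  # model containing LayerChoice mutables
+#     optim = torch.optim.SGD(model.parameters(), lr=0.025, momentum=0.9)
+#     trainer = ProxylessNasTrainer(model, model_optim=optim,
+#                                   device=torch.device('cuda'),
+#                                   train_loader=train_loader,
+#                                   valid_loader=valid_loader)
+#     trainer.train()
+#     trainer.export('final_arch.json')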
+ +import math +import time +import json +import logging + +import torch +from torch import nn as nn + +from nni.nas.pytorch.base_trainer import BaseTrainer +from nni.nas.pytorch.trainer import TorchTensorEncoder +from nni.nas.pytorch.utils import AverageMeter +from .mutator import ProxylessNasMutator +from .utils import cross_entropy_with_label_smoothing, accuracy + +logger = logging.getLogger(__name__) + +class ProxylessNasTrainer(BaseTrainer): + def __init__(self, model, model_optim, device, + train_loader, valid_loader, label_smoothing=0.1, + n_epochs=120, init_lr=0.025, binary_mode='full_v2', + arch_init_type='normal', arch_init_ratio=1e-3, + arch_optim_lr=1e-3, arch_weight_decay=0, + grad_update_arch_param_every=5, grad_update_steps=1, + warmup=True, warmup_epochs=25, + arch_valid_frequency=1, + load_ckpt=False, ckpt_path=None, arch_path=None): + """ + Parameters + ---------- + model : pytorch model + the user model, which has mutables + model_optim : pytorch optimizer + the user defined optimizer + device : pytorch device + the devices to train/search the model + train_loader : pytorch data loader + data loader for the training set + valid_loader : pytorch data loader + data loader for the validation set + label_smoothing : float + for label smoothing + n_epochs : int + number of epochs to train/search + init_lr : float + init learning rate for training the model + binary_mode : str + the forward/backward mode for the binary weights in mutator + arch_init_type : str + the way to init architecture parameters + arch_init_ratio : float + the ratio to init architecture parameters + arch_optim_lr : float + learning rate of the architecture parameters optimizer + arch_weight_decay : float + weight decay of the architecture parameters optimizer + grad_update_arch_param_every : int + update architecture weights every this number of minibatches + grad_update_steps : int + during each update of architecture weights, the number of steps to train + warmup : bool + whether to do warmup + warmup_epochs : int + the number of epochs to do during warmup + arch_valid_frequency : int + frequency of printing validation result + load_ckpt : bool + whether load checkpoint + ckpt_path : str + checkpoint path, if load_ckpt is True, ckpt_path cannot be None + arch_path : str + the path to store chosen architecture + """ + self.model = model + self.model_optim = model_optim + self.train_loader = train_loader + self.valid_loader = valid_loader + self.device = device + self.n_epochs = n_epochs + self.init_lr = init_lr + self.warmup = warmup + self.warmup_epochs = warmup_epochs + self.arch_valid_frequency = arch_valid_frequency + self.label_smoothing = label_smoothing + + self.train_batch_size = train_loader.batch_sampler.batch_size + self.valid_batch_size = valid_loader.batch_sampler.batch_size + # update architecture parameters every this number of minibatches + self.grad_update_arch_param_every = grad_update_arch_param_every + # the number of steps per architecture parameter update + self.grad_update_steps = grad_update_steps + self.binary_mode = binary_mode + + self.load_ckpt = load_ckpt + self.ckpt_path = ckpt_path + self.arch_path = arch_path + + # init mutator + self.mutator = ProxylessNasMutator(model) + + # DataParallel should be put behind the init of mutator + self.model = torch.nn.DataParallel(self.model) + self.model.to(self.device) + + # iter of valid dataset for training architecture weights + self._valid_iter = None + # init architecture weights + self._init_arch_params(arch_init_type, 
arch_init_ratio) + # build architecture optimizer + self.arch_optimizer = torch.optim.Adam(self.mutator.get_architecture_parameters(), + arch_optim_lr, + weight_decay=arch_weight_decay, + betas=(0, 0.999), + eps=1e-8) + + self.criterion = nn.CrossEntropyLoss() + self.warmup_curr_epoch = 0 + self.train_curr_epoch = 0 + + def _init_arch_params(self, init_type='normal', init_ratio=1e-3): + """ + Initialize architecture weights + """ + for param in self.mutator.get_architecture_parameters(): + if init_type == 'normal': + param.data.normal_(0, init_ratio) + elif init_type == 'uniform': + param.data.uniform_(-init_ratio, init_ratio) + else: + raise NotImplementedError + + def _validate(self): + """ + Do validation. During validation, LayerChoices use the chosen active op. + + Returns + ------- + float, float, float + average loss, average top1 accuracy, average top5 accuracy + """ + self.valid_loader.batch_sampler.batch_size = self.valid_batch_size + self.valid_loader.batch_sampler.drop_last = False + + self.mutator.set_chosen_op_active() + # remove unused modules to save memory + self.mutator.unused_modules_off() + # test on validation set under train mode + self.model.train() + batch_time = AverageMeter('batch_time') + losses = AverageMeter('losses') + top1 = AverageMeter('top1') + top5 = AverageMeter('top5') + end = time.time() + with torch.no_grad(): + for i, (images, labels) in enumerate(self.valid_loader): + images, labels = images.to(self.device), labels.to(self.device) + output = self.model(images) + loss = self.criterion(output, labels) + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss, images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0 or i + 1 == len(self.valid_loader): + test_log = 'Valid' + ': [{0}/{1}]\t'\ + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'\ + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'\ + 'Top-1 acc {top1.val:.3f} ({top1.avg:.3f})'.\ + format(i, len(self.valid_loader) - 1, batch_time=batch_time, loss=losses, top1=top1) + # return top5: + test_log += '\tTop-5 acc {top5.val:.3f} ({top5.avg:.3f})'.format(top5=top5) + logger.info(test_log) + self.mutator.unused_modules_back() + return losses.avg, top1.avg, top5.avg + + def _warm_up(self): + """ + Warm up the model, during warm up, architecture weights are not trained. 
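+        Model weights are trained with a cosine-annealed learning rate that starts
+        from ``lr_max = 0.05`` and decays over ``warmup_epochs * nBatch`` minibatches.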
+ """ + lr_max = 0.05 + data_loader = self.train_loader + nBatch = len(data_loader) + T_total = self.warmup_epochs * nBatch # total num of batches + + for epoch in range(self.warmup_curr_epoch, self.warmup_epochs): + logger.info('\n--------Warmup epoch: %d--------\n', epoch + 1) + batch_time = AverageMeter('batch_time') + data_time = AverageMeter('data_time') + losses = AverageMeter('losses') + top1 = AverageMeter('top1') + top5 = AverageMeter('top5') + # switch to train mode + self.model.train() + + end = time.time() + logger.info('warm_up epoch: %d', epoch) + for i, (images, labels) in enumerate(data_loader): + data_time.update(time.time() - end) + # lr + T_cur = epoch * nBatch + i + warmup_lr = 0.5 * lr_max * (1 + math.cos(math.pi * T_cur / T_total)) + for param_group in self.model_optim.param_groups: + param_group['lr'] = warmup_lr + images, labels = images.to(self.device), labels.to(self.device) + # compute output + self.mutator.reset_binary_gates() # random sample binary gates + self.mutator.unused_modules_off() # remove unused module for speedup + output = self.model(images) + if self.label_smoothing > 0: + loss = cross_entropy_with_label_smoothing(output, labels, self.label_smoothing) + else: + loss = self.criterion(output, labels) + # measure accuracy and record loss + acc1, acc5 = accuracy(output, labels, topk=(1, 5)) + losses.update(loss, images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + # compute gradient and do SGD step + self.model.zero_grad() + loss.backward() + self.model_optim.step() + # unused modules back + self.mutator.unused_modules_back() + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0 or i + 1 == nBatch: + batch_log = 'Warmup Train [{0}][{1}/{2}]\t' \ + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \ + 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' \ + 'Loss {losses.val:.4f} ({losses.avg:.4f})\t' \ + 'Top-1 acc {top1.val:.3f} ({top1.avg:.3f})\t' \ + 'Top-5 acc {top5.val:.3f} ({top5.avg:.3f})\tlr {lr:.5f}'. \ + format(epoch + 1, i, nBatch - 1, batch_time=batch_time, data_time=data_time, + losses=losses, top1=top1, top5=top5, lr=warmup_lr) + logger.info(batch_log) + val_loss, val_top1, val_top5 = self._validate() + val_log = 'Warmup Valid [{0}/{1}]\tloss {2:.3f}\ttop-1 acc {3:.3f}\ttop-5 acc {4:.3f}\t' \ + 'Train top-1 {top1.avg:.3f}\ttop-5 {top5.avg:.3f}M'. \ + format(epoch + 1, self.warmup_epochs, val_loss, val_top1, val_top5, top1=top1, top5=top5) + logger.info(val_log) + self.save_checkpoint() + self.warmup_curr_epoch += 1 + + def _get_update_schedule(self, nBatch): + """ + Generate schedule for training architecture weights. Key means after which minibatch + to update architecture weights, value means how many steps for the update. + + Parameters + ---------- + nBatch : int + the total number of minibatches in one epoch + + Returns + ------- + dict + the schedule for updating architecture weights + """ + schedule = {} + for i in range(nBatch): + if (i + 1) % self.grad_update_arch_param_every == 0: + schedule[i] = self.grad_update_steps + return schedule + + def _calc_learning_rate(self, epoch, batch=0, nBatch=None): + """ + Update learning rate. 
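+        Cosine annealing over the whole run:
+        ``lr = 0.5 * init_lr * (1 + cos(pi * T_cur / T_total))``, where
+        ``T_cur = epoch * nBatch + batch`` and ``T_total = n_epochs * nBatch``.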
+        """
+        T_total = self.n_epochs * nBatch
+        T_cur = epoch * nBatch + batch
+        lr = 0.5 * self.init_lr * (1 + math.cos(math.pi * T_cur / T_total))
+        return lr
+
+    def _adjust_learning_rate(self, optimizer, epoch, batch=0, nBatch=None):
+        """
+        Adjust the learning rate of a given optimizer and return the new learning rate
+
+        Parameters
+        ----------
+        optimizer : pytorch optimizer
+            the used optimizer
+        epoch : int
+            the current epoch number
+        batch : int
+            the current minibatch
+        nBatch : int
+            the total number of minibatches in one epoch
+
+        Returns
+        -------
+        float
+            the adjusted learning rate
+        """
+        new_lr = self._calc_learning_rate(epoch, batch, nBatch)
+        for param_group in optimizer.param_groups:
+            param_group['lr'] = new_lr
+        return new_lr
+
+    def _train(self):
+        """
+        Train the model; it trains model weights and architecture weights.
+        Architecture weights are trained according to the schedule.
+        Before updating architecture weights, ```requires_grad``` is enabled.
+        It is disabled afterwards, so that architecture weights are not updated
+        while training model weights.
+        """
+        nBatch = len(self.train_loader)
+        arch_param_num = self.mutator.num_arch_params()
+        binary_gates_num = self.mutator.num_arch_params()
+        logger.info('#arch_params: %d\t#binary_gates: %d', arch_param_num, binary_gates_num)
+
+        update_schedule = self._get_update_schedule(nBatch)
+
+        for epoch in range(self.train_curr_epoch, self.n_epochs):
+            logger.info('\n--------Train epoch: %d--------\n', epoch + 1)
+            batch_time = AverageMeter('batch_time')
+            data_time = AverageMeter('data_time')
+            losses = AverageMeter('losses')
+            top1 = AverageMeter('top1')
+            top5 = AverageMeter('top5')
+            # switch to train mode
+            self.model.train()
+
+            end = time.time()
+            for i, (images, labels) in enumerate(self.train_loader):
+                data_time.update(time.time() - end)
+                lr = self._adjust_learning_rate(self.model_optim, epoch, batch=i, nBatch=nBatch)
+                # train weight parameters
+                images, labels = images.to(self.device), labels.to(self.device)
+                self.mutator.reset_binary_gates()
+                self.mutator.unused_modules_off()
+                output = self.model(images)
+                if self.label_smoothing > 0:
+                    loss = cross_entropy_with_label_smoothing(output, labels, self.label_smoothing)
+                else:
+                    loss = self.criterion(output, labels)
+                acc1, acc5 = accuracy(output, labels, topk=(1, 5))
+                losses.update(loss, images.size(0))
+                top1.update(acc1[0], images.size(0))
+                top5.update(acc5[0], images.size(0))
+                self.model.zero_grad()
+                loss.backward()
+                self.model_optim.step()
+                self.mutator.unused_modules_back()
+                if epoch > 0:
+                    for _ in range(update_schedule.get(i, 0)):
+                        start_time = time.time()
+                        # GradientArchSearchConfig
+                        self.mutator.arch_requires_grad()
+                        arch_loss, exp_value = self._gradient_step()
+                        self.mutator.arch_disable_grad()
+                        used_time = time.time() - start_time
+                        log_str = 'Architecture [%d-%d]\t Time %.4f\t Loss %.4f\t null %s' % \
+                            (epoch + 1, i, used_time, arch_loss, exp_value)
+                        logger.info(log_str)
+                batch_time.update(time.time() - end)
+                end = time.time()
+                # training log
+                if i % 10 == 0 or i + 1 == nBatch:
+                    batch_log = 'Train [{0}][{1}/{2}]\t' \
+                                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
+                                'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t' \
+                                'Loss {losses.val:.4f} ({losses.avg:.4f})\t' \
+                                'Top-1 acc {top1.val:.3f} ({top1.avg:.3f})\t' \
+                                'Top-5 acc {top5.val:.3f} ({top5.avg:.3f})\tlr {lr:.5f}'.
\ + format(epoch + 1, i, nBatch - 1, batch_time=batch_time, data_time=data_time, + losses=losses, top1=top1, top5=top5, lr=lr) + logger.info(batch_log) + # validate + if (epoch + 1) % self.arch_valid_frequency == 0: + val_loss, val_top1, val_top5 = self._validate() + val_log = 'Valid [{0}]\tloss {1:.3f}\ttop-1 acc {2:.3f} \ttop-5 acc {3:.3f}\t' \ + 'Train top-1 {top1.avg:.3f}\ttop-5 {top5.avg:.3f}'. \ + format(epoch + 1, val_loss, val_top1, val_top5, top1=top1, top5=top5) + logger.info(val_log) + self.save_checkpoint() + self.train_curr_epoch += 1 + + def _valid_next_batch(self): + """ + Get next one minibatch from validation set + + Returns + ------- + (tensor, tensor) + the tuple of images and labels + """ + if self._valid_iter is None: + self._valid_iter = iter(self.valid_loader) + try: + data = next(self._valid_iter) + except StopIteration: + self._valid_iter = iter(self.valid_loader) + data = next(self._valid_iter) + return data + + def _gradient_step(self): + """ + This gradient step is for updating architecture weights. + Mutator is intensively used in this function to operate on + architecture weights. + + Returns + ------- + float, None + loss of the model, None + """ + # use the same batch size as train batch size for architecture weights + self.valid_loader.batch_sampler.batch_size = self.train_batch_size + self.valid_loader.batch_sampler.drop_last = True + self.model.train() + self.mutator.change_forward_mode(self.binary_mode) + time1 = time.time() # time + # sample a batch of data from validation set + images, labels = self._valid_next_batch() + images, labels = images.to(self.device), labels.to(self.device) + time2 = time.time() # time + self.mutator.reset_binary_gates() + self.mutator.unused_modules_off() + output = self.model(images) + time3 = time.time() + ce_loss = self.criterion(output, labels) + expected_value = None + loss = ce_loss + self.model.zero_grad() + loss.backward() + self.mutator.set_arch_param_grad() + self.arch_optimizer.step() + if self.mutator.get_forward_mode() == 'two': + self.mutator.rescale_updated_arch_param() + self.mutator.unused_modules_back() + self.mutator.change_forward_mode(None) + time4 = time.time() + logger.info('(%.4f, %.4f, %.4f)', time2 - time1, time3 - time2, time4 - time3) + return loss.data.item(), expected_value.item() if expected_value is not None else None + + def save_checkpoint(self): + """ + Save checkpoint of the whole model. Saving model weights and architecture weights in + ```ckpt_path```, and saving currently chosen architecture in ```arch_path```. + """ + if self.ckpt_path: + state = { + 'warmup_curr_epoch': self.warmup_curr_epoch, + 'train_curr_epoch': self.train_curr_epoch, + 'model': self.model.state_dict(), + 'optim': self.model_optim.state_dict(), + 'arch_optim': self.arch_optimizer.state_dict() + } + torch.save(state, self.ckpt_path) + if self.arch_path: + self.export(self.arch_path) + + def load_checkpoint(self): + """ + Load the checkpoint from ```ckpt_path```. + """ + assert self.ckpt_path is not None, "If load_ckpt is not None, ckpt_path should not be None" + ckpt = torch.load(self.ckpt_path) + self.warmup_curr_epoch = ckpt['warmup_curr_epoch'] + self.train_curr_epoch = ckpt['train_curr_epoch'] + self.model.load_state_dict(ckpt['model']) + self.model_optim.load_state_dict(ckpt['optim']) + self.arch_optimizer.load_state_dict(ckpt['arch_optim']) + + def train(self): + """ + Train the whole model. 
+        """
+        if self.load_ckpt:
+            self.load_checkpoint()
+        if self.warmup:
+            self._warm_up()
+        self._train()
+
+    def export(self, file_name):
+        """
+        Export the chosen architecture into a file
+
+        Parameters
+        ----------
+        file_name : str
+            the file that stores exported chosen architecture
+        """
+        exported_arch = self.mutator.sample_final()
+        with open(file_name, 'w') as f:
+            json.dump(exported_arch, f, indent=2, sort_keys=True, cls=TorchTensorEncoder)
+
+    def validate(self):
+        raise NotImplementedError
+
+    def checkpoint(self):
+        raise NotImplementedError
diff --git a/nni/algorithms/nas/pytorch/proxylessnas/utils.py b/nni/algorithms/nas/pytorch/proxylessnas/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c532efc04c7035658cd5becb8b06b7412ffa6ee0
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/proxylessnas/utils.py
@@ -0,0 +1,78 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch
+import torch.nn as nn
+
+def detach_variable(inputs):
+    """
+    Detach variables
+
+    Parameters
+    ----------
+    inputs : pytorch tensors
+        pytorch tensors
+    """
+    if isinstance(inputs, tuple):
+        return tuple([detach_variable(x) for x in inputs])
+    else:
+        x = inputs.detach()
+        x.requires_grad = inputs.requires_grad
+        return x
+
+def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):
+    """
+    Parameters
+    ----------
+    pred : pytorch tensor
+        predicted value
+    target : pytorch tensor
+        label
+    label_smoothing : float
+        the degree of label smoothing
+
+    Returns
+    -------
+    pytorch tensor
+        cross entropy
+    """
+    logsoftmax = nn.LogSoftmax(dim=1)
+    n_classes = pred.size(1)
+    # convert to one-hot
+    target = torch.unsqueeze(target, 1)
+    soft_target = torch.zeros_like(pred)
+    soft_target.scatter_(1, target, 1)
+    # label smoothing
+    soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
+    return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
+
+def accuracy(output, target, topk=(1,)):
+    """
+    Computes the precision@k for the specified values of k
+
+    Parameters
+    ----------
+    output : pytorch tensor
+        output, e.g., predicted value
+    target : pytorch tensor
+        label
+    topk : tuple
+        the k values, e.g., (1, 5) for top-1 and top-5
+
+    Returns
+    -------
+    list
+        accuracies for each requested k
+    """
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
diff --git a/nni/algorithms/nas/pytorch/random/__init__.py b/nni/algorithms/nas/pytorch/random/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4102266ca2a03d29c1c16c90b688865ce493b7a
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/random/__init__.py
@@ -0,0 +1 @@
+from .mutator import RandomMutator
diff --git a/nni/algorithms/nas/pytorch/random/mutator.py b/nni/algorithms/nas/pytorch/random/mutator.py
new file mode 100644
index 0000000000000000000000000000000000000000..f302db56c0cdbd611f648342cc26760d344958c4
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/random/mutator.py
@@ -0,0 +1,36 @@
+import torch
+import torch.nn.functional as F
+
+from nni.nas.pytorch.mutator import Mutator
+from nni.nas.pytorch.mutables import LayerChoice, InputChoice
+
+
+class RandomMutator(Mutator):
+    """
+    Random mutator that samples a random candidate from the search space each time ``reset()`` is called.
+ It uses random function in PyTorch, so users can set seed in PyTorch to ensure deterministic behavior. + """ + + def sample_search(self): + """ + Sample a random candidate. + """ + result = dict() + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + gen_index = torch.randint(high=len(mutable), size=(1, )) + result[mutable.key] = F.one_hot(gen_index, num_classes=len(mutable)).view(-1).bool() + elif isinstance(mutable, InputChoice): + if mutable.n_chosen is None: + result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool() + else: + perm = torch.randperm(mutable.n_candidates) + mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)] + result[mutable.key] = torch.tensor(mask, dtype=torch.bool) # pylint: disable=not-callable + return result + + def sample_final(self): + """ + Same as :meth:`sample_search`. + """ + return self.sample_search() diff --git a/nni/algorithms/nas/pytorch/spos/__init__.py b/nni/algorithms/nas/pytorch/spos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed432b0845154c4745aa82c8e6a8bad4290237aa --- /dev/null +++ b/nni/algorithms/nas/pytorch/spos/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .evolution import SPOSEvolution +from .mutator import SPOSSupernetTrainingMutator +from .trainer import SPOSSupernetTrainer diff --git a/nni/algorithms/nas/pytorch/spos/evolution.py b/nni/algorithms/nas/pytorch/spos/evolution.py new file mode 100644 index 0000000000000000000000000000000000000000..bd099e276ebfd4aa68215dbbd25e6443a45b9dd1 --- /dev/null +++ b/nni/algorithms/nas/pytorch/spos/evolution.py @@ -0,0 +1,223 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import logging +import os +import re +from collections import deque + +import numpy as np +from nni.tuner import Tuner +from nni.algorithms.nas.pytorch.classic_nas.mutator import LAYER_CHOICE, INPUT_CHOICE + + +_logger = logging.getLogger(__name__) + + +class SPOSEvolution(Tuner): + """ + SPOS evolution tuner. + + Parameters + ---------- + max_epochs : int + Maximum number of epochs to run. + num_select : int + Number of survival candidates of each epoch. + num_population : int + Number of candidates at the start of each epoch. If candidates generated by + crossover and mutation are not enough, the rest will be filled with random + candidates. + m_prob : float + The probability of mutation. + num_crossover : int + Number of candidates generated by crossover in each epoch. + num_mutation : int + Number of candidates generated by mutation in each epoch. + """ + + def __init__(self, max_epochs=20, num_select=10, num_population=50, m_prob=0.1, + num_crossover=25, num_mutation=25): + assert num_population >= num_select + self.max_epochs = max_epochs + self.num_select = num_select + self.num_population = num_population + self.m_prob = m_prob + self.num_crossover = num_crossover + self.num_mutation = num_mutation + self.epoch = 0 + self.candidates = [] + self.search_space = None + self.random_state = np.random.RandomState(0) + + # async status + self._to_evaluate_queue = deque() + self._sending_parameter_queue = deque() + self._pending_result_ids = set() + self._reward_dict = dict() + self._id2candidate = dict() + self._st_callback = None + + def update_search_space(self, search_space): + """ + Handle the initialization/update event of search space. 
+ """ + self._search_space = search_space + self._next_round() + + def _next_round(self): + _logger.info("Epoch %d, generating...", self.epoch) + if self.epoch == 0: + self._get_random_population() + self.export_results(self.candidates) + else: + best_candidates = self._select_top_candidates() + self.export_results(best_candidates) + if self.epoch >= self.max_epochs: + return + self.candidates = self._get_mutation(best_candidates) + self._get_crossover(best_candidates) + self._get_random_population() + self.epoch += 1 + + def _random_candidate(self): + chosen_arch = dict() + for key, val in self._search_space.items(): + if val["_type"] == LAYER_CHOICE: + choices = val["_value"] + index = self.random_state.randint(len(choices)) + chosen_arch[key] = {"_value": choices[index], "_idx": index} + elif val["_type"] == INPUT_CHOICE: + raise NotImplementedError("Input choice is not implemented yet.") + return chosen_arch + + def _add_to_evaluate_queue(self, cand): + _logger.info("Generate candidate %s, adding to eval queue.", self._get_architecture_repr(cand)) + self._reward_dict[self._hashcode(cand)] = 0. + self._to_evaluate_queue.append(cand) + + def _get_random_population(self): + while len(self.candidates) < self.num_population: + cand = self._random_candidate() + if self._is_legal(cand): + _logger.info("Random candidate generated.") + self._add_to_evaluate_queue(cand) + self.candidates.append(cand) + + def _get_crossover(self, best): + result = [] + for _ in range(10 * self.num_crossover): + cand_p1 = best[self.random_state.randint(len(best))] + cand_p2 = best[self.random_state.randint(len(best))] + assert cand_p1.keys() == cand_p2.keys() + cand = {k: cand_p1[k] if self.random_state.randint(2) == 0 else cand_p2[k] + for k in cand_p1.keys()} + if self._is_legal(cand): + result.append(cand) + self._add_to_evaluate_queue(cand) + if len(result) >= self.num_crossover: + break + _logger.info("Found %d architectures with crossover.", len(result)) + return result + + def _get_mutation(self, best): + result = [] + for _ in range(10 * self.num_mutation): + cand = best[self.random_state.randint(len(best))].copy() + mutation_sample = np.random.random_sample(len(cand)) + for s, k in zip(mutation_sample, cand): + if s < self.m_prob: + choices = self._search_space[k]["_value"] + index = self.random_state.randint(len(choices)) + cand[k] = {"_value": choices[index], "_idx": index} + if self._is_legal(cand): + result.append(cand) + self._add_to_evaluate_queue(cand) + if len(result) >= self.num_mutation: + break + _logger.info("Found %d architectures with mutation.", len(result)) + return result + + def _get_architecture_repr(self, cand): + return re.sub(r"\".*?\": \{\"_idx\": (\d+), \"_value\": \".*?\"\}", r"\1", + self._hashcode(cand)) + + def _is_legal(self, cand): + if self._hashcode(cand) in self._reward_dict: + return False + return True + + def _select_top_candidates(self): + reward_query = lambda cand: self._reward_dict[self._hashcode(cand)] + _logger.info("All candidate rewards: %s", list(map(reward_query, self.candidates))) + result = sorted(self.candidates, key=reward_query, reverse=True)[:self.num_select] + _logger.info("Best candidate rewards: %s", list(map(reward_query, result))) + return result + + @staticmethod + def _hashcode(d): + return json.dumps(d, sort_keys=True) + + def _bind_and_send_parameters(self): + """ + There are two types of resources: parameter ids and candidates. This function is called at + necessary times to bind these resources to send new trials with st_callback. 
+        """
+        result = []
+        while self._sending_parameter_queue and self._to_evaluate_queue:
+            parameter_id = self._sending_parameter_queue.popleft()
+            parameters = self._to_evaluate_queue.popleft()
+            self._id2candidate[parameter_id] = parameters
+            result.append(parameters)
+            self._pending_result_ids.add(parameter_id)
+            self._st_callback(parameter_id, parameters)
+            _logger.info("Send parameter [%d] %s.", parameter_id, self._get_architecture_repr(parameters))
+        return result
+
+    def generate_multiple_parameters(self, parameter_id_list, **kwargs):
+        """
+        Callback function necessary to implement a tuner. This will put more parameter ids into the
+        parameter id queue.
+        """
+        if "st_callback" in kwargs and self._st_callback is None:
+            self._st_callback = kwargs["st_callback"]
+        for parameter_id in parameter_id_list:
+            self._sending_parameter_queue.append(parameter_id)
+        self._bind_and_send_parameters()
+        return []  # always return an empty list; parameters are sent via st_callback to avoid over-sending
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        """
+        Callback function. Receive a trial result.
+        """
+        _logger.info("Candidate %d, reported reward %f", parameter_id, value)
+        self._reward_dict[self._hashcode(self._id2candidate[parameter_id])] = value
+
+    def trial_end(self, parameter_id, success, **kwargs):
+        """
+        Callback function invoked when a trial ends and its resource is released.
+        """
+        self._pending_result_ids.remove(parameter_id)
+        if not self._pending_result_ids and not self._to_evaluate_queue:
+            # a new epoch now
+            self._next_round()
+            assert self._st_callback is not None
+            self._bind_and_send_parameters()
+
+    def export_results(self, result):
+        """
+        Export a number of candidates to the `checkpoints` dir.
+
+        Parameters
+        ----------
+        result : list of dict
+            Chosen architectures to be exported.
+        """
+        os.makedirs("checkpoints", exist_ok=True)
+        for i, cand in enumerate(result):
+            converted = dict()
+            for cand_key, cand_val in cand.items():
+                onehot = [k == cand_val["_idx"] for k in range(len(self._search_space[cand_key]["_value"]))]
+                converted[cand_key] = onehot
+            with open(os.path.join("checkpoints", "%03d_%03d.json" % (self.epoch, i)), "w") as fp:
+                json.dump(converted, fp)
diff --git a/nni/algorithms/nas/pytorch/spos/mutator.py b/nni/algorithms/nas/pytorch/spos/mutator.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a803cb2e820b0df2dd8b04b3d387af68d3d2680
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/spos/mutator.py
@@ -0,0 +1,66 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+
+import numpy as np
+from nni.algorithms.nas.pytorch.random import RandomMutator
+
+_logger = logging.getLogger(__name__)
+
+
+class SPOSSupernetTrainingMutator(RandomMutator):
+    """
+    A random mutator with a flops limit.
+
+    Parameters
+    ----------
+    model : nn.Module
+        PyTorch model.
+    flops_func : callable
+        Callable that takes a candidate from `sample_search` and returns its flops. When `flops_func`
+        is None, functions related to flops will be deactivated.
+    flops_lb : number
+        Lower bound of flops.
+    flops_ub : number
+        Upper bound of flops.
+    flops_bin_num : int
+        Number of bins the flops interval is divided into, to ensure uniformity. A bigger number gives
+        more uniform sampling, but sampling will be slower.
+    flops_sample_timeout : int
+        Maximum number of attempts to sample before giving up and using a random candidate.
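+
+    Examples
+    --------
+    A sketch of wiring up a flops constraint (``count_flops`` is a hypothetical
+    helper and the bounds are illustrative, not part of NNI)::
+
+        mutator = SPOSSupernetTrainingMutator(
+            model,
+            flops_func=lambda cand: count_flops(model, cand),
+            flops_lb=290e6, flops_ub=360e6)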
+    """
+    def __init__(self, model, flops_func=None, flops_lb=None, flops_ub=None,
+                 flops_bin_num=7, flops_sample_timeout=500):
+
+        super().__init__(model)
+        self._flops_func = flops_func
+        if self._flops_func is not None:
+            self._flops_bin_num = flops_bin_num
+            self._flops_bins = [flops_lb + (flops_ub - flops_lb) / flops_bin_num * i for i in range(flops_bin_num + 1)]
+            self._flops_sample_timeout = flops_sample_timeout
+
+    def sample_search(self):
+        """
+        Sample a candidate for training. When `flops_func` is not None, candidates will be sampled uniformly
+        relative to flops.
+
+        Returns
+        -------
+        dict
+        """
+        if self._flops_func is not None:
+            for times in range(self._flops_sample_timeout):
+                idx = np.random.randint(self._flops_bin_num)
+                cand = super().sample_search()
+                cand_flops = self._flops_func(cand)
+                if self._flops_bins[idx] <= cand_flops <= self._flops_bins[idx + 1]:
+                    _logger.debug("Sampled candidate flops %f in %d times.", cand_flops, times)
+                    return cand
+            _logger.warning("Failed to sample a flops-valid candidate within %d tries.", self._flops_sample_timeout)
+        return super().sample_search()
+
+    def sample_final(self):
+        """
+        Implemented only to satisfy the interface of Mutator.
+        """
+        return self.sample_search()
diff --git a/nni/algorithms/nas/pytorch/spos/trainer.py b/nni/algorithms/nas/pytorch/spos/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c954e2ad4c913fd16f8e9adf195f2ebb14b1e9b
--- /dev/null
+++ b/nni/algorithms/nas/pytorch/spos/trainer.py
@@ -0,0 +1,95 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+
+import torch
+from nni.nas.pytorch.trainer import Trainer
+from nni.nas.pytorch.utils import AverageMeterGroup
+
+from .mutator import SPOSSupernetTrainingMutator
+
+logger = logging.getLogger(__name__)
+
+
+class SPOSSupernetTrainer(Trainer):
+    """
+    This trainer trains a supernet that can be used for evolution search.
+
+    Parameters
+    ----------
+    model : nn.Module
+        Model with mutables.
+    mutator : nni.nas.pytorch.mutator.Mutator
+        A mutator object that has been initialized with the model.
+    loss : callable
+        Called with logits and targets. Returns a loss tensor.
+    metrics : callable
+        Returns a dict that maps metrics keys to metrics data.
+    optimizer : Optimizer
+        Optimizer that optimizes the model.
+    num_epochs : int
+        Number of epochs of training.
+    train_loader : iterable
+        Data loader of training. Raises ``StopIteration`` when one epoch is exhausted.
+    valid_loader : iterable
+        Data loader of validation. Raises ``StopIteration`` when one epoch is exhausted.
+    batch_size : int
+        Batch size.
+    workers : int
+        Number of threads for data preprocessing. Not used for this trainer. May be removed in the future.
+    device : torch.device
+        Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, the trainer
+        automatically detects GPU and prefers GPU when available.
+    log_frequency : int
+        Number of mini-batches between logging of metrics.
+    callbacks : list of Callback
+        Callbacks to plug into the trainer. See Callbacks.
+    """
+
+    def __init__(self, model, loss, metrics,
+                 optimizer, num_epochs, train_loader, valid_loader,
+                 mutator=None, batch_size=64, workers=4, device=None, log_frequency=None,
+                 callbacks=None):
+        assert torch.cuda.is_available()
+        super().__init__(model, mutator if mutator is not None else SPOSSupernetTrainingMutator(model),
+                         loss, metrics, optimizer, num_epochs, None, None,
+                         batch_size, workers, device, log_frequency, callbacks)
+
+        self.train_loader = train_loader
+        self.valid_loader = valid_loader
+
+    def train_one_epoch(self, epoch):
+        self.model.train()
+        meters = AverageMeterGroup()
+        for step, (x, y) in enumerate(self.train_loader):
+            x, y = x.to(self.device), y.to(self.device)
+            self.optimizer.zero_grad()
+            self.mutator.reset()
+            logits = self.model(x)
+            loss = self.loss(logits, y)
+            loss.backward()
+            self.optimizer.step()
+
+            metrics = self.metrics(logits, y)
+            metrics["loss"] = loss.item()
+            meters.update(metrics)
+            if self.log_frequency is not None and step % self.log_frequency == 0:
+                logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
+                            self.num_epochs, step + 1, len(self.train_loader), meters)
+
+    def validate_one_epoch(self, epoch):
+        self.model.eval()
+        meters = AverageMeterGroup()
+        with torch.no_grad():
+            for step, (x, y) in enumerate(self.valid_loader):
+                x, y = x.to(self.device), y.to(self.device)
+                self.mutator.reset()
+                logits = self.model(x)
+                loss = self.loss(logits, y)
+                metrics = self.metrics(logits, y)
+                metrics["loss"] = loss.item()
+                meters.update(metrics)
+                if self.log_frequency is not None and step % self.log_frequency == 0:
+                    logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1,
+                                self.num_epochs, step + 1, len(self.valid_loader), meters)
diff --git a/nni/algorithms/nas/tensorflow/__init__.py b/nni/algorithms/nas/tensorflow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nni/algorithms/nas/tensorflow/classic_nas/__init__.py b/nni/algorithms/nas/tensorflow/classic_nas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec3f5a4894a0460d4a0582a0d6cd43af9bed77e2
--- /dev/null
+++ b/nni/algorithms/nas/tensorflow/classic_nas/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .mutator import get_and_apply_next_architecture
diff --git a/nni/algorithms/nas/tensorflow/classic_nas/mutator.py b/nni/algorithms/nas/tensorflow/classic_nas/mutator.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb089c49b89d2b13579f50bc447a78fa8644dd7c
--- /dev/null
+++ b/nni/algorithms/nas/tensorflow/classic_nas/mutator.py
@@ -0,0 +1,217 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+# pylint: skip-file
+
+import json
+import logging
+import os
+import sys
+
+import tensorflow as tf
+
+import nni
+from nni.runtime.env_vars import trial_env_vars
+from nni.nas.tensorflow.mutables import LayerChoice, InputChoice, MutableScope
+from nni.nas.tensorflow.mutator import Mutator
+
+logger = logging.getLogger(__name__)
+
+NNI_GEN_SEARCH_SPACE = "NNI_GEN_SEARCH_SPACE"
+LAYER_CHOICE = "layer_choice"
+INPUT_CHOICE = "input_choice"
+
+
+def get_and_apply_next_architecture(model):
+    """
+    Wrapper of :class:`~nni.nas.tensorflow.classic_nas.mutator.ClassicMutator` to make it more meaningful,
+    similar to ``get_next_parameter`` for HPO.
+    It will generate search space based on ``model``.
+    If env ``NNI_GEN_SEARCH_SPACE`` exists, this is in dry-run mode for
+    generating search space for the experiment.
+    If not, there are still two modes: one is the nni experiment mode, where users
+    use ``nnictl`` to start an experiment; the other is standalone mode, where users
+    directly run the trial command. In the latter mode, this wrapper chooses the first
+    one(s) for each LayerChoice and InputChoice.
+    Parameters
+    ----------
+    model : nn.Module
+        User's model with search space (e.g., LayerChoice, InputChoice) embedded in it.
+    """
+    ClassicMutator(model)
+
+
+class ClassicMutator(Mutator):
+    """
+    This mutator applies the architecture chosen by the tuner.
+    It implements the forward function of LayerChoice and InputChoice,
+    to only activate the chosen ones.
+    Parameters
+    ----------
+    model : nn.Module
+        User's model with search space (e.g., LayerChoice, InputChoice) embedded in it.
+    """
+
+    def __init__(self, model):
+        super(ClassicMutator, self).__init__(model)
+        self._chosen_arch = {}
+        self._search_space = self._generate_search_space()
+        if NNI_GEN_SEARCH_SPACE in os.environ:
+            # dry run for only generating search space
+            self._dump_search_space(os.environ[NNI_GEN_SEARCH_SPACE])
+            sys.exit(0)
+
+        if trial_env_vars.NNI_PLATFORM is None:
+            logger.warning("This is in standalone mode, the chosen are the first one(s).")
+            self._chosen_arch = self._standalone_generate_chosen()
+        else:
+            # get chosen arch from tuner
+            self._chosen_arch = nni.get_next_parameter()
+            if self._chosen_arch is None:
+                if trial_env_vars.NNI_PLATFORM == "unittest":
+                    # happens if NNI_PLATFORM is intentionally set, e.g., in UT
+                    logger.warning("`NNI_PLATFORM` is set but `param` is None. Falling back to standalone mode.")
+                    self._chosen_arch = self._standalone_generate_chosen()
+                else:
+                    raise RuntimeError("Chosen architecture is None. This may be a platform error.")
+        self.reset()
+
+    def _sample_layer_choice(self, mutable, idx, value, search_space_item):
+        """
+        Convert layer choice to tensor representation.
+        Parameters
+        ----------
+        mutable : Mutable
+        idx : int
+            Element `idx` of the list will be selected.
+        value : str
+            The verbose representation of the selected value.
+        search_space_item : list
+            The list for the corresponding search space.
+        """
+        # doesn't support multihot for layer choice yet
+        assert 0 <= idx < len(mutable) and search_space_item[idx] == value, \
+            "Index '{}' in search space '{}' is not '{}'".format(idx, search_space_item, value)
+        mask = tf.one_hot(idx, len(mutable))
+        return tf.cast(tf.reshape(mask, [-1]), tf.bool)
+
+    def _sample_input_choice(self, mutable, idx, value, search_space_item):
+        """
+        Convert input choice to tensor representation.
+        Parameters
+        ----------
+        mutable : Mutable
+        idx : int
+            Element `idx` of the list will be selected.
+        value : str
+            The verbose representation of the selected value.
+        search_space_item : list
+            The list for the corresponding search space.
+        """
+        candidate_repr = search_space_item["candidates"]
+        multihot_list = [False] * mutable.n_candidates
+        for i, v in zip(idx, value):
+            assert 0 <= i < mutable.n_candidates and candidate_repr[i] == v, \
+                "Index '{}' in search space '{}' is not '{}'".format(i, candidate_repr, v)
+            assert not multihot_list[i], "'{}' is selected twice in '{}', which is not allowed.".format(i, idx)
+            multihot_list[i] = True
+        return tf.cast(multihot_list, tf.bool)  # pylint: disable=not-callable
+
+    def sample_search(self):
+        """
+        See :meth:`sample_final`.
+ """ + return self.sample_final() + + def sample_final(self): + """ + Convert the chosen arch and apply it on model. + """ + assert set(self._chosen_arch.keys()) == set(self._search_space.keys()), \ + "Unmatched keys, expected keys '{}' from search space, found '{}'.".format(self._search_space.keys(), + self._chosen_arch.keys()) + result = dict() + for mutable in self.mutables: + if isinstance(mutable, (LayerChoice, InputChoice)): + assert mutable.key in self._chosen_arch, \ + "Expected '{}' in chosen arch, but not found.".format(mutable.key) + data = self._chosen_arch[mutable.key] + assert isinstance(data, dict) and "_value" in data and "_idx" in data, \ + "'{}' is not a valid choice.".format(data) + if isinstance(mutable, LayerChoice): + result[mutable.key] = self._sample_layer_choice(mutable, data["_idx"], data["_value"], + self._search_space[mutable.key]["_value"]) + elif isinstance(mutable, InputChoice): + result[mutable.key] = self._sample_input_choice(mutable, data["_idx"], data["_value"], + self._search_space[mutable.key]["_value"]) + elif isinstance(mutable, MutableScope): + logger.info("Mutable scope '%s' is skipped during parsing choices.", mutable.key) + else: + raise TypeError("Unsupported mutable type: '%s'." % type(mutable)) + return result + + def _standalone_generate_chosen(self): + """ + Generate the chosen architecture for standalone mode, + i.e., choose the first one(s) for LayerChoice and InputChoice. + :: + { key_name: {"_value": "conv1", + "_idx": 0} } + { key_name: {"_value": ["in1"], + "_idx": [0]} } + Returns + ------- + dict + the chosen architecture + """ + chosen_arch = {} + for key, val in self._search_space.items(): + if val["_type"] == LAYER_CHOICE: + choices = val["_value"] + chosen_arch[key] = {"_value": choices[0], "_idx": 0} + elif val["_type"] == INPUT_CHOICE: + choices = val["_value"]["candidates"] + n_chosen = val["_value"]["n_chosen"] + if n_chosen is None: + n_chosen = len(choices) + chosen_arch[key] = {"_value": choices[:n_chosen], "_idx": list(range(n_chosen))} + else: + raise ValueError("Unknown key '%s' and value '%s'." % (key, val)) + return chosen_arch + + def _generate_search_space(self): + """ + Generate search space from mutables. + Here is the search space format: + :: + { key_name: {"_type": "layer_choice", + "_value": ["conv1", "conv2"]} } + { key_name: {"_type": "input_choice", + "_value": {"candidates": ["in1", "in2"], + "n_chosen": 1}} } + Returns + ------- + dict + the generated search space + """ + search_space = {} + for mutable in self.mutables: + # for now we only generate flattened search space + if isinstance(mutable, LayerChoice): + key = mutable.key + val = mutable.names + search_space[key] = {"_type": LAYER_CHOICE, "_value": val} + elif isinstance(mutable, InputChoice): + key = mutable.key + search_space[key] = {"_type": INPUT_CHOICE, + "_value": {"candidates": mutable.choose_from, + "n_chosen": mutable.n_chosen}} + elif isinstance(mutable, MutableScope): + logger.info("Mutable scope '%s' is skipped during generating search space.", mutable.key) + else: + raise TypeError("Unsupported mutable type: '%s'." 
% type(mutable)) + return search_space + + def _dump_search_space(self, file_path): + with open(file_path, "w") as ss_file: + json.dump(self._search_space, ss_file, sort_keys=True, indent=2) diff --git a/nni/algorithms/nas/tensorflow/enas/__init__.py b/nni/algorithms/nas/tensorflow/enas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3372836ebba2387ade58161218aad731433e46b --- /dev/null +++ b/nni/algorithms/nas/tensorflow/enas/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .mutator import EnasMutator +from .trainer import EnasTrainer diff --git a/nni/algorithms/nas/tensorflow/enas/mutator.py b/nni/algorithms/nas/tensorflow/enas/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..313c81cc9b6e7a1cbd6c4b78c1fa22f2ff5ed4a3 --- /dev/null +++ b/nni/algorithms/nas/tensorflow/enas/mutator.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +# pylint: skip-file + +import tensorflow as tf +from tensorflow.keras.layers import Dense, Embedding, LSTMCell, RNN +from tensorflow.keras.losses import SparseCategoricalCrossentropy, Reduction + +from nni.nas.tensorflow.mutator import Mutator +from nni.nas.tensorflow.mutables import LayerChoice, InputChoice, MutableScope + + +class EnasMutator(Mutator): + def __init__(self, model, + lstm_size=64, + lstm_num_layers=1, + tanh_constant=1.5, + cell_exit_extra_step=False, + skip_target=0.4, + temperature=None, + branch_bias=0.25, + entropy_reduction='sum'): + super().__init__(model) + self.tanh_constant = tanh_constant + self.temperature = temperature + self.cell_exit_extra_step = cell_exit_extra_step + + cells = [LSTMCell(units=lstm_size, use_bias=False) for _ in range(lstm_num_layers)] + self.lstm = RNN(cells, stateful=True) + self.g_emb = tf.random.normal((1, 1, lstm_size)) * 0.1 + self.skip_targets = tf.constant([1.0 - skip_target, skip_target]) + + self.max_layer_choice = 0 + self.bias_dict = {} + for mutable in self.mutables: + if isinstance(mutable, LayerChoice): + if self.max_layer_choice == 0: + self.max_layer_choice = len(mutable) + assert self.max_layer_choice == len(mutable), \ + "ENAS mutator requires all layer choice have the same number of candidates." + if 'reduce' in mutable.key: + bias = [] + for choice in mutable.choices: + if 'conv' in str(type(choice)).lower(): + bias.append(branch_bias) + else: + bias.append(-branch_bias) + self.bias_dict[mutable.key] = tf.constant(bias) + + # exposed for trainer + self.sample_log_prob = 0 + self.sample_entropy = 0 + self.sample_skip_penalty = 0 + + # internal nn layers + self.embedding = Embedding(self.max_layer_choice + 1, lstm_size) + self.soft = Dense(self.max_layer_choice, use_bias=False) + self.attn_anchor = Dense(lstm_size, use_bias=False) + self.attn_query = Dense(lstm_size, use_bias=False) + self.v_attn = Dense(1, use_bias=False) + assert entropy_reduction in ['sum', 'mean'], 'Entropy reduction must be one of sum and mean.' 
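+        # how the per-decision log-probs/entropies are reduced before being
+        # accumulated: 'sum' adds them up, 'mean' averages them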
+ self.entropy_reduction = tf.reduce_sum if entropy_reduction == 'sum' else tf.reduce_mean + self.cross_entropy_loss = SparseCategoricalCrossentropy(from_logits=True, reduction=Reduction.NONE) + + self._first_sample = True + + def sample_search(self): + self._initialize() + self._sample(self.mutables) + self._first_sample = False + return self._choices + + def sample_final(self): + return self.sample_search() + + def _sample(self, tree): + mutable = tree.mutable + if isinstance(mutable, LayerChoice) and mutable.key not in self._choices: + self._choices[mutable.key] = self._sample_layer_choice(mutable) + elif isinstance(mutable, InputChoice) and mutable.key not in self._choices: + self._choices[mutable.key] = self._sample_input_choice(mutable) + for child in tree.children: + self._sample(child) + if self.cell_exit_extra_step and isinstance(mutable, MutableScope) and mutable.key not in self._anchors_hid: + self._anchors_hid[mutable.key] = self.lstm(self._inputs, 1) + + def _initialize(self): + self._choices = {} + self._anchors_hid = {} + self._inputs = self.g_emb + # seems the `input_shape` parameter of RNN does not work + # workaround it by omitting `reset_states` for first run + if not self._first_sample: + self.lstm.reset_states() + self.sample_log_prob = 0 + self.sample_entropy = 0 + self.sample_skip_penalty = 0 + + def _sample_layer_choice(self, mutable): + logit = self.soft(self.lstm(self._inputs)) + if self.temperature is not None: + logit /= self.temperature + if self.tanh_constant is not None: + logit = self.tanh_constant * tf.tanh(logit) + if mutable.key in self.bias_dict: + logit += self.bias_dict[mutable.key] + softmax_logit = tf.math.log(tf.nn.softmax(logit, axis=-1)) + branch_id = tf.reshape(tf.random.categorical(softmax_logit, num_samples=1), [1]) + log_prob = self.cross_entropy_loss(branch_id, logit) + self.sample_log_prob += self.entropy_reduction(log_prob) + entropy = log_prob * tf.math.exp(-log_prob) + self.sample_entropy += self.entropy_reduction(entropy) + self._inputs = tf.reshape(self.embedding(branch_id), [1, 1, -1]) + mask = tf.one_hot(branch_id, self.max_layer_choice) + return tf.cast(tf.reshape(mask, [-1]), tf.bool) + + def _sample_input_choice(self, mutable): + query, anchors = [], [] + for label in mutable.choose_from: + if label not in self._anchors_hid: + self._anchors_hid[label] = self.lstm(self._inputs) + query.append(self.attn_anchor(self._anchors_hid[label])) + anchors.append(self._anchors_hid[label]) + query = tf.concat(query, axis=0) + query = tf.tanh(query + self.attn_query(anchors[-1])) + query = self.v_attn(query) + + if self.temperature is not None: + query /= self.temperature + if self.tanh_constant is not None: + query = self.tanh_constant * tf.tanh(query) + + if mutable.n_chosen is None: + logit = tf.concat([-query, query], axis=1) + softmax_logit = tf.math.log(tf.nn.softmax(logit, axis=-1)) + skip = tf.reshape(tf.random.categorical(softmax_logit, num_samples=1), [-1]) + skip_prob = tf.math.sigmoid(logit) + kl = tf.reduce_sum(skip_prob * tf.math.log(skip_prob / self.skip_targets)) + self.sample_skip_penalty += kl + log_prob = self.cross_entropy_loss(skip, logit) + + skip = tf.cast(skip, tf.float32) + inputs = tf.tensordot(skip, tf.concat(anchors, 0), 1) / (1. + tf.reduce_sum(skip)) + self._inputs = tf.reshape(inputs, [1, 1, -1]) + + else: + assert mutable.n_chosen == 1, "Input choice must select exactly one or any in ENAS." 
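+            # n_chosen == 1: pick a single input by sampling a categorical
+            # distribution over the attention scores of the candidates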
+ logit = tf.reshape(query, [1, -1]) + softmax_logit = tf.math.log(tf.nn.softmax(logit, axis=-1)) + index = tf.reshape(tf.random.categorical(softmax_logit, num_samples=1), [-1]) + skip = tf.reshape(tf.one_hot(index, mutable.n_candidates), [-1]) + # when the size is 1, tf does not accept tensor here, complaining the shape is wrong + # but using a numpy array seems fine + log_prob = self.cross_entropy_loss(logit, query.numpy()) + self._inputs = tf.reshape(anchors[index.numpy()[0]], [1, 1, -1]) + + self.sample_log_prob += self.entropy_reduction(log_prob) + entropy = log_prob * tf.exp(-log_prob) + self.sample_entropy += self.entropy_reduction(entropy) + assert len(skip) == mutable.n_candidates, (skip, mutable.n_candidates, mutable.n_chosen) + return tf.cast(skip, tf.bool) diff --git a/nni/algorithms/nas/tensorflow/enas/trainer.py b/nni/algorithms/nas/tensorflow/enas/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..67df9c7f969f9751d5b8c5d5a9f2a4bea1689735 --- /dev/null +++ b/nni/algorithms/nas/tensorflow/enas/trainer.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +# pylint: skip-file + +import logging + +import tensorflow as tf +from tensorflow.keras.optimizers import Adam + +from nni.nas.tensorflow.utils import AverageMeterGroup, fill_zero_grads + +from .mutator import EnasMutator + +logger = logging.getLogger(__name__) + + +class EnasTrainer: + def __init__( + self, + model, + loss, + metrics, + reward_function, + optimizer, + batch_size, + num_epochs, + dataset_train, + dataset_valid, + log_frequency=100, + entropy_weight=0.0001, + skip_weight=0.8, + baseline_decay=0.999, + child_steps=500, + mutator_lr=0.00035, + mutator_steps=50, + mutator_steps_aggregate=20, + aux_weight=0.4, + test_arc_per_epoch=1, + ): + self.model = model + self.loss = loss + self.metrics = metrics + self.reward_function = reward_function + self.optimizer = optimizer + self.batch_size = batch_size + self.num_epochs = num_epochs + + x, y = dataset_train + split = int(len(x) * 0.9) + self.train_set = tf.data.Dataset.from_tensor_slices((x[:split], y[:split])) + self.valid_set = tf.data.Dataset.from_tensor_slices((x[split:], y[split:])) + self.test_set = tf.data.Dataset.from_tensor_slices(dataset_valid) + + self.log_frequency = log_frequency + self.entropy_weight = entropy_weight + self.skip_weight = skip_weight + self.baseline_decay = baseline_decay + self.child_steps = child_steps + self.mutator_lr = mutator_lr + self.mutator_steps = mutator_steps + self.mutator_steps_aggregate = mutator_steps_aggregate + self.aux_weight = aux_weight + self.test_arc_per_epoch = test_arc_per_epoch + + self.mutator = EnasMutator(model) + self.mutator_optim = Adam(learning_rate=self.mutator_lr) + + self.baseline = 0.0 + + def train(self, validate=True): + for epoch in range(self.num_epochs): + logger.info("Epoch %d Training", epoch + 1) + self.train_one_epoch(epoch) + logger.info("Epoch %d Validating", epoch + 1) + self.validate_one_epoch(epoch) + + def validate(self): + self.validate_one_epoch(-1) + + def train_one_epoch(self, epoch): + train_loader, valid_loader = self._create_train_loader() + + # Sample model and train + meters = AverageMeterGroup() + + for step in range(1, self.child_steps + 1): + x, y = next(train_loader) + self.mutator.reset() + + with tf.GradientTape() as tape: + logits = self.model(x, training=True) + if isinstance(logits, tuple): + logits, aux_logits = logits + aux_loss = self.loss(aux_logits, y) + else: + aux_loss = 0.0 + metrics = 
self.metrics(y, logits) + loss = self.loss(y, logits) + self.aux_weight * aux_loss + + grads = tape.gradient(loss, self.model.trainable_weights) + grads = fill_zero_grads(grads, self.model.trainable_weights) + grads, _ = tf.clip_by_global_norm(grads, 5.0) + self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights)) + + metrics["loss"] = tf.reduce_mean(loss).numpy() + meters.update(metrics) + + if self.log_frequency and step % self.log_frequency == 0: + logger.info( + "Model Epoch [%d/%d] Step [%d/%d] %s", + epoch + 1, + self.num_epochs, + step, + self.child_steps, + meters, + ) + + # Train sampler (mutator) + meters = AverageMeterGroup() + for mutator_step in range(1, self.mutator_steps + 1): + grads_list = [] + for step in range(1, self.mutator_steps_aggregate + 1): + with tf.GradientTape() as tape: + x, y = next(valid_loader) + self.mutator.reset() + + logits = self.model(x, training=False) + metrics = self.metrics(y, logits) + reward = ( + self.reward_function(y, logits) + + self.entropy_weight * self.mutator.sample_entropy + ) + self.baseline = self.baseline * self.baseline_decay + reward * ( + 1 - self.baseline_decay + ) + loss = self.mutator.sample_log_prob * (reward - self.baseline) + loss += self.skip_weight * self.mutator.sample_skip_penalty + + meters.update( + { + "reward": reward, + "loss": tf.reduce_mean(loss).numpy(), + "ent": self.mutator.sample_entropy.numpy(), + "log_prob": self.mutator.sample_log_prob.numpy(), + "baseline": self.baseline, + "skip": self.mutator.sample_skip_penalty, + } + ) + + cur_step = step + (mutator_step - 1) * self.mutator_steps_aggregate + if self.log_frequency and cur_step % self.log_frequency == 0: + logger.info( + "RL Epoch [%d/%d] Step [%d/%d] [%d/%d] %s", + epoch + 1, + self.num_epochs, + mutator_step, + self.mutator_steps, + step, + self.mutator_steps_aggregate, + meters, + ) + + grads = tape.gradient(loss, self.mutator.trainable_weights) + grads = fill_zero_grads(grads, self.mutator.trainable_weights) + grads_list.append(grads) + total_grads = [ + tf.math.add_n(weight_grads) for weight_grads in zip(*grads_list) + ] + total_grads, _ = tf.clip_by_global_norm(total_grads, 5.0) + self.mutator_optim.apply_gradients( + zip(total_grads, self.mutator.trainable_weights) + ) + + def validate_one_epoch(self, epoch): + test_loader = self._create_validate_loader() + + for arc_id in range(self.test_arc_per_epoch): + meters = AverageMeterGroup() + for x, y in test_loader: + self.mutator.reset() + logits = self.model(x, training=False) + if isinstance(logits, tuple): + logits, _ = logits + metrics = self.metrics(y, logits) + loss = self.loss(y, logits) + metrics["loss"] = tf.reduce_mean(loss).numpy() + meters.update(metrics) + + logger.info( + "Test Epoch [%d/%d] Arc [%d/%d] Summary %s", + epoch + 1, + self.num_epochs, + arc_id + 1, + self.test_arc_per_epoch, + meters.summary(), + ) + + def _create_train_loader(self): + train_set = self.train_set.shuffle(1000000).repeat().batch(self.batch_size) + test_set = self.valid_set.shuffle(1000000).repeat().batch(self.batch_size) + return iter(train_set), iter(test_set) + + def _create_validate_loader(self): + return iter(self.test_set.shuffle(1000000).batch(self.batch_size)) diff --git a/nni/assessor.py b/nni/assessor.py new file mode 100644 index 0000000000000000000000000000000000000000..7cd83e9232735f5aee82e1abca702e347505f4cd --- /dev/null +++ b/nni/assessor.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +""" +Assessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset) +to tell whether this trial can be early stopped or not. + +See :class:`Assessor`' specification and ``docs/en_US/assessors.rst`` for details. +""" + +from enum import Enum +import logging + +from .recoverable import Recoverable + +__all__ = ['AssessResult', 'Assessor'] + +_logger = logging.getLogger(__name__) + + +class AssessResult(Enum): + """ + Enum class for :meth:`Assessor.assess_trial` return value. + """ + + Good = True + """The trial works well.""" + + Bad = False + """The trial works poorly and should be early stopped.""" + + +class Assessor(Recoverable): + """ + Assessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset) + to tell whether this trial can be early stopped or not. + + This is the abstract base class for all assessors. + Early stopping algorithms should inherit this class and override :meth:`assess_trial` method, + which receives intermediate results from trials and give an assessing result. + + If :meth:`assess_trial` returns :obj:`AssessResult.Bad` for a trial, + it hints NNI framework that the trial is likely to result in a poor final accuracy, + and therefore should be killed to save resource. + + If an assessor want's to be notified when a trial ends, it can also override :meth:`trial_end`. + + To write a new assessor, you can reference :class:`~nni.medianstop_assessor.MedianstopAssessor`'s code as an example. + + See Also + -------- + Builtin assessors: + :class:`~nni.algorithms.hpo.medianstop_assessor.MedianstopAssessor` + :class:`~nni.algorithms.hpo.curvefitting_assessor.CurvefittingAssessor` + """ + + def assess_trial(self, trial_job_id, trial_history): + """ + Abstract method for determining whether a trial should be killed. Must override. + + The NNI framework has little guarantee on ``trial_history``. + This method is not guaranteed to be invoked for each time ``trial_history`` get updated. + It is also possible that a trial's history keeps updating after receiving a bad result. + And if the trial failed and retried, ``trial_history`` may be inconsistent with its previous value. + + The only guarantee is that ``trial_history`` is always growing. + It will not be empty and will always be longer than previous value. + + This is an example of how :meth:`assess_trial` get invoked sequentially: + + :: + + trial_job_id | trial_history | return value + ------------ | --------------- | ------------ + Trial_A | [1.0, 2.0] | Good + Trial_B | [1.5, 1.3] | Bad + Trial_B | [1.5, 1.3, 1.9] | Good + Trial_A | [0.9, 1.8, 2.3] | Good + + Parameters + ---------- + trial_job_id : str + Unique identifier of the trial. + trial_history : list + Intermediate results of this trial. The element type is decided by trial code. + + Returns + ------- + AssessResult + :obj:`AssessResult.Good` or :obj:`AssessResult.Bad`. + """ + raise NotImplementedError('Assessor: assess_trial not implemented') + + def trial_end(self, trial_job_id, success): + """ + Abstract method invoked when a trial is completed or terminated. Do nothing by default. + + Parameters + ---------- + trial_job_id : str + Unique identifier of the trial. + success : bool + True if the trial successfully completed; False if failed or terminated. + """ + + def load_checkpoint(self): + """ + Internal API under revising, not recommended for end users. 
+ """ + checkpoin_path = self.get_checkpoint_path() + _logger.info('Load checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path) + + def save_checkpoint(self): + """ + Internal API under revising, not recommended for end users. + """ + checkpoin_path = self.get_checkpoint_path() + _logger.info('Save checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path) + + def _on_exit(self): + pass + + def _on_error(self): + pass diff --git a/nni/common/__init__.py b/nni/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f18054727d456b83c3d647c1115807e90dc61b25 --- /dev/null +++ b/nni/common/__init__.py @@ -0,0 +1 @@ +from .serializer import trace, dump, load, is_traceable diff --git a/nni/common/device.py b/nni/common/device.py new file mode 100644 index 0000000000000000000000000000000000000000..14aae4878d976f50df6fe345cc6a2bbdd86d8e14 --- /dev/null +++ b/nni/common/device.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from dataclasses import dataclass +from abc import ABC, abstractmethod + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + + +@dataclass +class Device(ABC): + node_id: str + status: Literal['idle', 'busy', 'unknown'] = 'idle' + + def __eq__(self, o) -> bool: + if isinstance(self, type(o)): + return self.node_id == o.node_id + else: + return False + + def __lt__(self, o) -> bool: + return self.node_id < o.node_id + + def set_status(self, status): + self.status = status + + def __repr__(self) -> str: + return "{Abstract Device %s, Status %s}" % (self.node_id, self.status) + + @abstractmethod + def device_repr(self) -> str: + pass + + +@dataclass +class GPUDevice(Device): + gpu_id: str = -1 + + def __init__(self, node_id, gpu_id, status='idle'): + self.node_id = node_id + self.gpu_id = gpu_id + self.status = status + + def __eq__(self, o: Device) -> bool: + if isinstance(o, GPUDevice): + return self.node_id == o.node_id and self.gpu_id == o.gpu_id + return False + + def __lt__(self, o: Device) -> bool: + if self.node_id < o.node_id: + return True + elif self.node_id > o.node_id: + return False + else: + if isinstance(o, GPUDevice): + return self.gpu_id < o.gpu_id + else: + return True + + def __repr__(self) -> str: + return "{Environment %s, GPU %d, Status %s}" % (self.node_id, self.gpu_id, self.status) + + def __hash__(self) -> int: + return hash(self.node_id + '_' + str(self.gpu_id)) + + def device_repr(self,): + return f"cuda:{self.gpu_id}" + + +@dataclass +class CPUDevice(Device): + def __init__(self, node_id): + self.node_id = node_id + self.device = 'cpu' + + def __repr__(self) -> str: + return "{CPU Device, NodeID %s, Status %s}" % (self.node_id, self.status) + + def device_repr(self): + return "cpu" diff --git a/nni/common/graph_utils.py b/nni/common/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1c1f4aaf9119eb2249f9350c8b95df7ee02ae676 --- /dev/null +++ b/nni/common/graph_utils.py @@ -0,0 +1,837 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+
+import logging
+import queue
+import re
+from collections import defaultdict
+import torch
+from torch.utils.tensorboard._pytorch_graph import NodePy, NodePyIO, NodePyOP, GraphPy
+CLASSTYPE_KIND = 'ClassType'
+GETATTR_KIND = 'prim::GetAttr'
+CAT_KIND = 'aten::cat'
+LIST_CONSTRUCT_KIND = 'prim::ListConstruct'
+LIST_UNPACK_KIND = 'prim::ListUnpack'
+TUPLE_CONSTRUCT_KIND = 'prim::TupleConstruct'
+TUPLE_UNPACK_KIND = 'prim::TupleUnpack'
+CONSTANT_KIND = 'prim::Constant'
+
+_logger = logging.getLogger(__name__)
+
+
+def build_module_graph(model, dummy_input):
+    return TorchModuleGraph(model, dummy_input)
+
+
+def build_graph(model, dummy_input, verbose=False):
+    g = TorchProtoGraph(model, dummy_input, verbose)
+    return g.graph_def, g.stepstats
+
+
+def parse_traced_name(module_name):
+    prefix = 'TracedModule['
+    suffix = ']'
+    if module_name.startswith(prefix) and module_name.endswith(suffix):
+        module_name = module_name[len(prefix):-len(suffix)]
+    return module_name
+
+
+class TorchGraph:
+    """
+    This class extracts the topology graph of a PyTorch model by tracing.
+    """
+
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        Parameters
+        ----------
+        model : pytorch model
+            The model user wants to speed up
+        dummy_input : pytorch tensor
+            The dummy input for ``jit.trace``; users should put it on the right device before passing it in
+        traced_model : torch._C.torch.jit.TopLevelTracedModule
+            An already traced model. If traced_model is not None, TorchGraph will build the graph
+            based on this traced model and won't trace the model again.
+        """
+        assert torch.__version__ >= '1.3.1'
+        # check if the input is legal
+        if traced_model is not None:
+            assert isinstance(traced_model, torch.jit.TopLevelTracedModule)
+            self.trace = traced_model
+            # it's ok if the graph is already unpacked
+            torch._C._jit_pass_inline(self.trace.graph)
+        elif model is not None and dummy_input is not None:
+            self.bound_model = model
+            self._trace(model, dummy_input)
+        else:
+            raise Exception(
+                'Please provide model & dummy_input or the traced_model as inputs')
+
+    def _trace(self, model, dummy_input):
+        training = model.training
+        model.eval()
+        kw_args = {}
+        if torch.__version__ >= '1.6.0':
+            # only pytorch 1.6.0 and above has the strict option
+            kw_args['strict'] = False
+        self.trace = torch.jit.trace(model, dummy_input, **kw_args)
+        torch._C._jit_pass_inline(self.trace.graph)
+        model.train(training)
+
+
+class TorchProtoGraph(TorchGraph):
+    """
+    Generates a protobuf model graph for PyTorch models. This implementation
+    is borrowed from PyTorch v1.4.0 and fixes the following issues:
+    https://github.com/pytorch/pytorch/issues/33691
+    https://github.com/pytorch/pytorch/issues/33670
+
+    """
+
+    def __init__(self, model, dummy_input, verbose=False):
+        super().__init__(model, dummy_input)
+
+        from tensorboard.compat.proto.config_pb2 import RunMetadata
+        from tensorboard.compat.proto.graph_pb2 import GraphDef
+        from tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats
+        from tensorboard.compat.proto.versions_pb2 import VersionDef
+
+        list_of_nodes = self.parse(self.trace.graph, self.trace, dummy_input)
+        if verbose:
+            print(self.trace.graph)
+        self.stepstats = RunMetadata(step_stats=StepStats(
+            dev_stats=[DeviceStepStats(device="/device:CPU:0")]))
+        self.graph_def = GraphDef(
+            node=list_of_nodes, versions=VersionDef(producer=22))
+
+    def parse(self, graph, trace, args=None, omit_useless_nodes=True):
+        """This method parses an optimized PyTorch model graph and produces
+        a list of nodes and node stats for eventual conversion to TensorBoard
+        protobuf format.
+
+        Args:
+            graph (PyTorch module): The model graph to be parsed.
+            trace (PyTorch JIT TracedModule): The model trace to be parsed.
+            args (tuple): input tensor[s] for the model.
+            omit_useless_nodes (boolean): Whether to remove nodes from the graph.
+        """
+        nodes_py = GraphPy()
+        for node in graph.inputs():
+            if omit_useless_nodes:
+                if not node.uses():  # skip nodes with no users (i.e., zero fan-out)
+                    continue
+
+            if node.type().kind() != CLASSTYPE_KIND:
+                nodes_py.append(NodePyIO(node, 'input'))
+
+        attr_to_scope = dict()
+
+        def node_to_name(d):
+            return str(d).split(":")[0].strip()
+        for node in graph.nodes():
+            if node.kind() == GETATTR_KIND:
+                attr_name = node.s('name')
+                node_name = node_to_name(node)
+                parent = node.input().node()
+                # If the parent node is not the top-level "self" node
+                if parent.kind() == GETATTR_KIND:
+                    parent_scope = attr_to_scope[node_to_name(parent)]
+                    attr_scope = parent_scope.split('/')[-1]
+                    attr_to_scope[node_name] = '{}/{}.{}'.format(
+                        parent_scope, attr_scope, attr_name)
+                else:
+                    attr_to_scope[node_name] = '__module.{}'.format(attr_name)
+                # We don't need classtype nodes; scope will provide this information
+                if node.output().type().kind() != CLASSTYPE_KIND:
+                    node_py = NodePyOP(node)
+                    node_py.scopeName = attr_to_scope[node_name]
+                    nodes_py.append(node_py)
+            else:
+                nodes_py.append(NodePyOP(node))
+
+        # Create sink nodes for output ops
+        for i, node in enumerate(graph.outputs()):
+            node_py = NodePyIO(node, 'output')
+            node_py.debugName = "output.{}".format(i + 1)
+            node_py.inputs = [node.debugName()]
+            nodes_py.append(node_py)
+
+        alias_to_name = dict()
+        base_name = parse_traced_name(trace._name)
+        for name, module in trace.named_modules(prefix='__module'):
+            mod_name = parse_traced_name(module._name)
+            attr_name = name.split('.')[-1]
+            alias_to_name[name] = '{}[{}]'.format(mod_name, attr_name)
+
+        for node in nodes_py.nodes_op:
+            module_aliases = node.scopeName.split('/')[-1].split('.')
+            module_name = ''
+            for i, alias in enumerate(module_aliases):
+                if i == 0:
+                    module_name = alias
+                    node.scopeName = base_name
+                else:
+                    module_name += '.' + alias
+                node.scopeName += '/' + \
+                    (alias_to_name[module_name]
+                     if module_name in alias_to_name else alias)
+
+        nodes_py.populate_namespace_from_OP_to_IO()
+        return nodes_py.to_proto()
+
+
+class NodePyGroup(NodePy):
+    """
+    This class represents a graph node which consists of multiple jit traced nodes. In a pytorch trace graph,
+    multiple nodes may be traced for one torch.nn.Module object; we group them together to form a single node
+    representing that torch.nn.Module object. We also group some functional-call trace nodes together to form a new node.
+    """
+
+    def __init__(self, name, unique_name, node_type, op_type, node_cpps, inputs=None, outputs=None, key_node=None):
+        """
+        Parameters:
+        -----------
+        name: str
+            node name, such as `conv1`, `backbone.classifier`
+        unique_name: str
+            A globally unique name for the current node. Because some modules,
+            such as relu, may be reused several times, the scope name is not
+            suitable as a globally unique identifier, so we add a unique_name
+            for each node as its globally unique identifier.
+            We should use the unique_name to traverse the module graph.
+        node_type: str
+            `module` or `func`
+        op_type: str
+            operation type, such as `Conv2d`, `aten::view`
+        node_cpps: list of torch._C.Node
+            jit trace nodes which are included in this new node
+        inputs: list of str
+            All the inputs of this node, each element is the debugName of one input
+        outputs: list of str
+            All the outputs of this node, each element is the debugName of one output
+        key_node: torch._C.Node
+            The key node of this NodePyGroup.
+        """
+        super(NodePyGroup, self).__init__(name, [])
+        self.node_cpps = node_cpps
+        self.name = name
+        self.unique_name = unique_name
+        self.op_type = op_type
+        self.type = node_type
+        self.nodes = []
+        self.auxiliary = None
+        self.add_nodes(node_cpps)
+        self.inputs = inputs
+        self.outputs = outputs
+        # The core node in this NodePyGroup
+        self.key_node = key_node
+
+    def add_nodes(self, node_cpps):
+        for node_cpp in node_cpps:
+            nodepy = NodePyOP(node_cpp)
+            nodepy.name = node_cpp.scopeName() + '_' + node_cpp.kind()
+            self.nodes.append(nodepy)
+
+    def sub_node_names(self):
+        return [x.name for x in self.nodes]
+
+    def __repr__(self):
+        return 'name: {}, type: {}, op_type: {}, sub_nodes: {}, inputs: {}, outputs: {}, aux: {}'.format(
+            self.name, self.type, self.op_type, self.sub_node_names(),
+            self.inputs, self.outputs, self.auxiliary
+        )
+
+
+class TorchModuleGraph(TorchGraph):
+    """
+    Generates a model graph in which each node is created from a single or multiple jit trace nodes.
+    """
+
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        super().__init__(model, dummy_input, traced_model)
+        self.global_count = 0
+        self.reused_module = set()
+        self.name_to_node, self.input_to_node, self.output_to_node = self._build_graph()
+        self._extract_auxiliary_info()
+
+    def _expand_key_func_node(self, node, nodes, input_to_node, output_to_node,
+                              module_type):
+        """
+        Some trace graph nodes are not in modules; such nodes are usually generated by
+        functions called directly in a module's ``forward``. Some of them are trivial ops
+        labeled ``prim::``, while the others are non-prim ops (for example, an ``aten::``
+        op fed by neighboring ``prim::Constant`` or ``prim::ListConstruct`` nodes).
+        This function merges neighboring prim ops into a non-prim op to construct a node.
+
+        Parameters
+        ----------
+        node : trace graph node
+            The non-prim node to expand
+        nodes : list of trace graph node
+            All the trace graph nodes within the same scope as the non-prim node
+        input_to_node : dict
+            key: input name, value: a node that uses this input
+        output_to_node : dict
+            key: output name, value: a node that generates this output
+        module_type : str
+            can be 'module' or 'func'
+
+        Returns
+        -------
+        node
+            the expanded non-prim node
+        """
+        # TODO: scope name could be empty
+        node_name = '.'.join([self._get_module_name(
+            node.scopeName()), node.kind(), str(self.global_count)])
+        unique_name = node_name
+        _logger.debug("expand non-prim node, node name: %s", node_name)
+        self.global_count += 1
+        op_type = node.kind()
+        node_group = [node]
+        inputs = []
+        outputs = []
+        node_queue = queue.Queue()
+        node_queue.put(node)
+        while not node_queue.empty():
+            curr_node = node_queue.get()
+            for _input in curr_node.inputs():
+                if _input.node().kind() == CONSTANT_KIND:
+                    continue
+                input_name = _input.debugName()
+                if input_name in output_to_node:
+                    for predecessor_node in output_to_node[input_name]:
+                        if predecessor_node in nodes:
+                            if not self._is_key_func(predecessor_node):
+                                if predecessor_node not in node_group:
+                                    node_group.append(predecessor_node)
+                                    node_queue.put(predecessor_node)
+                            else:
+                                inputs.append(input_name)
+                        else:
+                            inputs.append(input_name)
+                else:
+                    inputs.append(input_name)
+        for output in node.outputs():
+            if output.node().kind() == CONSTANT_KIND:
+                continue
+            outputs.append(output.debugName())
+        nodepy = NodePyGroup(node_name, unique_name, module_type, op_type,
+                             node_group, inputs=inputs, outputs=outputs, key_node=node)
+        return nodepy
+
+    def _expand_module_node(self, node, node_name, unique_name, op_type, nodes,
+                            input_to_node, output_to_node, module_type):
+        """
+        Merge the adjacent nodes of the module. The difference between
+        ``_expand_module_node`` and ``_expand_key_func_node`` is that the latter
+        only merges the ``prim::`` nodes into the ``aten::`` node, whereas
+        ``_expand_module_node`` merges all adjacent nodes into the same NodePyGroup.
+
+        Parameters
+        ----------
+        node : trace graph node
+            The non-prim node to expand
+        node_name : str
+            specify the node_name for NodePyGroup
+        unique_name : str
+            unique_name for the NodePyGroup
+        op_type : str
+            specify the op_type for the NodePyGroup
+        nodes : list of trace graph node
+            All the trace graph nodes within the same scope as the non-prim node
+        input_to_node : dict
+            key: input name, value: a node that uses this input
+        output_to_node : dict
+            key: output name, value: a node that generates this output
+        module_type : str
+            can be 'module' or 'func'
+        Returns
+        -------
+        node
+            the expanded non-prim node
+
+        """
+        _logger.debug("expand module node, node name: %s", node_name)
+        self.global_count += 1
+        if not op_type:
+            op_type = node.kind()
+        node_group = [node]
+        inputs = []
+        outputs = []
+        node_queue = queue.Queue()
+        node_queue.put(node)
+        visited = {node}
+        while not node_queue.empty():
+            curr_node = node_queue.get()
+            for _input in curr_node.inputs():
+                if _input.node().kind() == CONSTANT_KIND:
+                    continue
+                input_name = _input.debugName()
+                if input_name in output_to_node:
+                    for predecessor_node in output_to_node[input_name]:
+                        if predecessor_node in nodes:
+                            if predecessor_node not in visited:
+                                node_group.append(predecessor_node)
+                                node_queue.put(predecessor_node)
+                                visited.add(predecessor_node)
+                        else:
+                            inputs.append(input_name)
+                else:
+                    inputs.append(input_name)
+            for _output in curr_node.outputs():
+                if _output.node().kind() == CONSTANT_KIND:
+                    continue
+                output_name = _output.debugName()
+                if output_name in input_to_node:
+                    for successor_node in input_to_node[output_name]:
+                        if successor_node in nodes:
+                            if successor_node not in visited:
+                                node_group.append(successor_node)
+                                node_queue.put(successor_node)
+                                visited.add(successor_node)
+                        else:
+                            outputs.append(output_name)
+                else:
+                    outputs.append(output_name)
+        unique_outputs = list(set(outputs))
+        # remove the duplicated output names
+        unique_outputs.sort(key=outputs.index)
+
+        nodepy = NodePyGroup(node_name, unique_name, module_type, op_type,
+                             node_group, inputs=list(inputs), outputs=unique_outputs)
+        return nodepy
+
+    def _extract_cat_info(self, node_group, cpp_node):
+        """
+        Extract the detailed information of the cat operation,
+        such as the order of the input tensors, the shape of each
+        input tensor, the output shape, and the cat dimension.
+
+        Parameters
+        ----------
+        node_group : NodePyGroup
+        cpp_node: torch._C.Node
+            It should be an ``aten::cat`` node
+
+        Returns
+        -------
+        dict
+            Includes auxiliary information for the cat operation.
+            This dict object has four keys: 'cat_dim', 'out_shape',
+            'in_order' and 'in_shape'. cat_dim is the dimension along
+            which the cat operation concatenates the input tensors. out_shape
+            is the shape of the output tensor of the cat operation.
+            in_order is an ordered list which contains the corresponding
+            parent operation nodes of the input tensors. in_shape is also
+            an ordered list that contains the input shapes of the input tensors.
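+
+            For example (illustrative values only; 'conv1' / 'conv2' are
+            hypothetical parent node names), for ``torch.cat([a, b], 1)`` with
+            both inputs of shape ``[1, 16, 8, 8]``, the returned dict may look like::
+
+                {'cat_dim': 1,
+                 'out_shape': [1, 32, 8, 8],
+                 'in_order': ['conv1', 'conv2'],
+                 'in_shape': [[1, 16, 8, 8], [1, 16, 8, 8]]}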
+ """ + # only suport the cat operation + assert cpp_node.kind() == CAT_KIND + cat_info = {} + # get the shape of the output tensor + t_output = cpp_node.output() + out_shape = t_output.type().sizes() + cat_info['out_shape'] = out_shape + # get the cat dimension + inputs = cpp_node.inputs() + cat_dim = list(inputs)[1].toIValue() + cat_info['cat_dim'] = cat_dim + # get the order of the input tensors + # To get the order of the input tensors, we need + # to be aware of the topology of the model, which + # means we should extract the auxiliary information + # after the build_index function. + input_order = [] + list_construct_cpp = list(cpp_node.inputs())[0].node() + input_tensors = list(list_construct_cpp.inputs()) + for _tensor in input_tensors: + debug_name = _tensor.debugName() + if debug_name in self.output_to_node: + input_order.append(self.output_to_node[debug_name].unique_name) + else: + # the input tensor may be the input tensor of the whole model + input_order.append(None) + cat_info['in_order'] = input_order + input_shapes = [t.type().sizes() for t in input_tensors] + cat_info['in_shape'] = input_shapes + return cat_info + + def _extract_linear_shape_info(self, node_group): + """ + Extract linear shape input/output tensor shape info from its aten::addmm op. + + Parameters + ---------- + node_group : NodePyGroup + NodePyGroup object associated with the linear module. + + Returns + ------- + dict + Include shape of input tensor and shape of output tensor + """ + for cpp_node in node_group.node_cpps: + if cpp_node.kind() == 'aten::addmm': + # https://github.com/pytorch/pytorch/blob/1.6/torch/nn/functional.py#L1682 + # inputs of aten::addmm: + # inputs[0] is bias + # inputs[1] is input data + # inputs[2] is weight + t_input = list(cpp_node.inputs())[1] + t_output = cpp_node.output() + assert isinstance(t_input.type(), torch._C.TensorType) + assert isinstance(t_output.type(), torch._C.TensorType) + in_shape = t_input.type().sizes() + out_shape = t_output.type().sizes() + return {'in_shape': in_shape, 'out_shape': out_shape} + return None + + def _extract_shape_info(self, node): + """ + Extract the shape information of ```aten::view``` node + + Parameters + ---------- + node : trace graph node + It should be ```aten::view``` node + + Returns + ------- + dict + Include shape of input tensor and shape of output tensor + """ + t_input = None + for _input in node.inputs(): + t_input = _input + break + t_output = node.output() + assert isinstance(t_input.type(), torch._C.TensorType) + assert isinstance(t_output.type(), torch._C.TensorType) + in_shape = t_input.type().sizes() + out_shape = t_output.type().sizes() + return {'in_shape': in_shape, 'out_shape': out_shape} + + def _extract_leaf_modules(self): + """ + Extract leaf modules from the given graph. Leaf module means it does not have submodules. + To extract leaf modules because only leaf module can be replaced. And shape inference can + be done in leaf module level. Other shape inference is done in lower level i.e., + operation level. 
+
+        Returns
+        -------
+        list
+            a list of scope names of all the leaf modules
+        """
+        def is_parent(name1, name2):
+            """
+            check if name1 is a parent node of name2, for example:
+            name1: aa.bb, name2: aa.bb.cc, return True
+            name1: aa.b, name2: aa.bb, return False
+            """
+            parts1, parts2 = name1.split('.'), name2.split('.')
+            if len(parts1) >= len(parts2):
+                return False
+            for i, _ in enumerate(parts1):
+                if parts2[i] != parts1[i]:
+                    return False
+            return True
+        module_names = sorted([x[0]
+                               for x in self.trace.named_modules() if x[0]])
+        leaf_nodes = []
+        for i, name in enumerate(module_names):
+            if i + 1 >= len(module_names) or not is_parent(name, module_names[i + 1]):
+                leaf_nodes.append(name)
+        return leaf_nodes
+
+    def _get_module_name(self, scope_name):
+        """
+        Retrieve the module name from a scope name.
+        Parameters:
+        -----------
+        scope_name: str
+            scope_name of a graph node, for example:
+            for pytorch 1.3.1: MyModel/BackboneModel[backbone]/Conv2d[conv2]
+            for pytorch 1.4.0: __module.backbone/__module.backbone.conv2
+
+        Returns:
+        -------
+        str
+            module name, such as backbone.conv2
+        """
+        if torch.__version__ >= '1.4.0':
+            return scope_name.split('/')[-1].replace('__module.', '')
+        else:
+            return '.'.join(re.findall(r'\[(.*?)\]', scope_name))
+
+    def _build_index(self, nodes_op):
+        name_to_node = dict()
+        input_to_node = defaultdict(list)
+        output_to_node = dict()
+        for node in nodes_op:
+            name_to_node[node.unique_name] = node
+            for _input in node.inputs:
+                # inputs may have duplicate tensors
+                if node not in input_to_node[_input]:
+                    input_to_node[_input].append(node)
+            for output in node.outputs:
+                if output in output_to_node:
+                    assert output_to_node[output] == node, \
+                        "One output cannot be generated by multiple nodes %s" % output
+                output_to_node[output] = node
+        return name_to_node, input_to_node, output_to_node
+
+    def _is_key_func(self, node_cpp):
+        """
+        Judge whether a cpp node is a key function node.
+        If so, we should not merge this node into the
+        adjacent node.
+        """
+        if node_cpp.kind().startswith('aten::'):
+            # the nodes that start with 'aten' are key function
+            # nodes
+            return True
+        if node_cpp.kind() in [LIST_UNPACK_KIND, TUPLE_UNPACK_KIND]:
+            # We cannot merge the List/Tuple
+            # Unpack func into other nodes, else it
+            # may lead to a graph construction error.
+            # The reason why we do not take the construct node
+            # also as a key node is that the `cat` operation node needs
+            # the last (previous) visited node to infer the mask. If
+            # we take the Construct node as the important node, the
+            # predecessor of the `cat` node will always be a construct
+            # node, which means we cannot infer the mask for the cat
+            # operation.
+            return True
+        return False
+
+    def unpack_manually(self):
+        """
+        Unpack the tensor tuple or tensor list manually,
+        and remove the ListUnpack/TupleUnpack node from
+        the graph. Note: this function will change the
+        graph structure.
+        """
+        if hasattr(self, 'unpacked'):
+            # if already unpacked the tuple/list manually
+            return
+        for node in self.nodes_py.nodes_op:
+            if node.op_type in [TUPLE_UNPACK_KIND, LIST_UNPACK_KIND]:
+                unpack_cpp = node.key_node
+                last_cpp = list(unpack_cpp.inputs())[0].node()
+                if last_cpp.kind() in [TUPLE_CONSTRUCT_KIND, LIST_CONSTRUCT_KIND]:
+                    # we need to check whether the tensor tuple or tensor list is produced
+                    # by a list/tuple construct node. If so, we can unpack the tuple
+                    # or list manually.
+                    _logger.debug('List/Tuple Construct Node(cpp) %s', str(last_cpp))
+                    _logger.debug('List/Tuple Unpack Node(cpp) %s', str(unpack_cpp))
+                    assert len(list(unpack_cpp.outputs())) == len(list(last_cpp.inputs()))
+                    errmsg = '%s Input number: %d is inconsistent with the output number %d' % (unpack_cpp, \
+                        len(node.inputs), len(list(last_cpp.inputs())))
+
+                    assert len(node.inputs) == len(list(last_cpp.inputs())), errmsg
+                    for _debug_input, _debug_output in zip(node.inputs, node.outputs):
+                        if _debug_input in self.input_to_node and _debug_output in self.input_to_node:
+                            # input_to_node[_debug_input] is a list of NodePyGroup, because
+                            # one tensor can be used as input for multiple nodes at the same time.
+
+                            # note that, in this case, the construct cpp node and unpack cpp node
+                            # will be merged into the same NodePyGroup, so we remove the `node` from
+                            # input_to_node[_debug_input] and directly connect this tensor to the
+                            # input_to_node[_debug_output]
+                            if node in self.input_to_node[_debug_input]:
+                                self.input_to_node[_debug_input].remove(node)
+                            # add the following nodes of _output into the input_to_node[_debug_input]
+                            self.input_to_node[_debug_input].extend(self.input_to_node[_debug_output])
+                        # just remove the _debug_output from the graph index, so that we can
+                        # also skip the construct and unpack nodes
+                        if _debug_output in self.input_to_node:
+                            for following_node in self.input_to_node[_debug_output]:
+                                _tmp_index = following_node.inputs.index(_debug_output)
+                                following_node.inputs[_tmp_index] = _debug_input
+
+
+        self.unpacked = True
+
+    def _build_graph(self):
+        """
+        Build the graph in our own format from the jit trace.
+        There are basically three steps: first, construct the necessary information
+        (data structures); second, extract all the modules and convert them to nodes;
+        third, extract all functions and convert them to nodes.
+
+        Returns
+        -------
+        dict
+            use name to index nodes, key: node name, value: node
+        dict
+            use input (its name) to index nodes,
+            key: input, value: list of nodes that take this input
+        dict
+            use output (its name) to index nodes,
+            key: output, value: node that generates this output
+        """
+        omit_useless_nodes = True
+        graph = self.trace.graph
+        _logger.debug(graph)
+        # build input/output mapping, from input/output debugName to its node
+        input_to_node = defaultdict(list)
+        output_to_node = defaultdict(list)
+        for node in graph.nodes():
+            if node.kind() == CONSTANT_KIND:
+                continue
+            for x in node.outputs():
+                if x.node().kind() == CONSTANT_KIND:
+                    continue
+                output_to_node[x.debugName()].append(node)
+                assert len(output_to_node[x.debugName()]) <= 1, "One output cannot be generated by multiple nodes %s" % x.debugName()
+            for x in node.inputs():
+                if x.node().kind() == CONSTANT_KIND:
+                    continue
+                input_to_node[x.debugName()].append(node)
+
+        # build module mapping, from module name to all nodes (as list) under this module scope
+        module_to_nodes = defaultdict(list)
+        # the mapping of function (non-module in forward) to nodes, key is scope name
+        func_to_nodes = defaultdict(list)
+
+        nodes_py = GraphPy()
+        for node in graph.inputs():
+            if omit_useless_nodes:
+                if not node.uses():  # skip nodes with no users (i.e., zero fan-out)
+                    continue
+
+            if node.type().kind() != 'ClassType':
+                nodes_py.append(NodePyIO(node, 'input'))
+
+        self.leaf_modules = self._extract_leaf_modules()
+        module_to_type = {name: parse_traced_name(
+            module._name) for name, module in self.trace.named_modules()}
+
+        # associate module name with their trace graph nodes
+        for node in graph.nodes():
+            if node.kind() == CONSTANT_KIND:
+                continue
+            module_name = self._get_module_name(node.scopeName())
+            if module_name in self.leaf_modules:
+                module_to_nodes[module_name].append(node)
+            else:
+                func_to_nodes[node.scopeName()].append(node)
+        # build node group for module
+        for module_name, node_cpps in module_to_nodes.items():
+            use_count = 0
+            merged = set()
+            for node in node_cpps:
+                if node not in merged:
+                    # modules that have the same scope name may appear at different
+                    # locations in the graph. Furthermore, there are also lots of
+                    # prim:: nodes in node_cpps, so we also need to call
+                    # _expand_module_node.
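+                    # For a module reused N times, the first use keeps the plain module
+                    # name (e.g. `backbone.relu`, a hypothetical example) and later uses
+                    # get a numeric suffix (`backbone.relu.1`, `backbone.relu.2`, ...);
+                    # both names are recorded in self.reused_module.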
+                    unique_name = module_name
+                    if use_count > 0:
+                        unique_name = module_name + '.%d' % use_count
+                        self.reused_module.add(unique_name)
+                        self.reused_module.add(module_name)
+                    node_group = self._expand_module_node(
+                        node, module_name, unique_name, module_to_type[module_name],
+                        node_cpps, input_to_node, output_to_node, 'module')
+                    nodes_py.nodes_op.append(node_group)
+                    use_count += 1
+                    merged.update(node_group.node_cpps)
+
+        # each scope_name may have multiple funcs; we split them and create a node for each of them
+        # build node group for torch.nn.functional
+        for _, nodes in func_to_nodes.items():
+            # extract non prim:: nodes
+            key_func_nodes = list()
+            for node in nodes:
+                if self._is_key_func(node):
+                    # find the key function nodes
+                    key_func_nodes.append(node)
+            # for each non prim node, expand it
+            for node in key_func_nodes:
+                node_group = self._expand_key_func_node(
+                    node, nodes, input_to_node, output_to_node, 'func')
+                nodes_py.nodes_op.append(node_group)
+                # get shape info for view (aten::view) func
+                # if node_group.op_type in ['aten::view', 'aten::flatten']:
+                #     node_group.auxiliary = self._extract_shape_info(node)
+
+        for node in graph.outputs():  # Create sink nodes for output ops
+            node_py = NodePyIO(node, 'output')
+            nodes_py.append(node_py)
+
+        self.nodes_py = nodes_py
+        # build index
+        return self._build_index(self.nodes_py.nodes_op)
+
+    def _extract_auxiliary_info(self):
+        """
+        Extract auxiliary information for the node groups
+        if necessary. For example, view/flatten operations may
+        need the shape of the input tensor and output tensor.
+        """
+        # extract the input & output shape for the view and flatten
+        for node_group in self.nodes_py.nodes_op:
+            if node_group.op_type in ['aten::view', 'aten::flatten', 'aten::mean', 'aten::reshape']:
+                # get shape info for view (aten::view) func
+                cpp_node = list(filter(lambda x: x.kind() == node_group.op_type,
+                                       node_group.node_cpps))[0]
+                node_group.auxiliary = self._extract_shape_info(cpp_node)
+            elif node_group.op_type == 'Linear':
+                node_group.auxiliary = self._extract_linear_shape_info(node_group)
+            elif node_group.op_type == CAT_KIND:
+                # get the detailed information for the cat func
+                cpp_node = list(filter(lambda x: x.kind() == node_group.op_type,
+                                       node_group.node_cpps))[0]
+                node_group.auxiliary = self._extract_cat_info(
+                    node_group, cpp_node)
+
+    def find_predecessors(self, unique_name):
+        """
+        Find the predecessor nodes of the given node
+
+        Parameters
+        ----------
+        unique_name : str
+            The unique name of the node
+
+        Returns
+        -------
+        list
+            a list of unique names of the given node's predecessors
+        """
+        predecessors = []
+        for _input in self.name_to_node[unique_name].inputs:
+            if _input not in self.output_to_node:
+                _logger.debug("cannot find node with %s as its output", _input)
+            else:
+                node_py = self.output_to_node[_input]
+                predecessors.append(node_py.unique_name)
+        return predecessors
+
+    def find_successors(self, unique_name):
+        """
+        Find the successor nodes of the given node
+
+        Parameters
+        ----------
+        unique_name : str
+            The unique name of the node
+
+        Returns
+        -------
+        list
+            a list of unique names of the given node's successors
+        """
+        successors = []
+        for output in self.name_to_node[unique_name].outputs:
+            if output not in self.input_to_node:
+                # may reach the output of the whole graph
+                continue
+            nodes_py = self.input_to_node[output]
+            for node_py in nodes_py:
+                successors.append(node_py.unique_name)
+        return successors
diff --git a/nni/common/hpo_utils/__init__.py b/nni/common/hpo_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae6bd87bc2c088a56f07123cd23adfd879d77a79
--- /dev/null
+++ b/nni/common/hpo_utils/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .validation import validate_search_space
+from .formatting import *
+from .optimize_mode import OptimizeMode
diff --git a/nni/common/hpo_utils/formatting.py b/nni/common/hpo_utils/formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a2674b9f9fee1d0ba28fd597448bf26aeca5552
--- /dev/null
+++ b/nni/common/hpo_utils/formatting.py
@@ -0,0 +1,217 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+This script provides a more program-friendly representation of the HPO search space.
+The format is an internal helper and is not visible to end users.
+
+You will find this useful when you want to support nested search spaces.
+
+The random tuner is an intuitive example for this utility.
+You should check its code before reading the docstrings in this file.
+"""
+
+__all__ = [
+    'ParameterSpec',
+    'deformat_parameters',
+    'format_parameters',
+    'format_search_space',
+]
+
+import math
+from types import SimpleNamespace
+from typing import Any, List, NamedTuple, Optional, Tuple
+
+import numpy as np
+
+class ParameterSpec(NamedTuple):
+    """
+    Specification (aka space / range / domain) of one single parameter.
+
+    NOTE: For `loguniform` (and `qloguniform`), the fields `low` and `high` are the logarithm of the original values.
+    """
+
+    name: str  # The object key in JSON
+    type: str  # "_type" in JSON
+    values: List[Any]  # "_value" in JSON
+
+    key: Tuple[str]  # The "path" of this parameter
+
+    categorical: bool  # Whether this parameter is categorical (unordered) or numerical (ordered)
+    size: int = None  # If it's categorical, how many candidates it has
+
+    # uniform distributed
+    low: float = None  # Lower bound of uniform parameter
+    high: float = None  # Upper bound of uniform parameter
+
+    normal_distributed: bool = None  # Whether this parameter is uniformly or normally distributed
+    mu: float = None  # µ of normal parameter
+    sigma: float = None  # σ of normal parameter
+
+    q: Optional[float] = None  # If not `None`, the parameter value should be an integer multiple of this
+    clip: Optional[Tuple[float, float]] = None
+    # For q(log)uniform, this equals `values[:2]`; for others this is None
+
+    log_distributed: bool = None  # Whether this parameter is log distributed
+    # When true, low/high/mu/sigma describe the log of the parameter value (like np.lognormal)
+
+    def is_activated_in(self, partial_parameters):
+        """
+        For a nested search space, check whether this parameter should be skipped given the current set of parameters.
+        This function must be used in a pattern similar to the random tuner. Otherwise it will misbehave.
+        """
+        if len(self.key) < 2 or isinstance(self.key[-2], str):
+            return True
+        return partial_parameters[self.key[:-2]] == self.key[-2]
+
+def format_search_space(search_space):
+    """
+    Convert a user-provided search space into a dict of ParameterSpec.
+    Each dict key is the corresponding value's `ParameterSpec.key`.
+    """
+    formatted = _format_search_space(tuple(), search_space)
+    # In CPython 3.6, dicts preserve order by internal implementation.
+    # In Python 3.7+, dicts preserve order by language spec.
+    # Python 3.6 is crappy enough. Don't bother to do extra work for it.
+    # Remove these comments when we drop 3.6 support.
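+    # Illustrative example (hypothetical search space; fields abridged):
+    #   format_search_space({'lr': {'_type': 'loguniform', '_value': [0.0001, 0.1]}})
+    #   -> {('lr',): ParameterSpec(name='lr', type='loguniform', key=('lr',), categorical=False,
+    #                              low=math.log(0.0001), high=math.log(0.1), log_distributed=True, ...)}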
+    return {spec.key: spec for spec in formatted}
+
+def deformat_parameters(formatted_parameters, formatted_search_space):
+    """
+    Convert internal format parameters to users' expected format.
+
+    "test/ut/sdk/test_hpo_formatting.py" provides examples of how this works.
+
+    The function does the following jobs:
+    1. For "choice" and "randint", convert the index (integer) to the corresponding value.
+    2. For "*log*", convert x to `exp(x)`.
+    3. For "q*", convert x to `round(x / q) * q`, then clip into range.
+    4. For nested choices, convert flattened key-value pairs into a nested structure.
+    """
+    ret = {}
+    for key, x in formatted_parameters.items():
+        spec = formatted_search_space[key]
+        if spec.categorical:
+            if spec.type == 'randint':
+                lower = min(math.ceil(float(x)) for x in spec.values)
+                _assign(ret, key, int(lower + x))
+            elif _is_nested_choices(spec.values):
+                _assign(ret, tuple([*key, '_name']), spec.values[x]['_name'])
+            else:
+                _assign(ret, key, spec.values[x])
+        else:
+            if spec.log_distributed:
+                x = math.exp(x)
+            if spec.q is not None:
+                x = round(x / spec.q) * spec.q
+            if spec.clip:
+                x = max(x, spec.clip[0])
+                x = min(x, spec.clip[1])
+            if isinstance(x, np.number):
+                x = x.item()
+            _assign(ret, key, x)
+    return ret
+
+def format_parameters(parameters, formatted_search_space):
+    """
+    Convert end users' parameter format back to the internal format, mainly for resuming experiments.
+
+    The result is not accurate for "q*" and for "choice" parameters that have duplicate candidates.
+    """
+    # I don't like this function. It's better to use checkpoint for resuming.
+    ret = {}
+    for key, spec in formatted_search_space.items():
+        if not spec.is_activated_in(ret):
+            continue
+        value = parameters
+        for name in key:
+            if isinstance(name, str):
+                value = value[name]
+        if spec.categorical:
+            if spec.type == 'randint':
+                lower = min(math.ceil(float(x)) for x in spec.values)
+                ret[key] = value - lower
+            elif _is_nested_choices(spec.values):
+                names = [nested['_name'] for nested in spec.values]
+                ret[key] = names.index(value['_name'])
+            else:
+                ret[key] = spec.values.index(value)
+        else:
+            if spec.log_distributed:
+                value = math.log(value)
+            ret[key] = value
+    return ret
+
+def _format_search_space(parent_key, space):
+    formatted = []
+    for name, spec in space.items():
+        if name == '_name':
+            continue
+        key = tuple([*parent_key, name])
+        formatted.append(_format_parameter(key, spec['_type'], spec['_value']))
+        if spec['_type'] == 'choice' and _is_nested_choices(spec['_value']):
+            for index, sub_space in enumerate(spec['_value']):
+                key = tuple([*parent_key, name, index])
+                formatted += _format_search_space(key, sub_space)
+    return formatted
+
+def _format_parameter(key, type_, values):
+    spec = SimpleNamespace(
+        name = key[-1],
+        type = type_,
+        values = values,
+        key = key,
+        categorical = type_ in ['choice', 'randint'],
+    )
+
+    if spec.categorical:
+        if type_ == 'choice':
+            spec.size = len(values)
+        else:
+            lower = math.ceil(float(values[0]))
+            upper = math.ceil(float(values[1]))
+            spec.size = upper - lower
+
+    else:
+        if type_.startswith('q'):
+            spec.q = float(values[2])
+        else:
+            spec.q = None
+        spec.log_distributed = ('log' in type_)
+
+        if 'normal' in type_:
+            spec.normal_distributed = True
+            spec.mu = float(values[0])
+            spec.sigma = float(values[1])
+
+        else:
+            spec.normal_distributed = False
+            spec.low = float(values[0])
+            spec.high = float(values[1])
+            if spec.q is not None:
+                spec.clip = (spec.low, spec.high)
+            if spec.log_distributed:
+                # make it align with mu
+                spec.low = math.log(spec.low)
+                spec.high =
math.log(spec.high) + + return ParameterSpec(**spec.__dict__) + +def _is_nested_choices(values): + assert values # choices should not be empty + for value in values: + if not isinstance(value, dict): + return False + if '_name' not in value: + return False + return True + +def _assign(params, key, x): + if len(key) == 1: + params[key[0]] = x + elif isinstance(key[0], int): + _assign(params, key[1:], x) + else: + if key[0] not in params: + params[key[0]] = {} + _assign(params[key[0]], key[1:], x) diff --git a/nni/common/hpo_utils/optimize_mode.py b/nni/common/hpo_utils/optimize_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7a034c8644d1f7d35e869388947c2542b93b71 --- /dev/null +++ b/nni/common/hpo_utils/optimize_mode.py @@ -0,0 +1,5 @@ +from enum import Enum + +class OptimizeMode(Enum): + Minimize = 'minimize' + Maximize = 'maximize' diff --git a/nni/common/hpo_utils/validation.py b/nni/common/hpo_utils/validation.py new file mode 100644 index 0000000000000000000000000000000000000000..3c1ea71d57fb91edb9c7642532d3269f47f8307d --- /dev/null +++ b/nni/common/hpo_utils/validation.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from typing import Any, List, Optional + +common_search_space_types = [ + 'choice', + 'randint', + 'uniform', + 'quniform', + 'loguniform', + 'qloguniform', + 'normal', + 'qnormal', + 'lognormal', + 'qlognormal', +] + +def validate_search_space( + search_space: Any, + support_types: Optional[List[str]] = None, + raise_exception: bool = False # for now, in case false positive + ) -> bool: + + if not raise_exception: + try: + validate_search_space(search_space, support_types, True) + return True + except ValueError as e: + logging.getLogger(__name__).error(e.args[0]) + return False + + if support_types is None: + support_types = common_search_space_types + + if not isinstance(search_space, dict): + raise ValueError(f'search space is a {type(search_space).__name__}, expect a dict : {repr(search_space)}') + + for name, spec in search_space.items(): + if not isinstance(spec, dict): + raise ValueError(f'search space "{name}" is a {type(spec).__name__}, expect a dict : {repr(spec)}') + if '_type' not in spec or '_value' not in spec: + raise ValueError(f'search space "{name}" does not have "_type" or "_value" : {spec}') + type_ = spec['_type'] + if type_ not in support_types: + raise ValueError(f'search space "{name}" has unsupported type "{type_}" : {spec}') + args = spec['_value'] + if not isinstance(args, list): + raise ValueError(f'search space "{name}"\'s value is not a list : {spec}') + + if type_ == 'choice': + if not all(isinstance(arg, (float, int, str)) for arg in args): + # FIXME: need further check for each algorithm which types are actually supported + # for now validation only prints warning so it doesn't harm + if not isinstance(args[0], dict) or '_name' not in args[0]: # not nested search space + raise ValueError(f'search space "{name}" (choice) should only contain numbers or strings : {spec}') + continue + + if type_.startswith('q'): + if len(args) != 3: + raise ValueError(f'search space "{name}" ({type_}) must have 3 values : {spec}') + else: + if len(args) != 2: + raise ValueError(f'search space "{name}" ({type_}) must have 2 values : {spec}') + + if type_ == 'randint': + if not all(isinstance(arg, int) for arg in args): + raise ValueError(f'search space "{name}" ({type_}) must have int values : {spec}') + else: + if not all(isinstance(arg, (float, int)) for arg in 
args):
+                raise ValueError(f'search space "{name}" ({type_}) must have numeric values : {spec}')
+
+        if 'normal' not in type_:
+            if args[0] >= args[1]:
+                raise ValueError(f'search space "{name}" ({type_}) must have high > low : {spec}')
+            if 'log' in type_ and args[0] <= 0:
+                raise ValueError(f'search space "{name}" ({type_}) must have low > 0 : {spec}')
+        else:
+            if args[1] <= 0:
+                raise ValueError(f'search space "{name}" ({type_}) must have sigma > 0 : {spec}')
+
+    return True
diff --git a/nni/common/nas_utils.py b/nni/common/nas_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7f050ec9bb2046b782bf699e1bd3ae7925a31a1
--- /dev/null
+++ b/nni/common/nas_utils.py
@@ -0,0 +1,317 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import functools
+import logging
+
+from .. import trial
+
+
+_logger = logging.getLogger(__name__)
+_MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer"
+_namespace = {}
+_tf_variables = {}
+_arch_logits_list = []
+_optimizer = None
+_train_op = None
+
+
+def classic_mode(
+        mutable_id,
+        mutable_layer_id,
+        funcs,
+        funcs_args,
+        fixed_inputs,
+        optional_inputs,
+        optional_input_size):
+    '''Execute the chosen function and inputs directly.
+    In this mode, the trial code only runs the chosen subgraph (i.e., the chosen ops and inputs),
+    without touching the full model graph.'''
+    if trial.get_current_parameter() is None:
+        trial.get_next_parameter()
+
+    chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id,
+                                                                   list(optional_inputs.keys()))
+    real_chosen_inputs = [optional_inputs[input_name] for input_name in chosen_inputs]
+    layer_out = funcs[chosen_layer]([fixed_inputs, real_chosen_inputs], **funcs_args[chosen_layer])
+
+    return layer_out
+
+
+def enas_mode(
+        mutable_id,
+        mutable_layer_id,
+        funcs,
+        funcs_args,
+        fixed_inputs,
+        optional_inputs,
+        optional_input_size,
+        tf):
+    '''In enas mode, we build the full model graph in the trial but only run a subgraph.
+    This is implemented by masking inputs and branching ops.
+    Specifically, based on the received subgraph (through nni.get_next_parameter),
+    it can be known which inputs should be masked and which op should be executed.'''
+    name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
+    # store namespace
+    _namespace[mutable_id] = True
+    _namespace[name_prefix] = dict()
+    _namespace[name_prefix]['funcs'] = list(funcs)
+    _namespace[name_prefix]['optional_inputs'] = list(optional_inputs)
+    # create tensorflow variables as 1/0 signals used to form the subgraph
+    name_for_optional_inputs = name_prefix + '_optional_inputs'
+    name_for_funcs = name_prefix + '_funcs'
+    _tf_variables[name_prefix] = dict()
+    _tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
+        name_for_optional_inputs,
+        [len(optional_inputs)],
+        dtype=tf.bool,
+        trainable=False
+    )
+    _tf_variables[name_prefix]['funcs'] = tf.get_variable(
+        name_for_funcs, [], dtype=tf.int64, trainable=False)
+
+    # get real values using their variable names
+    real_optional_inputs_value = [optional_inputs[name]
+                                  for name in _namespace[name_prefix]['optional_inputs']]
+    real_func_value = [funcs[name]
+                       for name in _namespace[name_prefix]['funcs']]
+    real_funcs_args = [funcs_args[name]
+                       for name in _namespace[name_prefix]['funcs']]
+    # build tensorflow graph of getting chosen inputs by masking
+    real_chosen_inputs = tf.boolean_mask(
+        real_optional_inputs_value, _tf_variables[name_prefix]['optional_inputs'])
+    # build tensorflow graph of different branches by using tf.case
+    branches = dict()
+    func_output = None
+    for func_id in range(len(funcs)):
+        func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
+        # bind the current output via a default argument; a bare `lambda: func_output`
+        # would capture the loop variable late and make every branch return the last output
+        branches[tf.equal(_tf_variables[name_prefix]['funcs'], func_id)] = lambda out=func_output: out
+    layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)
+
+    return layer_out
+
+
+def oneshot_mode(
+        mutable_id,
+        mutable_layer_id,
+        funcs,
+        funcs_args,
+        fixed_inputs,
+        optional_inputs,
+        optional_input_size,
+        tf):
+    '''Similar to enas mode, oneshot mode also builds the full model graph.
+    The difference is that oneshot mode does not receive a subgraph.
+    Instead, it uses dropout to randomly drop out inputs and ops.'''
+    # NNI requires get_next_parameter to be called before reporting a result,
+    # but the parameter will not be used in this mode.
+    if trial.get_current_parameter() is None:
+        trial.get_next_parameter()
+    optional_inputs = list(optional_inputs.values())
+    inputs_num = len(optional_inputs)
+    # Calculate the dropout rate according to the formula r^(1/k), where r is a hyper-parameter
+    # and k is the number of inputs (e.g., with r = 0.01 and k = 4, the rate is 0.01 ** (1/4) ≈ 0.32)
+    if inputs_num > 0:
+        rate = 0.01 ** (1 / inputs_num)
+        noise_shape = [inputs_num] + [1] * len(optional_inputs[0].get_shape())
+        optional_inputs = tf.nn.dropout(
+            optional_inputs, rate=rate, noise_shape=noise_shape)
+        optional_inputs = [optional_inputs[idx] for idx in range(inputs_num)]
+    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
+                  for func_name, func in funcs.items()]
+    output_num = len(layer_outs)
+    rate = 0.01 ** (1 / output_num)
+    noise_shape = [output_num] + [1] * len(layer_outs[0].get_shape())
+    layer_outs = tf.nn.dropout(layer_outs, rate=rate, noise_shape=noise_shape)
+    layer_out = tf.reduce_sum(layer_outs, axis=0)
+
+    return layer_out
+
+
+def darts_mode(
+        mutable_id,
+        mutable_layer_id,
+        funcs,
+        funcs_args,
+        fixed_inputs,
+        optional_inputs,
+        optional_input_size,
+        tf):
+    optional_inputs = list(optional_inputs.values())
+    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
+                  for func_name, func in funcs.items()]
+    # Create architecture weights for every func (op)
+    var_name = "{}_{}_arch_weights".format(mutable_id, mutable_layer_id)
+    arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
+    _arch_logits_list.append(arch_logits)
+    arch_weights = tf.nn.softmax(arch_logits)
+    layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
+
+    return layer_out
+
+
+def reload_tensorflow_variables(tf, session):
+    '''In enas mode, this function reloads every signal variable created in the `enas_mode` function, so
+    the whole tensorflow graph is switched to the certain subgraph received from the tuner.
+
+    Parameters
+    ---------------
+    session: the tensorflow session created by users
+    tf: tensorflow module
+    '''
+    subgraph_from_tuner = trial.get_next_parameter()
+    mutable_layers = set()
+    for subgraph_key in subgraph_from_tuner:
+        if "/" in subgraph_key:
+            # strip the last component, which could be layer_choice or similar
+            mutable_id, mutable_layer_id = _decompose_general_key(subgraph_key[:subgraph_key.rfind("/")])
+            if mutable_id is not None:
+                mutable_layers.add((mutable_id, mutable_layer_id))
+    mutable_layers = sorted(list(mutable_layers))
+    for mutable_id, mutable_layer_id in mutable_layers:
+        if mutable_id not in _namespace:
+            _logger.warning("%s not found in name space", mutable_id)
+            continue
+        name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
+        # get the names of the optional inputs
+        optional_inputs = _namespace[name_prefix]['optional_inputs']
+        # extract layer information from the subgraph sampled by the tuner
+        chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs)
+        chosen_layer = _namespace[name_prefix]['funcs'].index(chosen_layer)
+        chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs]
+        # load this information into the pre-defined tensorflow variables
+        _tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
+        _tf_variables[name_prefix]['optional_inputs'].load(
+            chosen_inputs, session)
+
+
+def _construct_general_key(mutable_id, mutable_layer_id):
+    # Mutable layer key in a general (search space) format
+    # that is, prefix/mutable_id/mutable_layer_id
+    return _MUTABLE_LAYER_SPACE_PREFIX + "/" + mutable_id + "/" + mutable_layer_id
+
+
+def _decompose_general_key(key):
+    # inverse operation of the above
+    if not key.startswith(_MUTABLE_LAYER_SPACE_PREFIX):
+        return None, None
+    else:
+        _, mutable_id, mutable_layer_id = key.split("/", maxsplit=2)
+        return mutable_id, mutable_layer_id
+
+
+def darts_training(tf, session, loss, feed_dict):
+    global _optimizer, _train_op
+    if _optimizer is None:
+        # tf.MomentumOptimizer does not exist; the TF1 API is tf.train.MomentumOptimizer,
+        # which also requires a momentum argument (0.9 here is an assumed typical value)
+        _optimizer = tf.train.MomentumOptimizer(learning_rate=0.025, momentum=0.9)
+        # TODO: Calculate loss
+        grads_and_vars = _optimizer.compute_gradients(loss, _arch_logits_list)
+        _train_op = _optimizer.apply_gradients(grads_and_vars)
+    session.run(_train_op)
+
+
+def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
+    if nas_mode == 'darts_mode':
+        darts_training(tf, session, loss, feed_dict)
+    elif nas_mode == 'enas_mode':
+        reload_tensorflow_variables(tf, session)
+
+
+def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs):
+    # optional_inputs should be the name(key)s of the optional inputs
+    try:
+        mutable_block = trial.get_current_parameter(mutable_id)
+
+        # There is a NAS tuner
+        chosen_layer = mutable_block[mutable_layer_id]["chosen_layer"]
+        chosen_inputs = mutable_block[mutable_layer_id]["chosen_inputs"]
+    except KeyError:
+        # Try to find converted NAS parameters
+        params = trial.get_current_parameter()
+        expected_prefix = _construct_general_key(mutable_id, mutable_layer_id)
+        chosen_layer = params[expected_prefix + "/layer_choice"]
+
+        # find how many to choose
+        optional_input_size = int(params[expected_prefix + "/optional_input_size"])  # convert uniform to randint
+
+        # find which to choose; duplicates are allowed
+        optional_input_state = params[expected_prefix + "/optional_input_chosen_state"]
+        chosen_inputs = []
+        # make sure dict -> list produces a stable result by sorting
+        optional_inputs_keys = sorted(optional_inputs)
+        for _ in range(optional_input_size):
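+            # Decode the single integer state as mixed-radix digits in base
+            # len(optional_inputs): each digit selects one input key, so the
+            # same key may be chosen more than once.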
chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)])
+            optional_input_state //= len(optional_inputs)
+
+    _logger.info("%s_%s: layer: %s, optional inputs: %s", mutable_id, mutable_layer_id, chosen_layer, chosen_inputs)
+    return chosen_layer, chosen_inputs
+
+
+def convert_nas_search_space(search_space):
+    """
+    Convert the raw search space: every `mutable_layer` entry is rewritten into
+    primitive `choice` / `randint` entries.
+
+    Parameters
+    ----------
+    search_space : dict
+        raw search space
+
+    Returns
+    -------
+    dict
+        the new search space; mutable_layers are converted into choices
+    """
+    if not isinstance(search_space, dict):
+        return search_space
+    ret = dict()
+    for k, v in search_space.items():
+        if "_type" not in v:
+            # this should not happen
+            _logger.warning("There is no _type in one of your search space values with key '%s'"
+                            ". Please check your search space", k)
+            ret[k] = v
+        elif v["_type"] != "mutable_layer":
+            ret[k] = v
+        else:
+            _logger.info("Converting mutable_layer search space with key '%s'", k)
+            # v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
+            values = v["_value"]
+            for layer_name, layer_data in values.items():
+                # there should be at most layer_choice, optional_inputs, optional_input_size in layer_data
+
+                # add "_mutable_layer" as prefix so that they can be recovered later
+                layer_key = _construct_general_key(k, layer_name)
+
+                if layer_data.get("layer_choice"):  # filter out empty choice and no choice
+                    layer_choice = layer_data["layer_choice"]
+                else:
+                    raise ValueError("No layer choice found in %s" % layer_key)
+
+                if layer_data.get("optional_input_size"):
+                    input_size = layer_data["optional_input_size"]
+                    if isinstance(input_size, int):
+                        input_size = [input_size, input_size]
+                    if input_size[0] > input_size[1] or input_size[0] < 0:
+                        _logger.error("Might not be able to handle optional_input_size < 0, please double check")
+                    input_size[1] += 1
+                else:
+                    _logger.info("Optional input choices are set to empty by default in %s", layer_key)
+                    input_size = [0, 1]
+
+                if layer_data.get("optional_inputs"):
+                    total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1)
+                else:
+                    _logger.info("Optional inputs not found in %s", layer_key)
+                    total_state_size = 1
+
+                converted = {
+                    layer_key + "/layer_choice": {
+                        "_type": "choice", "_value": layer_choice
+                    },
+                    layer_key + "/optional_input_size": {
+                        "_type": "randint", "_value": input_size
+                    },
+                    layer_key + "/optional_input_chosen_state": {
+                        "_type": "randint", "_value": [0, total_state_size]
+                    }
+                }
+                _logger.info(converted)
+                ret.update(converted)
+
+    return ret
+
+
+def rewrite_nas_space(func):
+    @functools.wraps(func)
+    def wrap(self, search_space):
+        search_space = convert_nas_search_space(search_space)
+        return func(self, search_space)
+    return wrap
diff --git a/nni/common/serializer.py b/nni/common/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a982c34f48864a798c6945f7fddad7f6c0c444
--- /dev/null
+++ b/nni/common/serializer.py
@@ -0,0 +1,605 @@
+import abc
+import base64
+import collections.abc
+import copy
+import functools
+import inspect
+import numbers
+import types
+import warnings
+from io import IOBase
+from typing import Any, Dict, List, Optional, TypeVar, Union
+
+import cloudpickle  # use cloudpickle as backend for unserializable types and instances
+import json_tricks  # use json_tricks as serializer backend
+
+__all__ = ['trace', 'dump', 'load', 'PayloadTooLarge', 'Translatable', 'Traceable', 'is_traceable']
+
+
+T = TypeVar('T')
+
+
+class PayloadTooLarge(Exception):
+    pass
+
+
+class Traceable(abc.ABC):
+    """
+    A traceable object has copy and dict. Copy and mutate are used to copy the object for further mutations.
+    Dict returns a TraceDictType to enable serialization.
+    """
+    @abc.abstractmethod
+    def trace_copy(self) -> 'Traceable':
+        """
+        Perform a shallow copy.
+        NOTE: NONE of the attributes will be preserved.
+        This is the one that should be used when you want to "mutate" a serializable object.
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def trace_symbol(self) -> Any:
+        """
+        Symbol object. Could be a class or a function.
+        ``get_hybrid_cls_or_func_name`` and ``import_cls_or_func_from_hybrid_name`` are a pair that
+        converts the symbol into a string and converts the string back to the symbol.
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def trace_args(self) -> List[Any]:
+        """
+        List of positional arguments passed to symbol. Usually empty if ``kw_only`` is true,
+        in which case all the positional arguments are converted into keyword arguments.
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def trace_kwargs(self) -> Dict[str, Any]:
+        """
+        Dict of keyword arguments.
+        """
+        ...
+
+
+class Translatable(abc.ABC):
+    """
+    Inherit this class and implement ``_translate`` when the wrapped class needs a different
+    parameter from the wrapper class in its init function.
+    """
+
+    @abc.abstractmethod
+    def _translate(self) -> Any:
+        pass
+
+    @staticmethod
+    def _translate_argument(d: Any) -> Any:
+        if isinstance(d, Translatable):
+            return d._translate()
+        return d
+
+
+def is_traceable(obj: Any) -> bool:
+    """
+    Check whether an object is a traceable instance (not a type).
+    """
+    return hasattr(obj, 'trace_copy') and \
+        hasattr(obj, 'trace_symbol') and \
+        hasattr(obj, 'trace_args') and \
+        hasattr(obj, 'trace_kwargs') and \
+        not inspect.isclass(obj)
+
+
+class SerializableObject(Traceable):
+    """
+    A serializable object is a wrapper around an existing python object that supports dump and load easily.
+    It stores a symbol ``s`` and a dict of arguments ``args``, and the object can be restored with ``s(**args)``.
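+
+    Example (a minimal sketch; ``dict`` stands in for an arbitrary traced symbol):
+
+    .. code-block:: python
+
+        obj = SerializableObject(dict, [], {'a': 1})
+        restored = obj.trace_symbol(*obj.trace_args, **obj.trace_kwargs)
+        assert restored == {'a': 1}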
+ """ + + def __init__(self, symbol: T, args: List[Any], kwargs: Dict[str, Any], call_super: bool = False): + # use dict to avoid conflicts with user's getattr and setattr + self.__dict__['_nni_symbol'] = symbol + self.__dict__['_nni_args'] = args + self.__dict__['_nni_kwargs'] = kwargs + self.__dict__['_nni_call_super'] = call_super + + if call_super: + # call super means that the serializable object is by itself an object of the target class + super().__init__( + *[_argument_processor(arg) for arg in args], + **{kw: _argument_processor(arg) for kw, arg in kwargs.items()} + ) + + def trace_copy(self) -> Union[T, 'SerializableObject']: + return SerializableObject( + self.trace_symbol, + [copy.copy(arg) for arg in self.trace_args], + {k: copy.copy(v) for k, v in self.trace_kwargs.items()}, + ) + + @property + def trace_symbol(self) -> Any: + return self._get_nni_attr('symbol') + + @trace_symbol.setter + def trace_symbol(self, symbol: Any) -> None: + # for mutation purposes + self.__dict__['_nni_symbol'] = symbol + + @property + def trace_args(self) -> List[Any]: + return self._get_nni_attr('args') + + @trace_args.setter + def trace_args(self, args: List[Any]): + self.__dict__['_nni_args'] = args + + @property + def trace_kwargs(self) -> Dict[str, Any]: + return self._get_nni_attr('kwargs') + + @trace_kwargs.setter + def trace_kwargs(self, kwargs: Dict[str, Any]): + self.__dict__['_nni_kwargs'] = kwargs + + def _get_nni_attr(self, name: str) -> Any: + return self.__dict__['_nni_' + name] + + def __repr__(self): + if self._get_nni_attr('call_super'): + return super().__repr__() + return 'SerializableObject(' + \ + ', '.join(['type=' + self._get_nni_attr('symbol').__name__] + + [repr(d) for d in self._get_nni_attr('args')] + + [k + '=' + repr(v) for k, v in self._get_nni_attr('kwargs').items()]) + \ + ')' + + +def inject_trace_info(obj: Any, symbol: T, args: List[Any], kwargs: Dict[str, Any]) -> Any: + # If an object is already created, this can be a fix so that the necessary info are re-injected into the object. + + def getter_factory(x): + return lambda self: self.__dict__['_nni_' + x] + + def setter_factory(x): + def setter(self, val): + self.__dict__['_nni_' + x] = val + + return setter + + def trace_copy(self): + return SerializableObject( + self.trace_symbol, + [copy.copy(arg) for arg in self.trace_args], + {k: copy.copy(v) for k, v in self.trace_kwargs.items()}, + ) + + attributes = { + 'trace_symbol': property(getter_factory('symbol'), setter_factory('symbol')), + 'trace_args': property(getter_factory('args'), setter_factory('args')), + 'trace_kwargs': property(getter_factory('kwargs'), setter_factory('kwargs')), + 'trace_copy': trace_copy + } + + if hasattr(obj, '__class__') and hasattr(obj, '__dict__'): + for name, method in attributes.items(): + setattr(obj.__class__, name, method) + else: + wrapper = type('wrapper', (Traceable, type(obj)), attributes) + obj = wrapper(obj) # pylint: disable=abstract-class-instantiated + + # make obj complying with the interface of traceable, though we cannot change its base class + obj.__dict__.update(_nni_symbol=symbol, _nni_args=args, _nni_kwargs=kwargs) + + return obj + + +def trace(cls_or_func: T = None, *, kw_only: bool = True) -> Union[T, Traceable]: + """ + Annotate a function or a class if you want to preserve where it comes from. + This is usually used in the following scenarios: + + 1) Care more about execution configuration rather than results, which is usually the case in AutoML. 
For example,
+       you want to mutate the parameters of a function.
+    2) Repeated execution is not an issue (e.g., reproducible, execution is fast without side effects).
+
+    When a class/function is annotated, all the instances/calls will return an object as they normally would.
+    Although the object might act like a normal object, it's actually a different object with NNI-specific properties.
+    One exception is that if your function returns None, it will return an empty traceable object instead,
+    so be careful when you check whether the result ``is None``.
+
+    When the parameters of a function are received, they are first stored, and then a shallow copy is passed to the wrapped function/class.
+    This is to prevent mutable objects from being modified in the wrapped function/class.
+    When the function finishes execution, we also record extra information about where this object comes from.
+    That's why it's called "trace".
+    When ``nni.dump`` is called, that information is used by default.
+
+    If ``kw_only`` is true, all parameters are converted into keyword arguments. This is done by inspecting the argument
+    list and types. This can be useful to extract semantics, but can be tricky in some corner cases.
+
+    .. warning::
+
+        Generators will be first expanded into a list, and the resulting list will be further passed into the wrapped function/class.
+        This might hang when generators produce an infinite sequence. We might introduce an API to control this behavior in the future.
+
+    Example:
+
+    .. code-block:: python
+
+        @nni.trace
+        def foo(bar):
+            pass
+    """
+
+    def wrap(cls_or_func):
+        # already annotated, do nothing
+        if getattr(cls_or_func, '_traced', False):
+            return cls_or_func
+        if isinstance(cls_or_func, type):
+            cls_or_func = _trace_cls(cls_or_func, kw_only)
+        elif _is_function(cls_or_func):
+            cls_or_func = _trace_func(cls_or_func, kw_only)
+        else:
+            raise TypeError(f'{cls_or_func} of type {type(cls_or_func)} is not supported to be traced. '
+                            'File an issue at https://github.com/microsoft/nni/issues if you believe this is a mistake.')
+        cls_or_func._traced = True
+        return cls_or_func
+
+    # if we're being called as @trace()
+    if cls_or_func is None:
+        return wrap
+
+    # if we are called without parentheses
+    return wrap(cls_or_func)
+
+
+def dump(obj: Any, fp: Optional[Any] = None, *, use_trace: bool = True, pickle_size_limit: int = 4096,
+         allow_nan: bool = True, **json_tricks_kwargs) -> Union[str, bytes]:
+    """
+    Convert a nested data structure to a json string. Save to file if fp is specified.
+    Use json-tricks as the main backend. For cases json-tricks does not handle, use cloudpickle.
+    The serializer is not designed for long-term storage, but rather to copy data between processes.
+    The format is also subject to change between NNI releases.
+
+    Parameters
+    ----------
+    obj : any
+        The object to dump.
+    fp : file handler or path
+        File to write to. Keep it none if you want to dump a string.
+    pickle_size_limit : int
+        This is set to avoid an overly long serialization result. Set to -1 to disable the size check.
+    allow_nan : bool
+        Whether to allow nan to be serialized. Unlike the default value in json-tricks, our default value is true.
+    json_tricks_kwargs : dict
+        Other keyword arguments passed to json-tricks (backend), e.g., indent=2.
+
+    Returns
+    -------
+    str or bytes
+        Normally str. Sometimes bytes (if compressed).
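+
+    Examples
+    --------
+    A minimal round-trip sketch (assuming this pair is exposed as ``nni.dump`` / ``nni.load``):
+
+    .. code-block:: python
+
+        s = nni.dump({'lr': 0.1, 'layers': [32, 64]})
+        assert nni.load(s) == {'lr': 0.1, 'layers': [32, 64]}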
+ """ + + encoders = [ + # we don't need to check for dependency as many of those have already been required by NNI + json_tricks.pathlib_encode, # pathlib is a required dependency for NNI + json_tricks.pandas_encode, # pandas is a required dependency + json_tricks.numpy_encode, # required + json_tricks.encoders.enum_instance_encode, + json_tricks.json_date_time_encode, # same as json_tricks + json_tricks.json_complex_encode, + json_tricks.json_set_encode, + json_tricks.numeric_types_encode, + functools.partial(_json_tricks_serializable_object_encode, use_trace=use_trace), + functools.partial(_json_tricks_func_or_cls_encode, pickle_size_limit=pickle_size_limit), + functools.partial(_json_tricks_any_object_encode, pickle_size_limit=pickle_size_limit), + ] + + json_tricks_kwargs['allow_nan'] = allow_nan + + if fp is not None: + return json_tricks.dump(obj, fp, obj_encoders=encoders, **json_tricks_kwargs) + else: + return json_tricks.dumps(obj, obj_encoders=encoders, **json_tricks_kwargs) + + +def load(string: Optional[str] = None, *, fp: Optional[Any] = None, ignore_comments: bool = True, **json_tricks_kwargs) -> Any: + """ + Load the string or from file, and convert it to a complex data structure. + At least one of string or fp has to be not none. + + Parameters + ---------- + string : str + JSON string to parse. Can be set to none if fp is used. + fp : str + File path to load JSON from. Can be set to none if string is used. + ignore_comments : bool + Remove comments (starting with ``#`` or ``//``). Default is true. + + Returns + ------- + any + The loaded object. + """ + assert string is not None or fp is not None + # see encoders for explanation + hooks = [ + json_tricks.pathlib_hook, + json_tricks.pandas_hook, + json_tricks.json_numpy_obj_hook, + json_tricks.decoders.EnumInstanceHook(), + json_tricks.json_date_time_hook, + json_tricks.json_complex_hook, + json_tricks.json_set_hook, + json_tricks.numeric_types_hook, + _json_tricks_serializable_object_decode, + _json_tricks_func_or_cls_decode, + _json_tricks_any_object_decode + ] + + # to bypass a deprecation warning in json-tricks + json_tricks_kwargs['ignore_comments'] = ignore_comments + + if string is not None: + if isinstance(string, IOBase): + raise TypeError(f'Expect a string, found a {string}. 
If you intend to use a file, use `nni.load(fp=file)`') + return json_tricks.loads(string, obj_pairs_hooks=hooks, **json_tricks_kwargs) + else: + return json_tricks.load(fp, obj_pairs_hooks=hooks, **json_tricks_kwargs) + + +def _trace_cls(base, kw_only, call_super=True): + # the implementation to trace a class is to store a copy of init arguments + # this won't support class that defines a customized new but should work for most cases + + class wrapper(SerializableObject, base): + def __init__(self, *args, **kwargs): + # store a copy of initial parameters + args, kwargs = _formulate_arguments(base.__init__, args, kwargs, kw_only, is_class_init=True) + + # calling serializable object init to initialize the full object + super().__init__(symbol=base, args=args, kwargs=kwargs, call_super=call_super) + + _copy_class_wrapper_attributes(base, wrapper) + + return wrapper + + +def _trace_func(func, kw_only): + @functools.wraps(func) + def wrapper(*args, **kwargs): + # similar to class, store parameters here + args, kwargs = _formulate_arguments(func, args, kwargs, kw_only) + + # it's not clear whether this wrapper can handle all the types in python + # There are many cases here: https://docs.python.org/3/reference/datamodel.html + # but it looks that we have handled most commonly used cases + res = func( + *[_argument_processor(arg) for arg in args], + **{kw: _argument_processor(arg) for kw, arg in kwargs.items()} + ) + + if res is None: + # don't call super, makes no sense. + # an empty serializable object is "none". Don't check it though. + res = SerializableObject(func, args, kwargs, call_super=False) + elif hasattr(res, '__class__') and hasattr(res, '__dict__'): + # is a class, inject interface directly + # need to be done before primitive types because there could be inheritance here. + res = inject_trace_info(res, func, args, kwargs) + elif isinstance(res, (collections.abc.Callable, types.ModuleType, IOBase)): + raise TypeError(f'Try to add trace info to {res}, but functions and modules are not supported.') + elif isinstance(res, (numbers.Number, collections.abc.Sequence, collections.abc.Set, collections.abc.Mapping)): + # handle primitive types like int, str, set, dict, tuple + # NOTE: simple types including none, bool, int, float, list, tuple, dict + # will be directly captured by python json encoder + # and thus not possible to restore the trace parameters after dump and reload. + # this is a known limitation. + res = inject_trace_info(res, func, args, kwargs) + else: + raise TypeError(f'Try to add trace info to {res}, but the type "{type(res)}" is unknown. ' + 'Please file an issue at https://github.com/microsoft/nni/issues') + + return res + + return wrapper + + +def _copy_class_wrapper_attributes(base, wrapper): + _MISSING = '_missing' + + # assign magic attributes like __module__, __qualname__, __doc__ + for k in functools.WRAPPER_ASSIGNMENTS: + v = getattr(base, k, _MISSING) + if v is not _MISSING: + try: + setattr(wrapper, k, v) + except AttributeError: + pass + + wrapper.__wrapped__ = base + + +def _argument_processor(arg): + # 1) translate + # handle cases like ValueChoice + # This is needed because sometimes the recorded arguments are meant to be different from what the wrapped object receives. + arg = Translatable._translate_argument(arg) + # 2) prevent the stored parameters to be mutated by wrapped class. 
+    # an example: https://github.com/microsoft/nni/issues/4329
+    if isinstance(arg, (collections.abc.MutableMapping, collections.abc.MutableSequence, collections.abc.MutableSet)):
+        arg = copy.copy(arg)
+    return arg
+
+
+def _formulate_single_argument(arg):
+    # this is different from the argument processor:
+    # it directly applies the transformation on the stored arguments
+
+    # expand generator into list
+    # Note that some generator-like types (such as range(10)) are not GeneratorType and will not be expanded here.
+    if isinstance(arg, types.GeneratorType):
+        arg = list(arg)
+
+    return arg
+
+
+def _formulate_arguments(func, args, kwargs, kw_only, is_class_init=False):
+    # This is to formulate the arguments and make them well-formed.
+    if kw_only:
+        # get arguments passed to a function, and save it as a dict
+        argname_list = list(inspect.signature(func).parameters.keys())
+        if is_class_init:
+            argname_list = argname_list[1:]
+        full_args = {}
+
+        # match positional arguments with parameter names;
+        # args must not outnumber the declared parameters, because every argument can also be passed as a kwarg
+        assert len(args) <= len(argname_list), f'Length of {args} is greater than length of {argname_list}.'
+        for argname, value in zip(argname_list, args):
+            full_args[argname] = value
+
+        # use kwargs to override
+        full_args.update(kwargs)
+
+        args, kwargs = [], full_args
+
+    args = [_formulate_single_argument(arg) for arg in args]
+    kwargs = {k: _formulate_single_argument(arg) for k, arg in kwargs.items()}
+
+    return list(args), kwargs
+
+
+def _is_function(obj: Any) -> bool:
+    # https://stackoverflow.com/questions/624926/how-do-i-detect-whether-a-python-variable-is-a-function
+    return isinstance(obj, (types.FunctionType, types.BuiltinFunctionType, types.MethodType,
+                            types.BuiltinMethodType))
+
+
+def _import_cls_or_func_from_name(target: str) -> Any:
+    if target is None:
+        return None
+    path, identifier = target.rsplit('.', 1)
+    module = __import__(path, globals(), locals(), [identifier])
+    return getattr(module, identifier)
+
+
+def _strip_trace_type(traceable: Any) -> Any:
+    if getattr(traceable, '_traced', False):
+        return traceable.__wrapped__
+    return traceable
+
+
+def _get_cls_or_func_name(cls_or_func: Any) -> str:
+    module_name = cls_or_func.__module__
+    if module_name == '__main__':
+        raise ImportError('Cannot use a path to identify something from __main__.')
+    full_name = module_name + '.' + cls_or_func.__name__
+
+    try:
+        imported = _import_cls_or_func_from_name(full_name)
+        # ignores the differences in trace
+        if _strip_trace_type(imported) != _strip_trace_type(cls_or_func):
+            raise ImportError(f'Imported {imported} is not the same as expected. The function might be dynamically created.')
+    except ImportError:
+        raise ImportError(f'Import {cls_or_func.__name__} from "{module_name}" failed.')
+
+    return full_name
+
+
+def get_hybrid_cls_or_func_name(cls_or_func: Any, pickle_size_limit: int = 4096) -> str:
+    try:
+        name = _get_cls_or_func_name(cls_or_func)
+        # import success, use a path format
+        return 'path:' + name
+    except (ImportError, AttributeError):
+        b = cloudpickle.dumps(cls_or_func)
+        if len(b) > pickle_size_limit:
+            raise ValueError(f'Pickle too large when trying to dump {cls_or_func}. 
' + 'Please try to raise pickle_size_limit if you insist.') + # fallback to cloudpickle + return 'bytes:' + base64.b64encode(b).decode() + + +def import_cls_or_func_from_hybrid_name(s: str) -> Any: + if s.startswith('bytes:'): + b = base64.b64decode(s.split(':', 1)[-1]) + return cloudpickle.loads(b) + if s.startswith('path:'): + s = s.split(':', 1)[-1] + return _import_cls_or_func_from_name(s) + + +def _json_tricks_func_or_cls_encode(cls_or_func: Any, primitives: bool = False, pickle_size_limit: int = 4096) -> str: + if not isinstance(cls_or_func, type) and not _is_function(cls_or_func): + # not a function or class, continue + return cls_or_func + + return { + '__nni_type__': get_hybrid_cls_or_func_name(cls_or_func, pickle_size_limit) + } + + +def _json_tricks_func_or_cls_decode(s: Dict[str, Any]) -> Any: + if isinstance(s, dict) and '__nni_type__' in s: + s = s['__nni_type__'] + return import_cls_or_func_from_hybrid_name(s) + return s + + +def _json_tricks_serializable_object_encode(obj: Any, primitives: bool = False, use_trace: bool = True) -> Dict[str, Any]: + # Encodes a serializable object instance to json. + + # do nothing to instance that is not a serializable object and do not use trace + if not use_trace or not is_traceable(obj): + return obj + + if isinstance(obj.trace_symbol, property): + # commonly made mistake when users forget to call the traced function/class. + warnings.warn(f'The symbol of {obj} is found to be a property. Did you forget to create the instance with ``xx(...)``?') + + ret = {'__symbol__': get_hybrid_cls_or_func_name(obj.trace_symbol)} + if obj.trace_args: + ret['__args__'] = obj.trace_args + if obj.trace_kwargs: + ret['__kwargs__'] = obj.trace_kwargs + return ret + + +def _json_tricks_serializable_object_decode(obj: Dict[str, Any]) -> Any: + if isinstance(obj, dict) and '__symbol__' in obj: + symbol = import_cls_or_func_from_hybrid_name(obj['__symbol__']) + args = obj.get('__args__', []) + kwargs = obj.get('__kwargs__', {}) + return trace(symbol)(*args, **kwargs) + return obj + + +def _json_tricks_any_object_encode(obj: Any, primitives: bool = False, pickle_size_limit: int = 4096) -> Any: + # We want to use this to replace the class instance encode in json-tricks. + # Therefore the coverage should be roughly same. + if isinstance(obj, list) or isinstance(obj, dict): + return obj + if hasattr(obj, '__class__') and (hasattr(obj, '__dict__') or hasattr(obj, '__slots__')): + b = cloudpickle.dumps(obj) + if len(b) > pickle_size_limit > 0: + raise PayloadTooLarge(f'Pickle too large when trying to dump {obj}. This might be caused by classes that are ' + 'not decorated by @nni.trace. 
Another option is to force bytes pickling and ' + 'try to raise pickle_size_limit.') + # use base64 to dump a bytes array + return { + '__nni_obj__': base64.b64encode(b).decode() + } + return obj + + +def _json_tricks_any_object_decode(obj: Dict[str, Any]) -> Any: + if isinstance(obj, dict) and '__nni_obj__' in obj: + obj = obj['__nni_obj__'] + b = base64.b64decode(obj) + return cloudpickle.loads(b) + return obj diff --git a/nni/common/version.py b/nni/common/version.py new file mode 100644 index 0000000000000000000000000000000000000000..b8881f48ade5c96faac6b9ca70632f135b50c804 --- /dev/null +++ b/nni/common/version.py @@ -0,0 +1,7 @@ +import logging +try: + import torch + TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) +except Exception: + logging.info("PyTorch is not installed.") + TORCH_VERSION = None diff --git a/nni/compression/__init__.py b/nni/compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/compression/pytorch/__init__.py b/nni/compression/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10e2fd050de5630d75be2432fc50aaf95cf86b40 --- /dev/null +++ b/nni/compression/pytorch/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .speedup import ModelSpeedup +from .compressor import Compressor, Pruner, Quantizer +from .pruning import apply_compression_results diff --git a/nni/compression/pytorch/compressor.py b/nni/compression/pytorch/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f054ac13043c5f8496a608127fbe62a4c8542f --- /dev/null +++ b/nni/compression/pytorch/compressor.py @@ -0,0 +1,1044 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import types +import logging +import torch +from nni.common.graph_utils import build_module_graph +from nni.compression.pytorch.quantization.literal import QuantType, BN_FOLD_OP, BN_FOLD_TAG +from nni.compression.pytorch.quantization.observers import RecordingObserver +from . 
import default_layers
+
+_logger = logging.getLogger(__name__)
+
+class LayerInfo:
+    def __init__(self, name, module):
+        self.module = module
+        self.name = name
+        self.type = type(module).__name__
+
+def _setattr(model, name, module):
+    name_list = name.split(".")
+    for name in name_list[:-1]:
+        model = getattr(model, name)
+    setattr(model, name_list[-1], module)
+
+class Compressor:
+    """
+    Abstract base PyTorch compressor
+    """
+
+    def __init__(self, model, config_list, optimizer=None):
+        """
+        Record necessary info in class members
+
+        Parameters
+        ----------
+        model : pytorch model
+            the model user wants to compress
+        config_list : list
+            the configurations that users specify for compression
+        optimizer: pytorch optimizer
+            optimizer used to train the model
+        """
+        assert isinstance(model, torch.nn.Module)
+        self.validate_config(model, config_list)
+
+        self.bound_model = model
+        self.config_list = config_list
+        self.optimizer = optimizer
+
+        self.modules_to_compress = None
+        self.modules_wrapper = []
+        self.is_wrapped = False
+
+        self._fwd_hook_handles = {}
+        self._fwd_hook_id = 0
+
+        self.reset()
+
+        if not self.modules_wrapper:
+            _logger.warning('Nothing is configured to compress, please check your model and config_list')
+
+    def validate_config(self, model, config_list):
+        """
+        subclasses can optionally implement this method to check if config_list is valid
+        """
+        pass
+
+    def reset(self, checkpoint=None):
+        """
+        reset model state dict and model wrapper
+        """
+        self._unwrap_model()
+        if checkpoint is not None:
+            self.bound_model.load_state_dict(checkpoint)
+
+        self.modules_to_compress = None
+        self.modules_wrapper = []
+
+        for layer, config in self._detect_modules_to_compress():
+            wrapper = self._wrap_modules(layer, config)
+            self.modules_wrapper.append(wrapper)
+
+        self._wrap_model()
+
+    def _detect_modules_to_compress(self):
+        """
+        detect all modules that should be compressed, and save the result in `self.modules_to_compress`.
+        The model will be instrumented and users should never edit it after calling this method.
+        """
+        if self.modules_to_compress is None:
+            self.modules_to_compress = []
+            for name, module in self.bound_model.named_modules():
+                if module == self.bound_model:
+                    continue
+                layer = LayerInfo(name, module)
+                config = self.select_config(layer)
+                if config is not None:
+                    self.modules_to_compress.append((layer, config))
+        return self.modules_to_compress
+
+    def _wrap_model(self):
+        """
+        wrap all modules that need to be compressed
+        """
+        for wrapper in reversed(self.get_modules_wrapper()):
+            _setattr(self.bound_model, wrapper.name, wrapper)
+        self.is_wrapped = True
+
+    def _unwrap_model(self):
+        """
+        unwrap all modules that need to be compressed
+        """
+        for wrapper in self.get_modules_wrapper():
+            _setattr(self.bound_model, wrapper.name, wrapper.module)
+        self.is_wrapped = False
+
+    def compress(self):
+        """
+        Compress the model with the algorithm implemented by the subclass.
+
+        The model will be instrumented and users should never edit it after calling this method.
+        `self.modules_to_compress` records all the to-be-compressed layers
+
+        Returns
+        -------
+        torch.nn.Module
+            model with specified modules compressed.
+        """
+        return self.bound_model
+
+    def set_wrappers_attribute(self, name, value):
+        """
+        To register attributes used in the wrapped module's forward method.
+        If the value is a torch.Tensor, it is registered as a buffer in the wrapper,
+        which will be saved by model.state_dict. Otherwise, this value is just a regular variable in the wrapper.
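+        For example (illustrative; any attribute name works)::
+
+            pruner.set_wrappers_attribute('if_calculated', False)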
+
+        Parameters
+        ----------
+        name : str
+            name of the variable
+        value: any
+            value of the variable
+        """
+        for wrapper in self.get_modules_wrapper():
+            if isinstance(value, torch.Tensor):
+                wrapper.register_buffer(name, value.clone())
+            else:
+                setattr(wrapper, name, value)
+
+    def get_modules_to_compress(self):
+        """
+        To obtain all the to-be-compressed modules.
+
+        Returns
+        -------
+        list
+            a list of the layers, each of which is a tuple (`layer`, `config`),
+            `layer` is `LayerInfo`, `config` is a `dict`
+        """
+        return self.modules_to_compress
+
+    def get_modules_wrapper(self):
+        """
+        To obtain all the wrapped modules.
+
+        Returns
+        -------
+        list
+            a list of the wrapped modules
+        """
+        return self.modules_wrapper
+
+    def select_config(self, layer):
+        """
+        Find the configuration for `layer` by parsing `self.config_list`
+
+        Parameters
+        ----------
+        layer : LayerInfo
+            one layer
+
+        Returns
+        -------
+        config or None
+            the retrieved configuration for this layer; if None, this layer should
+            not be compressed
+        """
+        ret = None
+        for config in self.config_list:
+            config = config.copy()
+            # expand config if key `default` is in config['op_types']
+            if 'op_types' in config and 'default' in config['op_types']:
+                expanded_op_types = []
+                for op_type in config['op_types']:
+                    if op_type == 'default':
+                        expanded_op_types.extend(default_layers.weighted_modules)
+                    else:
+                        expanded_op_types.append(op_type)
+                config['op_types'] = expanded_op_types
+
+            # check if the condition is satisfied
+            if 'op_types' in config and layer.type not in config['op_types']:
+                continue
+            if 'op_names' in config and layer.name not in config['op_names']:
+                continue
+
+            ret = config
+        if ret is None or 'exclude' in ret:
+            return None
+        return ret
+
+    def update_epoch(self, epoch):
+        """
+        If users want to update the model every epoch, they can override this method.
+ This method should be called at the beginning of each epoch + + Parameters + ---------- + epoch : num + the current epoch number + """ + pass + + def _wrap_modules(self, layer, config): + """ + This method is implemented in the subclasses, i.e., `Pruner` and `Quantizer` + + Parameters + ---------- + layer : LayerInfo + the layer to instrument the compression operation + config : dict + the configuration for compressing this layer + """ + raise NotImplementedError() + + def add_activation_collector(self, collector): + self._fwd_hook_id += 1 + self._fwd_hook_handles[self._fwd_hook_id] = [] + for wrapper in self.get_modules_wrapper(): + handle = wrapper.register_forward_hook(collector) + self._fwd_hook_handles[self._fwd_hook_id].append(handle) + return self._fwd_hook_id + + def remove_activation_collector(self, fwd_hook_id): + if fwd_hook_id not in self._fwd_hook_handles: + raise ValueError("%s is not a valid collector id" % str(fwd_hook_id)) + for handle in self._fwd_hook_handles[fwd_hook_id]: + handle.remove() + del self._fwd_hook_handles[fwd_hook_id] + + def patch_optimizer(self, *tasks): + def patch_step(old_step): + def new_step(_, *args, **kwargs): + # call origin optimizer step method + output = old_step(*args, **kwargs) + # calculate mask + for task in tasks: + task() + return output + return new_step + if self.optimizer is not None: + self.optimizer.step = types.MethodType(patch_step(self.optimizer.step), self.optimizer) + + def patch_optimizer_before(self, *tasks): + def patch_step(old_step): + def new_step(_, *args, **kwargs): + for task in tasks: + task() + # call origin optimizer step method + output = old_step(*args, **kwargs) + return output + return new_step + if self.optimizer is not None: + self.optimizer.step = types.MethodType(patch_step(self.optimizer.step), self.optimizer) + +class PrunerModuleWrapper(torch.nn.Module): + def __init__(self, module, module_name, module_type, config, pruner): + """ + Wrap a module to enable data parallel, forward method customization and buffer registeration. 
+ + Parameters + ---------- + module : pytorch module + the module user wants to compress + config : dict + the configurations that users specify for compression + module_name : str + the name of the module to compress, wrapper module shares same name + module_type : str + the type of the module to compress + pruner : Pruner + the pruner used to calculate mask + """ + super().__init__() + # origin layer information + self.module = module + self.name = module_name + self.type = module_type + # config and pruner + self.config = config + self.pruner = pruner + + # register buffer for mask + self.register_buffer("weight_mask", torch.ones(self.module.weight.shape)) + if hasattr(self.module, 'bias') and self.module.bias is not None: + self.register_buffer("bias_mask", torch.ones(self.module.bias.shape)) + else: + self.register_buffer("bias_mask", None) + + def forward(self, *inputs): + # apply mask to weight, bias + self.module.weight.data = self.module.weight.data.mul_(self.weight_mask) + if hasattr(self.module, 'bias') and self.module.bias is not None: + self.module.bias.data = self.module.bias.data.mul_(self.bias_mask) + return self.module(*inputs) + +class Pruner(Compressor): + """ + Prune to an exact pruning level specification + + Attributes + ---------- + mask_dict : dict + Dictionary for saving masks, `key` should be layer name and + `value` should be a tensor which has the same shape with layer's weight + + """ + + def __init__(self, model, config_list, optimizer=None): + super().__init__(model, config_list, optimizer) + + def compress(self): + self.update_mask() + return self.bound_model + + def update_mask(self): + for wrapper_idx, wrapper in enumerate(self.get_modules_wrapper()): + masks = self.calc_mask(wrapper, wrapper_idx=wrapper_idx) + if masks is not None: + for k in masks: + assert hasattr(wrapper, k), "there is no attribute '%s' in wrapper" % k + setattr(wrapper, k, masks[k]) + + def calc_mask(self, wrapper, **kwargs): + """ + Pruners should overload this method to provide mask for weight tensors. + The mask must have the same shape and type comparing to the weight. + It will be applied with `mul()` operation on the weight. + This method is effectively hooked to `forward()` method of the model. + + Parameters + ---------- + wrapper : Module + calculate mask for `wrapper.module`'s weight + """ + raise NotImplementedError("Pruners must overload calc_mask()") + + def _wrap_modules(self, layer, config): + """ + Create a wrapper module to replace the original one. 
+ + Parameters + ---------- + layer : LayerInfo + the layer to instrument the mask + config : dict + the configuration for generating the mask + """ + _logger.debug("Module detected to compress : %s.", layer.name) + wrapper = PrunerModuleWrapper(layer.module, layer.name, layer.type, config, self) + assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name + # move newly registered buffers to the same device of weight + wrapper.to(layer.module.weight.device) + return wrapper + + def export_model(self, model_path, mask_path=None, onnx_path=None, input_shape=None, device=None, + dummy_input=None, opset_version=None): + """ + Export pruned model weights, masks and onnx model(optional) + + Parameters + ---------- + model_path : str + path to save pruned model state_dict + mask_path : str + (optional) path to save mask dict + onnx_path : str + (optional) path to save onnx model + input_shape : list or tuple + input shape to onnx model, used for creating a dummy input tensor for torch.onnx.export + if the input has a complex structure (e.g., a tuple), please directly create the input and + pass it to dummy_input instead + note: this argument is deprecated and will be removed; please use dummy_input instead + device : torch.device + device of the model, where to place the dummy input tensor for exporting onnx file; + the tensor is placed on cpu if ```device``` is None + only useful when both onnx_path and input_shape are passed + note: this argument is deprecated and will be removed; please use dummy_input instead + dummy_input: torch.Tensor or tuple + dummy input to the onnx model; used when input_shape is not enough to specify dummy input + user should ensure that the dummy_input is on the same device as the model + opset_version: int + opset_version parameter for torch.onnx.export; only useful when onnx_path is not None + if not passed, torch.onnx.export will use its default opset_version + """ + assert model_path is not None, 'model_path must be specified' + mask_dict = {} + self._unwrap_model() # used for generating correct state_dict name without wrapper state + + for wrapper in self.get_modules_wrapper(): + weight_mask = wrapper.weight_mask + bias_mask = wrapper.bias_mask + if weight_mask is not None: + mask_sum = weight_mask.sum().item() + mask_num = weight_mask.numel() + _logger.debug('Layer: %s Sparsity: %.4f', wrapper.name, 1 - mask_sum / mask_num) + wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask) + if bias_mask is not None: + wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask) + # save mask to dict + mask_dict[wrapper.name] = {"weight": weight_mask, "bias": bias_mask} + + torch.save(self.bound_model.state_dict(), model_path) + _logger.info('Model state_dict saved to %s', model_path) + + if mask_path is not None: + torch.save(mask_dict, mask_path) + _logger.info('Mask dict saved to %s', mask_path) + + if onnx_path is not None: + assert input_shape is not None or dummy_input is not None,\ + 'input_shape or dummy_input must be specified to export onnx model' + # create dummy_input using input_shape if input_shape is not passed + if dummy_input is None: + _logger.warning("""The argument input_shape and device will be removed in the future. 
Please create a dummy input and pass it to dummy_input instead.""")
+                if device is None:
+                    device = torch.device('cpu')
+                input_data = torch.Tensor(*input_shape).to(device)
+            else:
+                input_data = dummy_input
+            if opset_version is not None:
+                torch.onnx.export(self.bound_model, input_data, onnx_path, opset_version=opset_version)
+            else:
+                torch.onnx.export(self.bound_model, input_data, onnx_path)
+            if dummy_input is None:
+                _logger.info('Model in onnx with input shape %s saved to %s', input_data.shape, onnx_path)
+            else:
+                _logger.info('Model in onnx saved to %s', onnx_path)
+
+        self._wrap_model()
+
+    def load_model_state_dict(self, model_state):
+        """
+        Load the state dict saved from the unwrapped model.
+
+        Parameters
+        ----------
+        model_state : dict
+            state dict saved from the unwrapped model
+        """
+        if self.is_wrapped:
+            self._unwrap_model()
+            self.bound_model.load_state_dict(model_state)
+            self._wrap_model()
+        else:
+            self.bound_model.load_state_dict(model_state)
+
+    def get_pruned_weights(self, dim=0):
+        """
+        Log the simulated prune sparsity.
+
+        Parameters
+        ----------
+        dim : int
+            the pruned dim.
+        """
+        for _, wrapper in enumerate(self.get_modules_wrapper()):
+            weight_mask = wrapper.weight_mask
+            mask_size = weight_mask.size()
+            if len(mask_size) == 1:
+                index = torch.nonzero(weight_mask.abs() != 0).tolist()
+            else:
+                sum_idx = list(range(len(mask_size)))
+                sum_idx.remove(dim)
+                index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0).tolist()
+            _logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}')
+
+
+class QuantizerModuleWrapper(torch.nn.Module):
+    def __init__(self, module, module_name, module_type, config, quantizer, bn_module=None):
+        """
+        Wrap a module to enable data parallel, forward method customization and buffer registration.
+
+        Parameters
+        ----------
+        module : pytorch module
+            the module user wants to compress
+        config : dict
+            the configurations that users specify for compression
+        module_name : str
+            the name of the module to compress; the wrapper module shares the same name
+        module_type : str
+            the type of the module to compress
+        quantizer : Quantizer
+            the quantizer used to quantize the module
+        bn_module : torch.nn.Module
+            batch norm layer corresponding to the current module, used for simulating batch normalization folding
+        """
+        super().__init__()
+        # origin layer information
+        self.module = module
+        self.name = module_name
+        self.type = module_type
+        # config and pruner
+        self.config = config
+        self.quantizer = quantizer
+        self.bn_module = bn_module
+
+        # register buffer and parameter
+        # old_weight stores the origin weight and weight stores the quantized weight;
+        # weight is a buffer instead of a parameter because pytorch only treats parameters as leaves,
+        # and if weight were a leaf, old_weight could not be updated.
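+        # In other words: old_weight (a Parameter) is the full-precision tensor the optimizer
+        # updates, while weight (a buffer) holds the quantized copy recomputed on each forward.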
+        if 'weight' in config['quant_types']:
+            if not _check_weight(self.module):
+                _logger.warning('Module %s does not have parameter "weight"', self.name)
+            else:
+                self.module.register_parameter('old_weight', torch.nn.Parameter(self.module.weight))
+                delattr(self.module, 'weight')
+                self.module.register_buffer('weight', self.module.old_weight.data)
+
+                # for batch normalization folding
+                if self.bn_module is not None:
+                    if _check_bias(self.module):
+                        self.module.register_parameter('old_bias', torch.nn.Parameter(self.module.bias))
+                        init_tensor = self.module.old_bias.data
+                    else:
+                        init_tensor = torch.zeros_like(self.bn_module.weight)
+                    delattr(self.module, 'bias')
+                    self.module.register_buffer('bias', init_tensor)
+                    setattr(module, BN_FOLD_TAG, True)
+
+    def forward(self, *inputs):
+        if 'input' in self.config['quant_types']:
+            assert len(inputs) == 1, "Quantization of input only supports ops with single input."
+            new_inp = self.quantizer.quant_grad(
+                inputs[0],
+                QuantType.INPUT,
+                self)
+            inputs = (new_inp,)
+
+        if 'weight' in self.config['quant_types'] and _check_weight(self.module):
+            if self.bn_module is not None:
+                # simulate batch normalization folding
+                new_weight, new_bias = self.quantizer.fold_bn(*inputs, wrapper=self)
+                self.module.bias = new_bias
+                self.module.weight = new_weight
+            else:
+                new_weight = self.module.old_weight
+                self.module.weight = new_weight.data
+
+            self.quantizer.quant_grad(
+                new_weight,
+                QuantType.WEIGHT,
+                self, inputs[0])
+
+        result = self.module(*inputs)
+
+        if 'output' in self.config['quant_types']:
+            result = self.quantizer.quant_grad(
+                result,
+                QuantType.OUTPUT,
+                self)
+        return result
+
+
+class QuantizerIdentityWrapper(torch.nn.Module):
+    def __init__(self, module, module_name):
+        """
+        Used to wrap modules that should be treated as torch.Identity
+
+        Parameters
+        ----------
+        module : pytorch module
+            the module to be wrapped
+        module_name : str
+            the name of the module to be wrapped; the wrapper module shares the same name
+        """
+        super().__init__()
+        self.module = module
+        self.module_name = module_name
+
+    def forward(self, x):
+        return x
+
+
+class Quantizer(Compressor):
+    """
+    Base class for PyTorch quantizers
+    """
+
+    def __init__(self, model, config_list, optimizer=None, dummy_input=None):
+        if isinstance(model, torch.nn.DataParallel):
+            model = model.module
+        model_copied = copy.deepcopy(model)
+        self.identity_wrappers = []
+        self.conv_bn_patterns = {}
+        self.find_conv_bn_patterns(model, dummy_input)
+        super().__init__(model, config_list, optimizer)
+        self.all_shapes = {}
+        self.record_shape(model_copied, dummy_input)
+        self.quant_grad = QuantGrad.apply
+        if self.optimizer is not None:
+            self.patch_optimizer(self.step_with_optimizer)
+            for wrapper in self.get_modules_wrapper():
+                if 'weight' in wrapper.config['quant_types']:
+                    # old_weight is registered to keep track of the weight before quantization,
+                    # and it is trainable, therefore it should be added to the optimizer.
+                    self.optimizer.add_param_group({"params": wrapper.module.old_weight})
+                # This is for conv with bias + bn. Although this situation is relatively rare,
+                # we still need to deal with the old_bias when it occurs
+                if hasattr(wrapper.module, "old_bias"):
+                    self.optimizer.add_param_group({"params": getattr(wrapper.module, "old_bias")})
+
+    def quantize_weight(self, wrapper, **kwargs):
+        """
+        Quantizers should overload this method to quantize weight.
+        This method is effectively hooked to :meth:`forward` of the model.
+
+        Parameters
+        ----------
+        wrapper : QuantizerModuleWrapper
+            the wrapper for origin module
+        """
+        raise NotImplementedError('Quantizer must overload quantize_weight()')
+
+    def quantize_output(self, output, wrapper, **kwargs):
+        """
+        Quantizers should overload this method to quantize output.
+        This method is effectively hooked to :meth:`forward` of the model.
+
+        Parameters
+        ----------
+        output : Tensor
+            output that needs to be quantized
+        wrapper : QuantizerModuleWrapper
+            the wrapper for origin module
+        """
+        raise NotImplementedError('Quantizer must overload quantize_output()')
+
+    def quantize_input(self, inputs, wrapper, **kwargs):
+        """
+        Quantizers should overload this method to quantize input.
+        This method is effectively hooked to :meth:`forward` of the model.
+
+        Parameters
+        ----------
+        inputs : Tensor
+            inputs that need to be quantized
+        wrapper : QuantizerModuleWrapper
+            the wrapper for origin module
+        """
+        raise NotImplementedError('Quantizer must overload quantize_input()')
+
+    def fold_bn(self, *inputs, wrapper):
+        """
+        Simulate batch normalization folding in the training graph. Folded weight and bias are
+        returned for the following operations.
+
+        Parameters
+        ----------
+        inputs : tuple of torch.Tensor
+            inputs for the module
+        wrapper : QuantizerModuleWrapper
+            the wrapper for origin module
+
+        Returns
+        -------
+        Tuple of torch.Tensor
+        """
+        module = wrapper.module
+        bn_module = wrapper.bn_module
+        with torch.no_grad():
+            output = module(*inputs)
+            _ = bn_module(output)
+        running_mean = bn_module.running_mean
+        running_var = torch.sqrt(bn_module.running_var + bn_module.eps)
+        bn_weight = bn_module.weight
+        bn_bias = bn_module.bias
+        dimensions = len(module.weight.shape)
+        shape = [-1] + [1] * (dimensions - 1)
+        new_weight = module.old_weight * bn_weight.reshape(shape) / running_var.reshape(shape)
+        if hasattr(module, 'old_bias'):
+            new_bias = bn_bias + (module.old_bias - running_mean) / running_var * bn_weight
+        else:
+            new_bias = bn_bias - running_mean / running_var * bn_weight
+        return new_weight, new_bias
+
+    def _wrap_modules(self, layer, config):
+        """
+        Create a wrapper forward function to replace the original one.
+ Parameters + ---------- + layer : LayerInfo + the layer to instrument the mask + config : dict + the configuration for quantization + """ + assert 'quant_types' in config, 'must provide quant_types in config' + assert isinstance(config['quant_types'], list), 'quant_types must be list type' + assert 'quant_bits' in config, 'must provide quant_bits in config' + assert isinstance(config['quant_bits'], int) or isinstance(config['quant_bits'], dict), 'quant_bits must be dict type or int type' + + if isinstance(config['quant_bits'], dict): + for quant_type in config['quant_types']: + assert quant_type in config['quant_bits'], 'bits length for %s must be specified in quant_bits dict' % quant_type + + # bound bn module to corresponding conv module + bn_module = None + if layer.name in self.conv_bn_patterns: + bn_module_name = self.conv_bn_patterns[layer.name] + for name, module in self.bound_model.named_modules(): + if name == bn_module_name: + bn_module = module + break + assert bn_module is not None, "BN module corresponding to layer {} is not found".format(layer.name) + self.identity_wrappers.append(QuantizerIdentityWrapper(bn_module, bn_module_name)) + return QuantizerModuleWrapper(layer.module, layer.name, layer.type, config, self, bn_module) + + def _wrap_model(self): + """ + wrap all modules that needed to be compressed + + """ + # wrap folded bn in order to bypass its forward process + for wrapper in reversed(self.identity_wrappers): + _setattr(self.bound_model, wrapper.module_name, wrapper) + super()._wrap_model() + + def _unwrap_model(self): + """ + unwrap all modules that needed to be compressed + + """ + for wrapper in self.identity_wrappers: + _setattr(self.bound_model, wrapper.module_name, wrapper.module) + super()._unwrap_model() + + def export_model_save(self, model, model_path, calibration_config=None, calibration_path=None, onnx_path=None, + input_shape=None, device=None): + """ + This method helps save pytorch model, calibration config, onnx model in quantizer. + + Parameters + ---------- + model : pytorch model + pytorch model to be saved + model_path : str + path to save pytorch + calibration_config: dict + (optional) config of calibration parameters + calibration_path : str + (optional) path to save quantize parameters after calibration + onnx_path : str + (optional) path to save onnx model + input_shape : list or tuple + input shape to onnx model + device : torch.device + device of the model, used to place the dummy input tensor for exporting onnx file. 
the tensor is placed on cpu if ```device``` is None
+        """
+        torch.save(model.state_dict(), model_path)
+        _logger.info('Model state_dict saved to %s', model_path)
+        if calibration_path is not None:
+            torch.save(calibration_config, calibration_path)
+            _logger.info('Calibration config saved to %s', calibration_path)
+        if onnx_path is not None:
+            assert input_shape is not None, 'input_shape must be specified to export onnx model'
+            # input info needed
+            if device is None:
+                device = torch.device('cpu')
+            input_data = torch.Tensor(*input_shape)
+            torch.onnx.export(self.bound_model, input_data.to(device), onnx_path)
+            _logger.info('Model in onnx with input shape %s saved to %s', input_data.shape, onnx_path)
+
+    def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None):
+        """
+        Export quantized model weights and calibration parameters
+
+        Parameters
+        ----------
+        model_path : str
+            path to save the quantized model weight
+        calibration_path : str
+            (optional) path to save quantize parameters after calibration
+        onnx_path : str
+            (optional) path to save onnx model
+        input_shape : list or tuple
+            input shape to onnx model
+        device : torch.device
+            device of the model, used to place the dummy input tensor for exporting onnx file.
+            the tensor is placed on cpu if ```device``` is None
+
+        Returns
+        -------
+        Dict
+        """
+        raise NotImplementedError('Quantizer must overload export_model()')
+
+    def load_calibration_config(self, calibration_config):
+        """
+        This function helps the quantizer set quantization parameters by
+        loading from a calibration_config which is exported by another quantizer
+        or by itself. The main usage is helping a quantization-aware-training quantizer
+        set appropriate initial parameters, so that the training process is
+        much more flexible and converges quickly. It also enables a quantizer
+        to resume a quantized model by loading parameters from the config.
+
+        Parameters
+        ----------
+        calibration_config : dict
+            dict which saves quantization parameters; a quantizer can export its own
+            calibration config,
+            e.g., calibration_config = quantizer.export_model(model_path, calibration_path)
+        """
+        raise NotImplementedError('Quantizer must overload load_calibration_config()')
+
+    def find_conv_bn_patterns(self, model, dummy_input):
+        """
+        Find all Conv-BN patterns, used for batch normalization folding
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            model to be analyzed.
+        dummy_input : tuple of torch.Tensor
+            inputs to the model, used for generating the torchscript
+        """
+        if dummy_input is None:
+            _logger.debug("Model inputs are not given, batch normalization folding is disabled")
+            return
+
+        graph = build_module_graph(model, dummy_input)
+        for node_group in graph.nodes_py.nodes_op:
+            if node_group.op_type in BN_FOLD_OP:
+                successors = graph.find_successors(node_group.unique_name)
+                successors = [graph.name_to_node[x] for x in successors]
+                for successor in successors:
+                    if successor.op_type == 'BatchNorm2d':
+                        self.conv_bn_patterns[node_group.name] = successor.name
+
+    def record_shape(self, model, dummy_input):
+        """
+        Record input/output shapes of each module to be quantized
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            model to be recorded.
+        dummy_input : tuple of torch.Tensor
+            inputs to the model.
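+
+        Shapes are stored in ``self.all_shapes``, keyed by module name, as
+        ``[input_shape, output_shape]``.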
+ """ + def _pre_forward_hook(self, inp): + # Only record the first tensor of the input + return self.pre_forward(inp[0]) + + def _post_forward_hook(self, _, out): + return self.post_forward(out) + + if dummy_input is None: + return + + all_handles = [] + all_observers = {} + modules_to_compress = self.get_modules_to_compress() + compress_names = [layer_info[0].name for layer_info in modules_to_compress] + for name, module in model.named_modules(): + if name in compress_names: + all_observers[name] = {} + all_observers[name]['input_hook'] = RecordingObserver() + all_observers[name]['output_hook'] = RecordingObserver() + module.add_module('pre_forward', all_observers[name]['input_hook']) + module.add_module('post_forward', all_observers[name]['output_hook']) + all_handles.append(module.register_forward_pre_hook(_pre_forward_hook)) + all_handles.append(module.register_forward_hook(_post_forward_hook)) + model(dummy_input) + for name, hooks in all_observers.items(): + # only support single input + input_val = hooks['input_hook'].tensor_val + input_shape = input_val[0].shape if input_val else None + output_val = hooks['output_hook'].tensor_val + output_shape = output_val[0].shape if output_val else None + shapes = [input_shape, output_shape] + self.all_shapes[name] = shapes + return + + def step_with_optimizer(self): + pass + + +class QuantGrad(torch.autograd.Function): + """ + Base class for overriding backward function of quantization operation. + """ + @classmethod + def _quantize(cls, x, scale, zero_point): + """ + Reference function for quantizing x -- non-clamped. + Parameters + ---------- + x : Tensor + tensor to be quantized + scale : Tensor + scale for quantizing x + zero_point : Tensor + zero_point for quantizing x + Returns + ------- + tensor + quantized x without clamped + """ + return ((x / scale) + zero_point).round() + + @classmethod + def get_bits_length(cls, config, quant_type): + """ + Get bits for quantize config + Parameters + ---------- + config : Dict + the configuration for quantization + quant_type : str + quant type + Returns + ------- + int + n-bits for quantization configuration + """ + if isinstance(config["quant_bits"], int): + return config["quant_bits"] + else: + return config["quant_bits"].get(quant_type) + + @staticmethod + def quant_backward(tensor, grad_output, quant_type, scale, zero_point, qmin, qmax): + """ + This method should be overrided by subclass to provide customized backward function, + default implementation is Straight-Through Estimator + Parameters + ---------- + tensor : Tensor + input of quantization operation + grad_output : Tensor + gradient of the output of quantization operation + scale : Tensor + the type of quantization, it can be `QuantType.INPUT`, `QuantType.WEIGHT`, + `QuantType.OUTPUT`, you can define different behavior for different types. 
+        zero_point : Tensor
+            zero_point for quantizing tensor
+        qmin : Tensor
+            quant_min for quantizing tensor
+        qmax : Tensor
+            quant_max for quantizing tensor
+
+        Returns
+        -------
+        tensor
+            gradient of the input of quantization operation
+        """
+        return grad_output
+
+    @staticmethod
+    def forward(ctx, tensor, quant_type, wrapper, input_tensor=None, **kwargs):
+        output = quantize_helper(tensor, quant_type, wrapper, input_tensor, **kwargs)
+
+        if hasattr(wrapper.module, "layer_quant_setting"):
+            layer_quant_setting = wrapper.module.layer_quant_setting
+            qmin, qmax = getattr(layer_quant_setting, quant_type).get_qmin_qmax()
+        else:
+            # todo: when dtype/scheme customization is ready for all quantizers, remove this
+            bits = QuantGrad.get_bits_length(wrapper.config, quant_type)
+            qmin, qmax = 0, (1 << bits) - 1
+
+        scale_name, zero_point_name = quant_type.type_to_scale_zero_point_name()
+        if hasattr(wrapper.module, scale_name) and hasattr(wrapper.module, zero_point_name):
+            scale = getattr(wrapper.module, scale_name)
+            zero_point = getattr(wrapper.module, zero_point_name)
+        # todo: remove this when other quantizers use different scale & zero point for input/weight/output
+        elif hasattr(wrapper.module, 'scale') and hasattr(wrapper.module, 'zero_point'):
+            scale = wrapper.module.scale
+            zero_point = wrapper.module.zero_point
+        else:
+            scale, zero_point = None, None
+        # Only tensors that have gradients flowing back need to be saved by save_for_backward;
+        # others should be assigned to ctx directly.
+        ctx.save_for_backward(tensor)
+        ctx.quant_type = quant_type
+        ctx.qmin, ctx.qmax = qmin, qmax
+        ctx.scale = scale
+        ctx.zero_point = zero_point
+        return output
+
+    @classmethod
+    def backward(cls, ctx, grad_output):
+        tensor = ctx.saved_variables[0]
+        scale, zero_point = ctx.scale, ctx.zero_point
+        quant_type = ctx.quant_type
+        qmin, qmax = ctx.qmin, ctx.qmax
+        output = cls.quant_backward(tensor, grad_output, quant_type, scale, zero_point, qmin, qmax)
+        return output, None, None, None
+
+def _check_weight(module):
+    try:
+        return isinstance(module.weight.data, torch.Tensor)
+    except AttributeError:
+        return False
+
+def _check_bias(module):
+    try:
+        return isinstance(module.bias.data, torch.Tensor)
+    except AttributeError:
+        return False
+
+def quantize_helper(tensor, quant_type, wrapper, input_tensor=None, **kwargs):
+    if quant_type == QuantType.INPUT:
+        output = wrapper.quantizer.quantize_input(tensor, wrapper=wrapper, **kwargs)
+    elif quant_type == QuantType.WEIGHT:
+        output = wrapper.quantizer.quantize_weight(wrapper, input_tensor=input_tensor, **kwargs)
+    elif quant_type == QuantType.OUTPUT:
+        output = wrapper.quantizer.quantize_output(tensor, wrapper, **kwargs)
+    else:
+        raise ValueError("unrecognized QuantType.")
+
+    return output
+
+class QuantForward(torch.nn.Module):
+    """
+    Base class for executing quantization operations. This is for quantization algorithms
+    that do not need to customize the gradient.
+    """
+
+    def forward(self, tensor, quant_type, wrapper, input_tensor=None, **kwargs):
+        return quantize_helper(tensor, quant_type, wrapper, input_tensor, **kwargs)
diff --git a/nni/compression/pytorch/default_layers.py b/nni/compression/pytorch/default_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d7e6d8aed84ad76c9404f301117fb8a00d9a570
--- /dev/null
+++ b/nni/compression/pytorch/default_layers.py
@@ -0,0 +1,9 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
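+
+# Module types that the 'default' op_type in a compression config expands to
+# (see Compressor.select_config).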
+ +weighted_modules = [ + 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d', + 'Linear', 'Bilinear', + 'PReLU', + 'Embedding', 'EmbeddingBag', +] diff --git a/nni/compression/pytorch/pruning/__init__.py b/nni/compression/pytorch/pruning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9d3a7d2ca90a76918e1a89508f007903d1bb6485 --- /dev/null +++ b/nni/compression/pytorch/pruning/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .apply_compression import apply_compression_results diff --git a/nni/compression/pytorch/pruning/apply_compression.py b/nni/compression/pytorch/pruning/apply_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6b023f5b90b8483e21bd0bc575b19b4a4df023 --- /dev/null +++ b/nni/compression/pytorch/pruning/apply_compression.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch + +logger = logging.getLogger('torch apply compression') + +def apply_compression_results(model, masks_file, map_location=None): + """ + Apply the masks from ```masks_file``` to the model + Note: this API is for inference, because it simply multiplies weights with + corresponding masks when this API is called. + + Parameters + ---------- + model : torch.nn.Module + The model to be compressed + masks_file : str + The path of the mask file + map_location : str + the device on which masks are placed, same to map_location in ```torch.load``` + """ + masks = torch.load(masks_file, map_location) + for name, module in model.named_modules(): + if name in masks: + module.weight.data = module.weight.data.mul_(masks[name]['weight']) + if hasattr(module, 'bias') and module.bias is not None and 'bias' in masks[name]: + module.bias.data = module.bias.data.mul_(masks[name]['bias']) \ No newline at end of file diff --git a/nni/compression/pytorch/quantization/literal.py b/nni/compression/pytorch/quantization/literal.py new file mode 100644 index 0000000000000000000000000000000000000000..eaad1dcf25dc7bbe517e64af9ce7731a3f4c76f8 --- /dev/null +++ b/nni/compression/pytorch/quantization/literal.py @@ -0,0 +1,65 @@ +from enum import Enum, EnumMeta + + +class _QuantLiteralEnumMeta(EnumMeta): + def __contains__(cls, item): + try: + cls(item) + except ValueError: + return False + return True + + +class _QuantLiteralEnum(Enum, metaclass=_QuantLiteralEnumMeta): + pass + + +class QuantScheme(str, _QuantLiteralEnum): + PER_TENSOR_AFFINE = 'per_tensor_affine' + PER_TENSOR_SYMMETRIC = 'per_tensor_symmetric' + PER_CHANNEL_AFFINE = 'per_channel_affine' + PER_CHANNEL_SYMMETRIC = 'per_channel_symmetric' + + +PER_CHANNEL_QUANT_SCHEME = [QuantScheme.PER_CHANNEL_AFFINE, QuantScheme.PER_CHANNEL_SYMMETRIC] + + +class QuantDtype(str, _QuantLiteralEnum): + UINT = 'uint' + INT = 'int' + + +class QuantType(str, _QuantLiteralEnum): + INPUT = 'input' + WEIGHT = 'weight' + OUTPUT = 'output' + + def type_to_scale_zero_point_name(self): + if self == QuantType.INPUT: + return 'input_scale', 'input_zero_point' + elif self == QuantType.WEIGHT: + return 'weight_scale', 'weight_zero_point' + elif self == QuantType.OUTPUT: + return 'output_scale', 'output_zero_point' + else: + raise TypeError + + +# Just show each attribute's name, no practical effect +class QuantConfigLiteral(str, _QuantLiteralEnum): + QUANT_SETTINGS = 'quant_settings' + QUANT_SCHEME = 'quant_scheme' + QUANT_DTYPE = 'quant_dtype' + BITS = 'bits' + QMIN = 
'qmin' + QMAX = 'qmax' + INPUT_SCALE = 'input_scale' + INPUT_ZERO_POINT = 'input_zero_point' + OUTPUT_SCALE = 'output_scale' + OUTPUT_ZERO_POINT = 'output_zero_point' + WEIGHT_SCALE = 'weight_scale' + WEIGHT_ZERO_POINT = 'weight_zero_point' + + +BN_FOLD_OP = ["Conv2d"] +BN_FOLD_TAG = 'BN_FOLD_TAG' diff --git a/nni/compression/pytorch/quantization/observers.py b/nni/compression/pytorch/quantization/observers.py new file mode 100644 index 0000000000000000000000000000000000000000..bd7b2bc28862c8e4efaa583ee9d53b1a98a07303 --- /dev/null +++ b/nni/compression/pytorch/quantization/observers.py @@ -0,0 +1,15 @@ +from torch.quantization import default_weight_observer, default_histogram_observer +from torch.quantization import RecordingObserver as _RecordingObserver + +__all__ = ["default_weight_observer", "default_histogram_observer", "RecordingObserver"] + + +class RecordingObserver(_RecordingObserver): + """ + A extended version of PyTorch's RecordingObserver, used to record gpu tensor + """ + + def forward(self, x): + val = x.cpu() + super().forward(val) + return x diff --git a/nni/compression/pytorch/quantization/settings.py b/nni/compression/pytorch/quantization/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..b4206e239a01b2595e19e76d0d91741795fa16c5 --- /dev/null +++ b/nni/compression/pytorch/quantization/settings.py @@ -0,0 +1,118 @@ +from typing import Any, Optional + +from .literal import QuantDtype, QuantType, QuantScheme +from .utils import calculate_qmin_qmax, get_bits_length + + +# default settings for quantization module +quant_default_settings = { + QuantType.WEIGHT: { + 'quant_scheme': QuantScheme.PER_TENSOR_AFFINE, + 'quant_dtype': QuantDtype.UINT, + }, + QuantType.INPUT: { + 'quant_scheme': QuantScheme.PER_TENSOR_AFFINE, + 'quant_dtype': QuantDtype.UINT + }, + QuantType.OUTPUT: { + 'quant_scheme': QuantScheme.PER_TENSOR_AFFINE, + 'quant_dtype': QuantDtype.UINT + } +} + + +class TensorQuantSetting(object): + def __init__(self, **kwargs): + self._fields = {} + for k, v in kwargs.items(): + self._fields[k] = v + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self._fields[name] = val + + def __getattr__(self, name): + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find {} in TensorQuantSetting!".format(name)) + return self._fields[name] + + def get_qmin_qmax(self): + assert 'qmin' in self._fields and 'qmax' in self._fields, \ + "Can not found qmin & qmax in TensorQuantSetting" + return self._fields['qmin'], self._fields['qmax'] + + +class LayerQuantSetting(object): + def __init__(self, config): + self.input: Optional[TensorQuantSetting] = None + self.weight: Optional[TensorQuantSetting] = None + self.output: Optional[TensorQuantSetting] = None + self._extra_layer_setting = {} + + for quant_type in QuantType: + if quant_type in config.get("quant_types", []): + setting = TensorQuantSetting() + + quant_scheme = self.parse_optional_config(config, quant_type, 'quant_scheme') + setting.quant_scheme = quant_scheme + quant_dtype = self.parse_optional_config(config, quant_type, 'quant_dtype') + setting.quant_dtype = quant_dtype + + bits = get_bits_length(config, quant_type) + qmin, qmax = calculate_qmin_qmax(bits, quant_dtype) + setting.bits = bits + setting.qmin = qmin + setting.qmax = qmax + setattr(self, quant_type, setting) + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_") or name in QuantType: + 
super().__setattr__(name, val) + else: + self._extra_layer_setting[name] = val + + def __getattr__(self, name): + if name == "_extra_layer_setting" or name not in self._extra_layer_setting: + raise AttributeError("Cannot find {} in LayerQuantSetting!".format(name)) + return self._extra_layer_setting[name] + + @staticmethod + def parse_optional_config(config, quant_type, target): + def get_config(config, quant_type, target): + if not config.get(target): + return None + + if isinstance(config[target], dict): + return config[target].get(quant_type) + else: + return config[target] + + default_val = quant_default_settings[quant_type].get(target, None) + config_val = get_config(config, quant_type, target) + val = config_val if config_val else default_val + return val + + +def set_quant_scheme_dtype(quant_type, new_scheme=None, new_dtype=None): + # todo: remove this if we convert string config to enum type. + if isinstance(quant_type, str): + assert quant_type in QuantType, "Wrong quant_type" + if isinstance(new_scheme, str): + assert new_scheme in QuantScheme, "Wrong quant_scheme" + if isinstance(new_dtype, str): + assert new_dtype in QuantDtype, "Wrong quant_dtype" + + # TODO: It is not a good idea to directly modify global settings. A better choice is + # making this function an attribute function of Quantizer and call this function after + # the quantizer is initialized. However, within current framework of quantization, if + # we want to modify the dtype & scheme when the quantizer is initialized, we must do + # some other things (like changing the shapes of scales and zero_points and other quantization + # information in the subclass). + global quant_default_settings + if new_scheme is not None: + quant_default_settings[quant_type]['quant_scheme'] = new_scheme + if new_dtype is not None: + quant_default_settings[quant_type]['quant_dtype'] = new_dtype + return diff --git a/nni/compression/pytorch/quantization/utils.py b/nni/compression/pytorch/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c0735c52c2b773a567989659f374b3f843ec7178 --- /dev/null +++ b/nni/compression/pytorch/quantization/utils.py @@ -0,0 +1,83 @@ +import torch + +from nni.common.version import TORCH_VERSION + +from .literal import QuantDtype, QuantScheme, QuantType + + +def calculate_qmin_qmax(bits, dtype): + if dtype == QuantDtype.INT: + qmin, qmax = -2 ** (bits - 1) + 1, 2 ** (bits - 1) - 1 + elif dtype == QuantDtype.UINT: + qmin, qmax = 0, 2 ** bits - 1 + else: + raise TypeError("Wrong quantization dtype, please make sure it is one of 'int' and 'uint'.") + return qmin, qmax + + +def get_bits_length(config, quant_type): + if isinstance(config["quant_bits"], int): + return config["quant_bits"] + else: + return config["quant_bits"].get(quant_type) + + +def get_target_dim(quant_type, quant_scheme): + # for weight: c_out x c_in x (h) * (w) + # for feature maps: batch * channel * (t) * h * w + # other type is not supported for now + default_idx = 0 if quant_type == QuantType.WEIGHT else 1 + if is_per_channel(quant_scheme): + target_dim = default_idx + else: + target_dim = None + return target_dim + + +def get_min_max_value(x, quant_type, quant_scheme): + + target_dim = get_target_dim(quant_type, quant_scheme) + if target_dim is None: + return torch.min(x), torch.max(x) + + indices = list(range(len(x.shape))) + assert target_dim < len(indices), "target_dim needs to be less than the number of dim of the tensor" + del indices[target_dim] + + if TORCH_VERSION > (1, 6): + min_val = 
torch.amin(x, indices, keepdims=True) + max_val = torch.amax(x, indices, keepdims=True) + else: + min_val = max_val = x + for ind in indices: + min_val = torch.min(min_val, dim=ind, keepdim=True)[0] + max_val = torch.max(max_val, dim=ind, keepdim=True)[0] + return min_val, max_val + + +def get_mean_value(x, target_dim=None): + if target_dim is None: + return torch.mean(x) + + indices = list(range(len(x.shape))) + assert target_dim < len(indices), "target_dim needs to be less than the number of dim of the tensor" + del indices[target_dim] + + mean_val = torch.mean(x, dim=indices, keepdim=True) + return mean_val + + +def is_per_channel(quant_scheme): + if quant_scheme in [QuantScheme.PER_CHANNEL_AFFINE, QuantScheme.PER_CHANNEL_SYMMETRIC]: + return True + else: + return False + + +def get_quant_shape(shape, quant_type, quant_scheme): + default_idx = 0 if quant_type == QuantType.WEIGHT else 1 + if is_per_channel(quant_scheme): + quant_shape = [1 if idx != default_idx else s for idx, s in enumerate(shape)] + else: + quant_shape = [1] + return quant_shape diff --git a/nni/compression/pytorch/quantization_speedup/__init__.py b/nni/compression/pytorch/quantization_speedup/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..636c82a5b045d39281064b80c77cfa60489eb6af --- /dev/null +++ b/nni/compression/pytorch/quantization_speedup/__init__.py @@ -0,0 +1 @@ +from .integrated_tensorrt import CalibrateType, ModelSpeedupTensorRT \ No newline at end of file diff --git a/nni/compression/pytorch/quantization_speedup/backend.py b/nni/compression/pytorch/quantization_speedup/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..7d139d48f84c10557e3c83d2a858da707fe31227 --- /dev/null +++ b/nni/compression/pytorch/quantization_speedup/backend.py @@ -0,0 +1,51 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +class BaseModelSpeedup: + """ + Base speedup class for backend engine + """ + def __init__(self, model, config): + """ + Parameters + ---------- + model : pytorch model + The model to speed up by quantization. + config : dict + Config recording bit number and name of layers. + """ + self.model = model + self.config = config + + def inference(self, test_data): + """ + This function should be overrided by subclass to provide inference ability, + which should return output and inference time. 
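As a quick, hypothetical sanity check of the helpers defined in `quantization/utils.py` and `quantization/literal.py` above (the expected values follow directly from the definitions):

```python
from nni.compression.pytorch.quantization.literal import QuantDtype, QuantScheme, QuantType
from nni.compression.pytorch.quantization.utils import calculate_qmin_qmax, get_quant_shape

# Signed 8-bit uses a symmetric range; unsigned uses the full range.
print(calculate_qmin_qmax(8, QuantDtype.INT))   # (-127, 127)
print(calculate_qmin_qmax(8, QuantDtype.UINT))  # (0, 255)

# Per-channel weight quantization keeps one scale per output channel (dim 0).
print(get_quant_shape([16, 3, 3, 3], QuantType.WEIGHT, QuantScheme.PER_CHANNEL_AFFINE))  # [16, 1, 1, 1]
```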
+
+        Parameters
+        ----------
+        test_data : numpy data
+            test data given to the inference engine
+
+        Returns
+        -------
+        numpy data
+            output data generated by inference
+        float
+            latency of the inference process
+        """
+        raise NotImplementedError('Backend engine must overload inference()')
+
+    def compress(self):
+        """
+        This function should be overridden by subclasses to build the inference
+        engine which will be used to process input data.
+        """
+        raise NotImplementedError('Backend engine must overload compress()')
+
+    def export_quantized_model(self, path):
+        """
+        This function should be overridden by subclasses to export the quantized
+        model built by the inference engine.
+        """
+        raise NotImplementedError('Backend engine must overload export_quantized_model()')
\ No newline at end of file
diff --git a/nni/compression/pytorch/quantization_speedup/calibrator.py b/nni/compression/pytorch/quantization_speedup/calibrator.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bc49622f2e8cb1e818149ec06039bf9ab305c33
--- /dev/null
+++ b/nni/compression/pytorch/quantization_speedup/calibrator.py
@@ -0,0 +1,99 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+import logging
+import tensorrt as trt
+import pycuda.driver as cuda
+
+logger = logging.getLogger(__name__)
+
+class Calibrator(trt.IInt8Calibrator):
+    def __init__(self, training_data, cache_file, batch_size=64, algorithm=trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2):
+        """
+        Parameters
+        ----------
+        training_data : numpy array
+            The data used to calibrate the quantization model
+        cache_file : str
+            The path where the calibration cache file is stored
+        batch_size : int
+            The batch size of the calibration process
+        algorithm : tensorrt.tensorrt.CalibrationAlgoType
+            The calibration algorithm; one of LEGACY_CALIBRATION,
+            ENTROPY_CALIBRATION, ENTROPY_CALIBRATION_2 and MINMAX_CALIBRATION.
+            Please refer to https://docs.nvidia.com/deeplearning/tensorrt/api/
+            python_api/infer/Int8/Calibrator.html for details
+        """
+        trt.IInt8Calibrator.__init__(self)
+
+        self.algorithm = algorithm
+        self.cache_file = cache_file
+
+        self.data = training_data
+        self.batch_size = batch_size
+        self.current_index = 0
+
+        # Allocate enough memory for a whole batch.
+        self.device_input = cuda.mem_alloc(self.data[0].nbytes * self.batch_size)
+
+    def get_algorithm(self):
+        return self.algorithm
+
+    def get_batch_size(self):
+        return self.batch_size
+
+    def get_batch(self, names):
+        """
+        Define how calibration data is fed to the engine, one batch at a time.
+
+        Parameters
+        ----------
+        names : str
+            The names of the network inputs for each object in the bindings array
+
+        Returns
+        -------
+        list
+            A list of device memory pointers set to the memory containing each network
+            input data, or None if there are no more batches for calibration.
+            You can allocate these device buffers with pycuda, for example, and then
+            cast them to int to retrieve the pointer
+        """
+        if self.current_index + self.batch_size > self.data.shape[0]:
+            return None
+
+        current_batch = int(self.current_index / self.batch_size)
+        if current_batch % 10 == 0:
+            logger.info("Calibrating batch %d, containing %d images", current_batch, self.batch_size)
+
+        batch = self.data[self.current_index:self.current_index + self.batch_size].ravel()
+        cuda.memcpy_htod(self.device_input, batch)
+        self.current_index += self.batch_size
+        memory_pointers = [self.device_input]
+        return memory_pointers
+
+    def read_calibration_cache(self):
+        """
+        If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
+
+        Returns
+        -------
+        cache object
+            A cache object which contains calibration parameters for quantization
+        """
+        if os.path.exists(self.cache_file):
+            with open(self.cache_file, "rb") as f:
+                return f.read()
+
+    def write_calibration_cache(self, cache):
+        """
+        Write the calibration cache to the configured path.
+
+        Parameters
+        ----------
+        cache : bytes-like object
+            The calibration cache to write
+        """
+        with open(self.cache_file, "wb") as f:
+            f.write(cache)
\ No newline at end of file
diff --git a/nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py b/nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6c323f2ebcd7e109348472c28ce3eceef56f318
--- /dev/null
+++ b/nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py
@@ -0,0 +1,148 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch
+import onnx
+import onnx.numpy_helper
+"""
+This module converts a PyTorch model to an ONNX model. The conversion is
+basic, which causes a critical problem: PyTorch layer names cannot be mapped
+to ONNX node names directly. To work around this, we wrap each op so that its
+input is multiplied by the layer's index before the computation; only in this
+way can the bit-width configuration of the corresponding layer be recovered
+from the ONNX graph.
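To make the wrapping trick concrete, here is an illustrative snippet that is not part of the diff; `layer_index` stands in for the `name2index[name]` lookup used below:

```python
import torch

conv = torch.nn.Conv2d(3, 8, 3)
layer_index = 5                      # stand-in for name2index[name]
x = torch.randn(1, 3, 32, 32)
# LayernameModuleWrapper (below) computes module(x * index). After ONNX export,
# this shows up as Constant -> Mul nodes right before the op; unwrapper() reads
# the constant to recover the layer name, then strips both nodes again.
y = conv(x * layer_index)
```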
+""" + +class LayernameModuleWrapper(torch.nn.Module): + def __init__(self, module, module_bits) -> None: + """ + Parameters + ---------- + module : torch.nn.Module + Layer module of pytorch model + module_bits : int + Bits width setting for module + """ + super().__init__() + self.module = module + self.module_bits = module_bits + + def forward(self, inputs): + inputs = inputs*self.module_bits + inputs = self.module(inputs) + return inputs + +def _setattr(model, name, module): + """ + Parameters + ---------- + model : pytorch model + The model to speed up by quantization + name : str + name of pytorch module + module : torch.nn.Module + Layer module of pytorch model + """ + name_list = name.split(".") + for name in name_list[:-1]: + model = getattr(model, name) + setattr(model, name_list[-1], module) + +def unwrapper(model_onnx, index2name, config): + """ + Fill onnx config and remove wrapper node in onnx + + Parameters + ---------- + model_onnx : onnx model + Onnx model which is converted from pytorch model + index2name : dict + Dictionary of layer index and name + config : dict + Config recording name of layers and calibration parameters + + Returns + ------- + onnx model + Onnx model which is converted from pytorch model + dict + The configuration of onnx model layers and calibration parameters + """ + # Support Gemm, Conv, Relu, Clip(Relu6) and Maxpool + support_op = ['Gemm', 'Conv', 'Relu', 'Clip', 'MaxP'] + idx = 0 + onnx_config = {} + while idx < len(model_onnx.graph.node): + nd = model_onnx.graph.node[idx] + if nd.name[0:4] in support_op and idx > 1: + # Grad constant node and multiply node + const_nd = model_onnx.graph.node[idx-2] + mul_nd = model_onnx.graph.node[idx-1] + # Get index number which is transferred by constant node + index = int(onnx.numpy_helper.to_array(const_nd.attribute[0].t)) + if index != -1: + name = index2name[index] + onnx_config[nd.name] = config[name] + nd.input[0] = mul_nd.input[0] + # Remove constant node and multiply node + model_onnx.graph.node.remove(const_nd) + model_onnx.graph.node.remove(mul_nd) + idx = idx-2 + idx = idx+1 + return model_onnx, onnx_config + +def torch_to_onnx(model, config, input_shape, model_path, input_names, output_names): + """ + Convert torch model to onnx model and get layer bits config of onnx model. 
+ + Parameters + ---------- + model : pytorch model + The model to speed up by quantization + config : dict + Config recording bits number and name of layers + input_shape : tuple + The input shape of model, shall pass it to torch.onnx.export + model_path : str + The path user want to store onnx model which is converted from pytorch model + input_names : list + Input name of onnx model providing for torch.onnx.export to generate onnx model + output_name : list + Output name of onnx model providing for torch.onnx.export to generate onnx model + + Returns + ------- + onnx model + Onnx model which is converted from pytorch model + dict + The configuration of onnx model layers and calibration parameters + """ + # Support Gemm, Conv, Relu, Clip(Relu6) and MaxPool + support_op = [torch.nn.Conv2d, torch.nn.Linear, torch.nn.ReLU, torch.nn.ReLU6, torch.nn.MaxPool2d] + # Transfer bits number to onnx layer by using wrapper + index2name = {} + name2index = {} + if config is not None: + for i, name in enumerate(config.keys()): + index2name[i] = name + name2index[name] = i + for name, module in model.named_modules(): + if config is not None and name in config: + assert type(module) in support_op + wrapper_module = LayernameModuleWrapper(module, name2index[name]) + _setattr(model, name, wrapper_module) + elif type(module) in support_op: + wrapper_module = LayernameModuleWrapper(module, -1) + _setattr(model, name, wrapper_module) + # Convert torch model to onnx model and save it in model_path + dummy_input = torch.randn(input_shape) + model.to('cpu') + torch.onnx.export(model, dummy_input, model_path, verbose=False, input_names=input_names, output_names=output_names, export_params=True) + + # Load onnx model + model_onnx = onnx.load(model_path) + model_onnx, onnx_config = unwrapper(model_onnx, index2name, config) + onnx.save(model_onnx, model_path) + + onnx.checker.check_model(model_onnx) + return model_onnx, onnx_config \ No newline at end of file diff --git a/nni/compression/pytorch/quantization_speedup/integrated_tensorrt.py b/nni/compression/pytorch/quantization_speedup/integrated_tensorrt.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a0ab4125b755a541e1a950827bd40938322b32 --- /dev/null +++ b/nni/compression/pytorch/quantization_speedup/integrated_tensorrt.py @@ -0,0 +1,414 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import time +import logging +import tensorrt as trt +import numpy as np +import torch + +from . import frontend_to_onnx as fonnx +from . import calibrator as calibrator +from . 
import trt_pycuda as common +from .backend import BaseModelSpeedup + +TRT8 = 8 +TRT7 = 7 +TRT_LOGGER = trt.Logger() +logger = logging.getLogger(__name__) + +class CalibrateType: + LEGACY = trt.CalibrationAlgoType.LEGACY_CALIBRATION + ENTROPY = trt.CalibrationAlgoType.ENTROPY_CALIBRATION + ENTROPY2 = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2 + MINMAX = trt.CalibrationAlgoType.MINMAX_CALIBRATION + +Precision_Dict = { + 8: trt.int8, + 16: trt.float16, + 32: trt.float32 +} + +def valid_config(config=None): + """ + This function validates the bits setting configuration + """ + if config is None: + return + support_bits = [8, 16, 32] + for name in config.keys(): + if 'weight_bits' in config[name]: + w_bits = config[name]['weight_bits'] + assert w_bits in support_bits, "weight bits should be 8, 16, 32" + if 'output_bits' in config[name]: + a_bits = config[name]['output_bits'] + assert a_bits in support_bits, "output bits should be 8, 16, 32" + +def handle_gemm(network, layer_idx, config): + """ + This function handles special gemm operation due to layer numbers of gemm changed during pytorch->onnx model convertion. + + Parameters + ---------- + network : tensorrt.INetworkDefinition + Represents a TensorRT Network from which the Builder can build an Engine + layer_idx : int + layer index of gemm + config : dict + Config recording bits number and name of layers + """ + layer = network.get_layer(layer_idx) + pre_layer = network.get_layer(layer_idx-1) + next_layer = network.get_layer(layer_idx+1) + # if weight bits exists, set three layers' precision, + # input tensor range and the first two layers' output type + if 'weight_bits' in config[layer.name]: + assert 'tracked_min_input' in config[layer.name] + assert 'tracked_max_input' in config[layer.name] + w_bits = config[layer.name]['weight_bits'] + tracked_min_input = config[layer.name]['tracked_min_input'] + tracked_max_input = config[layer.name]['tracked_max_input'] + # set three layers the same precision + layer.precision = Precision_Dict[w_bits] + pre_layer.precision = Precision_Dict[w_bits] + next_layer.precision = Precision_Dict[w_bits] + # set the first two layers' output type + pre_layer.set_output_type(0, Precision_Dict[w_bits]) + layer.set_output_type(0, Precision_Dict[w_bits]) + pre_in_tensor = pre_layer.get_input(0) + in_tensor = layer.get_input(0) + next_in_tensor = next_layer.get_input(0) + # set three layers' input tensor range + pre_in_tensor.dynamic_range = (tracked_min_input, tracked_max_input) + in_tensor.dynamic_range = (tracked_min_input, tracked_max_input) + next_in_tensor.dynamic_range = (tracked_min_input, tracked_max_input) + + # if output bits exists, set the last layer's output type output tensor range + if 'output_bits' in config[layer.name]: + assert 'tracked_min_output' in config[layer.name] + assert 'tracked_max_output' in config[layer.name] + a_bits = config[layer.name]['output_bits'] + tracked_min_output = config[layer.name]['tracked_min_output'] + tracked_max_output = config[layer.name]['tracked_max_output'] + # set the last layer's output type + next_layer.set_output_type(0, Precision_Dict[a_bits]) + next_out_tensor = next_layer.get_output(0) + # set the last layer's output tensor range + next_out_tensor.dynamic_range = (tracked_min_output, tracked_max_output) + +def build_engine(model_file, config=None, extra_layer_bits=32, strict_datatype=False, calib=None): + """ + This function builds an engine from an onnx model with calibration process. 
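For orientation, a hypothetical `build_engine` call might look like the following; the ONNX node name, tracked ranges and file path are all made up for illustration:

```python
config = {
    'Conv_0': {
        'weight_bits': 8,
        'output_bits': 8,
        'tracked_min_input': -1.0, 'tracked_max_input': 1.0,
        'tracked_min_output': -4.0, 'tracked_max_output': 4.0,
    }
}
engine = build_engine('model.onnx', config=config, extra_layer_bits=32, strict_datatype=True)
```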
+ + Parameters + ---------- + model_file : str + The path of onnx model + config : dict + Config recording bits number and name of layers + extra_layer_bits : int + Other layers which are not in config will be quantized to corresponding bits number + strict_datatype : bool + Whether constrain layer bits to the number given in config or not. If true, all the layer + will be set to given bits strictly. Otherwise, these layers will be set automatically by + tensorrt + calib : numpy array + The data using to calibrate quantization model + + Returns + ------- + tensorrt.ICudaEngine + An ICudaEngine for executing inference on a built network + """ + with trt.Builder(TRT_LOGGER) as builder, builder.create_network(common.EXPLICIT_BATCH) as network, \ + trt.OnnxParser(network, TRT_LOGGER) as parser, builder.create_builder_config() as trt_config: + # Attention that, builder should be set to 1 because of the implementation of allocate_buffer + trt_version = int(trt.__version__[0]) + assert trt_version == TRT8 or trt_version == TRT7, "Version of TensorRT is too old, please \ + update TensorRT to version >= 7.0" + if trt_version == TRT7: + logger.warning("TensorRT7 is deprecated and may be removed in the following release.") + + builder.max_batch_size = 1 + if trt_version == TRT8: + trt_config.max_workspace_size = common.GiB(4) + else: + builder.max_workspace_size = common.GiB(4) + + if extra_layer_bits == 32 and config is None: + pass + elif extra_layer_bits == 16 and config is None: + if trt_version == TRT8: + trt_config.set_flag(trt.BuilderFlag.FP16) + else: + builder.fp16_mode = True + elif extra_layer_bits == 8 and config is None: + # entire model in 8bit mode + if trt_version == TRT8: + trt_config.set_flag(trt.BuilderFlag.INT8) + else: + builder.int8_mode = True + else: + if trt_version == TRT8: + trt_config.set_flag(trt.BuilderFlag.INT8) + trt_config.set_flag(trt.BuilderFlag.FP16) + if strict_datatype: + trt_config.set_flag(trt.BuilderFlag.STRICT_TYPES) + else: + builder.int8_mode = True + builder.fp16_mode = True + builder.strict_type_constraints = strict_datatype + + valid_config(config) + + # Parse onnx model + with open(model_file, 'rb') as model: + if not parser.parse(model.read()): + logger.error('ERROR: Fail to parse the ONNX file.') + for error in range(parser.num_errors): + logger.error(parser.get_error(error)) + return None + + if calib is not None: + if trt_version == TRT8: + trt_config.int8_calibrator = calib + else: + builder.int8_calibrator = calib + # This design may not be correct if output more than one + for i in range(network.num_layers): + if config is None: + break + layer = network.get_layer(i) + if layer.name in config: + w_bits = config[layer.name]['weight_bits'] + a_bits = config[layer.name]['output_bits'] + layer.precision = Precision_Dict[w_bits] + layer.set_output_type(0, Precision_Dict[a_bits]) + else: + # This implementation may be incorrect when output number > 1 + for i in range(network.num_layers): + if config is None: + # no low bits layer need to be set, keep original model + break + layer = network.get_layer(i) + if layer.name not in config: + continue + # layer numbers of gemm changed during pytorch->onnx model convertion, need special handle + if layer.name[0:4] == "Gemm": + handle_gemm(network, i, config) + continue + + # If weight_bits exists in config, set layer precision and layer's input tensor dynamic range. 
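+            # Background (informal): TensorRT derives the int8 quantization scale from
+            # this dynamic range, roughly max(|range_min|, |range_max|) / 127.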
+            if 'weight_bits' in config[layer.name]:
+                assert 'tracked_min_input' in config[layer.name]
+                assert 'tracked_max_input' in config[layer.name]
+                w_bits = config[layer.name]['weight_bits']
+                tracked_min_input = config[layer.name]['tracked_min_input']
+                tracked_max_input = config[layer.name]['tracked_max_input']
+                layer.precision = Precision_Dict[w_bits]
+                in_tensor = layer.get_input(0)
+                in_tensor.dynamic_range = (tracked_min_input, tracked_max_input)
+
+            # If output_bits exists in config, set the layer's output type and its output tensor's dynamic range.
+            if 'output_bits' in config[layer.name]:
+                assert 'tracked_min_output' in config[layer.name]
+                assert 'tracked_max_output' in config[layer.name]
+                a_bits = config[layer.name]['output_bits']
+                tracked_min_output = config[layer.name]['tracked_min_output']
+                tracked_max_output = config[layer.name]['tracked_max_output']
+                layer.set_output_type(0, Precision_Dict[a_bits])
+                out_tensor = layer.get_output(0)
+                out_tensor.dynamic_range = (tracked_min_output, tracked_max_output)
+
+        # Build the engine and do int8 calibration.
+        if trt_version == TRT8:
+            engine = builder.build_engine(network, trt_config)
+        else:
+            engine = builder.build_cuda_engine(network)
+        return engine
+
+class ModelSpeedupTensorRT(BaseModelSpeedup):
+    def __init__(self, model, input_shape, config=None, onnx_path="default_model.onnx", extra_layer_bits=32, strict_datatype=True,
+                 calibrate_type=CalibrateType.ENTROPY2, calib_data_loader=None, calibration_cache="calibration.cache", batchsize=1,
+                 input_names=["actual_input_1"], output_names=["output1"]):
+        """
+        Parameters
+        ----------
+        model : pytorch model
+            The model to speed up by quantization.
+        input_shape : tuple
+            The input shape of the model; it will be passed to torch.onnx.export.
+        config : dict
+            Config recording bit number and name of layers.
+        onnx_path : str
+            The path where the ONNX model converted from the PyTorch model is stored.
+        extra_layer_bits : int
+            Layers which are not in config will be quantized to this bit number.
+        strict_datatype : bool
+            Whether to constrain layer bits to the numbers given in config. If True, all
+            layers will be set to the given bits strictly; otherwise, these layers will
+            be set automatically by TensorRT.
+        calibrate_type : tensorrt.tensorrt.CalibrationAlgoType
+            The calibration algorithm. Please refer to https://docs.nvidia.com/deeplearning/
+            tensorrt/api/python_api/infer/Int8/Calibrator.html for details
+        calib_data_loader : DataLoader or torch.Tensor
+            The data used to calibrate the quantization model
+        calibration_cache : str
+            The path where the calibration cache file is stored
+        batchsize : int
+            The batch size of calibration and inference
+        input_names : list
+            Input names of the ONNX model, passed to torch.onnx.export to generate the ONNX model
+        output_names : list
+            Output names of the ONNX model, passed to torch.onnx.export to generate the ONNX model
+        """
+        super().__init__(model, config)
+        self.model = model
+        self.onnx_path = onnx_path
+        self.input_shape = input_shape
+        self.config = config
+        self.extra_layer_bits = extra_layer_bits
+        self.strict_datatype = strict_datatype
+        self.calibrate_type = calibrate_type
+        self.calib_data_loader = calib_data_loader
+        self.calibration_cache = calibration_cache
+        self.batchsize = batchsize
+        self.input_names = input_names
+        self.output_names = output_names
+        self.context = None
+        self.onnx_config = {}
+
+    def compress(self):
+        """
+        Get the ONNX config and build the TensorRT engine.
+        """
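A hedged end-to-end sketch of the class defined above; the model, config object and shapes are illustrative stand-ins, not values from this diff:

```python
import torch
from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT

model = MyQuantizedModel().eval()            # hypothetical model; config comes from an NNI quantizer
calib = torch.randn(640, 3, 32, 32)          # calibration tensor (a DataLoader also works)
engine = ModelSpeedupTensorRT(model, input_shape=(1, 3, 32, 32), config=quantizer_config,
                              calib_data_loader=calib, batchsize=32)
engine.compress()                            # ONNX export + TensorRT engine build
output, latency = engine.inference(torch.randn(1, 3, 32, 32))
engine.export_quantized_model('trt.engine')
```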
+        assert self.model is not None
+        assert self.onnx_path is not None
+        assert self.input_shape is not None
+
+        # Convert the PyTorch model to an ONNX model and save it in onnx_path
+        _, self.onnx_config = fonnx.torch_to_onnx(self.model, self.config, input_shape=self.input_shape,
+            model_path=self.onnx_path, input_names=self.input_names, output_names=self.output_names)
+
+        if self.calib_data_loader is not None:
+            assert self.calibrate_type is not None
+            context = self._tensorrt_build_withcalib(self.onnx_path)
+        else:
+            context = self._tensorrt_build_withoutcalib(self.onnx_path)
+        self.context = context
+
+    def _tensorrt_build_withcalib(self, onnx_path):
+        """
+        Build the TensorRT engine with int8 calibration; the calibration data is
+        first converted from PyTorch tensors to a numpy ndarray.
+
+        Parameters
+        ----------
+        onnx_path : str
+            The path of the ONNX model
+
+        Returns
+        -------
+        tensorrt.IExecutionContext
+            Context for executing inference using an ICudaEngine
+        """
+        calib_data = None
+        if type(self.calib_data_loader) == torch.utils.data.dataloader.DataLoader:
+            calib_data_set = []
+            for data, _ in self.calib_data_loader:
+                calib_data_set.append(data)
+            calib_data = np.concatenate(calib_data_set)
+        elif type(self.calib_data_loader) == torch.Tensor:
+            # TensorRT needs numpy data for calibration; only CPU tensors can be converted to numpy directly
+            if self.calib_data_loader.device != torch.device("cpu"):
+                self.calib_data_loader = self.calib_data_loader.to("cpu")
+            calib_data = self.calib_data_loader.numpy()
+        else:
+            raise ValueError("Unsupported calibration data type")
+        calib = calibrator.Calibrator(calib_data, self.calibration_cache, self.batchsize, self.calibrate_type)
+
+        # build the inference engine with calibration
+        engine = build_engine(onnx_path, self.onnx_config, self.extra_layer_bits, self.strict_datatype, calib)
+        return engine.create_execution_context()
+
+    def _tensorrt_build_withoutcalib(self, onnx_path):
+        """
+        Build the inference engine without calibration.
+
+        Parameters
+        ----------
+        onnx_path : str
+            The path of the ONNX model
+
+        Returns
+        -------
+        tensorrt.IExecutionContext
+            Context for executing inference using an ICudaEngine
+        """
+        engine = build_engine(onnx_path, self.onnx_config, self.extra_layer_bits, self.strict_datatype)
+        return engine.create_execution_context()
+
+    def inference(self, test_data):
+        """
+        Run inference with the built TensorRT engine.
+
+        Parameters
+        ----------
+        test_data : pytorch tensor
+            Model input tensor
+        """
+        # convert the pytorch tensor to a numpy ndarray
+        if test_data.device != torch.device("cpu"):
+            test_data = test_data.to("cpu")
+        test_data = test_data.numpy()
+        # the numpy dtype should be float32
+        assert test_data.dtype == np.float32
+        elapsed_time = 0
+        inputs, outputs, bindings, stream = common.allocate_buffers(self.context.engine)
+        result = []
+        for start_idx in range(0, test_data.shape[0], self.batchsize):
+            # If the number of images in the test set is not divisible by the batch size,
+            # the last batch will be smaller. This logic handles that case.
+            end_idx = min(start_idx + self.batchsize, test_data.shape[0])
+            effective_batch_size = end_idx - start_idx
+
+            # Do inference for every batch.
+            inputs[0].host = test_data[start_idx:start_idx + effective_batch_size]
+            t1 = time.time()
+            [output] = common.do_inference_v2(self.context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
+            elapsed_time += time.time() - t1
+            shape = output.shape[0]
+            output = output[0:int(shape * effective_batch_size / self.batchsize)].reshape(effective_batch_size, -1)
+            result.append(output.copy())
+        # concatenate the batch outputs and convert the numpy ndarray back to a pytorch tensor
+        result = torch.Tensor(np.concatenate(result))
+        return result, elapsed_time
+
+    def export_quantized_model(self, path):
+        """
+        Export the TensorRT engine, which can only be loaded back by the TensorRT deserialization API.
+
+        Parameters
+        ----------
+        path : str
+            The path to which the engine is exported
+        """
+        assert path is not None
+        with open(path, "wb") as f:
+            f.write(self.context.engine.serialize())
+            logger.info("TensorRT engine has been saved to %s", path)
+
+    def load_quantized_model(self, path):
+        """
+        Load a TensorRT engine from the given path.
+
+        Parameters
+        ----------
+        path : str
+            The path of the exported engine
+        """
+        assert path is not None
+        with open(path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
+            engine = runtime.deserialize_cuda_engine(f.read())
+            self.context = engine.create_execution_context()
+            logger.info("Load TensorRT engine from %s successfully.", path)
\ No newline at end of file
diff --git a/nni/compression/pytorch/quantization_speedup/trt_pycuda.py b/nni/compression/pytorch/quantization_speedup/trt_pycuda.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f8e1f4c6792c4cded99d2aaad3dfeb47a57686
--- /dev/null
+++ b/nni/compression/pytorch/quantization_speedup/trt_pycuda.py
@@ -0,0 +1,86 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import pycuda.driver as cuda
+import pycuda.autoinit # pylint: disable=unused-import
+import tensorrt as trt
+
+EXPLICIT_BATCH = 1
+
+def GiB(val):
+    return val * 1 << 30
+
+# Simple helper data class that's a little nicer to use than a 2-tuple.
+class HostDeviceMem(object):
+    def __init__(self, host_mem, device_mem):
+        """
+        Pair a host memory buffer with its corresponding device buffer.
+
+        Parameters
+        ----------
+        host_mem : host memory
+            Memory buffer on the host
+        device_mem : device memory
+            Memory buffer on the device
+        """
+        self.host = host_mem
+        self.device = device_mem
+
+    def __str__(self):
+        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
+
+    def __repr__(self):
+        return self.__str__()
+
+def allocate_buffers(engine):
+    """
+    Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
+
+    Parameters
+    ----------
+    engine : tensorrt.ICudaEngine
+        An ICudaEngine for executing inference on a built network
+
+    Returns
+    -------
+    list
+        All input HostDeviceMem buffers of the engine
+    list
+        All output HostDeviceMem buffers of the engine
+    list
+        Device bindings (device buffer addresses as ints)
+    pycuda.driver.Stream
+        A stream is a sequence of commands (possibly issued by different host threads) that execute in order
+    """
+    inputs = []
+    outputs = []
+    bindings = []
+    stream = cuda.Stream()
+    for binding in engine:
+        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
+        dtype = trt.nptype(engine.get_binding_dtype(binding))
+        # Allocate host and device buffers
+        host_mem = cuda.pagelocked_empty(size, dtype)
+        device_mem = cuda.mem_alloc(host_mem.nbytes)
+        # Append the device buffer to device bindings.
+ bindings.append(int(device_mem)) + # Append to the appropriate list. + if engine.binding_is_input(binding): + inputs.append(HostDeviceMem(host_mem, device_mem)) + else: + outputs.append(HostDeviceMem(host_mem, device_mem)) + return inputs, outputs, bindings, stream + +# This function is generalized for multiple inputs/outputs for full dimension networks. +# inputs and outputs are expected to be lists of HostDeviceMem objects. +def do_inference_v2(context, bindings, inputs, outputs, stream): + # Transfer input data to the GPU. + [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs] + # Run inference. + context.execute_async_v2(bindings=bindings, stream_handle=stream.handle) + # Transfer predictions back from the GPU. + [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs] + # Synchronize the stream + stream.synchronize() + # Return only the host outputs. + return [out.host for out in outputs] \ No newline at end of file diff --git a/nni/compression/pytorch/speedup/__init__.py b/nni/compression/pytorch/speedup/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cef8ebd76c0d99b1810512afe28723b5eeccb9fc --- /dev/null +++ b/nni/compression/pytorch/speedup/__init__.py @@ -0,0 +1 @@ +from .compressor import ModelSpeedup \ No newline at end of file diff --git a/nni/compression/pytorch/speedup/compress_modules.py b/nni/compression/pytorch/speedup/compress_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..64cb0abfbb7a1d2e6b5e49db19cdf6e4c4a86ec9 --- /dev/null +++ b/nni/compression/pytorch/speedup/compress_modules.py @@ -0,0 +1,543 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +import torch.nn as nn +from .error_code import EmptyLayerError, ShapeMisMatchError, InputsNumberError, OutputTypeError, UnBalancedGroupError + +_logger = logging.getLogger(__name__) + +replace_module = { + 'BatchNorm2d': lambda module, masks: replace_batchnorm2d(module, masks), + 'BatchNorm1d': lambda module, masks: replace_batchnorm1d(module, masks), + 'Conv2d': lambda module, masks: replace_conv2d(module, masks), + 'Linear': lambda module, masks: replace_linear(module, masks), + 'MaxPool2d': lambda module, masks: no_replace(module, masks), + 'AvgPool2d': lambda module, masks: no_replace(module, masks), + 'AdaptiveAvgPool2d': lambda module, masks: no_replace(module, masks), + 'ReLU': lambda module, masks: no_replace(module, masks), + 'ReLU6': lambda module, masks: no_replace(module, masks), + 'LeakyReLU': lambda module, masks: no_replace(module, masks), + 'ELU': lambda module, masks: no_replace(module, masks), + 'Hardtanh': lambda module, masks: no_replace(module, masks), + 'Hardsigmoid': lambda module, masks: no_replace(module, masks), + 'LogSigmoid': lambda module, masks: no_replace(module, masks), + 'PReLU': lambda module, masks: replace_prelu(module, masks), + 'RReLU': lambda module, masks: no_replace(module, masks), + 'SELU': lambda module, masks: no_replace(module, masks), + 'CELU': lambda module, masks: no_replace(module, masks), + 'GELU': lambda module, masks: no_replace(module, masks), + 'Sigmoid': lambda module, masks: no_replace(module, masks), + 'SiLU': lambda module, masks: no_replace(module, masks), + 'Mish': lambda module, masks: no_replace(module, masks), + 'Tanh': lambda module, masks: no_replace(module, masks), + 'Softplus': lambda module, masks: no_replace(module, masks), + 'Softshrink': lambda module, masks: no_replace(module, masks), + 
'Softmax': lambda module, masks: no_replace(module, masks), + 'Tanhshrink': lambda module, masks: no_replace(module, masks), + 'Dropout': lambda module, masks: no_replace(module, masks), + 'Dropout2d': lambda module, masks: no_replace(module, masks), + 'Dropout3d': lambda module, masks: no_replace(module, masks), + 'Upsample': lambda module, masks: no_replace(module, masks), + 'LayerNorm': lambda module, masks: replace_layernorm(module, masks), + 'ConvTranspose2d': lambda module, masks: replace_convtranspose2d(module, masks) +} + + +def convert_to_coarse_mask(t_mask, dim): + """ + Convert the mask tensor to the coarse-grained mask tensor. + Parameters + --------- + t_mask: torch.Tensor + The tensor only have 1s and 0s, 0 indicates this value is masked + and 1 indicates the corresponding value is not masked. + dim: int + Try to reduce the mask tensor on this dimension. + + Returns + ------- + indexes: torch.Tensor + The indexes of the sparsity that can be structurally removed. + remained_indexes: torch.Tensor + The indexes of values that need to be remained. + """ + assert isinstance(t_mask, torch.Tensor) + shape = list(t_mask.size()) + n_dims = len(shape) + dim_list = list(range(n_dims)) + # try to reduce the mask from the dim-th dimension + dim_list.remove(dim) + + t_merged = torch.sum(t_mask, dim_list) + assert t_merged.size(0) == shape[dim] + all_pruned = t_merged == 0 + need_remain = t_merged != 0 + # return the indexes of the sparsity that can be removed + indexes = torch.nonzero(all_pruned, as_tuple=True)[0] + remained_indexes = torch.nonzero(need_remain, as_tuple=True)[0] + return indexes, remained_indexes + + +def no_replace(module, masks): + """ + No need to replace + """ + _logger.debug("no need to replace") + return module + + +def replace_prelu(prelu, masks): + """ + Parameters + ---------- + module : torch.nn.PReLU + The prelu module to be replace + masks : tuple of masks + The input/output/weight masks of the target module + + Returns + ------- + torch.nn.PReLU + The new prelu module + """ + in_masks, output_mask, weight_mask = masks + if len(in_masks) != 1: + raise InputsNumberError() + if not isinstance(output_mask, torch.Tensor): + raise OutputTypeError(type(output_mask), torch.Tensor) + + in_mask = in_masks[0] + weight_mask = weight_mask['weight'] + if weight_mask.size(0) == 1: + return prelu + pruned_in, remained_in = convert_to_coarse_mask(in_mask, 1) + pruned_out, remained_out = convert_to_coarse_mask(output_mask, 1) + n_remained_in = weight_mask.size(0) - pruned_in.size(0) + n_remained_out = weight_mask.size(0) - pruned_out.size(0) + remained_in, remained_out = remained_in.to( + prelu.weight.device), remained_out.to(prelu.weight.device) + if n_remained_in != n_remained_out: + raise ShapeMisMatchError() + + if n_remained_in == 0: + return torch.nn.Identity() + new_prelu = torch.nn.PReLU(n_remained_in) + new_prelu.weight.data = torch.index_select( + prelu.weight.data, 0, remained_in) + return new_prelu + + +def replace_linear(linear, masks): + """ + This function will replace the original linear according to + the infered masks. This function support the fine-grained and + coarse-grained sparsity. In the fine-grained scenario, this function + will remove the whole column/row that happen to be totally covered by + the masks. 
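A small worked example of `convert_to_coarse_mask` defined above; the mask values are made up, and the expected outputs follow by hand from the definition:

```python
import torch

# Two samples, three channels; channel 1 is masked in every position.
mask = torch.tensor([[1., 0., 1.],
                     [1., 0., 1.]])
pruned, remained = convert_to_coarse_mask(mask, dim=1)
print(pruned)    # tensor([1])    -> channel 1 can be removed structurally
print(remained)  # tensor([0, 2]) -> channels to keep
```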
+ + Parameters + ---------- + linear : torch.nn.Linear + The linear module to be replace + masks : Tuple of the input masks, output masks and weight masks + Tuple of the masks, for example + ([input_m1, input_m2], [output_m], {'weight':weight_m}) + + Returns + ------- + torch.nn.Linear + The new linear module + """ + in_masks, output_mask, weight_mask = masks + assert isinstance(linear, nn.Linear) + if len(in_masks) != 1: + raise InputsNumberError() + if not isinstance(output_mask, torch.Tensor): + raise OutputTypeError(type(output_mask), torch.Tensor) + + in_mask = in_masks[0] + + weight_mask = weight_mask['weight'] + # N C K + pruned_in, remained_in = convert_to_coarse_mask(in_mask, 1) + pruned_out, remained_out = convert_to_coarse_mask(output_mask, 1) + n_remained_in = weight_mask.size(1) - pruned_in.size(0) + n_remained_out = weight_mask.size(0) - pruned_out.size(0) + remained_in, remained_out = remained_in.to( + linear.weight.device), remained_out.to(linear.weight.device) + _logger.info("replace linear with new in_features: %d, out_features: %d", + n_remained_in, n_remained_out) + need_bias = False + if linear.bias is not None: + need_bias = True + new_linear = torch.nn.Linear(in_features=n_remained_in, + out_features=n_remained_out, + bias=need_bias) + new_linear.to(linear.weight.device) + # Copy the remained weight from the original module + with torch.no_grad(): + tmp_weight_data = torch.index_select( + linear.weight.data, 0, remained_out) + new_linear.weight.data = torch.index_select( + tmp_weight_data, 1, remained_in) + + if linear.bias is not None: + new_linear.bias.data = torch.index_select( + linear.bias.data, 0, remained_out) + + return new_linear + + +def replace_batchnorm1d(norm, masks): + """ + Parameters + ---------- + norm : torch.nn.BatchNorm1d + The batchnorm module to be replace + masks : Tuple of the input masks, output masks and weight masks + Tuple of the masks, for example + ([input_m1, input_m2], [output_m], {'weight':weight_m}) + + Returns + ------- + torch.nn.BatchNorm1d + The new batchnorm module + """ + in_masks, output_mask, _ = masks + assert isinstance(norm, nn.BatchNorm1d) + in_mask = in_masks[0] + + # N, C, H, W + _, remained_in = convert_to_coarse_mask(in_mask, 1) + _, remained_out = convert_to_coarse_mask(output_mask, 1) + if remained_in.size(0) != remained_out.size(0): + raise ShapeMisMatchError() + + num_features = remained_in.size(0) + _logger.info("replace batchnorm1d with num_features: %d", num_features) + new_norm = torch.nn.BatchNorm1d(num_features=num_features, + eps=norm.eps, + momentum=norm.momentum, + affine=norm.affine, + track_running_stats=norm.track_running_stats) + # assign weights + if norm.affine: + new_norm.weight.data = torch.index_select(norm.weight.data, 0, remained_in) + new_norm.bias.data = torch.index_select(norm.bias.data, 0, remained_in) + + new_norm.running_mean.data = torch.index_select( + norm.running_mean.data, 0, remained_in) + new_norm.running_var.data = torch.index_select( + norm.running_var.data, 0, remained_in) + return new_norm + + +def replace_batchnorm2d(norm, masks): + """ + Parameters + ---------- + norm : torch.nn.BatchNorm2d + The batchnorm module to be replace + masks : Tuple of the input masks, output masks and weight masks + Tuple of the masks, for example + ([input_m1, input_m2], [output_m], {'weight':weight_m}) + + Returns + ------- + torch.nn.BatchNorm2d + The new batchnorm module + """ + in_masks, output_mask, _ = masks + assert isinstance(norm, nn.BatchNorm2d) + in_mask = in_masks[0] + + # N, C, H, W + 
_, remained_in = convert_to_coarse_mask(in_mask, 1) + _, remained_out = convert_to_coarse_mask(output_mask, 1) + if remained_in.size(0) != remained_out.size(0): + raise ShapeMisMatchError() + + num_features = remained_in.size(0) + _logger.info("replace batchnorm2d with num_features: %d", num_features) + new_norm = torch.nn.BatchNorm2d(num_features=num_features, + eps=norm.eps, + momentum=norm.momentum, + affine=norm.affine, + track_running_stats=norm.track_running_stats) + # assign weights + if norm.affine: + new_norm.weight.data = torch.index_select(norm.weight.data, 0, remained_in) + new_norm.bias.data = torch.index_select(norm.bias.data, 0, remained_in) + + new_norm.running_mean.data = torch.index_select( + norm.running_mean.data, 0, remained_in) + new_norm.running_var.data = torch.index_select( + norm.running_var.data, 0, remained_in) + return new_norm + + +def replace_conv2d(conv, masks): + """ + Replace the original conv with a new one according to the infered + masks, the function support the fine-grained sparsity and coarse-grained + sparsity. In the fine-grained scenario, this replace function will replace + the filters that happen to be totally coverd by the fine-grained sparsity. + + Parameters + ---------- + conv : torch.nn.Conv2d + The conv2d module to be replaced + masks : Tuple of the input masks, output masks and weight masks + Tuple of the masks, for example + ([input_m1, input_m2], [output_m], {'weight':weight_m}) + + Returns + ------- + torch.nn.Conv2d + The new conv2d module + """ + in_masks, output_mask, weight_masks = masks + assert isinstance(conv, nn.Conv2d) + # the conv layer should only have one input tensor + if len(in_masks) != 1: + raise InputsNumberError() + + in_mask = in_masks[0] + + weight_mask = weight_masks['weight'] + pruned_in, remained_in = convert_to_coarse_mask(in_mask, 1) + pruned_out, remained_out = convert_to_coarse_mask(output_mask, 1) + + n_remained_in = weight_mask.size(1) * conv.groups - pruned_in.size(0) + n_remained_out = weight_mask.size(0) - pruned_out.size(0) + + if n_remained_in != remained_in.size(0) or n_remained_out != remained_out.size(0): + raise ShapeMisMatchError() + + k_size1, k_size2 = conv.kernel_size + # Note: We should resolve the group dependency of the conv layers before + # run into here. 
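+    # Worked example (illustrative): with groups=2, in_channels=4 and out_channels=4,
+    # ori_inchannel_step is 2; if remained_in = [2, 3], then group 0 is fully pruned
+    # (skipped by the loop below) and group 1 keeps both channels, so
+    # new_inchannel_step = 2 and, in the second loop further down, new_groups = 1.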
+ # check if the mask tensor meets the group dependency and calculate the + # new number of the groups after pruning + # the original step size of the input channel for each group + ori_inchannel_step = int(conv.in_channels/conv.groups) + # the original step size of the output channel for each group + ori_outchannel_step = int(conv.out_channels/conv.groups) + # calculate the new_in_channel_step and new_outchannel_step first + new_inchannel_step = new_outchannel_step = None + for groupid in range(conv.groups): + in_start = groupid * ori_inchannel_step + in_end = in_start + ori_inchannel_step + out_start = groupid * ori_outchannel_step + out_end = out_start + ori_outchannel_step + current_input_index = list( + filter(lambda x: in_start <= x and x < in_end, remained_in.tolist())) + current_output_index = list( + filter(lambda x: out_start <= x and x < out_end, remained_out.tolist())) + # remap the global index to the group index + if len(current_input_index) == 0: + # if the whole group are pruned + continue + else: + + new_inchannel_step = len(current_input_index) + new_outchannel_step = len(current_output_index) + break + tmp_weight = torch.ones( + n_remained_out, new_inchannel_step, k_size1, k_size2) + tmp_weight = tmp_weight.to(conv.weight.device) + if new_inchannel_step == 0 or new_outchannel_step == 0: + raise EmptyLayerError() + if n_remained_in % new_inchannel_step != 0 or n_remained_out % new_outchannel_step != 0: + raise UnBalancedGroupError() + + new_groups = 0 + for groupid in range(conv.groups): + in_start = groupid * ori_inchannel_step + in_end = in_start + ori_inchannel_step + out_start = groupid * ori_outchannel_step + out_end = out_start + ori_outchannel_step + current_input_index = list( + filter(lambda x: in_start <= x and x < in_end, remained_in.tolist())) + current_output_index = list( + filter(lambda x: out_start <= x and x < out_end, remained_out.tolist())) + # remap the global index to the group index + current_input_index = [x-in_start for x in current_input_index] + if len(current_input_index) == 0: + # if the whole group are pruned + assert len(current_output_index) == 0 + continue + # check if the number of remained channel of each group are the same + if len(current_input_index) != new_inchannel_step or len(current_output_index) != new_outchannel_step: + raise UnBalancedGroupError() + + # copy the weight into tmp_weight + new_out_start = new_outchannel_step * new_groups + new_out_end = new_out_start + new_outchannel_step + tmp_weight[new_out_start:new_out_end] = torch.index_select( + conv.weight[current_output_index], 1, torch.as_tensor(current_input_index, dtype=torch.long).to(conv.weight.device)) + new_groups += 1 + + _logger.debug("replace conv2d with in_channels: %d, out_channels: %d", + n_remained_in, n_remained_out) + + # need_bias is a flag that indicates that if a conv layer need + # bias, if the original conv doesn't have a bias and there is + # no constant need to be folded into the bias, the need_bias is False. 
+    need_bias = conv.bias is not None
+    new_conv = torch.nn.Conv2d(in_channels=n_remained_in,
+                               out_channels=n_remained_out,
+                               kernel_size=conv.kernel_size,
+                               stride=conv.stride,
+                               padding=conv.padding,
+                               dilation=conv.dilation,
+                               groups=new_groups,
+                               bias=need_bias,
+                               padding_mode=conv.padding_mode)
+
+    new_conv.to(conv.weight.device)
+    new_conv.weight.data.copy_(tmp_weight)
+
+    # copy the bias data
+    if conv.bias is not None:
+        new_conv.bias.data.copy_(torch.index_select(
+            conv.bias.data, 0, remained_out))
+
+    return new_conv
+
+
+def replace_convtranspose2d(convtrans, masks):
+    """
+    ConvTranspose2d needs its own replace function because the layout of its
+    weight differs from that of traditional conv layers: the weight shape is
+    [N_in, N_out/groups, ksize_1, ksize_2].
+    Parameters
+    ----------
+    convtrans : torch.nn.ConvTranspose2d
+        The convtranspose2d module to be replaced
+    masks : Tuple of the input masks, output masks and weight masks
+        Tuple of the masks, for example
+        ([input_m1, input_m2], [output_m], {'weight':weight_m})
+    Returns
+    -------
+    torch.nn.ConvTranspose2d
+        The new convtranspose2d module
+    """
+    in_masks, output_mask, weight_masks = masks
+    assert isinstance(convtrans, torch.nn.ConvTranspose2d)
+    if len(in_masks) != 1:
+        raise InputsNumberError()
+    in_mask = in_masks[0]
+
+    weight_mask = weight_masks['weight']
+    pruned_in, remained_in = convert_to_coarse_mask(in_mask, 1)
+    pruned_out, remained_out = convert_to_coarse_mask(output_mask, 1)
+    # ConvTranspose2d has the weight shape of [N_in, N_out/groups, k1, k2]
+    n_remained_in = weight_mask.size(0) - pruned_in.size(0)
+    n_remained_out = weight_mask.size(
+        1) * convtrans.groups - pruned_out.size(0)
+    if n_remained_in != remained_in.size(0) or n_remained_out != remained_out.size(0):
+        raise ShapeMisMatchError()
+
+    k_size1, k_size2 = convtrans.kernel_size
+    # Note: we should resolve the group dependency of the convtrans layers before
+    # running into this function
+    ori_inchannel_step = int(convtrans.in_channels/convtrans.groups)
+    ori_outchannel_step = int(convtrans.out_channels/convtrans.groups)
+    new_inchannel_step = new_outchannel_step = None
+    for groupid in range(convtrans.groups):
+        in_start = groupid * ori_inchannel_step
+        in_end = in_start + ori_inchannel_step
+        out_start = groupid * ori_outchannel_step
+        out_end = out_start + ori_outchannel_step
+        current_input_index = list(
+            filter(lambda x: in_start <= x and x < in_end, remained_in.tolist()))
+        current_output_index = list(
+            filter(lambda x: out_start <= x and x < out_end, remained_out.tolist()))
+        if len(current_input_index) == 0:
+            # the whole group is pruned
+            continue
+        else:
+            new_inchannel_step = len(current_input_index)
+            new_outchannel_step = len(current_output_index)
+            break
+    tmp_weight = torch.ones(
+        n_remained_in, new_outchannel_step, k_size1, k_size2)
+    tmp_weight = tmp_weight.to(convtrans.weight.device)
+
+    if new_inchannel_step == 0 or new_outchannel_step == 0:
+        raise EmptyLayerError()
+    if n_remained_in % new_inchannel_step != 0 or n_remained_out % new_outchannel_step != 0:
+        raise UnBalancedGroupError()
+
+    new_groups = 0
+    for groupid in range(convtrans.groups):
+        # copy the weights of this group
+        in_start = groupid * ori_inchannel_step
+        in_end = in_start + ori_inchannel_step
+        out_start = groupid * ori_outchannel_step
+        out_end = out_start + ori_outchannel_step
+        current_input_index = list(
+            filter(lambda x: in_start <= x and x < in_end, remained_in.tolist()))
+        current_output_index = list(
+            filter(lambda x: out_start <= x and x < out_end,
remained_out.tolist())) + # remap the global index to the group index + # in the convtranspose layer, the groups are on + # the output channel dimension + current_output_index = [x-out_start for x in current_output_index] + if len(current_input_index) == 0: + # if the whole group are pruned + assert len(current_output_index) == 0 + continue + # check if the number of remained channel of each group are the same + if len(current_input_index) != new_inchannel_step or len(current_output_index) != new_outchannel_step: + raise UnBalancedGroupError() + + # copy the weight into tmp_weight + new_in_start = new_inchannel_step * new_groups + new_in_end = new_in_start + new_inchannel_step + tmp_weight[new_in_start:new_in_end] = torch.index_select( + convtrans.weight[current_input_index], 1, torch.as_tensor(current_output_index, dtype=torch.long).to(convtrans.weight.device)) + new_groups += 1 + + _logger.debug('Replace convtranspose2d with in_channels:%d out_channels:%d', + n_remained_in, n_remained_out) + new_convtrans = torch.nn.ConvTranspose2d(in_channels=n_remained_in, + out_channels=n_remained_out, + kernel_size=convtrans.kernel_size, + stride=convtrans.stride, + padding=convtrans.padding, + dilation=convtrans.dilation, + groups=new_groups, + bias=convtrans.bias is not None, + padding_mode=convtrans.padding_mode) + new_convtrans.to(convtrans.weight.device) + new_convtrans.weight.copy_(tmp_weight) + if convtrans.bias is not None: + if output_mask is not None: + new_convtrans.bias.data[:] = torch.index_select( + convtrans.bias.data, 0, remained_out) + else: + new_convtrans.bias.data.copy_(convtrans.bias.data) + return new_convtrans + + +def replace_layernorm(layernorm, masks): + in_masks, _, _ = masks + assert isinstance(layernorm, nn.LayerNorm) + if len(in_masks) != 1: + raise InputsNumberError() + in_mask = in_masks[0] + dim_n = len(in_mask.size()) + new_shape = [] + for i in range(1, dim_n): + sum_dims = list(range(0, dim_n)) + sum_dims.remove(i) + reduced = torch.sum(in_mask, sum_dims) + n_remained = torch.sum(reduced > 0) + new_shape.append(n_remained) + + return nn.LayerNorm(tuple(new_shape), layernorm.eps, layernorm.elementwise_affine) diff --git a/nni/compression/pytorch/speedup/compressor.py b/nni/compression/pytorch/speedup/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..41b5c38cd15215702b9271f9976a37106ee1bf42 --- /dev/null +++ b/nni/compression/pytorch/speedup/compressor.py @@ -0,0 +1,518 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import logging +from pathlib import Path +import queue + +import torch +import torch.nn as nn + +from nni.common.graph_utils import build_module_graph +from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict +from nni.compression.pytorch.utils.utils import get_module_by_name +from .compress_modules import replace_module +from .infer_mask import AutoMaskInference +from .jit_translate import jit_to_python_function +from ..utils import rand_like_with_shape + + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +class ModelSpeedup: + """ + This class is to speedup the model with provided weight mask. + + Parameters + ---------- + model : pytorch model + The model user wants to speed up + dummy_input : pytorch tensor, tuple of tensor, list of tensor + Note: The first dimension of the dummy_input should be the batchsize. + The dummy input for ```jit.trace```, users should put it on the right + device. 
+    masks_file : str/dict
+        The path of the user-provided mask file, or the mask object itself
+    map_location : str
+        The device on which the masks are placed, same as map_location in ```torch.load```
+    batch_dim : int
+        The index of the batch dimension in the dummy_input
+    confidence : int
+        The confidence coefficient of the sparsity inference. This value is
+        actually used as the batchsize of the dummy_input.
+    """
+
+    def __init__(self, model, dummy_input, masks_file, map_location=None,
+                 batch_dim=0, confidence=8):
+        assert confidence > 1
+        # The auto inference will change the values of the parameters in the model,
+        # so we need to make a copy before the mask inference
+        self.ori_state_dict = copy.deepcopy(model.state_dict())
+        self.bound_model = model
+        self.inferred_masks = dict()  # key: module_name, value: ModuleMasks
+        self.batch_dim = batch_dim
+        self.dummy_input, self.device = self._random_model_input(dummy_input, confidence, batch_dim)
+        self.torch_graph = build_module_graph(model, self.dummy_input)
+        # dict object to save the auto inference objects of the submodules
+        self.auto_inferences = {}
+        # the index dict to find the corresponding torch._C.Value object
+        # according to the debug name;
+        # we need the dummy_input to infer the mask automatically, so we save
+        # the indexes from each tensor's debug name to the torch._C.Value object
+        self.debugname_to_value = {}
+        # load the mask tensors to the same device as the dummy_input;
+        # self.masks saves the mask tensors pruned by the user and the inferred
+        # masks of the other modules
+        if isinstance(masks_file, (str, Path)) and Path(masks_file).exists():
+            self.masks = torch.load(
+                masks_file, map_location if map_location is not None else str(self.device))
+        elif isinstance(masks_file, dict):
+            self.masks = masks_file
+        else:
+            raise Exception('Please provide the mask or the path of the mask file')
+        self.constant = {}
+        # self.internal_result saves the internal outputs of the submodules
+        self.internal_result = {}
+
+    def _random_model_input(self, dummy_input, confidence, batch_dim):
+        """
+        Get a new random dummy input according to the original dummy_input
+        and confidence, batch_dim.
+
+        Parameters
+        ----------
+        dummy_input: Tensor or list/dict of Tensors
+            The dummy_input given by the user.
+        confidence: int
+            The new batch size of the generated dummy_input.
+        batch_dim: int
+            The index of the batch dimension.
+
+        Returns
+        -------
+        new_dummy_input: Tensor or list/dict of Tensors
+            The generated dummy_input for mask inference.
+        device: torch.device
+            The device of the generated dummy_input
+        """
+        input_errmsg = 'Only support tensor, list/tuple/dict of tensors as input'
+        # Some models may use a list of tensors as input, for example transformers
+        new_dummy_input, device = None, None
+        if isinstance(dummy_input, torch.Tensor):
+            input_shape = list(dummy_input.size())
+            # set the batchsize to the confidence ratio
+            input_shape[batch_dim] = confidence
+            new_dummy_input = rand_like_with_shape(input_shape, dummy_input)
+            device = dummy_input.device
+        elif isinstance(dummy_input, (tuple, list)):
+            # else if the dummy input is a list/tuple
+            new_dummy_input = []
+            old_batchsize = dummy_input[0].size(0)
+            device = dummy_input[0].device
+            for _, t_input in enumerate(dummy_input):
+                assert isinstance(t_input, torch.Tensor), input_errmsg
+                assert t_input.size(0) == old_batchsize, 'The first dimension should be batchsize\
+                    and the batchsize of all inputs should be the same!'
+                input_shape = list(t_input.size())
+                input_shape[batch_dim] = confidence
+                new_dummy_input.append(
+                    rand_like_with_shape(input_shape, t_input))
+        elif isinstance(dummy_input, dict):
+            new_dummy_input = {}
+            tmp_key = list(dummy_input.keys())[0]
+            old_batchsize = dummy_input[tmp_key].size(0)
+            device = dummy_input[tmp_key].device
+            for in_name, t_input in dummy_input.items():
+                assert isinstance(t_input, torch.Tensor), input_errmsg
+                assert old_batchsize == t_input.size(0), 'The first dimension should be batchsize\
+                    and the batchsize of all inputs should be the same!'
+                input_shape = list(t_input.size())
+                input_shape[batch_dim] = confidence
+                new_dummy_input[in_name] = rand_like_with_shape(
+                    input_shape, t_input)
+        else:
+            raise TypeError(input_errmsg)
+        return new_dummy_input, device
+
+    def _prepare_dummy_input(self, node):
+        """
+        Prepare the dummy_input for the auto mask inference.
+
+        Parameters
+        ----------
+        node: NodePyGroup
+
+        Returns
+        -------
+        dummy_input: list
+            List of tensors that will be used as input for the target node.
+        debugnames: list of strs
+            Debug names of the dummy_inputs.
+        """
+        _logger.debug('Prepare auto mask inference for node: %s',
+                      node.unique_name)
+
+        # prepare the input and output masks for this node:
+        # if there is already a mask in self.masks, then use
+        # the original mask tensor, else create a new one.
+        inputs_name = node.inputs
+        # build the dummy_input and in_masks for the target node
+        dummy_input = []
+        debugnames = []
+        for _input in inputs_name:
+            if _input not in self.internal_result:
+                # If the input debug name is not in self.internal_result,
+                # then this input isn't an output tensor of any predecessor
+                # node. It is an attribute of the submodule, such as its
+                # weight or bias, so we skip these tensors.
+                # If we don't want this specific judgement here, we can merge
+                # the `prim::GetAttr` node of the weight/bias tensor into the key
+                # node, such as `conv`.
+                # This is caused by the `merge_module_node` function in
+                # _graph_utils.py, because it doesn't merge the prim::GetAttr
+                # node into the key node. In the current version of _graph_utils.py,
+                # we only merge the nodes that have the same scope name; however,
+                # the scope name of the corresponding prim::GetAttr node of the
+                # `weight` tensor is None.
+                continue
+            # The detach operation here is for in-place operations: we cannot
+            # directly call backward on the output tensor of an in-place operator.
+            dummy_input.append(self.internal_result[_input].detach())
+            debugnames.append(_input)
+
+        return dummy_input, debugnames
+
+    def update_direct_sparsity(self, node):
+        """
+        Update the direct sparsity for the target node. Here direct sparsity
+        means the sparsity in the output tensor that is caused by the sparsity
+        in the input tensors/weight tensors.
+        """
+        # this name is consistent with the name returned by named_modules()
+        module_name = node.name
+        _logger.info('Update mask for %s', module_name)
+        unique_name = node.unique_name
+        dummy_input, input_debugname = self._prepare_dummy_input(node)
+        # get the input masks from self.masks
+        # Note: the input masks of the successor nodes are
+        # already created by the predecessor nodes
+        in_masks = [self.masks[debugname] for debugname in input_debugname]
+        in_constants = [self.constant[debugname]
+                        for debugname in input_debugname]
+        if node.type == 'func':
+            # We cannot get a runnable function directly from the jit traced
+            # graph, so we translate it back to a python function. Note: the
+            # function is applicable to both cpu/gpu devices, and the output
+            # tensors will be on the same device as the input tensors.
+            func = jit_to_python_function(node, self)
+            if func is None:
+                # no need to infer the sparsity for this node
+                self.auto_inferences[unique_name] = None
+                return
+            # a function doesn't have weights
+            _auto_infer = AutoMaskInference(
+                func, dummy_input, in_masks, in_constants=in_constants, batch_dim=self.batch_dim)
+        else:
+            weight_mask = None
+            if module_name in self.masks:
+                weight_mask = self.masks[module_name]
+            _, module = get_module_by_name(self.bound_model, module_name)
+            _auto_infer = AutoMaskInference(
+                module, dummy_input, in_masks, weight_mask, in_constants=in_constants,
+                state_dict=copy.deepcopy(module.state_dict()), batch_dim=self.batch_dim)
+        self.auto_inferences[unique_name] = _auto_infer
+        _auto_infer.name = node.unique_name
+
+        _auto_infer.update_direct_sparsity()
+        # also save the input debug names into the auto_infer
+        _auto_infer.input_debugname = input_debugname
+        # Update the mask tensor and the internal output of the submodules.
+        # After manually unpacking the tuple/list of tensors, the number of
+        # outputs of each node should always be one (except for the TupleUnpack
+        # node at the end of the whole model).
+        assert len(
+            node.outputs) == 1, 'The number of outputs should be one after the tuple is unpacked manually'
+
+        out_debugname = node.outputs[0]
+        # update the output mask into self.masks
+        self.masks[out_debugname] = _auto_infer.output_mask
+        self.constant[out_debugname] = _auto_infer.out_constant
+        # update the output result into self.internal_result, so that
+        # the successor nodes can take these output tensors as inputs
+        self.internal_result[out_debugname] = _auto_infer.output
+        # update the parameter mask of the node
+        self.masks[module_name] = _auto_infer.weight_mask
+
+    def update_indirect_sparsity(self, node):
+        """
+        This function updates the indirect sparsity. To explain what indirect
+        sparsity is: suppose there are two tensors TA and TB, and we perform
+        the calculation TC = TA x TB, in which TC is also a tensor. Once some
+        values in TA are masked to zeros, the corresponding positions in TB
+        become potential sparsity as well, because they have no effect on the
+        final output (the gradient of these positions in TB equals 0 all the
+        time). This function is to find the potential sparsity caused by other
+        sparsity (we call it indirect sparsity here). Basically, we can find
+        this potential sparsity through the gradient.
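+
+        (In the implementation below, the gradient accumulated on each input
+        tensor is added onto the producer's recorded output in
+        ``self.internal_result``, which is how the sparsity information
+        propagates backwards across nodes.)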
+
+        Parameters
+        ----------
+        node: NodePy
+            The target node to update the indirect sparsity
+        """
+        unique_name = node.unique_name
+        if unique_name in self.auto_inferences and self.auto_inferences[unique_name] is not None:
+            # if the auto inference object is already in self.auto_inferences,
+            # then directly update the previous one
+            _logger.info(
+                'Update the indirect sparsity for the %s', unique_name)
+            auto_infer = self.auto_inferences[unique_name]
+            auto_infer.update_indirect_sparsity()
+            # pass the gradient to the predecessor nodes
+            for in_id, tin in enumerate(auto_infer.dummy_input):
+                debug_name = auto_infer.input_debugname[in_id]
+                last_output = self.internal_result[debug_name]
+                # TODO what if the last output is a tuple/list of tensors
+                if last_output.grad is not None and tin.grad is not None:
+                    last_output.grad.data += tin.grad.data
+                else:
+                    last_output.grad = tin.grad
+        else:
+            _logger.warning('Note: %s does not have a corresponding mask inference object', node.name)
+
+    def _vnode_to_value(self, c_node):
+        """
+        Translate the C Value node into values/tensors.
+        """
+        errmsg = "Only support the torch._C.Value type"
+        assert isinstance(c_node, torch._C.Value), errmsg
+        if isinstance(c_node.type(), torch._C.TensorType):
+            shape = tuple(c_node.type().sizes())
+            dtype = c_node.type().scalarType()
+            # TODO should use a more general way to get the input
+            if dtype.startswith('Float') or dtype.startswith('Double'):
+                return torch.rand(shape).to(self.device)
+            else:
+                # This small range is due to `ReLU6`; we will add manual
+                # mask inference rules for several ops in the future, so
+                # that we can remove this constraint.
+                return torch.randint(0, 10, shape, device=self.device)
+        else:
+            value = c_node.toIValue()
+            # TODO support more kinds of value nodes
+            errmsg = "Doesn't support converting %s to values" % str(c_node.type())
+            # currently only support tensors and constant values
+            assert value is not None, errmsg
+            return value
+
+    def infer_modules_masks(self):
+        """
+        Infer the masks for all layers in the module. This function can be
+        divided into two steps: first, forward inference of the masks; second,
+        backward inference of the masks. We keep repeating these two steps
+        until the masks of the model don't change.
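+
+        Concretely (a sketch of the scheduling below, not a public API): the
+        forward pass visits a node once all of its predecessors are visited
+        (its in-degree drops to 0), and the backward pass visits a node once
+        all of its successors are visited (its out-degree drops to 0).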
+ """ + # unpack the tensor tuple/list before the mask inference + self.torch_graph.unpack_manually() + # find the input/ouput tensor of the whole graph + graph_input = [] + graph_output = [] + for name, nodeio in self.torch_graph.nodes_py.nodes_io.items(): + if nodeio.input_or_output == 'input': + graph_input.append((name, nodeio)) + # also put the graph input tensor into the internal_result + # TODO if we can find the corresponding relation between the value node + # and the dummy_inputs, we can use the inputs value in the dummy_input + value = self._vnode_to_value(self.debugname_to_value[name]) + self.internal_result[name] = value + # create the mask tensor for the input value + if isinstance(self.internal_result[name], torch.Tensor): + self.masks[name] = torch.ones_like(value) + self.constant[name] = torch.zeros_like(value) + elif nodeio.input_or_output == 'output': + graph_output.append((name, nodeio)) + # count the degree for the node in the graph + in_degree = {} + out_degree = {} + visit_queue = queue.Queue() + for node in self.torch_graph.nodes_py.nodes_op: + successors = self.torch_graph.find_successors(node.unique_name) + out_degree[node.unique_name] = len(successors) + predecessors = self.torch_graph.find_predecessors(node.unique_name) + in_degree[node.unique_name] = len(predecessors) + if in_degree[node.unique_name] == 0: + visit_queue.put(node) + # Forward mask inference + while not visit_queue.empty(): + curnode = visit_queue.get() + # forward mask inference for curnode + self.update_direct_sparsity(curnode) + successors = self.torch_graph.find_successors(curnode.unique_name) + for successor in successors: + in_degree[successor] -= 1 + if in_degree[successor] == 0: + visit_queue.put(self.torch_graph.name_to_node[successor]) + # backward mask inference + for unique_name in out_degree: + if out_degree[unique_name] == 0: + visit_queue.put(self.torch_graph.name_to_node[unique_name]) + while not visit_queue.empty(): + curnode = visit_queue.get() + self.update_indirect_sparsity(curnode) + predecessors = self.torch_graph.find_predecessors( + curnode.unique_name) + for predecessor in predecessors: + out_degree[predecessor] -= 1 + if out_degree[predecessor] == 0: + visit_queue.put(self.torch_graph.name_to_node[predecessor]) + + def replace_compressed_modules(self): + """ + Replace all the modules that have changed (weights/inputs/output) shape. + The new module is created using the same arguments of the to-be-replaced module, + and correctly inherits its weights. + + NOTE: ```func``` type cannot be replaced as it is not a module, thus, one limitation + is that ```func``` should be not required to be replaced. + """ + with torch.no_grad(): + for unique_name in self.auto_inferences: + self.replace_submodule(unique_name) + + def replace_submodule(self, unique_name, reindex_dim=None, reindex=None): + """ + Replace the submodule according to the inferred sparsity. + unique_name: str + The unique_name of the submodule to replace. + reindex_dim: int + The dimension of the re-index operation. + reindex: Reindex + The index tensor. Normally this variable is None. If we want to reindex the + output of this submodule, we can pass the index by this parameter. + """ + class ReindexModule(nn.Module): + """ + ReindexModule is used to resolve the mask conflict when replace the submodule. 
+ Basically, we can use two ways to resolve the mask conflict: (1) unmask some + values(will introduce more computation overhead) (2) reindex and padd the output + tensor of the target op(introduce more memory access overhad). Currently this + method is shutdown, in the future, we will merge these two methods into a graph + pass which is used to resolve the mask conflict. + """ + def __init__(self, ori_module, reindex_dim, reindex): + super(ReindexModule, self).__init__() + self.ori_module = ori_module + self.reindex_dim = reindex_dim + self.reindex = reindex + tmp_index = [slice(None, None) for i in range(reindex_dim+1)] + # the index for the tensor + tmp_index[reindex_dim] = reindex + self.t_index = tuple(tmp_index) + + def forward(self, x): + tmpout = self.ori_module(x) + shape = list(tmpout.size()) + shape[self.reindex_dim] = self.reindex.size(0) + out = torch.zeros(tuple(shape), device=tmpout.device, + requires_grad=tmpout.requires_grad) + out[self.t_index] = tmpout + return out + + assert unique_name in self.auto_inferences + g_node = self.torch_graph.name_to_node[unique_name] + _logger.debug("replace %s, in %s type, with op_type %s", + unique_name, g_node.type, g_node.op_type) + auto_infer = self.auto_inferences[unique_name] + if g_node.type == 'module': + if g_node.unique_name in self.torch_graph.reused_module: + if reindex_dim is not None: + _logger.warning( + 'Cannot replace a reused module with padding operator!!') + return None + super_module, leaf_module = get_module_by_name( + self.bound_model, g_node.name) + m_type = g_node.op_type + if not m_type in replace_module: + raise RuntimeError( + "Has not supported replacing the module: `{}`".format(m_type)) + _logger.info("replace module (name: %s, op_type: %s)", + g_node.name, m_type) + compressed_module = replace_module[m_type]( + leaf_module, auto_infer.get_masks()) + new_submodule = compressed_module + if reindex_dim is None: + setattr(super_module, g_node.name.split( + '.')[-1], compressed_module) + elif reindex_dim is not None and reindex is not None: + # reindex the output of this submodule and replace the orginal module + new_submodule = ReindexModule( + compressed_module, reindex_dim, reindex) + setattr(super_module, g_node.name.split( + '.')[-1], new_submodule) + return new_submodule + elif g_node.type == 'func': + _logger.info("Warning: cannot replace (name: %s, op_type: %s) which is func type", + unique_name, g_node.op_type) + return None + else: + raise RuntimeError("Unsupported node type: {}".format(g_node.type)) + + def initialize_speedup(self): + """ + Do some initial work for speedup. 
+ """ + # initialize the self.debugname_to_value + # build a mapping table from the debug name of the tensor + # to its value node in the graph + traced_graph = self.torch_graph.trace.graph + for node in traced_graph.nodes(): + for _input in node.inputs(): + debug_name = _input.debugName() + if debug_name not in self.debugname_to_value: + self.debugname_to_value[debug_name] = _input + for _output in node.outputs(): + debug_name = _output.debugName() + if debug_name not in self.debugname_to_value: + self.debugname_to_value[debug_name] = _output + # put the model itself into internel_result to perform the + # value inference for the 'prim::GetAttr', the first ClassType + # of the whole graph is the model class + + for graph_input in traced_graph.inputs(): + if graph_input.type().kind() == 'ClassType': + self.internal_result[graph_input.debugName() + ] = self.bound_model + break + + def speedup_model(self): + """ + There are basically two steps: first, do mask/shape inference, + second, replace modules. + """ + + _logger.info("start to speed up the model") + self.initialize_speedup() + training = self.bound_model.training + # set to the evaluation mode + self.bound_model.train(False) + # TODO suppose to fix the conflict after the sparsity propagation + # which is more elegent + fix_mask_conflict(self.masks, self.bound_model, self.dummy_input) + + _logger.info("infer module masks...") + self.infer_modules_masks() + _logger.info('resolve the mask conflict') + + # load the original stat dict before replace the model + self.bound_model.load_state_dict(self.ori_state_dict) + _logger.info("replace compressed modules...") + # the mask conflict should be already resolved + self.replace_compressed_modules() + self.bound_model.train(training) + _logger.info("speedup done") diff --git a/nni/compression/pytorch/speedup/error_code.py b/nni/compression/pytorch/speedup/error_code.py new file mode 100644 index 0000000000000000000000000000000000000000..80d71700060630b465770dd6bff03827acbc948d --- /dev/null +++ b/nni/compression/pytorch/speedup/error_code.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +# Error Code of the speedup +class SpeedupError(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return str(self.msg) + +class EmptyLayerError(SpeedupError): + def __init__(self): + super(EmptyLayerError, self).__init__("Pruning a Layer to empty is not legal") + +class ShapeMisMatchError(SpeedupError): + def __init__(self): + super(ShapeMisMatchError, self).__init__("Shape mismatch!") + +class InputsNumberError(SpeedupError): + def __init__(self): + super(InputsNumberError, self).__init__("The number of the inputs of the target OP is wrong") + +class OutputTypeError(SpeedupError): + def __init__(self, current_type, target_type): + msg = f"The output type should be {str(target_type)}, but {str(current_type)} founded" + super(OutputTypeError, self).__init__(msg) + +class UnBalancedGroupError(SpeedupError): + def __init__(self): + msg = "The number remained filters in each group is different" + super(UnBalancedGroupError, self).__init__(msg) \ No newline at end of file diff --git a/nni/compression/pytorch/speedup/infer_mask.py b/nni/compression/pytorch/speedup/infer_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..8ace639207ea18379e8bbf9ae3c32b78c8b7c7eb --- /dev/null +++ b/nni/compression/pytorch/speedup/infer_mask.py @@ -0,0 +1,378 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT license.
+
+import logging
+import torch
+import torch.nn as nn
+from ..utils import randomize_tensor, torch_float_dtype, torch_integer_dtype
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+STD_DELTA = 1e-6
+
+
+class AutoMaskInference:
+    def __init__(self, module, dummy_input, in_masks=None, weight_mask=None, \
+                 output_mask=None, name=None, in_constants=None, state_dict=None, batch_dim=0):
+        """
+        This class infers the mask of the target module automatically.
+        update_direct_sparsity infers the output mask according to the input
+        masks; in contrast, update_indirect_sparsity infers the input masks
+        according to the given output mask. The newly found sparsity is
+        incrementally merged into the original in_masks and output_mask.
+
+        Parameters
+        ----------
+        module: torch.nn.Module/function
+            The target module to infer the mask for. Needs to be callable.
+        dummy_input: torch.Tensor/list of Tensors
+            The dummy_input of the target module.
+        in_masks: list of torch.Tensor
+            The input masks of the target module. If in_masks is not None, then
+            update_direct_sparsity and update_indirect_sparsity incrementally
+            update the given in_masks; else, AutoMaskInference creates new
+            in_masks for the target module.
+        output_mask: torch.Tensor
+            The output mask of the target module. Similar to in_masks, if
+            output_mask is not None, then update_direct_sparsity and
+            update_indirect_sparsity incrementally update the given output_mask,
+            else AutoMaskInference creates an output_mask for the target module.
+        weight_mask: dict of the weight masks
+            The weight masks of the target module; the key is the corresponding
+            name of the mask. For example:
+            {'weight': torch.ones(1000, 1000), 'bias': torch.ones(1000)}
+        name: str
+            Name of the target module.
+        in_constants: list of torch.Tensor
+            The corresponding constant values of the in_masks.
+        state_dict: dict of torch.Tensor
+            The original values of the weights.
+        batch_dim: int
+            The index of the batch dimension of the input tensors.
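+
+        Examples
+        --------
+        A minimal sketch (hypothetical shapes, for illustration only)::
+
+            conv = torch.nn.Conv2d(3, 16, 3)
+            x = torch.rand(8, 3, 32, 32)
+            infer = AutoMaskInference(conv, [x], in_masks=[torch.ones_like(x)])
+            infer.update_direct_sparsity()
+            out_mask = infer.output_mask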
+
+        """
+        errmsg = '%s is not callable, should pass an nn.Module/function' % str(
+            module)
+        assert callable(module), errmsg
+        self.module = module
+
+        # Initialize the dummy_input
+        if isinstance(dummy_input, list):
+            # if there are multiple input variables
+            self.dummy_input = dummy_input
+        else:
+            # if there is only one input variable
+            self.dummy_input = [dummy_input]
+
+        # Initialize the masks for the input tensors
+        self.in_masks = in_masks if in_masks is not None else [
+            None] * len(self.dummy_input)
+        self.in_constants = in_constants if in_constants is not None else [
+            torch.zeros_like(x) for x in dummy_input]
+        for in_id, _ in enumerate(self.in_masks):
+            if self.in_masks[in_id] is None and \
+                    isinstance(self.dummy_input[in_id], torch.Tensor):
+                # If the input mask is None, create an all-ones mask for the
+                # corresponding input tensor. ones_like puts the created mask
+                # on the same device as the dummy_input.
+                self.in_masks[in_id] = torch.ones_like(self.dummy_input[in_id])
+
+        # Initialize the mask for the output tensors
+        self.output = self.module(*dummy_input)
+        if output_mask is not None:
+            # assume the given output mask is right
+            self.output_mask = output_mask
+        else:
+            if isinstance(self.output, torch.Tensor):
+                self.output_mask = torch.ones_like(self.output)
+            elif isinstance(self.output, list) or isinstance(self.output, tuple):
+                self.output_mask = []
+                for o_tensor in self.output:
+                    if isinstance(o_tensor, torch.Tensor):
+                        self.output_mask.append(torch.ones_like(o_tensor))
+                    else:
+                        # if one of the outputs is not a tensor, set the
+                        # corresponding mask to None
+                        self.output_mask.append(None)
+            else:
+                self.output_mask = None
+
+        # Initialize the masks for the parameters
+        self.weights = {}
+        self.weight_mask = {}
+        if weight_mask:
+            self.weight_mask.update(weight_mask)
+        self.name = name
+        if isinstance(self.module, nn.Module):
+            # a function should not have parameters;
+            # get all the parameter tensors of the target module
+            for name, para in module.named_parameters():
+                self.weights[name] = para
+                if name not in self.weight_mask:
+                    self.weight_mask[name] = torch.ones_like(para.data)
+        self.state_dict = state_dict
+        # TODO support other batch dimensions in the future
+        self.batch_dim = batch_dim
+
+    def random_init(self, start=0.1, end=8.0):
+        """
+        Randomly initialize the weights of the module. The values of the
+        tensors will not affect the mask auto inference.
+        """
+        # Currently we set the random range to 0.1-8.0 because of ReLU6:
+        # if we used a range far larger than 6, it might infer a wrong mask
+        # when the confidence is low. In the future, we will add mask
+        # inference rules for ReLU6 to remove this range constraint.
+        with torch.no_grad():
+            for tensor in self.dummy_input:
+                if isinstance(tensor, torch.Tensor) and len(tensor.size()) > 0:
+                    # if the tensor is a scalar, skip it
+                    randomize_tensor(tensor, start, end)
+            for para in self.weights:
+                randomize_tensor(self.weights[para].data, start, end)
+
+    def zero_grad(self):
+        """
+        Set the gradients of the weights and input tensors to zero.
+        """
+        with torch.no_grad():
+            # set the weights' gradients to zero
+            if isinstance(self.module, nn.Module):
+                self.module.zero_grad()
+            # also zero the gradients of the input tensors
+            for tensor in self.dummy_input:
+                if isinstance(tensor, torch.Tensor):
+                    if tensor.grad is not None:
+                        tensor.grad.data.zero_()
+
+    def requires_grad_(self, flag=True):
+        """
+        Set requires_grad of the input tensors and parameters to flag.
+        """
+        for t_in in self.dummy_input:
+            if isinstance(t_in, torch.Tensor) and t_in.dtype in torch_float_dtype:
+                # only float tensors can require gradients;
+                # enable the auto gradient
+                t_in.requires_grad_(flag)
+        for para_name in self.weights:
+            if self.weights[para_name].dtype in torch_float_dtype:
+                self.weights[para_name].requires_grad_(flag)
+
+    def apply_mask(self):
+        self.__apply_input_mask()
+        self.__apply_weight_mask()
+
+    def __apply_input_mask(self):
+        """
+        Apply the masks of the input tensors.
+        """
+        with torch.no_grad():
+            # apply the input mask
+            for tid, in_tensor in enumerate(self.dummy_input):
+                if isinstance(in_tensor, torch.Tensor) and self.in_masks[tid] is not None:
+                    in_tensor.data = in_tensor.data * \
+                        self.in_masks[tid] + \
+                        (1-self.in_masks[tid]) * self.in_constants[tid]
+
+    def __apply_weight_mask(self):
+        """
+        Apply the weight masks of this module.
+        """
+        with torch.no_grad():
+            # apply the weight mask
+            for para in self.weights:
+                if para in self.weight_mask:
+                    self.weights[para].data *= self.weight_mask[para].data
+
+    def isconstants(self, tout):
+        """
+        Find the constants in the tensor tout. This function returns a mask
+        tensor that indicates whether each value in tout is a constant, and a
+        second tensor that records the values of those constants.
+
+        Parameters
+        ----------
+        tout: torch.Tensor
+            The target output tensor in which to find the constants
+        Returns
+        -------
+        mask: torch.Tensor
+            The mask tensor (same shape as tout) that indicates whether
+            the corresponding value is a constant.
+        constant: torch.Tensor
+            The tensor (same shape as tout) that records the values of
+            the constants in tout.
+        """
+        assert isinstance(tout, torch.Tensor)
+        out_mask = torch.ones_like(tout)
+        constant = torch.zeros_like(tout)
+        # check whether tout is a scalar (a tensor that only has one value)
+        if len(tout.size()) == 0:
+            # tout is a scalar tensor; we take this scalar as a constant.
+            # Usually, a scalar tensor is returned by the size() function.
+            constant = tout
+            return out_mask, constant
+        if tout.dtype in torch_integer_dtype:
+            # PyTorch cannot use torch.mean and torch.std to process integers,
+            # so if the dtype of the input tensor is an integer type, we need
+            # to check for constants by ourselves.
+            # Note: the first dimension should be the batch dimension
+            same = tout[:] == tout[0]
+            reduced = torch.sum(same, dim=0)
+            is_constant = reduced == tout.size(0)
+            out_mask[:, is_constant] = 0
+            constant[:, is_constant] = tout[0][is_constant]
+        else:
+            # calculate the std of the output along the batch dimension
+            std = torch.std(tout, dim=0)
+            # calculate the mean value of the output along the batch dimension
+            mean = torch.mean(tout, dim=0)
+            mask_pos = std < STD_DELTA
+            out_mask[:, mask_pos] = 0
+            constant[:, mask_pos] = mean[mask_pos]
+        return out_mask, constant
+
+    def update_indirect_sparsity(self):
+        """
+        This function updates the indirect sparsity. To explain what indirect
+        sparsity is: suppose there are two tensors TA and TB, and we perform
+        the calculation TC = TA x TB, in which TC is also a tensor. Once some
+        values in TA are masked to zeros, the corresponding positions in TB
+        become potential sparsity as well, because they have no effect on the
+        final output (the gradient of these positions in TB equals 0 all the
+        time). This function is to find the potential sparsity caused by other
+        sparsity (we call it indirect sparsity here). Basically, we can find
+        this potential sparsity through the gradient.
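+
+        For example (illustrative numbers only): with TC = TA * TB
+        element-wise, if TA[0][1] is masked to 0, then d(loss)/d(TB[0][1]) is
+        0 for any downstream loss, so TB[0][1] can be treated as prunable too.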
+        """
+        # Each node only updates its output mask when we run backwards.
+        # We update the output mask because some ops may broadcast their
+        # output: for example, OP A's output tensor may be taken by two
+        # OPs (B, C) as input. So we cannot directly update the input mask
+        # at OP B or C. We can only update the mask of A's output tensor
+        # when both B and C are already updated (gradients are already
+        # calculated and added onto A's output tensor).
+        # Besides, updating the mask of A's output tensor is equal to
+        # updating the input masks of OPs B and C.
+        if isinstance(self.output, torch.Tensor) and self.output.grad is not None:
+            # If the output has a gradient, this node has successor nodes and
+            # the successors have already updated their indirect sparsity.
+            # We can mask the values whose gradient is always zero.
+            gradient_sum = torch.sum(torch.abs(self.output.grad.data), dim=0)
+            _grad_zero = gradient_sum == 0
+            for batchid in range(self.output.size(0)):
+                # set the same mask value for the whole batch
+                self.output_mask[batchid][_grad_zero] = 0
+        elif isinstance(self.output, tuple) or isinstance(self.output, list):
+            assert isinstance(self.output_mask, (tuple, list))
+            for oid, tout in enumerate(self.output):
+                errmsg = 'The output only supports tensor/list of tensors'
+                assert isinstance(tout, torch.Tensor), errmsg
+                gradient_sum = torch.sum(
+                    torch.abs(tout.grad.data), dim=0)
+                _grad_zero = gradient_sum == 0
+                for batchid in range(tout.size(0)):
+                    # set the same mask value for the whole batch
+                    self.output_mask[oid][batchid][_grad_zero] = 0
+
+        self.requires_grad_(True)
+        # Forward inference with auto gradient enabled.
+        # Note: tensors that need gradients cannot be used in in-place operators.
+        self.random_init()
+        self.apply_mask()
+        # Some operators may perform in-place operations, so we need to clone
+        # the input before passing it to self.module
+        tmp_dummy_input = [x.clone() if isinstance(
+            x, torch.Tensor) else x for x in self.dummy_input]
+        output = self.module(*tmp_dummy_input)
+
+        if isinstance(output, torch.Tensor) and output.grad_fn is None:
+            # the output does not have a gradient function
+            return
+        # Note: output may be a tensor or a list/tuple of tensors
+        if isinstance(output, torch.Tensor):
+            output.backward(self.output_mask)
+        elif isinstance(output, list) or isinstance(output, tuple):
+            for tid, t_out in enumerate(output):
+                t_out.backward(self.output_mask[tid])
+
+        # update the sparsity of the parameters
+        for para_name in self.weights:
+            grad_zero = self.weights[para_name].grad.data == 0
+            self.weight_mask[para_name][grad_zero] = 0
+
+    def update_direct_sparsity(self):
+        # we don't need gradients during the forward inference
+        out_mask = None
+        constant = None
+        with torch.no_grad():
+            # Note: we need to randomly init the input one more time here!
+            # Some operations are in-place, such as relu_, and an in-place
+            # operation may modify or write 0s into the dummy_input.
+            self.random_init()
+            # apply the masks for the input tensors and the weight tensors
+            self.apply_mask()
+            # Note: due to in-place operators such as relu_,
+            # `out` may be the same tensor as the dummy_input,
+            # so we use clone and detach to create a new tensor with
+            # the same values.
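+            # (For instance, torch.relu_ returns its input tensor itself, so
+            # without the clone().detach() below, `out` could alias
+            # self.dummy_input[0] and be corrupted by later randomization.)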
+            out = self.module(*self.dummy_input)
+            if isinstance(out, torch.Tensor):
+                out_mask, constant = self.isconstants(out.clone().detach())
+            elif isinstance(out, tuple) or isinstance(out, list):
+                out_mask = []
+                constant = []
+                for tout in out:
+                    _mask, _constant = self.isconstants(tout.clone().detach())
+                    out_mask.append(_mask)
+                    constant.append(_constant)
+            else:
+                _logger.warning(
+                    'Only support OPs whose output is a tensor/tuple of tensors/list of tensors')
+
+        # We also need to randomize the parameters of the module, because if
+        # the weight of the model has an unmasked 0, our output sparsity
+        # inference may be wrong. However, after randomizing the weights, the
+        # constants in the output tensors may differ from the constants
+        # calculated from the original state_dict. So, to get the right
+        # constants and eliminate the bias between the model before and after
+        # the sparsity inference, we need to reload the state_dict and
+        # recalculate the constants.
+        # Currently we also get the constant values at the same time as
+        # inferring the masks; in the future, we will separate the constant
+        # inference into a single graph pass.
+        if len(self.weights) > 0 and self.state_dict is not None:
+            self.module.load_state_dict(self.state_dict)
+            # apply the weight mask
+            self.__apply_weight_mask()
+            out = self.module(*self.dummy_input)
+            if isinstance(out, torch.Tensor):
+                out = out.clone().detach()
+                constant = torch.zeros_like(out)
+                constant_pos = out_mask == 0
+                constant[constant_pos] = out[constant_pos]
+            elif isinstance(out, (list, tuple)):
+                constant = []
+                for i, tout in enumerate(out):
+                    tout = tout.clone().detach()
+                    _tmp = torch.zeros_like(tout)
+                    sparsity_pos = out_mask[i] == 0
+                    _tmp[sparsity_pos] = tout[sparsity_pos]
+                    constant.append(_tmp)
+
+        if isinstance(out_mask, torch.Tensor):
+            assert isinstance(self.output_mask, torch.Tensor)
+            self.output_mask *= out_mask
+        elif isinstance(out_mask, list):
+            for i, _ in enumerate(out_mask):
+                self.output_mask[i] *= out_mask[i]
+        else:
+            _logger.warning('There is no output sparsity')
+        # also save the out_constant
+        self.out_constant = constant
+
+    def get_masks(self):
+        return (self.in_masks, self.output_mask, self.weight_mask)
+
diff --git a/nni/compression/pytorch/speedup/jit_translate.py b/nni/compression/pytorch/speedup/jit_translate.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0e1098093514ea788417794ca377ca0fc86fac1
--- /dev/null
+++ b/nni/compression/pytorch/speedup/jit_translate.py
@@ -0,0 +1,561 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import re
+import logging
+from functools import partial
+import torch
+
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+def translate_list(list_node, speedup=None):
+    """
+    Get the list of values from the list construct node.
+    Parameters
+    ----------
+    list_node: Torch.C.Value
+        The cpp node of the target list.
+    speedup: ModelSpeedup
+        The ModelSpeedup object.
+    Returns
+    -------
+    values: list
+        The list of values in the target cpp list node.
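+
+    Examples
+    --------
+    An illustrative sketch (not literal traced-graph syntax): for a value
+    created by ``%33 : int[] = prim::ListConstruct(%31, %32)``, this function
+    returns the python list built from the two inputs, e.g. ``[1, 64]``.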
+ """ + # the node that create the list + create_node = list_node.node() + assert create_node.kind() == 'prim::ListConstruct' + inputs = list(create_node.inputs()) + values = [] + for _i in inputs: + debugName = _i.debugName() + if speedup is not None and debugName in speedup.internal_result: + # this value is the result of the other nodes, such as + # ate::size + values.append(speedup.internal_result[debugName].item()) + else: + # if the corresponding value is a constant + values.append(_i.toIValue()) + return values + + +def parse_constant(cvalue, speedup): + """ + Parse the constant values from this Node + Parameters + ---------- + cvalue: Torch.C.Value + The cpp node of the target constant value. + speedup: ModelSpeedup + The Model speedup module. + Returns + ------- + value: int/float/tensor + The constant values parsed from the node. + """ + logger.debug('Try to parse the constant value: %s', cvalue.debugName()) + if cvalue.toIValue() is not None: + return cvalue.toIValue() + if cvalue.debugName() in speedup.internal_result: + return speedup.internal_result[cvalue.debugName()] + # Get the operator node of the this value + op_node = cvalue.node() + + inputs = op_node.inputs() + input_values = [parse_constant(_i, speedup) for _i in inputs] + func = trans_from_jit_to_python[op_node.kind()](op_node, speedup) + return func(*input_values) + + +def dropout_python(node, speedup): + return torch.dropout + + +def flatten_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + start_dim = inputs[1].toIValue() + end_dim = inputs[2].toIValue() + new_flatten = partial(torch.flatten, start_dim=start_dim, end_dim=end_dim) + return new_flatten + + +def relu_inplace_python(node, speedup): + return torch.relu_ + + +def relu_python(node, speedup): + return torch.relu + + +def sigmoid_python(node, speedup): + return torch.sigmoid + + +def mean_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + dim_list = translate_list(inputs[1], speedup) + keep_dim = inputs[2].toIValue() + new_mean = partial(torch.mean, dim=tuple(dim_list), keepdim=keep_dim) + return new_mean + + +def add_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + constant = None + for i in range(2): + input_i = inputs[i] + debug_name = input_i.debugName() + if debug_name not in speedup.internal_result: + # this input is a constant value + # TODO: what if this input is a constant tensor + + if input_i.toIValue() is not None: + constant = parse_constant(input_i, speedup) + break + if constant is None: + return torch.add + else: + new_add = partial(torch.add, constant) + return new_add + + +def floor_div_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + divisor = inputs[1] + constant = None + if divisor.debugName() not in speedup.internal_result: + # divisor is a constant value/tensor + constant = parse_constant(divisor, speedup) + if constant is None: + return torch.floor_divide + else: + new_op = partial(torch.floor_divide, other=constant) + return new_op + + +def mul_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + constant = None + for i in range(2): + input_i = inputs[i] + debug_name = input_i.debugName() + if debug_name not in speedup.internal_result: + constant = parse_constant(input_i, speedup) + # both two inputs cannot be constants at the same time + break + if constant is None: + return torch.mul + else: + new_mul = partial(torch.mul, constant) + return new_mul + + +def 
transpose_python(node, speedup): + return torch.t + + +def transpose2_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + dim_1 = inputs[1].toIValue() + dim_2 = inputs[2].toIValue() + new_transpose = partial(torch.transpose, dim0=dim_1, dim1=dim_2) + return new_transpose + + +def matmul_python(node, speedup): + return torch.matmul + + +def div_python(node, speedup): + # The second input parameter of torch.div can be a + # tensor or a constant, if it is a constant, we need + # to return + c_node = node.key_node + inputs = list(c_node.inputs()) + if inputs[1].debugName() in speedup.internal_result: + # the second input parameters is the output of the other + # nodes + return torch.div + else: + other = inputs[1].toIValue() + new_div = partial(torch.div, other=other) + + return new_div + + +def softmax_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + dim = inputs[1].toIValue() + new_softmax = partial(torch.softmax, dim=dim) + return new_softmax + + +def contiguous_python(node, speedup): + class contiguousModule(torch.nn.Module): + def forward(self, x): + return x.contiguous() + return contiguousModule() + + +def gelu_python(node, speedup): + return torch.nn.GELU() + + +def avgpool2d_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + kernel_size = translate_list(inputs[1], speedup) + stride = translate_list(inputs[2], speedup) + padding = translate_list(inputs[3], speedup) + new_avgpool = partial(torch.nn.functional.avg_pool2d, + kernel_size=kernel_size, stride=stride, padding=padding) + return new_avgpool + + +def adaptive_avgpool_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + output_size = translate_list(inputs[1], speedup) + new_avgpool = torch.nn.AdaptiveAvgPool2d(output_size) + return new_avgpool + + +def tupleunpack_python(node, speedup): + # Note: tuple unpack should only exists at the + # the end of the model, and is no need to replace/propagate mask + return None + + +def num2tensor_python(node, speedup): + return torch.nn.Identity() + + +def exp_python(node, speedup): + return torch.exp + + +def squeeze_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + dim = None + if len(inputs) > 1: + dim = parse_constant(inputs[1], speedup) + new_squeeze = partial(torch.squeeze, dim=dim) + return new_squeeze + +def unsqueeze_python(node, speedup): + c_node = node.key_node + inputs = list(c_node.inputs()) + dim = parse_constant(inputs[1], speedup) + new_unsqueeze = partial(torch.unsqueeze, dim=dim) + return new_unsqueeze + +########################################################## +# Split Line +# Following module/functions cannot be translated into a +# single function, so we use torch.nn.Module to wrap the +# the core function, and return the torch.nn.Module instead +########################################################## + + +def slice_python(node, speedup): + class SliceMoudle(torch.nn.Module): + def __init__(self, sliceobj): + super(SliceMoudle, self).__init__() + self.sliceobj = sliceobj + + def forward(self, x, *args): + # args is for the slice dimension and indexes, however, + # we already get them from the cpp nodes. 
Note, though, we + # don't need the slice indexes any more, we cannot remove this + # parameter here, because, there may be multiple inputs passed from + # previous nodes such as aten::size + logger.info('Model has Slice operation, and the operand size=%s, Slice object:%s', str( + x.size()), str(self.sliceobj)) + return x[self.sliceobj] + + c_node = node.key_node + inputs = list(c_node.inputs()) + + slice_dim = parse_constant(inputs[1], speedup) + slice_start = parse_constant(inputs[2], speedup) + slice_end = parse_constant(inputs[3], speedup) + slice_step = parse_constant(inputs[4], speedup) + slice_obj = slice(slice_start, slice_end, slice_step) + slice_list = [] + for _ in range(slice_dim): + slice_list.append(slice(None, None)) + logger.info('Slice dim:%s, Slice obj:%s', str(slice_dim), str(slice_obj)) + slice_list.append(slice_obj) + return SliceMoudle(tuple(slice_list)) + + +def select_python(node, speedup): + class SelectModule(torch.nn.Module): + def __init__(self, dim, index): + super(SelectModule, self).__init__() + self.dim = dim + self.index = index + + def forward(self, x): + return x.select(self.dim, self.index) + c_node = node.key_node + inputs = list(c_node.inputs()) + dim = inputs[1].toIValue() + index = inputs[2].toIValue() + return SelectModule(dim, index) + + +def size_python(node, speedup): + # return None + class SizeMoudle(torch.nn.Module): + def __init__(self, sizedim): + super(SizeMoudle, self).__init__() + self.sizedim = sizedim + + def forward(self, x): + return torch.as_tensor([x.size(self.sizedim)], dtype=torch.long) + # return torch.tensor(x.size(self.sizedim)) + c_node = node.key_node + inputs = list(c_node.inputs()) + size_dim = inputs[1].toIValue() + return SizeMoudle(size_dim) + + +def toint_python(node, speedup): + class ToIntModule(torch.nn.Module): + def forward(self, x): + return x.to(torch.int) + return ToIntModule() + + +def view_python(node, speedup): + class ViewModule(torch.nn.Module): + def __init__(self, shape): + super(ViewModule, self).__init__() + self.shape = shape + logger.info('View Module output size: %s', str(self.shape)) + + def forward(self, *args): + return args[0].view(self.shape) + c_node = node.key_node + inputs = list(c_node.inputs()) + shape = translate_list(inputs[1], speedup) + return ViewModule(shape) + + +def reshape_python(node, speedup): + class ReshapeModule(torch.nn.Module): + def __init__(self, shape): + super(ReshapeModule, self).__init__() + self.shape = shape + logger.info('Reshape Module output size: %s', str(self.shape)) + + def forward(self, *args): + return args[0].view(self.shape) + c_node = node.key_node + inputs = list(c_node.inputs()) + shape = translate_list(inputs[1], speedup) + return ReshapeModule(shape) + + +def permute_python(node, speedup): + class PermuteModule(torch.nn.Module): + def __init__(self, dimlist): + super(PermuteModule, self).__init__() + self.dimlist = dimlist + + def forward(self, x): + return x.permute(self.dimlist) + c_node = node.key_node + inputs = list(c_node.inputs()) + dim_list = translate_list(inputs[1], speedup) + return PermuteModule(dim_list) + + +def getattr_python(node, speedup): + """ + Note: Ops started with Prim:: is not taken as the key node, + so we directly pass the Cpp node into this funciton. + Parameters + ---------- + node: torch._C.Node + The cpp node of prim::Getattr + speedup: ModelSpeedup + The corresponding speedup object. 
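+    Returns
+    -------
+    GetModule
+        A small ``torch.nn.Module`` whose ``forward(obj)`` returns
+        ``getattr(obj, key)``, where ``key`` is parsed from the node string.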
+ """ + class GetModule(torch.nn.Module): + def __init__(self, key): + super(GetModule, self).__init__() + self.key = key + + def forward(self, obj): + logger.info('Get attribute: %s', self.key) + return getattr(obj, self.key) + # get the name of the attribute, for example + # prim::GetAttr[name="module_list"](%self.1) + assert node.kind() == 'prim::GetAttr' + pattern = '\[name=\"(.*?)\"\]' + key_words = re.findall(pattern, str(node)) + assert len(key_words) == 1 + return GetModule(key_words[0]) + + +def upsample_bilinear2d_python(node, speedup): + class UpsampleModule(torch.nn.Module): + def __init__(self, size_list, scale_list): + super(UpsampleModule, self).__init__() + self.size_list = size_list + self.scale_list = scale_list + + def forward(self, *args): + """ + The first input of args is the target tensor to upsample + , the following parameters is useless, because we already + get the size_list and the scale_list by parsing the cpp_nodes. + """ + return torch.nn.functional.upsample_bilinear(args[0], + size=self.size_list, scale_factor=self.scale_list) + c_node = node.key_node + inputs = list(c_node.inputs()) + size_list_node = inputs[1].node() + scale_list_node = inputs[3].node() + size_list = None + scale_list = None + + if size_list_node.kind() == 'prim::ListConstruct': + size_list = translate_list(inputs[1], speedup) + if scale_list_node.kind() == 'prim::ListConstruct': + scale_list = translate_list(inputs[3], speedup) + return UpsampleModule(size_list, scale_list) + + +def typeas_python(node, speedup): + """ + currently only support type_as float. + TODO: support more types in the type_as, need to figure out + how to get the scalar type from torch._C.TensorType. + """ + class TypeasModule(torch.nn.Module): + def __init__(self, dtype=torch.float): + self.example = torch.zeros(1, dtype=dtype) + + def forward(self, x): + return x.type_as(self.example) + return TypeasModule() + + +def to_python(node, speedup): + # for the time being, only device parameters are supported + class ToModule(torch.nn.Module): + def __init__(self, device): + super(ToModule, self).__init__() + + def forward(self, x): + return x.to(device) + + c_node = node.key_node + inputs = list(c_node.inputs()) + device = inputs[3].toIValue() + return ToModule(device) + + +def cat_python(node, speedup): + class CatModule(torch.nn.Module): + def __init__(self, cat_dim): + super(CatModule, self).__init__() + self.cat_dim = cat_dim + + def forward(self, *args): + return torch.cat(args, dim=self.cat_dim) + + c_node = node.key_node + inputs = list(c_node.inputs()) + dim = inputs[1].toIValue() + return CatModule(dim) + + +trans_from_jit_to_python = { + 'aten::add': add_python, + 'aten::add_': add_python, + 'aten::mul': mul_python, + 'aten::mul_': mul_python, + 'aten::relu': relu_python, + 'aten::relu_': relu_inplace_python, + 'aten::sigmoid': sigmoid_python, + 'aten::sigmoid_': sigmoid_python, + # tanh behaives like relu + 'aten::tanh': relu_python, + 'aten::tanh_': relu_python, + 'aten::flatten': flatten_python, + 'aten::mean': mean_python, + 'aten::dropout': dropout_python, + 'aten::slice': slice_python, + 'aten::select': select_python, + 'aten::size': size_python, + 'aten::t': transpose_python, + 'aten::transpose': transpose2_python, + 'aten::Int': toint_python, + 'aten::view': view_python, + 'aten::reshape': reshape_python, + 'aten::permute': permute_python, + 'aten::matmul': matmul_python, + 'aten::div': div_python, + 'aten::floor_divide': floor_div_python, + 'aten::softmax': softmax_python, + 'aten::contiguous': 
contiguous_python, + 'aten::gelu': gelu_python, + 'aten::cat': cat_python, + 'aten::avg_pool2d': avgpool2d_python, + 'aten::max_pool2d': avgpool2d_python, + 'aten::adaptive_avg_pool2d': adaptive_avgpool_python, + 'aten::to': to_python, + 'aten::type_as': typeas_python, + 'aten::upsample_bilinear2d': upsample_bilinear2d_python, + 'aten::exp': exp_python, + 'aten::squeeze': squeeze_python, + 'aten::unsqueeze': unsqueeze_python, + 'prim::TupleUnpack': tupleunpack_python, + 'prim::ListUnpack': tupleunpack_python, + 'prim::NumToTensor': num2tensor_python, + 'prim::GetAttr': getattr_python + +} + + +def jit_to_python_function(node, speedup): + """ + Return a callable object to inference the mask according to the + node.op_type. + + Parameters + --------- + node: NodeGroup + The target node to inference the mask + speedup: ModelSpeedup + The speedup object of the target model. + + Returns + ------ + func: callable object(nn.Module/function) + Return the translated function that used to inference the mask + , if current op_type is not supported, then we return None. + """ + logger.debug( + 'Translate C function %s into its python version', node.op_type) + if node.op_type not in trans_from_jit_to_python: + logger.error( + '%s is not Supported! Please report an issue at https://github.com/microsoft/nni. Thanks~', node.op_type) + # return None to skip the mask inference for this node + return None + return trans_from_jit_to_python[node.op_type](node, speedup) diff --git a/nni/compression/pytorch/utils/__init__.py b/nni/compression/pytorch/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..19f3dc51a3f421beb1ceb0cf7c043684c0855c8b --- /dev/null +++ b/nni/compression/pytorch/utils/__init__.py @@ -0,0 +1,23 @@ +from .utils import * +from .shape_dependency import * +from .shape_dependency import ReshapeDependency + +def not_safe_to_prune(model, dummy_input): + """ + Get the layers that are not safe to prune(may bring the shape conflict). + For example, if the output tensor of a conv layer is directly followed by + a shape-dependent function(such as reshape/view), then this conv layer + may be not safe to be pruned. Pruning may change the output shape of + this conv layer and result in shape problems. This function find all the + layers that directly followed by the shape-dependent functions(view, reshape, etc). + If you run the inference after the speedup and run into a shape related error, + please exclude the layers returned by this function and try again. + + Parameters + ---------- + model: torch.nn.Module + The target model to prune. + dummy_input: torch.Tensor/list of torch.Tensor/tuple of Tensor + """ + reshape_dset = ReshapeDependency(model, dummy_input) + return reshape_dset.dependency_sets diff --git a/nni/compression/pytorch/utils/config_validation.py b/nni/compression/pytorch/utils/config_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..930e4e686e0c41bca92208ee1c0f5d5c11b4ce19 --- /dev/null +++ b/nni/compression/pytorch/utils/config_validation.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
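+
+# This module validates user-provided compression config lists, e.g. (an
+# illustrative config; the exact keys depend on the concrete pruner or
+# quantizer being configured):
+#   config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]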
+
+from schema import Schema, And, SchemaError
+
+def validate_op_names(model, op_names, logger):
+    found_names = set(map(lambda x: x[0], model.named_modules()))
+
+    not_found_op_names = list(set(op_names) - found_names)
+    if not_found_op_names:
+        logger.warning('op_names %s not found in model', not_found_op_names)
+
+    return True
+
+def validate_op_types(model, op_types, logger):
+    found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules()))
+
+    not_found_op_types = list(set(op_types) - found_types)
+    if not_found_op_types:
+        logger.warning('op_types %s not found in model', not_found_op_types)
+
+    return True
+
+def validate_op_types_op_names(data):
+    if not ('op_types' in data or 'op_names' in data):
+        raise SchemaError('Either op_types or op_names must be specified.')
+    return True
+
+class CompressorSchema:
+    def __init__(self, data_schema, model, logger):
+        assert isinstance(data_schema, list) and len(data_schema) <= 1
+        self.data_schema = data_schema
+        self.compressor_schema = Schema(self._modify_schema(data_schema, model, logger))
+
+    def _modify_schema(self, data_schema, model, logger):
+        if not data_schema:
+            return data_schema
+
+        for k in data_schema[0]:
+            old_schema = data_schema[0][k]
+            if k == 'op_types' or (isinstance(k, Schema) and k._schema == 'op_types'):
+                new_schema = And(old_schema, lambda n: validate_op_types(model, n, logger))
+                data_schema[0][k] = new_schema
+            if k == 'op_names' or (isinstance(k, Schema) and k._schema == 'op_names'):
+                new_schema = And(old_schema, lambda n: validate_op_names(model, n, logger))
+                data_schema[0][k] = new_schema
+
+        data_schema[0] = And(data_schema[0], lambda d: validate_op_types_op_names(d))
+
+        return data_schema
+
+    def validate(self, data):
+        self.compressor_schema.validate(data)
+
+def validate_exclude_sparsity(data):
+    if not ('exclude' in data or 'sparsity' in data):
+        raise SchemaError('Either sparsity or exclude must be specified.')
+    return True
+
+def validate_exclude_quant_types_quant_bits(data):
+    if not ('exclude' in data or ('quant_types' in data and 'quant_bits' in data)):
+        raise SchemaError('Either (quant_types and quant_bits) or exclude must be specified.')
+    return True
+
+class PrunerSchema(CompressorSchema):
+    def _modify_schema(self, data_schema, model, logger):
+        data_schema = super()._modify_schema(data_schema, model, logger)
+        data_schema[0] = And(data_schema[0], lambda d: validate_exclude_sparsity(d))
+        return data_schema
+
+class QuantizerSchema(CompressorSchema):
+    def _modify_schema(self, data_schema, model, logger):
+        data_schema = super()._modify_schema(data_schema, model, logger)
+        data_schema[0] = And(data_schema[0], lambda d: validate_exclude_quant_types_quant_bits(d))
+        return data_schema
diff --git a/nni/compression/pytorch/utils/counter.py b/nni/compression/pytorch/utils/counter.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6d7de89f2f340ad2ba8750bcb58b4ca054c8f54
--- /dev/null
+++ b/nni/compression/pytorch/utils/counter.py
@@ -0,0 +1,403 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
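+
+# A minimal usage sketch (``model`` is user-provided; when a tuple of ints is
+# given, it is treated as an input shape and a dummy input is created):
+#
+#     from nni.compression.pytorch.utils.counter import count_flops_params
+#
+#     flops, params, results = count_flops_params(model, (1, 3, 224, 224))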
+
+import functools
+from collections import Counter
+from prettytable import PrettyTable
+
+import torch
+import torch.nn as nn
+from torch.nn.utils.rnn import PackedSequence
+from nni.compression.pytorch.compressor import PrunerModuleWrapper
+
+
+__all__ = ['count_flops_params']
+
+
+def _get_params(m):
+    return sum([p.numel() for p in m.parameters()])
+
+
+class ModelProfiler:
+
+    def __init__(self, custom_ops=None, mode='default'):
+        """
+        ModelProfiler is used to share state with the hooks.
+
+        Parameters
+        ----------
+        custom_ops: dict
+            A mapping from module types (torch.nn.Module) to custom operations.
+            A custom operation is a callback function that calculates the
+            module flops, parameters and the weight shape; it overwrites the
+            default operation. For reference, please see ``self.ops``.
+        mode: str
+            The mode of how to collect information. If the mode is set to ``default``,
+            only the information of convolution, linear and rnn modules will be collected.
+            If the mode is set to ``full``, other operations will also be collected.
+        """
+        self.ops = {
+            nn.Conv1d: self._count_convNd,
+            nn.Conv2d: self._count_convNd,
+            nn.Conv3d: self._count_convNd,
+            nn.ConvTranspose1d: self._count_convNd,
+            nn.ConvTranspose2d: self._count_convNd,
+            nn.ConvTranspose3d: self._count_convNd,
+            nn.Linear: self._count_linear,
+            nn.RNNCell: self._count_rnn_cell,
+            nn.GRUCell: self._count_gru_cell,
+            nn.LSTMCell: self._count_lstm_cell,
+            nn.RNN: self._count_rnn,
+            nn.GRU: self._count_gru,
+            nn.LSTM: self._count_lstm
+        }
+        self._count_bias = False
+        if mode == 'full':
+            self.ops.update({
+                nn.BatchNorm1d: self._count_bn,
+                nn.BatchNorm2d: self._count_bn,
+                nn.BatchNorm3d: self._count_bn,
+                nn.LeakyReLU: self._count_relu,
+                nn.AvgPool1d: self._count_avgpool,
+                nn.AvgPool2d: self._count_avgpool,
+                nn.AvgPool3d: self._count_avgpool,
+                nn.AdaptiveAvgPool1d: self._count_adap_avgpool,
+                nn.AdaptiveAvgPool2d: self._count_adap_avgpool,
+                nn.AdaptiveAvgPool3d: self._count_adap_avgpool,
+                nn.Upsample: self._count_upsample,
+                nn.UpsamplingBilinear2d: self._count_upsample,
+                nn.UpsamplingNearest2d: self._count_upsample
+            })
+            self._count_bias = True
+
+        if custom_ops is not None:
+            self.ops.update(custom_ops)
+
+        self.mode = mode
+        self.results = []
+
+    def _push_result(self, result):
+        self.results.append(result)
+
+    def _get_result(self, m, flops):
+        # assume the weight is called `weight`, otherwise this is not applicable
+        # if the user customizes the operation, the callback function should
+        # return a dict result, including the calculated flops, params and weight_shape.
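+        # for example: {'flops': <number>, 'params': <int>, 'weight_shape': <tuple>}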
+
+        result = {
+            'flops': flops,
+            'params': _get_params(m),
+            'weight_shape': tuple(m.weight.size()) if hasattr(m, 'weight') else 0,
+        }
+        return result
+
+    def _count_convNd(self, m, x, y):
+        cin = m.in_channels
+        kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
+        output_size = torch.zeros(y.size()[2:]).numel()
+        cout = y.size()[1]
+
+        if hasattr(m, 'weight_mask'):
+            cout = m.weight_mask.sum() // (cin * kernel_ops)
+
+        total_ops = cout * output_size * kernel_ops * cin // m.groups  # cout x oW x oH
+
+        if self._count_bias:
+            bias_flops = 1 if m.bias is not None else 0
+            total_ops += cout * output_size * bias_flops
+
+        return self._get_result(m, total_ops)
+
+    def _count_linear(self, m, x, y):
+        out_features = m.out_features
+        if hasattr(m, 'weight_mask'):
+            out_features = m.weight_mask.sum() // m.in_features
+        total_ops = out_features * m.in_features
+
+        if self._count_bias:
+            bias_flops = 1 if m.bias is not None else 0
+            total_ops += out_features * bias_flops
+
+        return self._get_result(m, total_ops)
+
+    def _count_bn(self, m, x, y):
+        total_ops = 2 * x[0][0].numel()
+        return self._get_result(m, total_ops)
+
+    def _count_relu(self, m, x, y):
+        total_ops = x[0][0].numel()
+        return self._get_result(m, total_ops)
+
+    def _count_avgpool(self, m, x, y):
+        total_ops = y[0].numel()
+        return self._get_result(m, total_ops)
+
+    def _count_adap_avgpool(self, m, x, y):
+        kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()
+        total_add = int(torch.prod(kernel))
+        total_div = 1
+        kernel_ops = total_add + total_div
+        num_elements = y[0].numel()
+        total_ops = kernel_ops * num_elements
+
+        return self._get_result(m, total_ops)
+
+    def _count_upsample(self, m, x, y):
+        if m.mode == 'linear':
+            total_ops = y[0].nelement() * 5  # 2 muls + 3 adds
+        elif m.mode == 'bilinear':
+            # https://en.wikipedia.org/wiki/Bilinear_interpolation
+            total_ops = y[0].nelement() * 11  # 6 muls + 5 adds
+        elif m.mode == 'bicubic':
+            # https://en.wikipedia.org/wiki/Bicubic_interpolation
+            # Product matrix [4x4] x [4x4] x [4x4]
+            ops_solve_A = 224  # 128 muls + 96 adds
+            ops_solve_p = 35  # 16 muls + 12 adds + 4 muls + 3 adds
+            total_ops = y[0].nelement() * (ops_solve_A + ops_solve_p)
+        elif m.mode == 'trilinear':
+            # https://en.wikipedia.org/wiki/Trilinear_interpolation
+            # can be viewed as 2 bilinear + 1 linear
+            total_ops = y[0].nelement() * (13 * 2 + 5)
+        else:
+            total_ops = 0
+
+        return self._get_result(m, total_ops)
+
+    def _count_cell_flops(self, input_size, hidden_size, cell_type):
+        # h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
+        total_ops = hidden_size * (input_size + hidden_size) + hidden_size
+
+        if self._count_bias:
+            total_ops += hidden_size * 2
+
+        if cell_type == 'rnn':
+            return total_ops
+
+        if cell_type == 'gru':
+            # r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
+            # z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
+            # n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\
+            total_ops *= 3
+
+            # r hadamard : r * (~)
+            total_ops += hidden_size
+
+            # h' = (1 - z) * n + z * h
+            # hadamard hadamard add
+            total_ops += hidden_size * 3
+
+        elif cell_type == 'lstm':
+            # i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
+            # f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
+            # o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
+            # g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
+            total_ops *= 4
+
+            # c' = f * c + i * g
+            # hadamard hadamard add
+            total_ops += hidden_size * 3
+
+            # h' = o * \tanh(c')
+            total_ops += hidden_size
+
+        return total_ops
+
+    def _count_rnn_cell(self, m, x, y):
+        total_ops = self._count_cell_flops(m.input_size, m.hidden_size, 'rnn')
+        return self._get_result(m, total_ops)
+
+    def _count_gru_cell(self, m, x, y):
+        total_ops = self._count_cell_flops(m.input_size, m.hidden_size, 'gru')
+        return self._get_result(m, total_ops)
+
+    def _count_lstm_cell(self, m, x, y):
+        total_ops = self._count_cell_flops(m.input_size, m.hidden_size, 'lstm')
+        return self._get_result(m, total_ops)
+
+    def _get_bsize_nsteps(self, m, x):
+        if isinstance(x[0], PackedSequence):
+            batch_size = torch.max(x[0].batch_sizes)
+            num_steps = x[0].batch_sizes.size(0)
+        else:
+            if m.batch_first:
+                batch_size = x[0].size(0)
+                num_steps = x[0].size(1)
+            else:
+                batch_size = x[0].size(1)
+                num_steps = x[0].size(0)
+
+        return batch_size, num_steps
+
+    def _count_rnn_module(self, m, x, y, module_name):
+        input_size = m.input_size
+        hidden_size = m.hidden_size
+        num_layers = m.num_layers
+
+        _, num_steps = self._get_bsize_nsteps(m, x)
+        total_ops = self._count_cell_flops(input_size, hidden_size, module_name)
+
+        for _ in range(num_layers - 1):
+            if m.bidirectional:
+                cell_flops = self._count_cell_flops(hidden_size * 2, hidden_size, module_name) * 2
+            else:
+                cell_flops = self._count_cell_flops(hidden_size, hidden_size, module_name)
+            total_ops += cell_flops
+
+        total_ops *= num_steps
+        return total_ops
+
+    def _count_rnn(self, m, x, y):
+        total_ops = self._count_rnn_module(m, x, y, 'rnn')
+
+        return self._get_result(m, total_ops)
+
+    def _count_gru(self, m, x, y):
+        total_ops = self._count_rnn_module(m, x, y, 'gru')
+
+        return self._get_result(m, total_ops)
+
+    def _count_lstm(self, m, x, y):
+        total_ops = self._count_rnn_module(m, x, y, 'lstm')
+
+        return self._get_result(m, total_ops)
+
+    def count_module(self, m, x, y, name):
+        # assume x is a tuple containing a single tensor
+        result = self.ops[type(m)](m, x, y)
+        output_size = y[0].size() if isinstance(y, tuple) else y.size()
+
+        total_result = {
+            'name': name,
+            'input_size': tuple(x[0].size()),
+            'output_size': tuple(output_size),
+            'module_type': type(m).__name__,
+            **result
+        }
+
+        self._push_result(total_result)
+
+    def sum_flops(self):
+        return sum([s['flops'] for s in self.results])
+
+    def sum_params(self):
+        return sum({s['name']: s['params'] for s in self.results}.values())
+
+    def format_results(self):
+        table = PrettyTable()
+        name_counter = Counter([s['name'] for s in self.results])
+        has_multi_use = any(map(lambda v: v > 1, name_counter.values()))
+        name_counter = Counter()  # clear the counter to count from 0
+
+        headers = [
+            'Index',
+            'Name',
+            'Type',
+            'Weight Shape',
+            'FLOPs',
+            '#Params',
+        ]
+        if has_multi_use:
+            headers.append('#Call')
+
+        table.field_names = headers
+        for i, result in enumerate(self.results):
+            flops_count = int(result['flops'].item()) if isinstance(result['flops'], torch.Tensor) else int(result['flops'])
+            row_values = [
+                i,
+                result['name'],
+                result['module_type'],
+                str(result['weight_shape']),
+                flops_count,
+                result['params'],
+            ]
+            name_counter[result['name']] += 1
+            if has_multi_use:
+                row_values.append(name_counter[result['name']])
+            table.add_row(row_values)
+        return table
+
+
+def count_flops_params(model, x, custom_ops=None, verbose=True, mode='default'):
+    """
+    Count FLOPs and Params of the given model. This function would
+    identify the mask on the module and take the pruned shape into consideration.
+    Note that, for structured pruning, we only identify the remaining filters
+    according to their masks, and do not take the pruned input channels into consideration,
+    so the calculated FLOPs will be larger than the real number.
+
+    The FLOPs are counted "per sample", which means that even if the input has a batch
+    size larger than 1, the calculated FLOPs should be the same as with batch size 1.
+
+    Parameters
+    ----------
+    model : nn.Module
+        Target model.
+    x : tuple or tensor
+        The input shape of data (a tuple), a tensor or a tuple of tensor as input data.
+    custom_ops : dict
+        A mapping from module types (torch.nn.Module) to custom operations.
+        A custom operation is a callback function that calculates the module
+        flops and parameters; it overwrites the default operation.
+        For reference, please see ``ops`` in ``ModelProfiler``.
+    verbose : bool
+        If False, mute detailed information about modules. Default is True.
+    mode : str
+        The mode of how to collect information. If the mode is set to ``default``,
+        only the information of convolution and linear will be collected.
+        If the mode is set to ``full``, other operations will also be collected.
+
+    Returns
+    -------
+    tuple of int, int and dict
+        Representing total FLOPs, total parameters, and a detailed list of results respectively.
+        The list of results is a list of dict, each of which contains (name, module_type, weight_shape,
+        flops, params, input_size, output_size) as its keys.
+    """
+
+    assert isinstance(x, tuple) or isinstance(x, torch.Tensor)
+    assert mode in ['default', 'full']
+
+    original_device = next(model.parameters()).device
+    training = model.training
+
+    if isinstance(x, tuple) and all(isinstance(t, int) for t in x):
+        x = (torch.zeros(x).to(original_device), )
+    elif torch.is_tensor(x):
+        x = (x.to(original_device), )
+    else:
+        # materialize the moved tensors into a tuple so that they can be reused
+        x = tuple(t.to(original_device) for t in x)
+
+    handler_collection = []
+    profiler = ModelProfiler(custom_ops, mode)
+
+    prev_m = None
+    for name, m in model.named_modules():
+        # dealing with the weight mask here
+        if isinstance(prev_m, PrunerModuleWrapper):
+            # weight mask is set to the weight mask of its parent (wrapper)
+            weight_mask = prev_m.weight_mask
+            m.weight_mask = weight_mask
+        prev_m = m
+
+        if type(m) in profiler.ops:
+            # it is a leaf node
+            _handler = m.register_forward_hook(functools.partial(profiler.count_module, name=name))
+            handler_collection.append(_handler)
+
+    model.eval()
+
+    with torch.no_grad():
+        model(*x)
+
+    # restore the original status
+    model.train(training).to(original_device)
+    for handler in handler_collection:
+        handler.remove()
+
+    if verbose:
+        # print detailed information
+        print(profiler.format_results())
+        print(f'FLOPs total: {profiler.sum_flops()}')
+        print(f'#Params total: {profiler.sum_params()}')
+
+    return profiler.sum_flops(), profiler.sum_params(), profiler.results
diff --git a/nni/compression/pytorch/utils/mask_conflict.py b/nni/compression/pytorch/utils/mask_conflict.py
new file mode 100644
index 0000000000000000000000000000000000000000..f72d67de22841e398e607b96810cbb106f887d88
--- /dev/null
+++ b/nni/compression/pytorch/utils/mask_conflict.py
@@ -0,0 +1,398 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
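+
+# A minimal usage sketch (mask file, model and dummy input are user-provided):
+#
+#     from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict
+#
+#     fixed_masks = fix_mask_conflict('./mask.pth', model, dummy_input)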
+import os
+import logging
+import torch
+import numpy as np
+from .shape_dependency import ChannelDependency, GroupDependency, InputChannelDependency
+from .utils import get_module_by_name
+# logging.basicConfig(level = logging.DEBUG)
+_logger = logging.getLogger('FixMaskConflict')
+
+
+def fix_mask_conflict(masks, model, dummy_input, traced=None):
+    """
+    MaskConflict fixes the mask conflicts for the channel dependencies
+    and the group dependencies.
+
+    Parameters
+    ----------
+    masks : dict/str
+        A dict object that stores the masks, or the path of the mask file
+    model : torch.nn.Module
+        model to fix the mask conflict
+    dummy_input : torch.Tensor/list of tensors/dict of tensors
+        input example to trace the model
+    traced : torch._C.torch.jit.TopLevelTracedModule
+        the traced model of the target model; if this parameter is not None,
+        we do not use the model and dummy_input to get the trace graph.
+    """
+    if isinstance(masks, str):
+        # if the input is the path of the mask file
+        assert os.path.exists(masks)
+        masks = torch.load(masks)
+    assert len(masks) > 0, 'Mask tensor cannot be empty'
+    # If the user provides the model and dummy_input, we trace the model
+    # here once, so that GroupMaskConflict and ChannelMaskConflict can
+    # reuse this traced model.
+    if traced is None:
+        assert model is not None and dummy_input is not None
+        training = model.training
+        # We need to trace the model in eval mode
+        model.eval()
+        kw_args = {}
+        if torch.__version__ >= '1.6.0':
+            # only pytorch with version greater than 1.6.0 has the strict option
+            kw_args['strict'] = False
+        traced = torch.jit.trace(model, dummy_input, **kw_args)
+        model.train(training)
+
+    fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced)
+    masks = fix_group_mask.fix_mask()
+    fix_channel_mask = ChannelMaskConflict(masks, model, dummy_input, traced)
+    masks = fix_channel_mask.fix_mask()
+    return masks
+
+
+class MaskFix:
+    def __init__(self, masks, model=None, dummy_input=None, traced=None):
+        # check if the parameters are valid
+        parameter_valid = False
+        if traced is not None:
+            parameter_valid = True
+        elif (model is not None) and (dummy_input is not None):
+            parameter_valid = True
+        if not parameter_valid:
+            raise Exception('The input parameters are invalid!')
+        self.model = model
+        self.dummy_input = dummy_input
+        self.traced = traced
+        self.masks = masks
+
+    def fix_mask(self):
+        raise NotImplementedError
+
+    def export(self, path):
+        """
+        Export the masks after fixing the conflict to file.
+        """
+        torch.save(self.masks, path)
+
+
+class GroupMaskConflict(MaskFix):
+    def __init__(self, masks, model, dummy_input, traced=None):
+        """
+        GroupMaskConflict fixes the mask conflict between layers that
+        have group dependencies with each other.
+
+        Parameters
+        ----------
+        masks : dict
+            a dict object that stores the masks
+        model : torch.nn.Module
+            model to fix the mask conflict
+        dummy_input : torch.Tensor
+            input example to trace the model
+        traced : torch._C.torch.jit.TopLevelTracedModule
+            the traced model of the target model; if this parameter is not None,
+            we do not use the model and dummy_input to get the trace graph.
+        """
+        super(GroupMaskConflict, self).__init__(
+            masks, model, dummy_input, traced)
+
+    def fix_mask(self):
+        """
+        Fix the mask conflict before the mask inference for the layers that
+        have group dependencies. This function should be called before the
+        mask inference of the 'speedup' module.
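+
+        Returns
+        -------
+        dict
+            The masks after the group dependency conflicts are fixed.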
+ """ + group_depen = GroupDependency( + self.model, self.dummy_input, self.traced) + depens = group_depen.dependency + min_groups = group_depen.min_groups + _logger.info(depens) + for layername in depens: + group_max = depens[layername] + group_min = min_groups[layername] + if layername not in self.masks: + # this layer not pruned + continue + w_mask = self.masks[layername]['weight'] + shape = w_mask.size() + count = np.prod(shape[1:]) + all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist() + all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist() + if len(all_ones) + len(all_zeros) < w_mask.size(0): + # In fine-grained pruning, skip this layer + _logger.info('Layers %s using fine-grained pruning', layername) + continue + assert shape[0] % group_max == 0 + # Find the number of masked filter for each group (mini_masked). + # Because we have to keep the pruned filter can still + # be divided into the same number of groups, so we only can + # prune mini_masked filters for each group. + step = shape[0] / group_max + group_masked = [] + for i in range(group_max): + _start = step * i + _end = step * (i + 1) + _tmp_list = list( + filter(lambda x: _start <= x and x < _end, all_zeros)) + group_masked.append(_tmp_list) + mini_masked = min([len(x) for x in group_masked]) + need_unmask = set() + for gm in group_masked: + for i in range(mini_masked, len(gm)): + # To keep the output channel number still being divisible to + # groups, we set the masks of following filters to be zero. + pos = gm[i] + need_unmask.add(pos) + step = shape[0] / group_min + for i in range(group_min): + _start = step * i + _end = step * (i+1) + _tmp_list = list( + filter(lambda x: _start <= x and x < _end, all_zeros)) + if len(_tmp_list) == step: + # if the whole group is removed, then we don't have to unmask for + # the filters in this group + for pos in _tmp_list: + if pos in need_unmask: + need_unmask.remove(pos) + for pos in need_unmask: + self.masks[layername]['weight'][pos] = torch.ones(shape[1:]) + if hasattr(self.masks[layername], 'bias'): + self.masks[layername]['bias'][pos] = 1 + return self.masks + + +class ChannelMaskConflict(MaskFix): + def __init__(self, masks, model, dummy_input, traced=None): + """ + ChannelMaskConflict fix the mask conflict between the layers that + has channel dependecy with each other. + + Parameters + ---------- + masks : dict + a dict object that stores the masks + model : torch.nn.Module + model to fix the mask conflict + dummy_input : torch.Tensor + input example to trace the model + graph : torch._C.torch.jit.TopLevelTracedModule + the traced graph of the target model, is this parameter is not None, + we donnot use the model and dummpy_input to get the trace graph. + """ + super(ChannelMaskConflict, self).__init__( + masks, model, dummy_input, traced) + self.conv_prune_dim = detect_mask_prune_dim(masks, model) + self.channel_prune_type = detect_channel_prune_type(masks, model) + _logger.info('Dectected conv prune dim" %d', self.conv_prune_dim) + + def fix_mask(self): + """ + Fix the mask conflict before the mask inference for the layers that + has shape dependencies. This function should be called before the + mask inference of the 'speedup' module. Only structured pruning masks + are supported. 
+ """ + if self.conv_prune_dim == 0: + channel_depen = ChannelDependency( + self.model, self.dummy_input, self.traced, self.channel_prune_type) + + else: + channel_depen = InputChannelDependency( + self.model, self.dummy_input, self.traced) + depen_sets = channel_depen.dependency_sets + sum_idx = (1, 2, 3) if self.conv_prune_dim == 0 else (0, 2, 3) + + (_tmp_name, _tmp_tensor) = list(self.masks.items())[0] + device = _tmp_tensor['weight'].device + + for dset in depen_sets: + if len(dset) <= 1: + continue + # channel_masks is a list, each element is None or a vector, for example: + # [[0, 1, 1, 0, 0], [0, 0, 1, 1, 0], None], None means no channel + # is pruned. + channel_masks = [] + fine_grained = False + for name in dset: + if name in self.masks: + _, m = get_module_by_name(self.model, name) + assert m is not None + mask = self.masks[name]['weight'] + if type(m).__name__ == 'Conv2d': + channel_mask = (mask.abs().sum(sum_idx) != 0).int() + channel_masks.append(channel_mask) + if (channel_mask.sum() * (mask.numel() / mask.shape[self.conv_prune_dim])).item() != (mask > 0).sum().item(): + fine_grained = True + elif type(m).__name__ == 'Linear': + if self.conv_prune_dim == 1: + channel_masks.append( + (mask.abs().sum(0) != 0).int()) + else: + channel_masks.append( + (mask.abs().sum(1) != 0).int()) + elif type(m).__name__ == 'BatchNorm2d': + channel_masks.append(mask.int()) + elif type(m).__name__ == 'ConvTranspose2d': + # convtranspose have difference memory layout, so that we need create + # a tmp_sum_idx for conv_transpose + tmp_sum_idx = ( + 0, 2, 3) if self.conv_prune_dim == 0 else (1, 2, 3) + channel_mask = (mask.abs().sum(tmp_sum_idx) != 0).int() + channel_masks.append(channel_mask) + if (channel_mask.sum() * (mask.numel() / mask.shape[1 - self.conv_prune_dim])).item() != (mask > 0).sum().item(): + fine_grained = True + else: + raise RuntimeError( + f'unsupported module type: {type(m).__name__}') + else: + # no mask means not pruned, equivlent to full masks + channel_masks.append(None) + if fine_grained: + _logger.info("Fine-grianed mask detected") + if all(x is None for x in channel_masks): + continue + num_channels_list = [len(x) + for x in channel_masks if x is not None] + # number of channels in same set should be identical + assert len(set(num_channels_list)) == 1 + num_channels = num_channels_list[0] + + for i, dim_mask in enumerate(channel_masks): + if dim_mask is None: + channel_masks[i] = torch.ones( + num_channels).int().to(device) + + # merge masks with 'or' + merged_channel_mask = channel_masks[0].clone() + for i in range(1, len(channel_masks)): + merged_channel_mask = ( + (merged_channel_mask + channel_masks[i]) != 0).int() + + merged_index = torch.nonzero(merged_channel_mask, as_tuple=True)[0] + + for name in dset: + if name not in self.masks: + assert all(merged_channel_mask) + continue + orig_mask = self.masks[name]['weight'] + _, m = get_module_by_name(self.model, name) + new_mask = torch.zeros_like(orig_mask) + if type(m).__name__ == 'Conv2d': + if self.conv_prune_dim == 0: + new_mask[merged_index, :, :, :] = 1. + else: + new_mask[:, merged_index, :, :] = 1. + elif type(m).__name__ == 'Linear': + if self.conv_prune_dim == 0: + new_mask[merged_index, :] = 1 + elif self.conv_prune_dim == 1: + new_mask[:, merged_index] = 1. 
+                elif type(m).__name__ == 'BatchNorm2d':
+                    new_mask = merged_channel_mask.type_as(orig_mask)
+                else:
+                    raise RuntimeError(
+                        f'unsupported module type: {type(m).__name__}')
+                self.masks[name]['weight'] = new_mask
+                if 'bias' in self.masks[name] and self.masks[name]['bias'] is not None:
+                    if type(m).__name__ == 'Conv2d':
+                        assert self.conv_prune_dim == 0
+                    if self.conv_prune_dim == 0:
+                        self.masks[name]['bias'] = merged_channel_mask.type_as(
+                            self.masks[name]['bias'])
+
+        return self.masks
+
+def detect_channel_prune_type(masks, model):
+    """
+    A channel can be pruned in two ways: 1) pruning
+    the corresponding filter of the conv layer (all the
+    filter-related pruners), 2) pruning the BN layer that
+    follows a conv (Slim pruner). This function finds
+    the pruning type of the masks.
+
+    Parameters
+    ----------
+    masks: dict
+        A dict object that stores the masks.
+    model: nn.Module
+        Model object which the mask can be applied on.
+
+    Returns
+    -------
+    prune_type: str
+        Could be Filter or Batchnorm
+    """
+    prune_type = 'Filter'
+    all_batch_norm = True
+    for layer_name in masks:
+        _, m = get_module_by_name(model, layer_name)
+        if m is None or (not isinstance(m, torch.nn.BatchNorm2d)):
+            all_batch_norm = False
+            break
+    if all_batch_norm:
+        # if all masks are for batchnorm layers, then the prune_type is BatchNorm
+        # Note, actually we currently do not support pruning both Conv and BatchNorm
+        # at the same time.
+        prune_type = 'Batchnorm'
+    return prune_type
+
+def detect_mask_prune_dim(masks, model):
+    """
+    Detect how the masks of convolutional layers are pruned.
+
+    Parameters
+    ----------
+    masks: dict
+        A dict object that stores the masks.
+    model: nn.Module
+        Model object which the mask can be applied on.
+
+    Returns
+    -------
+    How the masks of convolutional layers are pruned. This depends on the pruning
+    algorithm; it should return 1 for masks generated by AMCPruner, and 0 for masks
+    generated by the other NNI built-in pruners.
+    0: filter pruning, which prunes the filters of the weights and thereby prunes the
+    channels of the output feature maps.
+    1: channel pruning, which prunes the kernels corresponding to each input channel
+    and thereby prunes the channels of the input feature maps.
+    """
+    dim0_preserved, dim1_preserved = 0., 0.
+    dim0_num, dim1_num = 0., 0.
+    for module_name in masks:
+        _, m = get_module_by_name(model, module_name)
+        if m is None or type(m).__name__ != 'Conv2d':
+            continue
+
+        mask = masks[module_name]['weight'].clone()
+        assert (mask >= 0).sum() == mask.numel(), \
+            "mask values should be greater than or equal to 0."
+        mask = (mask > 0).int()
+        mask = mask.view(mask.shape[0], mask.shape[1], -1)
+        dim0_mask = (mask.sum((1, 2)) > 0).int()
+        dim1_mask = (mask.sum((0, 2)) > 0).int()
+        dim0_preserved += dim0_mask.sum().item()
+        dim1_preserved += dim1_mask.sum().item()
+        dim0_num += len(dim0_mask)
+        dim1_num += len(dim1_mask)
+
+    if dim0_num == 0 or dim1_num == 0:
+        _logger.warning('no multi-dimension masks found.')
+        return 0
+
+    dim0_sparsity = 1. - dim0_preserved / dim0_num
+    dim1_sparsity = 1. - dim1_preserved / dim1_num
+    _logger.info('dim0 sparsity: %f', dim0_sparsity)
+    _logger.info('dim1 sparsity: %f', dim1_sparsity)
+
+    if dim0_sparsity == dim1_sparsity == 0.:
+        _logger.warning('nothing masked.')
+
+    if dim0_sparsity > 0 and dim1_sparsity > 0:
+        _logger.warning('both dim0 and dim1 masks found.')
+
+    return 0 if dim0_sparsity >= dim1_sparsity else 1
diff --git a/nni/compression/pytorch/utils/num_param_counter.py b/nni/compression/pytorch/utils/num_param_counter.py
new file mode 100644
index 0000000000000000000000000000000000000000..89ad0979943126c45112d8b865d87ec12cdb1961
--- /dev/null
+++ b/nni/compression/pytorch/utils/num_param_counter.py
@@ -0,0 +1,16 @@
+def get_total_num_weights(model, op_types=None):
+    '''
+    Calculate the total number of weights of the given op types.
+
+    Returns
+    -------
+    int
+        total weights of all the ops considered
+    '''
+    # avoid a mutable default argument
+    if op_types is None:
+        op_types = ['default']
+    num_weights = 0
+    for _, module in model.named_modules():
+        if module == model:
+            continue
+        if 'default' in op_types or type(module).__name__ in op_types:
+            # skip modules that do not have a weight parameter
+            if not hasattr(module, 'weight'):
+                continue
+            num_weights += module.weight.data.numel()
+    return num_weights
\ No newline at end of file
diff --git a/nni/compression/pytorch/utils/sensitivity_analysis.py b/nni/compression/pytorch/utils/sensitivity_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..a36a523feb1e3d7e7959d00ad01c8705af4f9373
--- /dev/null
+++ b/nni/compression/pytorch/utils/sensitivity_analysis.py
@@ -0,0 +1,249 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import copy
+import csv
+import logging
+from collections import OrderedDict
+
+import numpy as np
+import torch.nn as nn
+
+# FIXME: I don't know where "utils" should be
+SUPPORTED_OP_NAME = ['Conv2d', 'Conv1d']
+SUPPORTED_OP_TYPE = [getattr(nn, name) for name in SUPPORTED_OP_NAME]
+
+logger = logging.getLogger('Sensitivity_Analysis')
+logger.setLevel(logging.INFO)
+
+
+class SensitivityAnalysis:
+    def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop_mode=None, early_stop_value=None):
+        """
+        Perform sensitivity analysis for this model.
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            the model to perform sensitivity analysis on
+        val_func : function
+            validation function for the model. Since different models may need
+            different datasets/criteria, the user needs to cover this part
+            themselves. In val_func, the model should be tested on the
+            validation dataset, and the validation accuracy/loss should be
+            returned as the output of val_func. There are no restrictions on
+            the input parameters of val_func; users can use the val_args and
+            val_kwargs parameters of ``analysis`` to pass all the parameters
+            that val_func needs.
+        sparsities : list
+            The sparsity list provided by users. This parameter is set when the user
+            only wants to test some specific sparsities. In the sparsity list, each element
+            is a sparsity value which means how much weight the pruner should prune. Taking
+            [0.25, 0.5, 0.75] as an example, SensitivityAnalysis will gradually prune
+            25%, 50% and 75% of the weights for each layer.
+        prune_type : str
+            The pruner type used to prune the conv layers; the default is 'l1',
+            and 'l2' and 'fine-grained' are also supported.
+        early_stop_mode : str
+            If this flag is set, the sensitivity analysis
+            for a conv layer will stop early when the validation metric (
+            for example, accuracy/loss) has already met the threshold. We
+            support four different early stop modes: minimize, maximize, dropped,
+            raised.
+            The default value is None, which means the analysis won't stop
+            until all given sparsities are tested. This option should be used
+            together with early_stop_value.
+
+            minimize: The analysis stops when the validation metric returned by
+            val_func is lower than early_stop_value.
+            maximize: The analysis stops when the validation metric returned by
+            val_func is larger than early_stop_value.
+            dropped: The analysis stops when the validation metric has dropped by early_stop_value.
+            raised: The analysis stops when the validation metric has risen by early_stop_value.
+        early_stop_value : float
+            This value is used as the threshold for the different early stop modes.
+            This value is effective only when early_stop_mode is set.
+
+        """
+        from nni.algorithms.compression.pytorch.pruning.constants_pruner import PRUNER_DICT
+
+        self.model = model
+        self.val_func = val_func
+        self.target_layer = OrderedDict()
+        self.ori_state_dict = copy.deepcopy(self.model.state_dict())
+        self.sensitivities = {}
+        if sparsities is not None:
+            self.sparsities = sorted(sparsities)
+        else:
+            self.sparsities = np.arange(0.1, 1.0, 0.1)
+        self.sparsities = [np.round(x, 2) for x in self.sparsities]
+        self.Pruner = PRUNER_DICT[prune_type]
+        self.early_stop_mode = early_stop_mode
+        self.early_stop_value = early_stop_value
+        self.ori_metric = None  # original validation metric for the model
+        # already_pruned is for the iterative sensitivity analysis
+        # For example, sensitivity_pruner iteratively prunes the target
+        # model according to the sensitivity. After each round of
+        # pruning, sensitivity_pruner tests the new sensitivity
+        # for each layer
+        self.already_pruned = {}
+        self.model_parse()
+
+    @property
+    def layers_count(self):
+        return len(self.target_layer)
+
+    def model_parse(self):
+        for name, submodel in self.model.named_modules():
+            for op_type in SUPPORTED_OP_TYPE:
+                if isinstance(submodel, op_type):
+                    self.target_layer[name] = submodel
+                    self.already_pruned[name] = 0
+
+    def _need_to_stop(self, ori_metric, cur_metric):
+        """
+        Judge whether the stop condition (early stop) is met.
+
+        Parameters
+        ----------
+        ori_metric : float
+            original validation metric
+        cur_metric : float
+            current validation metric
+
+        Returns
+        -------
+        stop : bool
+            whether to stop the sensitivity analysis
+        """
+        if self.early_stop_mode is None:
+            # early stop mode is not enabled
+            return False
+        assert self.early_stop_value is not None
+        if self.early_stop_mode == 'minimize':
+            if cur_metric < self.early_stop_value:
+                return True
+        elif self.early_stop_mode == 'maximize':
+            if cur_metric > self.early_stop_value:
+                return True
+        elif self.early_stop_mode == 'dropped':
+            if cur_metric < ori_metric - self.early_stop_value:
+                return True
+        elif self.early_stop_mode == 'raised':
+            if cur_metric > ori_metric + self.early_stop_value:
+                return True
+        return False
+
+    def analysis(self, val_args=None, val_kwargs=None, specified_layers=None):
+        """
+        This function analyzes the sensitivity to pruning for
+        each conv layer in the target model.
+        If specified_layers is not set, we analyze all the conv
+        layers by default. Users can specify several layers to
+        analyze, or easily parallelize the analysis process,
+        through the specified_layers parameter.
+
+        Parameters
+        ----------
+        val_args : list
+            args for the val_func
+        val_kwargs : dict
+            kwargs for the val_func
+        specified_layers : list
+            list of layer names to analyze sensitivity for.
+            If this variable is set, then we only analyze
+            the conv layers specified in the list.
+            Users can also use this option to parallelize
+            the sensitivity analysis easily.
+
+        Returns
+        -------
+        sensitivities : dict
+            dict object that stores the trajectory of the
+            accuracy/loss as the prune ratio changes
+        """
+        if val_args is None:
+            val_args = []
+        if val_kwargs is None:
+            val_kwargs = {}
+        # Get the original validation metric (accuracy/loss) before pruning;
+        # this serves as the baseline for the analysis.
+        self.ori_metric = self.val_func(*val_args, **val_kwargs)
+        namelist = list(self.target_layer.keys())
+        if specified_layers is not None:
+            # only analyze the specified conv layers
+            namelist = list(filter(lambda x: x in specified_layers, namelist))
+        for name in namelist:
+            self.sensitivities[name] = {}
+            for sparsity in self.sparsities:
+                # here the sparsity is relative to the remaining weights, so
+                # calculate the actual prune ratio based on the already-pruned ratio
+                real_sparsity = (
+                    1.0 - self.already_pruned[name]) * sparsity + self.already_pruned[name]
+                # TODO In the current L1/L2 Filter Pruner, the 'op_types' is still necessary
+                # I think the L1/L2 Pruner should infer the op_types automatically
+                # according to the op_names
+                cfg = [{'sparsity': real_sparsity, 'op_names': [
+                    name], 'op_types': ['Conv2d']}]
+                pruner = self.Pruner(self.model, cfg)
+                pruner.compress()
+                val_metric = self.val_func(*val_args, **val_kwargs)
+                logger.info('Layer: %s Sparsity: %.2f Validation Metric: %.4f',
+                            name, real_sparsity, val_metric)
+
+                self.sensitivities[name][sparsity] = val_metric
+                pruner._unwrap_model()
+                del pruner
+                # check if the current metric meets the stop condition
+                if self._need_to_stop(self.ori_metric, val_metric):
+                    break
+
+            # Reset the weights pruned by the pruner. Because the input
+            # sparsities are sorted, we do not need to reset the weights when
+            # the sparsity changes; we only need to reset them when the layer
+            # being pruned changes.
+            self.model.load_state_dict(self.ori_state_dict)
+
+        return self.sensitivities
+
+    def export(self, filepath):
+        """
+        Export the results of the sensitivity analysis
+        to a csv file. The first line of the csv file describes the content
+        structure: it consists of 'layername' followed by the sparsity list.
+        Each line below records the validation metric returned by val_func
+        when the layer is under different sparsities. Note that, due to the early_stop
+        option, some layers may not have the metrics under all sparsities.
+
+        layername, 0.25, 0.5, 0.75
+        conv1, 0.6, 0.55
+        conv2, 0.61, 0.57, 0.56
+
+        Parameters
+        ----------
+        filepath : str
+            Path of the output file
+        """
+        str_sparsities = [str(x) for x in self.sparsities]
+        header = ['layername'] + str_sparsities
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf)
+            csv_w.writerow(header)
+            for layername in self.sensitivities:
+                row = []
+                row.append(layername)
+                for sparsity in sorted(self.sensitivities[layername].keys()):
+                    row.append(self.sensitivities[layername][sparsity])
+                csv_w.writerow(row)
+
+    def update_already_pruned(self, layername, ratio):
+        """
+        Set the already pruned ratio for the target layer.
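+
+        Parameters
+        ----------
+        layername : str
+            Name of the target layer.
+        ratio : float
+            The ratio of weights that have already been pruned in this layer.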
+ """ + self.already_pruned[layername] = ratio + + def load_state_dict(self, state_dict): + """ + Update the weight of the model + """ + self.ori_state_dict = copy.deepcopy(state_dict) + self.model.load_state_dict(self.ori_state_dict) diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..f972212a5aaa63394bcc759cf60a93e2b6ab40fd --- /dev/null +++ b/nni/compression/pytorch/utils/shape_dependency.py @@ -0,0 +1,724 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import csv +import logging +import torch +import numpy as np +from nni.compression.pytorch.compressor import PrunerModuleWrapper +from nni.algorithms.compression.v2.pytorch.base import PrunerModuleWrapper as PrunerModuleWrapper_v2 +from .utils import get_module_by_name + + +__all__ = ['ChannelDependency', 'GroupDependency', + 'InputChannelDependency', 'AttentionWeightDependency'] + + +CONV_TYPE = 'aten::_convolution' +ADD_TYPES = ['aten::add', 'aten::add_'] +MUL_TYPES = ['aten::mul', 'atem::mul_'] +CAT_TYPE = 'aten::cat' +logger = logging.getLogger('Shape_Dependency') +RESHAPE_OPS = [CAT_TYPE, 'aten::view', + 'aten::reshape', 'aten::flatten', 'aten::mean'] + + +def lcm_list(L): + lcm = 1 + for i in L: + lcm = np.lcm(lcm, i) + return lcm + + +def gcd_list(L): + gcd = L[0] + for i in L: + gcd = np.gcd(gcd, i) + return gcd + + +class Dependency: + def __init__(self, model=None, dummy_input=None, traced_model=None): + """ + Build the graph for the model. + """ + from nni.common.graph_utils import TorchModuleGraph + + # check if the input is legal + if traced_model is None: + # user should provide model & dummy_input to trace + # the model or a already traced model + assert model is not None and dummy_input is not None + self.graph = TorchModuleGraph(model, dummy_input, traced_model) + self.model = model + self.dependency = dict() + self.build_dependency() + + def build_dependency(self): + raise NotImplementedError + + def export(self, filepath): + raise NotImplementedError + + +def reshape_break_channel_dependency(op_node): + """ + The reshape operations such as (reshape, view, flatten) may break + the channel dependency. We need to check the input parameters of + these reshape operations to check if this reshape node will break + the channel dependency. However, it's complicated to analyze the the input + parameters for each reshape function and infer if it will break the channel + dependency. So currently, we just check if the input channel and the output + channel is the same, if so, then we can say the original reshape function + doesn't want to change the number of the channels, which means the channel + dependency is not broken. In contrast, the original reshap operation wants + to change the number of channels, so it breaks the channel dependency. + + Parameters + ---------- + opnode: NodePyOP + A Op node of the graph. + Returns + ------- + bool + If this operation will break the channel dependency. + """ + in_shape = op_node.auxiliary['in_shape'] + out_shape = op_node.auxiliary['out_shape'] + in_channel = in_shape[1] + out_channel = out_shape[1] + return in_channel != out_channel + + +class ChannelDependency(Dependency): + def __init__(self, model, dummy_input, traced_model=None, prune_type='Filter'): + """ + This model analyze the channel dependencies between the conv + layers in a model. + Parameters + ---------- + model : torch.nn.Module + The model to be analyzed. 
+        data : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
+        prune_type: str
+            This parameter indicates the channel pruning type: 1) `Filter`:
+            prune the filters of the convolution layer to prune the corresponding
+            channels 2) `Batchnorm`: prune the channels in the batchnorm layer
+        """
+        self.prune_type = prune_type
+        self.target_types = []
+        if self.prune_type == 'Filter':
+            self.target_types.extend(['Conv2d', 'Linear', 'ConvTranspose2d'])
+        elif self.prune_type == 'Batchnorm':
+            self.target_types.append('BatchNorm2d')
+
+        super(ChannelDependency, self).__init__(
+            model, dummy_input, traced_model)
+
+    def _get_parent_layers(self, node):
+        """
+        Find the nearest parent conv layers for the target node.
+
+        Parameters
+        ----------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        parent_layers: list
+            nearest parent conv/linear layers for the target node.
+        """
+
+        parent_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type in self.target_types:
+                # find the first met conv
+                parent_layers.append(curnode.name)
+                continue
+            elif curnode.op_type in RESHAPE_OPS:
+                if reshape_break_channel_dependency(curnode):
+                    continue
+            parents = self.graph.find_predecessors(curnode.unique_name)
+            parents = [self.graph.name_to_node[name] for name in parents]
+            for parent in parents:
+                queue.append(parent)
+
+        return parent_layers
+
+    def build_dependency(self):
+        """
+        Build the channel dependency for the conv layers
+        in the model.
+        """
+        # unpack the tuple/list manually before analyzing the
+        # channel dependency
+        self.graph.unpack_manually()
+        for node in self.graph.nodes_py.nodes_op:
+            parent_layers = []
+            # find the nodes that contain aten::add
+            # or aten::cat operations
+            if node.op_type in ADD_TYPES:
+                parent_layers = self._get_parent_layers(node)
+            elif node.op_type == CAT_TYPE:
+                # To determine if this cat operation will introduce channel
+                # dependency, we need the specific input parameters of the cat
+                # operation. To get the input parameters of the cat operation, we
+                # need to traverse all the cpp_nodes included by this NodePyGroup,
+                # because TorchModuleGraph merges the important nodes and the adjacent
+                # unimportant nodes (nodes starting with prim::attr, for example) into a
+                # NodePyGroup.
+                cat_dim = None
+                for cnode in node.node_cpps:
+                    if cnode.kind() == CAT_TYPE:
+                        cat_dim = list(cnode.inputs())[1].toIValue()
+                        break
+                if cat_dim != 1:
+                    parent_layers = self._get_parent_layers(node)
+            dependency_set = set(parent_layers)
+            # merge the dependencies
+            for parent in parent_layers:
+                if parent in self.dependency:
+                    dependency_set.update(self.dependency[parent])
+            # save the dependencies
+            for _node in dependency_set:
+                self.dependency[_node] = dependency_set
+
+    def export(self, filepath):
+        """
+        Export the channel dependencies as a csv file.
+        The layers on the same line have output channel
+        dependencies with each other. For example,
+        layer1.1.conv2, conv1, and layer1.0.conv2 have
+        output channel dependencies with each other, which
+        means the output channel (filter) numbers of these
+        three layers should be the same, otherwise
+        the model may have a shape conflict.
+
+        Output example:
+        Dependency Set,Layers
+        Set 1,layer1.1.conv2,layer1.0.conv2,conv1
+        Set 2,layer1.0.conv1
+        Set 3,layer1.1.conv1
+        """
+        header = ['Dependency Set', 'Layers']
+        setid = 0
+        visited = set()
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf, delimiter=',')
+            csv_w.writerow(header)
+            for node in self.graph.nodes_py.nodes_op:
+                if node.op_type not in self.target_types or node in visited:
+                    continue
+                setid += 1
+                row = ['Set %d' % setid]
+                if node.name not in self.dependency:
+                    visited.add(node)
+                    row.append(node.name)
+                else:
+                    for other in self.dependency[node.name]:
+                        visited.add(self.graph.name_to_node[other])
+                        row.append(other)
+                csv_w.writerow(row)
+
+    @property
+    def dependency_sets(self):
+        """
+        Get the list of the dependency sets.
+
+        Returns
+        -------
+        dependency_sets : list
+            list of the dependency sets. For example,
+            [set(['conv1', 'conv2']), set(['conv3', 'conv4'])]
+        """
+        d_sets = []
+        visited = set()
+        for node in self.graph.nodes_py.nodes_op:
+            if node.op_type not in self.target_types or node in visited:
+                continue
+            tmp_set = set()
+            if node.name not in self.dependency:
+                visited.add(node)
+                tmp_set.add(node.name)
+            else:
+                for other in self.dependency[node.name]:
+                    visited.add(self.graph.name_to_node[other])
+                    tmp_set.add(other)
+            d_sets.append(tmp_set)
+        return d_sets
+
+
+class InputChannelDependency(ChannelDependency):
+    """
+    Some pruners may prune the input channels of the convolutional
+    layers. While pruning the input channels of the convolutional layers,
+    the layers that share the same input tensor should prune the same
+    channels; we say that layers sharing the same input tensor/channels
+    have an input channel dependency. If we only prune the input channels of one
+    layer in a dependency set, there will be a shape conflict for the other
+    layers in the same dependency set, which may trigger a runtime error.
+    Here we judge whether an operation truncates the dependency by analyzing
+    whether the number of channels before and after the operation has changed.
+    If not, the input channel dependency is passed on to the following nodes.
+    """
+
+    def __init__(self, model, dummy_input, traced_model=None):
+        """
+        This class analyzes the input channel dependencies between the conv
+        layers in a model.
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+        data : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
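+
+        Examples
+        --------
+        A minimal sketch (``model`` and ``dummy_input`` are user-provided):
+
+        >>> dep = InputChannelDependency(model, dummy_input)
+        >>> dep.dependency_sets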
+ """ + super(InputChannelDependency, self).__init__( + model, dummy_input, traced_model) + + def _get_following_convs(self, tensor): + queue = [] + key_layers = [] + queue.extend(self.graph.input_to_node[tensor]) + while queue: + curnode = queue.pop(0) + if curnode.op_type == 'Conv2d' or curnode.op_type == 'Linear' or curnode.op_type == 'ConvTranspose2d': + # find the first met conv + key_layers.append(curnode.name) + continue + elif curnode.op_type in RESHAPE_OPS: + # check if the reshape operation will break the channel dependency + if reshape_break_channel_dependency(curnode): + # reshape operations also breaks the dependency relationship + continue + successors = self.graph.find_successors(curnode.unique_name) + successors = [self.graph.name_to_node[name] for name in successors] + for layer in successors: + queue.append(layer) + return key_layers + + def build_dependency(self): + """ + Build the input channel dependencies. + The `InputChannelDependency` indicates the layers that have + dependencies when pruning the input channel of the conv layers. + In contrast, `ChannelDependency` indicates the dependent layers + when pruning the output channles of conv layers (for example, L1FilterPruner). + """ + # unpack the tuple or list manually + self.graph.unpack_manually() + for tensor in self.graph.input_to_node: + # start from this tensor, find all the conv layers that + # take this tensor as input. Similar to the `ChannelDependency` + # the conv layer will truncate the dependencies + layers = self._get_following_convs(tensor) + dependency_set = set(layers) + for layer in layers: + if layer in self.dependency: + dependency_set.update(self.dependency[layer]) + for layer in dependency_set: + self.dependency[layer] = dependency_set + + +class GroupDependency(Dependency): + def __init__(self, model, dummy_input, traced_model=None): + """ + This model analyze the group dependencis between the conv + layers in a model. + Parameters + ---------- + model : torch.nn.Module + The model to be analyzed. + data : torch.Tensor + The example input data to trace the network architecture. + traced_model : torch._C.Graph + if we alreay has the traced graph of the target model, we donnot + need to trace the model again. + """ + self.min_groups = {} + super(GroupDependency, self).__init__(model, dummy_input, traced_model) + + def _get_parent_convs(self, node): + """ + Find the nearest father conv layers for the target node. + Parameters + --------- + node : torch._C.Node + target node. + Returns + ------- + parent_layers : list + nearest father conv layers for the target node. Due to the group + dependency only exists between the conv layers, so we only find + the parent conv layers. + """ + parent_layers = [] + # the input node is a Conv node + predeessors = self.graph.find_predecessors(node.unique_name) + predeessors = [self.graph.name_to_node[x] for x in predeessors] + queue = predeessors + while queue: + curnode = queue.pop(0) + if curnode.op_type == 'Conv2d' or curnode.op_type == 'ConvTranspose2d': + # find the first met conv + parent_layers.append(curnode.name) + continue + parents = self.graph.find_predecessors(curnode.unique_name) + parents = [self.graph.name_to_node[name] for name in parents] + for parent in parents: + queue.append(parent) + return parent_layers + + def _get_conv_groups(self, node_group): + """ + Get the number of groups for a convolutional layer. + Parameters + ---------- + node_group : NodePyGroup + target node. 
+
+        Returns
+        -------
+        group : int
+            the number of groups of the target conv layer.
+        """
+        node_name = node_group.name
+        _, leaf_module = get_module_by_name(self.model, node_name)
+        if isinstance(leaf_module, (PrunerModuleWrapper, PrunerModuleWrapper_v2)):
+            leaf_module = leaf_module.module
+        assert isinstance(
+            leaf_module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d))
+        group = leaf_module.groups
+        n_filter = leaf_module.out_channels
+        if n_filter == group:
+            # depthwise conv will not introduce extra group dependency
+            return 1
+        return group
+
+    def build_dependency(self):
+        """
+        Build the group dependency for the conv layers
+        in the model. This function returns the group number
+        of each conv layer. Note that the group count
+        of a conv layer may be larger than its original groups.
+        This is because the input channels will also be grouped
+        for grouped conv layers. To make this clear, assume we
+        have two grouped conv layers: conv1 (group=2), conv2 (group=4).
+        conv2 takes the output features of conv1 as input.
+        Then we have to make sure that the filters of conv1 can still be
+        divided into 4 groups after filter pruning, because
+        the input channels of conv2 should be divided into
+        4 groups.
+
+        Returns
+        -------
+        self.dependency : dict
+            key: the name of the conv layer, value: the minimum value that the number of
+            filters should be divisible by.
+        """
+        self.groups = {}
+        for node in self.graph.nodes_py.nodes_op:
+            if node.op_type == 'Conv2d' or node.op_type == 'ConvTranspose2d':
+                group = self._get_conv_groups(node)
+                if node.name in self.groups:
+                    # a conv layer whose group is larger than 1 requires
+                    # its number of output channels to be divisible by the group count.
+                    self.groups[node.name].append(group)
+                else:
+                    self.groups[node.name] = [group]
+                if group > 1:
+                    # for a conv layer whose group is larger than 1, the number
+                    # of output channels of its parent conv layers must be divisible by group.
+                    parent_convs = self._get_parent_convs(node)
+                    for parent in parent_convs:
+                        if parent in self.groups:
+                            self.groups[parent].append(group)
+                        else:
+                            self.groups[parent] = [group]
+
+        for name in self.groups:
+            self.dependency[name] = lcm_list(self.groups[name])
+            if min(self.groups[name]) == gcd_list(self.groups[name]):
+                self.min_groups[name] = min(self.groups[name])
+            else:
+                self.min_groups[name] = 1
+
+        return self.dependency
+
+    def export(self, filepath):
+        """
+        Export the group dependency to a csv file.
+        Each line describes a convolution layer: the
+        first part of the line is the Pytorch module
+        name of the conv layer, and the second part
+        is the group count of the filters in this layer.
+        Note that the group count may be larger than this
+        layer's original group number.
+
+        Output example:
+        Conv Layer Name,Group
+        Conv1,1
+        Conv2,2
+        Conv3,4
+        """
+        header = ['Conv Layer Name', 'Group']
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf, delimiter=',')
+            csv_w.writerow(header)
+            for name in self.dependency:
+                group = self.dependency[name]
+                csv_w.writerow([name, group])
+
+    @property
+    def dependency_sets(self):
+        return self.dependency
+
+
+class ReshapeDependency(Dependency):
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        Some models may contain view/reshape functions; such functions may have
+        fixed parameters that cannot be replaced at all. Therefore, these functions
+        may put some constraints on their input shapes.
+        In this class, we find the direct input conv/linear layers of these
+        reshape functions. If you get a shape conflict when running forward inference on the
+        model after speedup, please try removing these layers from the pruner config list and try again.
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+        data : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
+        """
+        super(ReshapeDependency, self).__init__(
+            model, dummy_input, traced_model)
+
+    def _get_parent_layers(self, node):
+        """
+        Find the nearest parent conv layers for the target node.
+
+        Parameters
+        ----------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        parent_layers: list
+            nearest parent conv/linear layers for the target node.
+        """
+        parent_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Conv2d' or curnode.op_type == 'Linear' or curnode.op_type == 'ConvTranspose2d':
+                # find the first met conv
+                parent_layers.append(curnode.name)
+                continue
+            parents = self.graph.find_predecessors(curnode.unique_name)
+            parents = [self.graph.name_to_node[name] for name in parents]
+            for parent in parents:
+                queue.append(parent)
+        return parent_layers
+
+    def build_dependency(self):
+        """
+        Build the reshape dependencies for the conv layers
+        in the model.
+        """
+        # unpack the tuple/list manually before analyzing the
+        # channel dependency
+        self.graph.unpack_manually()
+        for node in self.graph.nodes_py.nodes_op:
+            parent_layers = []
+            # find the nodes that contain aten::view
+            # or aten::reshape operations
+            if node.op_type in ['aten::view', 'aten::reshape']:
+                logger.info('Detect reshape-like functions: %s', node.op_type)
+                parent_layers = self._get_parent_layers(node)
+                logger.debug('Parent layers: %s', parent_layers)
+                self.dependency[node.unique_name] = parent_layers
+
+    def export(self, filepath):
+        """
+        Export the reshape dependencies as a csv file.
+
+        Output example:
+        Reshape OP, Dependent Layers
+        model.view.1,layer1.1.conv2,layer1.0.conv2,conv1
+        model.mean.1,layer1.0.conv1
+        model.reshape.1,layer1.1.conv1
+        """
+        header = ['Reshape OP', 'Dependent Layers']
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf, delimiter=',')
+            csv_w.writerow(header)
+            for reshape_op in self.dependency:
+                # list.extend returns None, so build the row by concatenation
+                row = [reshape_op] + list(self.dependency[reshape_op])
+                csv_w.writerow(row)
+
+    @property
+    def dependency_sets(self):
+        """
+        Get the layers that are directly followed by reshape-like functions.
+
+        Returns
+        -------
+        dependency_sets : list
+            list of the layer names, for example ['conv1', 'conv3']
+        """
+        d_sets = []
+        for reshape_node in self.dependency:
+            d_sets.extend(self.dependency[reshape_node])
+        d_sets = list(set(d_sets))
+        return d_sets
+
+
+class AttentionWeightDependency(Dependency):
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        Groups the linear layers belonging to the same attention layer in a model.
+        Currently, we only capture weights in attention layers with forward computations written
+        as four Linear layers (projections for Q, K, V, and output) and two matmul operations.
+        The method implemented here can work for Huggingface transformers but may not correctly
+        capture transformers written in other fashions (e.g., torch.nn.Transformer).
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+
+class AttentionWeightDependency(Dependency):
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        Groups the linear layers belonging to the same attention layer in a model.
+        Currently, we only capture weights in attention layers with forward computations written
+        as four Linear layers (projections for Q, K, V, and output) and two matmul operations.
+        The method implemented here can work for Huggingface transformers but may not correctly
+        capture transformers written in other fashions (e.g., torch.nn.Transformer).
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+        dummy_input : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
+        """
+        super(AttentionWeightDependency, self).__init__(
+            model, dummy_input, traced_model)
+
+    def _get_parent_layers(self, node):
+        """
+        Find the nearest parent linear layers for the target node.
+
+        Parameters
+        ----------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        parent_layers : list
+            nearest parent linear layers for the target node.
+        """
+        parent_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Linear':
+                if curnode.name not in parent_layers:
+                    parent_layers.append(curnode.name)
+                continue
+            if curnode.op_type == 'LayerNorm':
+                continue
+            parents = self.graph.find_predecessors(curnode.unique_name)
+            parents = [self.graph.name_to_node[name] for name in parents]
+            for parent in parents:
+                queue.append(parent)
+        return parent_layers
+
+    def _get_children_layers(self, node):
+        """
+        Find the nearest children linear layers for the target node.
+
+        Parameters
+        ----------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        children_layers : list
+            nearest children linear layers for the target node.
+        """
+        children_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Linear':
+                if curnode.name not in children_layers:
+                    children_layers.append(curnode.name)
+                continue
+            if curnode.op_type == 'LayerNorm':
+                continue
+            children = self.graph.find_successors(curnode.unique_name)
+            children = [self.graph.name_to_node[name] for name in children]
+            for child in children:
+                queue.append(child)
+        return children_layers
+
+    def build_dependency(self):
+        """
+        For every matmul operation, find the immediate parent and children Linear operations.
+        If we get three parents and one child, add these four weights as a dependency group.
+        """
+        self.graph.unpack_manually()
+        for node in self.graph.nodes_py.nodes_op:
+            layers = []
+            if node.op_type == 'aten::matmul':
+                parent_layers = self._get_parent_layers(node)
+                children_layers = self._get_children_layers(node)
+                if len(parent_layers) == 3 and len(children_layers) == 1:
+                    layers.extend(parent_layers)
+                    layers.extend(children_layers)
+
+            self.dependency[node.name] = layers
+
+    @property
+    def dependency_sets(self):
+        """
+        Get the list of the dependency set.
+
+        Returns
+        -------
+        dependency_sets : list
+            list of the dependency sets.
+            Each dependency set is a 4-element list of module names, with the first three elements being the projection
+            matrices for Q, K, V (in any order), and the last element being the dense matrix.
+        """
+        d_sets = []
+        for node in self.graph.nodes_py.nodes_op:
+            if node.op_type != 'aten::matmul' or node.name not in self.dependency or len(self.dependency[node.name]) != 4:
+                continue
+            d_sets.append(self.dependency[node.name])
+
+        return d_sets
+
+    def export(self, filepath):
+        """
+        Export the group dependency to a csv file. Each line describes an attention layer.
+
+        Output example:
+        Attention layer matmul op, Group
+        """
+        header = ['Attention layer matmul op', 'Group']
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf, delimiter=',')
+            csv_w.writerow(header)
+            for name in self.dependency:
+                group = self.dependency[name]
+                if len(group) > 0:
+                    csv_w.writerow([name, group])
+
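+
+# Usage sketch (illustration only; the BERT model and input are placeholders):
+#
+#     import torch
+#     from transformers import BertModel
+#     model = BertModel.from_pretrained('bert-base-uncased')
+#     dummy_input = torch.randint(0, 1000, (1, 128))
+#     deps = AttentionWeightDependency(model, dummy_input)
+#     for group in deps.dependency_sets:
+#         *projections, output_dense = group   # Q/K/V (any order) + output
+#         # prune the same hidden dimensions in all four Linear layers so
+#         # attention heads are removed consistently
+#         print('attention group:', projections, '->', output_dense)
+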
diff --git a/nni/compression/pytorch/utils/utils.py b/nni/compression/pytorch/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e23d9b28e49a399374602a48996fada92e5a3a4f
--- /dev/null
+++ b/nni/compression/pytorch/utils/utils.py
@@ -0,0 +1,69 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import torch
+
+
+torch_float_dtype = [torch.float, torch.float16, torch.float32, torch.float64, torch.half, torch.double]
+torch_integer_dtype = [torch.uint8, torch.int16, torch.short, torch.int32, torch.long, torch.bool]
+
+def get_module_by_name(model, module_name):
+    """
+    Get a module specified by its module name
+
+    Parameters
+    ----------
+    model : pytorch model
+        the pytorch model from which to get its module
+    module_name : str
+        the name of the required module
+
+    Returns
+    -------
+    module, module
+        the parent module of the required module, the required module
+    """
+    name_list = module_name.split(".")
+    for name in name_list[:-1]:
+        if hasattr(model, name):
+            model = getattr(model, name)
+        else:
+            return None, None
+    if hasattr(model, name_list[-1]):
+        leaf_module = getattr(model, name_list[-1])
+        return model, leaf_module
+    else:
+        return None, None
+
+
+def rand_like_with_shape(shape, ori_t):
+    """
+    Return a new random tensor like the original
+    tensor.
+    """
+    assert isinstance(ori_t, torch.Tensor)
+    device = ori_t.device
+    dtype = ori_t.dtype
+    require_grad = ori_t.requires_grad
+    lower_bound = torch.min(ori_t)
+    higher_bound = torch.max(ori_t)
+    if dtype in torch_integer_dtype:
+        # torch.randint expects Python ints for its bounds
+        return torch.randint(int(lower_bound), int(higher_bound) + 1, shape, dtype=dtype, device=device)
+    else:
+        return torch.rand(shape, dtype=dtype, device=device, requires_grad=require_grad)
+
+def randomize_tensor(tensor, start=1, end=100):
+    """
+    Randomize the target tensor according to the given
+    range.
+    """
+    assert isinstance(tensor, torch.Tensor)
+    if tensor.dtype in torch_integer_dtype:
+        # integer tensors could only be randomized by torch.randint;
+        # this is intentionally left disabled for now
+        # torch.randint(int(start), int(end), tensor.size(), out=tensor.data, dtype=tensor.dtype)
+        pass
+    else:
+        # we can use nn.init.uniform_ to randomize this tensor
+        # Note: tensors with integer dtypes cannot be randomized
+        # with nn.init.uniform_
+        torch.nn.init.uniform_(tensor.data, start, end)
+
diff --git a/nni/compression/tensorflow/__init__.py b/nni/compression/tensorflow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d05fade2f1140c68bb78969f89aaa8529414f3ca
--- /dev/null
+++ b/nni/compression/tensorflow/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .compressor import Compressor, Pruner
diff --git a/nni/compression/tensorflow/compressor.py b/nni/compression/tensorflow/compressor.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb249807e77c8ad14423dad8c6d09321b64f9e21
--- /dev/null
+++ b/nni/compression/tensorflow/compressor.py
@@ -0,0 +1,338 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Abstract base classes for TensorFlow model compression.
+""" + +import logging + +import tensorflow as tf +assert tf.__version__.startswith('2'), 'NNI model compression only supports TensorFlow v2.x' + +from . import default_layers + +_logger = logging.getLogger(__name__) + + +class Compressor: + """ + Common base class for all compressors. + + This class is designed for other base classes. + Algorithms should inherit ``Pruner`` or ``Quantizer`` instead. + + Attributes + ---------- + compressed_model : tf.keras.Model + Compressed user model. + wrappers : list of tf.keras.Model + A wrapper is an instrumented TF ``Layer``, in ``Model`` format. + + Parameters + ---------- + model : tf.keras.Model + The user model to be compressed. + config_list : list of JSON object + User configuration. The format is detailed in tutorial. + LayerWrapperClass : a class derive from Model + The class used to instrument layers. + """ + + def __init__(self, model, config_list, LayerWrapperClass): + assert isinstance(model, tf.keras.Model) + self.validate_config(model, config_list) + + self._original_model = model + self._config_list = config_list + self._wrapper_class = LayerWrapperClass + self._wrappers = {} # key: id(layer) , value: Wrapper(layer) + + self.compressed_model = self._instrument(model) + self.wrappers = list(self._wrappers.values()) + + if not self.wrappers: + _logger.warning('Nothing is configured to compress, please check your model and config list') + + def set_wrappers_attribute(self, name, value): + """ + Call ``setattr`` on all wrappers. + """ + for wrapper in self.wrappers: + setattr(wrapper, name, value) + + def validate_config(self, model, config_list): + """ + Compression algorithm should overload this function to validate configuration. + """ + pass + + + def _instrument(self, layer): + if isinstance(layer, tf.keras.Sequential): + return self._instrument_sequential(layer) + if isinstance(layer, tf.keras.Model): + return self._instrument_model(layer) + + # a layer can be referenced in multiple attributes of a model, + # but should only be instrumented once + if id(layer) in self._wrappers: + return self._wrappers[id(layer)] + + config = self._select_config(layer) + if config is not None: + wrapper = self._wrapper_class(layer, config, self) + self._wrappers[id(layer)] = wrapper + return wrapper + + return layer + + def _uninstrument(self, layer): + # note that ``self._wrappers`` cache is not cleared here, + # so the same wrapper objects will be recovered in next ``self._instrument()`` call + if isinstance(layer, LayerWrapper): + layer._instrumented = False + return self._uninstrument(layer.layer) + if isinstance(layer, tf.keras.Sequential): + return self._uninstrument_sequential(layer) + if isinstance(layer, tf.keras.Model): + return self._uninstrument_model(layer) + return layer + + def _instrument_sequential(self, seq): + layers = list(seq.layers) # seq.layers is read-only property + need_rebuild = False + for i, layer in enumerate(layers): + new_layer = self._instrument(layer) + if new_layer is not layer: + layers[i] = new_layer + need_rebuild = True + return tf.keras.Sequential(layers) if need_rebuild else seq + + def _uninstrument_sequential(self, seq): + layers = list(seq.layers) + rebuilt = False + for i, layer in enumerate(layers): + orig_layer = self._uninstrument(layer) + if orig_layer is not layer: + layers[i] = orig_layer + rebuilt = True + return tf.keras.Sequential(layers) if rebuilt else seq + + def _instrument_model(self, model): + for key, value in list(model.__dict__.items()): # avoid "dictionary keys changed during iteration" 
+ if isinstance(value, tf.keras.layers.Layer): + new_layer = self._instrument(value) + if new_layer is not value: + setattr(model, key, new_layer) + elif isinstance(value, list): + for i, item in enumerate(value): + if isinstance(item, tf.keras.layers.Layer): + value[i] = self._instrument(item) + return model + + def _uninstrument_model(self, model): + for key, value in list(model.__dict__.items()): + if isinstance(value, tf.keras.layers.Layer): + orig_layer = self._uninstrument(value) + if orig_layer is not value: + setattr(model, key, orig_layer) + elif isinstance(value, list): + for i, item in enumerate(value): + if isinstance(item, tf.keras.layers.Layer): + value[i] = self._uninstrument(item) + return model + + def _select_config(self, layer): + # Find the last matching config block for given layer. + # Returns None if the layer should not be compressed. + layer_type = type(layer).__name__ + last_match = None + for config in self._config_list: + if 'op_types' in config: + match = layer_type in config['op_types'] + match_default = 'default' in config['op_types'] and layer_type in default_layers.weighted_modules + if not match and not match_default: + continue + if 'op_names' in config and layer.name not in config['op_names']: + continue + last_match = config + if last_match is None or 'exclude' in last_match: + return None + return last_match + + +class LayerWrapper(tf.keras.Model): + """ + Abstract base class of layer wrappers. + + Concrete layer wrapper classes must inherit this to support ``isinstance`` check. + """ + def __init__(self): + super().__init__() + self._instrumented = True + + +class Pruner(Compressor): + """ + Base class for pruning algorithms. + + End users should use ``compress`` and callback APIs (WIP) to prune their models. + + The underlying model is instrumented upon initialization of pruner object. + So if you want to pre-train the model, train it before creating pruner object. + + The compressed model can only execute in eager mode. + + Algorithm developers should override ``calc_masks`` method to specify pruning strategy. + + Parameters + ---------- + model : tf.keras.Model + The user model to prune. + config_list : list of JSON object + User configuration. The format is detailed in tutorial. + """ + def __init__(self, model, config_list): + super().__init__(model, config_list, PrunerLayerWrapper) + #self.callback = PrunerCallback(self) + + def compress(self): + """ + Apply compression on a pre-trained model. + + If you want to prune the model during training, use callback API (WIP) instead. + + Returns + ------- + tf.keras.Model + The compressed model. + """ + self._update_mask() + return self.compressed_model + + def export_model(self, model_path, mask_path=None): + """ + Export pruned model and optionally mask tensors. + + Parameters + ---------- + model_path : path-like + The path passed to ``Model.save()``. + You can use ".h5" extension name to export HDF5 format. + mask_path : path-like or None + Export masks to the path when set. + Because Keras cannot save tensors without a ``Model``, + this will create a model, set all masks as its weights, and then save that model. + Masks in saved model will be named by corresponding layer name in compressed model. 
+
+        Returns
+        -------
+        None
+        """
+        _logger.info('Saving model to %s', model_path)
+        input_shape = self.compressed_model._build_input_shape  # cannot find a public API
+        model = self._uninstrument(self.compressed_model)
+        if input_shape:
+            model.build(input_shape)
+        model.save(model_path)
+        self._instrument(model)
+
+        if mask_path is not None:
+            _logger.info('Saving masks to %s', mask_path)
+            # can't find "save raw weights" API in tensorflow, so build a simple model
+            mask_model = tf.keras.Model()
+            for wrapper in self.wrappers:
+                setattr(mask_model, wrapper.layer.name, wrapper.masks)
+            mask_model.save_weights(mask_path)
+
+        _logger.info('Done')
+
+    def calc_masks(self, wrapper, **kwargs):
+        """
+        Abstract method to be overridden by algorithm. End users should ignore it.
+
+        If the callback is set up, this method will be invoked at end of each training minibatch.
+        If not, it will only be called when end user invokes ``compress``.
+
+        Parameters
+        ----------
+        wrapper : PrunerLayerWrapper
+            The instrumented layer.
+        **kwargs
+            Reserved for forward compatibility.
+
+        Returns
+        -------
+        dict of (str, tf.Tensor), or None
+            The key is weight ``Variable``'s name. The value is a mask ``Tensor`` of weight's shape and dtype.
+            If a weight's key does not appear in the return value, that weight will not be pruned.
+            Returning ``None`` means the mask is not changed since last time.
+            Weight names are globally unique, e.g. `model/conv_1/kernel:0`.
+        """
+        # TODO: maybe it should be able to calculate masks at weight granularity, besides layer granularity
+        raise NotImplementedError("Pruners must overload calc_masks()")
+
+    def _update_mask(self):
+        for wrapper_idx, wrapper in enumerate(self.wrappers):
+            masks = self.calc_masks(wrapper, wrapper_idx=wrapper_idx)
+            if masks is not None:
+                wrapper.masks = masks
+
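+
+# Illustration of the calc_masks() contract (hypothetical pruner, not shipped
+# in this module): mask the smallest-magnitude kernel entries at the sparsity
+# given in the layer's config.
+#
+#     class MagnitudePrunerExample(Pruner):
+#         def calc_masks(self, wrapper, **kwargs):
+#             masks = {}
+#             sparsity = wrapper.config.get('sparsity', 0.5)
+#             for weight in wrapper.layer.weights:
+#                 if 'kernel' not in weight.name:   # leave biases untouched
+#                     continue
+#                 k = int(int(tf.size(weight).numpy()) * sparsity)
+#                 if k == 0:
+#                     continue
+#                 flat = tf.reshape(tf.abs(weight), [-1])
+#                 threshold = tf.sort(flat)[k - 1]  # k-th smallest magnitude
+#                 masks[weight.name] = tf.cast(tf.abs(weight) > threshold, weight.dtype)
+#             return masks
+#
+#     pruner = MagnitudePrunerExample(model, [{'sparsity': 0.5, 'op_types': ['Dense']}])
+#     model = pruner.compress()
+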
+ """ + def __init__(self, layer, config, pruner): + super().__init__() + self.layer = layer + self.config = config + self.pruner = pruner + self.masks = {} + _logger.info('Layer detected to compress: %s', self.layer.name) + + def call(self, *inputs): + self._update_weights() + return self.layer(*inputs) + + def _update_weights(self): + new_weights = [] + for weight in self.layer.weights: + mask = self.masks.get(weight.name) + if mask is not None: + new_weights.append(tf.math.multiply(weight, mask)) + else: + new_weights.append(weight) + if new_weights and not hasattr(new_weights[0], 'numpy'): + raise RuntimeError('NNI: Compressed model can only run in eager mode') + self.layer.set_weights([weight.numpy() for weight in new_weights]) + + +# TODO: designed to replace `patch_optimizer` +#class PrunerCallback(tf.keras.callbacks.Callback): +# def __init__(self, pruner): +# super().__init__() +# self._pruner = pruner +# +# def on_train_batch_end(self, batch, logs=None): +# self._pruner.update_mask() diff --git a/nni/compression/tensorflow/default_layers.py b/nni/compression/tensorflow/default_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0c729bd883f1623d28ad279de053a39a5742109c --- /dev/null +++ b/nni/compression/tensorflow/default_layers.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +weighted_modules = [ + 'Conv1D', 'Conv2D', 'Conv3D', 'Conv1DTranspose', 'Conv2DTranspose', 'Conv3DTranspose', + 'Dense', + 'PReLU', + 'Embedding', +] diff --git a/nni/experiment/__init__.py b/nni/experiment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..682ae36056ee35a6f82d12c6083ebf7c372d453b --- /dev/null +++ b/nni/experiment/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .config import * +from .experiment import Experiment, RunMode +from .data import * diff --git a/nni/experiment/config/__init__.py b/nni/experiment/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b706f9b73e0b94c2494aaf29c3818572f39144be --- /dev/null +++ b/nni/experiment/config/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .experiment_config import ExperimentConfig +from .algorithm import AlgorithmConfig, CustomAlgorithmConfig +from .training_services import * +from .shared_storage import * diff --git a/nni/experiment/config/algorithm.py b/nni/experiment/config/algorithm.py new file mode 100644 index 0000000000000000000000000000000000000000..bd79f90e5b120069795f4de289962b9cba4b5840 --- /dev/null +++ b/nni/experiment/config/algorithm.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Config classes for tuner/assessor/advisor algorithms. + +Use ``AlgorithmConfig`` to specify a built-in algorithm; +use ``CustomAlgorithmConfig`` to specify a custom algorithm. + +Check the reference_ for explaination of each field. + +You may also want to check `tuner's overview`_. + +.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html + +.. 
_tuner's overview: https://nni.readthedocs.io/en/stable/Tuner/BuiltinTuner.html + +""" + +__all__ = ['AlgorithmConfig', 'CustomAlgorithmConfig'] + +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, Optional + +from .base import ConfigBase +from .utils import PathLike + +@dataclass(init=False) +class _AlgorithmConfig(ConfigBase): + """ + Common base class for ``AlgorithmConfig`` and ``CustomAlgorithmConfig``. + + It's a "union set" of 2 derived classes. So users can use it as either one. + """ + + name: Optional[str] = None + class_name: Optional[str] = None + code_directory: Optional[PathLike] = None + class_args: Optional[Dict[str, Any]] = None + + def _validate_canonical(self): + super()._validate_canonical() + if self.class_name is None: # assume it's built-in algorithm by default + assert self.name + assert self.code_directory is None + else: # custom algorithm + assert self.name is None + assert self.class_name + if not Path(self.code_directory).is_dir(): + raise ValueError(f'CustomAlgorithmConfig: code_directory "{self.code_directory}" is not a directory') + +@dataclass(init=False) +class AlgorithmConfig(_AlgorithmConfig): + """ + Configuration for built-in algorithm. + """ + name: str + class_args: Optional[Dict[str, Any]] = None + +@dataclass(init=False) +class CustomAlgorithmConfig(_AlgorithmConfig): + """ + Configuration for custom algorithm. + """ + class_name: str + code_directory: Optional[PathLike] = '.' + class_args: Optional[Dict[str, Any]] = None diff --git a/nni/experiment/config/base.py b/nni/experiment/config/base.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d44e063f1ee2bcda6800667d543270ad5c5f72 --- /dev/null +++ b/nni/experiment/config/base.py @@ -0,0 +1,277 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +``ConfigBase`` class. Nothing else. + +Docstrings in this file are mainly for NNI contributors instead of end users. +""" + +__all__ = ['ConfigBase'] + +import copy +import dataclasses +from pathlib import Path + +import yaml + +from . import utils + +class ConfigBase: + """ + The abstract base class of experiment config classes. + + A config class should be a type-hinted dataclass inheriting ``ConfigBase``. + Or for a training service config class, it can inherit ``TrainingServiceConfig``. + + .. code-block:: python + + @dataclass(init=False) + class ExperimentConfig(ConfigBase): + name: Optional[str] + ... + + Subclasses are suggested to override ``_canonicalize()`` and ``_validate_canonical()`` methods. + + Users can create a config object with constructor or ``ConfigBase.load()``, + validate its legality with ``ConfigBase.validate()``, + and finally convert it to the format accepted by NNI manager with ``ConfigBase.json()``. + + Example usage: + + .. code-block:: python + + # when using Python API + config1 = ExperimentConfig(trialCommand='...', trialConcurrency=1, ...) + config1.validate() + print(config1.json()) + + # when using config file + config2 = ExperimentConfig.load('examples/config.yml') + config2.validate() + print(config2.json()) + + Config objects will remember where they are loaded; therefore relative paths can be resolved smartly. + If a config object is created with constructor, the base path will be current working directory. + If it is loaded with ``ConfigBase.load(path)``, the base path will be ``path``'s parent. 
+ """ + + def __init__(self, **kwargs): + """ + There are two common ways to use the constructor, + directly writing kwargs and unpacking from JSON (YAML) object: + + .. code-block:: python + + config1 = AlgorithmConfig(name='TPE', class_args={'optimize_mode': 'maximize'}) + + json = {'name': 'TPE', 'classArgs': {'optimize_mode': 'maximize'}} + config2 = AlgorithmConfig(**json) + + If the config class has fields whose type is another config class, or list of another config class, + they will recursively load dict values. + + Because JSON objects can use "camelCase" for field names, + cases and underscores in ``kwargs`` keys are ignored in this constructor. + For example if a config class has a field ``hello_world``, + then using ``hello_world=1``, ``helloWorld=1``, and ``_HELLOWORLD_=1`` in constructor + will all assign to the same field. + + If ``kwargs`` contain extra keys, `AttributeError` will be raised. + + If ``kwargs`` do not have enough key, missing fields are silently set to `MISSING()`. + You can use ``utils.is_missing()`` to check them. + """ + self._base_path = utils.get_base_path() + args = {utils.case_insensitive(key): value for key, value in kwargs.items()} + for field in dataclasses.fields(self): + value = args.pop(utils.case_insensitive(field.name), field.default) + setattr(self, field.name, value) + if args: # maybe a key is misspelled + class_name = type(self).__name__ + fields = ', '.join(args.keys()) + raise AttributeError(f'{class_name} does not have field(s) {fields}') + + # try to unpack nested config + for field in dataclasses.fields(self): + value = getattr(self, field.name) + if utils.is_instance(value, field.type): + continue # already accepted by subclass, don't touch it + if isinstance(value, dict): + config = utils.guess_config_type(value, field.type) + if config is not None: + setattr(self, field.name, config) + elif isinstance(value, list) and value and isinstance(value[0], dict): + configs = utils.guess_list_config_type(value, field.type) + if configs: + setattr(self, field.name, configs) + + @classmethod + def load(cls, path): + """ + Load a YAML config file from file system. + + Since YAML is a superset of JSON, it can also load JSON files. + + This method raises exception if: + + - The file is not available + - The file content is not valid YAML + - Top level value of the YAML is not object + - The YAML contains not supported fields + + It does not raise exception when the YAML misses fields or contains bad fields. + + Parameters + ---------- + path : PathLike + Path of the config file. + + Returns + ------- + cls + An object of ConfigBase subclass. + """ + with open(path) as yaml_file: + data = yaml.safe_load(yaml_file) + if not isinstance(data, dict): + raise TypeError(f'Conent of config file {path} is not a dict/object') + utils.set_base_path(Path(path).parent) + config = cls(**data) + utils.unset_base_path() + return config + + def canonical_copy(self): + """ + Create a "canonical" copy of the config, and validate it. + + This function is mainly used internally by NNI. + + Term explanation: + The config schema for end users is more flexible than the format NNI manager accepts, + so config classes have to deal with the conversion. + Here we call the converted format "canonical". + + Returns + ------- + type(self) + A deep copy. + """ + canon = copy.deepcopy(self) + canon._canonicalize([]) + canon._validate_canonical() + return canon + + def validate(self): + """ + Validate legality of the config object. Raise exception if any error occurred. 
+    def canonical_copy(self):
+        """
+        Create a "canonical" copy of the config, and validate it.
+
+        This function is mainly used internally by NNI.
+
+        Term explanation:
+            The config schema for end users is more flexible than the format NNI manager accepts,
+            so config classes have to deal with the conversion.
+            Here we call the converted format "canonical".
+
+        Returns
+        -------
+        type(self)
+            A deep copy.
+        """
+        canon = copy.deepcopy(self)
+        canon._canonicalize([])
+        canon._validate_canonical()
+        return canon
+
+    def validate(self):
+        """
+        Validate legality of the config object. Raise exception if any error occurred.
+
+        This function does **not** return truth value. Do not write ``if config.validate()``.
+
+        Returns
+        -------
+        None
+        """
+        self.canonical_copy()
+
+    def json(self):
+        """
+        Convert the config to JSON object (not JSON string).
+
+        In current implementation ``json()`` will invoke ``validate()``, but this might change in a future version.
+        It is recommended to call ``validate()`` before ``json()`` for now.
+
+        Returns
+        -------
+        dict
+            JSON object.
+        """
+        canon = self.canonical_copy()
+        return dataclasses.asdict(canon, dict_factory=_dict_factory)  # this is recursive
+
+    def _canonicalize(self, parents):
+        """
+        To be overridden by subclass.
+
+        Convert the config object to canonical format.
+
+        The default implementation will:
+
+        1. Resolve all ``PathLike`` fields to absolute path
+        2. Call ``_canonicalize([self] + parents)`` on all children config objects, including those inside list and dict
+
+        If the subclass has nested config fields, be careful about where to call ``super()._canonicalize()``.
+
+        Parameters
+        ----------
+        parents : list[ConfigBase]
+            The upper level config objects.
+            For example local training service's ``trialGpuNumber`` will be copied from top level when not set,
+            in this case it will be invoked like ``localConfig._canonicalize([experimentConfig])``.
+        """
+        for field in dataclasses.fields(self):
+            value = getattr(self, field.name)
+            if isinstance(value, (Path, str)) and utils.is_path_like(field.type):
+                setattr(self, field.name, utils.resolve_path(value, self._base_path))
+            else:
+                _recursive_canonicalize_child(value, [self] + parents)
+
+    def _validate_canonical(self):
+        """
+        To be overridden by subclass.
+
+        Validate legality of a canonical config object. It is the caller's responsibility to ensure the config is canonical.
+
+        Raise exception if any problem is found. This function does **not** return truth value.
+
+        The default implementation will:
+
+        1. Validate that all fields match their type hint
+        2. Call ``_validate_canonical()`` on children config objects, including those inside list and dict
+        """
+        utils.validate_type(self)
+        for field in dataclasses.fields(self):
+            value = getattr(self, field.name)
+            _recursive_validate_child(value)
+
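+    # For contributors, a hypothetical subclass sketch showing where the two
+    # hooks above fit (field names are made up for illustration):
+    #
+    #     @dataclass(init=False)
+    #     class ExampleServiceConfig(ConfigBase):
+    #         platform: str = 'example'
+    #         trial_gpu_number: Optional[int] = None
+    #
+    #         def _canonicalize(self, parents):
+    #             # inherit a top-level shortcut field before the generic pass
+    #             if self.trial_gpu_number is None and parents:
+    #                 self.trial_gpu_number = getattr(parents[0], 'trial_gpu_number', None)
+    #             super()._canonicalize(parents)
+    #
+    #         def _validate_canonical(self):
+    #             super()._validate_canonical()
+    #             assert self.trial_gpu_number is None or self.trial_gpu_number >= 0
+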
+ """ + if hasattr(self, name) or name.startswith('_'): + super().__setattr__(name, value) + return + if name in [field.name for field in dataclasses.fields(self)]: # might happend during __init__ + super().__setattr__(name, value) + return + raise AttributeError(f'{type(self).__name__} does not have field {name}') + +def _dict_factory(items): + ret = {} + for key, value in items: + if value is not None: + k = utils.camel_case(key) + v = str(value) if isinstance(value, Path) else value + ret[k] = v + return ret + +def _recursive_canonicalize_child(child, parents): + if isinstance(child, ConfigBase): + child._canonicalize(parents) + elif isinstance(child, list): + for item in child: + _recursive_canonicalize_child(item, parents) + elif isinstance(child, dict): + for item in child.values(): + _recursive_canonicalize_child(item, parents) + +def _recursive_validate_child(child): + if isinstance(child, ConfigBase): + child._validate_canonical() + elif isinstance(child, list): + for item in child: + _recursive_validate_child(item) + elif isinstance(child, dict): + for item in child.values(): + _recursive_validate_child(item) diff --git a/nni/experiment/config/convert.py b/nni/experiment/config/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..860b3c1fffba6da49945f6531414b07b9b2fa176 --- /dev/null +++ b/nni/experiment/config/convert.py @@ -0,0 +1,299 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import logging + +_logger = logging.getLogger(__name__) + +def to_v2(v1): + v1 = copy.deepcopy(v1) + platform = v1.pop('trainingServicePlatform') + assert platform in ['local', 'remote', 'pai', 'aml', 'kubeflow', 'frameworkcontroller'] + if platform == 'pai': + platform = 'openpai' + + v2 = {} + + _drop_field(v1, 'authorName') + _move_field(v1, v2, 'experimentName') + _drop_field(v1, 'description') + _move_field(v1, v2, 'trialConcurrency') + _move_field(v1, v2, 'maxExecDuration', 'maxExperimentDuration') + _move_field(v1, v2, 'maxTrialNum', 'maxTrialNumber') + _move_field(v1, v2, 'searchSpacePath', 'searchSpaceFile') + assert not v1.pop('multiPhase', None), 'Multi-phase is no longer supported' + _deprecate(v1, v2, 'multiThread') + _move_field(v1, v2, 'nniManagerIp') + _move_field(v1, v2, 'logDir', 'experimentWorkingDirectory') + _move_field(v1, v2, 'debug') + _deprecate(v1, v2, 'versionCheck') + _move_field(v1, v2, 'logLevel') + _deprecate(v1, v2, 'logCollection') + _move_field(v1, v2, 'useAnnotation') + + if 'trial' in v1: + v1_trial = v1.pop('trial') + _move_field(v1_trial, v2, 'command', 'trialCommand') + _move_field(v1_trial, v2, 'codeDir', 'trialCodeDirectory') + _move_field(v1_trial, v2, 'gpuNum', 'trialGpuNumber') + + for algo_type in ['tuner', 'assessor', 'advisor']: + v1_algo = v1.pop(algo_type, None) + if not v1_algo: + continue + + builtin_name = v1_algo.pop(f'builtin{algo_type.title()}Name', None) + if builtin_name is not None: + v2_algo = {'name': builtin_name} + + else: + code_directory = v1_algo.pop('codeDir') + class_file_name = v1_algo.pop('classFileName') + assert class_file_name.endswith('.py') + class_name = class_file_name[:-3] + '.' 
+ v1_algo.pop('className') + v2_algo = {'className': class_name, 'codeDirectory': code_directory} + + if 'classArgs' in v1_algo: + v2_algo['classArgs'] = v1_algo.pop('classArgs') + + v2[algo_type] = v2_algo + _deprecate(v1_algo, v2, 'includeIntermediateResults') + _move_field(v1_algo, v2, 'gpuIndices', 'tunerGpuIndices') + + if v1_algo: + _logger.error('%s config not fully converted: %s', algo_type, v1_algo) + + ts = {'platform': platform} + v2['trainingService'] = ts + + if platform == 'local': + local_config = v1.pop('localConfig', {}) + _move_field(local_config, ts, 'gpuIndices') + _move_field(local_config, ts, 'maxTrialNumPerGpu', 'maxTrialNumberPerGpu') + _move_field(local_config, ts, 'useActiveGpu') + if local_config: + _logger.error('localConfig not fully converted: %s', local_config) + + if platform == 'remote': + remote_config = v1.pop('remoteConfig', {}) + _move_field(remote_config, ts, 'reuse', 'reuseMode') + if remote_config: + _logger.error('remoteConfig not fully converted: %s', remote_config) + + ts['machineList'] = [] + for v1_machine in v1.pop('machineList'): + v2_machine = {} + ts['machineList'].append(v2_machine) + _move_field(v1_machine, v2_machine, 'ip', 'host') + _move_field(v1_machine, v2_machine, 'port') + _move_field(v1_machine, v2_machine, 'username', 'user') + _move_field(v1_machine, v2_machine, 'sshKeyPath', 'sshKeyFile') + _move_field(v1_machine, v2_machine, 'passphrase') + _move_field(v1_machine, v2_machine, 'gpuIndices') + _move_field(v1_machine, v2_machine, 'maxTrialNumPerGpu', 'maxTrialNumberPerGpu') + _move_field(v1_machine, v2_machine, 'useActiveGpu') + _move_field(v1_machine, v2_machine, 'pythonPath') + _move_field(v1_machine, v2_machine, 'passwd', 'password') + if v1_machine: + _logger.error('remote machine not fully converted: %s', v1_machine) + + if platform == 'openpai': + _move_field(v1_trial, ts, 'nniManagerNFSMountPath', 'localStorageMountPoint') + _move_field(v1_trial, ts, 'containerNFSMountPath', 'containerStorageMountPoint') + _move_field(v1_trial, ts, 'cpuNum', 'trialCpuNumber') + _move_field(v1_trial, ts, 'memoryMB', 'trialMemorySize') + _move_field(v1_trial, ts, 'image', 'dockerImage') + _move_field(v1_trial, ts, 'virtualCluster') + _move_field(v1_trial, ts, 'paiStorageConfigName', 'storageConfigName') + _move_field(v1_trial, ts, 'paiConfigPath', 'openpaiConfigFile') + + pai_config = v1.pop('paiConfig') + _move_field(pai_config, ts, 'userName', 'username') + _deprecate(pai_config, v2, 'password') + _move_field(pai_config, ts, 'token') + _move_field(pai_config, ts, 'host') + _move_field(pai_config, ts, 'reuse', 'reuseMode') + _move_field(pai_config, ts, 'gpuNum', 'trialGpuNumber') + _move_field(pai_config, ts, 'cpuNum', 'trialCpuNumber') + _move_field(pai_config, ts, 'memoryMB', 'trialMemorySize') + _deprecate(pai_config, v2, 'maxTrialNumPerGpu') + _deprecate(pai_config, v2, 'useActiveGpu') + if pai_config: + _logger.error('paiConfig not fully converted: %s', pai_config) + + if platform == 'aml': + _move_field(v1_trial, ts, 'image', 'dockerImage') + + aml_config = v1.pop('amlConfig', {}) + _move_field(aml_config, ts, 'subscriptionId') + _move_field(aml_config, ts, 'resourceGroup') + _move_field(aml_config, ts, 'workspaceName') + _move_field(aml_config, ts, 'computeTarget') + _move_field(aml_config, ts, 'maxTrialNumPerGpu', 'maxTrialNumberPerGpu') + _deprecate(aml_config, v2, 'useActiveGpu') + if aml_config: + _logger.error('amlConfig not fully converted: %s', aml_config) + + if platform == 'kubeflow': + kf_config = v1.pop('kubeflowConfig') + 
_move_field(kf_config, ts, 'operator') + _move_field(kf_config, ts, 'apiVersion') + + storage_name = kf_config.pop('storage', None) + if storage_name is None: + storage_name = 'nfs' if 'nfs' in kf_config else 'azureStorage' + if storage_name == 'nfs': + nfs = kf_config.pop('nfs') + ts['storage'] = {'storageType': 'nfs', 'server': nfs['server'], 'path': nfs['path']} + if storage_name == 'azureStorage': + key_vault = kf_config.pop('keyVault') + azure_storage = kf_config.pop('azureStorage') + ts['storage'] = { + 'storageType': 'azureStorage', + 'azureAccount': azure_storage['accountName'], + 'azureShare': azure_storage['azureShare'], + 'keyVaultName': key_vault['vaultName'], + 'keyVaultKey': key_vault['name'], + } + _deprecate(kf_config, v2, 'uploadRetryCount') + + if kf_config: + _logger.error('kubeflowConfig not fully converted: %s', kf_config) + + _drop_field(v1_trial, 'nasMode') + for role_name in ['worker', 'ps', 'master']: + if role_name not in v1_trial: + continue + v1_role = v1_trial.pop(role_name) + v2_role = {} + ts[role_name] = v2_role + + _move_field(v1_role, v2_role, 'replicas') + _move_field(v1_role, v2_role, 'command') + _move_field(v1_role, v2_role, 'gpuNum', 'gpuNumber') + _move_field(v1_role, v2_role, 'cpuNum', 'cpuNumber') + _move_field(v1_role, v2_role, 'memoryMB', 'memorySize') + _move_field(v1_role, v2_role, 'image', 'dockerImage') + _deprecate(v1_role, v2, 'privateRegistryAuthPath') + + v2_role['codeDirectory'] = v2['trialCodeDirectory'] + + if v1_role: + _logger.error('kubeflow role not fully converted: %s', v1_role) + + if platform == 'frameworkcontroller': + fc_config = v1.pop('frameworkcontrollerConfig') + _move_field(fc_config, ts, 'serviceAccountName') + _move_field(fc_config, ts, 'reuse', 'reuseMode') + + storage_name = fc_config.pop('storage', None) + if storage_name is None: + storage_name = 'nfs' if 'nfs' in fc_config else 'azureStorage' + if storage_name == 'nfs': + nfs = fc_config.pop('nfs') + ts['storage'] = {'storageType': 'nfs', 'server': nfs['server'], 'path': nfs['path']} + if storage_name == 'azureStorage': + key_vault = fc_config.pop('keyVault') + azure_storage = fc_config.pop('azureStorage') + ts['storage'] = { + 'storageType': 'azureStorage', + 'azureAccount': azure_storage['accountName'], + 'azureShare': azure_storage['azureShare'], + 'keyVaultName': key_vault['vaultName'], + 'keyVaultKey': key_vault['name'], + } + _deprecate(fc_config, v2, 'uploadRetryCount') + + if fc_config: + _logger.error('frameworkcontroller not fully converted: %s', fc_config) + + _drop_field(v1_trial, 'nasMode') + ts['taskRoles'] = [] + for v1_role in v1_trial.pop('taskRoles', []): + v2_role = {} + ts['taskRoles'].append(v2_role) + + _move_field(v1_role, v2_role, 'name') + _move_field(v1_role, v2_role, 'taskNum', 'taskNumber') + _move_field(v1_role, v2_role, 'frameworkControllerCompletionPolicy', 'frameworkAttemptCompletionPolicy') + _move_field(v1_role, v2_role, 'command') + _move_field(v1_role, v2_role, 'gpuNum', 'gpuNumber') + _move_field(v1_role, v2_role, 'cpuNum', 'cpuNumber') + _move_field(v1_role, v2_role, 'memoryMB', 'memorySize') + _move_field(v1_role, v2_role, 'image', 'dockerImage') + _deprecate(v1_role, v2, 'privateRegistryAuthPath') + + policy = 'frameworkAttemptCompletionPolicy' + if v1_role[policy]: + v2_role[policy] = {} + _move_field(v1_role[policy], v2_role[policy], 'minFailedTaskCount') + _move_field(v1_role[policy], v2_role[policy], 'minSucceededTaskCount', 'minSucceedTaskCount') + if not v1_role[policy]: + v1_role.pop(policy) + + if v1_role: + 
_logger.error('frameworkcontroller role not fully converted: %s', v1_role)
+
+        # this is required, seems a bug in nni manager
+        if not v2.get('trialCommand'):
+            v2['trialCommand'] = v2_role['command']
+
+    # hybrid mode should always use v2 schema, so no need to handle here
+
+    v1_storage = v1.pop('sharedStorage', None)
+    if v1_storage:
+        v2_storage = {}
+        v2['sharedStorage'] = v2_storage
+
+        _move_field(v1_storage, v2_storage, 'storageType')
+        _move_field(v1_storage, v2_storage, 'nfsServer')
+        _move_field(v1_storage, v2_storage, 'exportedDirectory')
+        _move_field(v1_storage, v2_storage, 'localMountPoint')
+        _move_field(v1_storage, v2_storage, 'remoteMountPoint')
+        _move_field(v1_storage, v2_storage, 'localMounted')
+        _move_field(v1_storage, v2_storage, 'storageAccountName')
+        _move_field(v1_storage, v2_storage, 'storageAccountKey')
+        _move_field(v1_storage, v2_storage, 'containerName')
+
+        if v1_storage:
+            _logger.error('shared storage not fully converted: %s', v1_storage)
+
+    if v1_trial:
+        _logger.error('trial config not fully converted: %s', v1_trial)
+    if v1:
+        _logger.error('Config not fully converted: %s', v1)
+    return v2
+
+def _move_field(v1, v2, v1_key, v2_key=None):
+    if v2_key is None:
+        v2_key = v1_key
+    if v1_key in v1:
+        value = v1.pop(v1_key, None)
+        if value is not None:
+            v2[v2_key] = value
+
+def _drop_field(v1, key):
+    if key in v1:
+        _logger.warning(f'Config field "{key}" is no longer supported and has been ignored')
+        v1.pop(key)
+
+def _deprecate(v1, v2, key):
+    _drop_field(v1, key)
+
+def convert_algo(algo_type, v1_algo):
+    builtin_name = v1_algo.pop(f'builtin{algo_type.title()}Name', None)
+    if builtin_name is not None:
+        v2_algo = {'name': builtin_name}
+
+    else:
+        code_directory = v1_algo.pop('codeDir')
+        class_file_name = v1_algo.pop('classFileName')
+        assert class_file_name.endswith('.py')
+        class_name = class_file_name[:-3] + '.' + v1_algo.pop('className')
+        v2_algo = {'className': class_name, 'codeDirectory': code_directory}
+
+    if 'classArgs' in v1_algo:
+        v2_algo['classArgs'] = v1_algo.pop('classArgs')
+
+    return v2_algo
diff --git a/nni/experiment/config/experiment_config.py b/nni/experiment/config/experiment_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..74b1093b14f4ad9d7ae9af0c51c13c286a7ec0e7
--- /dev/null
+++ b/nni/experiment/config/experiment_config.py
@@ -0,0 +1,176 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Top level experiment configuration class, ``ExperimentConfig``.
+"""
+
+__all__ = ['ExperimentConfig']
+
+from dataclasses import dataclass
+import json
+import logging
+from pathlib import Path
+from typing import Any, List, Optional, Union
+
+import yaml
+
+from .algorithm import _AlgorithmConfig
+from .base import ConfigBase
+from .shared_storage import SharedStorageConfig
+from .training_service import TrainingServiceConfig
+from . import utils
+
+@dataclass(init=False)
+class ExperimentConfig(ConfigBase):
+    """
+    Class of experiment configuration. Check the reference_ for explanation of each field.
+
+    When used in Python experiment API, it can be constructed in two flavors:
+
+    1. Create an empty project then set each field
+
+    .. code-block:: python
+
+        config = ExperimentConfig('local')
+        config.search_space = {...}
+        config.tuner.name = 'random'
+        config.training_service.use_active_gpu = True
+
+    2. Use kwargs directly
+
+    .. code-block:: python
+
+        config = ExperimentConfig(
+            search_space = {...},
+            tuner = AlgorithmConfig(name='random'),
+            training_service = LocalConfig(
+                use_active_gpu = True
+            )
+        )
+
+    .. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+    """
+    # TODO:
+    # The behavior described below is expected but does not work,
+    # because some fields are consumed by TrialDispatcher outside environment service.
+    # Add the lines to docstr when we fix this issue.
+
+    # Fields commented as "training service field" act as shortcuts for all training services.
+    # Users can either specify them here or inside training service config.
+    # In the latter case hybrid training services can have different settings.
+
+    experiment_name: Optional[str] = None
+    search_space_file: Optional[utils.PathLike] = None
+    search_space: Any = None
+    trial_command: Optional[str] = None  # training service field
+    trial_code_directory: utils.PathLike = '.'  # training service field
+    trial_concurrency: int
+    trial_gpu_number: Optional[int] = None  # training service field
+    max_experiment_duration: Union[str, int, None] = None
+    max_trial_number: Optional[int] = None
+    max_trial_duration: Union[str, int, None] = None
+    nni_manager_ip: Optional[str] = None  # training service field
+    use_annotation: bool = False
+    debug: bool = False
+    log_level: Optional[str] = None
+    experiment_working_directory: utils.PathLike = '~/nni-experiments'
+    tuner_gpu_indices: Union[List[int], int, str, None] = None
+    tuner: Optional[_AlgorithmConfig] = None
+    assessor: Optional[_AlgorithmConfig] = None
+    advisor: Optional[_AlgorithmConfig] = None
+    training_service: Union[TrainingServiceConfig, List[TrainingServiceConfig]]
+    shared_storage: Optional[SharedStorageConfig] = None
+
+    def __init__(self, training_service_platform=None, **kwargs):
+        super().__init__(**kwargs)
+        if training_service_platform is not None:
+            # the user chose to init with `config = ExperimentConfig('local')` and set fields later
+            # we need to create empty training service & algorithm configs to support `config.tuner.name = 'random'`
+            assert utils.is_missing(self.training_service)
+            if isinstance(training_service_platform, list):
+                self.training_service = [utils.training_service_config_factory(ts) for ts in training_service_platform]
+            else:
+                self.training_service = utils.training_service_config_factory(training_service_platform)
+            for algo_type in ['tuner', 'assessor', 'advisor']:
+                # add placeholder items, so users can write `config.tuner.name = 'random'`
+                if getattr(self, algo_type) is None:
+                    setattr(self, algo_type, _AlgorithmConfig(name='_none_', class_args={}))
+        elif not utils.is_missing(self.training_service):
+            # training service is set via json or constructor
+            if isinstance(self.training_service, list):
+                self.training_service = [utils.load_training_service_config(ts) for ts in self.training_service]
+            else:
+                self.training_service = utils.load_training_service_config(self.training_service)
+
+    def _canonicalize(self, _parents):
+        if self.log_level is None:
+            self.log_level = 'debug' if self.debug else 'info'
+        self.tuner_gpu_indices = utils.canonical_gpu_indices(self.tuner_gpu_indices)
+
+        for algo_type in ['tuner', 'assessor', 'advisor']:
+            algo = getattr(self, algo_type)
+            if algo is not None and algo.name == '_none_':
+                setattr(self, algo_type, None)
+
+        super()._canonicalize([self])
+
+        if self.search_space_file is not None:
+            yaml_error = None
+            try:
+                self.search_space = _load_search_space_file(self.search_space_file)
+            except Exception as e:
+                yaml_error = repr(e)
+            if yaml_error is not None:  # raise it outside except block to make stack trace clear
+                msg = f'ExperimentConfig: Failed to load search space file "{self.search_space_file}": {yaml_error}'
+                raise ValueError(msg)
+
+        if self.nni_manager_ip is None:
+            # show a warning if user does not set nni_manager_ip. we have many issues caused by this
+            # the simple detection logic won't work for hybrid, but advanced users should not need it
+            # ideally we should check accessibility of the ip, but it needs much more work
+            platform = getattr(self.training_service, 'platform')
+            has_ip = isinstance(getattr(self.training_service, 'nni_manager_ip'), str)  # not None or MISSING
+            if platform and platform != 'local' and not has_ip:
+                ip = utils.get_ipv4_address()
+                msg = f'nni_manager_ip is not set, please make sure {ip} is accessible from training machines'
+                logging.getLogger('nni.experiment.config').warning(msg)
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+
+        space_cnt = (self.search_space is not None) + (self.search_space_file is not None)
+        if self.use_annotation and space_cnt != 0:
+            raise ValueError('ExperimentConfig: search space must not be set when annotation is enabled')
+        if not self.use_annotation and space_cnt < 1:
+            raise ValueError('ExperimentConfig: exactly one of search_space and search_space_file must be set')
+
+        # to make the error message clear, ideally it should be:
+        # `if concurrency < 0: raise ValueError('trial_concurrency ({concurrency}) must be greater than 0')`
+        # but I believe hardly any users will make this kind of mistake, so let's keep it simple
+        assert self.trial_concurrency > 0
+        assert self.max_experiment_duration is None or utils.parse_time(self.max_experiment_duration) > 0
+        assert self.max_trial_number is None or self.max_trial_number > 0
+        assert self.max_trial_duration is None or utils.parse_time(self.max_trial_duration) > 0
+        assert self.log_level in ['fatal', 'error', 'warning', 'info', 'debug', 'trace']
+
+        # following line is disabled because it has a side effect
+        # enable it if users encounter problems caused by failure in creating experiment directory
+        # currently I have only seen one issue of this kind
+        #Path(self.experiment_working_directory).mkdir(parents=True, exist_ok=True)
+
+        utils.validate_gpu_indices(self.tuner_gpu_indices)
+
+        tuner_cnt = (self.tuner is not None) + (self.advisor is not None)
+        if tuner_cnt != 1:
+            raise ValueError('ExperimentConfig: exactly one of tuner and advisor must be set')
+
+def _load_search_space_file(search_space_path):
+    # FIXME
+    # we need this because PyYAML 6.0 does not support YAML 1.2,
+    # which means it is not fully compatible with JSON
+    content = Path(search_space_path).read_text(encoding='utf8')
+    try:
+        return json.loads(content)
+    except Exception:
+        return yaml.safe_load(content)
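+
+# A minimal end-to-end sketch of the API above, for illustration only
+# (the trial command and search space are placeholders, and 'random' is
+# assumed to be an available built-in tuner):
+#
+#     config = ExperimentConfig('local')
+#     config.search_space = {'lr': {'_type': 'loguniform', '_value': [1e-5, 1e-1]}}
+#     config.trial_command = 'python3 trial.py'
+#     config.trial_concurrency = 2
+#     config.tuner.name = 'random'
+#     config.training_service.use_active_gpu = False
+#     config.validate()      # raises on problems, returns nothing
+#     print(config.json())   # camelCase dict accepted by NNI manager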
diff --git a/nni/experiment/config/shared_storage.py b/nni/experiment/config/shared_storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..082b771c100c2a4f8d24aac4ea5a26b99262c6f1
--- /dev/null
+++ b/nni/experiment/config/shared_storage.py
@@ -0,0 +1,35 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .base import ConfigBase
+from .utils import PathLike
+
+__all__ = ['NfsConfig', 'AzureBlobConfig']
+
+@dataclass(init=False)
+class SharedStorageConfig(ConfigBase):
+    storage_type: str
+    local_mount_point: PathLike
+    remote_mount_point: str
+    local_mounted: str
+    storage_account_name: Optional[str] = None
+    storage_account_key: Optional[str] = None
+    container_name: Optional[str] = None
+    nfs_server: Optional[str] = None
+    exported_directory: Optional[str] = None
+
+@dataclass(init=False)
+class NfsConfig(SharedStorageConfig):
+    storage_type: str = 'NFS'
+    nfs_server: str
+    exported_directory: str
+
+@dataclass(init=False)
+class AzureBlobConfig(SharedStorageConfig):
+    storage_type: str = 'AzureBlob'
+    storage_account_name: str
+    storage_account_key: Optional[str] = None
+    container_name: str
diff --git a/nni/experiment/config/training_service.py b/nni/experiment/config/training_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53268240f076f5c93bcdbdd0721856b76b65c18
--- /dev/null
+++ b/nni/experiment/config/training_service.py
@@ -0,0 +1,58 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+``TrainingServiceConfig`` class.
+
+Docstrings in this file are mainly for NNI contributors, or training service authors.
+"""
+
+__all__ = ['TrainingServiceConfig']
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Optional
+
+from .base import ConfigBase
+from .utils import PathLike, is_missing
+
+@dataclass(init=False)
+class TrainingServiceConfig(ConfigBase):
+    """
+    The base class of training service config classes.
+
+    See ``LocalConfig`` for example usage.
+    """
+
+    platform: str
+    trial_command: str
+    trial_code_directory: PathLike
+    trial_gpu_number: Optional[int]
+    nni_manager_ip: Optional[str]
+    debug: bool
+
+    def _canonicalize(self, parents):
+        """
+        In addition to ``ConfigBase._canonicalize()``, this overridden version also
+        copies training-service-specific fields from ``ExperimentConfig``.
+        """
+        shortcuts = [  # fields that can be set in root level config as shortcuts
+            'trial_command',
+            'trial_code_directory',
+            'trial_gpu_number',
+            'nni_manager_ip',
+            'debug',
+        ]
+        for field_name in shortcuts:
+            if is_missing(getattr(self, field_name)):
+                value = getattr(parents[0], field_name)
+                setattr(self, field_name, value)
+        super()._canonicalize(parents)
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+        cls = type(self)
+        assert self.platform == cls.platform
+        if not Path(self.trial_code_directory).is_dir():
+            raise ValueError(f'{cls.__name__}: trial_code_directory "{self.trial_code_directory}" is not a directory')
+        assert self.trial_gpu_number is None or self.trial_gpu_number >= 0
diff --git a/nni/experiment/config/training_services/__init__.py b/nni/experiment/config/training_services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3926d788a62c301da4637422942eb6357482d8bb
--- /dev/null
+++ b/nni/experiment/config/training_services/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .local import *
+from .remote import *
+from .openpai import *
+from .k8s_storage import *
+from .kubeflow import *
+from .frameworkcontroller import *
+from .aml import *
+from .dlc import *
diff --git a/nni/experiment/config/training_services/aml.py b/nni/experiment/config/training_services/aml.py
new file mode 100644
index 0000000000000000000000000000000000000000..78055cbd402ef509550368a408d013522ef5198b
--- /dev/null
+++ b/nni/experiment/config/training_services/aml.py
@@ -0,0 +1,31 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Configuration for AML training service.
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `AML training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+.. _AML training service doc: https://nni.readthedocs.io/en/stable/TrainingService/AMLMode.html
+
+"""
+
+__all__ = ['AmlConfig']
+
+from dataclasses import dataclass
+
+from ..training_service import TrainingServiceConfig
+
+@dataclass(init=False)
+class AmlConfig(TrainingServiceConfig):
+    platform: str = 'aml'
+    subscription_id: str
+    resource_group: str
+    workspace_name: str
+    compute_target: str
+    docker_image: str = 'msranni/nni:latest'
+    max_trial_number_per_gpu: int = 1
diff --git a/nni/experiment/config/training_services/dlc.py b/nni/experiment/config/training_services/dlc.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c7b8b5e55c2ceedd2d3c845b11b76a796f1eafd
--- /dev/null
+++ b/nni/experiment/config/training_services/dlc.py
@@ -0,0 +1,23 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from dataclasses import dataclass
+
+from ..training_service import TrainingServiceConfig
+
+__all__ = ['DlcConfig']
+
+@dataclass(init=False)
+class DlcConfig(TrainingServiceConfig):
+    platform: str = 'dlc'
+    type: str = 'Worker'
+    image: str  # 'registry-vpc.{region}.aliyuncs.com/pai-dlc/tensorflow-training:1.15.0-cpu-py36-ubuntu18.04',
+    job_type: str = 'TFJob'
+    pod_count: int
+    ecs_spec: str  # e.g., 'ecs.c6.large'
+    region: str
+    nas_data_source_id: str
+    access_key_id: str
+    access_key_secret: str
+    local_storage_mount_point: str
+    container_storage_mount_point: str
diff --git a/nni/experiment/config/training_services/frameworkcontroller.py b/nni/experiment/config/training_services/frameworkcontroller.py
new file mode 100644
index 0000000000000000000000000000000000000000..5676127174886b7c26598b56c51d31bf2fc67a8b
--- /dev/null
+++ b/nni/experiment/config/training_services/frameworkcontroller.py
@@ -0,0 +1,54 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Configuration for FrameworkController training service.
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `FrameworkController training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+..
_FrameworkController training service doc: https://nni.readthedocs.io/en/stable/TrainingService/FrameworkControllerMode.html + +""" + +__all__ = ['FrameworkControllerConfig', 'FrameworkControllerRoleConfig', 'FrameworkAttemptCompletionPolicy'] + +from dataclasses import dataclass +from typing import List, Optional, Union + +from ..base import ConfigBase +from ..training_service import TrainingServiceConfig +from .k8s_storage import K8sStorageConfig + +@dataclass(init=False) +class FrameworkAttemptCompletionPolicy(ConfigBase): + min_failed_task_count: int + min_succeed_task_count: int + +@dataclass(init=False) +class FrameworkControllerRoleConfig(ConfigBase): + name: str + docker_image: str = 'msranni/nni:latest' + task_number: int + command: str + gpu_number: int + cpu_number: int + memory_size: Union[str, int] + framework_attempt_completion_policy: FrameworkAttemptCompletionPolicy + +@dataclass(init=False) +class FrameworkControllerConfig(TrainingServiceConfig): + platform: str = 'frameworkcontroller' + storage: K8sStorageConfig + service_account_name: Optional[str] + task_roles: List[FrameworkControllerRoleConfig] + reuse_mode: Optional[bool] = True + + def _canonicalize(self, parents): + super()._canonicalize(parents) + # framework controller does not need these fields, set empty string for type check + if self.trial_command is None: + self.trial_command = '' diff --git a/nni/experiment/config/training_services/k8s_storage.py b/nni/experiment/config/training_services/k8s_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ad01ac3a3d3830a662d6fe091b1026968a69f3 --- /dev/null +++ b/nni/experiment/config/training_services/k8s_storage.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Storage config classes for ``KubeflowConfig`` and ``FrameworkControllerConfig`` +""" + +__all__ = ['K8sStorageConfig', 'K8sAzureStorageConfig', 'K8sNfsConfig'] + +from dataclasses import dataclass +from typing import Optional + +from ..base import ConfigBase + +@dataclass(init=False) +class K8sStorageConfig(ConfigBase): + storage_type: str + azure_account: Optional[str] = None + azure_share: Optional[str] = None + key_vault_name: Optional[str] = None + key_vault_key: Optional[str] = None + server: Optional[str] = None + path: Optional[str] = None + + def _validate_canonical(self): + super()._validate_canonical() + if self.storage_type == 'azureStorage': + assert self.server is None and self.path is None + elif self.storage_type == 'nfs': + assert self.azure_account is None and self.azure_share is None + assert self.key_vault_name is None and self.key_vault_key is None + else: + raise ValueError(f'Kubernetes storage_type ("{self.storage_type}") must either be "azureStorage" or "nfs"') + +@dataclass(init=False) +class K8sNfsConfig(K8sStorageConfig): + storage: str = 'nfs' + server: str + path: str + +@dataclass(init=False) +class K8sAzureStorageConfig(K8sStorageConfig): + storage: str = 'azureStorage' + azure_account: str + azure_share: str + key_vault_name: str + key_vault_key: str diff --git a/nni/experiment/config/training_services/kubeflow.py b/nni/experiment/config/training_services/kubeflow.py new file mode 100644 index 0000000000000000000000000000000000000000..15d1981698763bf9e5cad43f3b9619733dad623a --- /dev/null +++ b/nni/experiment/config/training_services/kubeflow.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Configuration for Kubeflow training service. 
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `Kubeflow training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+.. _Kubeflow training service doc: https://nni.readthedocs.io/en/stable/TrainingService/KubeflowMode.html
+
+"""
+
+__all__ = ['KubeflowConfig', 'KubeflowRoleConfig']
+
+from dataclasses import dataclass
+from typing import Optional, Union
+
+from ..base import ConfigBase
+from ..training_service import TrainingServiceConfig
+from .k8s_storage import K8sStorageConfig
+
+@dataclass(init=False)
+class KubeflowRoleConfig(ConfigBase):
+    replicas: int
+    command: str
+    gpu_number: Optional[int] = 0
+    cpu_number: int
+    memory_size: Union[str, int]
+    docker_image: str = 'msranni/nni:latest'
+    code_directory: str
+
+@dataclass(init=False)
+class KubeflowConfig(TrainingServiceConfig):
+    platform: str = 'kubeflow'
+    operator: str
+    api_version: str
+    storage: K8sStorageConfig
+    worker: Optional[KubeflowRoleConfig] = None
+    ps: Optional[KubeflowRoleConfig] = None
+    master: Optional[KubeflowRoleConfig] = None
+    reuse_mode: Optional[bool] = True  # set reuse mode as true for v2 config
+
+    def _canonicalize(self, parents):
+        super()._canonicalize(parents)
+        # kubeflow does not need these fields, set empty string for type check
+        if self.trial_command is None:
+            self.trial_command = ''
+        if self.trial_code_directory is None:
+            self.trial_code_directory = ''
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+        assert self.operator in ['tf-operator', 'pytorch-operator']
diff --git a/nni/experiment/config/training_services/local.py b/nni/experiment/config/training_services/local.py
new file mode 100644
index 0000000000000000000000000000000000000000..a284de8138885ffddced499b614a8a8638cdefd1
--- /dev/null
+++ b/nni/experiment/config/training_services/local.py
@@ -0,0 +1,47 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Configuration for local training service.
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `local training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+.. _local training service doc: https://nni.readthedocs.io/en/stable/TrainingService/LocalMode.html
+
+"""
+
+__all__ = ['LocalConfig']
+
+from dataclasses import dataclass
+from typing import List, Optional, Union
+
+from ..training_service import TrainingServiceConfig
+from .. import utils
+
+@dataclass(init=False)
+class LocalConfig(TrainingServiceConfig):
+    platform: str = 'local'
+    use_active_gpu: Optional[bool] = None
+    max_trial_number_per_gpu: int = 1
+    gpu_indices: Union[List[int], int, str, None] = None
+    reuse_mode: bool = False
+
+    def _canonicalize(self, parents):
+        super()._canonicalize(parents)
+        self.gpu_indices = utils.canonical_gpu_indices(self.gpu_indices)
+        self.nni_manager_ip = None
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+        utils.validate_gpu_indices(self.gpu_indices)
+        if self.trial_gpu_number and self.use_active_gpu is None:
+            raise ValueError(
+                'LocalConfig: please set use_active_gpu to True if your system has GUI, '
+                'or set it to False if the computer runs multiple experiments concurrently.'
+            )
+        if not self.trial_gpu_number and self.max_trial_number_per_gpu != 1:
+            raise ValueError('LocalConfig: max_trial_number_per_gpu does not work without trial_gpu_number')
diff --git a/nni/experiment/config/training_services/openpai.py b/nni/experiment/config/training_services/openpai.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb9784b2ff88515e247d1911b20ecb072f52fa69
--- /dev/null
+++ b/nni/experiment/config/training_services/openpai.py
@@ -0,0 +1,60 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Configuration for OpenPAI training service.
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `OpenPAI training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+.. _OpenPAI training service doc: https://nni.readthedocs.io/en/stable/TrainingService/PaiMode.html
+
+"""
+
+__all__ = ['OpenpaiConfig']
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, Optional, Union
+
+from ..training_service import TrainingServiceConfig
+from ..utils import PathLike
+
+@dataclass(init=False)
+class OpenpaiConfig(TrainingServiceConfig):
+    platform: str = 'openpai'
+    host: str
+    username: str
+    token: str
+    trial_cpu_number: int
+    trial_memory_size: Union[str, int]
+    storage_config_name: str
+    docker_image: str = 'msranni/nni:latest'
+    virtual_cluster: Optional[str]
+    local_storage_mount_point: PathLike
+    container_storage_mount_point: str
+    reuse_mode: bool = True
+
+    openpai_config: Optional[Dict] = None
+    openpai_config_file: Optional[PathLike] = None
+
+    def _canonicalize(self, parents):
+        super()._canonicalize(parents)
+        if '://' not in self.host:
+            self.host = 'https://' + self.host
+
+    def _validate_canonical(self) -> None:
+        super()._validate_canonical()
+        if self.trial_gpu_number is None:
+            raise ValueError('OpenpaiConfig: trial_gpu_number is not set')
+        if not Path(self.local_storage_mount_point).is_dir():
+            raise ValueError(
+                f'OpenpaiConfig: local_storage_mount_point "{self.local_storage_mount_point}" is not a directory'
+            )
+        if self.openpai_config is not None and self.openpai_config_file is not None:
+            raise ValueError('OpenpaiConfig: openpai_config and openpai_config_file cannot both be set')
+        if self.openpai_config_file is not None and not Path(self.openpai_config_file).is_file():
+            raise ValueError(f'OpenpaiConfig: openpai_config_file "{self.openpai_config_file}" is not a file')
diff --git a/nni/experiment/config/training_services/remote.py b/nni/experiment/config/training_services/remote.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a1a49d815a6b246ff76cb14bc580ee86f1ee9e4
--- /dev/null
+++ b/nni/experiment/config/training_services/remote.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Configuration for remote training service.
+
+Check the reference_ for explanation of each field.
+
+You may also want to check `remote training service doc`_.
+
+.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
+
+.. _remote training service doc: https://nni.readthedocs.io/en/stable/TrainingService/RemoteMachineMode.html
+
+"""
+
+__all__ = ['RemoteConfig', 'RemoteMachineConfig']
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Optional, Union
+import warnings
+
+from ..base import ConfigBase
+from ..training_service import TrainingServiceConfig
+from .. import utils
+
+@dataclass(init=False)
+class RemoteMachineConfig(ConfigBase):
+    host: str
+    port: int = 22
+    user: str
+    password: Optional[str] = None
+    ssh_key_file: Optional[utils.PathLike] = '~/.ssh/id_rsa'
+    ssh_passphrase: Optional[str] = None
+    use_active_gpu: bool = False
+    max_trial_number_per_gpu: int = 1
+    gpu_indices: Union[List[int], int, str, None] = None
+    python_path: Optional[str] = None
+
+    def _canonicalize(self, parents):
+        super()._canonicalize(parents)
+        if self.password is not None:
+            self.ssh_key_file = None
+        self.gpu_indices = utils.canonical_gpu_indices(self.gpu_indices)
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+
+        assert 0 < self.port < 65536
+        assert self.max_trial_number_per_gpu > 0
+        utils.validate_gpu_indices(self.gpu_indices)
+
+        if self.password is not None:
+            warnings.warn('SSH password will be exposed in web UI as plain text. We recommend using an SSH key file.')
+        elif not Path(self.ssh_key_file).is_file():
+            raise ValueError(
+                f'RemoteMachineConfig: You must either provide password or a valid SSH key file "{self.ssh_key_file}"'
+            )
+
+@dataclass(init=False)
+class RemoteConfig(TrainingServiceConfig):
+    platform: str = 'remote'
+    machine_list: List[RemoteMachineConfig]
+    reuse_mode: bool = True
+
+    def _validate_canonical(self):
+        super()._validate_canonical()
+        if not self.machine_list:
+            raise ValueError('RemoteConfig: must provide at least one machine in machine_list')
+        if not self.trial_gpu_number and any(machine.max_trial_number_per_gpu != 1 for machine in self.machine_list):
+            raise ValueError('RemoteConfig: max_trial_number_per_gpu does not work without trial_gpu_number')
diff --git a/nni/experiment/config/utils/__init__.py b/nni/experiment/config/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4b8b586d0953435188171ce60154e6e190380ee
--- /dev/null
+++ b/nni/experiment/config/utils/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Utility functions for experiment config classes.
+
+Check "public.py" to see which functions you can utilize.
+"""
+
+from .public import *
+from .internal import *
diff --git a/nni/experiment/config/utils/internal.py b/nni/experiment/config/utils/internal.py
new file mode 100644
index 0000000000000000000000000000000000000000..b34b46023dc8697c2c5bfb0beba66cd1438cbe9c
--- /dev/null
+++ b/nni/experiment/config/utils/internal.py
@@ -0,0 +1,174 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Utility functions for experiment config classes, internal part.
+
+If you are implementing a config class for a training service, it's unlikely you will need these.
+"""
+
+import dataclasses
+import importlib
+import json
+import os.path
+from pathlib import Path
+import socket
+
+import typeguard
+
+import nni.runtime.config
+
+from .public import is_missing
+
+## handle relative path ##
+
+_current_base_path = None
+
+def get_base_path():
+    if _current_base_path is None:
+        return Path()
+    return _current_base_path
+
+def set_base_path(path):
+    global _current_base_path
+    assert _current_base_path is None
+    _current_base_path = path
+
+def unset_base_path():
+    global _current_base_path
+    _current_base_path = None
+
+def resolve_path(path, base_path):
+    if path is None:
+        return None
+    # Path.resolve() does not work on Windows when the file does not exist, so use os.path instead
+    path = os.path.expanduser(path)
+    if not os.path.isabs(path):
+        path = os.path.join(base_path, path)
+    return str(os.path.realpath(path))  # it should already be str, but the official doc does not specify its type
+
+## field name case conversion ##
+
+def case_insensitive(key):
+    return key.lower().replace('_', '')
+
+def camel_case(key):
+    words = key.strip('_').split('_')
+    return words[0] + ''.join(word.title() for word in words[1:])
+
+## type hint utils ##
+
+def is_instance(value, type_hint):
+    try:
+        typeguard.check_type('_', value, type_hint)
+    except TypeError:
+        return False
+    return True
+
+def validate_type(config):
+    class_name = type(config).__name__
+    for field in dataclasses.fields(config):
+        value = getattr(config, field.name)
+        # check existence
+        if is_missing(value):
+            raise ValueError(f'{class_name}: {field.name} is not set')
+        if not is_instance(value, field.type):
+            raise ValueError(f'{class_name}: type of {field.name} ({repr(value)}) is not {field.type}')
+
+def is_path_like(type_hint):
+    # only `PathLike` and `Any` accept `Path`; check `int` to make sure it's not `Any`
+    return is_instance(Path(), type_hint) and not is_instance(1, type_hint)
+
+## type inference ##
+
+def guess_config_type(obj, type_hint):
+    ret = guess_list_config_type([obj], type_hint, _hint_list_item=True)
+    return ret[0] if ret else None
+
+def guess_list_config_type(objs, type_hint, _hint_list_item=False):
+    # avoid circular import
+    from ..base import ConfigBase
+    from ..training_service import TrainingServiceConfig
+
+    # because __init__ of subclasses might be complex, we first create empty objects to determine type
+    candidate_classes = []
+    for cls in _all_subclasses(ConfigBase):
+        if issubclass(cls, TrainingServiceConfig):  # training service configs are specially handled
+            continue
+        empty_list = [cls.__new__(cls)]
+        if _hint_list_item:
+            good_type = is_instance(empty_list[0], type_hint)
+        else:
+            good_type = is_instance(empty_list, type_hint)
+        if good_type:
+            candidate_classes.append(cls)
+
+    if not candidate_classes:  # it does not accept config type
+        return None
+    if len(candidate_classes) == 1:  # the type is confirmed, raise error if cannot convert to this type
+        return [candidate_classes[0](**obj) for obj in objs]
+
+    # multiple candidates available, call __init__ to further verify
+    candidate_configs = []
+    for cls in candidate_classes:
+        try:
+            configs = [cls(**obj) for obj in objs]
+        except Exception:
+            continue
+        candidate_configs.append(configs)
+
+    if not candidate_configs:
+        return None
+    if len(candidate_configs) == 1:
+        return candidate_configs[0]
+
+    # still have multiple candidates, choose the common base class
+    for base in candidate_configs:
+        base_class = type(base[0])
+        is_base = all(isinstance(configs[0], base_class) for configs in candidate_configs)
+        if is_base:
+            return base
+
+    return None  # cannot detect the type, give up
+
+def _all_subclasses(cls):
+    subclasses = set(cls.__subclasses__())
+    return subclasses.union(*[_all_subclasses(subclass) for subclass in subclasses])
+
+def training_service_config_factory(platform):
+    cls = _get_ts_config_class(platform)
+    if cls is None:
+        raise ValueError(f'Bad training service platform: {platform}')
+    return cls()
+
+def load_training_service_config(config):
+    if isinstance(config, dict) and 'platform' in config:
+        cls = _get_ts_config_class(config['platform'])
+        if cls is not None:
+            return cls(**config)
+    return config  # not valid json, don't touch
+
+def _get_ts_config_class(platform):
+    from ..training_service import TrainingServiceConfig  # avoid circular import
+
+    # import all custom config classes so they can be found in TrainingServiceConfig.__subclasses__()
+    custom_ts_config_path = nni.runtime.config.get_config_file('training_services.json')
+    with custom_ts_config_path.open() as config_file:
+        custom_ts_config = json.load(config_file)
+    for custom_ts_pkg in custom_ts_config.keys():
+        pkg = importlib.import_module(custom_ts_pkg)
+        _config_class = pkg.nni_training_service_info.config_class
+
+    for cls in TrainingServiceConfig.__subclasses__():
+        if cls.platform == platform:
+            return cls
+    return None
+
+## misc ##
+
+def get_ipv4_address():
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.connect(('192.0.2.0', 80))
+    addr = s.getsockname()[0]
+    s.close()
+    return addr
diff --git a/nni/experiment/config/utils/public.py b/nni/experiment/config/utils/public.py
new file mode 100644
index 0000000000000000000000000000000000000000..623a95ff646e6fc8ddadc34677438f66fba58872
--- /dev/null
+++ b/nni/experiment/config/utils/public.py
@@ -0,0 +1,68 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Utility functions for experiment config classes.
+"""
+
+import dataclasses
+import math
+from pathlib import Path
+from typing import Union
+
+PathLike = Union[Path, str]
+
+def is_missing(value):
+    """
+    Used to check whether a dataclass field has ever been assigned.
+
+    If a field without default value has never been assigned, it will have a special value ``MISSING``.
+    This function checks if the parameter is ``MISSING``.
+    """
+    # MISSING is not a singleton and there is no official API to check for it
+    return isinstance(value, type(dataclasses.MISSING))
+
+def canonical_gpu_indices(indices):
+    """
+    If ``indices`` is not None, cast it to a list of int.
+    """
+    if isinstance(indices, str):
+        return [int(idx) for idx in indices.split(',')]
+    if isinstance(indices, int):
+        return [indices]
+    return indices
+
+def validate_gpu_indices(indices):
+    if indices is None:
+        return
+    if len(set(indices)) != len(indices):
+        raise ValueError(f'Duplication detected in GPU indices {indices}')
+    if any(idx < 0 for idx in indices):
+        raise ValueError(f'Negative value detected in GPU indices {indices}')
+
+def parse_time(value):
+    """
+    If ``value`` is a string, convert it to an integral number of seconds.
+    """
+    return _parse_unit(value, 's', _time_units)
+
+def parse_memory_size(value):
+    """
+    If ``value`` is a string, convert it to an integral number of megabytes.
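+
+    For example, ``parse_memory_size('2gb')`` yields ``2048``.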
+ """ + return _parse_unit(value, 'mb', _size_units) + +_time_units = {'d': 24 * 3600, 'h': 3600, 'm': 60, 's': 1} +_size_units = {'tb': 1024 ** 4, 'gb': 1024 ** 3, 'mb': 1024 ** 2, 'kb': 1024, 'b': 1} + +def _parse_unit(value, target_unit, all_units): + if not isinstance(value, str): + return value + value = value.lower() + for unit, factor in all_units.items(): + if value.endswith(unit): + number = value[:-len(unit)] + value = float(number) * factor + return math.ceil(value / all_units[target_unit]) + supported_units = ', '.join(all_units.keys()) + raise ValueError(f'Bad unit in "{value}", supported units are {supported_units}') diff --git a/nni/experiment/data.py b/nni/experiment/data.py new file mode 100644 index 0000000000000000000000000000000000000000..d58f4671f6e7007f9967f1796ec12b212786e501 --- /dev/null +++ b/nni/experiment/data.py @@ -0,0 +1,135 @@ +from dataclasses import dataclass +import json +from typing import List + + +@dataclass +class TrialResult: + """ + TrialResult stores the result information of a trial job. + + Attributes + ---------- + parameter: dict + Hyper parameters for this trial. + value: serializable object, usually a number, or a dict with key "default" and other extra keys + Final result. + trialJobId: str + Trial job id. + """ + parameter: dict + value: dict + trialJobId: str + + def __init__(self, parameter: dict, value: str, trialJobId: str): + self.parameter = parameter + self.value = json.loads(value) + self.trialJobId = trialJobId + + +@dataclass +class TrialMetricData: + """ + TrialMetricData stores the metric data of a trial job. + A trial job may have both intermediate metric and final metric. + + Attributes + ---------- + timestamp: int + Time stamp. + trialJobId: str + Trial job id. + parameterId: int + Parameter id. + type: str + Metric type, `PERIODICAL` for intermediate result and `FINAL` for final result. + sequence: int + Sequence number in this trial. + data: serializable object, usually a number, or a dict with key "default" and other extra keys + Metric data. + """ + timestamp: int + trialJobId: str + parameterId: int + type: str + sequence: int + data: dict + + def __init__(self, timestamp: int, trialJobId: str, parameterId: int, type: str, sequence: int, data: str): # pylint: disable=W0622 + self.timestamp = timestamp + self.trialJobId = trialJobId + self.parameterId = parameterId + self.type = type + self.sequence = sequence + self.data = json.loads(json.loads(data)) + + +@dataclass +class TrialHyperParameters: + """ + TrialHyperParameters stores the hyper parameters of a trial job. + + Attributes + ---------- + parameter_id: int + Parameter id. + parameter_source: str + Parameter source. + parameters: dict + Hyper parameters. + parameter_index: int + Parameter index. + """ + parameter_id: int + parameter_source: str + parameters: dict + parameter_index: int + + +@dataclass +class TrialJob: + """ + TrialJob stores the information of a trial job. + + Attributes + ---------- + trialJobId: str + Trial job id. + status: str + Job status. + hyperParameters: list of `nni.experiment.TrialHyperParameters` + See `nni.experiment.TrialHyperParameters`. + logPath: str + Log path. + startTime: int + Job start time (timestamp). + endTime: int + Job end time (timestamp). + finalMetricData: list of `nni.experiment.TrialMetricData` + See `nni.experiment.TrialMetricData`. + stderrPath: str + Stderr log path. + sequenceId: int + Sequence Id. 
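+
+    Example (a hypothetical sketch; field values are illustrative only)::
+
+        job = TrialJob(trialJobId='Ab3de', status='SUCCEEDED',
+                       logPath='/tmp/nni-experiments/xyz/trials/Ab3de',
+                       startTime=1609459200000, sequenceId=0)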
+    """
+    trialJobId: str
+    status: str
+    hyperParameters: List[TrialHyperParameters]
+    logPath: str
+    startTime: int
+    endTime: int
+    finalMetricData: List[TrialMetricData]
+    stderrPath: str
+    sequenceId: int
+
+    def __init__(self, trialJobId: str, status: str, logPath: str, startTime: int, sequenceId: int,
+                 endTime: int = -1, stderrPath: str = '', hyperParameters: List = None, finalMetricData: List = None):
+        # use None instead of mutable default arguments ([]) to avoid sharing state across calls
+        self.trialJobId = trialJobId
+        self.status = status
+        self.hyperParameters = [TrialHyperParameters(**json.loads(e)) for e in (hyperParameters or [])]
+        self.logPath = logPath
+        self.startTime = startTime
+        self.endTime = endTime
+        self.finalMetricData = [TrialMetricData(**e) for e in (finalMetricData or [])]
+        self.stderrPath = stderrPath
+        self.sequenceId = sequenceId
diff --git a/nni/experiment/experiment.py b/nni/experiment/experiment.py
new file mode 100644
index 0000000000000000000000000000000000000000..57ff2356bf280419407ec5eb723686299a299112
--- /dev/null
+++ b/nni/experiment/experiment.py
@@ -0,0 +1,476 @@
+import atexit
+from enum import Enum
+import logging
+from pathlib import Path
+import socket
+from subprocess import Popen
+import time
+from typing import Optional, Union, List, overload, Any
+
+import colorama
+import psutil
+
+import nni.runtime.log
+
+from .config import ExperimentConfig
+from .data import TrialJob, TrialMetricData, TrialResult
+from . import launcher
+from . import management
+from . import rest
+from ..tools.nnictl.command_utils import kill_command
+
+_logger = logging.getLogger('nni.experiment')
+
+class RunMode(Enum):
+    """
+    Config lifecycle and output redirection of the NNI manager process.
+
+    - Background: stop NNI manager when the Python script exits; do not print NNI manager log. (default)
+    - Foreground: stop NNI manager when the Python script exits; print NNI manager log to stdout.
+    - Detach: do not stop NNI manager when the Python script exits.
+
+    NOTE:
+    This API is non-stable and is likely to get refactored in the next release.
+    NNI manager should treat log levels more seriously so we can default to "foreground" without being too verbose.
+    """
+    Background = 'background'
+    Foreground = 'foreground'
+    Detach = 'detach'
+
+class Experiment:
+    """
+    Create and stop an NNI experiment.
+
+    Attributes
+    ----------
+    config
+        Experiment configuration.
+    port
+        Web UI port of the experiment, or `None` if it is not running.
+    """
+
+    @overload
+    def __init__(self, config: ExperimentConfig) -> None:
+        """
+        Prepare an experiment.
+
+        Use `Experiment.run()` to launch it.
+
+        Parameters
+        ----------
+        config
+            Experiment configuration.
+        """
+        ...
+
+    @overload
+    def __init__(self, training_service: Union[str, List[str]]) -> None:
+        """
+        Prepare an experiment, leaving configuration fields to be set later.
+
+        Example usage::
+
+            experiment = Experiment('remote')
+            experiment.config.trial_command = 'python3 trial.py'
+            experiment.config.training_service.machine_list.append(RemoteMachineConfig(host=..., user=...))
+            ...
+            experiment.run(8080)
+
+        Parameters
+        ----------
+        training_service
+            Name of training service.
+            Supported values: "local", "remote", "openpai", "aml", "kubeflow", "frameworkcontroller", "adl" and hybrid training service.
+        """
+        ...
+
+    def __init__(self, config=None, training_service=None):
+        nni.runtime.log.init_logger_for_command_line()
+
+        self.config: Optional[ExperimentConfig] = None
+        self.id: str = management.generate_experiment_id()
+        self.port: Optional[int] = None
+        self._proc: Optional[Popen] = None
+        self.mode = 'new'
+        self.url_prefix: Optional[str] = None
+
+        args = [config, training_service]  # deal with overloading
+        if isinstance(args[0], (str, list)):
+            self.config = ExperimentConfig(args[0])
+        else:
+            self.config = args[0]
+
+    def start(self, port: int = 8080, debug: bool = False, run_mode: RunMode = RunMode.Background) -> None:
+        """
+        Start the experiment in background.
+
+        This method will raise an exception on failure.
+        If it returns, the experiment should have been successfully started.
+
+        Parameters
+        ----------
+        port
+            The port of web UI.
+        debug
+            Whether to start in debug mode.
+        """
+        if run_mode is not RunMode.Detach:
+            atexit.register(self.stop)
+
+        config = self.config.canonical_copy()
+        if config.use_annotation:
+            raise RuntimeError('NNI annotation is not supported by Python experiment API.')
+
+        if config.experiment_working_directory is not None:
+            log_dir = Path(config.experiment_working_directory, self.id, 'log')
+        else:  # this should never happen in the latest version, keep it until v2.7 for potential compatibility
+            log_dir = Path.home() / f'nni-experiments/{self.id}/log'
+        nni.runtime.log.start_experiment_log(self.id, log_dir, debug)
+
+        self._proc = launcher.start_experiment(self.mode, self.id, config, port, debug, run_mode, self.url_prefix)
+        assert self._proc is not None
+
+        self.port = port  # port will be None if startup failed
+
+        ips = [config.nni_manager_ip]
+        for interfaces in psutil.net_if_addrs().values():
+            for interface in interfaces:
+                if interface.family == socket.AF_INET:
+                    ips.append(interface.address)
+        ips = [f'http://{ip}:{port}' for ip in ips if ip]
+        msg = 'Web UI URLs: ' + colorama.Fore.CYAN + ' '.join(ips) + colorama.Style.RESET_ALL
+        _logger.info(msg)
+
+    def stop(self) -> None:
+        """
+        Stop the background experiment.
+        """
+        _logger.info('Stopping experiment, please wait...')
+        atexit.unregister(self.stop)
+
+        nni.runtime.log.stop_experiment_log(self.id)
+        if self._proc is not None:
+            try:
+                rest.delete(self.port, '/experiment', self.url_prefix)
+            except Exception as e:
+                _logger.exception(e)
+                _logger.warning('Cannot gracefully stop experiment, killing NNI process...')
+                kill_command(self._proc.pid)
+
+        self.id = None
+        self.port = None
+        self._proc = None
+        _logger.info('Experiment stopped')
+
+    def run(self, port: int = 8080, wait_completion: bool = True, debug: bool = False) -> bool:
+        """
+        Run the experiment.
+
+        If wait_completion is True, this function blocks until the experiment finishes or fails,
+        returning `True` when the experiment is done and `False` when it has failed.
+
+        If wait_completion is False, this function returns None immediately without blocking.
+        """
+        self.start(port, debug)
+        if wait_completion:
+            try:
+                while True:
+                    time.sleep(10)
+                    status = self.get_status()
+                    if status == 'DONE' or status == 'STOPPED':
+                        return True
+                    if status == 'ERROR':
+                        return False
+            except KeyboardInterrupt:
+                _logger.warning('KeyboardInterrupt detected')
+            finally:
+                self.stop()
+
+    @classmethod
+    def connect(cls, port: int):
+        """
+        Connect to an existing experiment.
+
+        Parameters
+        ----------
+        port
+            The port of web UI.
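+
+        Example (the port is hypothetical)::
+
+            experiment = Experiment.connect(8080)
+            print(experiment.get_status())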
+        """
+        experiment = Experiment()
+        experiment.port = port
+        experiment.id = experiment.get_experiment_profile().get('id')
+        status = experiment.get_status()
+        pid = experiment.get_experiment_metadata(experiment.id).get('pid')
+        if pid is None:
+            _logger.warning('Failed to get experiment pid; the experiment cannot be stopped by stop().')
+        else:
+            experiment._proc = psutil.Process(pid)
+        _logger.info('Connected to port %d successfully, experiment id is %s, status is %s.', port, experiment.id, status)
+        return experiment
+
+    @staticmethod
+    def resume(experiment_id: str, port: int = 8080, wait_completion: bool = True, debug: bool = False):
+        """
+        Resume a stopped experiment.
+
+        Parameters
+        ----------
+        experiment_id
+            The stopped experiment id.
+        port
+            The port of web UI.
+        wait_completion
+            If true, run in the foreground. If false, run in the background.
+        debug
+            Whether to start in debug mode.
+        """
+        experiment = Experiment._resume(experiment_id)
+        experiment.run(port=port, wait_completion=wait_completion, debug=debug)
+        if not wait_completion:
+            return experiment
+
+    @staticmethod
+    def view(experiment_id: str, port: int = 8080, non_blocking: bool = False):
+        """
+        View a stopped experiment.
+
+        Parameters
+        ----------
+        experiment_id
+            The stopped experiment id.
+        port
+            The port of web UI.
+        non_blocking
+            If false, run in the foreground. If true, run in the background.
+        """
+        experiment = Experiment._view(experiment_id)
+        experiment.start(port=port, debug=False)
+        if non_blocking:
+            return experiment
+        else:
+            try:
+                while True:
+                    time.sleep(10)
+            except KeyboardInterrupt:
+                _logger.warning('KeyboardInterrupt detected')
+            finally:
+                experiment.stop()
+
+    @staticmethod
+    def _resume(exp_id, exp_dir=None):
+        exp = Experiment()
+        exp.id = exp_id
+        exp.mode = 'resume'
+        exp.config = launcher.get_stopped_experiment_config(exp_id, exp_dir)
+        return exp
+
+    @staticmethod
+    def _view(exp_id, exp_dir=None):
+        exp = Experiment()
+        exp.id = exp_id
+        exp.mode = 'view'
+        exp.config = launcher.get_stopped_experiment_config(exp_id, exp_dir)
+        return exp
+
+    def get_status(self) -> str:
+        """
+        Return experiment status as a str.
+
+        Returns
+        -------
+        str
+            Experiment status.
+        """
+        resp = rest.get(self.port, '/check-status', self.url_prefix)
+        return resp['status']
+
+    def get_trial_job(self, trial_job_id: str):
+        """
+        Return a trial job.
+
+        Parameters
+        ----------
+        trial_job_id: str
+            Trial job id.
+
+        Returns
+        -------
+        TrialJob
+            A `TrialJob` instance corresponding to `trial_job_id`.
+        """
+        resp = rest.get(self.port, '/trial-jobs/{}'.format(trial_job_id), self.url_prefix)
+        return TrialJob(**resp)
+
+    def list_trial_jobs(self):
+        """
+        Return information for all trial jobs as a list.
+
+        Returns
+        -------
+        list
+            List of `TrialJob`.
+        """
+        resp = rest.get(self.port, '/trial-jobs', self.url_prefix)
+        return [TrialJob(**trial_job) for trial_job in resp]
+
+    def get_job_statistics(self):
+        """
+        Return trial job statistics information as a dict.
+
+        Returns
+        -------
+        dict
+            Job statistics information.
+        """
+        resp = rest.get(self.port, '/job-statistics', self.url_prefix)
+        return resp
+
+    def get_job_metrics(self, trial_job_id=None):
+        """
+        Return trial job metrics.
+
+        Parameters
+        ----------
+        trial_job_id: str
+            Trial job id. If this parameter is None, all trial jobs' metrics will be returned.
+
+        Returns
+        -------
+        dict
+            Each key is a trialJobId, the corresponding value is a list of `TrialMetricData`.
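+
+        Example (a sketch; assumes a running experiment object)::
+
+            metrics = experiment.get_job_metrics()
+            for trial_id, metric_list in metrics.items():
+                print(trial_id, [m.data for m in metric_list])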
+        """
+        api = '/metric-data/{}'.format(trial_job_id) if trial_job_id else '/metric-data'
+        resp = rest.get(self.port, api, self.url_prefix)
+        metric_dict = {}
+        for metric in resp:
+            trial_id = metric["trialJobId"]
+            if trial_id not in metric_dict:
+                metric_dict[trial_id] = [TrialMetricData(**metric)]
+            else:
+                metric_dict[trial_id].append(TrialMetricData(**metric))
+        return metric_dict
+
+    def get_experiment_profile(self):
+        """
+        Return experiment profile as a dict.
+
+        Returns
+        -------
+        dict
+            The profile of the experiment.
+        """
+        resp = rest.get(self.port, '/experiment', self.url_prefix)
+        return resp
+
+    def get_experiment_metadata(self, exp_id: str):
+        """
+        Return experiment metadata with specified exp_id as a dict.
+
+        Returns
+        -------
+        dict
+            The specified experiment metadata.
+        """
+        experiments_metadata = self.get_all_experiments_metadata()
+        for metadata in experiments_metadata:
+            if metadata['id'] == exp_id:
+                return metadata
+        return {}
+
+    def get_all_experiments_metadata(self):
+        """
+        Return all experiments metadata as a list.
+
+        Returns
+        -------
+        list
+            The experiments metadata.
+        """
+        resp = rest.get(self.port, '/experiments-info', self.url_prefix)
+        return resp
+
+    def export_data(self):
+        """
+        Return exported information for all trial jobs.
+
+        Returns
+        -------
+        list
+            List of `TrialResult`.
+        """
+        resp = rest.get(self.port, '/export-data', self.url_prefix)
+        return [TrialResult(**trial_result) for trial_result in resp]
+
+    def _get_query_type(self, key: str):
+        if key == 'trialConcurrency':
+            return '?update_type=TRIAL_CONCURRENCY'
+        if key == 'maxExecDuration':
+            return '?update_type=MAX_EXEC_DURATION'
+        if key == 'searchSpace':
+            return '?update_type=SEARCH_SPACE'
+        if key == 'maxTrialNum':
+            return '?update_type=MAX_TRIAL_NUM'
+
+    def _update_experiment_profile(self, key: str, value: Any):
+        """
+        Update an experiment's profile.
+
+        Parameters
+        ----------
+        key: str
+            One of `['trialConcurrency', 'maxExecDuration', 'searchSpace', 'maxTrialNum']`.
+        value: Any
+            New value of the key.
+        """
+        api = '/experiment{}'.format(self._get_query_type(key))
+        experiment_profile = self.get_experiment_profile()
+        experiment_profile['params'][key] = value
+        rest.put(self.port, api, experiment_profile, self.url_prefix)
+        _logger.info('Successfully updated %s.', key)
+
+    def update_trial_concurrency(self, value: int):
+        """
+        Update an experiment's trial_concurrency.
+
+        Parameters
+        ----------
+        value: int
+            New trial_concurrency value.
+        """
+        self._update_experiment_profile('trialConcurrency', value)
+
+    def update_max_experiment_duration(self, value: str):
+        """
+        Update an experiment's max_experiment_duration.
+
+        Parameters
+        ----------
+        value: str
+            Strings like '1m' for one minute or '2h' for two hours.
+            SUFFIX may be 's' for seconds, 'm' for minutes, 'h' for hours or 'd' for days.
+        """
+        self._update_experiment_profile('maxExecDuration', value)
+
+    def update_search_space(self, value: dict):
+        """
+        Update the experiment's search_space.
+        TODO: support searchspace file.
+
+        Parameters
+        ----------
+        value: dict
+            New search_space.
+        """
+        self._update_experiment_profile('searchSpace', value)
+
+    def update_max_trial_number(self, value: int):
+        """
+        Update an experiment's max_trial_number.
+
+        Parameters
+        ----------
+        value: int
+            New max_trial_number value.
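+
+        Example (the value is illustrative)::
+
+            experiment.update_max_trial_number(100)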
+        """
+        self._update_experiment_profile('maxTrialNum', value)
diff --git a/nni/experiment/launcher.py b/nni/experiment/launcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c48cd20118b220bc772a026aff406d4d2d3397b
--- /dev/null
+++ b/nni/experiment/launcher.py
@@ -0,0 +1,278 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import contextlib
+from dataclasses import dataclass, fields
+from datetime import datetime
+import logging
+import os.path
+from pathlib import Path
+import socket
+from subprocess import Popen
+import sys
+import time
+from typing import Optional, Tuple, List, Any
+
+import colorama
+
+import nni_node  # pylint: disable=wrong-import-order, import-error
+import nni.runtime.protocol
+
+from .config import ExperimentConfig
+from .pipe import Pipe
+from . import rest
+from ..tools.nnictl.config_utils import Experiments, Config
+from ..tools.nnictl.nnictl_utils import update_experiment
+
+_logger = logging.getLogger('nni.experiment')
+
+@dataclass(init=False)
+class NniManagerArgs:
+    port: int
+    experiment_id: str
+    start_mode: str  # new or resume
+    mode: str  # training service platform
+    log_dir: str
+    log_level: str
+    readonly: bool = False
+    foreground: bool = False
+    url_prefix: Optional[str] = None
+    dispatcher_pipe: Optional[str] = None
+
+    def __init__(self, action, exp_id, config, port, debug, foreground, url_prefix):
+        self.port = port
+        self.experiment_id = exp_id
+        self.foreground = foreground
+        self.url_prefix = url_prefix
+        self.log_dir = config.experiment_working_directory
+
+        if isinstance(config.training_service, list):
+            self.mode = 'hybrid'
+        else:
+            self.mode = config.training_service.platform
+
+        self.log_level = config.log_level
+        if debug and self.log_level not in ['debug', 'trace']:
+            self.log_level = 'debug'
+
+        if action == 'resume':
+            self.start_mode = 'resume'
+        elif action == 'view':
+            self.start_mode = 'resume'
+            self.readonly = True
+        else:
+            self.start_mode = 'new'
+
+    def to_command_line_args(self):
+        ret = []
+        for field in fields(self):
+            value = getattr(self, field.name)
+            if value is not None:
+                ret.append('--' + field.name)
+                if isinstance(value, bool):
+                    ret.append(str(value).lower())
+                else:
+                    ret.append(str(value))
+        return ret
+
+def start_experiment(action, exp_id, config, port, debug, run_mode, url_prefix):
+    foreground = run_mode.value == 'foreground'
+    nni_manager_args = NniManagerArgs(action, exp_id, config, port, debug, foreground, url_prefix)
+
+    _ensure_port_idle(port)
+    websocket_platforms = ['hybrid', 'remote', 'openpai', 'kubeflow', 'frameworkcontroller', 'adl']
+    if action != 'view' and nni_manager_args.mode in websocket_platforms:
+        _ensure_port_idle(port + 1, f'{nni_manager_args.mode} requires an additional port')
+
+    proc = None
+    try:
+        _logger.info(
+            'Creating experiment, Experiment ID: %s', colorama.Fore.CYAN + exp_id + colorama.Style.RESET_ALL
+        )
+        proc = _start_rest_server(nni_manager_args, run_mode)
+        start_time = int(time.time() * 1000)
+
+        _logger.info('Starting web server...')
+        _check_rest_server(port, url_prefix=url_prefix)
+
+        Experiments().add_experiment(
+            exp_id,
+            port,
+            start_time,
+            nni_manager_args.mode,
+            config.experiment_name,
+            pid=proc.pid,
+            logDir=config.experiment_working_directory,
+            tag=[],
+        )
+
+        _logger.info('Setting up...')
+        rest.post(port, '/experiment', config.json(), url_prefix)
+
+        return proc
+
+    except Exception as e:
+        _logger.error('Create experiment failed')
+        if proc is not None:
+            with contextlib.suppress(Exception):
+                proc.kill()
+        raise e
+
+def _start_rest_server(nni_manager_args, run_mode) -> Popen:
+    node_dir = Path(nni_node.__path__[0])
+    node = str(node_dir / ('node.exe' if sys.platform == 'win32' else 'node'))
+    main_js = str(node_dir / 'main.js')
+    cmd = [node, '--max-old-space-size=4096', main_js]
+    cmd += nni_manager_args.to_command_line_args()
+
+    if run_mode.value == 'detach':
+        log = Path(nni_manager_args.log_dir, nni_manager_args.experiment_id, 'log')
+        out = (log / 'nnictl_stdout.log').open('a')
+        err = (log / 'nnictl_stderr.log').open('a')
+        header = f'Experiment {nni_manager_args.experiment_id} start: {datetime.now()}'
+        header = '-' * 80 + '\n' + header + '\n' + '-' * 80 + '\n'
+        out.write(header)
+        err.write(header)
+
+    else:
+        out = None
+        err = None
+
+    if sys.platform == 'win32':
+        from subprocess import CREATE_NEW_PROCESS_GROUP
+        return Popen(cmd, stdout=out, stderr=err, cwd=node_dir, creationflags=CREATE_NEW_PROCESS_GROUP)
+    else:
+        return Popen(cmd, stdout=out, stderr=err, cwd=node_dir, preexec_fn=os.setpgrp)
+
+
+def start_experiment_retiarii(exp_id: str, config: ExperimentConfig, port: int, debug: bool) -> Tuple[Popen, Pipe]:
+    pipe = None
+    proc = None
+
+    config.validate(initialized_tuner=True)
+    _ensure_port_idle(port)
+    if isinstance(config.training_service, list):  # hybrid training service
+        _ensure_port_idle(port + 1, 'Hybrid training service requires an additional port')
+    elif config.training_service.platform in ['remote', 'openpai', 'kubeflow', 'frameworkcontroller', 'adl']:
+        _ensure_port_idle(port + 1, f'{config.training_service.platform} requires an additional port')
+
+    try:
+        _logger.info('Creating experiment, Experiment ID: %s', colorama.Fore.CYAN + exp_id + colorama.Style.RESET_ALL)
+        pipe = Pipe(exp_id)
+        start_time, proc = _start_rest_server_retiarii(config, port, debug, exp_id, pipe.path)
+        _logger.info('Connecting IPC pipe...')
+        pipe_file = pipe.connect()
+        nni.runtime.protocol._in_file = pipe_file
+        nni.runtime.protocol._out_file = pipe_file
+        _logger.info('Starting web server...')
+        _check_rest_server(port)
+        platform = 'hybrid' if isinstance(config.training_service, list) else config.training_service.platform
+        _save_experiment_information(exp_id, port, start_time, platform,
+                                     config.experiment_name, proc.pid, config.experiment_working_directory, ['retiarii'])
+        _logger.info('Setting up...')
+        rest.post(port, '/experiment', config.json())
+        return proc, pipe
+
+    except Exception as e:
+        _logger.error('Create experiment failed')
+        if proc is not None:
+            with contextlib.suppress(Exception):
+                proc.kill()
+        if pipe is not None:
+            with contextlib.suppress(Exception):
+                pipe.close()
+        raise e
+
+def _ensure_port_idle(port: int, message: Optional[str] = None) -> None:
+    sock = socket.socket()
+    if sock.connect_ex(('localhost', port)) == 0:
+        sock.close()
+        message = f'({message})' if message else ''
+        raise RuntimeError(f'Port {port} is not idle {message}')
+
+
+def _start_rest_server_retiarii(config: ExperimentConfig, port: int, debug: bool, experiment_id: str,
+                                pipe_path: str = None, mode: str = 'new') -> Tuple[int, Popen]:
+    if isinstance(config.training_service, list):
+        ts = 'hybrid'
+    else:
+        ts = config.training_service.platform
+    if ts == 'openpai':
+        ts = 'pai'
+
+    args = {
+        'port': port,
+        'mode': ts,
+        'experiment_id': experiment_id,
+        'start_mode': mode,
+        'log_dir': config.experiment_working_directory,
+        'log_level': 'debug' if debug else 'info'
+    }
+    if pipe_path is not None:
+        args['dispatcher_pipe'] = pipe_path
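+        # NOTE (assumption): the dispatcher pipe is only passed when the dispatcher (tuner)
+        # runs in the same Python process, as in the Retiarii launch path above.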
+ + if mode == 'view': + args['start_mode'] = 'resume' + args['readonly'] = 'true' + + node_dir = Path(nni_node.__path__[0]) + node = str(node_dir / ('node.exe' if sys.platform == 'win32' else 'node')) + main_js = str(node_dir / 'main.js') + cmd = [node, '--max-old-space-size=4096', main_js] + for arg_key, arg_value in args.items(): + cmd.append('--' + arg_key) + cmd.append(str(arg_value)) + + if sys.platform == 'win32': + from subprocess import CREATE_NEW_PROCESS_GROUP + proc = Popen(cmd, cwd=node_dir, creationflags=CREATE_NEW_PROCESS_GROUP) + else: + if pipe_path is None: + import os + proc = Popen(cmd, cwd=node_dir, preexec_fn=os.setpgrp) + else: + proc = Popen(cmd, cwd=node_dir) + return int(time.time() * 1000), proc + + +def _check_rest_server(port: int, retry: int = 3, url_prefix: Optional[str] = None) -> None: + for i in range(retry): + with contextlib.suppress(Exception): + rest.get(port, '/check-status', url_prefix) + return + if i > 0: + _logger.warning('Timeout, retry...') + time.sleep(1) + rest.get(port, '/check-status', url_prefix) + + +def _save_experiment_information(experiment_id: str, port: int, start_time: int, platform: str, + name: str, pid: int, logDir: str, tag: List[Any]) -> None: + experiments_config = Experiments() + experiments_config.add_experiment(experiment_id, port, start_time, platform, name, pid=pid, logDir=logDir, tag=tag) + + +def get_stopped_experiment_config(exp_id, exp_dir=None): + config_json = get_stopped_experiment_config_json(exp_id, exp_dir) + config = ExperimentConfig(**config_json) + if exp_dir and not os.path.samefile(exp_dir, config.experiment_working_directory): + msg = 'Experiment working directory provided in command line (%s) is different from experiment config (%s)' + _logger.warning(msg, exp_dir, config.experiment_working_directory) + config.experiment_working_directory = exp_dir + return config + +def get_stopped_experiment_config_json(exp_id, exp_dir=None): + if exp_dir: + return Config(exp_id, exp_dir).get_config() + else: + update_experiment() + experiments_config = Experiments() + experiments_dict = experiments_config.get_all_experiments() + experiment_metadata = experiments_dict.get(exp_id) + if experiment_metadata is None: + _logger.error('Id %s not exist!', exp_id) + return None + if experiment_metadata['status'] != 'STOPPED': + _logger.error('Only stopped experiments can be resumed or viewed!') + return None + return Config(exp_id, experiment_metadata['logDir']).get_config() diff --git a/nni/experiment/management.py b/nni/experiment/management.py new file mode 100644 index 0000000000000000000000000000000000000000..b15c4d6d2561cc678574fc197a95d1d1d1ffafe3 --- /dev/null +++ b/nni/experiment/management.py @@ -0,0 +1,16 @@ +from pathlib import Path +import random +import string + + +def generate_experiment_id() -> str: + return ''.join(random.sample(string.ascii_lowercase + string.digits, 8)) + + +def create_experiment_directory(experiment_id: str) -> Path: + path = Path.home() / 'nni-experiments' / experiment_id + path.mkdir(parents=True, exist_ok=True) + return path + + +# TODO: port shangning's work here, and use it in Experiment.start()/.stop() diff --git a/nni/experiment/pipe.py b/nni/experiment/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..e59fd8270bd74b1735cde9bae2983875b85f74e0 --- /dev/null +++ b/nni/experiment/pipe.py @@ -0,0 +1,72 @@ +from io import BufferedIOBase +import logging +import os +import sys + +_logger = logging.getLogger(__name__) + +if sys.platform == 'win32': + import _winapi + 
import msvcrt + + class WindowsPipe: + def __init__(self, experiment_id: str): + self.path: str = r'\\.\pipe\nni-' + experiment_id + self.file = None + + self._handle = _winapi.CreateNamedPipe( + self.path, + _winapi.PIPE_ACCESS_DUPLEX, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, + 1, + 8192, + 8192, + 0, + _winapi.NULL + ) + + def connect(self) -> BufferedIOBase: + _winapi.ConnectNamedPipe(self._handle, _winapi.NULL) + fd = msvcrt.open_osfhandle(self._handle, 0) + self.file = os.fdopen(fd, 'w+b') + return self.file + + def close(self) -> None: + try: + if self.file is not None: + self.file.close() + except Exception as e: + _logger.debug('Error on closing Windows pipe: %s', e) + + Pipe = WindowsPipe + + +else: + import socket + + from . import management + + class UnixPipe: + def __init__(self, experiment_id: str): + self.path: str = str(management.create_experiment_directory(experiment_id) / 'dispatcher-pipe') + self.file = None + + self._socket = socket.socket(socket.AF_UNIX) + self._socket.bind(self.path) + self._socket.listen(1) # only accepts one connection + + def connect(self) -> BufferedIOBase: + conn, _ = self._socket.accept() + self.file = conn.makefile('rwb') + return self.file + + def close(self) -> None: + try: + if self.file is not None: + self.file.close() + self._socket.close() + os.unlink(self.path) + except Exception as e: + _logger.debug('Error on closing POSIX pipe: %s', e) + + Pipe = UnixPipe diff --git a/nni/experiment/rest.py b/nni/experiment/rest.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1c1ef43a2661cf53fbcf6113afeb18718ecfbd --- /dev/null +++ b/nni/experiment/rest.py @@ -0,0 +1,44 @@ +import logging +from typing import Any, Optional + +import requests + +_logger = logging.getLogger(__name__) + +timeout = 20 + +def request(method: str, port: Optional[int], api: str, data: Any = None, prefix: Optional[str] = None) -> Any: + if port is None: + raise RuntimeError('Experiment is not running') + + url_parts = [ + f'http://localhost:{port}', + prefix, + 'api/v1/nni', + api + ] + url = '/'.join(part.strip('/') for part in url_parts if part) + + if data is None: + resp = requests.request(method, url, timeout=timeout) + else: + resp = requests.request(method, url, json=data, timeout=timeout) + + if not resp.ok: + _logger.error('rest request %s %s failed: %s %s', method.upper(), url, resp.status_code, resp.text) + resp.raise_for_status() + + if method.lower() in ['get', 'post'] and len(resp.content) > 0: + return resp.json() + +def get(port: Optional[int], api: str, prefix: Optional[str] = None) -> Any: + return request('get', port, api, prefix=prefix) + +def post(port: Optional[int], api: str, data: Any, prefix: Optional[str] = None) -> Any: + return request('post', port, api, data, prefix=prefix) + +def put(port: Optional[int], api: str, data: Any, prefix: Optional[str] = None) -> None: + request('put', port, api, data, prefix=prefix) + +def delete(port: Optional[int], api: str, prefix: Optional[str] = None) -> None: + request('delete', port, api, prefix=prefix) diff --git a/nni/feature_engineering/__init__.py b/nni/feature_engineering/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/nni/feature_engineering/__init__.py @@ -0,0 +1 @@ + diff --git a/nni/feature_engineering/feature_selector.py b/nni/feature_engineering/feature_selector.py new file mode 100644 index 
0000000000000000000000000000000000000000..32021bfb29931f459011edd4ceff3b5e2f899c99
--- /dev/null
+++ b/nni/feature_engineering/feature_selector.py
@@ -0,0 +1,59 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish, distribute,
+# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or
+# substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# ==================================================================================================
+
+import logging
+
+_logger = logging.getLogger(__name__)
+
+
+class FeatureSelector():
+
+    def __init__(self, **kwargs):
+        self.selected_features_ = None
+        self.X = None
+        self.y = None
+
+
+    def fit(self, X, y, **kwargs):
+        """
+        Fit the training data to FeatureSelector.
+
+        Parameters
+        ----------
+        X : array-like numpy matrix
+            The training input samples, with shape [n_samples, n_features].
+        y : array-like numpy matrix
+            The target values (class labels in classification, real numbers in
+            regression), with shape [n_samples].
+        """
+        self.X = X
+        self.y = y
+
+
+    def get_selected_features(self):
+        """
+        Get the indices of the features selected by the selector.
+
+        Returns
+        -------
+        list :
+            Indices of the important (selected) features.
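+
+        Example (a minimal sketch with a hypothetical subclass and data)::
+
+            selector = SomeFeatureSelector()
+            selector.fit(X_train, y_train)
+            print(selector.get_selected_features())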
+ """ + return self.selected_features_ diff --git a/nni/nas/__init__.py b/nni/nas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/nas/benchmarks/__init__.py b/nni/nas/benchmarks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33f53dde9c924e5725cecc4ebe0bac10d66281a7 --- /dev/null +++ b/nni/nas/benchmarks/__init__.py @@ -0,0 +1 @@ +from .utils import load_benchmark, download_benchmark diff --git a/nni/nas/benchmarks/constants.py b/nni/nas/benchmarks/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..045a0e8b20250700488f950c19501572e17dd331 --- /dev/null +++ b/nni/nas/benchmarks/constants.py @@ -0,0 +1,23 @@ +import os + + +ENV_NASBENCHMARK_DIR = 'NASBENCHMARK_DIR' +ENV_NNI_HOME = 'NNI_HOME' +ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' +DEFAULT_CACHE_DIR = '~/.cache' + + +def _get_nasbenchmark_dir(): + nni_home = os.path.expanduser( + os.getenv(ENV_NNI_HOME, + os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'nni'))) + return os.getenv(ENV_NASBENCHMARK_DIR, os.path.join(nni_home, 'nasbenchmark')) + + +DATABASE_DIR = _get_nasbenchmark_dir() + +DB_URLS = { + 'nasbench101': 'https://nni.blob.core.windows.net/nasbenchmark/nasbench101-209f5694.db', + 'nasbench201': 'https://nni.blob.core.windows.net/nasbenchmark/nasbench201-b2b60732.db', + 'nds': 'https://nni.blob.core.windows.net/nasbenchmark/nds-5745c235.db' +} diff --git a/nni/nas/benchmarks/download.py b/nni/nas/benchmarks/download.py new file mode 100644 index 0000000000000000000000000000000000000000..40da6d573d1ccf38bf5b7c78becb3cee9f21f431 --- /dev/null +++ b/nni/nas/benchmarks/download.py @@ -0,0 +1,10 @@ +import argparse + +if __name__ == '__main__': + parser = argparse.ArgumentParser('NAS benchmark downloader') + parser.add_argument('benchmark_name', choices=['nasbench101', 'nasbench201', 'nds']) + + args = parser.parse_args() + + from .utils import download_benchmark + download_benchmark(args.benchmark_name) diff --git a/nni/nas/benchmarks/nasbench101/__init__.py b/nni/nas/benchmarks/nasbench101/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9aad0552cb9520d82685e28b0269ae85c54595e --- /dev/null +++ b/nni/nas/benchmarks/nasbench101/__init__.py @@ -0,0 +1,3 @@ +from .constants import INPUT, OUTPUT, CONV3X3_BN_RELU, CONV1X1_BN_RELU, MAXPOOL3X3 +from .model import Nb101TrialStats, Nb101IntermediateStats, Nb101TrialConfig +from .query import query_nb101_trial_stats diff --git a/nni/nas/benchmarks/nasbench101/constants.py b/nni/nas/benchmarks/nasbench101/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..c2769f497f4dea6f0b5e1541d2413a8475866f15 --- /dev/null +++ b/nni/nas/benchmarks/nasbench101/constants.py @@ -0,0 +1,14 @@ +INPUT = 'input' +OUTPUT = 'output' +CONV3X3_BN_RELU = 'conv3x3-bn-relu' +CONV1X1_BN_RELU = 'conv1x1-bn-relu' +MAXPOOL3X3 = 'maxpool3x3' + + +LABEL2ID = { + INPUT: -1, + OUTPUT: -2, + CONV3X3_BN_RELU: 0, + CONV1X1_BN_RELU: 1, + MAXPOOL3X3: 2 +} diff --git a/nni/nas/benchmarks/nasbench101/db_gen.py b/nni/nas/benchmarks/nasbench101/db_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..9c3498d7a6bf645766d34aebf6112a6351109ad0 --- /dev/null +++ b/nni/nas/benchmarks/nasbench101/db_gen.py @@ -0,0 +1,56 @@ +import argparse + +from tqdm import tqdm +from nasbench import api # pylint: disable=import-error + +from nni.nas.benchmarks.utils import load_benchmark +from .model import 
Nb101TrialConfig, Nb101TrialStats, Nb101IntermediateStats
+from .graph_util import nasbench_format_to_architecture_repr, hash_module
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('input_file',
+                        help='Path to the file to be converted, e.g., nasbench_full.tfrecord')
+    args = parser.parse_args()
+    nasbench = api.NASBench(args.input_file)
+
+    db = load_benchmark('nasbench101')
+    with db:
+        db.create_tables([Nb101TrialConfig, Nb101TrialStats, Nb101IntermediateStats])
+        for hashval in tqdm(nasbench.hash_iterator(), desc='Dumping data into database'):
+            metadata, metrics = nasbench.get_metrics_from_hash(hashval)
+            num_vertices, architecture = nasbench_format_to_architecture_repr(
+                metadata['module_adjacency'], metadata['module_operations'])
+            assert hashval == hash_module(architecture, num_vertices)
+            for epochs in [4, 12, 36, 108]:
+                trial_config = Nb101TrialConfig.create(
+                    arch=architecture,
+                    num_vertices=num_vertices,
+                    hash=hashval,
+                    num_epochs=epochs
+                )
+
+                for seed in range(3):
+                    cur = metrics[epochs][seed]
+                    trial = Nb101TrialStats.create(
+                        config=trial_config,
+                        train_acc=cur['final_train_accuracy'] * 100,
+                        valid_acc=cur['final_validation_accuracy'] * 100,
+                        test_acc=cur['final_test_accuracy'] * 100,
+                        parameters=metadata['trainable_parameters'] / 1e6,
+                        training_time=cur['final_training_time'] * 60
+                    )
+                    for t in ['halfway', 'final']:
+                        Nb101IntermediateStats.create(
+                            trial=trial,
+                            current_epoch=epochs // 2 if t == 'halfway' else epochs,
+                            training_time=cur[t + '_training_time'],
+                            train_acc=cur[t + '_train_accuracy'] * 100,
+                            valid_acc=cur[t + '_validation_accuracy'] * 100,
+                            test_acc=cur[t + '_test_accuracy'] * 100
+                        )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/nni/nas/benchmarks/nasbench101/graph_util.py b/nni/nas/benchmarks/nasbench101/graph_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..10805685fec3ff7359ec39dc0ae1c019e67950ae
--- /dev/null
+++ b/nni/nas/benchmarks/nasbench101/graph_util.py
@@ -0,0 +1,111 @@
+import hashlib
+
+import numpy as np
+
+from .constants import INPUT, LABEL2ID, OUTPUT
+
+
+def _labeling_from_architecture(architecture, vertices):
+    return [INPUT] + [architecture['op{}'.format(i)] for i in range(1, vertices - 1)] + [OUTPUT]
+
+
+def _adjancency_matrix_from_architecture(architecture, vertices):
+    matrix = np.zeros((vertices, vertices), dtype=bool)  # np.bool is deprecated; use the builtin
+    for i in range(1, vertices):
+        for k in architecture['input{}'.format(i)]:
+            matrix[k, i] = 1
+    return matrix
+
+
+def nasbench_format_to_architecture_repr(adjacency_matrix, labeling):
+    """
+    Convert a NAS-Bench-101 adjacency-matrix-and-labeling pair into the NNI architecture representation.
+
+    Parameters
+    ----------
+    adjacency_matrix : np.ndarray
+        A 2D array of shape NxN, where N is the number of vertices.
+        ``matrix[u][v]`` is 1 if there is a direct edge from `u` to `v`,
+        otherwise it will be 0.
+    labeling : list of str
+        A list of str that starts with input and ends with output. The intermediate
+        nodes are chosen from candidate operators.
+
+    Returns
+    -------
+    tuple of int and dict
+        Converted number of vertices and architecture.
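+
+    For a 3-vertex chain (input -> conv3x3-bn-relu -> output), a hypothetical result
+    would be ``(3, {'op1': 'conv3x3-bn-relu', 'input1': [0], 'input2': [1]})``.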
+    """
+    num_vertices = adjacency_matrix.shape[0]
+    assert len(labeling) == num_vertices
+    architecture = {}
+    for i in range(1, num_vertices - 1):
+        architecture['op{}'.format(i)] = labeling[i]
+        assert labeling[i] not in [INPUT, OUTPUT]
+    for i in range(1, num_vertices):
+        architecture['input{}'.format(i)] = [k for k in range(i) if adjacency_matrix[k, i]]
+    return num_vertices, architecture
+
+
+def infer_num_vertices(architecture):
+    """
+    Infer number of vertices from an architecture dict.
+
+    Parameters
+    ----------
+    architecture : dict
+        Architecture in NNI format.
+
+    Returns
+    -------
+    int
+        Number of vertices.
+    """
+    op_keys = set([k for k in architecture.keys() if k.startswith('op')])
+    intermediate_vertices = len(op_keys)
+    assert op_keys == {'op{}'.format(i) for i in range(1, intermediate_vertices + 1)}
+    return intermediate_vertices + 2
+
+
+def hash_module(architecture, vertices):
+    """
+    Computes a graph-invariance MD5 hash of the matrix and label pair.
+    This snippet is modified from code in the NAS-Bench-101 repo.
+
+    Parameters
+    ----------
+    architecture : dict
+        Architecture in NNI format.
+    vertices : int
+        Number of vertices in the architecture.
+
+    Returns
+    -------
+    str
+        MD5 hash of the matrix and labeling derived from the architecture.
+    """
+    labeling = _labeling_from_architecture(architecture, vertices)
+    labeling = [LABEL2ID[t] for t in labeling]
+    matrix = _adjancency_matrix_from_architecture(architecture, vertices)
+    in_edges = np.sum(matrix, axis=0).tolist()
+    out_edges = np.sum(matrix, axis=1).tolist()
+
+    assert len(in_edges) == len(out_edges) == len(labeling)
+    hashes = list(zip(out_edges, in_edges, labeling))
+    hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
+    # Computing this up to the diameter is probably sufficient but since the
+    # operation is fast, it is okay to repeat more times.
+    for _ in range(vertices):
+        new_hashes = []
+        for v in range(vertices):
+            in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
+            out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
+            new_hashes.append(hashlib.md5(
+                (''.join(sorted(in_neighbors)) + '|' +
+                 ''.join(sorted(out_neighbors)) + '|' +
+                 hashes[v]).encode('utf-8')).hexdigest())
+        hashes = new_hashes
+    fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
+
+    return fingerprint
diff --git a/nni/nas/benchmarks/nasbench101/model.py b/nni/nas/benchmarks/nasbench101/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..876fc4d3514b23544759d617de2b02f189ca9722
--- /dev/null
+++ b/nni/nas/benchmarks/nasbench101/model.py
@@ -0,0 +1,99 @@
+from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model, Proxy
+from playhouse.sqlite_ext import JSONField
+
+from nni.nas.benchmarks.utils import json_dumps
+
+proxy = Proxy()
+
+
+class Nb101TrialConfig(Model):
+    """
+    Trial config for NAS-Bench-101.
+
+    Attributes
+    ----------
+    arch : dict
+        A dict with keys ``op1``, ``op2``, ... and ``input1``, ``input2``, ... Vertices are
+        enumerated from 0. Since node 0 is the input node, it is skipped in this dict. Each ``op``
+        is one of :const:`nni.nas.benchmark.nasbench101.CONV3X3_BN_RELU`,
+        :const:`nni.nas.benchmark.nasbench101.CONV1X1_BN_RELU`, and :const:`nni.nas.benchmark.nasbench101.MAXPOOL3X3`.
+        Each ``input`` is a list of previous nodes. For example ``input5`` can be ``[0, 1, 3]``.
+    num_vertices : int
+        Number of vertices (nodes) in one cell. Should be less than or equal to 7 in default setup.
+ hash : str + Graph-invariant MD5 string for this architecture. + num_epochs : int + Number of epochs planned for this trial. Should be one of 4, 12, 36, 108 in default setup. + """ + + arch = JSONField(json_dumps=json_dumps, index=True) + num_vertices = IntegerField(index=True) + hash = CharField(max_length=64, index=True) + num_epochs = IntegerField(index=True) + + class Meta: + database = proxy + + +class Nb101TrialStats(Model): + """ + Computation statistics for NAS-Bench-101. Each corresponds to one trial. + Each config has multiple trials with different random seeds, but unfortunately seed for each trial is unavailable. + NAS-Bench-101 trains and evaluates on CIFAR-10 by default. The original training set is divided into + 40k training images and 10k validation images, and the original validation set is used for test only. + + Attributes + ---------- + config : Nb101TrialConfig + Setup for this trial data. + train_acc : float + Final accuracy on training data, ranging from 0 to 100. + valid_acc : float + Final accuracy on validation data, ranging from 0 to 100. + test_acc : float + Final accuracy on test data, ranging from 0 to 100. + parameters : float + Number of trainable parameters in million. + training_time : float + Duration of training in seconds. + """ + config = ForeignKeyField(Nb101TrialConfig, backref='trial_stats', index=True) + train_acc = FloatField() + valid_acc = FloatField() + test_acc = FloatField() + parameters = FloatField() + training_time = FloatField() + + class Meta: + database = proxy + + +class Nb101IntermediateStats(Model): + """ + Intermediate statistics for NAS-Bench-101. + + Attributes + ---------- + trial : Nb101TrialStats + The exact trial where the intermediate result is produced. + current_epoch : int + Elapsed epochs when evaluation is done. + train_acc : float + Intermediate accuracy on training data, ranging from 0 to 100. + valid_acc : float + Intermediate accuracy on validation data, ranging from 0 to 100. + test_acc : float + Intermediate accuracy on test data, ranging from 0 to 100. + training_time : float + Time elapsed in seconds. + """ + + trial = ForeignKeyField(Nb101TrialStats, backref='intermediates', index=True) + current_epoch = IntegerField(index=True) + train_acc = FloatField() + valid_acc = FloatField() + test_acc = FloatField() + training_time = FloatField() + + class Meta: + database = proxy diff --git a/nni/nas/benchmarks/nasbench101/query.py b/nni/nas/benchmarks/nasbench101/query.py new file mode 100644 index 0000000000000000000000000000000000000000..163229e50b1c931c14e34b94815b147d3fdf9db5 --- /dev/null +++ b/nni/nas/benchmarks/nasbench101/query.py @@ -0,0 +1,76 @@ +import functools + +from peewee import fn +from playhouse.shortcuts import model_to_dict + +from nni.nas.benchmarks.utils import load_benchmark +from .model import Nb101TrialStats, Nb101TrialConfig, proxy +from .graph_util import hash_module, infer_num_vertices + + +def query_nb101_trial_stats(arch, num_epochs, isomorphism=True, reduction=None, include_intermediates=False): + """ + Query trial stats of NAS-Bench-101 given conditions. + + Parameters + ---------- + arch : dict or None + If a dict, it is in the format that is described in + :class:`nni.nas.benchmark.nasbench101.Nb101TrialConfig`. Only trial stats + matched will be returned. If none, all architectures in the database will be matched. + num_epochs : int or None + If int, matching results will be returned. Otherwise a wildcard. 
+ isomorphism : boolean + Whether to match essentially-same architecture, i.e., architecture with the + same graph-invariant hash value. + reduction : str or None + If 'none' or None, all trial stats will be returned directly. + If 'mean', fields in trial stats will be averaged given the same trial config. + include_intermediates : boolean + If true, intermediate results will be returned. + + Returns + ------- + generator of dict + A generator of :class:`nni.nas.benchmark.nasbench101.Nb101TrialStats` objects, + where each of them has been converted into a dict. + """ + + if proxy.obj is None: + proxy.initialize(load_benchmark('nasbench101')) + + fields = [] + if reduction == 'none': + reduction = None + if reduction == 'mean': + for field_name in Nb101TrialStats._meta.sorted_field_names: + if field_name not in ['id', 'config']: + fields.append(fn.AVG(getattr(Nb101TrialStats, field_name)).alias(field_name)) + elif reduction is None: + fields.append(Nb101TrialStats) + else: + raise ValueError('Unsupported reduction: \'%s\'' % reduction) + query = Nb101TrialStats.select(*fields, Nb101TrialConfig).join(Nb101TrialConfig) + conditions = [] + if arch is not None: + if isomorphism: + num_vertices = infer_num_vertices(arch) + conditions.append(Nb101TrialConfig.hash == hash_module(arch, num_vertices)) + else: + conditions.append(Nb101TrialConfig.arch == arch) + if num_epochs is not None: + conditions.append(Nb101TrialConfig.num_epochs == num_epochs) + if conditions: + query = query.where(functools.reduce(lambda a, b: a & b, conditions)) + if reduction is not None: + query = query.group_by(Nb101TrialStats.config) + for trial in query: + if include_intermediates: + data = model_to_dict(trial) + # exclude 'trial' from intermediates as it is already available in data + data['intermediates'] = [ + {k: v for k, v in model_to_dict(t).items() if k != 'trial'} for t in trial.intermediates + ] + yield data + else: + yield model_to_dict(trial) diff --git a/nni/nas/benchmarks/nasbench201/__init__.py b/nni/nas/benchmarks/nasbench201/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ce3d0170d29d2e6c0fbdfdcff17077be72dd420 --- /dev/null +++ b/nni/nas/benchmarks/nasbench201/__init__.py @@ -0,0 +1,3 @@ +from .constants import NONE, SKIP_CONNECT, CONV_1X1, CONV_3X3, AVG_POOL_3X3 +from .model import Nb201TrialStats, Nb201IntermediateStats, Nb201TrialConfig +from .query import query_nb201_trial_stats diff --git a/nni/nas/benchmarks/nasbench201/constants.py b/nni/nas/benchmarks/nasbench201/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d6bce5d112f3d84ae0dd17a780e4a3e83758bd --- /dev/null +++ b/nni/nas/benchmarks/nasbench201/constants.py @@ -0,0 +1,12 @@ +NONE = 'none' +SKIP_CONNECT = 'skip_connect' +CONV_1X1 = 'conv_1x1' +CONV_3X3 = 'conv_3x3' +AVG_POOL_3X3 = 'avg_pool_3x3' +PRIMITIVES = [ + NONE, + SKIP_CONNECT, + CONV_1X1, + CONV_3X3, + AVG_POOL_3X3, +] diff --git a/nni/nas/benchmarks/nasbench201/db_gen.py b/nni/nas/benchmarks/nasbench201/db_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8958a396b2f07214c219198d295ea3f8cd1713 --- /dev/null +++ b/nni/nas/benchmarks/nasbench201/db_gen.py @@ -0,0 +1,109 @@ +import argparse +import re + +import tqdm +import torch + +from nni.nas.benchmarks.utils import load_benchmark +from .constants import NONE, SKIP_CONNECT, CONV_1X1, CONV_3X3, AVG_POOL_3X3 +from .model import Nb201TrialConfig, Nb201TrialStats, Nb201IntermediateStats + + +def parse_arch_str(arch_str): + mp = { + 'none': 
NONE, + 'skip_connect': SKIP_CONNECT, + 'nor_conv_1x1': CONV_1X1, + 'nor_conv_3x3': CONV_3X3, + 'avg_pool_3x3': AVG_POOL_3X3 + } + m = re.match(r'\|(.*)~0\|\+\|(.*)~0\|(.*)~1\|\+\|(.*)~0\|(.*)~1\|(.*)~2\|', arch_str) + return { + '0_1': mp[m.group(1)], + '0_2': mp[m.group(2)], + '1_2': mp[m.group(3)], + '0_3': mp[m.group(4)], + '1_3': mp[m.group(5)], + '2_3': mp[m.group(6)] + } + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('input_file', + help='Path to the file to be converted, e.g., NAS-Bench-201-v1_1-096897.pth.') + args = parser.parse_args() + dataset_split = { + 'cifar10-valid': ['train', 'x-valid', 'ori-test', 'ori-test'], + 'cifar10': ['train', 'ori-test', 'ori-test', 'ori-test'], + 'cifar100': ['train', 'x-valid', 'x-test', 'ori-test'], + 'imagenet16-120': ['train', 'x-valid', 'x-test', 'ori-test'], + } + + db = load_benchmark('nasbench201') + + with db: + db.create_tables([Nb201TrialConfig, Nb201TrialStats, Nb201IntermediateStats]) + print('Loading NAS-Bench-201 pickle...') + nb201_data = torch.load(args.input_file) + print('Dumping architectures...') + for arch_str in nb201_data['meta_archs']: + arch_json = parse_arch_str(arch_str) + for epochs in [12, 200]: + for dataset in Nb201TrialConfig.dataset.choices: + Nb201TrialConfig.create(arch=arch_json, num_epochs=epochs, dataset=dataset, + num_channels=16, num_cells=5) + for arch_info in tqdm.tqdm(nb201_data['arch2infos'].values(), + desc='Processing architecture statistics'): + for epochs_verb, d in arch_info.items(): + if epochs_verb == 'less': + epochs = 12 + else: + epochs = 200 + arch_json = parse_arch_str(d['arch_str']) + for (dataset, seed), r in d['all_results'].items(): + sp = dataset_split[dataset.lower()] + data_parsed = { + 'train_acc': r['train_acc1es'][epochs - 1], + 'valid_acc': r['eval_acc1es']['{}@{}'.format(sp[1], epochs - 1)], + 'test_acc': r['eval_acc1es']['{}@{}'.format(sp[2], epochs - 1)], + 'ori_test_acc': r['eval_acc1es']['{}@{}'.format(sp[3], epochs - 1)], + 'train_loss': r['train_losses'][epochs - 1], + 'valid_loss': r['eval_losses']['{}@{}'.format(sp[1], epochs - 1)], + 'test_loss': r['eval_losses']['{}@{}'.format(sp[2], epochs - 1)], + 'ori_test_loss': r['eval_losses']['{}@{}'.format(sp[3], epochs - 1)], + 'parameters': r['params'], + 'flops': r['flop'], + 'latency': r['latency'][0], + 'training_time': r['train_times'][epochs - 1] * epochs, + 'valid_evaluation_time': r['eval_times']['{}@{}'.format(sp[1], epochs - 1)], + 'test_evaluation_time': r['eval_times']['{}@{}'.format(sp[2], epochs - 1)], + 'ori_test_evaluation_time': r['eval_times']['{}@{}'.format(sp[3], epochs - 1)], + } + config = Nb201TrialConfig.get( + (Nb201TrialConfig.num_epochs == epochs) & + (Nb201TrialConfig.arch == arch_json) & + (Nb201TrialConfig.dataset == dataset.lower()) + ) + trial_stats = Nb201TrialStats.create(config=config, seed=seed, **data_parsed) + intermediate_stats = [] + for epoch in range(epochs): + data_parsed = { + 'train_acc': r['train_acc1es'].get(epoch), + 'valid_acc': r['eval_acc1es'].get('{}@{}'.format(sp[1], epoch)), + 'test_acc': r['eval_acc1es'].get('{}@{}'.format(sp[2], epoch)), + 'ori_test_acc': r['eval_acc1es'].get('{}@{}'.format(sp[3], epoch)), + 'train_loss': r['train_losses'].get(epoch), + 'valid_loss': r['eval_losses'].get('{}@{}'.format(sp[1], epoch)), + 'test_loss': r['eval_losses'].get('{}@{}'.format(sp[2], epoch)), + 'ori_test_loss': r['eval_losses'].get('{}@{}'.format(sp[3], epoch)), + } + if all([v is None for v in data_parsed.values()]): + continue + 
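+ # Attach bookkeeping fields and batch-insert all epochs at once; metrics that + # NAS-Bench-201 never recorded for an epoch stay None (the intermediate-stats + # fields are nullable for this reason), and fully-empty epochs were skipped above.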
data_parsed.update(current_epoch=epoch + 1, trial=trial_stats) + intermediate_stats.append(data_parsed) + Nb201IntermediateStats.insert_many(intermediate_stats).execute(db) + + +if __name__ == '__main__': + main() diff --git a/nni/nas/benchmarks/nasbench201/model.py b/nni/nas/benchmarks/nasbench201/model.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd01dc9dc5b3f1995002086a5b3d4cff4cdb06e --- /dev/null +++ b/nni/nas/benchmarks/nasbench201/model.py @@ -0,0 +1,157 @@ +from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model, Proxy +from playhouse.sqlite_ext import JSONField + +from nni.nas.benchmarks.utils import json_dumps + +proxy = Proxy() + + +class Nb201TrialConfig(Model): + """ + Trial config for NAS-Bench-201. + + Attributes + ---------- + arch : dict + A dict with keys ``0_1``, ``0_2``, ``0_3``, ``1_2``, ``1_3``, ``2_3``, each of which + is an operator chosen from :const:`nni.nas.benchmark.nasbench201.NONE`, + :const:`nni.nas.benchmark.nasbench201.SKIP_CONNECT`, + :const:`nni.nas.benchmark.nasbench201.CONV_1X1`, + :const:`nni.nas.benchmark.nasbench201.CONV_3X3` and :const:`nni.nas.benchmark.nasbench201.AVG_POOL_3X3`. + num_epochs : int + Number of epochs planned for this trial. Should be one of 12 and 200. + num_channels: int + Number of channels for initial convolution. 16 by default. + num_cells: int + Number of cells per stage. 5 by default. + dataset: str + Dataset used for training and evaluation. NAS-Bench-201 provides the following 4 options: + ``cifar10-valid`` (training data is splited into 25k for training and 25k for validation, + validation data is used for test), ``cifar10`` (training data is used in training, validation + data is splited into 5k for validation and 5k for testing), ``cifar100`` (same protocol as ``cifar10``), + and ``imagenet16-120`` (a subset of 120 classes in ImageNet, downscaled to 16x16, using training data + for training, 6k images from validation set for validation and the other 6k for testing). + """ + + arch = JSONField(json_dumps=json_dumps, index=True) + num_epochs = IntegerField(index=True) + num_channels = IntegerField() + num_cells = IntegerField() + dataset = CharField(max_length=20, index=True, choices=[ + 'cifar10-valid', # 25k+25k+10k + 'cifar10', # 50k+5k+5k + 'cifar100', # 50k+5k+5k + 'imagenet16-120', + ]) + + class Meta: + database = proxy + + +class Nb201TrialStats(Model): + """ + Computation statistics for NAS-Bench-201. Each corresponds to one trial. + + Attributes + ---------- + config : Nb201TrialConfig + Setup for this trial data. + seed : int + Random seed selected, for reproduction. + train_acc : float + Final accuracy on training data, ranging from 0 to 100. + valid_acc : float + Final accuracy on validation data, ranging from 0 to 100. + test_acc : float + Final accuracy on test data, ranging from 0 to 100. + ori_test_acc : float + Test accuracy on original validation set (10k for CIFAR and 12k for Imagenet16-120), + ranging from 0 to 100. + train_loss : float or None + Final cross entropy loss on training data. Note that loss could be NaN, in which case + this attributed will be None. + valid_loss : float or None + Final cross entropy loss on validation data. + test_loss : float or None + Final cross entropy loss on test data. + ori_test_loss : float or None + Final cross entropy loss on original validation set. + parameters : float + Number of trainable parameters in million. + latency : float + Latency in seconds. + flops : float + FLOPs in million. 
+ training_time : float + Duration of training in seconds. + valid_evaluation_time : float + Time elapsed to evaluate on validation set. + test_evaluation_time : float + Time elapsed to evaluate on test set. + ori_test_evaluation_time : float + Time elapsed to evaluate on original test set. + """ + config = ForeignKeyField(Nb201TrialConfig, backref='trial_stats', index=True) + seed = IntegerField() + train_acc = FloatField() + valid_acc = FloatField() + test_acc = FloatField() + ori_test_acc = FloatField() # test accuracy of the original test set + train_loss = FloatField(null=True) # possibly nan + valid_loss = FloatField(null=True) + test_loss = FloatField(null=True) + ori_test_loss = FloatField(null=True) + parameters = FloatField() # parameters in million + latency = FloatField() # latency in milliseconds + flops = FloatField() # flops in million + training_time = FloatField() + valid_evaluation_time = FloatField() + test_evaluation_time = FloatField() + ori_test_evaluation_time = FloatField() + + class Meta: + database = proxy + + +class Nb201IntermediateStats(Model): + """ + Intermediate statistics for NAS-Bench-201. + + Attributes + ---------- + trial : Nb201TrialStats + Corresponding trial. + current_epoch : int + Elapsed epochs. + train_acc : float + Current accuracy on training data, ranging from 0 to 100. + valid_acc : float + Current accuracy on validation data, ranging from 0 to 100. + test_acc : float + Current accuracy on test data, ranging from 0 to 100. + ori_test_acc : float + Test accuracy on original validation set (10k for CIFAR and 12k for Imagenet16-120), + ranging from 0 to 100. + train_loss : float or None + Current cross entropy loss on training data. + valid_loss : float or None + Current cross entropy loss on validation data. + test_loss : float or None + Current cross entropy loss on test data. + ori_test_loss : float or None + Current cross entropy loss on original validation set. + """ + + trial = ForeignKeyField(Nb201TrialStats, backref='intermediates', index=True) + current_epoch = IntegerField(index=True) + train_acc = FloatField(null=True) + valid_acc = FloatField(null=True) + test_acc = FloatField(null=True) + ori_test_acc = FloatField(null=True) + train_loss = FloatField(null=True) + valid_loss = FloatField(null=True) + test_loss = FloatField(null=True) + ori_test_loss = FloatField(null=True) + + class Meta: + database = proxy diff --git a/nni/nas/benchmarks/nasbench201/query.py b/nni/nas/benchmarks/nasbench201/query.py new file mode 100644 index 0000000000000000000000000000000000000000..bd507f1cafdfc8f269cce853e6a1109cdbcd55e0 --- /dev/null +++ b/nni/nas/benchmarks/nasbench201/query.py @@ -0,0 +1,73 @@ +import functools + +from peewee import fn +from playhouse.shortcuts import model_to_dict + +from nni.nas.benchmarks.utils import load_benchmark +from .model import Nb201TrialStats, Nb201TrialConfig, proxy + + +def query_nb201_trial_stats(arch, num_epochs, dataset, reduction=None, include_intermediates=False): + """ + Query trial stats of NAS-Bench-201 given conditions. + + Parameters + ---------- + arch : dict or None + If a dict, it is in the format that is described in + :class:`nni.nas.benchmark.nasbench201.Nb201TrialConfig`. Only trial stats + matched will be returned. If none, all architectures in the database will be matched. + num_epochs : int or None + If int, matching results will be returned. Otherwise a wildcard. 
+ dataset : str or None + If specified, can be one of the dataset available in :class:`nni.nas.benchmark.nasbench201.Nb201TrialConfig`. + Otherwise a wildcard. + reduction : str or None + If 'none' or None, all trial stats will be returned directly. + If 'mean', fields in trial stats will be averaged given the same trial config. + include_intermediates : boolean + If true, intermediate results will be returned. + + Returns + ------- + generator of dict + A generator of :class:`nni.nas.benchmark.nasbench201.Nb201TrialStats` objects, + where each of them has been converted into a dict. + """ + + if proxy.obj is None: + proxy.initialize(load_benchmark('nasbench201')) + + fields = [] + if reduction == 'none': + reduction = None + if reduction == 'mean': + for field_name in Nb201TrialStats._meta.sorted_field_names: + if field_name not in ['id', 'config', 'seed']: + fields.append(fn.AVG(getattr(Nb201TrialStats, field_name)).alias(field_name)) + elif reduction is None: + fields.append(Nb201TrialStats) + else: + raise ValueError('Unsupported reduction: \'%s\'' % reduction) + query = Nb201TrialStats.select(*fields, Nb201TrialConfig).join(Nb201TrialConfig) + conditions = [] + if arch is not None: + conditions.append(Nb201TrialConfig.arch == arch) + if num_epochs is not None: + conditions.append(Nb201TrialConfig.num_epochs == num_epochs) + if dataset is not None: + conditions.append(Nb201TrialConfig.dataset == dataset) + if conditions: + query = query.where(functools.reduce(lambda a, b: a & b, conditions)) + if reduction is not None: + query = query.group_by(Nb201TrialStats.config) + for trial in query: + if include_intermediates: + data = model_to_dict(trial) + # exclude 'trial' from intermediates as it is already available in data + data['intermediates'] = [ + {k: v for k, v in model_to_dict(t).items() if k != 'trial'} for t in trial.intermediates + ] + yield data + else: + yield model_to_dict(trial) diff --git a/nni/nas/benchmarks/nds/__init__.py b/nni/nas/benchmarks/nds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9d393b86e1948d6ea5fdf8cacee6d4fd645c8434 --- /dev/null +++ b/nni/nas/benchmarks/nds/__init__.py @@ -0,0 +1,3 @@ +from .constants import * +from .model import NdsTrialConfig, NdsTrialStats, NdsIntermediateStats +from .query import query_nds_trial_stats diff --git a/nni/nas/benchmarks/nds/constants.py b/nni/nas/benchmarks/nds/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..164f094f3aacb8ac36a8572c28e31e9419baa416 --- /dev/null +++ b/nni/nas/benchmarks/nds/constants.py @@ -0,0 +1,16 @@ +NONE = 'none' +SKIP_CONNECT = 'skip_connect' +AVG_POOL_3X3 = 'avg_pool_3x3' +MAX_POOL_3X3 = 'max_pool_3x3' +MAX_POOL_5X5 = 'max_pool_5x5' +MAX_POOL_7X7 = 'max_pool_7x7' +CONV_1X1 = 'conv_1x1' +CONV_3X3 = 'conv_3x3' +CONV_3X1_1X3 = 'conv_3x1_1x3' +CONV_7X1_1X7 = 'conv_7x1_1x7' +DIL_CONV_3X3 = 'dil_conv_3x3' +DIL_CONV_5X5 = 'dil_conv_5x5' +SEP_CONV_3X3 = 'sep_conv_3x3' +SEP_CONV_5X5 = 'sep_conv_5x5' +SEP_CONV_7X7 = 'sep_conv_7x7' +DIL_SEP_CONV_3X3 = 'dil_sep_conv_3x3' diff --git a/nni/nas/benchmarks/nds/db_gen.py b/nni/nas/benchmarks/nds/db_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..6a8ad6fac3ac70eb656ff0c46fbbea629c2492b8 --- /dev/null +++ b/nni/nas/benchmarks/nds/db_gen.py @@ -0,0 +1,156 @@ +import json +import argparse +import os + +import numpy as np +import tqdm + +from nni.nas.benchmarks.utils import load_benchmark +from .model import NdsTrialConfig, NdsTrialStats, NdsIntermediateStats + + +def 
inject_item(db, item, proposer, dataset, generator): + if 'genotype' in item['net']: + model_family = 'nas_cell' + num_nodes_normal = len(item['net']['genotype']['normal']) // 2 + num_nodes_reduce = len(item['net']['genotype']['reduce']) // 2 + model_spec = { + 'num_nodes_normal': num_nodes_normal, + 'num_nodes_reduce': num_nodes_reduce, + 'depth': item['net']['depth'], + 'width': item['net']['width'], + 'aux': item['net']['aux'], + 'drop_prob': item['net']['drop_prob'], + } + cell_spec = {} + for cell_type in ['normal', 'reduce']: + for i in range(num_nodes_normal): + for j, label in enumerate(['x', 'y']): + cell_spec['{}_{}_op_{}'.format(cell_type, i, label)] = \ + item['net']['genotype'][cell_type][i * 2 + j][0] + cell_spec['{}_{}_input_{}'.format(cell_type, i, label)] = \ + item['net']['genotype'][cell_type][i * 2 + j][1] + cell_spec['{}_concat'.format(cell_type)] = item['net']['genotype']['{}_concat'.format(cell_type)] + else: + if item['net']['block_type'].startswith('res_bottleneck'): + model_family = 'residual_bottleneck' + elif item['net']['block_type'].startswith('res_basic'): + model_family = 'residual_basic' + elif item['net']['block_type'].startswith('double_plain'): + model_family = 'vanilla' + else: + raise ValueError('Unrecognized block type') + model_spec = {k: v for k, v in item['net'].items() if v and k != 'block_type'} + cell_spec = {} + trial_config, _ = NdsTrialConfig.get_or_create( + model_family=model_family, + model_spec=model_spec, + cell_spec=cell_spec, + proposer=proposer, + base_lr=item['optim']['base_lr'], + weight_decay=item['optim']['wd'], + num_epochs=item['optim']['max_ep'], + dataset=dataset, + generator=generator + ) + assert len(item['train_ep_top1']) == len(item['test_ep_top1']) == trial_config.num_epochs + trial = NdsTrialStats.create( + config=trial_config, + seed=item['rng_seed'], + final_train_acc=100 - item['train_ep_top1'][-1], + final_train_loss=item['train_ep_loss'][-1], + final_test_acc=100 - item['test_ep_top1'][-1], + best_train_acc=100 - min(item['train_ep_top1']), + best_train_loss=np.nanmin(item['train_ep_loss']).item(), + best_test_acc=100 - min(item['test_ep_top1']), + parameters=item['params'] / 1e6, + flops=item['flops'] / 1e6, + iter_time=item['iter_time'] + ) + intermediate_stats = [] + for i in range(trial_config.num_epochs): + intermediate_stats.append({ + 'trial': trial, + 'current_epoch': i + 1, + 'train_loss': item['train_ep_loss'][i], + 'train_acc': 100 - item['train_ep_top1'][i], + 'test_acc': 100 - item['test_ep_top1'][i] + }) + NdsIntermediateStats.insert_many(intermediate_stats).execute(db) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('input_dir', help='Path to extracted NDS data dir.') + args = parser.parse_args() + + sweep_list = [ + 'Amoeba.json', + 'Amoeba_in.json', + 'DARTS.json', + 'DARTS_fix-w-d.json', + 'DARTS_in.json', + 'DARTS_lr-wd.json', + 'DARTS_lr-wd_in.json', + 'ENAS.json', + 'ENAS_fix-w-d.json', + 'ENAS_in.json', + 'NASNet.json', + 'NASNet_in.json', + 'PNAS.json', + 'PNAS_fix-w-d.json', + 'PNAS_in.json', + 'ResNeXt-A.json', + 'ResNeXt-A_in.json', + 'ResNeXt-B.json', + 'ResNeXt-B_in.json', + 'ResNet-B.json', + 'ResNet.json', + 'ResNet_lr-wd.json', + 'ResNet_lr-wd_in.json', + 'ResNet_reruns.json', + 'ResNet_rng1.json', + 'ResNet_rng2.json', + 'ResNet_rng3.json', + 'Vanilla.json', + 'Vanilla_lr-wd.json', + 'Vanilla_lr-wd_in.json', + 'Vanilla_reruns.json', + 'Vanilla_rng1.json', + 'Vanilla_rng2.json', + 'Vanilla_rng3.json' + ] + + db = load_benchmark('nds') + + with db: + 
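+ # Sweep file names encode the setup: an '_in' suffix means ImageNet (otherwise CIFAR-10), + # 'fix-w-d'/'lr-wd' select the generator, and the leading token (e.g., 'DARTS') is the proposer.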
db.create_tables([NdsTrialConfig, NdsTrialStats, NdsIntermediateStats]) + for json_idx, json_file in enumerate(sweep_list, start=1): + if 'fix-w-d' in json_file: + generator = 'fix_w_d' + elif 'lr-wd' in json_file: + generator = 'tune_lr_wd' + else: + generator = 'random' + if '_in' in json_file: + dataset = 'imagenet' + else: + dataset = 'cifar10' + proposer = json_file.split(".")[0].split("_")[0].lower() + with open(os.path.join(args.input_dir, json_file), 'r') as f: + data = json.load(f) + if 'top' in data and 'mid' in data: + for t in tqdm.tqdm(data['top'], + desc='[{}/{}] Processing {} (top)'.format(json_idx, len(sweep_list), json_file)): + inject_item(db, t, proposer, dataset, generator) + for t in tqdm.tqdm(data['mid'], + desc='[{}/{}] Processing {} (mid)'.format(json_idx, len(sweep_list), json_file)): + inject_item(db, t, proposer, dataset, generator) + else: + for job in tqdm.tqdm(data, + desc='[{}/{}] Processing {}'.format(json_idx, len(sweep_list), json_file)): + inject_item(db, job, proposer, dataset, generator) + + +if __name__ == '__main__': + main() diff --git a/nni/nas/benchmarks/nds/model.py b/nni/nas/benchmarks/nds/model.py new file mode 100644 index 0000000000000000000000000000000000000000..701479e25a6dac6a08765f4bbb0cebc9983bbed8 --- /dev/null +++ b/nni/nas/benchmarks/nds/model.py @@ -0,0 +1,140 @@ +from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model, Proxy +from playhouse.sqlite_ext import JSONField + +from nni.nas.benchmarks.utils import json_dumps + +proxy = Proxy() + + +class NdsTrialConfig(Model): + """ + Trial config for NDS. + + Attributes + ---------- + model_family : str + Could be ``nas_cell``, ``residual_bottleneck``, ``residual_basic`` or ``vanilla``. + model_spec : dict + If ``model_family`` is ``nas_cell``, it contains ``num_nodes_normal``, ``num_nodes_reduce``, ``depth``, + ``width``, ``aux`` and ``drop_prob``. If ``model_family`` is ``residual_bottleneck``, it contains ``bot_muls``, + ``ds`` (depths), ``num_gs`` (number of groups) and ``ss`` (strides). If ``model_family`` is ``residual_basic`` or + ``vanilla``, it contains ``ds``, ``ss`` and ``ws``. + cell_spec : dict + If ``model_family`` is not ``nas_cell`` it will be an empty dict. Otherwise, it specifies + ``<normal/reduce>_<i>_<op/input>_<x/y>``, where ``i`` ranges from 0 to ``num_nodes_<normal/reduce> - 1``. + If it is an ``op``, the value is chosen from the constants specified previously like :const:`nni.nas.benchmark.nds.CONV_1X1`. + If it is ``i``'s ``input``, the value ranges from 0 to ``i + 1``, as ``nas_cell`` uses the previous two nodes as inputs, and + node 0 is actually the second node. Refer to the NASNet paper for details. Finally, another two key-value pairs + ``normal_concat`` and ``reduce_concat`` specify which nodes are eventually concatenated into output. + dataset : str + Dataset used. Could be ``cifar10`` or ``imagenet``. + generator : str + One of ``random``, which generates configurations at random while keeping learning rate and weight decay fixed; + ``fix_w_d``, which further keeps ``width`` and ``depth`` fixed (only applicable for ``nas_cell``); and + ``tune_lr_wd``, which tunes learning rate and weight decay as well. + proposer : str + The paper that proposed the distribution for random sampling. Available proposers include ``nasnet``, ``darts``, ``enas``, + ``pnas``, ``amoeba``, ``vanilla``, ``resnext-a``, ``resnext-b``, ``resnet``, ``resnet-b`` (ResNet with bottleneck). + See the NDS paper for details. + base_lr : float + Initial learning rate. + weight_decay : float + L2 weight decay applied on weights.
+ num_epochs : int + Number of epochs scheduled, during which learning rate will decay to 0 following cosine annealing. + """ + + model_family = CharField(max_length=20, index=True, choices=[ + 'nas_cell', + 'residual_bottleneck', + 'residual_basic', + 'vanilla', + ]) + model_spec = JSONField(json_dumps=json_dumps, index=True) + cell_spec = JSONField(json_dumps=json_dumps, index=True, null=True) + dataset = CharField(max_length=15, index=True, choices=['cifar10', 'imagenet']) + generator = CharField(max_length=15, index=True, choices=[ + 'random', + 'fix_w_d', + 'tune_lr_wd', + ]) + proposer = CharField(max_length=15, index=True) + base_lr = FloatField() + weight_decay = FloatField() + num_epochs = IntegerField() + + class Meta: + database = proxy + + +class NdsTrialStats(Model): + """ + Computation statistics for NDS. Each corresponds to one trial. + + Attributes + ---------- + config : NdsTrialConfig + Corresponding config for trial. + seed : int + Random seed selected, for reproduction. + final_train_acc : float + Final accuracy on training data, ranging from 0 to 100. + final_train_loss : float or None + Final cross entropy loss on training data. Could be NaN (None). + final_test_acc : float + Final accuracy on test data, ranging from 0 to 100. + best_train_acc : float + Best accuracy on training data, ranging from 0 to 100. + best_train_loss : float or None + Best cross entropy loss on training data. Could be NaN (None). + best_test_acc : float + Best accuracy on test data, ranging from 0 to 100. + parameters : float + Number of trainable parameters in million. + flops : float + FLOPs in million. + iter_time : float + Seconds elapsed for each iteration. + """ + config = ForeignKeyField(NdsTrialConfig, backref='trial_stats', index=True) + seed = IntegerField() + final_train_acc = FloatField() + final_train_loss = FloatField(null=True) + final_test_acc = FloatField() + best_train_acc = FloatField() + best_train_loss = FloatField(null=True) + best_test_acc = FloatField() + parameters = FloatField() + flops = FloatField() + iter_time = FloatField() + + class Meta: + database = proxy + + +class NdsIntermediateStats(Model): + """ + Intermediate statistics for NDS. + + Attributes + ---------- + trial : NdsTrialStats + Corresponding trial. + current_epoch : int + Elapsed epochs. + train_loss : float or None + Current cross entropy loss on training data. Can be NaN (None). + train_acc : float + Current accuracy on training data, ranging from 0 to 100. + test_acc : float + Current accuracy on test data, ranging from 0 to 100. + """ + + trial = ForeignKeyField(NdsTrialStats, backref='intermediates', index=True) + current_epoch = IntegerField(index=True) + train_loss = FloatField(null=True) + train_acc = FloatField() + test_acc = FloatField() + + class Meta: + database = proxy diff --git a/nni/nas/benchmarks/nds/query.py b/nni/nas/benchmarks/nds/query.py new file mode 100644 index 0000000000000000000000000000000000000000..a9589b658be821d069ed9ad42a2979cbd9729ca3 --- /dev/null +++ b/nni/nas/benchmarks/nds/query.py @@ -0,0 +1,80 @@ +import functools + +from peewee import fn +from playhouse.shortcuts import model_to_dict + +from nni.nas.benchmarks.utils import load_benchmark +from .model import NdsTrialStats, NdsTrialConfig, proxy + + +def query_nds_trial_stats(model_family, proposer, generator, model_spec, cell_spec, dataset, + num_epochs=None, reduction=None, include_intermediates=False): + """ + Query trial stats of NDS given conditions. 
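+ Non-``None`` conditions are combined with logical AND. A minimal sketch (argument values are illustrative): + + >>> for s in query_nds_trial_stats('nas_cell', 'darts', 'random', None, None, + ... 'cifar10', reduction='mean'): + ... print(s['final_test_acc'])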
+ + Parameters + ---------- + model_family : str or None + If str, can be one of the model families available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. + Otherwise a wildcard. + proposer : str or None + If str, can be one of the proposers available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard. + generator : str or None + If str, can be one of the generators available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard. + model_spec : dict or None + If specified, can be one of the model spec available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. + Otherwise a wildcard. + cell_spec : dict or None + If specified, can be one of the cell spec available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. + Otherwise a wildcard. + dataset : str or None + If str, can be one of the datasets available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard. + num_epochs : float or None + If int, matching results will be returned. Otherwise a wildcard. + reduction : str or None + If 'none' or None, all trial stats will be returned directly. + If 'mean', fields in trial stats will be averaged given the same trial config. + include_intermediates : boolean + If true, intermediate results will be returned. + + Returns + ------- + generator of dict + A generator of :class:`nni.nas.benchmark.nds.NdsTrialStats` objects, + where each of them has been converted into a dict. + """ + + if proxy.obj is None: + proxy.initialize(load_benchmark('nds')) + + fields = [] + if reduction == 'none': + reduction = None + if reduction == 'mean': + for field_name in NdsTrialStats._meta.sorted_field_names: + if field_name not in ['id', 'config', 'seed']: + fields.append(fn.AVG(getattr(NdsTrialStats, field_name)).alias(field_name)) + elif reduction is None: + fields.append(NdsTrialStats) + else: + raise ValueError('Unsupported reduction: \'%s\'' % reduction) + query = NdsTrialStats.select(*fields, NdsTrialConfig).join(NdsTrialConfig) + conditions = [] + for field_name in ['model_family', 'proposer', 'generator', 'model_spec', 'cell_spec', + 'dataset', 'num_epochs']: + if locals()[field_name] is not None: + conditions.append(getattr(NdsTrialConfig, field_name) == locals()[field_name]) + if conditions: + query = query.where(functools.reduce(lambda a, b: a & b, conditions)) + if reduction is not None: + query = query.group_by(NdsTrialStats.config) + for trial in query: + if include_intermediates: + data = model_to_dict(trial) + # exclude 'trial' from intermediates as it is already available in data + data['intermediates'] = [ + {k: v for k, v in model_to_dict(t).items() if k != 'trial'} for t in trial.intermediates + ] + yield data + else: + yield model_to_dict(trial) diff --git a/nni/nas/benchmarks/nlp/__init__.py b/nni/nas/benchmarks/nlp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9dc929727c4293a778dd5d86b7cf294ed5015ffc --- /dev/null +++ b/nni/nas/benchmarks/nlp/__init__.py @@ -0,0 +1,4 @@ +from .model import NlpTrialStats, NlpIntermediateStats, NlpTrialConfig +from .query import query_nlp_trial_stats + + diff --git a/nni/nas/benchmarks/nlp/db_gen.py b/nni/nas/benchmarks/nlp/db_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..4c1198051438601a53732df9922f6d62993acd32 --- /dev/null +++ b/nni/nas/benchmarks/nlp/db_gen.py @@ -0,0 +1,46 @@ +import json +import os +import argparse +import tqdm + +from .model import db, NlpTrialConfig, NlpTrialStats, NlpIntermediateStats + +def main(): + parser = 
argparse.ArgumentParser() + parser.add_argument('input_dir', help='Path to extracted NLP data dir.') + args = parser.parse_args() + with db, tqdm.tqdm(total=len(os.listdir(args.input_dir)), desc="creating tables") as pbar: + db.create_tables([NlpTrialConfig, NlpTrialStats, NlpIntermediateStats]) + json_files = os.listdir(args.input_dir) + for json_file in json_files: + pbar.update(1) + if json_file.endswith('.json'): + log_path = os.path.join(args.input_dir, json_file) + cur = json.load(open(log_path, 'r')) + arch = json.loads(cur['recepie']) + unested_arch = {} + for k in arch.keys(): + # print(k) + unested_arch['{}_op'.format(k)] = arch[k]['op'] + for i in range(len(arch[k]['input'])): + unested_arch['{}_input_{}'.format(k, i)] = arch[k]['input'][i] + config = NlpTrialConfig.create(arch=unested_arch, dataset=cur['data'][5:]) + if cur['status'] == 'OK': + trial_stats = NlpTrialStats.create(config=config, train_loss=cur['train_losses'][-1], val_loss=cur['val_losses'][-1], + test_loss=cur['test_losses'][-1], training_time=cur['wall_times'][-1]) + epochs = 50 + intermediate_stats = [] + for epoch in range(epochs): + epoch_res = { + 'train_loss' : cur['train_losses'][epoch], + 'val_loss' : cur['val_losses'][epoch], + 'test_loss' : cur['test_losses'][epoch], + 'training_time' : cur['wall_times'][epoch] + } + epoch_res.update(current_epoch=epoch + 1, trial=trial_stats) + intermediate_stats.append(epoch_res) + NlpIntermediateStats.insert_many(intermediate_stats).execute(db) + + +if __name__ == '__main__': + main() diff --git a/nni/nas/benchmarks/nlp/model.py b/nni/nas/benchmarks/nlp/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d83ab7ff237af5fce0c9b1de322f77ff4cc021f1 --- /dev/null +++ b/nni/nas/benchmarks/nlp/model.py @@ -0,0 +1,92 @@ +import os + +from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model +from playhouse.sqlite_ext import JSONField, SqliteExtDatabase + +from nni.nas.benchmarks.utils import json_dumps +from nni.nas.benchmarks.constants import DATABASE_DIR + +db = SqliteExtDatabase(os.path.join(DATABASE_DIR, 'nlp.db'), autoconnect=True) + +class NlpTrialConfig(Model): + """ + Trial config for NLP. epoch_num is fixed at 50. + + Attributes + ---------- + arch: dict + aka recepie in NAS-NLP-Benchmark repo (https://github.com/fmsnew/nas-bench-nlp-release). + an arch has multiple Node, Node_input_n and Node_op. + ``Node`` can be ``node_n`` or ``h_new_n`` or ``f/i/o/j(_act)`` etc. (n is an int number and need not to be consecutive) + ``Node_input_n`` can be ``Node`` or ``x`` etc. + ``Node_op`` can be ``linear`` or ``activation_sigm`` or ``activation_tanh`` or ``elementwise_prod`` + or ``elementwise_sum`` or ``activation_leaky_relu`` ... + e.g., {"h_new_0_input_0":"node_3","h_new_0_input_1":"x","h_new_0_op":"linear","node_2_input_0":"x", + "node_2_input_1":"h_prev_0","node_2_op":"linear","node_3_input_0":"node_2","node_3_op":"activation_leaky_relu"} + dataset: str + Dataset used. Could be ``ptb`` or ``wikitext-2``. + """ + arch = JSONField(json_dumps=json_dumps, index=True) + dataset = CharField(max_length=15, index=True, choices=[ + 'ptb', + 'wikitext-2' + ]) + + class Meta: + database = db + +class NlpTrialStats(Model): + """ + Computation statistics for NAS-NLP-Benchmark. + Each corresponds to one trial result after 50 epoch. + + Attributes + ---------- + config : NlpTrialConfig + Corresponding config for trial. + train_loss : float or None + Final loss on training data. Could be NaN (None). 
+ val_loss : float or None + Final loss on validation data. Could be NaN (None). + test_loss : float or None + Final loss on test data. Could be NaN (None). + training_time : float + Time elapsed in seconds. aka wall_time in in NAS-NLP-Benchmark repo. + """ + config = ForeignKeyField(NlpTrialConfig, backref='trial_stats', index=True) + train_loss = FloatField(null=True) + val_loss = FloatField(null=True) + test_loss = FloatField(null=True) + training_time = FloatField(null=True) + + class Meta: + database = db + +class NlpIntermediateStats(Model): + """ + Computation statistics for NAS-NLP-Benchmark. + Each corresponds to one trial result for 1-50 epoch. + + Attributes + ---------- + config : NlpTrialConfig + Corresponding config for trial. + train_loss : float or None + Final loss on training data. Could be NaN (None). + val_loss : float or None + Final loss on validation data. Could be NaN (None). + test_loss : float or None + Final loss on test data. Could be NaN (None). + training_time : float + Time elapsed in seconds. aka wall_time in in NAS-NLP-Benchmark repo. + """ + trial = ForeignKeyField(NlpTrialStats, backref='intermediates', index=True) + current_epoch = IntegerField(index=True) + train_loss = FloatField(null=True) + val_loss = FloatField(null=True) + test_loss = FloatField(null=True) + training_time = FloatField(null=True) + + class Meta: + database = db + \ No newline at end of file diff --git a/nni/nas/benchmarks/nlp/query.py b/nni/nas/benchmarks/nlp/query.py new file mode 100644 index 0000000000000000000000000000000000000000..98885896b4d719ac07532a90a317f77a96d0ddc6 --- /dev/null +++ b/nni/nas/benchmarks/nlp/query.py @@ -0,0 +1,61 @@ +import functools + +from peewee import fn +from playhouse.shortcuts import model_to_dict +from .model import NlpTrialStats, NlpTrialConfig + +def query_nlp_trial_stats(arch, dataset, reduction=None, include_intermediates=False): + """ + Query trial stats of NLP benchmark given conditions, including config(arch + dataset) and training results after 50 epoch. + + Parameters + ---------- + arch : dict or None + If a dict, it is in the format that is described in + :class:`nni.nas.benchmark.nlp.NlpTrialConfig`. Only trial stats matched will be returned. + If none, all architectures in the database will be matched. + dataset : str or None + If specified, can be one of the dataset available in :class:`nni.nas.benchmark.nlp.NlpTrialConfig`. + Otherwise a wildcard. + reduction : str or None + If 'none' or None, all trial stats will be returned directly. + If 'mean', fields in trial stats will be averaged given the same trial config. + Please note that some trial configs have multiple runs which make "reduction" meaningful, while some may not. + include_intermediates : boolean + If true, intermediate results will be returned. + + Returns + ------- + generator of dict + A generator of :class:`nni.nas.benchmark.nlp.NlpTrialStats` objects, + where each of them has been converted into a dict. 
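+ + Examples + -------- + A minimal sketch; the architecture is the example recipe shown in + :class:`nni.nas.benchmark.nlp.NlpTrialConfig`: + + >>> arch = {'h_new_0_input_0': 'node_3', 'h_new_0_input_1': 'x', 'h_new_0_op': 'linear', + ... 'node_2_input_0': 'x', 'node_2_input_1': 'h_prev_0', 'node_2_op': 'linear', + ... 'node_3_input_0': 'node_2', 'node_3_op': 'activation_leaky_relu'} + >>> for stats in query_nlp_trial_stats(arch, 'ptb'): + ... print(stats['val_loss'])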
+ """ + fields = [] + if reduction == 'none': + reduction = None + if reduction == 'mean': + for field_name in NlpTrialStats._meta.sorted_field_names: + if field_name not in ['id', 'config']: + fields.append(fn.AVG(getattr(NlpTrialStats, field_name)).alias(field_name)) + elif reduction is None: + fields.append(NlpTrialStats) + else: + raise ValueError('Unsupported reduction: \'%s\'' % reduction) + query = NlpTrialStats.select(*fields, NlpTrialConfig).join(NlpTrialConfig) + + conditions = [] + if arch is not None: + conditions.append(NlpTrialConfig.arch == arch) + if dataset is not None: + conditions.append(NlpTrialConfig.dataset == dataset) + + if conditions: + query = query.where(functools.reduce(lambda a, b: a & b, conditions)) + if reduction is not None: + query = query.group_by(NlpTrialStats.config) + for trial in query: + if include_intermediates: + data = model_to_dict(trial) + # exclude 'trial' from intermediates as it is already available in data + data['intermediates'] = [ + {k: v for k, v in model_to_dict(t).items() if k != 'trial'} for t in trial.intermediates + ] + yield data + else: + yield model_to_dict(trial) \ No newline at end of file diff --git a/nni/nas/benchmarks/utils.py b/nni/nas/benchmarks/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..462a5c6488c3ad251d04dd51585619e97545fec6 --- /dev/null +++ b/nni/nas/benchmarks/utils.py @@ -0,0 +1,105 @@ +import functools +import hashlib +import json +import logging +import os +import shutil +import tempfile +from pathlib import Path + +import requests +import tqdm +from playhouse.sqlite_ext import SqliteExtDatabase + +from .constants import DB_URLS, DATABASE_DIR + + +json_dumps = functools.partial(json.dumps, sort_keys=True) + +# to prevent repetitive loading of benchmarks +_loaded_benchmarks = {} + + +def load_or_download_file(local_path: str, download_url: str, download: bool = False, progress: bool = True): + f = None + hash_prefix = Path(local_path).stem.split('-')[-1] + + _logger = logging.getLogger(__name__) + + try: + sha256 = hashlib.sha256() + + if Path(local_path).exists(): + _logger.info('"%s" already exists. Checking hash.', local_path) + with Path(local_path).open('rb') as fr: + while True: + chunk = fr.read(8192) + if len(chunk) == 0: + break + sha256.update(chunk) + elif download: + _logger.info('"%s" does not exist. Downloading "%s"', local_path, download_url) + + # Follow download implementation in torchvision: + # We deliberately save it in a temp file and move it after + # download is complete. This prevents a local working checkpoint + # being overridden by a broken download. + dst_dir = Path(local_path).parent + dst_dir.mkdir(exist_ok=True, parents=True) + + f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir) + r = requests.get(download_url, stream=True) + total_length = int(r.headers.get('content-length')) + with tqdm.tqdm(total=total_length, disable=not progress, + unit='B', unit_scale=True, unit_divisor=1024) as pbar: + for chunk in r.iter_content(8192): + f.write(chunk) + sha256.update(chunk) + pbar.update(len(chunk)) + f.flush() + else: + raise FileNotFoundError('Download is not enabled, but file still does not exist: {}'.format(local_path)) + + digest = sha256.hexdigest() + if not digest.startswith(hash_prefix): + raise RuntimeError('Invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest)) + + if f is not None: + shutil.move(f.name, local_path) + finally: + if f is not None: + f.close() + if os.path.exists(f.name): + os.remove(f.name) + + +def load_benchmark(benchmark: str) -> SqliteExtDatabase: + """ + Load a benchmark as a database.
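+ The database file must already be on disk (see ``download_benchmark``); connections + are cached process-wide, so repeated calls are cheap. A minimal sketch: + + >>> db = load_benchmark('nasbench101') + >>> with db: + ... pass # run queries while the connection is open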
+ + Parameters + ---------- + benchmark : str + Benchmark name like nasbench201. + """ + if benchmark in _loaded_benchmarks: + return _loaded_benchmarks[benchmark] + url = DB_URLS[benchmark] + local_path = os.path.join(DATABASE_DIR, os.path.basename(url)) + load_or_download_file(local_path, url) + _loaded_benchmarks[benchmark] = SqliteExtDatabase(local_path, autoconnect=True) + return _loaded_benchmarks[benchmark] + + +def download_benchmark(benchmark: str, progress: bool = True): + """ + Download a converted benchmark. + + Parameters + ---------- + benchmark : str + Benchmark name like nasbench201. + """ + url = DB_URLS[benchmark] + local_path = os.path.join(DATABASE_DIR, os.path.basename(url)) + load_or_download_file(local_path, url, True, progress) diff --git a/nni/nas/pytorch/__init__.py b/nni/nas/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a61f1672d859edda94ed7194e1bc313d824a39b --- /dev/null +++ b/nni/nas/pytorch/__init__.py @@ -0,0 +1,6 @@ +from .base_mutator import BaseMutator +from .base_trainer import BaseTrainer +from .fixed import apply_fixed_architecture +from .mutables import Mutable, LayerChoice, InputChoice +from .mutator import Mutator +from .trainer import Trainer diff --git a/nni/nas/pytorch/base_mutator.py b/nni/nas/pytorch/base_mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..df1a5f9ba8719a5c6619cd4ccc2874c0017a44db --- /dev/null +++ b/nni/nas/pytorch/base_mutator.py @@ -0,0 +1,155 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging + +import torch.nn as nn +from nni.nas.pytorch.mutables import Mutable, MutableScope, InputChoice +from nni.nas.pytorch.utils import StructuredMutableTreeNode + +logger = logging.getLogger(__name__) + + +class BaseMutator(nn.Module): + """ + A mutator is responsible for mutating a graph by obtaining the search space from the network and implementing + callbacks that are called in the ``forward`` of mutables. + + Parameters + ---------- + model : nn.Module + PyTorch model to apply mutator on. + """ + + def __init__(self, model): + super().__init__() + self.__dict__["model"] = model + self._structured_mutables = self._parse_search_space(self.model) + + def _parse_search_space(self, module, root=None, prefix="", memo=None, nested_detection=None): + if memo is None: + memo = set() + if root is None: + root = StructuredMutableTreeNode(None) + if module not in memo: + memo.add(module) + if isinstance(module, Mutable): + if nested_detection is not None: + raise RuntimeError("Cannot have nested search space. Error at {} in {}" + .format(module, nested_detection)) + module.name = prefix + module.set_mutator(self) + root = root.add_child(module) + if not isinstance(module, MutableScope): + nested_detection = module + if isinstance(module, InputChoice): + for k in module.choose_from: + if k != InputChoice.NO_KEY and k not in [m.key for m in memo if isinstance(m, Mutable)]: + raise RuntimeError("'{}' required by '{}' not found in keys that appeared before, and is not NO_KEY." + .format(k, module.key)) + for name, submodule in module._modules.items(): + if submodule is None: + continue + submodule_prefix = prefix + ("." if prefix else "") + name + self._parse_search_space(submodule, root, submodule_prefix, memo=memo, + nested_detection=nested_detection) + return root + + @property + def mutables(self): + """ + A generator of all modules inheriting :class:`~nni.nas.pytorch.mutables.Mutable`.
+ Modules are yielded in the order that they are defined in ``__init__``. + For mutables with their keys appearing multiple times, only the first one will appear. + """ + return self._structured_mutables + + @property + def undedup_mutables(self): + return self._structured_mutables.traverse(deduplicate=False) + + def forward(self, *inputs): + """ + Warnings + -------- + Don't call forward of a mutator. + """ + raise RuntimeError("Forward is undefined for mutators.") + + def __setattr__(self, name, value): + if name == "model": + raise AttributeError("Attribute `model` can be set at most once, and you shouldn't use `self.model = model` to " + "include you network, as it will include all parameters in model into the mutator.") + return super().__setattr__(name, value) + + def enter_mutable_scope(self, mutable_scope): + """ + Callback when forward of a MutableScope is entered. + + Parameters + ---------- + mutable_scope : MutableScope + The mutable scope that is entered. + """ + pass + + def exit_mutable_scope(self, mutable_scope): + """ + Callback when forward of a MutableScope is exited. + + Parameters + ---------- + mutable_scope : MutableScope + The mutable scope that is exited. + """ + pass + + def on_forward_layer_choice(self, mutable, *args, **kwargs): + """ + Callbacks of forward in LayerChoice. + + Parameters + ---------- + mutable : nni.nas.pytorch.mutables.LayerChoice + Module whose forward is called. + args : list of torch.Tensor + The arguments of its forward function. + kwargs : dict + The keyword arguments of its forward function. + + Returns + ------- + tuple of torch.Tensor and torch.Tensor + Output tensor and mask. + """ + raise NotImplementedError + + def on_forward_input_choice(self, mutable, tensor_list): + """ + Callbacks of forward in InputChoice. + + Parameters + ---------- + mutable : nni.nas.pytorch.mutables.InputChoice + Mutable that is called. + tensor_list : list of torch.Tensor + The arguments mutable is called with. + + Returns + ------- + tuple of torch.Tensor and torch.Tensor + Output tensor and mask. + """ + raise NotImplementedError + + def export(self): + """ + Export the data of all decisions. This should output the decisions of all the mutables, so that the whole + network can be fully determined with these decisions for further training from scratch. + + Returns + ------- + dict + Mappings from mutable keys to decisions. + """ + raise NotImplementedError diff --git a/nni/nas/pytorch/base_trainer.py b/nni/nas/pytorch/base_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..2e7a4a2a23a24cd2886eac12dda80eedfa6076d8 --- /dev/null +++ b/nni/nas/pytorch/base_trainer.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from abc import ABC, abstractmethod + + +class BaseTrainer(ABC): + + @abstractmethod + def train(self): + """ + Override the method to train. + """ + raise NotImplementedError + + @abstractmethod + def validate(self): + """ + Override the method to validate. + """ + raise NotImplementedError + + @abstractmethod + def export(self, file): + """ + Override the method to export to file. + + Parameters + ---------- + file : str + File path to export to. + """ + raise NotImplementedError + + @abstractmethod + def checkpoint(self): + """ + Override to dump a checkpoint. 
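+ + Examples + -------- + A minimal sketch of a concrete subclass (method bodies are illustrative): + + >>> class MyTrainer(BaseTrainer): + ... def train(self): ... + ... def validate(self): ... + ... def export(self, file): ... + ... def checkpoint(self): ...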
+ """ + raise NotImplementedError diff --git a/nni/nas/pytorch/callbacks.py b/nni/nas/pytorch/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..86a0dc3800745d93939b9ce85dd9cdca3abef166 --- /dev/null +++ b/nni/nas/pytorch/callbacks.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os + +import torch +import torch.nn as nn + +_logger = logging.getLogger(__name__) + + +class Callback: + """ + Callback provides an easy way to react to events like begin/end of epochs. + """ + + def __init__(self): + self.model = None + self.mutator = None + self.trainer = None + + def build(self, model, mutator, trainer): + """ + Callback needs to be built with model, mutator, trainer, to get updates from them. + + Parameters + ---------- + model : nn.Module + Model to be trained. + mutator : nn.Module + Mutator that mutates the model. + trainer : BaseTrainer + Trainer that is to call the callback. + """ + self.model = model + self.mutator = mutator + self.trainer = trainer + + def on_epoch_begin(self, epoch): + """ + Implement this to do something at the beginning of an epoch. + + Parameters + ---------- + epoch : int + Epoch number, starting from 0. + """ + pass + + def on_epoch_end(self, epoch): + """ + Implement this to do something at the end of an epoch. + + Parameters + ---------- + epoch : int + Epoch number, starting from 0. + """ + pass + + def on_batch_begin(self, epoch): + pass + + def on_batch_end(self, epoch): + pass + + +class LRSchedulerCallback(Callback): + """ + Calls the scheduler at the end of every epoch. + + Parameters + ---------- + scheduler : LRScheduler + Scheduler to be called. + """ + def __init__(self, scheduler, mode="epoch"): + super().__init__() + assert mode == "epoch" + self.scheduler = scheduler + self.mode = mode + + def on_epoch_end(self, epoch): + """ + Call ``self.scheduler.step()`` on epoch end. + """ + self.scheduler.step() + + +class ArchitectureCheckpoint(Callback): + """ + Calls ``trainer.export()`` at the end of every epoch. + + Parameters + ---------- + checkpoint_dir : str + Location to save checkpoints. + """ + def __init__(self, checkpoint_dir): + super().__init__() + self.checkpoint_dir = checkpoint_dir + os.makedirs(self.checkpoint_dir, exist_ok=True) + + def on_epoch_end(self, epoch): + """ + Dump to ``/checkpoint_dir/epoch_{number}.json`` on epoch end. + """ + dest_path = os.path.join(self.checkpoint_dir, "epoch_{}.json".format(epoch)) + _logger.info("Saving architecture to %s", dest_path) + self.trainer.export(dest_path) + + +class ModelCheckpoint(Callback): + """ + Saves the model ``state_dict`` at the end of every epoch. + + Parameters + ---------- + checkpoint_dir : str + Location to save checkpoints. + """ + def __init__(self, checkpoint_dir): + super().__init__() + self.checkpoint_dir = checkpoint_dir + os.makedirs(self.checkpoint_dir, exist_ok=True) + + def on_epoch_end(self, epoch): + """ + Dump to ``/checkpoint_dir/epoch_{number}.pth.tar`` on every epoch end. + ``DataParallel`` objects will have their inner module exported.
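+ + Examples + -------- + A sketch of restoring a saved checkpoint later (the path and model are illustrative): + + >>> state_dict = torch.load('./checkpoints/epoch_9.pth.tar') + >>> model.load_state_dict(state_dict)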
+ """ + if isinstance(self.model, nn.DataParallel): + state_dict = self.model.module.state_dict() + else: + state_dict = self.model.state_dict() + dest_path = os.path.join(self.checkpoint_dir, "epoch_{}.pth.tar".format(epoch)) + _logger.info("Saving model to %s", dest_path) + torch.save(state_dict, dest_path) diff --git a/nni/nas/pytorch/fixed.py b/nni/nas/pytorch/fixed.py new file mode 100644 index 0000000000000000000000000000000000000000..9bfa933e80e740ef5ff8832b9d45484bbdc07771 --- /dev/null +++ b/nni/nas/pytorch/fixed.py @@ -0,0 +1,147 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import logging + +from .mutables import InputChoice, LayerChoice, MutableScope +from .mutator import Mutator +from .utils import to_list + + +_logger = logging.getLogger(__name__) + + +class FixedArchitecture(Mutator): + """ + Fixed architecture mutator that always selects a certain graph. + + Parameters + ---------- + model : nn.Module + A mutable network. + fixed_arc : dict + Preloaded architecture object. + strict : bool + Force everything that appears in ``fixed_arc`` to be used at least once. + verbose : bool + Print log messages if set to True + """ + + def __init__(self, model, fixed_arc, strict=True, verbose=True): + super().__init__(model) + self._fixed_arc = fixed_arc + self.verbose = verbose + + mutable_keys = set([mutable.key for mutable in self.mutables if not isinstance(mutable, MutableScope)]) + fixed_arc_keys = set(self._fixed_arc.keys()) + if fixed_arc_keys - mutable_keys: + raise RuntimeError("Unexpected keys found in fixed architecture: {}.".format(fixed_arc_keys - mutable_keys)) + if mutable_keys - fixed_arc_keys: + raise RuntimeError("Missing keys in fixed architecture: {}.".format(mutable_keys - fixed_arc_keys)) + self._fixed_arc = self._from_human_readable_architecture(self._fixed_arc) + + def _from_human_readable_architecture(self, human_arc): + # convert from an exported architecture + result_arc = {k: to_list(v) for k, v in human_arc.items()} # there could be tensors, numpy arrays, etc. + # First, convert non-list to list, because there could be {"op1": 0} or {"op1": "conv"}, + # which means {"op1": [0, ]} ir {"op1": ["conv", ]} + result_arc = {k: v if isinstance(v, list) else [v] for k, v in result_arc.items()} + # Second, infer which ones are multi-hot arrays and which ones are in human-readable format. + # This is non-trivial, since if an array in [0, 1], we cannot know for sure it means [false, true] or [true, true]. + # Here, we assume an multihot array has to be a boolean array or a float array and matches the length. 
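+ # For example, {"op1": 1} or {"op1": "conv"} is first wrapped into a list and then + # one-hot encoded, while a boolean/float list whose length already matches the + # mutable is kept as-is.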
+        for mutable in self.mutables:
+            if mutable.key not in result_arc:
+                continue  # skip silently
+            choice_arr = result_arc[mutable.key]
+            if all(isinstance(v, bool) for v in choice_arr) or all(isinstance(v, float) for v in choice_arr):
+                if (isinstance(mutable, LayerChoice) and len(mutable) == len(choice_arr)) or \
+                        (isinstance(mutable, InputChoice) and mutable.n_candidates == len(choice_arr)):
+                    # multi-hot, do nothing
+                    continue
+            if isinstance(mutable, LayerChoice):
+                choice_arr = [mutable.names.index(val) if isinstance(val, str) else val for val in choice_arr]
+                choice_arr = [i in choice_arr for i in range(len(mutable))]
+            elif isinstance(mutable, InputChoice):
+                choice_arr = [mutable.choose_from.index(val) if isinstance(val, str) else val for val in choice_arr]
+                choice_arr = [i in choice_arr for i in range(mutable.n_candidates)]
+            result_arc[mutable.key] = choice_arr
+        return result_arc
+
+    def sample_search(self):
+        """
+        Always returns the fixed architecture.
+        """
+        return self._fixed_arc
+
+    def sample_final(self):
+        """
+        Always returns the fixed architecture.
+        """
+        return self._fixed_arc
+
+    def replace_layer_choice(self, module=None, prefix=""):
+        """
+        Replace layer choices with selected candidates. This is done on a best-effort basis.
+        In case of weighted or multiple choices, the choices weighted with zero are deleted.
+        In case of a single choice, the whole module is replaced with the chosen candidate.
+
+        Parameters
+        ----------
+        module : nn.Module
+            Module to be processed.
+        prefix : str
+            Module name under global namespace.
+        """
+        if module is None:
+            module = self.model
+        for name, mutable in module.named_children():
+            global_name = (prefix + "." if prefix else "") + name
+            if isinstance(mutable, LayerChoice):
+                chosen = self._fixed_arc[mutable.key]
+                if sum(chosen) == 1 and max(chosen) == 1 and not mutable.return_mask:
+                    # sum is one and max is one, so there must be exactly one chosen;
+                    # this is compatible with integer, boolean and float arrays
+                    if self.verbose:
+                        _logger.info("Replacing %s with candidate number %d.", global_name, chosen.index(1))
+                    setattr(module, name, mutable[chosen.index(1)])
+                else:
+                    if mutable.return_mask and self.verbose:
+                        _logger.info("`return_mask` flag of %s is true. As it relies on the behavior of LayerChoice, "
                                     "LayerChoice will not be replaced.", global_name)
+                    # remove unused parameters
+                    for ch, n in zip(chosen, mutable.names):
+                        if ch == 0 and not isinstance(ch, float):
+                            setattr(mutable, n, None)
+            else:
+                self.replace_layer_choice(mutable, global_name)
+
+
+def apply_fixed_architecture(model, fixed_arc, verbose=True):
+    """
+    Load architecture from ``fixed_arc`` and apply to model.
+
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model with mutables.
+    fixed_arc : str or dict
+        Path to the JSON that stores the architecture, or dict that stores the exported architecture.
+    verbose : bool
+        Print log messages if set to True.
+
+    Returns
+    -------
+    FixedArchitecture
+        Mutator that is responsible for fixing the graph.
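+
+    A usage sketch (the model class and path are illustrative)::
+
+        model = MyMutableNet()  # a network containing LayerChoice/InputChoice
+        apply_fixed_architecture(model, "checkpoints/epoch_0.json")
+        # the model is now fixed to the exported architecture and can be retrained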
+    """
+
+    if isinstance(fixed_arc, str):
+        with open(fixed_arc) as f:
+            fixed_arc = json.load(f)
+    architecture = FixedArchitecture(model, fixed_arc, verbose=verbose)
+    architecture.reset()
+
+    # for the convenience of parameter counting
+    architecture.replace_layer_choice()
+    return architecture
diff --git a/nni/nas/pytorch/mutables.py b/nni/nas/pytorch/mutables.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fbb655e51e25c0d639746657bec5d4709e50c44
--- /dev/null
+++ b/nni/nas/pytorch/mutables.py
@@ -0,0 +1,344 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import warnings
+from collections import OrderedDict
+
+import torch.nn as nn
+
+from nni.nas.pytorch.utils import global_mutable_counting
+
+logger = logging.getLogger(__name__)
+
+
+class Mutable(nn.Module):
+    """
+    Mutable is designed to function as a normal layer, with all necessary operators' weights.
+    States and weights of architectures should be included in the mutator, instead of the layer itself.
+
+    Mutable has a key, which marks the identity of the mutable. This key can be used by users to share
+    decisions among different mutables. In a mutator's implementation, mutators should use the key to
+    distinguish different mutables. Mutables that share the same key should be "similar" to each other.
+
+    Currently the default scope for keys is global. By default, keys use a global counter starting from 1
+    to produce unique ids.
+
+    Parameters
+    ----------
+    key : str
+        The key of mutable.
+
+    Notes
+    -----
+    The counter is program level, but mutables are model level. In case multiple models are defined, and
+    you want to have ``counter`` starting from 1 in the second model, it's recommended to assign keys manually
+    instead of using automatic keys.
+    """
+
+    def __init__(self, key=None):
+        super().__init__()
+        if key is not None:
+            if not isinstance(key, str):
+                key = str(key)
+                logger.warning("Key \"%s\" is not a string, converted to string.", key)
+            self._key = key
+        else:
+            self._key = self.__class__.__name__ + str(global_mutable_counting())
+        self.init_hook = self.forward_hook = None
+
+    def __deepcopy__(self, memodict=None):
+        raise NotImplementedError("Deep copy doesn't work for mutables.")
+
+    def __call__(self, *args, **kwargs):
+        self._check_built()
+        return super().__call__(*args, **kwargs)
+
+    def set_mutator(self, mutator):
+        if "mutator" in self.__dict__:
+            raise RuntimeError("`set_mutator` is called more than once. Did you parse the search space multiple times? "
+                               "Or did you apply multiple fixed architectures?")
+        self.__dict__["mutator"] = mutator
+
+    @property
+    def key(self):
+        """
+        Read-only property of key.
+        """
+        return self._key
+
+    @property
+    def name(self):
+        """
+        After the search space is parsed, it will be the module name of the mutable.
+        """
+        return self._name if hasattr(self, "_name") else self._key
+
+    @name.setter
+    def name(self, name):
+        self._name = name
+
+    def _check_built(self):
+        if not hasattr(self, "mutator"):
+            raise ValueError(
+                "Mutator not set for {}. You might have forgotten to initialize and apply your mutator. "
+                "Or did you initialize a mutable on the fly in forward pass? Move to `__init__` "
+                "so that trainer can locate all your mutables. See NNI docs for more details.".format(self))
+
+
+class MutableScope(Mutable):
+    """
+    Mutable scope marks a subgraph/submodule to help mutators make better decisions.
+
+    If not annotated with mutable scope, the search space will be flattened as a list.
+    However, some mutators might need to leverage the concept of a "cell". So if a module is defined as a
+    mutable scope, everything in it will look like a "sub-search-space" within the scope. Scopes can be nested.
+
+    There are two ways mutators can use mutable scope. One is to traverse the search space as a tree during
+    initialization and reset. The other is to implement ``enter_mutable_scope`` and ``exit_mutable_scope``.
+    They are called before and after the forward method of the class inheriting mutable scope.
+
+    Mutable scopes are also mutables that are listed in ``mutator.mutables`` (search space), but they are not supposed
+    to appear in the dict of choices.
+
+    Parameters
+    ----------
+    key : str
+        Key of mutable scope.
+    """
+    def __init__(self, key):
+        super().__init__(key=key)
+
+    def _check_built(self):
+        return True  # bypass the test because it's deprecated
+
+    def __call__(self, *args, **kwargs):
+        if not hasattr(self, 'mutator'):
+            return super().__call__(*args, **kwargs)
+        warnings.warn("`MutableScope` is deprecated in Retiarii.", DeprecationWarning)
+        try:
+            self._check_built()
+            self.mutator.enter_mutable_scope(self)
+            return super().__call__(*args, **kwargs)
+        finally:
+            self.mutator.exit_mutable_scope(self)
+
+
+class LayerChoice(Mutable):
+    """
+    Layer choice selects one of the ``op_candidates``, then applies it to the inputs and returns the results.
+    In rare cases, it can also select zero or many.
+
+    Layer choice does not allow itself to be nested.
+
+    Parameters
+    ----------
+    op_candidates : list of nn.Module or OrderedDict
+        A module list to be selected from.
+    reduction : str
+        ``mean``, ``concat``, ``sum`` or ``none``. Policy if multiples are selected.
+        If ``none``, a list is returned. ``mean`` returns the average. ``sum`` returns the sum.
+        ``concat`` concatenates the list at dimension 1.
+    return_mask : bool
+        If ``return_mask``, return output tensor and a mask. Otherwise return tensor only.
+    key : str
+        Key of the layer choice.
+
+    Attributes
+    ----------
+    length : int
+        Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended.
+    names : list of str
+        Names of candidates.
+    choices : list of Module
+        Deprecated. A list of all candidate modules in the layer choice module.
+        ``list(layer_choice)`` is recommended, which will serve the same purpose.
+
+    Notes
+    -----
+    ``op_candidates`` can be a list of modules or an ordered dict of named modules, for example,
+
+    .. code-block:: python
+
+        self.op_choice = LayerChoice(OrderedDict([
+            ("conv3x3", nn.Conv2d(3, 16, 3)),
+            ("conv5x5", nn.Conv2d(3, 16, 5)),
+            ("conv7x7", nn.Conv2d(3, 16, 7))
+        ]))
+
+    Elements in layer choice can be modified or deleted. Use ``del self.op_choice["conv5x5"]`` or
+    ``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet.
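+
+    A forward-pass sketch under a bound mutator (the surrounding network is assumed)::
+
+        def forward(self, x):
+            # the mutator decides which candidate(s) actually run and how outputs reduce
+            return self.op_choice(x)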
+    """
+
+    def __init__(self, op_candidates, reduction="sum", return_mask=False, key=None):
+        super().__init__(key=key)
+        self.names = []
+        if isinstance(op_candidates, OrderedDict):
+            for name, module in op_candidates.items():
+                assert name not in ["length", "reduction", "return_mask", "_key", "key", "names"], \
+                    "Please don't use a reserved name '{}' for your module.".format(name)
+                self.add_module(name, module)
+                self.names.append(name)
+        elif isinstance(op_candidates, list):
+            for i, module in enumerate(op_candidates):
+                self.add_module(str(i), module)
+                self.names.append(str(i))
+        else:
+            raise TypeError("Unsupported op_candidates type: {}".format(type(op_candidates)))
+        self.reduction = reduction
+        self.return_mask = return_mask
+
+    def __getitem__(self, idx):
+        if isinstance(idx, str):
+            return self._modules[idx]
+        return list(self)[idx]
+
+    def __setitem__(self, idx, module):
+        key = idx if isinstance(idx, str) else self.names[idx]
+        return setattr(self, key, module)
+
+    def __delitem__(self, idx):
+        if isinstance(idx, slice):
+            for key in self.names[idx]:
+                delattr(self, key)
+        else:
+            if isinstance(idx, str):
+                key, idx = idx, self.names.index(idx)
+            else:
+                key = self.names[idx]
+            delattr(self, key)
+        del self.names[idx]
+
+    @property
+    def length(self):
+        warnings.warn("layer_choice.length is deprecated. Use `len(layer_choice)` instead.", DeprecationWarning)
+        return len(self)
+
+    def __len__(self):
+        return len(self.names)
+
+    def __iter__(self):
+        return map(lambda name: self._modules[name], self.names)
+
+    @property
+    def choices(self):
+        warnings.warn("layer_choice.choices is deprecated. Use `list(layer_choice)` instead.", DeprecationWarning)
+        return list(self)
+
+    def forward(self, *args, **kwargs):
+        """
+        Returns
+        -------
+        tuple of tensors
+            Output and selection mask. If ``return_mask`` is ``False``, only output is returned.
+        """
+        out, mask = self.mutator.on_forward_layer_choice(self, *args, **kwargs)
+        if self.return_mask:
+            return out, mask
+        return out
+
+
+class InputChoice(Mutable):
+    """
+    Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys). For beginners,
+    using ``n_candidates`` instead of ``choose_from`` is a safe option. To get the most power out of it, you might
+    want to know about ``choose_from``.
+
+    The keys in ``choose_from`` can be keys that appear in past mutables, or ``NO_KEY`` if there are no suitable ones.
+    The keys are designed to be the keys of the sources. To help mutators make better decisions,
+    mutators might be interested in how the tensors to choose from come into place. For example, the tensor is the
+    output of some operator, some node, some cell, or some module. If this operator happens to be a mutable (e.g.,
+    ``LayerChoice`` or ``InputChoice``), it has a key naturally that can be used as a source key. If it's a
+    module/submodule, it needs to be annotated with a key: that's where a :class:`MutableScope` is needed.
+
+    In the example below, ``input_choice`` is a 4-choose-any. The first three are semantically the outputs of cell1,
+    cell2 and op, respectively. Notice that an extra max pooling follows cell1, indicating x1 is not
+    "actually" the direct output of cell1.
+
+    .. code-block:: python
+
+        class Cell(MutableScope):
+            pass
+
+        class Net(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.cell1 = Cell("cell1")
+                self.cell2 = Cell("cell2")
+                self.op = LayerChoice([conv3x3(), conv5x5()], key="op")
+                self.input_choice = InputChoice(choose_from=["cell1", "cell2", "op", InputChoice.NO_KEY])
+
+            def forward(self, x):
+                x1 = max_pooling(self.cell1(x))
+                x2 = self.cell2(x)
+                x3 = self.op(x)
+                x4 = torch.zeros_like(x)
+                return self.input_choice([x1, x2, x3, x4])
+
+    Parameters
+    ----------
+    n_candidates : int
+        Number of inputs to choose from.
+    choose_from : list of str
+        List of source keys to choose from. At least one of ``choose_from`` and ``n_candidates`` must be fulfilled.
+        If ``n_candidates`` has a value but ``choose_from`` is None, it will be automatically treated as
+        ``n_candidates`` number of empty strings.
+    n_chosen : int
+        Recommended inputs to choose. If None, mutator is instructed to select any.
+    reduction : str
+        ``mean``, ``concat``, ``sum`` or ``none``. See :class:`LayerChoice`.
+    return_mask : bool
+        If ``return_mask``, return output tensor and a mask. Otherwise return tensor only.
+    key : str
+        Key of the input choice.
+    """
+
+    NO_KEY = ""
+
+    def __init__(self, n_candidates=None, choose_from=None, n_chosen=None,
+                 reduction="sum", return_mask=False, key=None):
+        super().__init__(key=key)
+        # precondition check
+        assert n_candidates is not None or choose_from is not None, "At least one of `n_candidates` and `choose_from` " \
+            "must be not None."
+        if choose_from is not None and n_candidates is None:
+            n_candidates = len(choose_from)
+        elif choose_from is None and n_candidates is not None:
+            choose_from = [self.NO_KEY] * n_candidates
+        assert n_candidates == len(choose_from), "Number of candidates must be equal to the length of `choose_from`."
+        assert n_candidates > 0, "Number of candidates must be greater than 0."
+        assert n_chosen is None or 0 <= n_chosen <= n_candidates, "Expected selected number must be None or no more " \
+            "than number of candidates."
+
+        self.n_candidates = n_candidates
+        self.choose_from = choose_from.copy()
+        self.n_chosen = n_chosen
+        self.reduction = reduction
+        self.return_mask = return_mask
+
+    def forward(self, optional_inputs):
+        """
+        Forward method of InputChoice.
+
+        Parameters
+        ----------
+        optional_inputs : list or dict
+            Recommended to be a dict. As a dict, inputs will be converted to a list that follows the order of
+            ``choose_from`` in initialization. As a list, inputs must follow the semantic order that is the same as
+            ``choose_from``.
+
+        Returns
+        -------
+        tuple of tensors
+            Output and selection mask. If ``return_mask`` is ``False``, only output is returned.
+        """
+        optional_input_list = optional_inputs
+        if isinstance(optional_inputs, dict):
+            optional_input_list = [optional_inputs[tag] for tag in self.choose_from]
+        assert isinstance(optional_input_list, list), \
+            "Optional input list must be a list, not a {}.".format(type(optional_input_list))
+        assert len(optional_inputs) == self.n_candidates, \
+            "Length of the input list must be equal to the number of candidates."
+        out, mask = self.mutator.on_forward_input_choice(self, optional_input_list)
+        if self.return_mask:
+            return out, mask
+        return out
diff --git a/nni/nas/pytorch/mutator.py b/nni/nas/pytorch/mutator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1894b52496ae63611178ccdf301cd4f612abc9e
--- /dev/null
+++ b/nni/nas/pytorch/mutator.py
@@ -0,0 +1,308 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from collections import defaultdict
+
+import numpy as np
+import torch
+
+from .base_mutator import BaseMutator
+from .mutables import LayerChoice, InputChoice
+from .utils import to_list
+
+logger = logging.getLogger(__name__)
+
+
+class Mutator(BaseMutator):
+
+    def __init__(self, model):
+        super().__init__(model)
+        self._cache = dict()
+        self._connect_all = False
+
+    def sample_search(self):
+        """
+        Override this method to iterate over mutables and make decisions.
+
+        Returns
+        -------
+        dict
+            A mapping from key of mutables to decisions.
+        """
+        raise NotImplementedError
+
+    def sample_final(self):
+        """
+        Override this method to iterate over mutables and make decisions that are final
+        for export and retraining.
+
+        Returns
+        -------
+        dict
+            A mapping from key of mutables to decisions.
+        """
+        raise NotImplementedError
+
+    def reset(self):
+        """
+        Reset the mutator by calling ``sample_search`` to resample (for search). Stores the result in a local
+        variable so that ``on_forward_layer_choice`` and ``on_forward_input_choice`` can use the decision directly.
+        """
+        self._cache = self.sample_search()
+
+    def export(self):
+        """
+        Resample (for final) and return results.
+
+        Returns
+        -------
+        dict
+            A mapping from key of mutables to decisions.
+        """
+        sampled = self.sample_final()
+        result = dict()
+        for mutable in self.mutables:
+            if not isinstance(mutable, (LayerChoice, InputChoice)):
+                # not supported as built-in
+                continue
+            result[mutable.key] = self._convert_mutable_decision_to_human_readable(mutable, sampled.pop(mutable.key))
+        if sampled:
+            raise ValueError("Unexpected keys returned from 'sample_final()': {}.".format(list(sampled.keys())))
+        return result
+
+    def status(self):
+        """
+        Return current selection status of mutator.
+
+        Returns
+        -------
+        dict
+            A mapping from key of mutables to decisions. All weights (boolean type and float type)
+            are converted into real number values. Numpy arrays and tensors are converted into lists.
+        """
+        data = dict()
+        for k, v in self._cache.items():
+            if torch.is_tensor(v):
+                v = v.detach().cpu().numpy().tolist()
+            if isinstance(v, np.ndarray):
+                v = v.astype(np.float32).tolist()
+            data[k] = v
+        return data
+
+    def graph(self, inputs):
+        """
+        Return model supernet graph.
+
+        Parameters
+        ----------
+        inputs : tuple of tensor
+            Inputs that will be fed into the network.
+
+        Returns
+        -------
+        dict
+            Containing ``node``, in Tensorboard GraphDef format.
+            Additional key ``mutable`` is a map from key to list of modules.
+        """
+        if not torch.__version__.startswith("1.4"):
+            logger.warning("Graph is only tested with PyTorch 1.4. Other versions might not work.")
+        from nni.common.graph_utils import build_graph
+        from google.protobuf import json_format
+        # protobuf should be installed as long as tensorboard is installed
+        try:
+            self._connect_all = True
+            graph_def, _ = build_graph(self.model, inputs, verbose=False)
+            result = json_format.MessageToDict(graph_def)
+        finally:
+            self._connect_all = False
+
+        # `mutable` is to map the keys to a list of corresponding modules.
+        # A key can be linked to multiple modules, use `dedup=False` to find them all.
+        result["mutable"] = defaultdict(list)
+        for mutable in self.mutables.traverse(deduplicate=False):
+            # A module will be represented in the format of
+            # [{"type": "Net", "name": ""}, {"type": "Cell", "name": "cell1"}, {"type": "Conv2d", "name": "conv"}]
+            # which will be concatenated into Net/Cell[cell1]/Conv2d[conv] in frontend.
+            # This format is aligned with the scope name jit gives.
+            modules = mutable.name.split(".")
+            path = [
+                {"type": self.model.__class__.__name__, "name": ""}
+            ]
+            m = self.model
+            for module in modules:
+                m = getattr(m, module)
+                path.append({
+                    "type": m.__class__.__name__,
+                    "name": module
+                })
+            result["mutable"][mutable.key].append(path)
+        return result
+
+    def on_forward_layer_choice(self, mutable, *args, **kwargs):
+        """
+        By default, this method retrieves the decision obtained previously, and selects certain operations.
+        Only operations with non-zero weight will be executed. The results will be added to a list.
+        Then it will reduce the list of all tensor outputs with the policy specified in ``mutable.reduction``.
+
+        Parameters
+        ----------
+        mutable : nni.nas.pytorch.mutables.LayerChoice
+            Layer choice module.
+        args : list of torch.Tensor
+            Inputs
+        kwargs : dict
+            Inputs
+
+        Returns
+        -------
+        tuple of torch.Tensor and torch.Tensor
+            Output and mask.
+        """
+        if self._connect_all:
+            return self._all_connect_tensor_reduction(mutable.reduction,
+                                                      [op(*args, **kwargs) for op in mutable]), \
+                torch.ones(len(mutable)).bool()
+
+        def _map_fn(op, args, kwargs):
+            return op(*args, **kwargs)
+
+        mask = self._get_decision(mutable)
+        assert len(mask) == len(mutable), \
+            "Invalid mask, expected {} to be of length {}.".format(mask, len(mutable))
+        out, mask = self._select_with_mask(_map_fn, [(choice, args, kwargs) for choice in mutable], mask)
+        return self._tensor_reduction(mutable.reduction, out), mask
+
+    def on_forward_input_choice(self, mutable, tensor_list):
+        """
+        By default, this method retrieves the decision obtained previously, and selects certain tensors.
+        Then it will reduce the list of all tensor outputs with the policy specified in ``mutable.reduction``.
+
+        Parameters
+        ----------
+        mutable : nni.nas.pytorch.mutables.InputChoice
+            Input choice module.
+        tensor_list : list of torch.Tensor
+            Tensor list to apply the decision on.
+
+        Returns
+        -------
+        tuple of torch.Tensor and torch.Tensor
+            Output and mask.
+        """
+        if self._connect_all:
+            return self._all_connect_tensor_reduction(mutable.reduction, tensor_list), \
+                torch.ones(mutable.n_candidates).bool()
+        mask = self._get_decision(mutable)
+        assert len(mask) == mutable.n_candidates, \
+            "Invalid mask, expected {} to be of length {}.".format(mask, mutable.n_candidates)
+        out, mask = self._select_with_mask(lambda x: x, [(t,) for t in tensor_list], mask)
+        return self._tensor_reduction(mutable.reduction, out), mask
+
+    def _select_with_mask(self, map_fn, candidates, mask):
+        """
+        Select masked tensors and return a list of tensors.
+
+        Parameters
+        ----------
+        map_fn : function
+            Convert candidates to target candidates. Can be simply identity.
+        candidates : list of torch.Tensor
+            Tensor list to apply the decision on.
+        mask : list-like object
+            Can be a list, a numpy array or a tensor (recommended). Needs to
+            have the same length as ``candidates``.
+
+        Returns
+        -------
+        tuple of list of torch.Tensor and torch.Tensor
+            Output and mask.
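+
+        For example (values are illustrative): a boolean mask ``[True, False, True]``
+        keeps candidates 0 and 2 unchanged, while a float mask ``[0.7, 0.0, 0.3]``
+        evaluates the same two candidates and scales them by their weights;
+        zero-weighted candidates are never evaluated.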
+ """ + if (isinstance(mask, list) and len(mask) >= 1 and isinstance(mask[0], bool)) or \ + (isinstance(mask, np.ndarray) and mask.dtype == np.bool) or \ + "BoolTensor" in mask.type(): + out = [map_fn(*cand) for cand, m in zip(candidates, mask) if m] + elif (isinstance(mask, list) and len(mask) >= 1 and isinstance(mask[0], (float, int))) or \ + (isinstance(mask, np.ndarray) and mask.dtype in (np.float32, np.float64, np.int32, np.int64)) or \ + "FloatTensor" in mask.type(): + out = [map_fn(*cand) * m for cand, m in zip(candidates, mask) if m] + else: + raise ValueError("Unrecognized mask '%s'" % mask) + if not torch.is_tensor(mask): + mask = torch.tensor(mask) # pylint: disable=not-callable + return out, mask + + def _tensor_reduction(self, reduction_type, tensor_list): + if reduction_type == "none": + return tensor_list + if not tensor_list: + return None # empty. return None for now + if len(tensor_list) == 1: + return tensor_list[0] + if reduction_type == "sum": + return sum(tensor_list) + if reduction_type == "mean": + return sum(tensor_list) / len(tensor_list) + if reduction_type == "concat": + return torch.cat(tensor_list, dim=1) + raise ValueError("Unrecognized reduction policy: \"{}\"".format(reduction_type)) + + def _all_connect_tensor_reduction(self, reduction_type, tensor_list): + if reduction_type == "none": + return tensor_list + if reduction_type == "concat": + return torch.cat(tensor_list, dim=1) + return torch.stack(tensor_list).sum(0) + + def _get_decision(self, mutable): + """ + By default, this method checks whether `mutable.key` is already in the decision cache, + and returns the result without double-check. + + Parameters + ---------- + mutable : Mutable + + Returns + ------- + object + """ + if mutable.key not in self._cache: + raise ValueError("\"{}\" not found in decision cache.".format(mutable.key)) + result = self._cache[mutable.key] + logger.debug("Decision %s: %s", mutable.key, result) + return result + + def _convert_mutable_decision_to_human_readable(self, mutable, sampled): + # Assert the existence of mutable.key in returned architecture. + # Also check if there is anything extra. + multihot_list = to_list(sampled) + converted = None + # If it's a boolean array, we can do optimization. + if all([t == 0 or t == 1 for t in multihot_list]): + if isinstance(mutable, LayerChoice): + assert len(multihot_list) == len(mutable), \ + "Results returned from 'sample_final()' (%s: %s) either too short or too long." \ + % (mutable.key, multihot_list) + # check if all modules have different names and they indeed have names + if len(set(mutable.names)) == len(mutable) and not all(d.isdigit() for d in mutable.names): + converted = [name for i, name in enumerate(mutable.names) if multihot_list[i]] + else: + converted = [i for i in range(len(multihot_list)) if multihot_list[i]] + if isinstance(mutable, InputChoice): + assert len(multihot_list) == mutable.n_candidates, \ + "Results returned from 'sample_final()' (%s: %s) either too short or too long." 
\ + % (mutable.key, multihot_list) + # check if all input candidates have different names + if len(set(mutable.choose_from)) == mutable.n_candidates: + converted = [name for i, name in enumerate(mutable.choose_from) if multihot_list[i]] + else: + converted = [i for i in range(len(multihot_list)) if multihot_list[i]] + if converted is not None: + # if only one element, then remove the bracket + if len(converted) == 1: + converted = converted[0] + else: + # do nothing + converted = multihot_list + return converted diff --git a/nni/nas/pytorch/nasbench201/__init__.py b/nni/nas/pytorch/nasbench201/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1419b3fa92420753a9c0ebea60944023ff0c313e --- /dev/null +++ b/nni/nas/pytorch/nasbench201/__init__.py @@ -0,0 +1 @@ +from .nasbench201 import NASBench201Cell diff --git a/nni/nas/pytorch/nasbench201/nasbench201.py b/nni/nas/pytorch/nasbench201/nasbench201.py new file mode 100644 index 0000000000000000000000000000000000000000..cd42fa1a2f20b86978f1d7fd44ca630c01d54d4d --- /dev/null +++ b/nni/nas/pytorch/nasbench201/nasbench201.py @@ -0,0 +1,72 @@ +from collections import OrderedDict +import torch.nn as nn +from nni.nas.pytorch.mutables import LayerChoice + +from .nasbench201_ops import Pooling, ReLUConvBN, Zero, FactorizedReduce + + +class NASBench201Cell(nn.Module): + """ + Builtin cell structure of NAS Bench 201. One cell contains four nodes. The First node serves as an input node + accepting the output of the previous cell. And other nodes connect to all previous nodes with an edge that + represents an operation chosen from a set to transform the tensor from the source node to the target node. + Every node accepts all its inputs and adds them as its output. + + Parameters + --- + cell_id: str + the name of this cell + C_in: int + the number of input channels of the cell + C_out: int + the number of output channels of the cell + stride: int + stride of all convolution operations in the cell + bn_affine: bool + If set to ``True``, all ``torch.nn.BatchNorm2d`` in this cell will have learnable affine parameters. Default: True + bn_momentum: float + the value used for the running_mean and running_var computation. Default: 0.1 + bn_track_running_stats: bool + When set to ``True``, all ``torch.nn.BatchNorm2d`` in this cell tracks the running mean and variance. 
Default: True
+    """
+
+    def __init__(self, cell_id, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
+        super(NASBench201Cell, self).__init__()
+
+        self.NUM_NODES = 4
+        self.layers = nn.ModuleList()
+
+        OPS = lambda layer_idx: OrderedDict([
+            ("none", Zero(C_in, C_out, stride)),
+            ("avg_pool_3x3", Pooling(C_in, C_out, stride if layer_idx == 0 else 1, bn_affine, bn_momentum,
+                                     bn_track_running_stats)),
+            ("conv_3x3", ReLUConvBN(C_in, C_out, 3, stride if layer_idx == 0 else 1, 1, 1, bn_affine, bn_momentum,
+                                    bn_track_running_stats)),
+            ("conv_1x1", ReLUConvBN(C_in, C_out, 1, stride if layer_idx == 0 else 1, 0, 1, bn_affine, bn_momentum,
+                                    bn_track_running_stats)),
+            ("skip_connect", nn.Identity() if stride == 1 and C_in == C_out
+             else FactorizedReduce(C_in, C_out, stride if layer_idx == 0 else 1, bn_affine, bn_momentum,
+                                   bn_track_running_stats))
+        ])
+
+        for i in range(self.NUM_NODES):
+            node_ops = nn.ModuleList()
+            for j in range(0, i):
+                node_ops.append(LayerChoice(OPS(j), key="%d_%d" % (j, i), reduction="mean"))
+            self.layers.append(node_ops)
+        self.in_dim = C_in
+        self.out_dim = C_out
+        self.cell_id = cell_id
+
+    def forward(self, input):  # pylint: disable=W0622
+        """
+        Parameters
+        ---
+        input: torch.Tensor
+            the output of the previous layer
+        """
+        nodes = [input]
+        for i in range(1, self.NUM_NODES):
+            node_feature = sum(self.layers[i][k](nodes[k]) for k in range(i))
+            nodes.append(node_feature)
+        return nodes[-1]
diff --git a/nni/nas/pytorch/nasbench201/nasbench201_ops.py b/nni/nas/pytorch/nasbench201/nasbench201_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa60f8854fc1cf3169884cdde139eaa97f78831e
--- /dev/null
+++ b/nni/nas/pytorch/nasbench201/nasbench201_ops.py
@@ -0,0 +1,146 @@
+import torch
+import torch.nn as nn
+
+
+class ReLUConvBN(nn.Module):
+    """
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolving kernel
+    stride: int
+        stride of the convolution
+    padding: int
+        zero-padding added to both sides of the input
+    dilation: int
+        spacing between kernel elements
+    bn_affine: bool
+        If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
+    bn_momentum: float
+        the value used for the running_mean and running_var computation. Default: 0.1
+    bn_track_running_stats: bool
+        When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
+    """
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
+                 bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
+        super(ReLUConvBN, self).__init__()
+        self.op = nn.Sequential(
+            nn.ReLU(inplace=False),
+            nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
+                      padding=padding, dilation=dilation, bias=False),
+            nn.BatchNorm2d(C_out, affine=bn_affine, momentum=bn_momentum,
+                           track_running_stats=bn_track_running_stats)
+        )
+
+    def forward(self, x):
+        """
+        Parameters
+        ---
+        x: torch.Tensor
+            input tensor
+        """
+        return self.op(x)
+
+
+class Pooling(nn.Module):
+    """
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    stride: int
+        stride of the convolution
+    bn_affine: bool
+        If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
+    bn_momentum: float
+        the value used for the running_mean and running_var computation. Default: 0.1
+    bn_track_running_stats: bool
+        When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
+    """
+    def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
+        super(Pooling, self).__init__()
+        if C_in == C_out:
+            self.preprocess = None
+        else:
+            # dilation must be at least 1 for nn.Conv2d; a 1x1 kernel is unaffected by it
+            self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1,
+                                         bn_affine, bn_momentum, bn_track_running_stats)
+        self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
+
+    def forward(self, x):
+        """
+        Parameters
+        ---
+        x: torch.Tensor
+            input tensor
+        """
+        if self.preprocess:
+            x = self.preprocess(x)
+        return self.op(x)
+
+
+class Zero(nn.Module):
+    """
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    stride: int
+        stride of the convolution
+    """
+    def __init__(self, C_in, C_out, stride):
+        super(Zero, self).__init__()
+        self.C_in = C_in
+        self.C_out = C_out
+        self.stride = stride
+        self.is_zero = True
+
+    def forward(self, x):
+        """
+        Parameters
+        ---
+        x: torch.Tensor
+            input tensor
+        """
+        if self.C_in == self.C_out:
+            if self.stride == 1:
+                return x.mul(0.)
+            else:
+                return x[:, :, ::self.stride, ::self.stride].mul(0.)
+        else:
+            shape = list(x.shape)
+            shape[1] = self.C_out
+            zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
+            return zeros
+
+
+class FactorizedReduce(nn.Module):
+    def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1,
+                 bn_track_running_stats=True):
+        super(FactorizedReduce, self).__init__()
+        self.stride = stride
+        self.C_in = C_in
+        self.C_out = C_out
+        self.relu = nn.ReLU(inplace=False)
+        if stride == 2:
+            C_outs = [C_out // 2, C_out - C_out // 2]
+            self.convs = nn.ModuleList()
+            for i in range(2):
+                self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
+            self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
+        else:
+            raise ValueError("Invalid stride : {:}".format(stride))
+        self.bn = nn.BatchNorm2d(C_out, affine=bn_affine, momentum=bn_momentum,
+                                 track_running_stats=bn_track_running_stats)
+
+    def forward(self, x):
+        x = self.relu(x)
+        y = self.pad(x)
+        out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
+        out = self.bn(out)
+        return out
diff --git a/nni/nas/pytorch/search_space_zoo/__init__.py b/nni/nas/pytorch/search_space_zoo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59bb3b78d1874808c9ac9ed2c2b625786f16250e
--- /dev/null
+++ b/nni/nas/pytorch/search_space_zoo/__init__.py
@@ -0,0 +1,4 @@
+from .darts_cell import DartsCell
+from .enas_cell import ENASMicroLayer
+from .enas_cell import ENASMacroLayer
+from .enas_cell import ENASMacroGeneralModel
diff --git a/nni/nas/pytorch/search_space_zoo/darts_cell.py b/nni/nas/pytorch/search_space_zoo/darts_cell.py
new file mode 100644
index 0000000000000000000000000000000000000000..53fca5940c1f85b38cef2d08e8e20c8499728609
--- /dev/null
+++ b/nni/nas/pytorch/search_space_zoo/darts_cell.py
@@ -0,0 +1,112 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+from nni.nas.pytorch import mutables
+
+from .darts_ops import PoolBN, SepConv, DilConv, FactorizedReduce, DropPath, StdConv
+
+
+class Node(nn.Module):
+    def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect):
+        """
+        Builtin DARTS Node structure.
+
+        Parameters
+        ---
+        node_id: str
+            the name of this node
+        num_prev_nodes: int
+            the number of previous nodes in this cell
+        channels: int
+            output channels
+        num_downsample_connect: int
+            downsample the input node if this cell is a reduction cell
+        """
+        super().__init__()
+        self.ops = nn.ModuleList()
+        choice_keys = []
+        for i in range(num_prev_nodes):
+            stride = 2 if i < num_downsample_connect else 1
+            choice_keys.append("{}_p{}".format(node_id, i))
+            self.ops.append(
+                mutables.LayerChoice(OrderedDict([
+                    ("maxpool", PoolBN('max', channels, 3, stride, 1, affine=False)),
+                    ("avgpool", PoolBN('avg', channels, 3, stride, 1, affine=False)),
+                    ("skipconnect",
+                     nn.Identity() if stride == 1 else FactorizedReduce(channels, channels, affine=False)),
+                    ("sepconv3x3", SepConv(channels, channels, 3, stride, 1, affine=False)),
+                    ("sepconv5x5", SepConv(channels, channels, 5, stride, 2, affine=False)),
+                    ("dilconv3x3", DilConv(channels, channels, 3, stride, 2, 2, affine=False)),
+                    ("dilconv5x5", DilConv(channels, channels, 5, stride, 4, 2, affine=False))
+                ]), key=choice_keys[-1]))
+        self.drop_path = DropPath()
+        self.input_switch = mutables.InputChoice(choose_from=choice_keys, n_chosen=2, key="{}_switch".format(node_id))
+
+    def forward(self, prev_nodes):
+        assert len(self.ops) == len(prev_nodes)
+        out = [op(node) for op, node in zip(self.ops, prev_nodes)]
+        out = [self.drop_path(o) if o is not None else None for o in out]
+        return self.input_switch(out)
+
+
+class DartsCell(nn.Module):
+    """
+    Builtin DARTS Cell structure. There are ``n_nodes`` nodes in one cell, in which the first two nodes' values
+    are fixed to the outputs of the cell before the previous cell and the previous cell, respectively. Each node
+    connects to all following nodes with predefined operations in a mutable way. The cell concatenates the outputs
+    of all intermediate nodes along the channel dimension as the output of the current cell, so the number of
+    output channels is ``n_nodes`` times ``channels``.
+
+    Parameters
+    ---
+    n_nodes: int
+        the number of nodes contained in this cell
+    channels_pp: int
+        the number of previous previous cell's output channels
+    channels_p: int
+        the number of previous cell's output channels
+    channels: int
+        the number of output channels for each node
+    reduction_p: bool
+        whether the previous cell is a reduction cell
+    reduction: bool
+        whether the current cell is a reduction cell
+    """
+    def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction):
+        super().__init__()
+        self.reduction = reduction
+        self.n_nodes = n_nodes
+
+        # If the previous cell is a reduction cell, the current input size does not match with
+        # the output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.
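+        # For example (illustrative numbers): if the previous cell halved the spatial size,
+        # FactorizedReduce below halves output[k-2]'s feature map as well, so both
+        # preprocessed inputs line up in shape.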
+        if reduction_p:
+            self.preproc0 = FactorizedReduce(channels_pp, channels, affine=False)
+        else:
+            self.preproc0 = StdConv(channels_pp, channels, 1, 1, 0, affine=False)
+        self.preproc1 = StdConv(channels_p, channels, 1, 1, 0, affine=False)
+
+        # generate dag
+        self.mutable_ops = nn.ModuleList()
+        for depth in range(2, self.n_nodes + 2):
+            self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth),
+                                         depth, channels, 2 if reduction else 0))
+
+    def forward(self, pprev, prev):
+        """
+        Parameters
+        ---
+        pprev: torch.Tensor
+            the output of the previous previous layer
+        prev: torch.Tensor
+            the output of the previous layer
+        """
+        tensors = [self.preproc0(pprev), self.preproc1(prev)]
+        for node in self.mutable_ops:
+            cur_tensor = node(tensors)
+            tensors.append(cur_tensor)
+
+        output = torch.cat(tensors[2:], dim=1)
+        return output
diff --git a/nni/nas/pytorch/search_space_zoo/darts_ops.py b/nni/nas/pytorch/search_space_zoo/darts_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce5410cfb4a5a466f26e67aaf93d21f90672407c
--- /dev/null
+++ b/nni/nas/pytorch/search_space_zoo/darts_ops.py
@@ -0,0 +1,196 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch
+import torch.nn as nn
+
+
+class DropPath(nn.Module):
+    def __init__(self, p=0.):
+        """
+        Drop path with probability.
+
+        Parameters
+        ----------
+        p : float
+            Probability of a path to be zeroed.
+        """
+        super().__init__()
+        self.p = p
+
+    def forward(self, x):
+        if self.training and self.p > 0.:
+            keep_prob = 1. - self.p
+            # per data point mask
+            mask = torch.zeros((x.size(0), 1, 1, 1), device=x.device).bernoulli_(keep_prob)
+            return x / keep_prob * mask
+
+        return x
+
+
+class PoolBN(nn.Module):
+    """
+    AvgPool or MaxPool with BN. ``pool_type`` must be ``max`` or ``avg``.
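+
+    For example (an illustrative sketch)::
+
+        pool = PoolBN('max', C=16, kernel_size=3, stride=1, padding=1)
+        out = pool(torch.randn(2, 16, 8, 8))  # same spatial size, batch-normalized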
+
+    Parameters
+    ---
+    pool_type: str
+        the pooling operation to use, ``max`` or ``avg``
+    C: int
+        number of channels
+    kernel_size: int
+        size of the pooling kernel
+    stride: int
+        stride of the pooling
+    padding: int
+        zero-padding added to both sides of the input
+    affine: bool
+        whether to use affine in BatchNorm
+    """
+
+    def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        if pool_type.lower() == 'max':
+            self.pool = nn.MaxPool2d(kernel_size, stride, padding)
+        elif pool_type.lower() == 'avg':
+            self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
+        else:
+            raise ValueError("Unsupported pool type: {}".format(pool_type))
+
+        self.bn = nn.BatchNorm2d(C, affine=affine)
+
+    def forward(self, x):
+        out = self.pool(x)
+        out = self.bn(out)
+        return out
+
+
+class StdConv(nn.Sequential):
+    """
+    Standard conv: ReLU - Conv - BN
+
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolution kernel
+    stride: int
+        stride of the convolution
+    padding: int
+        zero-padding added to both sides of the input
+    affine: bool
+        whether to use affine in BatchNorm
+    """
+
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        # modules are added directly to this nn.Sequential subclass
+        for idx, ops in enumerate((nn.ReLU(), nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False),
+                                   nn.BatchNorm2d(C_out, affine=affine))):
+            self.add_module(str(idx), ops)
+
+
+class FacConv(nn.Module):
+    """
+    Factorized conv: ReLU - Conv(Kx1) - Conv(1xK) - BN
+    """
+
+    def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
+            nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class DilConv(nn.Module):
+    """
+    (Dilated) depthwise separable conv.
+    ReLU - (Dilated) depthwise separable - Pointwise - BN.
+    If dilation == 2, 3x3 conv => 5x5 receptive field, 5x5 conv => 9x9 receptive field.
+
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolving kernel
+    stride: int
+        stride of the convolution
+    padding: int
+        zero-padding added to both sides of the input
+    dilation: int
+        spacing between kernel elements
+    affine: bool
+        whether to use affine in BatchNorm
+    """
+
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(),
+            nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in,
+                      bias=False),
+            nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
+            nn.BatchNorm2d(C_out, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class SepConv(nn.Module):
+    """
+    Depthwise separable conv.
+    DilConv(dilation=1) * 2.
+
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolving kernel
+    stride: int
+        stride of the convolution
+    padding: int
+        zero-padding added to both sides of the input
+    affine: bool
+        whether to use affine in BatchNorm
+    """
+
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
+        super().__init__()
+        self.net = nn.Sequential(
+            DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine),
+            DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+class FactorizedReduce(nn.Module):
+    """
+    Reduce feature map size by factorized pointwise convolution (stride=2).
+    """
+
+    def __init__(self, C_in, C_out, affine=True):
+        super().__init__()
+        self.relu = nn.ReLU()
+        self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.bn = nn.BatchNorm2d(C_out, affine=affine)
+
+    def forward(self, x):
+        x = self.relu(x)
+        out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
+        out = self.bn(out)
+        return out
diff --git a/nni/nas/pytorch/search_space_zoo/enas_cell.py b/nni/nas/pytorch/search_space_zoo/enas_cell.py
new file mode 100644
index 0000000000000000000000000000000000000000..de57d55e23f83bb5a35cd4d3a474c71b8fc90fc4
--- /dev/null
+++ b/nni/nas/pytorch/search_space_zoo/enas_cell.py
@@ -0,0 +1,255 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from nni.nas.pytorch import mutables
+from .enas_ops import FactorizedReduce, StdConv, SepConvBN, Pool, ConvBranch, PoolBranch
+
+
+class Cell(nn.Module):
+    def __init__(self, cell_name, prev_labels, channels):
+        super().__init__()
+        self.input_choice = mutables.InputChoice(choose_from=prev_labels, n_chosen=1, return_mask=True,
+                                                 key=cell_name + "_input")
+        self.op_choice = mutables.LayerChoice([
+            SepConvBN(channels, channels, 3, 1),
+            SepConvBN(channels, channels, 5, 2),
+            Pool("avg", 3, 1, 1),
+            Pool("max", 3, 1, 1),
+            nn.Identity()
+        ], key=cell_name + "_op")
+
+    def forward(self, prev_layers):
+        chosen_input, chosen_mask = self.input_choice(prev_layers)
+        cell_out = self.op_choice(chosen_input)
+        return cell_out, chosen_mask
+
+
+class Node(mutables.MutableScope):
+    def __init__(self, node_name, prev_node_names, channels):
+        super().__init__(node_name)
+        self.cell_x = Cell(node_name + "_x", prev_node_names, channels)
+        self.cell_y = Cell(node_name + "_y", prev_node_names, channels)
+
+    def forward(self, prev_layers):
+        out_x, mask_x = self.cell_x(prev_layers)
+        out_y, mask_y = self.cell_y(prev_layers)
+        return out_x + out_y, mask_x | mask_y
+
+
+class Calibration(nn.Module):
+    def __init__(self, in_channels, out_channels):
+        super().__init__()
+        self.process = None
+        if in_channels != out_channels:
+            self.process = StdConv(in_channels, out_channels)
+
+    def forward(self, x):
+        if self.process is None:
+            return x
+        return self.process(x)
+
+
+class ENASMicroLayer(nn.Module):
+    """
+    Builtin ENAS Micro Layer. Micro search designs only one building block whose architecture is repeated
+    throughout the final architecture. A cell has ``num_nodes`` nodes and searches the topology and
+    operations among them via reinforcement learning. The first two nodes in a layer stand for the outputs
+    from the previous previous layer and the previous layer respectively. For each of the following nodes,
+    the controller chooses two previous nodes and one operation to apply to each of them. Nodes that are not
+    used as input for any other node are viewed as the output of the layer.
+    If there are multiple output nodes, the model will calculate the average of these nodes as the layer output.
+    Every node's output has ``out_channels`` channels so the result of the layer has the same number of channels
+    as each node.
+
+    Parameters
+    ---
+    num_nodes: int
+        the number of nodes contained in this layer
+    in_channels_pp: int
+        the number of previous previous layer's output channels
+    in_channels_p: int
+        the number of previous layer's output channels
+    out_channels: int
+        output channels of this layer
+    reduction: bool
+        whether a reduction operation is employed before this layer
+    """
+    def __init__(self, num_nodes, in_channels_pp, in_channels_p, out_channels, reduction):
+        super().__init__()
+        self.reduction = reduction
+        if self.reduction:
+            self.reduce0 = FactorizedReduce(in_channels_pp, out_channels, affine=False)
+            self.reduce1 = FactorizedReduce(in_channels_p, out_channels, affine=False)
+            in_channels_pp = in_channels_p = out_channels
+        self.preproc0 = Calibration(in_channels_pp, out_channels)
+        self.preproc1 = Calibration(in_channels_p, out_channels)
+
+        self.num_nodes = num_nodes
+        name_prefix = "reduce" if reduction else "normal"
+        self.nodes = nn.ModuleList()
+        node_labels = [mutables.InputChoice.NO_KEY, mutables.InputChoice.NO_KEY]
+        for i in range(num_nodes):
+            node_labels.append("{}_node_{}".format(name_prefix, i))
+            self.nodes.append(Node(node_labels[-1], node_labels[:-1], out_channels))
+        self.final_conv_w = nn.Parameter(torch.zeros(out_channels, self.num_nodes + 2, out_channels, 1, 1),
+                                         requires_grad=True)
+        self.bn = nn.BatchNorm2d(out_channels, affine=False)
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        nn.init.kaiming_normal_(self.final_conv_w)
+
+    def forward(self, pprev, prev):
+        """
+        Parameters
+        ---
+        pprev: torch.Tensor
+            the output of the previous previous layer
+        prev: torch.Tensor
+            the output of the previous layer
+        """
+        if self.reduction:
+            pprev, prev = self.reduce0(pprev), self.reduce1(prev)
+        pprev_, prev_ = self.preproc0(pprev), self.preproc1(prev)
+
+        prev_nodes_out = [pprev_, prev_]
+        nodes_used_mask = torch.zeros(self.num_nodes + 2, dtype=torch.bool, device=prev.device)
+        for i in range(self.num_nodes):
+            node_out, mask = self.nodes[i](prev_nodes_out)
+            nodes_used_mask[:mask.size(0)] |= mask.to(node_out.device)
+            prev_nodes_out.append(node_out)
+
+        unused_nodes = torch.cat([out for used, out in zip(nodes_used_mask, prev_nodes_out) if not used], 1)
+        unused_nodes = F.relu(unused_nodes)
+        conv_weight = self.final_conv_w[:, ~nodes_used_mask, :, :, :]
+        conv_weight = conv_weight.view(conv_weight.size(0), -1, 1, 1)
+        out = F.conv2d(unused_nodes, conv_weight)
+        return prev, self.bn(out)
+
+
+class ENASMacroLayer(mutables.MutableScope):
+    """
+    Builtin ENAS Macro Layer. With the search space changing to layer level, the controller decides
+    what operation is employed and which previous layers to connect to for skip connections. The model
+    is made up of the same layers but the choice of each layer may be different.
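+
+    For example (an illustrative sketch; a mutator must be bound before the forward pass)::
+
+        layer = ENASMacroLayer("layer_3", ["layer_0", "layer_1", "layer_2"], 24, 24)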
+
+    Parameters
+    ---
+    key: str
+        the name of this layer
+    prev_labels: list of str
+        names of all previous layers
+    in_filters: int
+        the number of input channels
+    out_filters: int
+        the number of output channels
+    """
+    def __init__(self, key, prev_labels, in_filters, out_filters):
+        super().__init__(key)
+        self.in_filters = in_filters
+        self.out_filters = out_filters
+        self.mutable = mutables.LayerChoice([
+            ConvBranch(in_filters, out_filters, 3, 1, 1, separable=False),
+            ConvBranch(in_filters, out_filters, 3, 1, 1, separable=True),
+            ConvBranch(in_filters, out_filters, 5, 1, 2, separable=False),
+            ConvBranch(in_filters, out_filters, 5, 1, 2, separable=True),
+            PoolBranch('avg', in_filters, out_filters, 3, 1, 1),
+            PoolBranch('max', in_filters, out_filters, 3, 1, 1)
+        ])
+        if prev_labels:
+            self.skipconnect = mutables.InputChoice(choose_from=prev_labels, n_chosen=None)
+        else:
+            self.skipconnect = None
+        self.batch_norm = nn.BatchNorm2d(out_filters, affine=False)
+
+    def forward(self, prev_list):
+        """
+        Parameters
+        ---
+        prev_list: list
+            The layer selects the last element of the list as input and applies an operation on it.
+            The layer chooses none/one/multiple tensor(s) as skip connection(s) from the list excluding
+            the last element.
+        """
+        out = self.mutable(prev_list[-1])
+        if self.skipconnect is not None:
+            connection = self.skipconnect(prev_list[:-1])
+            if connection is not None:
+                out += connection
+        return self.batch_norm(out)
+
+
+class ENASMacroGeneralModel(nn.Module):
+    """
+    The network is made up of stacked ENASMacroLayers. The macro search space contains these layers.
+    Each layer chooses an operation from predefined ones, plus optional skip connections, to form a network.
+
+    Parameters
+    ---
+    num_layers: int
+        The number of layers contained in the network.
+    out_filters: int
+        The number of each layer's output channels.
+    in_channels: int
+        The number of input channels.
+    num_classes: int
+        The number of classes for classification.
+    dropout_rate: float
+        Dropout rate of the dropout layer before the final dense layer.
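+
+    A usage sketch (shapes are illustrative; a mutator or fixed architecture such as
+    ``apply_fixed_architecture(model, "./arch.json")`` with a hypothetical checkpoint must be
+    applied before the forward pass, since the layers contain mutables)::
+
+        model = ENASMacroGeneralModel(num_layers=12, out_filters=24,
+                                      in_channels=3, num_classes=10)
+        logits = model(torch.randn(2, 3, 32, 32))  # -> shape (2, 10)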
+ """ + def __init__(self, num_layers=12, out_filters=24, in_channels=3, num_classes=10, + dropout_rate=0.0): + super().__init__() + self.num_layers = num_layers + self.num_classes = num_classes + self.out_filters = out_filters + + self.stem = nn.Sequential( + nn.Conv2d(in_channels, out_filters, 3, 1, 1, bias=False), + nn.BatchNorm2d(out_filters) + ) + + pool_distance = self.num_layers // 3 + self.pool_layers_idx = [pool_distance - 1, 2 * pool_distance - 1] + self.dropout_rate = dropout_rate + self.dropout = nn.Dropout(self.dropout_rate) + + self.layers = nn.ModuleList() + self.pool_layers = nn.ModuleList() + labels = [] + for layer_id in range(self.num_layers): + labels.append("layer_{}".format(layer_id)) + if layer_id in self.pool_layers_idx: + self.pool_layers.append(FactorizedReduce(self.out_filters, self.out_filters)) + self.layers.append(ENASMacroLayer(labels[-1], labels[:-1], self.out_filters, self.out_filters)) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.dense = nn.Linear(self.out_filters, self.num_classes) + + def forward(self, x): + """ + Parameters + --- + x: torch.Tensor + the input of the network + """ + bs = x.size(0) + cur = self.stem(x) + + layers = [cur] + + for layer_id in range(self.num_layers): + cur = self.layers[layer_id](layers) + layers.append(cur) + if layer_id in self.pool_layers_idx: + for i, layer in enumerate(layers): + layers[i] = self.pool_layers[self.pool_layers_idx.index(layer_id)](layer) + cur = layers[-1] + + cur = self.gap(cur).view(bs, -1) + cur = self.dropout(cur) + logits = self.dense(cur) + return logits diff --git a/nni/nas/pytorch/search_space_zoo/enas_ops.py b/nni/nas/pytorch/search_space_zoo/enas_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..21ecc2da798dd025966a4631d59fb6f652e5da98 --- /dev/null +++ b/nni/nas/pytorch/search_space_zoo/enas_ops.py @@ -0,0 +1,171 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn + + +class StdConv(nn.Module): + def __init__(self, C_in, C_out): + super(StdConv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=False), + nn.ReLU() + ) + + def forward(self, x): + return self.conv(x) + + +class PoolBranch(nn.Module): + """ + Pooling structure for Macro search. First pass through a 1x1 Conv, then pooling operation followed by BatchNorm2d. 
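+
+    For example (an illustrative sketch)::
+
+        branch = PoolBranch('avg', C_in=24, C_out=24, kernel_size=3, stride=1, padding=1)
+        out = branch(torch.randn(2, 24, 8, 8))  # -> (2, 24, 8, 8)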
+
+    Parameters
+    ---
+    pool_type: str
+        only accept ``max`` for MaxPool and ``avg`` for AvgPool
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the pooling kernel
+    stride: int
+        stride of the pooling
+    padding: int
+        zero-padding added to both sides of the input
+    """
+    def __init__(self, pool_type, C_in, C_out, kernel_size, stride, padding, affine=False):
+        super().__init__()
+        self.preproc = StdConv(C_in, C_out)
+        self.pool = Pool(pool_type, kernel_size, stride, padding)
+        self.bn = nn.BatchNorm2d(C_out, affine=affine)
+
+    def forward(self, x):
+        out = self.preproc(x)
+        out = self.pool(out)
+        out = self.bn(out)
+        return out
+
+
+class SeparableConv(nn.Module):
+    def __init__(self, C_in, C_out, kernel_size, stride, padding):
+        super(SeparableConv, self).__init__()
+        self.depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, padding=padding, stride=stride,
+                                   groups=C_in, bias=False)
+        self.pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        out = self.depthwise(x)
+        out = self.pointwise(out)
+        return out
+
+
+class ConvBranch(nn.Module):
+    """
+    Conv structure for Macro search. First pass through a 1x1 Conv,
+    then a Conv operation with kernel_size equal to 3 or 5, followed by BatchNorm and ReLU.
+
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolving kernel
+    stride: int
+        stride of the convolution
+    padding: int
+        zero-padding added to both sides of the input
+    separable: bool
+        whether a separable conv is used
+    """
+    def __init__(self, C_in, C_out, kernel_size, stride, padding, separable):
+        super(ConvBranch, self).__init__()
+        self.preproc = StdConv(C_in, C_out)
+        if separable:
+            self.conv = SeparableConv(C_out, C_out, kernel_size, stride, padding)
+        else:
+            self.conv = nn.Conv2d(C_out, C_out, kernel_size, stride=stride, padding=padding)
+        self.postproc = nn.Sequential(
+            nn.BatchNorm2d(C_out, affine=False),
+            nn.ReLU()
+        )
+
+    def forward(self, x):
+        out = self.preproc(x)
+        out = self.conv(out)
+        out = self.postproc(out)
+        return out
+
+
+class FactorizedReduce(nn.Module):
+    def __init__(self, C_in, C_out, affine=False):
+        super().__init__()
+        self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.bn = nn.BatchNorm2d(C_out, affine=affine)
+
+    def forward(self, x):
+        out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
+        out = self.bn(out)
+        return out
+
+
+class Pool(nn.Module):
+    """
+    Pooling structure
+
+    Parameters
+    ---
+    pool_type: str
+        only accept ``max`` for MaxPool and ``avg`` for AvgPool
+    kernel_size: int
+        size of the pooling kernel
+    stride: int
+        stride of the pooling
+    padding: int
+        zero-padding added to both sides of the input
+    """
+    def __init__(self, pool_type, kernel_size, stride, padding):
+        super().__init__()
+        if pool_type.lower() == 'max':
+            self.pool = nn.MaxPool2d(kernel_size, stride, padding)
+        elif pool_type.lower() == 'avg':
+            self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
+        else:
+            raise ValueError("Unsupported pool type: {}".format(pool_type))
+
+    def forward(self, x):
+        return self.pool(x)
+
+
+class SepConvBN(nn.Module):
+    """
+    Implement SepConv followed by BatchNorm. The structure is ReLU ==> SepConv ==> BN.
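+
+    For example (an illustrative sketch)::
+
+        op = SepConvBN(C_in=24, C_out=24, kernel_size=3, padding=1)
+        out = op(torch.randn(2, 24, 8, 8))  # -> (2, 24, 8, 8)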
+
+    Parameters
+    ---
+    C_in: int
+        the number of input channels
+    C_out: int
+        the number of output channels
+    kernel_size: int
+        size of the convolving kernel
+    padding: int
+        zero-padding added to both sides of the input
+    """
+    def __init__(self, C_in, C_out, kernel_size, padding):
+        super().__init__()
+        self.relu = nn.ReLU()
+        self.conv = SeparableConv(C_in, C_out, kernel_size, 1, padding)
+        self.bn = nn.BatchNorm2d(C_out, affine=True)
+
+    def forward(self, x):
+        x = self.relu(x)
+        x = self.conv(x)
+        x = self.bn(x)
+        return x
diff --git a/nni/nas/pytorch/trainer.py b/nni/nas/pytorch/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a3881177a63befdf1c32f8b53edc88b4f8fd379
--- /dev/null
+++ b/nni/nas/pytorch/trainer.py
@@ -0,0 +1,194 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import json
+import logging
+import os
+import time
+from abc import abstractmethod
+
+import torch
+
+from .base_trainer import BaseTrainer
+
+_logger = logging.getLogger(__name__)
+
+
+class TorchTensorEncoder(json.JSONEncoder):
+    def default(self, o):  # pylint: disable=method-hidden
+        if isinstance(o, torch.Tensor):
+            olist = o.tolist()
+            if "bool" not in o.type().lower() and all(map(lambda d: d == 0 or d == 1, olist)):
+                _logger.warning("Every element in %s is either 0 or 1. "
+                                "You might consider converting it to bool.", olist)
+            return olist
+        return super().default(o)
+
+
+class Trainer(BaseTrainer):
+    """
+    A trainer with some helper functions implemented. To implement a new trainer,
+    users need to implement :meth:`train_one_epoch`, :meth:`validate_one_epoch` and :meth:`checkpoint`.
+
+    Parameters
+    ----------
+    model : nn.Module
+        Model with mutables.
+    mutator : BaseMutator
+        A mutator object that has been initialized with the model.
+    loss : callable
+        Called with logits and targets. Returns a loss tensor.
+        See `PyTorch loss functions`_ for examples.
+    metrics : callable
+        Called with logits and targets. Returns a dict that maps metrics keys to metrics data. For example,
+
+        .. code-block:: python
+
+            def metrics_fn(output, target):
+                return {"acc1": accuracy(output, target, topk=1), "acc5": accuracy(output, target, topk=5)}
+
+    optimizer : Optimizer
+        Optimizer that optimizes the model.
+    num_epochs : int
+        Number of training epochs.
+    dataset_train : torch.utils.data.Dataset
+        Dataset for training. If not otherwise specified, ``dataset_train`` and ``dataset_valid`` should be standard
+        PyTorch Dataset. See `torch.utils.data`_ for examples.
+    dataset_valid : torch.utils.data.Dataset
+        Dataset for validation/testing.
+    batch_size : int
+        Batch size.
+    workers : int
+        Number of workers used in data preprocessing.
+    device : torch.device
+        Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, the trainer
+        automatically detects GPU availability and prefers GPU.
+    log_frequency : int
+        Number of mini-batches between two logs of metrics.
+    callbacks : list of Callback
+        Callbacks to plug into the trainer. See Callbacks.
+
+
+    .. _`PyTorch loss functions`: https://pytorch.org/docs/stable/nn.html#loss-functions
+    .. 
_`torch.utils.data`: https://pytorch.org/docs/stable/data.html + """ + def __init__(self, model, mutator, loss, metrics, optimizer, num_epochs, + dataset_train, dataset_valid, batch_size, workers, device, log_frequency, callbacks): + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device + self.model = model + self.mutator = mutator + self.loss = loss + + self.metrics = metrics + self.optimizer = optimizer + + self.model.to(self.device) + self.mutator.to(self.device) + self.loss.to(self.device) + + self.num_epochs = num_epochs + self.dataset_train = dataset_train + self.dataset_valid = dataset_valid + self.batch_size = batch_size + self.workers = workers + self.log_frequency = log_frequency + self.log_dir = os.path.join("logs", str(time.time())) + os.makedirs(self.log_dir, exist_ok=True) + self.status_writer = open(os.path.join(self.log_dir, "log"), "w") + self.callbacks = callbacks if callbacks is not None else [] + for callback in self.callbacks: + callback.build(self.model, self.mutator, self) + + @abstractmethod + def train_one_epoch(self, epoch): + """ + Train one epoch. + + Parameters + ---------- + epoch : int + Epoch number starting from 0. + """ + pass + + @abstractmethod + def validate_one_epoch(self, epoch): + """ + Validate one epoch. + + Parameters + ---------- + epoch : int + Epoch number starting from 0. + """ + pass + + def train(self, validate=True): + """ + Train ``num_epochs``. + Trigger callbacks at the start and the end of each epoch. + + Parameters + ---------- + validate : bool + If ``true``, will do validation every epoch. + """ + for epoch in range(self.num_epochs): + for callback in self.callbacks: + callback.on_epoch_begin(epoch) + + # training + _logger.info("Epoch %d Training", epoch + 1) + self.train_one_epoch(epoch) + + if validate: + # validation + _logger.info("Epoch %d Validating", epoch + 1) + self.validate_one_epoch(epoch) + + for callback in self.callbacks: + callback.on_epoch_end(epoch) + + def validate(self): + """ + Do one validation. + """ + self.validate_one_epoch(-1) + + def export(self, file): + """ + Call ``mutator.export()`` and dump the architecture to ``file``. + + Parameters + ---------- + file : str + A file path. Expected to be a JSON. + """ + mutator_export = self.mutator.export() + with open(file, "w") as f: + json.dump(mutator_export, f, indent=2, sort_keys=True, cls=TorchTensorEncoder) + + def checkpoint(self): + """ + Return trainer checkpoint. + """ + raise NotImplementedError("Not implemented yet") + + def enable_visualization(self): + """ + Enable visualization. Write graph and training log to folder ``logs/``. + """ + sample = None + for x, _ in self.train_loader: + sample = x.to(self.device)[:2] + break + if sample is None: + _logger.warning("Sample is %s.", sample) + _logger.info("Creating graph json, writing to %s. Visualization enabled.", self.log_dir) + with open(os.path.join(self.log_dir, "graph.json"), "w") as f: + json.dump(self.mutator.graph(sample), f) + self.visualization_enabled = True + + def _write_graph_status(self): + if hasattr(self, "visualization_enabled") and self.visualization_enabled: + print(json.dumps(self.mutator.status()), file=self.status_writer, flush=True) diff --git a/nni/nas/pytorch/utils.py b/nni/nas/pytorch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f5aabfb74e5c1d2350f579233ae36fdadb2313 --- /dev/null +++ b/nni/nas/pytorch/utils.py @@ -0,0 +1,210 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT license. + +import logging +from collections import OrderedDict + +import numpy as np +import torch + +_counter = 0 + +_logger = logging.getLogger(__name__) + + +def global_mutable_counting(): + """ + A program level counter starting from 1. + """ + global _counter + _counter += 1 + return _counter + + +def _reset_global_mutable_counting(): + """ + Reset the global mutable counting to count from 1. Useful when defining multiple models with default keys. + """ + global _counter + _counter = 0 + + +def to_device(obj, device): + """ + Move a tensor, tuple, list, or dict onto device. + """ + if torch.is_tensor(obj): + return obj.to(device) + if isinstance(obj, tuple): + return tuple(to_device(t, device) for t in obj) + if isinstance(obj, list): + return [to_device(t, device) for t in obj] + if isinstance(obj, dict): + return {k: to_device(v, device) for k, v in obj.items()} + if isinstance(obj, (int, float, str)): + return obj + raise ValueError("'%s' has unsupported type '%s'" % (obj, type(obj))) + + +def to_list(arr): + if torch.is_tensor(arr): + return arr.cpu().numpy().tolist() + if isinstance(arr, np.ndarray): + return arr.tolist() + if isinstance(arr, (list, tuple)): + return list(arr) + return arr + + +class AverageMeterGroup: + """ + Average meter group for multiple average meters. + """ + + def __init__(self): + self.meters = OrderedDict() + + def update(self, data): + """ + Update the meter group with a dict of metrics. + Non-exist average meters will be automatically created. + """ + for k, v in data.items(): + if k not in self.meters: + self.meters[k] = AverageMeter(k, ":4f") + self.meters[k].update(v) + + def __getattr__(self, item): + return self.meters[item] + + def __getitem__(self, item): + return self.meters[item] + + def __str__(self): + return " ".join(str(v) for v in self.meters.values()) + + def summary(self): + """ + Return a summary string of group data. + """ + return " ".join(v.summary() for v in self.meters.values()) + + +class AverageMeter: + """ + Computes and stores the average and current value. + + Parameters + ---------- + name : str + Name to display. + fmt : str + Format string to print the values. + """ + + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + """ + Reset the meter. + """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ + Update with value and weight. + + Parameters + ---------- + val : float or int + The new value to be accounted in. + n : int + The weight of the new value. + """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + def summary(self): + fmtstr = '{name}: {avg' + self.fmt + '}' + return fmtstr.format(**self.__dict__) + + +class StructuredMutableTreeNode: + """ + A structured representation of a search space. + A search space comes with a root (with `None` stored in its `mutable`), and a bunch of children in its `children`. + This tree can be seen as a "flattened" version of the module tree. Since nested mutable entity is not supported yet, + the following must be true: each subtree corresponds to a ``MutableScope`` and each leaf corresponds to a + ``Mutable`` (other than ``MutableScope``). + + Parameters + ---------- + mutable : nni.nas.pytorch.mutables.Mutable + The mutable that current node is linked with. 
+ """ + + def __init__(self, mutable): + self.mutable = mutable + self.children = [] + + def add_child(self, mutable): + """ + Add a tree node to the children list of current node. + """ + self.children.append(StructuredMutableTreeNode(mutable)) + return self.children[-1] + + def type(self): + """ + Return the ``type`` of mutable content. + """ + return type(self.mutable) + + def __iter__(self): + return self.traverse() + + def traverse(self, order="pre", deduplicate=True, memo=None): + """ + Return a generator that generates a list of mutables in this tree. + + Parameters + ---------- + order : str + pre or post. If pre, current mutable is yield before children. Otherwise after. + deduplicate : bool + If true, mutables with the same key will not appear after the first appearance. + memo : dict + An auxiliary dict that memorize keys seen before, so that deduplication is possible. + + Returns + ------- + generator of Mutable + """ + if memo is None: + memo = set() + assert order in ["pre", "post"] + if order == "pre": + if self.mutable is not None: + if not deduplicate or self.mutable.key not in memo: + memo.add(self.mutable.key) + yield self.mutable + for child in self.children: + for m in child.traverse(order=order, deduplicate=deduplicate, memo=memo): + yield m + if order == "post": + if self.mutable is not None: + if not deduplicate or self.mutable.key not in memo: + memo.add(self.mutable.key) + yield self.mutable diff --git a/nni/nas/tensorflow/__init__.py b/nni/nas/tensorflow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/nas/tensorflow/base_mutator.py b/nni/nas/tensorflow/base_mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..860680f199278d3fd38910b82e7661b17d2f652e --- /dev/null +++ b/nni/nas/tensorflow/base_mutator.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from tensorflow.keras import Model + +from .mutables import Mutable, MutableScope, InputChoice +from .utils import StructuredMutableTreeNode + + +class BaseMutator(Model): + def __init__(self, model): + super().__init__() + self.__dict__['model'] = model + self._structured_mutables = self._parse_search_space(self.model) + + def _parse_search_space(self, module, root=None, prefix='', memo=None, nested_detection=None): + if memo is None: + memo = set() + if root is None: + root = StructuredMutableTreeNode(None) + if module not in memo: + memo.add(module) + if isinstance(module, Mutable): + if nested_detection is not None: + raise RuntimeError('Cannot have nested search space. Error at {} in {}' + .format(module, nested_detection)) + module.name = prefix + module.set_mutator(self) + root = root.add_child(module) + if not isinstance(module, MutableScope): + nested_detection = module + if isinstance(module, InputChoice): + for k in module.choose_from: + if k != InputChoice.NO_KEY and k not in [m.key for m in memo if isinstance(m, Mutable)]: + raise RuntimeError('"{}" required by "{}" not found in keys that appeared before, and is not NO_KEY.' + .format(k, module.key)) + for submodule in module.layers: + if not isinstance(submodule, Model): + continue + submodule_prefix = prefix + ('.' 
if prefix else '') + submodule.name + self._parse_search_space(submodule, root, submodule_prefix, memo=memo, nested_detection=nested_detection) + return root + + @property + def mutables(self): + return self._structured_mutables + + def undedup_mutables(self): + return self._structured_mutables.traverse(deduplicate=False) + + def call(self, *inputs): + raise RuntimeError('Call is undefined for mutators.') + + def __setattr__(self, name, value): + if name == 'model': + raise AttributeError("Attribute `model` can be set at most once, and you shouldn't use `self.model = model` to " + "include your network, as it will include all parameters in model into the mutator.") + return super().__setattr__(name, value) + + def enter_mutable_scope(self, mutable_scope): + pass + + def exit_mutable_scope(self, mutable_scope): + pass + + def on_forward_layer_choice(self, mutable, *inputs): + raise NotImplementedError + + def on_forward_input_choice(self, mutable, tensor_list): + raise NotImplementedError + + def export(self): + raise NotImplementedError diff --git a/nni/nas/tensorflow/mutables.py b/nni/nas/tensorflow/mutables.py new file mode 100644 index 0000000000000000000000000000000000000000..06183a34c1872804f87887567692fcab6a732816 --- /dev/null +++ b/nni/nas/tensorflow/mutables.py @@ -0,0 +1,144 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from collections import OrderedDict + +from tensorflow.keras import Model + +from .utils import global_mutable_counting + + +_logger = logging.getLogger(__name__) + + +class Mutable(Model): + def __init__(self, key=None): + super().__init__() + if key is None: + self._key = '{}_{}'.format(type(self).__name__, global_mutable_counting()) + elif isinstance(key, str): + self._key = key + else: + self._key = str(key) + _logger.warning('Key "%s" is not string, converted to string.', key) + self.init_hook = None + self.forward_hook = None + + def __deepcopy__(self, memodict=None): + raise NotImplementedError("Deep copy doesn't work for mutables.") + + def set_mutator(self, mutator): + if hasattr(self, 'mutator'): + raise RuntimeError('`set_mutator is called more than once. ' + 'Did you parse the search space multiple times? ' + 'Or did you apply multiple fixed architectures?') + self.mutator = mutator + + def call(self, *inputs): + raise NotImplementedError('Method `call` of Mutable must be overridden') + + def build(self, input_shape): + self._check_built() + + @property + def key(self): + return self._key + + @property + def name(self): + return self._name if hasattr(self, '_name') else self._key + + @name.setter + def name(self, name): + self._name = name + + def _check_built(self): + if not hasattr(self, 'mutator'): + raise ValueError( + "Mutator not set for {}. You might have forgotten to initialize and apply your mutator. " + "Or did you initialize a mutable on the fly in forward pass? Move to `__init__` " + "so that trainer can locate all your mutables. 
See NNI docs for more details.".format(self)) + + def __repr__(self): + return '{} ({})'.format(self.name, self.key) + + +class MutableScope(Mutable): + def __call__(self, *args, **kwargs): + try: + self.mutator.enter_mutable_scope(self) + return super().__call__(*args, **kwargs) + finally: + self.mutator.exit_mutable_scope(self) + + +class LayerChoice(Mutable): + def __init__(self, op_candidates, reduction='sum', return_mask=False, key=None): + super().__init__(key=key) + self.names = [] + if isinstance(op_candidates, OrderedDict): + for name in op_candidates: + assert name not in ["length", "reduction", "return_mask", "_key", "key", "names"], \ + "Please don't use a reserved name '{}' for your module.".format(name) + self.names.append(name) + elif isinstance(op_candidates, list): + for i, _ in enumerate(op_candidates): + self.names.append(str(i)) + else: + raise TypeError("Unsupported op_candidates type: {}".format(type(op_candidates))) + + self.length = len(op_candidates) + self.choices = op_candidates + self.reduction = reduction + self.return_mask = return_mask + + def call(self, *inputs): + out, mask = self.mutator.on_forward_layer_choice(self, *inputs) + if self.return_mask: + return out, mask + return out + + def build(self, input_shape): + self._check_built() + for op in self.choices: + op.build(input_shape) + + def __len__(self): + return len(self.choices) + + +class InputChoice(Mutable): + NO_KEY = '' + + def __init__(self, n_candidates=None, choose_from=None, n_chosen=None, reduction='sum', return_mask=False, key=None): + super().__init__(key=key) + assert n_candidates is not None or choose_from is not None, \ + 'At least one of `n_candidates` and `choose_from` must be not None.' + if choose_from is not None and n_candidates is None: + n_candidates = len(choose_from) + elif choose_from is None and n_candidates is not None: + choose_from = [self.NO_KEY] * n_candidates + assert n_candidates == len(choose_from), 'Number of candidates must be equal to the length of `choose_from`.' + assert n_candidates > 0, 'Number of candidates must be greater than 0.' + assert n_chosen is None or 0 <= n_chosen <= n_candidates, \ + 'Expected selected number must be None or no more than number of candidates.' + + self.n_candidates = n_candidates + self.choose_from = choose_from.copy() + self.n_chosen = n_chosen + self.reduction = reduction + self.return_mask = return_mask + + def call(self, optional_inputs): + optional_input_list = optional_inputs + if isinstance(optional_inputs, dict): + optional_input_list = [optional_inputs[tag] for tag in self.choose_from] + assert isinstance(optional_input_list, list), \ + 'Optional input list must be a list, not a {}.'.format(type(optional_input_list)) + assert len(optional_inputs) == self.n_candidates, \ + 'Length of the input list must be equal to number of candidates.' + out, mask = self.mutator.on_forward_input_choice(self, optional_input_list) + if self.return_mask: + return out, mask + return out diff --git a/nni/nas/tensorflow/mutator.py b/nni/nas/tensorflow/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..b0d2aed684e289b556ccc1388b95477d55c5c2da --- /dev/null +++ b/nni/nas/tensorflow/mutator.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
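+
+# Illustrative usage sketch (an assumption for documentation, not part of
+# the original file): a concrete mutator overrides `sample_search` to map
+# each mutable key to a decision mask, and a trainer calls `reset()` before
+# each forward pass so that fresh decisions are cached. `random_mask` below
+# is a hypothetical helper.
+#
+#     class RandomMutator(Mutator):
+#         def sample_search(self):
+#             return {m.key: random_mask(m) for m in self.mutables}
+#
+#     mutator = RandomMutator(model)
+#     mutator.reset()          # samples and caches one architecture
+#     arch = mutator.export()  # requires `sample_final` to be overridden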
+
+import logging
+
+import tensorflow as tf
+
+from .base_mutator import BaseMutator
+
+
+_logger = logging.getLogger(__name__)
+
+
+class Mutator(BaseMutator):
+    def __init__(self, model):
+        super().__init__(model)
+        self._cache = {}
+
+    def sample_search(self):
+        raise NotImplementedError('Method `sample_search` must be overridden')
+
+    def sample_final(self):
+        raise NotImplementedError('Method `sample_final` must be overridden for exporting')
+
+    def reset(self):
+        self._cache = self.sample_search()
+
+    def export(self):
+        return self.sample_final()
+
+    # TODO: status
+    # TODO: graph
+
+    def on_forward_layer_choice(self, mutable, *inputs):
+        mask = self._get_decision(mutable)
+        assert len(mask) == len(mutable), \
+            'Invalid mask, expected {} to be of length {}.'.format(mask, len(mutable))
+        out = self._select_with_mask(lambda choice: choice(*inputs), mutable.choices, mask)
+        return self._tensor_reduction(mutable.reduction, out), mask
+
+    def on_forward_input_choice(self, mutable, tensor_list):
+        mask = self._get_decision(mutable)
+        assert len(mask) == mutable.n_candidates, \
+            'Invalid mask, expected {} to be of length {}.'.format(mask, mutable.n_candidates)
+        out = self._select_with_mask(lambda tensor: tensor, tensor_list, mask)
+        return self._tensor_reduction(mutable.reduction, out), mask
+
+    def _select_with_mask(self, map_fn, candidates, mask):
+        if mask.dtype.is_bool:
+            out = [map_fn(cand) for cand, m in zip(candidates, mask) if m]
+        elif mask.dtype.is_floating:
+            out = [map_fn(cand) * m for cand, m in zip(candidates, mask) if m]
+        else:
+            raise ValueError('Unrecognized mask, dtype is {}'.format(mask.dtype.name))
+        return out
+
+    def _tensor_reduction(self, reduction_type, tensor_list):
+        if reduction_type == 'none':
+            return tensor_list
+        if not tensor_list:
+            return None
+        if len(tensor_list) == 1:
+            return tensor_list[0]
+        if reduction_type == 'sum':
+            return sum(tensor_list)
+        if reduction_type == 'mean':
+            return sum(tensor_list) / len(tensor_list)
+        if reduction_type == 'concat':
+            image_data_format = tf.keras.backend.image_data_format()
+            if image_data_format == "channels_first":
+                axis = 0
+            else:
+                axis = -1
+            return tf.concat(tensor_list, axis=axis)  # pylint: disable=E1120,E1123
+            # pylint issue #3613
+        raise ValueError('Unrecognized reduction policy: "{}"'.format(reduction_type))
+
+    def _get_decision(self, mutable):
+        if mutable.key not in self._cache:
+            raise ValueError('"{}" not found in decision cache.'.format(mutable.key))
+        result = self._cache[mutable.key]
+        _logger.debug('Decision %s: %s', mutable.key, result)
+        return result
diff --git a/nni/nas/tensorflow/utils.py b/nni/nas/tensorflow/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cfc6e815d973774a543e23078692189bfbb90d0
--- /dev/null
+++ b/nni/nas/tensorflow/utils.py
@@ -0,0 +1,93 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
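+
+# Illustrative sketch (assumed typical usage, not from the original file):
+# `fill_zero_grads` below is meant for use right after
+# `tf.GradientTape.gradient`, which returns None for weights that did not
+# participate in the forward pass; replacing those Nones with zero tensors
+# lets `optimizer.apply_gradients` proceed uniformly. `loss_fn`, `model`,
+# `x`, `y` and `optimizer` are assumed to be defined by the caller.
+#
+#     with tf.GradientTape() as tape:
+#         loss = loss_fn(model(x), y)
+#     grads = tape.gradient(loss, model.trainable_weights)
+#     grads = fill_zero_grads(grads, model.trainable_weights)
+#     optimizer.apply_gradients(zip(grads, model.trainable_weights))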
+ +import tensorflow as tf + +_counter = 0 + +def global_mutable_counting(): + global _counter + _counter += 1 + return _counter + + +class AverageMeter: + def __init__(self, name): + self.name = name + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val): + self.val = val + self.sum += val + self.count += 1 + self.avg = self.sum / self.count + + def __str__(self): + return '{name} {val:4f} ({avg:4f})'.format(**self.__dict__) + + def summary(self): + return '{name}: {avg:4f}'.format(**self.__dict__) + + +class AverageMeterGroup: + def __init__(self): + self.meters = {} + + def update(self, data): + for k, v in data.items(): + if k not in self.meters: + self.meters[k] = AverageMeter(k) + self.meters[k].update(v) + + def __str__(self): + return ' '.join(str(v) for v in self.meters.values()) + + def summary(self): + return ' '.join(v.summary() for v in self.meters.values()) + + +class StructuredMutableTreeNode: + def __init__(self, mutable): + self.mutable = mutable + self.children = [] + + def add_child(self, mutable): + self.children.append(StructuredMutableTreeNode(mutable)) + return self.children[-1] + + def type(self): + return type(self.mutable) + + def __iter__(self): + return self.traverse() + + def traverse(self, order="pre", deduplicate=True, memo=None): + if memo is None: + memo = set() + assert order in ["pre", "post"] + if order == "pre": + if self.mutable is not None: + if not deduplicate or self.mutable.key not in memo: + memo.add(self.mutable.key) + yield self.mutable + for child in self.children: + for m in child.traverse(order=order, deduplicate=deduplicate, memo=memo): + yield m + if order == "post": + if self.mutable is not None: + if not deduplicate or self.mutable.key not in memo: + memo.add(self.mutable.key) + yield self.mutable + + +def fill_zero_grads(grads, weights): + ret = [] + for grad, weight in zip(grads, weights): + if grad is not None: + ret.append(grad) + else: + ret.append(tf.zeros_like(weight)) + return ret diff --git a/nni/parameter_expressions.py b/nni/parameter_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..adff923f7ea1cb4debe114c22376522adb8d8a6d --- /dev/null +++ b/nni/parameter_expressions.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +''' +parameter_expression.py +''' + +import numpy as np + + +def choice(options, random_state): + ''' + options: 1-D array-like or int + random_state: an object of numpy.random.RandomState + ''' + return random_state.choice(options) + + +def randint(lower, upper, random_state): + ''' + Generate a random integer from `lower` (inclusive) to `upper` (exclusive). 
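+    For example (illustrative), `randint(2, 5, rs)` may return 2, 3, or 4,
+    where `rs` is a `numpy.random.RandomState`.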
+    lower: an int representing the lower bound
+    upper: an int representing the upper bound
+    random_state: an object of numpy.random.RandomState
+    '''
+    return random_state.randint(lower, upper)
+
+
+def uniform(low, high, random_state):
+    '''
+    low: a float representing the lower bound
+    high: a float representing the upper bound
+    random_state: an object of numpy.random.RandomState
+    '''
+    assert high >= low, 'Upper bound must be no smaller than lower bound'
+    return random_state.uniform(low, high)
+
+
+def quniform(low, high, q, random_state):
+    '''
+    low: a float representing the lower bound
+    high: a float representing the upper bound
+    q: sample step
+    random_state: an object of numpy.random.RandomState
+    '''
+    return np.clip(np.round(uniform(low, high, random_state) / q) * q, low, high)
+
+
+def loguniform(low, high, random_state):
+    '''
+    low: a float representing the lower bound
+    high: a float representing the upper bound
+    random_state: an object of numpy.random.RandomState
+    '''
+    assert low > 0, 'Lower bound must be positive'
+    return np.exp(uniform(np.log(low), np.log(high), random_state))
+
+
+def qloguniform(low, high, q, random_state):
+    '''
+    low: a float representing the lower bound
+    high: a float representing the upper bound
+    q: sample step
+    random_state: an object of numpy.random.RandomState
+    '''
+    return np.clip(np.round(loguniform(low, high, random_state) / q) * q, low, high)
+
+
+def normal(mu, sigma, random_state):
+    '''
+    The probability density function of the normal distribution,
+    first derived by De Moivre and 200 years later by both Gauss and Laplace independently.
+    mu: float or array_like of floats
+        Mean (“centre”) of the distribution.
+    sigma: float or array_like of floats
+        Standard deviation (spread or “width”) of the distribution.
+    random_state: an object of numpy.random.RandomState
+    '''
+    return random_state.normal(mu, sigma)
+
+
+def qnormal(mu, sigma, q, random_state):
+    '''
+    mu: float or array_like of floats
+    sigma: float or array_like of floats
+    q: sample step
+    random_state: an object of numpy.random.RandomState
+    '''
+    return np.round(normal(mu, sigma, random_state) / q) * q
+
+
+def lognormal(mu, sigma, random_state):
+    '''
+    mu: float or array_like of floats
+    sigma: float or array_like of floats
+    random_state: an object of numpy.random.RandomState
+    '''
+    return np.exp(normal(mu, sigma, random_state))
+
+
+def qlognormal(mu, sigma, q, random_state):
+    '''
+    mu: float or array_like of floats
+    sigma: float or array_like of floats
+    q: sample step
+    random_state: an object of numpy.random.RandomState
+    '''
+    return np.round(lognormal(mu, sigma, random_state) / q) * q
diff --git a/nni/recoverable.py b/nni/recoverable.py
new file mode 100644
index 0000000000000000000000000000000000000000..70f11e634dc74886c0d379ca7fc86202654bf6ad
--- /dev/null
+++ b/nni/recoverable.py
@@ -0,0 +1,18 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+
+class Recoverable:
+
+    def load_checkpoint(self):
+        pass
+
+    def save_checkpoint(self):
+        pass
+
+    def get_checkpoint_path(self):
+        ckp_path = os.getenv('NNI_CHECKPOINT_DIRECTORY')
+        if ckp_path is not None and os.path.isdir(ckp_path):
+            return ckp_path
+        return None
diff --git a/nni/retiarii/__init__.py b/nni/retiarii/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..866b7515632fd9ec7c1fa35dc701928d124020f3
--- /dev/null
+++ b/nni/retiarii/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license. + +from .operation import Operation +from .graph import * +from .execution import * +from .fixed import fixed_arch +from .mutator import * +from .serializer import basic_unit, model_wrapper, serialize, serialize_cls diff --git a/nni/retiarii/codegen/__init__.py b/nni/retiarii/codegen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52f3abc6366f6a48339afa654948af0e651e3287 --- /dev/null +++ b/nni/retiarii/codegen/__init__.py @@ -0,0 +1 @@ +from .pytorch import model_to_pytorch_script diff --git a/nni/retiarii/codegen/pytorch.py b/nni/retiarii/codegen/pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9b294db9883703305cd420505f08e034cd5e94 --- /dev/null +++ b/nni/retiarii/codegen/pytorch.py @@ -0,0 +1,212 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from typing import Dict, List, Tuple, Any + +from nni.retiarii.operation_def.torch_op_def import ToDevice +from nni.common.device import Device, GPUDevice + +from ..graph import IllegalGraphError, Edge, Graph, Node, Model + +_logger = logging.getLogger(__name__) + + +def model_to_pytorch_script(model: Model, placement=None) -> str: + graphs = [] + total_pkgs = set() + for name, cell in model.graphs.items(): + import_pkgs, graph_code = graph_to_pytorch_model(name, cell, placement=placement) + graphs.append(graph_code) + total_pkgs.update(import_pkgs) + pkgs_code = '\n'.join(['import {}'.format(pkg) for pkg in total_pkgs]) + return _PyTorchScriptTemplate.format(pkgs_code, '\n\n'.join(graphs)).strip() + + +def _sorted_incoming_edges(node: Node) -> List[Edge]: + edges = [edge for edge in node.graph.edges if edge.tail is node] + _logger.debug('sorted_incoming_edges: %s', str(edges)) + if not edges: + return [] + _logger.debug('all tail_slots are None: %s', str([edge.tail_slot for edge in edges])) + if all(edge.tail_slot is None for edge in edges): + return edges + if all(isinstance(edge.tail_slot, int) for edge in edges): + edges = sorted(edges, key=(lambda edge: edge.tail_slot)) + if [edge.tail_slot for edge in edges] == list(range(len(edges))): + return edges + raise IllegalGraphError(node.graph, 'Node {} has bad inputs'.format(node.name)) + + +def _format_inputs(node: Node) -> Tuple[List[str], List[Any]]: + """ + Format the inputs of a given node + + Parameters + ---------- + node : Node + a graph node, get and format its inputs + + Returns + ------- + list + the list of input names + list + the list of input values, if an input is simple type, record its value, + otherwise the value is None + """ + edges = _sorted_incoming_edges(node) + inputs = [] + inputs_value = [] + for edge in edges: + if edge.head.name == '_inputs': + assert isinstance(edge.head_slot, int) + if edge.head.operation.io_names is not None: + # when input has names, e.g., forward(self, tensor1, tensor2, another_one) + inputs.append(edge.head.operation.io_names[edge.head_slot]) + else: + # when input has no name, e.g., forward(*_inputs) + inputs.append('_inputs[{}]'.format(edge.head_slot)) + inputs_value.append(None) + else: + if edge.head_slot is None: + # when the input comes from a single-output operator + inputs.append('{}'.format(edge.head.name)) + if edge.head.operation.type in ('prim::Constant', 'prim::GetAttr') and \ + 'value' in edge.head.operation.parameters: + inputs_value.append(edge.head.operation.parameters['value']) + else: + inputs_value.append(None) + else: + # when the input comes from a multi-output operator: needs 
to know which one it comes from
+                inputs.append('{}[{}]'.format(edge.head.name, edge.head_slot))
+                inputs_value.append(None)
+    return inputs, inputs_value
+
+
+def _remove_prefix(names, graph_name):
+    """
+    Variable names (with the full namespace) can be too long;
+    shorten them by removing the prefix ```graph_name```.
+    """
+    if isinstance(names, list):
+        converted_names = []
+        for name in names:
+            if name.startswith(graph_name):
+                converted_names.append(name[len(graph_name):])
+            else:
+                converted_names.append(name)
+        return converted_names
+    else:
+        return names[len(graph_name):] if names.startswith(graph_name) else names
+
+
+def generate_cuda_mapping(placement: Dict[Node, Device]) -> Dict[Device, int]:
+    '''
+    Since HIP_VISIBLE_DEVICES will be set to the list of real GPU IDs,
+    we need to remap the GPU IDs when generating code to match them correctly.
+    For example, when HIP_VISIBLE_DEVICES="0,3", we need to use "cuda:0", "cuda:1" in the generated code.
+    '''
+    unique_devices = sorted(list(set([e for e in placement.values() if isinstance(e, GPUDevice)])))
+    node_gpu_cnt = {}
+    cuda_remapped_id = {}
+    for d in unique_devices:
+        if d.node_id not in node_gpu_cnt:
+            node_gpu_cnt[d.node_id] = 0
+        node_gpu_cnt[d.node_id] += 1
+        cuda_remapped_id[d] = node_gpu_cnt[d.node_id] - 1
+
+    return cuda_remapped_id
+
+
+def graph_to_pytorch_model(graph_name: str, graph: Graph, placement=None) -> Tuple[set, str]:
+    nodes = graph.topo_sort()
+
+    # handle module node and function node differently
+    # only need to generate code for module here
+    import_pkgs = set()
+    node_codes = []
+    cuda_remapped_id = None
+    if placement:
+        cuda_remapped_id = generate_cuda_mapping(placement)
+    for node in nodes:
+        if node.operation:
+            if placement and isinstance(node.operation, ToDevice):
+                node.operation.override_device_repr("cuda:%d" % cuda_remapped_id[node.operation.device])
+
+            if node.operation.type == 'shared':
+                continue
+            pkg_name = node.operation.get_import_pkg()
+            if pkg_name is not None:
+                import_pkgs.add(pkg_name)
+            node_code = node.operation.to_init_code(_remove_prefix(node.name, graph_name))
+            if node_code is not None:
+                if placement and node in placement and len(node_code) > 0:
+                    if isinstance(placement[node], GPUDevice):
+                        device_repr = "cuda:%d" % cuda_remapped_id[placement[node]]
+                    else:
+                        device_repr = placement[node].device_repr()
+                    node_codes.append(f"{node_code}.to('{device_repr}')")
+                else:
+                    node_codes.append(node_code)
+
+    if graph.input_node.operation.io_names is None:
+        input_code = '*_inputs'
+    else:
+        for name in graph.input_node.operation.io_names:
+            assert not name.startswith(graph_name)
+        input_code = ', '.join(graph.input_node.operation.io_names)
+
+    edge_codes = []
+    sorted_nodes = graph.topo_sort()
+    for node in sorted_nodes:
+        if node.operation:
+            inputs, inputs_value = _format_inputs(node)
+            inputs = _remove_prefix(inputs, graph_name)
+            node_name = _remove_prefix(node.name, graph_name)
+            submodule_name = node_name
+            if node.operation.type == 'shared':
+                submodule_name = _remove_prefix(node.operation.parameters['reference'], graph_name)
+            edge_codes.append(node.operation.to_forward_code(submodule_name, node_name, inputs, inputs_value))
+
+    output_names, _ = _format_inputs(graph.output_node)
+    output_names = _remove_prefix(output_names, graph_name)
+    if not output_names:
+        raise RuntimeError('"forward" function should have return value(s): {}, {}, {}'.format(output_names, graph_name, graph.output_node))
+    output_code = ', '.join(output_names)
+
+    linebreak = '\n        '
+    return import_pkgs, 
_PyTorchModelTemplate.format( + graph_name=('Graph' if graph_name == '_graph' else graph_name), + inputs=input_code, + outputs=output_code, + nodes=linebreak.join(node_codes), + edges=linebreak.join(edge_codes) + ) + + +# TODO: handle imports + +_PyTorchScriptTemplate = ''' +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + +import nni.retiarii.nn.pytorch + +{} + +{} +''' + +_PyTorchModelTemplate = ''' +class {graph_name}(nn.Module): + def __init__(self): + super().__init__() + {nodes} + + def forward(self, {inputs}): + {edges} + return {outputs} +''' diff --git a/nni/retiarii/codegen/tensorflow.py b/nni/retiarii/codegen/tensorflow.py new file mode 100644 index 0000000000000000000000000000000000000000..ac0e0d7003599560a0222b3675cc0bc7374a4694 --- /dev/null +++ b/nni/retiarii/codegen/tensorflow.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +# pylint: skip-file + +""" +FIXME +This file is inherited from last version. + +I expect it can work with a few modifications to incorporate with the latest API, but it hasn't +been tested and I'm not sure. +""" + +from ..graph_v2 import IllegalGraphError, Cell, Edge, Graph, Node +from ..operations_tf import Operation +from ..type_utils import * + + +def graph_to_tensorflow_script(graph: Graph) -> str: + graphs = [graph_to_tensorflow_model(name, cell) for name, cell in graph.cell_templates.items()] + return _TensorFlowScriptTemplate.format('\n\n'.join(graphs)).strip() + + +def _sort_incoming_edges(node: Node) -> List[Edge]: + edges = [edge for edge in node.graph.edges if edge.tail is node] + if not edges: + return [] + if all(edge.tail_idx is None for edge in edges): + return edges + if all(isinstance(edge.tail_idx, int) for edge in edges): + edges = sorted(edges, key=(lambda edge: edge.tail_idx)) + if [edge.tail_idx for edge in edges] == list(range(len(edges))): + return edges + raise IllegalGraphError(node.graph, 'Node {} has bad inputs'.format(node.name)) + +def _format_inputs(node: Node) -> str: + edges = _sort_incoming_edges(node) + inputs = [] + for edge in edges: + if edge.head.name == '_inputs': + assert isinstance(edge.head_idx, int) + if node.graph.input_names is not None: + inputs.append(node.graph.input_names[edge.head_idx]) + else: + inputs.append('_inputs[{}]'.format(edge.head_idx)) + else: + if edge.head_idx is None: + inputs.append('{}'.format(edge.head.name)) + else: + inputs.append('{}[{}]'.format(edge.head.name, edge.head_idx)) + return ', '.join(inputs) + + +def graph_to_tensorflow_model(graph_name: str, graph: Graph) -> str: + nodes = graph.topo_sort() + + # handle module node and function node differently + # only need to generate code for module here + node_codes = [] + for node in nodes: + if isinstance(node, Cell): + node_codes.append('self.{} = {}()'.format(node.name, node.template_name)) + else: + node_codes.append('self.{} = {}'.format(node.name, cast(Operation, node.operation).to_tensorflow_init())) + + edge_codes = [] + + for node in nodes: + inputs = _format_inputs(node) + edge_codes.append('{} = self.{}({})'.format(node.name, node.name, inputs)) + + output_code = _format_inputs(graph.output_node) + if not output_code: + output_code = 'None' + + if graph.input_names is None: + input_code = '*_inputs' + else: + input_code = ', '.join(graph.input_names) + + linebreak = '\n ' + return _TensorFlowModelTemplate.format( + graph_name=('Graph' if graph_name == '_graph' else graph_name), + inputs=input_code, + outputs=output_code, + 
nodes=linebreak.join(node_codes),
+        edges=linebreak.join(edge_codes)
+    )
+
+
+_TensorFlowScriptTemplate = '''
+import tensorflow as tf
+import tensorflow.keras as K
+
+import sdk.custom_ops_tf as CUSTOM
+
+{}
+'''
+
+_TensorFlowModelTemplate = '''
+class {graph_name}(K.Model):
+    def __init__(self):
+        super().__init__()
+        {nodes}
+
+    def call(self, {inputs}):
+        {edges}
+        return {outputs}
+'''
\ No newline at end of file
diff --git a/nni/retiarii/converter/README.md b/nni/retiarii/converter/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0f19066b1ac0ed367cd5d8bd488984288f08315
--- /dev/null
+++ b/nni/retiarii/converter/README.md
@@ -0,0 +1,37 @@
+# PyTorch Graph Converter
+
+## Namespace for PyTorch Graph
+
+We need a concrete rule for specifying nodes in the graph with namespaces.
+
+Each node has a name, either specified or generated. Nodes in the same hierarchy cannot have the same name.
+
+* The name of a module node naturally follows this rule, because we use the variable names of instantiated modules, as the PyTorch graph does.
+
+* For nodes created in the `forward` function, we use a global sequence number.
+
+### Namespace for mutated (new) nodes
+
+TBD
+
+## Graph Simplification
+
+TBD
+
+## Node Types
+
+We define a concrete type string for each node type.
+
+## Module's Input Arguments
+
+We use a wrapper to obtain the input arguments of modules. Users need to use our wrapped `nn` and wrapped `Module`.
+
+## Control Flow
+
+### for loop
+
+Currently, we only support `ModuleList` (`ModuleDict`) based for loops, which are automatically unrolled by TorchScript. That is to say, we do not support general loops in TorchScript for now.
+
+### if/else
+
+For now, we only handle the case where the condition is a constant or an attribute. In that case, only one branch is kept when generating the graph.
\ No newline at end of file
diff --git a/nni/retiarii/converter/__init__.py b/nni/retiarii/converter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0fff09f2da1af2f3904ae9ab40939498030a295
--- /dev/null
+++ b/nni/retiarii/converter/__init__.py
@@ -0,0 +1 @@
+from .graph_gen import convert_to_graph
diff --git a/nni/retiarii/converter/graph_gen.py b/nni/retiarii/converter/graph_gen.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7dc175233388c5048b5d9d22188231ed127ef40
--- /dev/null
+++ b/nni/retiarii/converter/graph_gen.py
@@ -0,0 +1,844 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
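+
+# Illustrative sketch of how this converter is typically driven
+# (`MyModel` is a hypothetical user module; `convert_to_graph` is the
+# entry point exported from this package):
+#
+#     model = MyModel()
+#     script_module = torch.jit.script(model)
+#     ir_model = convert_to_graph(script_module, model)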
+ +import re + +import torch + +from ..graph import Graph, Model, Node +from ..nn.pytorch import InputChoice, Placeholder, LayerChoice +from ..operation import Cell, Operation +from ..serializer import get_init_parameters_or_fail +from ..utils import get_importable_name +from .op_types import MODULE_EXCEPT_LIST, OpTypeName +from .utils import ( + _convert_name, build_full_name, _without_shape_info, + _extract_info_from_trace_node, get_full_name_by_scope_name, + is_layerchoice_node, match_node, build_cand_name, + build_python_name +) + + +class GraphConverter: + def __init__(self): + self.global_seq = 0 + self.global_graph_id = 0 + + def _add_edge_handle_source_node(self, _input, graph_inputs, ir_graph, output_remap, node_index): + if _input in output_remap: + assert output_remap[_input].kind() == 'aten::append' + predecessor_node = output_remap[_input] + assert predecessor_node in node_index, 'predecessor node: {}'.format(predecessor_node) + src_node_idx = None + src_node = node_index[predecessor_node] + assert isinstance(src_node, Node) + elif _input in graph_inputs: + idx = graph_inputs.index(_input) + src_node = ir_graph.input_node + src_node_idx = idx + else: + predecessor_node = _input.node() + assert predecessor_node in node_index, 'predecessor node: {}'.format(predecessor_node) + # find out the index of _input in the outputs of predecessor_node + predecessor_outputs = [_output for _output in predecessor_node.outputs()] + if len(predecessor_outputs) == 1: + idx = None + else: + idx = predecessor_outputs.index(_input) + ir_predecessor_node = node_index[predecessor_node] + src_node_idx = idx + assert isinstance(ir_predecessor_node, Node) + src_node = ir_predecessor_node + return src_node, src_node_idx + + def _add_edge(self, ir_graph, node, graph_inputs, node_index, new_node, output_remap, ignore_first=False): + """ + Parameters + ---------- + ir_graph : Graph + node : torch._C.Node + graph_inputs : List[torch._C.Value] + a list of a script graph's inputs + node_index : Dict + new_node : Node + newly created ir node corresponding to `node` + output_remap : Dict + ignore_first : bool + if it is true, skip the first input + """ + is_single_input = (len([_input for _input in node.inputs()]) - (1 if ignore_first else 0)) == 1 + new_node_input_idx = 0 + for _input in node.inputs(): + if ignore_first: + ignore_first = False + continue + # handle source node + src_node, src_node_idx = self._add_edge_handle_source_node(_input, graph_inputs, ir_graph, output_remap, node_index) + # handle destination node + dst_node = new_node + if is_single_input: + dst_node_idx = None + else: + dst_node_idx = new_node_input_idx + # create edge + ir_graph.add_edge(head=(src_node, src_node_idx), tail=(dst_node, dst_node_idx)) + + new_node_input_idx += 1 + + def create_prim_constant_node(self, ir_graph, node, module_name): + # NOTE: compare with string not type, because the type is defined in pytorch C code. 
+ # `.kind()` can also be used here + if node.outputsAt(0).type().str() == 'None': + attrs = {'type': 'None'} + else: + attrs = {'type': node.outputsAt(0).type().str(), 'value': node.outputsAt(0).toIValue()} + self.global_seq += 1 + new_node = ir_graph.add_node(build_full_name(module_name, OpTypeName.Constant, self.global_seq), + node.kind(), attrs) + return new_node + + def handle_prim_attr_node(self, node, module): + assert node.hasAttribute('name') + value = None + if node.inputsAt(0).debugName() == 'self': + _val = getattr(module, node.s('name')) + # TODO: serialize complex data type, and output proper error message + if isinstance(_val, (int, float, str, bool)): + value = _val + attrs = {'name': node.s('name'), 'input': node.inputsAt(0).debugName(), 'value': value} + return node.kind(), attrs + + def _remove_mangle(self, module_type_str): + return re.sub('\\.___torch_mangle_\\d+', '', module_type_str) + + def remove_unconnected_nodes(self, ir_graph, targeted_type=None): + """ + Parameters + ---------- + ir_graph : Graph + our ir graph representation + targeted_type : str + nodes with ```targeted_type``` will be removed from graph if their fanout is 0. + ```None``` means removing all the nodes whose fanout is 0. + """ + # build index of outputs of Node(s) + node_fanout = set() + for edge in ir_graph.edges: + if edge.head.id not in node_fanout: + node_fanout.add(edge.head.id) + + to_removes = [] + for hidden_node in ir_graph.hidden_nodes: + if hidden_node.id not in node_fanout: + assert isinstance(hidden_node, Node) + if targeted_type is None: + to_removes.append(hidden_node) + elif hidden_node.operation.type == targeted_type: + to_removes.append(hidden_node) + + for hidden_node in to_removes: + hidden_node.remove() + + def handle_graph_nodes(self, script_module, sm_graph, + module, module_name, module_python_name, + ir_model, ir_graph, + shared_module_index=None): + """ + Convert torch script node to our node ir, and build our graph ir + + Parameters + ---------- + script_module : torch.jit.RecursiveScriptModule + the torch script of ```module``` + sm_graph : torch._C.Graph + the graph in torch script + module : nn.Module + the targeted pytorch module + module_name : str + ```module```'s name + ir_model : Model + the whole graph ir + ir_graph : Graph + the graph ir of ```module``` + shared_module_index : dict + it is used for knowing which module has been created an ir node, + if created and invoked again, then the new ir node can simply reference that ir node. 
+ this way we can identify shared modules (i.e., one module invoked multiple times in `forward` function) + + Returns + ------- + dict + the mapping from graph node to our graph ir node + """ + # handle inputs + graph_inputs = [] + for _input in sm_graph.inputs(): + if _input.debugName() == 'self': + assert _input.unique() == 0 + continue + graph_inputs.append(_input) + # TODO: add scope name + ir_graph._add_input(_convert_name(_input.debugName())) + + node_index = {} # graph node to graph ir node + if shared_module_index is None: + shared_module_index = {} + + # some node does not have output but it modifies a variable, for example aten::append + # %17 : Tensor[] = aten::append(%out.1, %16) + # %out.1 is updated, and %17 is None + # we add output to this type of node and connect it to the following node which uses %out.1 + # key: tensor (%out.1), value: node (this node) + output_remap = {} + + # ===================handle control flow: if=================== + def handle_if_condition(cond_tensor): + """ + to calculate the condition, we only deal with the following op types by tracing back + `prim::GetAttr`, `aten::__getitem__`, `prim::Constant`, `aten::eq` + + generate the expression using recursive calls + + NOTE: do not support dynamic graph + """ + def _generate_expr(tensor): + if tensor.node().kind() == 'prim::GetAttr': + return f'({getattr(module, tensor.node().s("name"))})' + elif tensor.node().kind() == 'aten::__getitem__': + t = _generate_expr(tensor.node().inputsAt(0)) + idx = _generate_expr(tensor.node().inputsAt(1)) + return f'({t}[{idx}])' + elif tensor.node().kind() == 'prim::Constant': + return f'{tensor.toIValue()}' + elif tensor.node().kind() == 'aten::eq': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} == {right})' + elif tensor.node().kind() == 'aten::le': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} <= {right})' + elif tensor.node().kind() == 'aten::ge': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} >= {right})' + elif tensor.node().kind() == 'aten::__not__': + value = _generate_expr(tensor.node().inputsAt(0)) + return f'(not {value})' + elif tensor.node().kind() == 'aten::Bool': + value = _generate_expr(tensor.node().inputsAt(0)) + return f'bool({value})' + elif tensor.node().kind() == 'aten::__is__': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} is {right})' + elif tensor.node().kind() == 'aten::__isnot__': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} is not {right})' + elif tensor.node().kind() == 'aten::ne': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} != {right})' + elif tensor.node().kind() == 'aten::gt': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} > {right})' + elif tensor.node().kind() == 'aten::lt': + left = _generate_expr(tensor.node().inputsAt(0)) + right = _generate_expr(tensor.node().inputsAt(1)) + return f'({left} < {right})' + elif tensor.node().kind() == 'prim::If': + raise RuntimeError('Have not supported `if A and/or B`, please use two `if` statements instead.') + elif tensor.node().kind() == 'aten::abs': + value = 
_generate_expr(tensor.node().inputsAt(0)) + return f'(torch.abs({value}))' + elif tensor.node().kind() == 'aten::sum': + value = _generate_expr(tensor.node().inputsAt(0)) + return f'(torch.sum({value}))' + elif tensor.node().kind() == 'aten::item': + value = _generate_expr(tensor.node().inputsAt(0)) + return f'({value}.item())' + else: + raise RuntimeError(f'Unsupported op type {tensor.node().kind()} in if condition, ' + 'you are suggested to decorate the corresponding class with "@basic_unit".') + expr = _generate_expr(cond_tensor) + return eval(expr) + + def handle_if_node(node): + """ + Parameters + ---------- + node : torch._C.Node + the node from TorchScript graph + + Returns + ------- + Node + the created node ir + """ + # only deal with input of prim::If is constant or attribute for now + # will support constant expression in future + inputs = [i for i in node.inputs()] + assert len(inputs) == 1 + cond = handle_if_condition(inputs[0]) + chosen_block = 0 if cond else 1 + blocks = [block for block in node.blocks()] + assert len(blocks) == 2 + last_block_node = None + for node in blocks[chosen_block].nodes(): + last_block_node = handle_single_node(node) + self.global_seq += 1 + new_node = ir_graph.add_node(build_full_name(module_name, 'noop_identity', self.global_seq), 'noop_identity') + self._add_edge(ir_graph, blocks[chosen_block].returnNode(), graph_inputs, node_index, new_node, output_remap) + last_block_node = new_node + return last_block_node + + # ===================handle function call=================== + def handle_function_callmethod(node): + # get and handle the first input, which should be an nn.Module + assert node.hasAttribute('name') + # NOTE: "forward__0" is hacky, LSTM instance is parsed to call forward__0 in torchscript + if node.s('name') in ['forward', 'forward__0']: + # node.inputsAt(0).type() is + submodule_type_str = self._remove_mangle(node.inputsAt(0).type().str()) + submodule = node.inputsAt(0).node() + assert submodule.kind() == 'prim::GetAttr' + assert submodule.hasAttribute('name') + submodule_name = submodule.s('name') + + if submodule.inputsAt(0).debugName() == 'self': + # module is usually instantiated in __init__. + # when calling a module in forward, + # prim::GetAttr is used to obtain the module in torch script. + # therefore, we do this check for a module. example below: + # %25 : __torch__.xxx = prim::GetAttr[name="input_switch"](%self) + # %27 : Tensor = prim::CallMethod[name="forward"](%25, %out.1) + assert submodule_name in script_module._modules, "submodule_name: {} not in script_module {}".format( + submodule_name, script_module._modules.keys()) + + submodule_full_name = build_full_name(module_name, submodule_name) + submodule_python_name = build_python_name(module_python_name, submodule_name) + submodule_obj = getattr(module, submodule_name) + subgraph, sub_m_attrs = self._convert_module(script_module._modules[submodule_name], + submodule_obj, + submodule_full_name, submodule_python_name, + ir_model) + else: + # %8 : __torch__.nni.retiarii.model_apis.nn.___torch_mangle_37.ModuleList = prim::GetAttr[name="cells"](%self) + # %10 : __torch__.darts_model.Cell = prim::GetAttr[name="0"](%8) + # %s1.4 : Tensor = prim::CallMethod[name="forward"](%10, %4, %4) + if submodule.inputsAt(0).type().name() == 'ModuleList': + # handle ModuleList + predecessor = submodule.inputsAt(0).node() + module_name_space = [submodule_name] + while predecessor.inputsAt(0).debugName() != 'self': + # this is for dealing with nested ModuleList. 
below is an example + # %3 : __torch__.torch.nn.modules.container.___torch_mangle_0.ModuleList = prim::GetAttr[name="ops"](%self) + # %5 : __torch__.torch.nn.modules.container.ModuleList = prim::GetAttr[name="0"](%3) + # %7 : __torch__.torch.nn.modules.container.ModuleList = prim::GetAttr[name="1"](%3) + # %9 : __torch__.torch.nn.modules.container.ModuleList = prim::GetAttr[name="2"](%3) + # %11 : __torch__.torch.nn.modules.container.ModuleList = prim::GetAttr[name="3"](%3) + # %14 : __torch__.torch.nn.modules.linear.Linear = prim::GetAttr[name="0"](%5) + # %16 : __torch__.torch.nn.modules.linear.Linear = prim::GetAttr[name="1"](%5) + # %state.2 : Tensor = prim::CallMethod[name="forward"](%14, %x.1) # modulelist.py:18:24 + # %state.4 : Tensor = prim::CallMethod[name="forward"](%16, %state.2) # modulelist.py:18:24 + assert predecessor.kind() == 'prim::GetAttr' + module_name_space.append(predecessor.s('name')) + predecessor = predecessor.inputsAt(0).node() + assert predecessor.kind() == 'prim::GetAttr' + assert predecessor.hasAttribute('name') + module_name_space.append(predecessor.s('name')) + submodule_full_name = build_full_name(module_name, list(reversed(module_name_space))) + submodule_python_name = build_python_name(module_python_name, list(reversed(module_name_space))) + submodule_obj = module + script_submodule = script_module + for each_name in list(reversed(module_name_space)): + submodule_obj = getattr(submodule_obj, each_name) + script_submodule = script_submodule._modules[each_name] + subgraph, sub_m_attrs = self._convert_module(script_submodule, submodule_obj, submodule_full_name, + submodule_python_name, ir_model) + else: + raise RuntimeError('Unsupported module case: {}'.format(submodule.inputsAt(0).type().str())) + + if submodule_full_name in shared_module_index: + # this module is invoked more than once, the ir node has already been created + # create a reference node for it. 
+ # example: {"name": "conv2", "operation": {"type": "shared", "parameters": {"reference": "conv1"}}} + self.global_seq += 1 + shared_node_name = build_full_name(submodule_full_name, '', self.global_seq) + shared_node_python_name = build_python_name(submodule_python_name, self.global_seq) + shared_type_operation = Operation.new('shared', {'reference': submodule_full_name}) + subcell = ir_graph.add_node(shared_node_name, shared_type_operation) + subcell.python_name = shared_node_python_name + else: + # this module is processed for the first time, build cell for it + if subgraph is None: + # if we do not parse this module's graph, we create Node for this module + subcell = ir_graph.add_node(submodule_full_name, submodule_type_str, sub_m_attrs) + subcell.python_name = submodule_python_name + if isinstance(submodule_obj, Placeholder): + subcell.update_label(submodule_obj.label) + elif isinstance(submodule_obj, InputChoice): + subcell.update_label(sub_m_attrs['label']) + else: + # Graph already created, create Cell for it + new_cell = Cell(cell_name=submodule_full_name, parameters=sub_m_attrs) + subcell = ir_graph.add_node(submodule_full_name, new_cell) + subcell.python_name = submodule_python_name + shared_module_index[submodule_full_name] = subcell + node_index[node] = subcell + # connect the cell into graph + self._add_edge(ir_graph, node, graph_inputs, node_index, subcell, output_remap, ignore_first=True) + else: + # handle normal member function + assert hasattr(script_module, node.s('name')) + # TODO: support non member functions + assert node.inputsAt(0).debugName() == 'self' + script_method = getattr(script_module, node.s('name')) # + + # step #1: generate graph ir for this method + method_ir_graph = Graph(model=ir_model, graph_id=-100, name='temp_graph', _internal=True) + self.handle_graph_nodes(script_module, script_method.graph, module, + module_name, module_python_name, ir_model, method_ir_graph, shared_module_index) + self.refine_graph(method_ir_graph) + + # step #2: merge this graph to its module graph + for h_node in method_ir_graph.hidden_nodes: + h_node.graph = ir_graph + ir_graph.hidden_nodes.append(h_node) + for edge in method_ir_graph.edges: + edge.graph = ir_graph + if edge.head == method_ir_graph.input_node: + # this is a member method, 'self' is the first argument, thus +1 + _input = node.inputsAt(edge.head_slot + 1) + src_node, src_node_idx = self._add_edge_handle_source_node(_input, graph_inputs, ir_graph, output_remap, node_index) + edge.head = src_node + edge.head_slot = src_node_idx + if edge.tail == method_ir_graph.output_node: + # since the following nodes have not been created, skip this edge + # edge.head is the output node of this method + # TODO: check whether there could be multiple output nodes??? 
+ node_index[node] = edge.head + continue + ir_graph.edges.append(edge) + + # ===================handle each single node=================== + def handle_single_node(node): + """ + Parameters + ---------- + node : torch._C.Node + the node from TorchScript graph + + Returns + ------- + Node + the created node ir + """ + if node.kind() == 'prim::CallMethod': + handle_function_callmethod(node) + elif node.kind() == 'prim::CallFunction': + func_type_str = self._remove_mangle(node.inputsAt(0).type().str()) + func = node.inputsAt(0).node() + assert func.kind() == 'prim::Constant' + assert func.hasAttribute('name') + func_name = func.s('name') + # create node for func + self.global_seq += 1 + func_node = ir_graph.add_node(build_full_name(module_name, func_name, self.global_seq), + '{}.{}'.format(func_type_str, func_name)) + func_python_name = build_python_name(module_python_name, func_name) + func_node.python_name = func_python_name + node_index[node] = func_node + self._add_edge(ir_graph, node, graph_inputs, node_index, func_node, output_remap, ignore_first=True) + elif node.kind() == 'prim::Constant': + new_node = self.create_prim_constant_node(ir_graph, node, module_name) + node_index[node] = new_node + elif node.kind() in ['prim::ListConstruct', 'prim::ListUnpack', 'prim::TupleConstruct', 'prim::TupleUnpack']: + self.global_seq += 1 + prim_op_name = node.kind().split('::')[-1] + new_node = ir_graph.add_node(build_full_name(module_name, prim_op_name, self.global_seq), node.kind()) + node_index[node] = new_node + self._add_edge(ir_graph, node, graph_inputs, node_index, new_node, output_remap) + elif node.kind() == 'prim::GetAttr': + node_type, attrs = self.handle_prim_attr_node(node, module) + self.global_seq += 1 + new_node = ir_graph.add_node(build_full_name(module_name, OpTypeName.Attr, self.global_seq), + node_type, attrs) + node_index[node] = new_node + elif node.kind() == 'prim::If': + last_block_node = handle_if_node(node) + # last_block_node is None means no node in the branch block + node_index[node] = last_block_node + elif node.kind() == 'prim::Loop': + # refer to https://gist.github.com/liuzhe-lz/90c35d9dd6fd7f3f32544940151ab186 + raise RuntimeError('Loop has not been supported yet!') + elif node.kind().startswith('prim::'): + self.global_seq += 1 + prim_op_name = node.kind().replace('::', '__') + prim_node = ir_graph.add_node(build_full_name(module_name, prim_op_name, self.global_seq), node.kind()) + node_index[node] = prim_node + self._add_edge(ir_graph, node, graph_inputs, node_index, prim_node, output_remap) + elif node.kind() == 'aten::append': + self.global_seq += 1 + aten_op_name = node.kind().replace('::', '__') + aten_node = ir_graph.add_node(build_full_name(module_name, aten_op_name, self.global_seq), node.kind()) + node_index[node] = aten_node + self._add_edge(ir_graph, node, graph_inputs, node_index, aten_node, output_remap) + output_remap[node.inputsAt(0)] = node + elif node.kind().startswith('aten::'): + # handle aten::XXX + self.global_seq += 1 + aten_op_name = node.kind().replace('::', '__') + aten_op_python_name = node.kind().replace('aten::', '') + aten_node = ir_graph.add_node(build_full_name(module_name, aten_op_name, self.global_seq), node.kind()) + aten_python_name = build_python_name(module_python_name, aten_op_python_name) + aten_node.python_name = aten_python_name + node_index[node] = aten_node + self._add_edge(ir_graph, node, graph_inputs, node_index, aten_node, output_remap) + else: + raise RuntimeError('Unsupported kind: {}'.format(node.kind())) + + return 
node_index[node] + + for node in sm_graph.nodes(): + handle_single_node(node) + + if node_index != {}: + for _output in sm_graph.outputs(): + ir_graph._add_output(_convert_name(_output.debugName())) + predecessor_node_outputs = [o for o in _output.node().outputs()] + if len(predecessor_node_outputs) == 1: + src_node_idx = None + else: + src_node_idx = predecessor_node_outputs.index(_output) + + ir_graph.add_edge(head=(node_index[_output.node()], src_node_idx), + tail=(ir_graph.output_node, None)) + else: + # here is an example where the ir_graph and node_index are empty + # graph(%self : __torch__.torchmodels.googlenet.GoogLeNet, + # %x.1 : Tensor): return (%x.1) + # add an edge from head to tail to handle this situation + ir_graph.add_edge(head=(ir_graph.input_node, 0), tail=(ir_graph.output_node, None)) + + + def merge_aten_slices(self, ir_graph): + """ + If there are aten::slice nodes, merge consecutive ones together. + ```x[:, :, 1:, 1:]``` in Python code will be converted into 4 nodes in TorchScript; + each node has 5 inputs: tensor, dim, x, y, z (i.e., x:y:z) + """ + head_slice_nodes = [] + has_slice_node = False + for node in ir_graph.hidden_nodes: + if node.operation.type == 'aten::slice': + has_slice_node = True + for pred in node.predecessors: + if pred.operation.type not in ['aten::slice', 'prim::Constant']: + head_slice_nodes.append(node) + break + if has_slice_node: + assert head_slice_nodes + + for head_node in head_slice_nodes: + slot = 0 + new_slice_node = ir_graph.add_node(build_full_name(head_node.name, 'merged'), OpTypeName.MergedSlice) + if len(head_node.incoming_edges) == 4: + # when slicing a one-dimensional list, there are only 4 inputs, thus merging is not needed + for edge in head_node.incoming_edges: + edge.tail = new_slice_node + for edge in head_node.outgoing_edges: + edge.head = new_slice_node + ir_graph.hidden_nodes.remove(head_node) + break + assert len(head_node.incoming_edges) == 5 + for edge in head_node.incoming_edges: + edge.tail = new_slice_node + slot += 5 + node = head_node + while len(node.successors) == 1 and node.successors[0].operation.type == 'aten::slice': + suc_node = node.successors[0] + assert len(suc_node.incoming_edges) == 5 + for edge in suc_node.incoming_edges: + if edge.tail_slot == 0: + edge.remove() + else: + edge.tail = new_slice_node + edge.tail_slot = slot + edge.tail_slot - 1 + slot += 4 + ir_graph.hidden_nodes.remove(node) + node = suc_node + + for edge in node.outgoing_edges: + edge.head = new_slice_node + ir_graph.hidden_nodes.remove(node) + + def refine_graph(self, ir_graph): + """ + Do the following to simplify the graph: + 1. remove unconnected constant node + 2.
remove unconnected getattr node + """ + # some constant is not used, for example, function name as prim::Constant + self.remove_unconnected_nodes(ir_graph, targeted_type='prim::Constant') + self.remove_unconnected_nodes(ir_graph, targeted_type='prim::GetAttr') + self.merge_aten_slices(ir_graph) + + def _handle_inputchoice(self, module): + return { + 'n_candidates': module.n_candidates, + 'n_chosen': module.n_chosen, + 'reduction': module.reduction, + 'label': module.label + } + + def _handle_valuechoice(self, module): + return { + 'candidates': module.candidates, + 'label': module.label, + 'accessor': module._accessor + } + + def _convert_module(self, script_module, module, module_name, module_python_name, ir_model): + # NOTE: have not supported nested LayerChoice, i.e., a candidate module + # also has LayerChoice or InputChoice or ValueChoice + original_type_name = script_module.original_name + m_attrs = None + if original_type_name == OpTypeName.LayerChoice: + graph = Graph(ir_model, -100, module_name, _internal=True) # graph_id is not used now + graph.python_name = module_python_name + candidate_name_list = [] + for cand_name in module.names: + cand = module[cand_name] + script_cand = script_module._modules[cand_name] + cand_full_name = build_cand_name(cand_name, module.label) + cand_python_name = build_python_name(module_python_name, cand_name) + candidate_name_list.append(cand_full_name) + subgraph, attrs = self._convert_module(script_cand, cand, cand_full_name, cand_python_name, ir_model) + if subgraph is not None: + cand_node = graph.add_node(subgraph.name, Cell(cell_name=subgraph.name, parameters=attrs)) + cand_node.python_name = cand_python_name + else: + cand_type = '__torch__.' + get_importable_name(cand.__class__) + cand_node = graph.add_node(cand_full_name, cand_type, attrs) + cand_node.python_name = cand_python_name + graph._register() + return graph, {'mutation': 'layerchoice', 'label': module.label, 'candidates': candidate_name_list} + elif original_type_name == OpTypeName.InputChoice: + m_attrs = self._handle_inputchoice(module) + elif original_type_name == OpTypeName.ValueChoice: + m_attrs = self._handle_valuechoice(module) + elif original_type_name == OpTypeName.Placeholder: + m_attrs = get_init_parameters_or_fail(module) + elif module.__class__.__module__.startswith('torch.nn') and \ + original_type_name in torch.nn.__dict__ and \ + original_type_name not in MODULE_EXCEPT_LIST: + # this is a basic module from pytorch, no need to parse its graph + m_attrs = get_init_parameters_or_fail(module) + elif getattr(module, '_nni_basic_unit', False): + # this module is marked as serialize, won't continue to parse + m_attrs = get_init_parameters_or_fail(module) + if m_attrs is not None: + return None, m_attrs + + # handle TorchScript graph + sm_graph = script_module.graph + self.global_graph_id += 1 + ir_graph = Graph(model=ir_model, graph_id=self.global_graph_id, name=module_name, _internal=True) + ir_graph.python_name = module_python_name + + # handle graph nodes + self.handle_graph_nodes(script_module, sm_graph, module, + module_name, module_python_name, ir_model, ir_graph) + self.refine_graph(ir_graph) + + ir_graph._register() + + # add mutation signal for special modules + if original_type_name == OpTypeName.Repeat: + attrs = { + 'mutation': 'repeat', + 'label': module.label, + 'min_depth': module.min_depth, + 'max_depth': module.max_depth + } + return ir_graph, attrs + + return ir_graph, {} + + def convert_module(self, script_module, module, module_name, ir_model): + """ + 
Convert a module to its graph ir (i.e., Graph) along with its input arguments + + Parameters + ---------- + script_module : torch.jit.RecursiveScriptModule + the script module of ```module``` obtained with torch.jit.script + module : nn.Module + the targeted module instance + module_name : str + the constructed name space of ```module``` + ir_model : Model + the whole graph ir + + Returns + ------- + Graph + the graph ir built from module; ```None``` means the module is not parsed further + dict + the input arguments of this module + """ + return self._convert_module(script_module, module, module_name, None, ir_model) + + +class GraphConverterWithShape(GraphConverter): + """ + Convert a PyTorch model to NNI IR along with input/output shape info. + The base IR is acquired through `torch.jit.script`, + and the shape info is acquired through `torch.jit.trace`. + + Known issues + ------------ + 1. `InputChoice` and `ValueChoice` are not supported yet. + 2. Currently random inputs are fed while tracing layerchoice. + If the forward path of a candidate depends on the input data, a wrong path will be traced. + This will result in incomplete shape info. + """ + def convert_module(self, script_module, module, module_name, ir_model, dummy_input): + module.eval() + + ir_graph, attrs = self._convert_module(script_module, module, module_name, None, ir_model) + self.remove_dummy_nodes(ir_model) + self._initialize_parameters(ir_model) + self._trace_module(module, module_name, ir_model, dummy_input) + return ir_graph, attrs + + def _initialize_parameters(self, ir_model: 'Model'): + for ir_node in ir_model.get_nodes(): + if ir_node.operation.parameters is None: + ir_node.operation.parameters = {} + ir_node.operation.attributes.setdefault('input_shape', []) + ir_node.operation.attributes.setdefault('output_shape', []) + + def _trace_module(self, module, module_name, ir_model: 'Model', dummy_input): + # First, trace the whole graph + tm_graph = self._trace(module, dummy_input) + + for node in tm_graph.nodes(): + shape_parameters, parameters = _extract_info_from_trace_node(node) + # example scope name: '__module.convpool/__module.convpool.1/__module.convpool.1.conv' + ir_node = match_node(ir_model, node, module_name) + if ir_node is not None: + ir_node.operation.attributes.update(shape_parameters) + if parameters: + ir_node.operation.parameters.update(parameters) + + self.propagate_shape(ir_model) + + # trace each layerchoice + for name, submodule in module.named_modules(): + # TODO: support InputChoice and ValueChoice + if isinstance(submodule, LayerChoice): + full_name = get_full_name_by_scope_name(ir_model, name.split('.'), module_name) + lc_node = ir_model.get_node_by_name(full_name) + + for cand_name in submodule.names: + cand = submodule[cand_name] + cand_name = build_cand_name(cand_name, submodule.label) + # TODO: Feed the exact input tensor if the user provides input, + # in case the path changes according to the input data.
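+ # (illustrative) if lc_node recorded input_shape == [[1, 3, 32, 32]], then + # lc_inputs below becomes [torch.randn(1, 3, 32, 32)]; the shapes here are + # hypothetical and depend on the traced model.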
+ lc_inputs = [torch.randn(shape) for shape in lc_node.operation.attributes['input_shape']] + self._trace_module(cand, cand_name, ir_model, lc_inputs) + + def propagate_shape(self, ir_model: 'Model'): + + def propagate_shape_for_graph(graph: 'Graph'): + if graph == ir_model.root_graph: + return + + graph_node = ir_model.get_node_by_name(graph.name) + if not _without_shape_info(graph_node): + return + + if is_layerchoice_node(graph_node): + cand_name = graph_node.operation.parameters['candidates'][0] + cand_node = ir_model.get_node_by_name(cand_name) + if _without_shape_info(cand_node): + propagate_shape_for_graph(ir_model.graphs[cand_name]) + graph_node.operation.attributes['input_shape'] = cand_node.operation.attributes['input_shape'] + graph_node.operation.attributes['output_shape'] = cand_node.operation.attributes['output_shape'] + else: + input_shape = [[]] * len(graph.input_node.operation.io_names or []) + output_shape = [[]] * len(graph.output_node.operation.io_names or []) + for edge in graph.input_node.outgoing_edges: + node = edge.tail + if _without_shape_info(node): + if node.name in ir_model.graphs: + propagate_shape_for_graph(ir_model.graphs[node.name]) + if node.operation.attributes['input_shape']: + input_shape[edge.head_slot or 0] = node.operation.attributes['input_shape'][edge.tail_slot or 0] + graph_node.operation.attributes['input_shape'] = input_shape + for edge in graph.output_node.incoming_edges: + node = edge.head + if _without_shape_info(node): + if node.name in ir_model.graphs: + propagate_shape_for_graph(ir_model.graphs[node.name]) + if node.operation.attributes['output_shape']: + output_shape[edge.tail_slot or 0] = node.operation.attributes['output_shape'][edge.head_slot or 0] + graph_node.operation.attributes['output_shape'] = output_shape + + propagate_shape_for_graph(graph_node.graph) + + # propagate from node to graph + for node in ir_model.get_nodes(): + propagate_shape_for_graph(node.graph) + + def _trace(self, module, dummy_input): + traced_module = torch.jit.trace(module, dummy_input) + torch._C._jit_pass_inline(traced_module.graph) + return traced_module.graph + + def remove_dummy_nodes(self, ir_model: 'Model'): + # remove identity nodes + for node in ir_model.get_nodes_by_type('noop_identity'): + graph = node.graph + for in_edge in node.incoming_edges: + for out_edge in node.outgoing_edges: + if in_edge.tail_slot == out_edge.head_slot: + graph.add_edge(head=(in_edge.head, in_edge.head_slot), tail=(out_edge.tail, out_edge.tail_slot)) + graph.del_edge(in_edge) + graph.del_edge(out_edge) + break + node.remove() + + +def convert_to_graph(script_module, module, converter=None, **kwargs): + """ + Convert module to our graph ir, i.e., build a ```Model``` type + + Parameters + ---------- + script_module : torch.jit.RecursiveScriptModule + the script module obtained with torch.jit.script + module : nn.Module + the targeted module instance + converter : `TorchConverter` + default `GraphConverter` is used + kwargs: + will be passed to `converter.convert_module()` + + Returns + ------- + Model + the constructed IR model + """ + + model = Model(_internal=True) + module_name = '_model' + if converter is None: + converter = GraphConverter() + converter.convert_module(script_module, module, module_name, model, **kwargs) + + return model diff --git a/nni/retiarii/converter/op_types.py b/nni/retiarii/converter/op_types.py new file mode 100644 index 0000000000000000000000000000000000000000..6e06208c09dbb5c6ea1e7ead56b95bf6c64effbb --- /dev/null +++ 
b/nni/retiarii/converter/op_types.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from enum import Enum + +# special cases that cannot be treated as a basic module from PyTorch +MODULE_EXCEPT_LIST = ['Sequential'] + + +class OpTypeName(str, Enum): + """ + Maps an op type to its type name string + """ + Attr = 'Attr' + Constant = 'Constant' + LayerChoice = 'LayerChoice' + InputChoice = 'InputChoice' + ValueChoice = 'ValueChoice' + Placeholder = 'Placeholder' + MergedSlice = 'MergedSlice' + Repeat = 'Repeat' + Cell = 'Cell' diff --git a/nni/retiarii/converter/utils.py b/nni/retiarii/converter/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e6174f40061efdb447d5ceae133325a5cccecdbf --- /dev/null +++ b/nni/retiarii/converter/utils.py @@ -0,0 +1,256 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from ..operation import Cell +from ..graph import Model, Graph, Node, Edge + + +def build_full_name(prefix, name, seq=None): + if isinstance(name, list): + name = '__'.join(name) + if seq is None: + return '{}__{}'.format(prefix, name) + else: + return '{}__{}{}'.format(prefix, name, str(seq)) + + +def build_python_name(prefix, name): + if isinstance(name, list): + name = '.'.join(name) + if prefix: + return '{}.{}'.format(prefix, name) + else: # prefix could be None + return name + + +def build_cand_name(name, label): + return f'layerchoice_{label}_{name}' + + +def _convert_name(name: str) -> str: + """ + Convert a name using the separator '.' to a valid variable name in code + """ + return name.replace('.', '__') + + +def _extract_info_from_trace_node(trace_node): + """ + Extract shape parameters from a trace node. + + Parameters + ---------- + trace_node: torch._C.Node + a node from the traced graph + """ + input_shape = [] + output_shape = [] + + inputs = list(trace_node.inputs()) + + # the input tensors of aten::cat come from its first input (a list-construct node) + if trace_node.kind() == 'aten::cat': + input_shape = [input.type().sizes() for input in inputs[0].node().inputs()] + else: + for _input in inputs: + input_type = _input.type() + if input_type.kind() == 'TensorType': + shape = input_type.sizes() + if shape: + input_shape.append(shape) + + for _output in trace_node.outputs(): + output_type = _output.type() + if output_type.kind() == 'TensorType': + shape = output_type.sizes() + if shape: + output_shape.append(shape) + + shape_parameters = { + 'input_shape': input_shape, + 'output_shape': output_shape, + } + + if trace_node.kind() == 'aten::cat': + parameters = {'dim': inputs[1].toIValue()} + return shape_parameters, parameters + else: + return shape_parameters, None + + +def is_layerchoice_node(ir_node: Node): + if ir_node is not None and isinstance(ir_node.operation, Cell) and ir_node.operation.parameters.get('mutation') == 'layerchoice': + return True + else: + return False + + +def get_full_name_by_scope_name(ir_model: Model, scope_names, prefix=''): + full_name = prefix + + for last_scope in range(len(scope_names)): + ir_node = ir_model.get_node_by_name(full_name) + # check if it's a layerchoice + if is_layerchoice_node(ir_node): + full_name = f'layerchoice_{ir_node.operation.parameters["label"]}_{scope_names[last_scope]}' + else: + full_name = build_full_name(full_name, scope_names[last_scope]) + + return full_name + + +def match_node(ir_model: Model, torch_node, prefix=''): + """ + Match the corresponding IR node of a traced torch._C.Node + """ + scope_names = torch_node.scopeName().split('/')[-1].split('.')[1:] + full_name = get_full_name_by_scope_name(ir_model,
scope_names, prefix) + # handle the case when the node is not an nn.Module but is used directly in forward() + # Because the name cannot be matched directly, a heuristic is used: + # match the first node of that kind which has no shape info yet + graph = ir_model.graphs.get(full_name) + if graph is not None: + for node in graph.get_nodes_by_type(torch_node.kind()): + if not node.operation.attributes['input_shape']: + return node + return None + else: + return ir_model.get_node_by_name(full_name) + + +def _without_shape_info(node: Node): + return not node.operation.attributes['input_shape'] and not node.operation.attributes['output_shape'] + + +def flatten_model_graph(ir_model: Model): + """ + Flatten the subgraphs into the root graph. + """ + def _flatten(graph: Graph): + """ + flatten this graph + """ + model = graph.model + node_to_remove = [] + + for node in graph.hidden_nodes: + node_graph = model.graphs.get(node.name) + if node_graph is not None: + _flatten(node_graph) + + # flatten node graph into this graph + id_to_new_node = {} + for node_graph_node in node_graph.hidden_nodes: + new_node = Node(graph, node_graph_node.id, node_graph_node.name, node_graph_node.operation, _internal=True) + new_node.update_label(node_graph_node.label) + new_node._register() + id_to_new_node[new_node.id] = new_node + + # reconnect node edges + for in_edge in node.incoming_edges: + graph.del_edge(in_edge) + for input_node_edge in node_graph.input_node.outgoing_edges: + if input_node_edge.head_slot == in_edge.tail_slot: + graph.add_edge( + head=(in_edge.head, in_edge.head_slot), + tail=(id_to_new_node[input_node_edge.tail.id], input_node_edge.tail_slot)) + + for out_edge in node.outgoing_edges: + graph.del_edge(out_edge) + for output_node_edge in node_graph.output_node.incoming_edges: + if output_node_edge.head_slot == out_edge.tail_slot: + graph.add_edge( + head=(id_to_new_node[output_node_edge.head.id], output_node_edge.head_slot), + tail=(out_edge.tail, out_edge.tail_slot)) + + for edge in node_graph.edges: + if edge.head == node_graph.input_node or edge.tail == node_graph.output_node: + continue + new_head = id_to_new_node[edge.head.id] + new_tail = id_to_new_node[edge.tail.id] + Edge((new_head, edge.head_slot), (new_tail, edge.tail_slot), _internal=True)._register() + + node_to_remove.append(node) + del model.graphs[node.name] + + for node in node_to_remove: + node.remove() + + new_ir_model = ir_model.fork() + _flatten(new_ir_model.root_graph) + + # remove subgraphs + new_ir_model.graphs = {new_ir_model._root_graph_name: new_ir_model.root_graph} + return new_ir_model + + +def flatten_model_graph_without_layerchoice(ir_model: Model): + """ + Flatten the subgraphs into the root graph and skip all layerchoice nodes + """ + def _flatten_without_layerchoice(graph: Graph): + """ + flatten this graph + """ + model = graph.model + node_to_remove = [] + + for node in graph.hidden_nodes: + if is_layerchoice_node(node): + for in_edge in node.incoming_edges: + graph.del_edge(in_edge) + for out_edge in node.outgoing_edges: + graph.del_edge(out_edge) + del model.graphs[node.name] + node.remove() + return + + node_graph = model.graphs.get(node.name) + if node_graph is not None: + _flatten_without_layerchoice(node_graph) + + # flatten node graph into this graph + id_to_new_node = {} + for node_graph_node in node_graph.hidden_nodes: + new_node = Node(graph, node_graph_node.id, node_graph_node.name, node_graph_node.operation, _internal=True) + new_node.update_label(node_graph_node.label) + new_node._register() + id_to_new_node[new_node.id] = new_node + + #
reconnect node edges + for in_edge in node.incoming_edges: + graph.del_edge(in_edge) + for input_node_edge in node_graph.input_node.outgoing_edges: + if input_node_edge.head_slot == in_edge.tail_slot: + graph.add_edge( + head=(in_edge.head, in_edge.head_slot), + tail=(id_to_new_node[input_node_edge.tail.id], input_node_edge.tail_slot)) + + for out_edge in node.outgoing_edges: + graph.del_edge(out_edge) + for output_node_edge in node_graph.output_node.incoming_edges: + if output_node_edge.head_slot == out_edge.tail_slot: + graph.add_edge( + head=(id_to_new_node[output_node_edge.head.id], output_node_edge.head_slot), + tail=(out_edge.tail, out_edge.tail_slot)) + + + for edge in node_graph.edges: + if edge.head == node_graph.input_node or edge.tail == node_graph.output_node: + continue + new_head = id_to_new_node[edge.head.id] + new_tail = id_to_new_node[edge.tail.id] + Edge((new_head, edge.head_slot), (new_tail, edge.tail_slot), _internal=True)._register() + + node_to_remove.append(node) + del model.graphs[node.name] + + for node in node_to_remove: + node.remove() + + new_ir_model = ir_model.fork() + _flatten_without_layerchoice(new_ir_model.root_graph) + + # remove subgraphs + new_ir_model.graphs = {new_ir_model._root_graph_name: new_ir_model.root_graph} + return new_ir_model + diff --git a/nni/retiarii/converter/visualize.py b/nni/retiarii/converter/visualize.py new file mode 100644 index 0000000000000000000000000000000000000000..2bfe299198d65c29d87aadbd1e8a1001c045992c --- /dev/null +++ b/nni/retiarii/converter/visualize.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import graphviz + + +def convert_to_visualize(graph_ir, vgraph): + for name, graph in graph_ir.items(): + if name == '_evaluator': + continue + with vgraph.subgraph(name='cluster'+name) as subgraph: + subgraph.attr(color='blue') + cell_node = {} + ioput = {'_inputs': '{}-{}'.format(name, '_'.join(graph['inputs'])), + '_outputs': '{}-{}'.format(name, '_'.join(graph['outputs']))} + subgraph.node(ioput['_inputs']) + subgraph.node(ioput['_outputs']) + for node_name, node_value in graph['nodes'].items(): + value = node_value['operation'] + if value['type'] == '_cell': + cell_input_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['inputs'])) + cell_output_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['outputs'])) + cell_node[node_name] = (cell_input_name, cell_output_name) + print('cell: ', node_name, cell_input_name, cell_output_name) + else: + subgraph.node(node_name) + for edge in graph['edges']: + src = edge['head'][0] + if src == '_inputs': + src = ioput['_inputs'] + elif src in cell_node: + src = cell_node[src][1] + dst = edge['tail'][0] + if dst == '_outputs': + dst = ioput['_outputs'] + elif dst in cell_node: + dst = cell_node[dst][0] + subgraph.edge(src, dst) + + +def visualize_model(graph_ir): + vgraph = graphviz.Digraph('G', filename='vgraph', format='jpg') + convert_to_visualize(graph_ir, vgraph) + vgraph.render() diff --git a/nni/retiarii/debug_configs.py b/nni/retiarii/debug_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..9b4b1c643b33ae5336a4a0cad436311e5d30e331 --- /dev/null +++ b/nni/retiarii/debug_configs.py @@ -0,0 +1,3 @@ +# we will support tensorflow in future release + +framework = 'pytorch' diff --git a/nni/retiarii/evaluator/__init__.py b/nni/retiarii/evaluator/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..c6fd370b906e8ad1545101c9ab0a8e3d605c0b33 --- /dev/null +++ b/nni/retiarii/evaluator/__init__.py @@ -0,0 +1 @@ +from .functional import FunctionalEvaluator diff --git a/nni/retiarii/evaluator/functional.py b/nni/retiarii/evaluator/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..025eaf84a950b1aceaa27ed2d69b15357369d666 --- /dev/null +++ b/nni/retiarii/evaluator/functional.py @@ -0,0 +1,38 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from ..graph import Evaluator + + +class FunctionalEvaluator(Evaluator): + """ + Functional evaluator that directly takes a function and thus should be general. + + Attributes + ---------- + function + The full name of the function. + arguments + Keyword arguments for the function other than model. + """ + + def __init__(self, function, **kwargs): + self.function = function + self.arguments = kwargs + + @staticmethod + def _load(ir): + return FunctionalEvaluator(ir['function'], **ir['arguments']) + + def _dump(self): + return { + 'type': self.__class__, + 'function': self.function, + 'arguments': self.arguments + } + + def _execute(self, model_cls): + return self.function(model_cls, **self.arguments) + + def __eq__(self, other): + return self.function == other.function and self.arguments == other.arguments diff --git a/nni/retiarii/evaluator/pytorch/__init__.py b/nni/retiarii/evaluator/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..76da136a768ebabe211e6950edf5819796830149 --- /dev/null +++ b/nni/retiarii/evaluator/pytorch/__init__.py @@ -0,0 +1 @@ +from .lightning import * diff --git a/nni/retiarii/evaluator/pytorch/cgo/__init__.py b/nni/retiarii/evaluator/pytorch/cgo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/retiarii/evaluator/pytorch/cgo/accelerator.py b/nni/retiarii/evaluator/pytorch/cgo/accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0787bacc1e662279f5e84e6ce67e26d1934f98 --- /dev/null +++ b/nni/retiarii/evaluator/pytorch/cgo/accelerator.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from typing import Any, List, Optional, Union + +import torch +from pytorch_lightning.accelerators.accelerator import Accelerator +from pytorch_lightning.plugins.environments import ClusterEnvironment +from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin +from pytorch_lightning.trainer import Trainer +from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector + +import nni + + +class BypassPlugin(TrainingTypePlugin): + """ Plugin that handles communication on a single device. """ + + def __init__(self, device: str): + super().__init__() + self.device: str = device + self.global_rank = 0 + self.local_rank = 0 + self.world_size = 1 + + def connect(self, model: torch.nn.Module) -> torch.nn.Module: + self._model = model + self.model_to_device() + return self.model + + @property + def on_tpu(self) -> bool: + return False + + @property + def on_gpu(self) -> bool: + return "cuda" in self.device and torch.cuda.is_available() + + def reduce(self, tensor: Union[Any, torch.Tensor], *args: Any, **kwargs: Any) -> Union[Any, torch.Tensor]: + """ + Reduces a tensor from several distributed processes to one aggregated tensor. 
+ As this plugin only operates with a single device, the reduction is simply the identity. + + Args: + tensor: the tensor to sync and reduce + *args: ignored + **kwargs: ignored + + Return: + the unmodified input, as reduction is not needed for single-process operation + """ + return tensor + + def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor: + """Perform an all_gather on all processes """ + return tensor + + def teardown(self): + """ + This method is called to tear down the training process. + It is the right place to release memory and free other resources. + """ + pass + + @property + def root_device(self) -> torch.device: + return torch.device(self.device) + + def model_to_device(self) -> None: + # bypass device placement from pytorch lightning + pass + + def setup(self) -> None: + pass + + @property + def is_global_zero(self) -> bool: + return True + + def barrier(self, *args, **kwargs) -> None: + pass + + def broadcast(self, obj: object, src: int = 0) -> object: + return obj + + +def get_accelerator_connector( + num_processes: int = 1, + devices: Optional[Union[List[int], str, int]] = None, + tpu_cores: Optional[Union[List[int], str, int]] = None, + ipus: Optional[int] = None, + distributed_backend: Optional[str] = None, + accelerator: Optional[Union[str, Accelerator]] = None, + gpus: Optional[Union[List[int], str, int]] = None, + auto_select_gpus: bool = False, + num_nodes: int = 1, + sync_batchnorm: bool = False, + benchmark: bool = False, + replace_sampler_ddp: bool = True, + deterministic: bool = False, + precision: int = 32, + amp_backend: str = 'native', + amp_level: Optional[str] = None, + plugins: Optional[Union[List[Union[TrainingTypePlugin, ClusterEnvironment, str]], + TrainingTypePlugin, ClusterEnvironment, str]] = None, + **other_trainer_kwargs) -> AcceleratorConnector: + gpu_ids = Trainer()._parse_devices(gpus, auto_select_gpus, tpu_cores) + return AcceleratorConnector( + num_processes, + devices, + tpu_cores, + ipus, + distributed_backend, + accelerator, + gpus, + gpu_ids, + num_nodes, + sync_batchnorm, + benchmark, + replace_sampler_ddp, + deterministic, + precision, + amp_backend, + amp_level, + plugins, + ) + + +@nni.trace +class BypassAccelerator(Accelerator): + def __init__(self, precision_plugin=None, device="cpu", **trainer_kwargs): + if precision_plugin is None: + precision_plugin = get_accelerator_connector(**trainer_kwargs).select_precision_plugin() + + # pylint: disable=abstract-class-instantiated + super().__init__(precision_plugin=precision_plugin, training_type_plugin=BypassPlugin(device)) diff --git a/nni/retiarii/evaluator/pytorch/cgo/evaluator.py b/nni/retiarii/evaluator/pytorch/cgo/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..967ad6dbaa6a7d363394b80e00a0dd4a28c7fb4e --- /dev/null +++ b/nni/retiarii/evaluator/pytorch/cgo/evaluator.py @@ -0,0 +1,221 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license.
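+ +# A minimal usage sketch of the evaluators defined below; ``train_loader`` and +# ``val_loader`` are hypothetical DataLoader instances created with the traced +# DataLoader from nni.retiarii.evaluator.pytorch, and extra keyword arguments +# (e.g., ``max_epochs``) are forwarded to the CGO-enabled Trainer: +# +# evaluator = Classification(learning_rate=1e-3, +# train_dataloader=train_loader, +# val_dataloaders=val_loader, +# max_epochs=10)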
+ +import warnings +from typing import Dict, List, Optional, Union + + +import torch.nn as nn +import torch.optim as optim +import torchmetrics +from torch.utils.data import DataLoader + +import nni + +from ..lightning import LightningModule, _AccuracyWithLogits, Lightning +from .trainer import Trainer + + +@nni.trace +class _MultiModelSupervisedLearningModule(LightningModule): + def __init__(self, criterion: nn.Module, metrics: Dict[str, torchmetrics.Metric], + n_models: int = 0, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam): + super().__init__() + self.save_hyperparameters('criterion', 'optimizer', 'learning_rate', 'weight_decay') + self.criterion = criterion() + self.criterion_cls = criterion + self.optimizer = optimizer + self.metrics = nn.ModuleDict({name: cls() for name, cls in metrics.items()}) + self.n_models = n_models + + def forward(self, x): + y_hat = self.model(x) + return y_hat + + def training_step(self, batch, batch_idx): + x, y = batch + multi_y_hat = self(x) + if isinstance(multi_y_hat, tuple): + assert len(multi_y_hat) == self.n_models + else: + assert self.n_models == 1 + multi_y_hat = [multi_y_hat] + multi_loss = [] + for idx, y_hat in enumerate(multi_y_hat): + loss = self.criterion(y_hat.to("cpu"), y.to("cpu")) + self.log(f'train_loss_{idx}', loss, prog_bar=True) + for name, metric in self.metrics.items(): + self.log(f'train_{idx}_' + name, metric(y_hat.to("cpu"), y.to("cpu")), prog_bar=True) + multi_loss.append(loss) + return sum(multi_loss) + + def validation_step(self, batch, batch_idx): + x, y = batch + multi_y_hat = self(x) + if isinstance(multi_y_hat, tuple): + assert len(multi_y_hat) == self.n_models + else: + assert self.n_models == 1 + multi_y_hat = [multi_y_hat] + for idx, y_hat in enumerate(multi_y_hat): + self.log(f'val_loss_{idx}', self.criterion(y_hat.to("cpu"), y.to("cpu")), prog_bar=True) + for name, metric in self.metrics.items(): + self.log(f'val_{idx}_' + name, metric(y_hat.to("cpu"), y.to("cpu")), prog_bar=True) + + def test_step(self, batch, batch_idx): + x, y = batch + multi_y_hat = self(x) + if isinstance(multi_y_hat, tuple): + assert len(multi_y_hat) == self.n_models + else: + assert self.n_models == 1 + multi_y_hat = [multi_y_hat] + for idx, y_hat in enumerate(multi_y_hat): + self.log(f'test_loss_{idx}', self.criterion(y_hat.to("cpu"), y.to("cpu")), prog_bar=True) + for name, metric in self.metrics.items(): + self.log(f'test_{idx}_' + name, metric(y_hat.to("cpu"), y.to("cpu")), prog_bar=True) + + def configure_optimizers(self): + return self.optimizer(self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay) + + def on_validation_epoch_end(self): + nni.report_intermediate_result(self._get_validation_metrics()) + + def teardown(self, stage): + if stage == 'fit': + nni.report_final_result(self._get_validation_metrics()) + + def _get_validation_metrics(self): + # TODO: split metric of multiple models? + if len(self.metrics) == 1: + metric_name = next(iter(self.metrics)) + ret = [] + for idx in range(self.n_models): + ret.append(self.trainer.callback_metrics[f'val_{idx}_' + metric_name].item()) + return ret + else: + warnings.warn('Multiple metrics without "default" is not supported by current framework.') + return {name: self.trainer.callback_metrics['val_' + name].item() for name in self.metrics} + + +class MultiModelSupervisedLearningModule(_MultiModelSupervisedLearningModule): + """ + Lightning Module of SupervisedLearning for Cross-Graph Optimization. 
+ Users who need cross-graph optimization should use this module. + + Parameters + ---------- + criterion : nn.Module + Class for criterion module (not an instance). default: ``nn.CrossEntropyLoss`` + learning_rate : float + Learning rate. default: 0.001 + weight_decay : float + L2 weight decay. default: 0 + optimizer : Optimizer + Class for optimizer (not an instance). default: ``Adam`` + """ + + def __init__(self, criterion: nn.Module, metrics: Dict[str, torchmetrics.Metric], + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam): + super().__init__(criterion, metrics, learning_rate=learning_rate, weight_decay=weight_decay, optimizer=optimizer) + + +@nni.trace +class _ClassificationModule(MultiModelSupervisedLearningModule): + def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam): + super().__init__(criterion, {'acc': _AccuracyWithLogits}, + learning_rate=learning_rate, weight_decay=weight_decay, optimizer=optimizer) + + +class Classification(Lightning): + """ + Trainer that is used for classification. + + Parameters + ---------- + criterion : nn.Module + Class for criterion module (not an instance). default: ``nn.CrossEntropyLoss`` + learning_rate : float + Learning rate. default: 0.001 + weight_decay : float + L2 weight decay. default: 0 + optimizer : Optimizer + Class for optimizer (not an instance). default: ``Adam`` + train_dataloader : DataLoader + Used in ``trainer.fit()``. A PyTorch DataLoader with training samples. + If the ``lightning_module`` has a predefined train_dataloader method this will be skipped. + val_dataloaders : DataLoader or List of DataLoader + Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples. + If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped. + trainer_kwargs : dict + Optional keyword arguments passed to trainer. See + `Lightning documentation `__ for details. + """ + + def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + train_dataloader: Optional[DataLoader] = None, + val_dataloaders: Union[DataLoader, List[DataLoader], None] = None, + **trainer_kwargs): + module = _ClassificationModule(criterion=criterion, learning_rate=learning_rate, + weight_decay=weight_decay, optimizer=optimizer) + super().__init__(module, Trainer(use_cgo=True, **trainer_kwargs), + train_dataloader=train_dataloader, val_dataloaders=val_dataloaders) + + +@nni.trace +class _RegressionModule(MultiModelSupervisedLearningModule): + def __init__(self, criterion: nn.Module = nn.MSELoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam): + super().__init__(criterion, {'mse': torchmetrics.MeanSquaredError}, + learning_rate=learning_rate, weight_decay=weight_decay, optimizer=optimizer) + + +class Regression(Lightning): + """ + Trainer that is used for regression. + + Parameters + ---------- + criterion : nn.Module + Class for criterion module (not an instance). default: ``nn.MSELoss`` + learning_rate : float + Learning rate. default: 0.001 + weight_decay : float + L2 weight decay. default: 0 + optimizer : Optimizer + Class for optimizer (not an instance). default: ``Adam`` + train_dataloader : DataLoader + Used in ``trainer.fit()``.
A PyTorch DataLoader with training samples. + If the ``lightning_module`` has a predefined train_dataloader method this will be skipped. + val_dataloaders : DataLoader or List of DataLoader + Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples. + If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped. + trainer_kwargs : dict + Optional keyword arguments passed to trainer. See + `Lightning documentation `__ for details. + """ + + def __init__(self, criterion: nn.Module = nn.MSELoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + train_dataloader: Optional[DataLoader] = None, + val_dataloaders: Union[DataLoader, List[DataLoader], None] = None, + **trainer_kwargs): + module = _RegressionModule(criterion=criterion, learning_rate=learning_rate, + weight_decay=weight_decay, optimizer=optimizer) + super().__init__(module, Trainer(use_cgo=True, **trainer_kwargs), + train_dataloader=train_dataloader, val_dataloaders=val_dataloaders) diff --git a/nni/retiarii/evaluator/pytorch/cgo/trainer.py b/nni/retiarii/evaluator/pytorch/cgo/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..b6cb0cf3b8ff605c9bec2e1818bed3f8a1074183 --- /dev/null +++ b/nni/retiarii/evaluator/pytorch/cgo/trainer.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import pytorch_lightning as pl +import nni +from .accelerator import BypassAccelerator + + +@nni.trace +class Trainer(pl.Trainer): + """ + Trainer for cross-graph optimization. + + Parameters + ---------- + use_cgo : bool + Whether cross-graph optimization (CGO) is used. + If it is True, CGO will manage device placement. + Any device placement from pytorch lightning will be bypassed. + default: False + trainer_kwargs : dict + Optional keyword arguments passed to trainer. See + `Lightning documentation `__ for details. + """ + + def __init__(self, use_cgo=False, **trainer_kwargs): + if use_cgo: + if "accelerator" in trainer_kwargs: + raise ValueError("accelerator should not be set when cross-graph optimization is enabled.") + trainer_kwargs['accelerator'] = BypassAccelerator(device='cpu', **trainer_kwargs) + + super().__init__(**trainer_kwargs) diff --git a/nni/retiarii/evaluator/pytorch/lightning.py b/nni/retiarii/evaluator/pytorch/lightning.py new file mode 100644 index 0000000000000000000000000000000000000000..fd868e9ecc63144bc3254dbddb550e7db092e8fb --- /dev/null +++ b/nni/retiarii/evaluator/pytorch/lightning.py @@ -0,0 +1,328 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import warnings +from pathlib import Path +from typing import Dict, Union, Optional, List, Type + +import pytorch_lightning as pl +import torch.nn as nn +import torch.optim as optim +import torchmetrics +import torch.utils.data as torch_data + +import nni +from nni.common.serializer import is_traceable +try: + from .cgo import trainer as cgo_trainer + cgo_import_failed = False +except ImportError: + cgo_import_failed = True + +from nni.retiarii.graph import Evaluator + + +__all__ = ['LightningModule', 'Trainer', 'DataLoader', 'Lightning', 'Classification', 'Regression'] + + +class LightningModule(pl.LightningModule): + """ + Basic wrapper of generated model. + + Lightning modules used in NNI should inherit this class. 
+ """ + + def set_model(self, model: Union[Type[nn.Module], nn.Module]) -> None: + if isinstance(model, nn.Module): + self.model = model + else: + self.model = model() + + +Trainer = nni.trace(pl.Trainer) +DataLoader = nni.trace(torch_data.DataLoader) + +@nni.trace +class Lightning(Evaluator): + """ + Delegate the whole training to PyTorch Lightning. + + Since the arguments passed to the initialization needs to be serialized, ``LightningModule``, ``Trainer`` or + ``DataLoader`` in this file should be used. Another option is to hide dataloader in the Lightning module, in + which case, dataloaders are not required for this class to work. + + Following the programming style of Lightning, metrics sent to NNI should be obtained from ``callback_metrics`` + in trainer. Two hooks are added at the end of validation epoch and the end of ``fit``, respectively. The metric name + and type depend on the specific task. + + Parameters + ---------- + lightning_module : LightningModule + Lightning module that defines the training logic. + trainer : Trainer + Lightning trainer that handles the training. + train_dataloders : DataLoader + Used in ``trainer.fit()``. A PyTorch DataLoader with training samples. + If the ``lightning_module`` has a predefined train_dataloader method this will be skipped. + val_dataloaders : DataLoader or List of DataLoader + Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples. + If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped. + """ + + def __init__(self, lightning_module: LightningModule, trainer: Trainer, + train_dataloader: Optional[DataLoader] = None, + val_dataloaders: Union[DataLoader, List[DataLoader], None] = None): + assert isinstance(lightning_module, LightningModule), f'Lightning module must be an instance of {__name__}.LightningModule.' + if cgo_import_failed: + assert isinstance(trainer, pl.Trainer) and is_traceable(trainer), f'Trainer must be imported from {__name__}' + else: + # this is not isinstance(trainer, Trainer) because with a different trace call, it can be different + assert (isinstance(trainer, pl.Trainer) and is_traceable(trainer)) or isinstance(trainer, cgo_trainer.Trainer), \ + f'Trainer must be imported from {__name__} or nni.retiarii.evaluator.pytorch.cgo.trainer' + assert _check_dataloader(train_dataloader), f'Wrong dataloader type. Try import DataLoader from {__name__}.' + assert _check_dataloader(val_dataloaders), f'Wrong dataloader type. Try import DataLoader from {__name__}.' 
+ self.module = lightning_module + self.trainer = trainer + self.train_dataloader = train_dataloader + self.val_dataloaders = val_dataloaders + + @staticmethod + def _load(ir): + return Lightning(ir['module'], ir['trainer'], ir['train_dataloader'], ir['val_dataloaders']) + + def _dump(self): + return { + 'type': self.__class__, + 'module': self.module, + 'trainer': self.trainer, + 'train_dataloader': self.train_dataloader, + 'val_dataloaders': self.val_dataloaders + } + + def _execute(self, model_cls): + return self.fit(model_cls) + + def __eq__(self, other): + eq_func = False + eq_args = False + if other is None: + return False + if hasattr(self, "function") and hasattr(other, "function"): + eq_func = (self.function == other.function) + elif not (hasattr(self, "function") or hasattr(other, "function")): + eq_func = True + + if hasattr(self, "arguments") and hasattr(other, "arguments"): + eq_args = (self.arguments == other.arguments) + elif not (hasattr(self, "arguments") or hasattr(other, "arguments")): + eq_args = True + + return eq_func and eq_args + + def fit(self, model): + """ + Fit the model with provided dataloader, with Lightning trainer. + + Parameters + ---------- + model : nn.Module + The model to fit. + """ + self.module.set_model(model) + return self.trainer.fit(self.module, self.train_dataloader, self.val_dataloaders) + + +def _check_dataloader(dataloader): + if dataloader is None: + return True + if isinstance(dataloader, list): + return all([_check_dataloader(d) for d in dataloader]) + return isinstance(dataloader, torch_data.DataLoader) and is_traceable(dataloader) + + +### The following are some commonly used Lightning modules ### + +class _SupervisedLearningModule(LightningModule): + def __init__(self, criterion: nn.Module, metrics: Dict[str, torchmetrics.Metric], + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + export_onnx: Union[Path, str, bool, None] = None): + super().__init__() + self.save_hyperparameters('criterion', 'optimizer', 'learning_rate', 'weight_decay') + self.criterion = criterion() + self.optimizer = optimizer + self.metrics = nn.ModuleDict({name: cls() for name, cls in metrics.items()}) + + if export_onnx is None or export_onnx is True: + self.export_onnx = Path(os.environ.get('NNI_OUTPUT_DIR', '.')) / 'model.onnx' + self.export_onnx.parent.mkdir(exist_ok=True) + elif export_onnx: + self.export_onnx = Path(export_onnx) + else: + self.export_onnx = None + self._already_exported = False + + def forward(self, x): + y_hat = self.model(x) + return y_hat + + def training_step(self, batch, batch_idx): + x, y = batch + y_hat = self(x) + loss = self.criterion(y_hat, y) + self.log('train_loss', loss, prog_bar=True) + for name, metric in self.metrics.items(): + self.log('train_' + name, metric(y_hat, y), prog_bar=True) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + y_hat = self(x) + + if not self._already_exported: + try: + self.to_onnx(self.export_onnx, x, export_params=True) + except RuntimeError as e: + warnings.warn(f'ONNX conversion failed. As a result, you might not be able to use visualization. 
Error message: {e}') + self._already_exported = True + + self.log('val_loss', self.criterion(y_hat, y), prog_bar=True) + for name, metric in self.metrics.items(): + self.log('val_' + name, metric(y_hat, y), prog_bar=True) + + def test_step(self, batch, batch_idx): + x, y = batch + y_hat = self(x) + self.log('test_loss', self.criterion(y_hat, y), prog_bar=True) + for name, metric in self.metrics.items(): + self.log('test_' + name, metric(y_hat, y), prog_bar=True) + + def configure_optimizers(self): + return self.optimizer(self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay) + + def on_validation_epoch_end(self): + nni.report_intermediate_result(self._get_validation_metrics()) + + def on_fit_end(self): + nni.report_final_result(self._get_validation_metrics()) + + def _get_validation_metrics(self): + if len(self.metrics) == 1: + metric_name = next(iter(self.metrics)) + return self.trainer.callback_metrics['val_' + metric_name].item() + else: + warnings.warn('Multiple metrics without "default" are not supported by the current framework.') + return {name: self.trainer.callback_metrics['val_' + name].item() for name in self.metrics} + + +class _AccuracyWithLogits(torchmetrics.Accuracy): + def update(self, pred, target): + return super().update(nn.functional.softmax(pred), target) + + +@nni.trace +class _ClassificationModule(_SupervisedLearningModule): + def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + export_onnx: bool = True): + super().__init__(criterion, {'acc': _AccuracyWithLogits}, + learning_rate=learning_rate, weight_decay=weight_decay, optimizer=optimizer, + export_onnx=export_onnx) + + +class Classification(Lightning): + """ + Trainer that is used for classification. + + Parameters + ---------- + criterion : nn.Module + Class for criterion module (not an instance). default: ``nn.CrossEntropyLoss`` + learning_rate : float + Learning rate. default: 0.001 + weight_decay : float + L2 weight decay. default: 0 + optimizer : Optimizer + Class for optimizer (not an instance). default: ``Adam`` + train_dataloader : DataLoader + Used in ``trainer.fit()``. A PyTorch DataLoader with training samples. + If the ``lightning_module`` has a predefined train_dataloader method this will be skipped. + val_dataloaders : DataLoader or List of DataLoader + Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples. + If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped. + export_onnx : bool + If true, the model will be exported to ``model.onnx`` before training starts. default: true + trainer_kwargs : dict + Optional keyword arguments passed to trainer. See + `Lightning documentation `__ for details.
+ """ + + def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + train_dataloader: Optional[DataLoader] = None, + val_dataloaders: Union[DataLoader, List[DataLoader], None] = None, + export_onnx: bool = True, + **trainer_kwargs): + module = _ClassificationModule(criterion=criterion, learning_rate=learning_rate, + weight_decay=weight_decay, optimizer=optimizer, export_onnx=export_onnx) + super().__init__(module, Trainer(**trainer_kwargs), + train_dataloader=train_dataloader, val_dataloaders=val_dataloaders) + + +@nni.trace +class _RegressionModule(_SupervisedLearningModule): + def __init__(self, criterion: nn.Module = nn.MSELoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + export_onnx: bool = True): + super().__init__(criterion, {'mse': torchmetrics.MeanSquaredError}, + learning_rate=learning_rate, weight_decay=weight_decay, optimizer=optimizer, + export_onnx=export_onnx) + + +class Regression(Lightning): + """ + Trainer that is used for regression. + + Parameters + ---------- + criterion : nn.Module + Class for criterion module (not an instance). default: ``nn.MSELoss`` + learning_rate : float + Learning rate. default: 0.001 + weight_decay : float + L2 weight decay. default: 0 + optimizer : Optimizer + Class for optimizer (not an instance). default: ``Adam`` + train_dataloders : DataLoader + Used in ``trainer.fit()``. A PyTorch DataLoader with training samples. + If the ``lightning_module`` has a predefined train_dataloader method this will be skipped. + val_dataloaders : DataLoader or List of DataLoader + Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples. + If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped. + export_onnx : bool + If true, model will be exported to ``model.onnx`` before training starts. default: true + trainer_kwargs : dict + Optional keyword arguments passed to trainer. See + `Lightning documentation `__ for details. + """ + + def __init__(self, criterion: nn.Module = nn.MSELoss, + learning_rate: float = 0.001, + weight_decay: float = 0., + optimizer: optim.Optimizer = optim.Adam, + train_dataloader: Optional[DataLoader] = None, + val_dataloaders: Union[DataLoader, List[DataLoader], None] = None, + export_onnx: bool = True, + **trainer_kwargs): + module = _RegressionModule(criterion=criterion, learning_rate=learning_rate, + weight_decay=weight_decay, optimizer=optimizer, export_onnx=export_onnx) + super().__init__(module, Trainer(**trainer_kwargs), + train_dataloader=train_dataloader, val_dataloaders=val_dataloaders) diff --git a/nni/retiarii/execution/__init__.py b/nni/retiarii/execution/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0a0e47b0b01b597936c8c2a8a09cafeda899168b --- /dev/null +++ b/nni/retiarii/execution/__init__.py @@ -0,0 +1 @@ +from .api import * diff --git a/nni/retiarii/execution/api.py b/nni/retiarii/execution/api.py new file mode 100644 index 0000000000000000000000000000000000000000..8027e7e36308c4cf511ba1942264be9bdb2dfef1 --- /dev/null +++ b/nni/retiarii/execution/api.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import time +from typing import Iterable + +from ..graph import Model, ModelStatus +from .interface import AbstractExecutionEngine +from .listener import DefaultListener + +_execution_engine = None +_default_listener = None + +__all__ = ['get_execution_engine', 'get_and_register_default_listener', + 'list_models', 'submit_models', 'wait_models', 'query_available_resources', + 'set_execution_engine', 'is_stopped_exec', 'budget_exhausted'] + + +def set_execution_engine(engine: AbstractExecutionEngine) -> None: + global _execution_engine + if _execution_engine is None: + _execution_engine = engine + else: + raise RuntimeError('Execution engine is already set.') + + +def get_execution_engine() -> AbstractExecutionEngine: + global _execution_engine + assert _execution_engine is not None, 'You need to set execution engine, before using it.' + return _execution_engine + + +def get_and_register_default_listener(engine: AbstractExecutionEngine) -> DefaultListener: + global _default_listener + if _default_listener is None: + _default_listener = DefaultListener() + engine.register_graph_listener(_default_listener) + return _default_listener + + +def submit_models(*models: Model) -> None: + engine = get_execution_engine() + get_and_register_default_listener(engine) + engine.submit_models(*models) + + +def list_models(*models: Model) -> Iterable[Model]: + engine = get_execution_engine() + get_and_register_default_listener(engine) + return engine.list_models() + + +def wait_models(*models: Model) -> None: + get_and_register_default_listener(get_execution_engine()) + while True: + time.sleep(1) + left_models = [g for g in models if not g.status in (ModelStatus.Trained, ModelStatus.Failed)] + if not left_models: + break + + +def query_available_resources() -> int: + engine = get_execution_engine() + resources = engine.query_available_resource() + return resources if isinstance(resources, int) else len(resources) + + +def is_stopped_exec(model: Model) -> bool: + return model.status in (ModelStatus.Trained, ModelStatus.Failed) + + +def budget_exhausted() -> bool: + engine = get_execution_engine() + return engine.budget_exhausted() diff --git a/nni/retiarii/execution/base.py b/nni/retiarii/execution/base.py new file mode 100644 index 0000000000000000000000000000000000000000..54eb041c317b697ff258d2602f6c2d1b4ecb1fc1 --- /dev/null +++ b/nni/retiarii/execution/base.py @@ -0,0 +1,145 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import random +import string +from typing import Any, Dict, Iterable, List + +from .interface import AbstractExecutionEngine, AbstractGraphListener +from .utils import get_mutation_summary +from .. 
+from ..graph import Model, ModelStatus, MetricData, Evaluator
+from ..integration_api import send_trial, receive_trial_parameters, get_advisor
+
+_logger = logging.getLogger(__name__)
+
+class BaseGraphData:
+    """
+    Attributes
+    ----------
+    model_script
+        code of an instantiated PyTorch model
+    evaluator
+        training approach for model_script
+    mutation_summary
+        a dict of all the choices made during mutation, in the HPO search space format
+    """
+    def __init__(self, model_script: str, evaluator: Evaluator, mutation_summary: dict) -> None:
+        self.model_script = model_script
+        self.evaluator = evaluator
+        self.mutation_summary = mutation_summary
+
+    def dump(self) -> dict:
+        return {
+            'model_script': self.model_script,
+            # engine needs to call dump here,
+            # otherwise, evaluator will become binary
+            # also, evaluator can be none in tests
+            'evaluator': self.evaluator._dump() if self.evaluator is not None else None,
+            'mutation_summary': self.mutation_summary
+        }
+
+    @staticmethod
+    def load(data) -> 'BaseGraphData':
+        return BaseGraphData(data['model_script'], Evaluator._load(data['evaluator']), data['mutation_summary'])
+
+
+class BaseExecutionEngine(AbstractExecutionEngine):
+    """
+    The execution engine with no optimization at all.
+    Resource management is implemented in this class.
+    """
+
+    def __init__(self) -> None:
+        """
+        Upon initialization, advisor callbacks need to be registered.
+        Advisor will call the callbacks when the corresponding event has been triggered.
+        Base execution engine will get those callbacks and broadcast them to graph listeners.
+        """
+        self._listeners: List[AbstractGraphListener] = []
+
+        # register advisor callbacks
+        advisor = get_advisor()
+        advisor.send_trial_callback = self._send_trial_callback
+        advisor.request_trial_jobs_callback = self._request_trial_jobs_callback
+        advisor.trial_end_callback = self._trial_end_callback
+        advisor.intermediate_metric_callback = self._intermediate_metric_callback
+        advisor.final_metric_callback = self._final_metric_callback
+
+        self._running_models: Dict[int, Model] = dict()
+        self._history: List[Model] = []
+
+        self.resources = 0
+
+    def submit_models(self, *models: Model) -> None:
+        for model in models:
+            data = self.pack_model_data(model)
+            self._running_models[send_trial(data.dump())] = model
+            self._history.append(model)
+
+    def list_models(self) -> Iterable[Model]:
+        return self._history
+
+    def register_graph_listener(self, listener: AbstractGraphListener) -> None:
+        self._listeners.append(listener)
+
+    def _send_trial_callback(self, parameter: dict) -> None:
+        if self.resources <= 0:
+            # FIXME: this should be a warning message
+            _logger.debug('There is no available resource, but a trial is submitted.')
+        self.resources -= 1
+        _logger.debug('Resource used. Remaining: %d', self.resources)
+
+    def _request_trial_jobs_callback(self, num_trials: int) -> None:
+        self.resources += num_trials
+        _logger.debug('New resource available. Remaining: %d', self.resources)
+
+    def _trial_end_callback(self, trial_id: int, success: bool) -> None:
+        model = self._running_models[trial_id]
+        if success:
+            model.status = ModelStatus.Trained
+        else:
+            model.status = ModelStatus.Failed
+        for listener in self._listeners:
+            listener.on_training_end(model, success)
+
+    def _intermediate_metric_callback(self, trial_id: int, metrics: MetricData) -> None:
+        model = self._running_models[trial_id]
+        model.intermediate_metrics.append(metrics)
+        for listener in self._listeners:
+            listener.on_intermediate_metric(model, metrics)
+
+    def _final_metric_callback(self, trial_id: int, metrics: MetricData) -> None:
+        model = self._running_models[trial_id]
+        model.metric = metrics
+        for listener in self._listeners:
+            listener.on_metric(model, metrics)
+
+    def query_available_resource(self) -> int:
+        return self.resources
+
+    def budget_exhausted(self) -> bool:
+        advisor = get_advisor()
+        return advisor.stopping
+
+    @classmethod
+    def pack_model_data(cls, model: Model) -> Any:
+        mutation_summary = get_mutation_summary(model)
+        return BaseGraphData(codegen.model_to_pytorch_script(model), model.evaluator, mutation_summary)
+
+    @classmethod
+    def trial_execute_graph(cls) -> None:
+        """
+        Initialize the model, hand it over to the trainer.
+        """
+        graph_data = BaseGraphData.load(receive_trial_parameters())
+        random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
+        file_name = f'_generated_model/{random_str}.py'
+        os.makedirs(os.path.dirname(file_name), exist_ok=True)
+        with open(file_name, 'w') as f:
+            f.write(graph_data.model_script)
+        model_cls = utils.import_(f'_generated_model.{random_str}._model')
+        graph_data.evaluator._execute(model_cls)
+        os.remove(file_name)
diff --git a/nni/retiarii/execution/benchmark.py b/nni/retiarii/execution/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3e6ac4c3fae1de99eadec912c7ea5542c6d7e93
--- /dev/null
+++ b/nni/retiarii/execution/benchmark.py
@@ -0,0 +1,149 @@
+import os
+import random
+from typing import Dict, Any, List, Optional, Union, Tuple, Callable, Iterable
+
+from ..graph import Model
+from ..integration_api import receive_trial_parameters
+from .base import BaseExecutionEngine
+from .utils import get_mutation_dict
+
+
+class BenchmarkGraphData:
+
+    SUPPORTED_BENCHMARK_LIST = [
+        'nasbench101',
+        'nasbench201-cifar10',
+        'nasbench201-cifar100',
+        'nasbench201-imagenet16',
+        'nds-cifar10',
+        'nds-imagenet',
+        'nlp'
+    ]
+
+    def __init__(self, mutation: Dict[str, Any], benchmark: str,
+                 metric_name: Optional[str] = None,
+                 db_path: Optional[str] = None) -> None:
+        self.mutation = mutation        # mutation dict. e.g., {'layer1': 'conv3x3', ...}
+        self.benchmark = benchmark      # e.g., nasbench101, nasbench201, ...
+        self.metric_name = metric_name  # stored so that ``load`` below can restore it
+        self.db_path = db_path          # path to directory of database
+
+    def dump(self) -> dict:
+        from nni.nas.benchmarks.constants import DATABASE_DIR
+        return {
+            'mutation': self.mutation,
+            'benchmark': self.benchmark,
+            'metric_name': self.metric_name,
+            'db_path': self.db_path or DATABASE_DIR  # database path needs to be passed from manager to worker
+        }
+
+    @staticmethod
+    def load(data) -> 'BenchmarkGraphData':
+        return BenchmarkGraphData(data['mutation'], data['benchmark'], data['metric_name'], data['db_path'])
+
+
+class BenchmarkExecutionEngine(BaseExecutionEngine):
+    """
+    Execution engine that does not actually run any trial, but queries the database for results.
+
+    The database query is done at trial end to make sure intermediate metrics are available.
+    It will also support an accelerated mode that returns the metric immediately without even running into the NNI manager
+    (not implemented yet).
+    """
+
+    def __init__(self, benchmark: Union[str, Callable[[BenchmarkGraphData], Tuple[float, List[float]]]], acceleration: bool = False):
+        super().__init__()
+        assert benchmark in BenchmarkGraphData.SUPPORTED_BENCHMARK_LIST, \
+            f'{benchmark} is not one of the supported benchmarks: {BenchmarkGraphData.SUPPORTED_BENCHMARK_LIST}'
+        self.benchmark = benchmark
+        self.acceleration = acceleration
+
+    def pack_model_data(self, model: Model) -> Any:
+        # called when a new model is submitted to the backend.
+        # convert a Model into data that is acceptable by the trial end.
+        mutation = get_mutation_dict(model)
+        graph_data = BenchmarkGraphData(mutation, self.benchmark)
+
+        return graph_data
+
+    @classmethod
+    def trial_execute_graph(cls) -> None:
+        graph_data = BenchmarkGraphData.load(receive_trial_parameters())
+        os.environ['NASBENCHMARK_DIR'] = graph_data.db_path
+        final, intermediates = cls.query_in_benchmark(graph_data)
+
+        import nni
+        for i in intermediates:
+            nni.report_intermediate_result(i)
+        nni.report_final_result(final)
+
+    @staticmethod
+    def query_in_benchmark(graph_data: BenchmarkGraphData) -> Tuple[float, List[float]]:
+        if not isinstance(graph_data.benchmark, str):
+            return graph_data.benchmark(graph_data)
+
+        # built-in benchmarks with default query setting
+        if graph_data.benchmark == 'nasbench101':
+            from nni.nas.benchmarks.nasbench101 import query_nb101_trial_stats
+            arch = None
+            for t in graph_data.mutation.values():
+                if isinstance(t, dict):
+                    arch = t
+            if arch is None:
+                raise ValueError(f'Cannot identify architecture from mutation dict: {graph_data.mutation}')
+            print(arch)
+            return _convert_to_final_and_intermediates(
+                query_nb101_trial_stats(arch, 108, include_intermediates=True),
+                'valid_acc'
+            )
+        elif graph_data.benchmark.startswith('nasbench201'):
+            from nni.nas.benchmarks.nasbench201 import query_nb201_trial_stats
+            dataset = graph_data.benchmark.split('-')[-1]
+            return _convert_to_final_and_intermediates(
+                query_nb201_trial_stats(_flatten_architecture(graph_data.mutation), 200, dataset, include_intermediates=True),
+                'valid_acc',
+            )
+        elif graph_data.benchmark.startswith('nds'):
+            # FIXME: not tested yet
+            from nni.nas.benchmarks.nds import query_nds_trial_stats
+            dataset = graph_data.benchmark.split('-')[-1]
+            return _convert_to_final_and_intermediates(
+                query_nds_trial_stats(None, None, None, None, _flatten_architecture(graph_data.mutation),
+                                      dataset, include_intermediates=True),
+                'valid_acc'
+            )
+        elif graph_data.benchmark.startswith('nlp'):
+            # FIXME: not tested yet
+            from nni.nas.benchmarks.nlp import query_nlp_trial_stats
+            # TODO: I'm not sure of the available datasets in this benchmark, and the docs are missing.
+            return _convert_to_final_and_intermediates(
+                query_nlp_trial_stats(_flatten_architecture(graph_data.mutation), 'ptb', include_intermediates=True),
+                'valid_acc'
+            )
+        else:
+            raise ValueError(f'{graph_data.benchmark} is not a supported benchmark.')
+
+
+def _flatten_architecture(mutation: Dict[str, Any], benchmark: Optional[str] = None):
+    # STRONG ASSUMPTION HERE!
+    # This assumes that the benchmarked search space is a one-level search space.
+    # This means that it is either ONE cell or ONE network.
+    # Two-cell search spaces like NDS are not supported yet for now.
+    # Some benchmarks even need special handling to pop out invalid keys. I don't think this is a good design.
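+    # Illustrative example (the key name is made up): a mutation entry like
+    #     {'model/cell__op0': 'conv3x3'}
+    # flattens to {'op0': 'conv3x3'}, keeping only the text after the last '/'
+    # and the last '__' in each key.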
+
+    # support double underscore to be compatible with the naming convention in the base engine
+    ret = {k.split('/')[-1].split('__')[-1]: v for k, v in mutation.items()}
+    if benchmark == 'nasbench101':
+        ret = {k: v for k, v in ret.items() if k.startswith('op') or k.startswith('input')}
+        ret = {k: v if k.startswith('op') or isinstance(v, list) else [v] for k, v in ret.items()}
+    return ret
+
+
+def _convert_to_final_and_intermediates(benchmark_result: Iterable[Any], metric_name: str) -> Tuple[float, List[float]]:
+    # convert benchmark results from database to
+    # final result (float) and intermediate results (list of floats)
+    benchmark_result = list(benchmark_result)
+    assert len(benchmark_result) > 0, 'Invalid query. Results from benchmark are empty.'
+    if len(benchmark_result) > 1:
+        benchmark_result = random.choice(benchmark_result)
+    else:
+        benchmark_result = benchmark_result[0]
+    return benchmark_result[metric_name], [i[metric_name] for i in benchmark_result['intermediates'] if i[metric_name] is not None]
diff --git a/nni/retiarii/execution/cgo_engine.py b/nni/retiarii/execution/cgo_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6e204dbe7cdc97d25a6bb68e5aaa91a112bd35c
--- /dev/null
+++ b/nni/retiarii/execution/cgo_engine.py
@@ -0,0 +1,368 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+
+import logging
+import os
+import random
+import string
+import time
+import threading
+from typing import Iterable, List, Dict, Optional, Tuple
+from dataclasses import dataclass
+
+from nni.common.device import GPUDevice, Device
+from .interface import AbstractExecutionEngine, AbstractGraphListener, WorkerInfo
+from .. import codegen, utils
+from ..graph import Model, ModelStatus, MetricData, Node
+from ..integration_api import send_trial, receive_trial_parameters, get_advisor
+from .logical_optimizer.logical_plan import LogicalPlan, AbstractLogicalNode
+from .logical_optimizer.opt_dedup_input import DedupInputOptimizer
+from ..evaluator.pytorch.lightning import Lightning
+from ..evaluator.pytorch.cgo.evaluator import _MultiModelSupervisedLearningModule
+
+from .base import BaseGraphData
+
+_logger = logging.getLogger(__name__)
+
+
+@dataclass
+class TrialSubmission:
+    model: Model
+    placement: Dict[Node, Device]
+    grouped_models: List[Model]
+
+
+class CGOExecutionEngine(AbstractExecutionEngine):
+    """
+    The execution engine with Cross-Graph Optimization (CGO).
+
+    Only models using PyTorch Lightning and MultiModelSupervisedLearningModule as the evaluator can be optimized.
+    Otherwise, a model will be submitted independently without any cross-graph optimization.
+
+    Parameters
+    ----------
+    devices : List[Device]
+        Available devices for execution.
+    max_concurrency : int
+        The maximum number of trials to run concurrently.
+    batch_waiting_time : int
+        Seconds to wait for each batch of trial submission.
+        The trials within one batch could apply cross-graph optimization.
+    """
+
+    def __init__(self, devices: Optional[List[Device]] = None,
+                 max_concurrency: Optional[int] = None,
+                 batch_waiting_time: int = 60,
+                 ) -> None:
+        self._listeners: List[AbstractGraphListener] = []
+        self._running_models: Dict[int, Model] = dict()
+        self.logical_plan_counter = 0
+        self.available_devices: List[Device] = []
+        self.max_concurrency: Optional[int] = max_concurrency
+        for device in devices:
+            self.available_devices.append(device)
+        self.all_devices = self.available_devices.copy()
+
+        self._batch_waiting_time = batch_waiting_time  # seconds to wait for all models in a batch to do cross-graph optimization
+        self._optimizers = [DedupInputOptimizer()]
+        self._original_models = {}
+        self._original_model_to_multi_model = {}
+        self._trial_to_original_models = {}
+        self._trial_used_devices: Dict[int, List[Device]] = {}
+
+        self._history: List[Model] = []
+
+        self._queuing_models: List[Tuple[float, Model]] = []
+        self._models_to_retry: List[Model] = []
+        self._queue_lock = threading.Lock()
+
+        # register advisor callbacks
+        advisor = get_advisor()
+        # advisor.send_trial_callback = self._send_trial_callback
+        # advisor.request_trial_jobs_callback = self._request_trial_jobs_callback
+        advisor.trial_end_callback = self._trial_end_callback
+        advisor.intermediate_metric_callback = self._intermediate_metric_callback
+        advisor.final_metric_callback = self._final_metric_callback
+
+        self._stopped = False
+        self._consumer_thread = threading.Thread(target=self._consume_models)
+        self._consumer_thread.start()
+
+    def join(self):
+        self._stopped = True
+        self._consumer_thread.join()
+
+    def add_optimizer(self, opt):
+        self._optimizers.append(opt)
+
+    def submit_models(self, *models: Model) -> None:
+        curr_time = time.time()
+        _logger.info('%d models are submitted', len(models))
+        self._queue_lock.acquire()
+        self._queuing_models.extend([(curr_time, _) for _ in models])
+        self._queue_lock.release()
+
+    def _submit_retry_models(self, models: List[Model]) -> None:
+        _logger.info('%d models are retried', len(models))
+        self._queue_lock.acquire()
+        self._models_to_retry.extend(models)
+        self._queue_lock.release()
+
+    def _consume_models(self):
+        # a thread to monitor self._models_to_retry and self._queuing_models and consume them in batches
+        while not self._stopped:
+            if len(self._models_to_retry) > 0:
+                self._queue_lock.acquire()
+                # retried jobs should be scheduled first.
+                for m in self._models_to_retry:
+                    if len(self.available_devices) > 0:
+                        self._submit_models_in_batch(m)  # submit the single model to avoid cross-graph optimization.
+                        self._models_to_retry = self._models_to_retry[1:]
+                self._queue_lock.release()
+
+            if len(self._queuing_models) > 0:
+                self._queue_lock.acquire()
+                curr_time = time.time()
+
+                num_models_to_submit = len(self.available_devices)
+                if self.max_concurrency:
+                    num_models_to_submit = min(num_models_to_submit, self.max_concurrency)
+
+                if curr_time - self._queuing_models[0][0] > self._batch_waiting_time:
+                    num_models_to_submit = min(num_models_to_submit, len(self._queuing_models))
+                if num_models_to_submit > 0:
+                    self._submit_models_in_batch(*[_[1] for _ in self._queuing_models[:num_models_to_submit]])
+                    self._queuing_models = self._queuing_models[num_models_to_submit:]
+                self._queue_lock.release()
+            time.sleep(1)
+
+    def _extract_placement_constraint(self, placement_mapping: Dict[Node, Device]):
+        unique_gpus = sorted(list(set([e for e in placement_mapping.values() if isinstance(e, GPUDevice)])))
+        placement_constraint = None
+        if len(unique_gpus) > 0:
+            placement_constraint = {}
+            placement_constraint['type'] = 'Device'
+            placement_constraint['gpus'] = [(e.node_id, e.gpu_id) for e in unique_gpus]
+        return placement_constraint
+
+    def _submit_models_in_batch(self, *models: Model) -> None:
+        _logger.info('%d models are submitted in batch', len(models))
+        _logger.debug('model id: %s', str([m.model_id for m in models]))
+        logical = self._build_logical(models)
+
+        for opt in self._optimizers:
+            opt.convert(logical)
+
+        phy_models_and_placements = self._assemble(logical)
+        for model, placement, grouped_models in phy_models_and_placements:
+            data = BaseGraphData(codegen.model_to_pytorch_script(model, placement=placement), model.evaluator, {})
+            placement_constraint = self._extract_placement_constraint(placement)
+            trial_id = send_trial(data.dump(), placement_constraint=placement_constraint)
+            # unique non-cpu devices used by the trial
+            self._trial_used_devices[trial_id] = list(set([_ for _ in placement.values() if isinstance(_, GPUDevice)]))
+
+            # currently, it is impossible for the search strategy to submit more models than the number of available devices
+            for used_device in self._trial_used_devices[trial_id]:
+                self.available_devices.remove(used_device)  # used_device must be in self.available_devices
+            self._running_models[trial_id] = model
+
+            self._trial_to_original_models[trial_id] = []
+            for m in grouped_models:
+                self._original_models[m.model_id] = m
+                self._original_model_to_multi_model[m.model_id] = model
+                self._trial_to_original_models[trial_id].append(m.model_id)
+                self._history.append(m)
+
+    def list_models(self) -> Iterable[Model]:
+        return self._history
+
+    def _assemble(self, logical_plan: LogicalPlan) -> List[Tuple[Model, Dict[Node, Device], List[Model]]]:
+        """
+        Return the assembled models as a list of tuples.
+        Each tuple contains the assembled model, the device placement of graph nodes, and the original models.
+        """
+        # try to use the available_devices first so that it can be launched as early as possible
+        # if free devices are not enough to assemble all models in one trial, try all devices
+        if len(self.available_devices) > 0:
+            grouped_models: List[Dict[Model, Device]] = AssemblePolicy().group(logical_plan, self.available_devices)
+
+        if len(self.available_devices) == 0 or len(grouped_models) > 1:
+            grouped_models: List[Dict[Model, Device]] = AssemblePolicy().group(logical_plan, self.all_devices)
+
+        phy_models_and_placements = []
+        for multi_model in grouped_models:
+            model, model_placement = logical_plan.assemble(multi_model)
+            assert isinstance(model.evaluator, Lightning), \
+                "cross-graph optimization only supports PyTorch Lightning as the evaluator"
+            assert isinstance(model.evaluator.module, _MultiModelSupervisedLearningModule), \
+                "cross-graph optimization only supports MultiModelSupervisedLearningModule"
+
+            # replace the module with a new instance whose n_models is set
+            # n_models must be set in __init__, otherwise it cannot be captured by serialize_cls
+            new_module_init_params = model.evaluator.module.trace_kwargs.copy()
+
+            # MultiModelSupervisedLearningModule hides n_models of _MultiModelSupervisedLearningModule from users
+            new_module_init_params['n_models'] = len(multi_model)
+            new_module = _MultiModelSupervisedLearningModule(**new_module_init_params)
+            model.evaluator.module = new_module
+            phy_models_and_placements.append((model, model_placement, multi_model.keys()))
+        return phy_models_and_placements
+
+    def _build_logical(self, models: List[Model]) -> LogicalPlan:
+        logical_plan = LogicalPlan(plan_id=self.logical_plan_counter)
+        for model in models:
+            logical_plan.add_model(model)
+        self.logical_plan_counter += 1
+        return logical_plan
+
+    def register_graph_listener(self, listener: AbstractGraphListener) -> None:
+        self._listeners.append(listener)
+
+    # def _send_trial_callback(self, parameter: dict) -> None:
+    #     if len(self.available_devices) == 0:
+    #         _logger.warning('There is no available device, but a trial is submitted.')
+    #     _logger.debug('Resource used. Remaining: %d', len(self.available_devices))
+
+    # def _request_trial_jobs_callback(self, num_trials: int) -> None:
+    #     self.resources += num_trials
+    #     _logger.info('on_resource_available: %d', self.resources)
+
+    def _trial_end_callback(self, trial_id: int, success: bool) -> None:
+        model = self._running_models[trial_id]
+        if success:
+            model.status = ModelStatus.Trained
+        else:
+            model.status = ModelStatus.Failed
+        models_to_retry = []
+        for model_id in self._original_model_to_multi_model:
+            if self._original_model_to_multi_model[model_id] == model:
+                original_model = self._original_models[model_id]
+                if success:
+                    original_model.status = ModelStatus.Trained
+                else:
+                    original_model.status = ModelStatus.Failed
+                    # the failed models in a multi-model will be retried one by one w/o CGO
+                    if len(self._trial_to_original_models[trial_id]) > 1:
+                        models_to_retry.append(original_model)
+                for listener in self._listeners:
+                    listener.on_training_end(original_model, success)
+
+        if len(models_to_retry) > 0:
+            self._submit_retry_models(models_to_retry)
+
+        self.available_devices.extend(self._trial_used_devices[trial_id])
+        self.available_devices = sorted(list(set(self.available_devices)))
+        del self._running_models[trial_id]
+
+    def _intermediate_metric_callback(self, trial_id: int, metrics: MetricData) -> None:
+        merged_metrics = {}
+        for idx, _ in enumerate(metrics):
+            merged_metrics[self._trial_to_original_models[trial_id][idx]] = metrics[idx]
+        for model_id in merged_metrics:
+            self._original_models[model_id].intermediate_metrics.append(merged_metrics[model_id])
+            for listener in self._listeners:
+                listener.on_intermediate_metric(self._original_models[model_id], merged_metrics[model_id])
+
+    def _final_metric_callback(self, trial_id: int, metrics: MetricData) -> None:
+        _logger.debug(metrics)
+
+        if isinstance(metrics, float):
+            self._listeners[0].on_metric(self._running_models[trial_id], metrics)
+        else:
+            merged_metrics = {}
+            for idx, _ in enumerate(metrics):
+                merged_metrics[self._trial_to_original_models[trial_id][idx]] = metrics[idx]
+            for model_id in merged_metrics:
+                self._original_models[model_id].metric = merged_metrics[model_id]
+                for listener in self._listeners:
+                    listener.on_metric(self._original_models[model_id], merged_metrics[model_id])
+
+    def query_available_resource(self) -> int:
+        # models in _queuing_models need to use available_devices first
+        self._queue_lock.acquire()
+        available_for_more_models = len(self.available_devices) - len(self._queuing_models) - len(self._models_to_retry)
+        self._queue_lock.release()
+        return available_for_more_models
+
+    def budget_exhausted(self) -> bool:
+        advisor = get_advisor()
+        return advisor.stopping
+
+    @classmethod
+    def trial_execute_graph(cls) -> None:
+        """
+        Initialize the model, hand it over to the trainer.
+        """
+        graph_data = BaseGraphData.load(receive_trial_parameters())
+        _logger.info('CGO_ENGINE trial parameters received')
+        random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
+        file_name = f'_generated_model/{random_str}.py'
+        os.makedirs(os.path.dirname(file_name), exist_ok=True)
+        with open(file_name, 'w') as f:
+            f.write(graph_data.model_script)
+
+        trainer_instance = graph_data.evaluator
+        model_cls = utils.import_(f'_generated_model.{random_str}._model')
+
+        trainer_instance.fit(model_cls())
+        os.remove(file_name)
+
+
+class AssemblePolicy:
+    @staticmethod
+    def _is_related_node(model: Model, node: Node):
+        if isinstance(node, AbstractLogicalNode):
+            if model in node.related_models:
+                return True
+        else:
+            if model == node.graph.model:
+                return True
+        return False
+
+    @staticmethod
+    def _check_graph_connectivity(model: Model,
+                                  group_model: Dict[Model, Device],
+                                  logical_plan: LogicalPlan) -> bool:
+        for edge in logical_plan.logical_graph.edges:
+            if AssemblePolicy._is_related_node(model, edge.head) or \
+                    AssemblePolicy._is_related_node(model, edge.tail):
+                for grouped_model in group_model:
+                    if AssemblePolicy._is_related_node(grouped_model, edge.head) or \
+                            AssemblePolicy._is_related_node(grouped_model, edge.tail):
+                        return True
+        return False
+
+    @staticmethod
+    def _check_evaluator(new_model: Model, group_model: Dict[Model, Device]) -> bool:
+        if not (isinstance(new_model.evaluator, Lightning)
+                and isinstance(new_model.evaluator.module, _MultiModelSupervisedLearningModule)):
+            return False
+        for m in group_model:
+            if m.evaluator != new_model.evaluator:
+                return False
+        return True
+
+    @staticmethod
+    def group(logical_plan, available_devices):
+        # TODO: packing multiple models in one GPU
+        # Currently, we only support one model per GPU
+        all_grouped_models = []
+        group_model = {}
+        assert len(available_devices) > 0  # There should be at least 1 device, set in CGO_DEVICES
+        for idx, m in enumerate(logical_plan.models):
+            # models in one group should
+            # (1) not use more GPUs than available_devices
+            # (2) be connected in the logical plan (independent models should be assembled in multiple groups)
+            # (3) use the same MultiModelSupervisedLearningModule
+            if len(group_model) > 0 and \
+                    (not AssemblePolicy._check_graph_connectivity(m, group_model, logical_plan) or
+                     not AssemblePolicy._check_evaluator(m, group_model)):
+                all_grouped_models.append(group_model)
+                group_model = {}
+            group_model[m] = available_devices[idx % len(available_devices)]
+            if len(group_model) == len(available_devices) or \
+                    idx == len(logical_plan.models) - 1:
+                all_grouped_models.append(group_model)
+                group_model = {}
+        return all_grouped_models
diff --git a/nni/retiarii/execution/interface.py b/nni/retiarii/execution/interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae74e241067640196ca87422b21415f17c023106
--- /dev/null
+++ b/nni/retiarii/execution/interface.py
@@ -0,0 +1,153 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from abc import ABC, abstractmethod, abstractclassmethod
+from typing import Any, Iterable, NewType, List, Union
+
+from ..graph import Model, MetricData
+
+__all__ = [
+    'GraphData', 'WorkerInfo',
+    'AbstractGraphListener', 'AbstractExecutionEngine'
+]
+
+
+GraphData = NewType('GraphData', Any)
+"""
+A _serializable_ internal data type defined by the execution engine.
+
+The execution engine will submit this kind of data through NNI to the worker machine, and train it there.
+
+A `GraphData` object describes a (merged) executable graph.
+
+This is the trial's "hyper-parameter" in NNI's term and will be transferred in JSON format.
+
+See `AbstractExecutionEngine` for details.
+"""
+
+
+WorkerInfo = NewType('WorkerInfo', Any)
+"""
+To be designed. Discussion needed.
+
+This describes the properties of a worker machine. (e.g. memory size)
+"""
+
+
+class AbstractGraphListener(ABC):
+    """
+    Abstract listener interface to receive graph events.
+
+    Use `AbstractExecutionEngine.register_graph_listener()` to activate a listener.
+    """
+
+    @abstractmethod
+    def on_metric(self, model: Model, metric: MetricData) -> None:
+        """
+        Reports the final metric of a graph.
+        """
+        raise NotImplementedError

+    @abstractmethod
+    def on_intermediate_metric(self, model: Model, metric: MetricData) -> None:
+        """
+        Reports the latest intermediate metric of a training graph.
+        """
+        pass
+
+    @abstractmethod
+    def on_training_end(self, model: Model, success: bool) -> None:
+        """
+        Reports that a graph is either fully trained or has failed during training.
+        """
+        pass
+
+
+class AbstractExecutionEngine(ABC):
+    """
+    The abstract interface of the execution engine.
+
+    Most of these APIs are used by the strategy, except `trial_execute_graph`, which is invoked by the framework in the trial.
+    The strategy will get the singleton execution engine object through a global API,
+    and use it in either a sync or async manner.
+
+    The execution engine is responsible for submitting (maybe-optimized) models to NNI,
+    and assigning their metrics to the `Model` objects after training.
+    The execution engine is also responsible for launching the graph in the trial process,
+    because it is the only one who understands graph data, or "hyper-parameter" in NNI's term.
+
+    The execution engine will leverage NNI Advisor APIs, which are yet open for discussion.
+
+    In the synchronous use case, the strategy will have a loop calling `submit_models` and `wait_models` repeatedly,
+    and will receive metrics from `Model` attributes.
+    The execution engine could assume that the strategy will only submit graphs when there are available resources (for now).
+
+    In the asynchronous use case, the strategy will register a listener to receive events,
+    while still using `submit_models` to train.
+
+    There will be a `BaseExecutionEngine` subclass.
+    Inner-graph optimization is supposed to derive from `BaseExecutionEngine`
+    and override `submit_models` and `trial_execute_graph`.
+    Cross-graph optimization is supposed to derive from `AbstractExecutionEngine` directly,
+    because in this case APIs like `wait_graph` and `listener.on_training_end` will have unique logic.
+
+    There might be some util functions that benefit all optimization methods,
+    but non-mandatory utils should not be covered in the abstract interface.
+    """
+
+    @abstractmethod
+    def submit_models(self, *models: Model) -> None:
+        """
+        Submit models to NNI.
+
+        This method is supposed to call something like `nni.Advisor.create_trial_job(graph_data)`.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def list_models(self) -> Iterable[Model]:
+        """
+        Get all models that have been submitted.
+
+        The execution engine should store a copy of models that have been submitted and return a list of copies in this method.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def query_available_resource(self) -> Union[List[WorkerInfo], int]:
+        """
+        Returns information of all idle workers.
+        If no details are available, this may return a list of "empty" objects, reporting the number of idle workers.
+
+        Could be left unimplemented for the first iteration.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def budget_exhausted(self) -> bool:
+        """
+        Check whether the user-configured max trial number or max execution duration has been reached.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def register_graph_listener(self, listener: AbstractGraphListener) -> None:
+        """
+        Register a listener to receive graph events.
+
+        Could be left unimplemented for the first iteration.
+        """
+        raise NotImplementedError
+
+    @abstractclassmethod
+    def trial_execute_graph(cls) -> MetricData:
+        """
+        Train the graph and return its metrics, in a separate trial process.
+
+        Each call to `nni.Advisor.create_trial_job(graph_data)` will eventually invoke this method.
+
+        Because this method will be invoked in a trial process on the training platform,
+        it has a different context from other methods and has no access to global variables or `self`.
+        However, util APIs like `.utils.experiment_config()` should still be available.
+        """
+        raise NotImplementedError
diff --git a/nni/retiarii/execution/listener.py b/nni/retiarii/execution/listener.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfda111fae7d837a4824a2365d3ab02a70df29f9
--- /dev/null
+++ b/nni/retiarii/execution/listener.py
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from ..graph import Model, ModelStatus
+from .interface import MetricData, AbstractGraphListener
+
+
+class DefaultListener(AbstractGraphListener):
+
+    def on_metric(self, model: Model, metric: MetricData) -> None:
+        model.metric = metric
+
+    def on_intermediate_metric(self, model: Model, metric: MetricData) -> None:
+        model.intermediate_metrics.append(metric)
+
+    def on_training_end(self, model: Model, success: bool) -> None:
+        if success:
+            model.status = ModelStatus.Trained
+        else:
+            model.status = ModelStatus.Failed
diff --git a/nni/retiarii/execution/logical_optimizer/__init__.py b/nni/retiarii/execution/logical_optimizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nni/retiarii/execution/logical_optimizer/interface.py b/nni/retiarii/execution/logical_optimizer/interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..03e1e84772417ccda1dc39a97a54e4123b183570
--- /dev/null
+++ b/nni/retiarii/execution/logical_optimizer/interface.py
@@ -0,0 +1,14 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from abc import ABC
+
+from .logical_plan import LogicalPlan
+
+
+class AbstractOptimizer(ABC):
+    def __init__(self) -> None:
+        pass
+
+    def convert(self, logical_plan: LogicalPlan) -> None:
+        raise NotImplementedError
diff --git a/nni/retiarii/execution/logical_optimizer/logical_plan.py b/nni/retiarii/execution/logical_optimizer/logical_plan.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aa9728a378da6958e3ce5c7faad6a7e08bc96e9
--- /dev/null
+++ b/nni/retiarii/execution/logical_optimizer/logical_plan.py
@@ -0,0 +1,336 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
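Before moving into the logical-plan machinery, a brief illustration of the listener interface defined above: a hedged sketch of a custom listener that only logs events instead of updating model state (the class and logger names are hypothetical, not part of this changeset):

    import logging

    from nni.retiarii.execution import get_execution_engine
    from nni.retiarii.execution.interface import AbstractGraphListener
    from nni.retiarii.graph import Model, MetricData

    _log = logging.getLogger('logging_listener')  # hypothetical logger name


    class LoggingListener(AbstractGraphListener):
        """Mirrors graph events into a log without touching model state."""

        def on_metric(self, model: Model, metric: MetricData) -> None:
            _log.info('final metric of model %s: %s', model.model_id, metric)

        def on_intermediate_metric(self, model: Model, metric: MetricData) -> None:
            _log.info('intermediate metric of model %s: %s', model.model_id, metric)

        def on_training_end(self, model: Model, success: bool) -> None:
            _log.info('model %s finished, success=%s', model.model_id, success)


    # Registration goes through the engine:
    # get_execution_engine().register_graph_listener(LoggingListener())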
+
+import copy
+from typing import Dict, Tuple, Any
+
+from nni.retiarii.utils import uid
+from nni.common.device import Device, CPUDevice
+
+from ...graph import Cell, Edge, Graph, Model, Node
+from ...operation import Operation, _IOPseudoOperation
+
+
+class AbstractLogicalNode(Node):
+    def __init__(self, graph, node_id, name, operation, _internal=False):
+        super().__init__(graph, node_id, name, operation, _internal=_internal)
+        self.related_models = []
+
+    def assemble(self, multi_model_placement: Dict[Model, Device]) -> Tuple[Node, Device]:
+        """
+        Given a set of models to be formed into one physical model and their device placement,
+        this function replaces the logical node with an executable physical node for the physical model.
+
+        Parameters
+        ----------
+        multi_model_placement : dict
+            a dict of models and device placement.
+            These models will be assembled into the same physical model to run.
+
+        Returns
+        -------
+        node : Node
+            the physical node to replace the logical node in the physical model
+        placement : Device
+            the device placement of the returned physical node
+        """
+        raise NotImplementedError
+
+    def _fork_to(self, graph: Graph):
+        raise NotImplementedError
+
+
+class LogicalGraph(Graph):
+    def __init__(self, model: Model, graph_id: int, name: str = None, _internal: bool = False):
+        super().__init__(model, graph_id, name='logical_' + name, _internal=_internal)
+
+    def _dump(self) -> Any:
+        nodes_dump = {}
+        for node in self.hidden_nodes:
+            if isinstance(node, OriginNode):
+                nodes_dump[f"{node.original_graph.model.model_id}_{node.name}"] = node._dump()
+            else:
+                nodes_dump[f"{node.graph.model.model_id}_{node.name}"] = node._dump()
+
+        edges_dump = []
+        for edge in self.edges:
+            if isinstance(edge.head, OriginNode):
+                head_info = f'{edge.head.original_graph.model.model_id}_{edge.head.name}'
+            else:
+                head_info = edge.head.name
+            if isinstance(edge.tail, OriginNode):
+                tail_info = f'{edge.tail.original_graph.model.model_id}_{edge.tail.name}'
+            else:
+                tail_info = edge.tail.name
+            edges_dump.append((head_info, tail_info))
+        return {
+            'inputs': self.input_node.operation.io_names,
+            'outputs': self.output_node.operation.io_names,
+            'nodes': nodes_dump,
+            'edges': edges_dump
+        }
+
+    def _fork_to(self, model: Model) -> Graph:
+        new_graph = Graph(model, self.id, self.name,
+                          _internal=True)._register()
+
+        for node in self.hidden_nodes:
+            if isinstance(node, AbstractLogicalNode):
+                node._fork_to(new_graph)
+            else:
+                Node(new_graph, node.id, node.name,
+                     node.operation, _internal=True)._register()
+
+        id_to_new_node = {repr(node): node for node in new_graph.nodes}
+
+        for edge in self.edges:
+            new_head = id_to_new_node[repr(edge.head)]
+            new_tail = id_to_new_node[repr(edge.tail)]
+            Edge((new_head, edge.head_slot),
+                 (new_tail, edge.tail_slot), _internal=True)._register()
+
+        return new_graph
+
+
+class OriginNode(AbstractLogicalNode):
+    """
+    This is a logical node representing the original node without any modification.
+    In assemble, it just returns the original node along with the physical placement given by multi_model_placement.
+    """
+
+    def __init__(self, logical_graph: LogicalGraph,
+                 original_graph: Graph, original_node: Node,
+                 name: str, operation, _internal=False):
+        super().__init__(logical_graph, original_node.id, name, operation)
+        self.original_graph = original_graph
+        self.original_node = original_node
+
+    def assemble(self, multi_model_placement: Dict[Model, Device]) -> Tuple[Node, Device]:
+        model_id = self.original_node.graph.model.model_id
+        new_node = Node(self.original_node.graph, self.original_node.id,
+                        f"M_{model_id}_" +
+                        self.original_node.name,
+                        self.original_node.operation)
+        return new_node, multi_model_placement[self.original_node.graph.model]
+
+    def __repr__(self):
+        return (f'OriginNode(id={self.id}, name={self.name}, '
+                f'operation={self.operation}, origin_model_id={self.original_graph.model.model_id})')
+
+    def _fork_to(self, graph: Graph):
+        OriginNode(graph, self.original_graph, self.original_node,
+                   self.name, self.operation)._register()
+
+
+class LogicalPlan:
+    def __init__(self, plan_id=0) -> None:
+        self.lp_model = Model(_internal=True)
+        self.id = plan_id
+        self.logical_graph = LogicalGraph(
+            self.lp_model, self.id, name=f'{self.id}', _internal=True)._register()
+        self.lp_model._root_graph_name = self.logical_graph.name
+        self.models = []
+
+    def add_model(self, model: Model):
+        self.models.append(model)
+        # Only optimize the root graph.
+        self._merge_graph(model.root_graph)
+
+    def _merge_graph(self, from_graph):
+        to_graph = self.logical_graph
+        id_to_new_node = {}  # old node ID -> new node object
+
+        for old_node in from_graph.nodes:
+            new_node = OriginNode(to_graph, old_node.graph,
+                                  old_node, old_node.name,
+                                  old_node.operation, _internal=True)._register()
+            id_to_new_node[old_node.id] = new_node
+
+        for edge in from_graph.edges:
+            new_head = id_to_new_node[edge.head.id]
+            new_tail = id_to_new_node[edge.tail.id]
+            Edge((new_head, edge.head_slot), (new_tail, edge.tail_slot), _internal=True)._register()
+
+    def assemble(self, multi_model_placement: Dict[Model, Device]) \
+            -> Tuple[Model, Dict[Node, Device]]:
+        """
+        Given a set of models to be formed into one physical model and their device placement,
+        this function replaces all the logical nodes in this LogicalPlan with executable physical nodes
+        for the physical model.
+
+        Parameters
+        ----------
+        multi_model_placement : dict
+            a dict of models and device placement.
+            These models will be assembled into the same physical model to run.
+
+        Returns
+        -------
+        phy_model : Model
+            the physical model formed by the models in `multi_model_placement`;
+            all logical nodes are replaced by physical nodes
+        node_placements : dict
+            the device placement of the nodes in `phy_model`
+        """
+        phy_model = Model(_internal=True)
+        phy_graph = self.lp_model.root_graph._fork_to(phy_model)
+        phy_graph._rename_graph(phy_graph.name, "_model")
+
+        # merge sub-graphs
+        for model in multi_model_placement:
+            if phy_model.evaluator is None and model.evaluator is not None:
+                phy_model.evaluator = model.evaluator
+            for graph_name in model.graphs:
+                if graph_name != model._root_graph_name:
+                    new_graph = model.graphs[graph_name]._fork_to(
+                        phy_model, name_prefix=f'M_{model.model_id}_')
+
+                    # the M_ prefix is added to hidden_nodes names in non-root graphs here
+                    for new_node in new_graph.hidden_nodes:
+                        if isinstance(new_node.operation, Cell):
+                            old_cell_name = new_node.operation.cell_name
+                            new_node.operation = copy.deepcopy(new_node.operation)
+                            new_node.operation.cell_name = f'M_{model.model_id}_{old_cell_name}'
+
+        assert phy_model.evaluator is not None
+
+        # When replacing logical nodes, merge the training configs when
+        # input/output nodes are replaced.
+        evaluator_slot = {}  # Model ID -> Slot ID
+        input_slot_mapping = {}
+        output_slot_mapping = {}
+        # Replace all logical nodes with executable physical nodes
+        hidden_nodes = phy_graph.hidden_nodes.copy()
+        node_placements = {}
+
+        added_models = []
+
+        for node in hidden_nodes:
+            if isinstance(node, OriginNode):
+                model_id = node.original_graph.model.model_id
+                if node.original_graph.model not in multi_model_placement:
+                    for edge in node.incoming_edges:
+                        edge.remove()
+                    for edge in node.outgoing_edges:
+                        edge.remove()
+                    node.remove()
+                    continue
+
+            if isinstance(node, AbstractLogicalNode):
+                new_node, placement = node.assemble(multi_model_placement)
+                if isinstance(new_node.operation, _IOPseudoOperation):
+                    model_id = new_node.graph.model.model_id
+                    if model_id not in evaluator_slot:
+                        added_models.append(model_id)
+                        evaluator_slot[model_id] = len(added_models) - 1
+                        slot = evaluator_slot[model_id]
+                    else:
+                        slot = evaluator_slot[model_id]
+                    # If a model's inputs/outputs are not used in the multi-model,
+                    # the codegen and trainer should not generate and use them.
+                    # "use_input" and "use_output" are used to mark whether
+                    # an input/output of a model is used in a multi-model
+                    if new_node.operation.type == '_inputs':
+                        input_slot_mapping[new_node] = slot
+                    if new_node.operation.type == '_outputs':
+                        output_slot_mapping[new_node] = slot
+
+                self.node_replace(node, new_node)
+
+                # the M_ name prefix of cells in hidden_nodes of root graphs is added here
+                # FIXME: merge this rename with the non-root graph rename, and only do it once.
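+                # (The M_{model_id}_ prefix keeps cell names from different models
+                # unique once they are merged into a single physical model.)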
+                if isinstance(new_node.operation, Cell):
+                    old_cell_name = new_node.operation.cell_name
+                    new_node.operation = copy.deepcopy(new_node.operation)
+                    new_node.operation.cell_name = f'M_{model_id}_{old_cell_name}'
+
+                # input should be on CPU; move it to GPU first if necessary
+                if isinstance(new_node.operation, _IOPseudoOperation) and new_node.operation.type == '_inputs':
+                    # hack: only support single_server
+                    node_placements[new_node] = CPUDevice(node_id=placement.node_id)
+                else:
+                    node_placements[new_node] = placement
+
+                node.remove()
+
+        # If two nodes are placed on different devices, use a ToDevice op to copy the node
+        # TODO: when copying one node to multiple devices, broadcast is more efficient than P2P communication
+        existing_edges = phy_graph.edges.copy()
+        # Avoid copying a node multiple times to the same device
+        copied_op: Dict[Tuple[Node, Device], Node] = {}
+        for edge in existing_edges:
+            head_placement = node_placements[edge.head]
+            tail_placement = node_placements[edge.tail]
+            if head_placement != tail_placement:
+                if head_placement.node_id != tail_placement.node_id:
+                    raise ValueError('Cross-server placement is not supported.')
+                # Same server, different devices
+                if (edge.head, tail_placement) in copied_op:
+                    to_node = copied_op[(edge.head, tail_placement)]
+                else:
+                    dst_name = edge.head.name + "_to_" + edge.tail.name
+                    to_operation = Operation.new(
+                        'ToDevice', {
+                            "device": tail_placement, "src": (
+                                edge.head.name, edge.head_slot), "dst": dst_name})
+                    to_node = Node(phy_graph, uid(), dst_name, to_operation)._register()
+                    Edge((edge.head, edge.head_slot), (to_node, None), _internal=True)._register()
+                    copied_op[(edge.head, tail_placement)] = to_node
+                    node_placements[to_node] = head_placement
+                edge.head = to_node
+                edge.head_slot = None
+
+        # merge all input nodes into one with multiple slots
+        input_nodes = []
+        for node in phy_graph.hidden_nodes:
+            if isinstance(node.operation, _IOPseudoOperation) and node.operation.type == '_inputs':
+                input_nodes.append(node)
+
+        for edge in phy_graph.edges:
+            if edge.head in input_nodes:
+                edge.head_slot = input_slot_mapping[edge.head]
+                edge.head = phy_graph.input_node
+
+        # merge all output nodes into one with multiple slots
+        output_nodes = []
+        for node in phy_graph.hidden_nodes:
+            if isinstance(node.operation, _IOPseudoOperation) and node.operation.type == '_outputs':
+                output_nodes.append(node)
+
+        for edge in phy_graph.edges:
+            if edge.tail in output_nodes:
+                edge.tail_slot = output_slot_mapping[edge.tail]
+                edge.tail = phy_graph.output_node
+
+        for node in input_nodes:
+            node.remove()
+        for node in output_nodes:
+            node.remove()
+
+        return phy_model, node_placements
+
+    def node_replace(self, old_node: Node, new_node: Node, input_slot_mapping=None, output_slot_mapping=None):
+        # TODO: currently, only a single input slot and output slot are supported.
+        if input_slot_mapping is not None or output_slot_mapping is not None:
+            raise ValueError('Slot mapping is not supported')
+
+        phy_graph = old_node.graph
+        new_node.graph = phy_graph
+
+        new_node._register()
+
+        for edge in phy_graph.edges:
+            if edge.head == old_node:
+                edge.head = new_node
+            elif edge.tail == old_node:
+                edge.tail = new_node
+
+        # after the replacement, there might be multiple duplicated edges
+        # with the same input and output nodes, which should be de-duplicated
+        self._remove_duplicated_edges()
+
+    def _remove_duplicated_edges(self):
+        # TODO: there are no duplicated edges if only dedup-input is supported
+        # Duplicated edges appear when a chain of prefix nodes is deduplicated
+        pass
diff --git a/nni/retiarii/execution/logical_optimizer/opt_dedup_input.py b/nni/retiarii/execution/logical_optimizer/opt_dedup_input.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f3433137e7e0b4a7dd8f8ca2a013be11b23b2d7
--- /dev/null
+++ b/nni/retiarii/execution/logical_optimizer/opt_dedup_input.py
@@ -0,0 +1,110 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from typing import List, Dict, Tuple
+
+from nni.retiarii.utils import uid
+from nni.retiarii.evaluator.pytorch.cgo.evaluator import MultiModelSupervisedLearningModule
+from nni.common.device import GPUDevice
+
+from ...graph import Graph, Model, Node
+from .interface import AbstractOptimizer
+from .logical_plan import (AbstractLogicalNode, LogicalGraph, LogicalPlan,
+                           OriginNode)
+
+
+_supported_evaluators = [MultiModelSupervisedLearningModule]
+
+
+class DedupInputNode(AbstractLogicalNode):
+    """
+    This is a logical node representing the node for deduplication.
+    In assemble, it just returns one copy of the original node when multiple models are assembled.
+    These models will share the result of a single computation.
+    """
+
+    def __init__(self, logical_graph: LogicalGraph, node_id: int,
+                 nodes_to_dedup: List[Node], _internal=False):
+        super().__init__(logical_graph, node_id,
+                         "Dedup_" + nodes_to_dedup[0].name,
+                         nodes_to_dedup[0].operation)
+        self.origin_nodes: List[OriginNode] = nodes_to_dedup.copy()
+        self.related_models = [_.original_graph.model for _ in self.origin_nodes]
+
+    def assemble(self, multi_model_placement: Dict[Model, GPUDevice]) -> Tuple[Node, GPUDevice]:
+        for node in self.origin_nodes:
+            if node.original_graph.model in multi_model_placement:
+                new_node = Node(node.original_graph, node.id,
+                                f'M_{node.original_graph.model.model_id}_{node.name}',
+                                node.operation)
+                return new_node, multi_model_placement[node.original_graph.model]
+        raise ValueError(f'DedupInputNode {self.name} does not contain nodes from multi_model')
+
+    def _fork_to(self, graph: Graph):
+        DedupInputNode(graph, self.id, self.origin_nodes)._register()
+
+    def __repr__(self) -> str:
+        return (f'DedupNode(id={self.id}, name={self.name}, '
+                f'len(nodes_to_dedup)={len(self.origin_nodes)})')
+
+
+class DedupInputOptimizer(AbstractOptimizer):
+    def __init__(self) -> None:
+        pass
+
+    def _check_supported_evaluator(self, evaluator):
+        for e in _supported_evaluators:
+            if isinstance(evaluator, e):
+                return True
+        return False
+
+    def _check_deduplicate_by_node(self, root_node, node_to_check):
+        if root_node == node_to_check:
+            return True
+        if root_node.operation.type == '_inputs' and \
+                node_to_check.operation.type == '_inputs' and \
+                isinstance(root_node, OriginNode) and \
+                isinstance(node_to_check, OriginNode):
+            if self._check_supported_evaluator(root_node.original_graph.model.evaluator):
+                return False
+            if root_node.original_graph.model.evaluator == node_to_check.original_graph.model.evaluator:
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    def convert(self, logical_plan: LogicalPlan) -> None:
+        nodes_to_skip = set()
+        while True:  # repeat until the logical_graph converges
+            input_nodes = logical_plan.logical_graph.get_nodes_by_type("_inputs")
+            # _PseudoOperation(type_name="_inputs"))
+            root_node = None
+            for node in input_nodes:
+                if node in nodes_to_skip:
+                    continue
+                root_node = node
+                break
+            if root_node is None:
+                break  # end of convert
+            else:
+                nodes_to_dedup = []
+                for node in input_nodes:
+                    if node in nodes_to_skip:
+                        continue
+                    if self._check_deduplicate_by_node(root_node, node):
+                        nodes_to_dedup.append(node)
+                assert len(nodes_to_dedup) >= 1
+                if len(nodes_to_dedup) == 1:
+                    assert nodes_to_dedup[0] == root_node
+                    nodes_to_skip.add(root_node)
+                else:
+                    dedup_node = DedupInputNode(logical_plan.logical_graph, uid(), nodes_to_dedup)._register()
+                    for edge in logical_plan.logical_graph.edges:
+                        if edge.head in nodes_to_dedup:
+                            edge.head = dedup_node
+                        if edge.tail in nodes_to_dedup:
+                            edge.tail = dedup_node
+                    for node in nodes_to_dedup:
+                        node.remove()
diff --git a/nni/retiarii/execution/python.py b/nni/retiarii/execution/python.py
new file mode 100644
index 0000000000000000000000000000000000000000..06ca48796a477705029388a1bfff86f4e31bdf07
--- /dev/null
+++ b/nni/retiarii/execution/python.py
@@ -0,0 +1,63 @@
+from typing import Dict, Any, Type
+
+import torch.nn as nn
+
+from ..graph import Evaluator, Model
+from ..integration_api import receive_trial_parameters
+from ..utils import ContextStack
+from .base import BaseExecutionEngine
+from .utils import get_mutation_dict, mutation_dict_to_summary
+
+
+class PythonGraphData:
+    def __init__(self, class_: Type[nn.Module], init_parameters: Dict[str, Any],
+                 mutation: Dict[str, Any], evaluator: Evaluator) -> None:
+        self.class_ = class_
+        self.init_parameters = init_parameters
+        self.mutation = mutation
+        self.evaluator = evaluator
+        self.mutation_summary = mutation_dict_to_summary(mutation)
+
+    def dump(self) -> dict:
+        return {
+            'class': self.class_,
+            'init_parameters': self.init_parameters,
+            'mutation': self.mutation,
+            # engine needs to call dump here,
+            # otherwise, evaluator will become binary
+            # also, evaluator can be none in tests
+            'evaluator': self.evaluator._dump() if self.evaluator is not None else None,
+            'mutation_summary': self.mutation_summary
+        }
+
+    @staticmethod
+    def load(data) -> 'PythonGraphData':
+        return PythonGraphData(data['class'], data['init_parameters'], data['mutation'], Evaluator._load(data['evaluator']))
+
+
+class PurePythonExecutionEngine(BaseExecutionEngine):
+    """
+    This is the execution engine that doesn't rely on the Python-IR converter.
+
+    We don't explicitly state this independence for now. The front end needs to decide which converter
+    (or no converter) to use depending on the execution type. In the future, that logic may be moved into this execution engine.
+
+    The execution engine needs to store the class path of the base model and its init parameters, so that it can re-initialize the model
+    with the mutation dict in the context, and the mutable modules are created as fixed instances on the fly.
+    """
+
+    @classmethod
+    def pack_model_data(cls, model: Model) -> Any:
+        mutation = get_mutation_dict(model)
+        graph_data = PythonGraphData(model.python_class, model.python_init_params, mutation, model.evaluator)
+        return graph_data
+
+    @classmethod
+    def trial_execute_graph(cls) -> None:
+        graph_data = PythonGraphData.load(receive_trial_parameters())
+
+        def _model():
+            return graph_data.class_(**graph_data.init_parameters)
+
+        with ContextStack('fixed', graph_data.mutation):
+            graph_data.evaluator._execute(_model)
diff --git a/nni/retiarii/execution/utils.py b/nni/retiarii/execution/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..db9efe85cdbea2aa1f4468a2ef406c3d64b0a6c6
--- /dev/null
+++ b/nni/retiarii/execution/utils.py
@@ -0,0 +1,25 @@
+from typing import Any, List
+from ..graph import Model
+
+def _unpack_if_only_one(ele: List[Any]):
+    if len(ele) == 1:
+        return ele[0]
+    return ele
+
+def get_mutation_dict(model: Model):
+    return {mut.mutator.label: _unpack_if_only_one(mut.samples) for mut in model.history}
+
+def mutation_dict_to_summary(mutation: dict) -> dict:
+    mutation_summary = {}
+    for label, samples in mutation.items():
+        # FIXME: this check might be wrong
+        if not isinstance(samples, list):
+            mutation_summary[label] = samples
+        else:
+            for i, sample in enumerate(samples):
+                mutation_summary[f'{label}_{i}'] = sample
+    return mutation_summary
+
+def get_mutation_summary(model: Model) -> dict:
+    mutation = get_mutation_dict(model)
+    return mutation_dict_to_summary(mutation)
diff --git a/nni/retiarii/experiment/__init__.py b/nni/retiarii/experiment/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nni/retiarii/experiment/pytorch.py b/nni/retiarii/experiment/pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c71698660ff919a933558c8ee38d29f6e93ff6
--- /dev/null
+++ b/nni/retiarii/experiment/pytorch.py
@@ -0,0 +1,407 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
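To make the summary format concrete, a small worked example for the helpers in utils.py above (the labels are made up): scalar samples pass through unchanged, while multi-sample mutators are expanded with an index suffix:

    from nni.retiarii.execution.utils import mutation_dict_to_summary

    # hypothetical mutation dict: one single-sample and one multi-sample mutator
    mutation = {'conv_width': 64, 'skip_pattern': [0, 1, 1]}
    print(mutation_dict_to_summary(mutation))
    # -> {'conv_width': 64, 'skip_pattern_0': 0, 'skip_pattern_1': 1, 'skip_pattern_2': 1}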
+
+import atexit
+import logging
+import os
+import socket
+import time
+import warnings
+from dataclasses import dataclass
+from pathlib import Path
+from subprocess import Popen
+from threading import Thread
+from typing import Any, List, Optional, Union
+
+import colorama
+import psutil
+import torch
+import torch.nn as nn
+import nni.runtime.log
+from nni.common.device import GPUDevice
+from nni.experiment import Experiment, launcher, management, rest
+from nni.experiment.config import utils
+from nni.experiment.config.base import ConfigBase
+from nni.experiment.config.training_service import TrainingServiceConfig
+from nni.experiment.pipe import Pipe
+from nni.tools.nnictl.command_utils import kill_command
+
+from ..codegen import model_to_pytorch_script
+from ..converter import convert_to_graph
+from ..converter.graph_gen import GraphConverterWithShape
+from ..execution import list_models, set_execution_engine
+from ..execution.utils import get_mutation_dict
+from ..graph import Evaluator
+from ..integration import RetiariiAdvisor
+from ..mutator import Mutator
+from ..nn.pytorch.mutator import extract_mutation_from_pt_module, process_inline_mutation
+from ..oneshot.interface import BaseOneShotTrainer
+from ..serializer import is_model_wrapped
+from ..strategy import BaseStrategy
+from ..strategy.utils import dry_run_for_formatted_search_space
+
+_logger = logging.getLogger(__name__)
+
+
+@dataclass(init=False)
+class RetiariiExeConfig(ConfigBase):
+    experiment_name: Optional[str] = None
+    search_space: Any = ''  # TODO: remove
+    trial_command: str = '_reserved'
+    trial_code_directory: utils.PathLike = '.'
+    trial_concurrency: int
+    trial_gpu_number: int = 0
+    devices: Optional[List[Union[str, GPUDevice]]] = None
+    max_experiment_duration: Optional[str] = None
+    max_trial_number: Optional[int] = None
+    max_concurrency_cgo: Optional[int] = None
+    batch_waiting_time: Optional[int] = None
+    nni_manager_ip: Optional[str] = None
+    debug: bool = False
+    log_level: Optional[str] = None
+    experiment_working_directory: utils.PathLike = '~/nni-experiments'
+    # remove configuration of tuner/assessor/advisor
+    training_service: TrainingServiceConfig
+    execution_engine: str = 'py'
+
+    # input used in GraphConverterWithShape. Currently supports shape tuples only.
+    dummy_input: Optional[List[int]] = None
+
+    # input used for the benchmark engine.
+    benchmark: Optional[str] = None
+
+    def __init__(self, training_service_platform: Optional[str] = None, **kwargs):
+        super().__init__(**kwargs)
+        if training_service_platform is not None:
+            assert 'training_service' not in kwargs
+            self.training_service = utils.training_service_config_factory(platform=training_service_platform)
+        self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry py'
+
+    def __setattr__(self, key, value):
+        fixed_attrs = {'search_space': '',
+                       'trial_command': '_reserved'}
+        if key in fixed_attrs and fixed_attrs[key] != value:
+            raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
+        # 'trial_code_directory' is handled differently because the path will be converted to absolute path by us
+        if key == 'trial_code_directory' and not (str(value) == '.' or os.path.isabs(value)):
+            raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
+        if key == 'execution_engine':
+            assert value in ['base', 'py', 'cgo', 'benchmark'], f'The specified execution engine "{value}" is not supported.'
+ self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry ' + value + self.__dict__[key] = value + + def validate(self, initialized_tuner: bool = False) -> None: + super().validate() + + @property + def _canonical_rules(self): + return _canonical_rules + + @property + def _validation_rules(self): + return _validation_rules + + +_canonical_rules = { +} + +_validation_rules = { + 'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'), + 'trial_concurrency': lambda value: value > 0, + 'trial_gpu_number': lambda value: value >= 0, + 'max_trial_number': lambda value: value > 0, + 'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"], + 'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class') +} + + +def preprocess_model(base_model, trainer, applied_mutators, full_ir=True, dummy_input=None): + # TODO: this logic might need to be refactored into execution engine + if full_ir: + try: + script_module = torch.jit.script(base_model) + except Exception as e: + _logger.error('Your base model cannot be parsed by torch.jit.script, please fix the following error:') + raise e + if dummy_input is not None: + # FIXME: this is a workaround as full tensor is not supported in configs + dummy_input = torch.randn(*dummy_input) + converter = GraphConverterWithShape() + base_model_ir = convert_to_graph(script_module, base_model, converter, dummy_input=dummy_input) + else: + base_model_ir = convert_to_graph(script_module, base_model) + # handle inline mutations + mutators = process_inline_mutation(base_model_ir) + else: + base_model_ir, mutators = extract_mutation_from_pt_module(base_model) + base_model_ir.evaluator = trainer + + if mutators is not None and applied_mutators: + raise RuntimeError('Have not supported mixed usage of LayerChoice/InputChoice and mutators, ' + 'do not use mutators when you use LayerChoice/InputChoice') + if mutators is not None: + applied_mutators = mutators + return base_model_ir, applied_mutators + + +def debug_mutated_model(base_model, trainer, applied_mutators): + """ + Locally run only one trial without launching an experiment for debug purpose, then exit. + For example, it can be used to quickly check shape mismatch. + + Specifically, it applies mutators (default to choose the first candidate for the choices) + to generate a new model, then run this model locally. + + Parameters + ---------- + base_model : nni.retiarii.nn.pytorch.nn.Module + the base model + trainer : nni.retiarii.evaluator + the training class of the generated models + applied_mutators : list + a list of mutators that will be applied on the base model for generating a new model + """ + base_model_ir, applied_mutators = preprocess_model(base_model, trainer, applied_mutators) + from ..strategy import _LocalDebugStrategy + strategy = _LocalDebugStrategy() + strategy.run(base_model_ir, applied_mutators) + _logger.info('local debug completed!') + + +class RetiariiExperiment(Experiment): + def __init__(self, base_model: nn.Module, trainer: Union[Evaluator, BaseOneShotTrainer], + applied_mutators: List[Mutator] = None, strategy: BaseStrategy = None): + # TODO: The current design of init interface of Retiarii experiment needs to be reviewed. 
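+        # Note: `trainer` may be either a graph-based `Evaluator` (classic
+        # multi-trial search) or a `BaseOneShotTrainer` (one-shot search);
+        # `run()` below dispatches between the two cases.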
+ self.config: RetiariiExeConfig = None + self.port: Optional[int] = None + + self.base_model = base_model + self.trainer = trainer + self.applied_mutators = applied_mutators + self.strategy = strategy + + self._dispatcher = RetiariiAdvisor() + self._dispatcher_thread: Optional[Thread] = None + self._proc: Optional[Popen] = None + self._pipe: Optional[Pipe] = None + + self.url_prefix = None + + # check for sanity + if not is_model_wrapped(base_model): + warnings.warn(colorama.Style.BRIGHT + colorama.Fore.RED + + '`@model_wrapper` is missing for the base model. The experiment might still be able to run, ' + 'but it may cause inconsistent behavior compared to the time when you add it.' + colorama.Style.RESET_ALL, + RuntimeWarning) + + def _start_strategy(self): + base_model_ir, self.applied_mutators = preprocess_model( + self.base_model, self.trainer, self.applied_mutators, + full_ir=self.config.execution_engine not in ['py', 'benchmark'], + dummy_input=self.config.dummy_input + ) + + _logger.info('Start strategy...') + search_space = dry_run_for_formatted_search_space(base_model_ir, self.applied_mutators) + self.update_search_space(search_space) + self.strategy.run(base_model_ir, self.applied_mutators) + _logger.info('Strategy exit') + # TODO: find out a proper way to show no more trial message on WebUI + # self._dispatcher.mark_experiment_as_ending() + + def start(self, port: int = 8080, debug: bool = False) -> None: + """ + Start the experiment in background. + This method will raise exception on failure. + If it returns, the experiment should have been successfully started. + Parameters + ---------- + port + The port of web UI. + debug + Whether to start in debug mode. + """ + atexit.register(self.stop) + + self.config = self.config.canonical_copy() + + # we will probably need a execution engine factory to make this clean and elegant + if self.config.execution_engine == 'base': + from ..execution.base import BaseExecutionEngine + engine = BaseExecutionEngine() + elif self.config.execution_engine == 'cgo': + from ..execution.cgo_engine import CGOExecutionEngine + + assert self.config.training_service.platform == 'remote', \ + "CGO execution engine currently only supports remote training service" + assert self.config.batch_waiting_time is not None + devices = self._construct_devices() + engine = CGOExecutionEngine(devices, + max_concurrency=self.config.max_concurrency_cgo, + batch_waiting_time=self.config.batch_waiting_time) + elif self.config.execution_engine == 'py': + from ..execution.python import PurePythonExecutionEngine + engine = PurePythonExecutionEngine() + elif self.config.execution_engine == 'benchmark': + from ..execution.benchmark import BenchmarkExecutionEngine + engine = BenchmarkExecutionEngine(self.config.benchmark) + set_execution_engine(engine) + + self.id = management.generate_experiment_id() + + if self.config.experiment_working_directory is not None: + log_dir = Path(self.config.experiment_working_directory, self.id, 'log') + else: + log_dir = Path.home() / f'nni-experiments/{self.id}/log' + nni.runtime.log.start_experiment_log(self.id, log_dir, debug) + + self._proc, self._pipe = launcher.start_experiment_retiarii(self.id, self.config, port, debug) + assert self._proc is not None + assert self._pipe is not None + + self.port = port # port will be None if start up failed + + # dispatcher must be launched after pipe initialized + # the logic to launch dispatcher in background should be refactored into dispatcher api + self._dispatcher = self._create_dispatcher() + 
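+        # The advisor message loop runs in a background thread, so that
+        # `_start_strategy()` below can block in the main thread.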
+        self._dispatcher_thread = Thread(target=self._dispatcher.run)
+        self._dispatcher_thread.start()
+
+        ips = [self.config.nni_manager_ip]
+        for interfaces in psutil.net_if_addrs().values():
+            for interface in interfaces:
+                if interface.family == socket.AF_INET:
+                    ips.append(interface.address)
+        ips = [f'http://{ip}:{port}' for ip in ips if ip]
+        msg = 'Web UI URLs: ' + colorama.Fore.CYAN + ' '.join(ips) + colorama.Style.RESET_ALL
+        _logger.info(msg)
+
+        exp_status_checker = Thread(target=self._check_exp_status)
+        exp_status_checker.start()
+        self._start_strategy()
+        # TODO: the experiment should be completed when strategy exits and there is no running job
+        _logger.info('Waiting for experiment to become DONE (you can ctrl+c if there are no running trial jobs)...')
+        exp_status_checker.join()
+
+    def _construct_devices(self):
+        devices = []
+        if hasattr(self.config.training_service, 'machine_list'):
+            for machine in self.config.training_service.machine_list:
+                assert machine.gpu_indices is not None, \
+                    'gpu_indices must be set in RemoteMachineConfig for CGO execution engine'
+                for gpu_idx in machine.gpu_indices:
+                    devices.append(GPUDevice(machine.host, gpu_idx))
+        return devices
+
+    def _create_dispatcher(self):
+        return self._dispatcher
+
+    def run(self, config: RetiariiExeConfig = None, port: int = 8080, debug: bool = False) -> None:
+        """
+        Run the experiment.
+        This function will block until the experiment finishes or errors out.
+        """
+        if isinstance(self.trainer, BaseOneShotTrainer):
+            self.trainer.fit()
+        else:
+            assert config is not None, 'You are using classic search mode, config cannot be None!'
+            self.config = config
+            self.start(port, debug)
+
+    def _check_exp_status(self) -> bool:
+        """
+        Check the experiment status periodically.
+        This function will block until the experiment finishes or errors out.
+        Return `True` when the experiment is done, or `False` when it has failed.
+        """
+        try:
+            while True:
+                time.sleep(10)
+                # this if is to deal with the situation that
+                # nnimanager is cleaned up by ctrl+c first
+                if self._proc.poll() is None:
+                    status = self.get_status()
+                else:
+                    return False
+                if status == 'DONE' or status == 'STOPPED':
+                    return True
+                if status == 'ERROR':
+                    return False
+        except KeyboardInterrupt:
+            _logger.warning('KeyboardInterrupt detected')
+        finally:
+            self.stop()
+
+    def stop(self) -> None:
+        """
+        Stop the background experiment.
+        """
+        _logger.info('Stopping experiment, please wait...')
+        atexit.unregister(self.stop)
+
+        # stop strategy first
+        if self._dispatcher_thread is not None:
+            self._dispatcher.stopping = True
+            self._dispatcher_thread.join(timeout=1)
+
+        if self.id is not None:
+            nni.runtime.log.stop_experiment_log(self.id)
+        if self._proc is not None:
+            try:
+                # this if is to deal with the situation that
+                # nnimanager is cleaned up by ctrl+c first
+                if self._proc.poll() is None:
+                    rest.delete(self.port, '/experiment')
+            except Exception as e:
+                _logger.exception(e)
+                _logger.warning('Cannot gracefully stop experiment, killing NNI process...')
+                kill_command(self._proc.pid)
+
+        if self._pipe is not None:
+            self._pipe.close()
+
+        self.id = None
+        self.port = None
+        self._proc = None
+        self._pipe = None
+        self._dispatcher = None
+        self._dispatcher_thread = None
+        _logger.info('Experiment stopped')
+
+    def export_top_models(self, top_k: int = 1, optimize_mode: str = 'maximize', formatter: str = 'dict') -> Any:
+        """
+        Export several top performing models.
+
+        For one-shot algorithms, only top-1 is supported. For others, ``optimize_mode`` and ``formatter`` are
+        available for customization.
+
+        Parameters
+        ----------
+        top_k : int
+            How many models are intended to be exported.
+        optimize_mode : str
+            ``maximize`` or ``minimize``. Not supported by one-shot algorithms.
+            ``optimize_mode`` is likely to be removed and defined in strategy in future.
+        formatter : str
+            Support ``code`` and ``dict``. Not supported by one-shot algorithms.
+            If ``code``, the python code of model will be returned.
+            If ``dict``, the mutation history will be returned.
+        """
+        if formatter == 'code':
+            assert self.config.execution_engine != 'py', 'You should use `dict` formatter when using Python execution engine.'
+        if isinstance(self.trainer, BaseOneShotTrainer):
+            assert top_k == 1, 'Only top_k == 1 is supported for now.'
+            return self.trainer.export()
+        else:
+            all_models = filter(lambda m: m.metric is not None, list_models())
+            assert optimize_mode in ['maximize', 'minimize']
+            all_models = sorted(all_models, key=lambda m: m.metric, reverse=optimize_mode == 'maximize')
+            assert formatter in ['code', 'dict'], 'Export formatter other than "code" and "dict" is not supported yet.'
+            if formatter == 'code':
+                return [model_to_pytorch_script(model) for model in all_models[:top_k]]
+            elif formatter == 'dict':
+                return [get_mutation_dict(model) for model in all_models[:top_k]]
+
+    def retrain_model(self, model):
+        """
+        Retrain the exported model, and test it to output test accuracy.
+        """
+        raise NotImplementedError
diff --git a/nni/retiarii/fixed.py b/nni/retiarii/fixed.py
new file mode 100644
index 0000000000000000000000000000000000000000..e85cea582e051d0cedfdedfc57b6459da2c65521
--- /dev/null
+++ b/nni/retiarii/fixed.py
@@ -0,0 +1,40 @@
+import json
+import logging
+from pathlib import Path
+from typing import Union, Dict, Any
+
+from .utils import ContextStack
+
+_logger = logging.getLogger(__name__)
+
+
+def fixed_arch(fixed_arch: Union[str, Path, Dict[str, Any]], verbose=True):
+    """
+    Load architecture from ``fixed_arch`` and apply to model. This should be used as a context manager. For example,
+
+    .. code-block:: python
+
+        with fixed_arch('/path/to/export.json'):
+            model = Model(3, 224, 224)
+
+    Parameters
+    ----------
+    fixed_arch : str, Path or dict
+        Path to the JSON that stores the architecture, or dict that stores the exported architecture.
+    verbose : bool
+        Print log messages if set to True.
+
+    Returns
+    -------
+    ContextStack
+        Context manager that provides a fixed architecture when creating the model.
+    """
+
+    if isinstance(fixed_arch, (str, Path)):
+        with open(fixed_arch) as f:
+            fixed_arch = json.load(f)
+
+    if verbose:
+        _logger.info('Fixed architecture: %s', fixed_arch)
+
+    return ContextStack('fixed', fixed_arch)
diff --git a/nni/retiarii/graph.py b/nni/retiarii/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..069f07dcf767b92d5894ce48b6540e5e722d000b
--- /dev/null
+++ b/nni/retiarii/graph.py
@@ -0,0 +1,798 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Model representation.
+"""
+
+import abc
+import json
+from enum import Enum
+from typing import (Any, Dict, Iterable, List, Optional, Tuple, Type, Union, overload)
+
+from .operation import Cell, Operation, _IOPseudoOperation
+from .utils import uid
+
+__all__ = ['Model', 'ModelStatus', 'Graph', 'Node', 'Edge', 'Mutation', 'IllegalGraphError', 'MetricData']
+
+
+MetricData = Any
+"""
+Type hint for graph metrics (loss, accuracy, etc).
+"""
+
+EdgeEndpoint = Tuple['Node', Optional[int]]
+"""
+Type hint for an edge's endpoint. The int indicates the node's slot order.
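+For example, ``(node, 0)`` denotes the first slot of ``node``, while ``(node, None)``
+is used when the node has only a single slot.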
+""" + + +class Evaluator(abc.ABC): + """ + Evaluator of a model. An evaluator should define where the training code is, and the configuration of + training code. The configuration includes basic runtime information trainer needs to know (such as number of GPUs) + or tune-able parameters (such as learning rate), depending on the implementation of training code. + + Each config should define how it is interpreted in ``_execute()``, taking only one argument which is the mutated model class. + For example, functional evaluator might directly import the function and call the function. + """ + + def __repr__(self): + items = ', '.join(['%s=%r' % (k, v) for k, v in self.__dict__.items()]) + return f'{self.__class__.__name__}({items})' + + @staticmethod + def _load(ir: Any) -> 'Evaluator': + evaluator_type = ir.get('type') + if isinstance(evaluator_type, str): + # for debug purposes only + for subclass in Evaluator.__subclasses__(): + if subclass.__name__ == evaluator_type: + evaluator_type = subclass + break + assert issubclass(evaluator_type, Evaluator) + return evaluator_type._load(ir) + + @abc.abstractmethod + def _dump(self) -> Any: + """ + Subclass implements ``_dump`` for their own serialization. + They should return a dict, with a key ``type`` which equals ``self.__class__``, + and optionally other keys. + """ + pass + + @abc.abstractmethod + def _execute(self, model_cls: type) -> Any: + pass + + @abc.abstractmethod + def __eq__(self, other) -> bool: + pass + + +class Model: + """ + Represents a neural network model. + + During mutation, one `Model` object is created for each trainable snapshot. + For example, consider a mutator that insert a node at an edge for each iteration. + In one iteration, the mutator invokes 4 primitives: add node, remove edge, add edge to head, add edge to tail. + These 4 primitives operates in one `Model` object. + When they are all done the model will be set to "frozen" (trainable) status and be submitted to execution engine. + And then a new iteration starts, and a new `Model` object is created by forking last model. + + Attributes + ---------- + python_class + Python class that base model is converted from. + python_init_params + Initialization parameters of python class. + status + See `ModelStatus`. + root_graph + The outermost graph which usually takes dataset as input and feeds output to loss function. + graphs + All graphs (subgraphs) in this model. + evaluator + Model evaluator + history + Mutation history. + `self` is directly mutated from `self.history[-1]`; + `self.history[-1] is mutated from `self.history[-2]`, and so on. + `self.history[0]` is the base graph. + metric + Training result of the model, or `None` if it's not yet trained or has failed to train. + intermediate_metrics + Intermediate training metrics. If the model is not trained, it's an empty list. 
+ """ + + def __init__(self, _internal=False): + assert _internal, '`Model()` is private, use `model.fork()` instead' + self.model_id: int = uid('model') + self.python_class: Optional[Type] = None + self.python_init_params: Optional[Dict[str, Any]] = None + + self.status: ModelStatus = ModelStatus.Mutating + + self._root_graph_name: str = '_model' + self.graphs: Dict[str, Graph] = {} + self.evaluator: Optional[Evaluator] = None + + self.history: List['Model'] = [] + + self.metric: Optional[MetricData] = None + self.intermediate_metrics: List[MetricData] = [] + + def __repr__(self): + return f'Model(model_id={self.model_id}, status={self.status}, graphs={list(self.graphs.keys())}, ' + \ + f'evaluator={self.evaluator}, metric={self.metric}, intermediate_metrics={self.intermediate_metrics}, ' + \ + f'python_class={self.python_class})' + + @property + def root_graph(self) -> 'Graph': + return self.graphs[self._root_graph_name] + + def fork(self) -> 'Model': + """ + Create a new model which has same topology, names, and IDs to current one. + + Can only be invoked on a frozen model. + The new model will be in `Mutating` state. + + This API is used in mutator base class. + """ + new_model = Model(_internal=True) + new_model._root_graph_name = self._root_graph_name + new_model.python_class = self.python_class + new_model.python_init_params = self.python_init_params + new_model.graphs = {name: graph._fork_to(new_model) for name, graph in self.graphs.items()} + new_model.evaluator = self.evaluator # TODO this needs a clever copy (not deepcopy) if we need mutation + new_model.history = [*self.history] + # Note: the history is not updated. It will be updated when the model is changed, that is in mutator. + return new_model + + @staticmethod + def _load(ir: Any) -> 'Model': + model = Model(_internal=True) + for graph_name, graph_data in ir.items(): + if graph_name != '_evaluator': + Graph._load(model, graph_name, graph_data)._register() + if '_evaluator' in ir: + model.evaluator = Evaluator._load(ir['_evaluator']) + return model + + def _dump(self) -> Any: + ret = {name: graph._dump() for name, graph in self.graphs.items()} + if self.evaluator is not None: + ret['_evaluator'] = self.evaluator._dump() + return ret + + def get_nodes(self) -> Iterable['Node']: + """ + Traverse through all the nodes. + """ + for graph in self.graphs.values(): + for node in graph.nodes: + yield node + + def get_nodes_by_label(self, label: str) -> List['Node']: + """ + Traverse all the nodes to find the matched node(s) with the given label. + There could be multiple nodes with the same label. Name space name can uniquely + identify a graph or node. + + NOTE: the implementation does not support the class abstraction + """ + matched_nodes = [] + for graph in self.graphs.values(): + nodes = graph.get_nodes_by_label(label) + matched_nodes.extend(nodes) + return matched_nodes + + def get_nodes_by_type(self, type_name: str) -> List['Node']: + """ + Traverse all the nodes to find the matched node(s) with the given type. + """ + matched_nodes = [] + for graph in self.graphs.values(): + nodes = graph.get_nodes_by_type(type_name) + matched_nodes.extend(nodes) + return matched_nodes + + def get_node_by_name(self, node_name: str) -> 'Node': + """ + Traverse all the nodes to find the matched node with the given name. 
+ """ + matched_nodes = [] + for graph in self.graphs.values(): + nodes = graph.get_nodes_by_name(node_name) + matched_nodes.extend(nodes) + assert len(matched_nodes) <= 1 + if matched_nodes: + return matched_nodes[0] + else: + return None + + def get_node_by_python_name(self, python_name: str) -> 'Node': + """ + Traverse all the nodes to find the matched node with the given python_name. + """ + matched_nodes = [] + for graph in self.graphs.values(): + nodes = graph.get_nodes_by_python_name(python_name) + matched_nodes.extend(nodes) + # assert len(matched_nodes) <= 1 + if matched_nodes: + return matched_nodes[0] + else: + return None + + def get_cell_nodes(self) -> List['Node']: + matched_nodes = [] + for graph in self.graphs.values(): + nodes = [node for node in graph.nodes if isinstance(node.operation, Cell)] + matched_nodes.extend(nodes) + return matched_nodes + + +class ModelStatus(Enum): + """ + The status of model. + + A model is created in `Mutating` status. + When the mutation is done and the model get ready to train, its status becomes `Frozen`. + When training started, the model's status becomes `Training`. + If training is successfully ended, model's `metric` attribute get set and its status becomes `Trained`. + If training failed, the status becomes `Failed`. + """ + Mutating = "mutating" + Frozen = "frozen" + Training = "training" + Trained = "trained" + Failed = "failed" + + +_InputPseudoUid = -1 +_OutputPseudoUid = -2 + + +class Graph: + """ + Graph topology. + + This class simply represents the topology, with no semantic meaning. + All other information like metric, non-graph functions, mutation history, etc should go to `Model`. + + Each graph belongs to and only belongs to one `Model`. + + Attributes + ---------- + model + The model containing (and owning) this graph. + id + Unique ID in the model. + If two models have graphs of identical ID, they are semantically the same graph. + Typically this means one graph is mutated from another, or they are both mutated from one ancestor. + name + Mnemonic name of this graph. It should have an one-to-one mapping with ID. + input_names + Optional mnemonic names of input parameters. + output_names + Optional mnemonic names of output values. + input_node + ... + output_node + ... + hidden_nodes + ... + nodes + All input/output/hidden nodes. + edges + ... + python_name + The name of torch.nn.Module, should have one-to-one mapping with items in python model. + """ + + def __init__(self, model: Model, graph_id: int, name: str = None, _internal: bool = False): + assert _internal, '`Graph()` is private' + + self.model: Model = model + self.id: int = graph_id + self.name: str = name or f'_generated_{graph_id}' + + # `python_name` is `None` by default. It should be set after initialization if it is needed. 
+ self.python_name: Optional[str] = None + + self.input_node: Node = Node(self, _InputPseudoUid, '_inputs', _IOPseudoOperation('_inputs'), _internal=True) + self.output_node: Node = Node(self, _OutputPseudoUid, '_outputs', _IOPseudoOperation('_outputs'), _internal=True) + self.hidden_nodes: List[Node] = [] + + self.edges: List[Edge] = [] + + def __repr__(self): + return f'Graph(id={self.id}, name={self.name}, ' + \ + f'input_names={self.input_node.operation.io_names}, ' + \ + f'output_names={self.output_node.operation.io_names}, ' + \ + f'num_hidden_nodes={len(self.hidden_nodes)}, num_edges={len(self.edges)})' + + @property + def nodes(self) -> List['Node']: + return [self.input_node, self.output_node] + self.hidden_nodes + + def _add_input(self, input_name) -> None: + if self.input_node.operation.io_names is None: + self.input_node.operation.io_names = [input_name] + else: + self.input_node.operation.io_names.append(input_name) + + def _add_output(self, output_name) -> None: + if self.output_node.operation.io_names is None: + self.output_node.operation.io_names = [output_name] + else: + self.output_node.operation.io_names.append(output_name) + + @overload + def add_node(self, name: str, operation: Operation) -> 'Node': ... + @overload + def add_node(self, name: str, type_name: str, parameters: Dict[str, Any] = None) -> 'Node': ... + + def add_node(self, name, operation_or_type, parameters=None): + if isinstance(operation_or_type, Operation): + op = operation_or_type + else: + op = Operation.new(operation_or_type, parameters, name) + return Node(self, uid(), name, op, _internal=True)._register() + + @overload + def insert_node_on_edge(self, edge: 'Edge', name: str, operation: Operation) -> 'Node': ... + @overload + def insert_node_on_edge(self, edge: 'Edge', name: str, type_name: str, parameters: Dict[str, Any] = None) -> 'Node': ... + + def insert_node_on_edge(self, edge, name, operation_or_type, parameters=None) -> 'Node': + if isinstance(operation_or_type, Operation): + op = operation_or_type + else: + op = Operation.new(operation_or_type, parameters, name) + new_node = Node(self, uid(), name, op, _internal=True)._register() + # update edges + self.add_edge((edge.head, edge.head_slot), (new_node, None)) + self.add_edge((new_node, None), (edge.tail, edge.tail_slot)) + self.del_edge(edge) + return new_node + + # mutation + def add_edge(self, head: EdgeEndpoint, tail: EdgeEndpoint) -> 'Edge': + assert head[0].graph is self and tail[0].graph is self + return Edge(head, tail, _internal=True)._register() + + def del_edge(self, edge: 'Edge') -> None: + self.edges.remove(edge) + + def get_node_by_name(self, name: str) -> Optional['Node']: + """ + Returns the node which has specified name; or returns `None` if no node has this name. + """ + found = [node for node in self.nodes if node.name == name] + return found[0] if found else None + + def get_node_by_python_name(self, python_name: str) -> Optional['Node']: + """ + Returns the node which has specified python_name; or returns `None` if no node has this python_name. + """ + found = [node for node in self.nodes if node.python_name == python_name] + return found[0] if found else None + + def get_nodes_by_type(self, operation_type: str) -> List['Node']: + """ + Returns nodes whose operation is specified typed. 
+ """ + return [node for node in self.hidden_nodes if node.operation.type == operation_type] + + def get_node_by_id(self, node_id: int) -> Optional['Node']: + """ + Returns the node which has specified name; or returns `None` if no node has this name. + """ + found = [node for node in self.nodes if node.id == node_id] + return found[0] if found else None + + def get_nodes_by_label(self, label: str) -> List['Node']: + return [node for node in self.hidden_nodes if node.label == label] + + def get_nodes_by_name(self, name: str) -> List['Node']: + return [node for node in self.hidden_nodes if node.name == name] + + def get_nodes_by_python_name(self, python_name: str) -> Optional['Node']: + return [node for node in self.nodes if node.python_name == python_name] + + def topo_sort(self) -> List['Node']: + node_to_fanin = {} + curr_nodes = [] + for node in self.nodes: + fanin = len(node.incoming_edges) + node_to_fanin[node] = fanin + if fanin == 0: + curr_nodes.append(node) + + sorted_nodes = [] + while curr_nodes: + curr_node = curr_nodes.pop(0) + sorted_nodes.append(curr_node) + # use successor_slots because a node may connect to another node multiple times + # to different slots + for successor_slot in curr_node.successor_slots: + successor = successor_slot[0] + node_to_fanin[successor] -= 1 + if node_to_fanin[successor] == 0: + curr_nodes.append(successor) + + for key in node_to_fanin: + assert node_to_fanin[key] == 0, '{}, fanin: {}, predecessor: {}, edges: {}, fanin: {}, keys: {}'.format( + key, + node_to_fanin[key], + key.predecessors[0], + self.edges, + node_to_fanin.values(), + node_to_fanin.keys()) + + return sorted_nodes + + def fork(self) -> 'Graph': + """ + Fork the model and returns corresponding graph in new model. + This shortcut might be helpful because many algorithms only cares about "stem" subgraph instead of whole model. + """ + return self.model.fork().graphs[self.name] + + def __eq__(self, other: object) -> bool: + return self is other + + def _fork_to(self, model: Model, name_prefix='') -> 'Graph': + new_graph = Graph(model, self.id, name_prefix + self.name, _internal=True)._register() + # TODO: use node copy instead + new_graph.input_node.operation.io_names = self.input_node.operation.io_names + new_graph.output_node.operation.io_names = self.output_node.operation.io_names + new_graph.input_node.update_label(self.input_node.label) + new_graph.output_node.update_label(self.output_node.label) + new_graph.python_name = self.python_name + + for node in self.hidden_nodes: + new_node = Node(new_graph, node.id, node.name, node.operation, _internal=True) + new_node.python_name = node.python_name + new_node.update_label(node.label) + new_node._register() + + id_to_new_node = {node.id: node for node in new_graph.nodes} + + for edge in self.edges: + new_head = id_to_new_node[edge.head.id] + new_tail = id_to_new_node[edge.tail.id] + Edge((new_head, edge.head_slot), (new_tail, edge.tail_slot), _internal=True)._register() + + return new_graph + + def _copy(self) -> 'Graph': + # Copy this graph inside the model. + # The new graph will have identical topology, but its nodes' name and ID will be different. 
+ new_graph = Graph(self.model, uid(), _internal=True)._register() + new_graph.input_node.operation.io_names = self.input_node.operation.io_names + new_graph.output_node.operation.io_names = self.output_node.operation.io_names + new_graph.input_node.update_label(self.input_node.label) + new_graph.output_node.update_label(self.output_node.label) + new_graph.python_name = self.python_name + + id_to_new_node = {} # old node ID -> new node object + + for old_node in self.hidden_nodes: + new_node = Node(new_graph, uid(), None, old_node.operation, _internal=True)._register() + new_node.python_name = old_node.python_name + new_node.update_label(old_node.label) + id_to_new_node[old_node.id] = new_node + + for edge in self.edges: + new_head = id_to_new_node[edge.head.id] + new_tail = id_to_new_node[edge.tail.id] + Edge((new_head, edge.head_slot), (new_tail, edge.tail_slot), _internal=True)._register() + + return new_graph + + def _register(self) -> 'Graph': + self.model.graphs[self.name] = self + return self + + def _rename_graph(self, old_name, new_name): + self.model.graphs[old_name].name = new_name + self.model.graphs[new_name] = self.model.graphs[old_name] + del self.model.graphs[old_name] + + @staticmethod + def _load(model: Model, name: str, ir: Any) -> 'Graph': + graph = Graph(model, uid(), name, _internal=True) + graph.input_node.operation.io_names = ir.get('inputs') + graph.output_node.operation.io_names = ir.get('outputs') + for node_name, node_data in ir['nodes'].items(): + Node._load(graph, node_name, node_data)._register() + for edge_data in ir['edges']: + Edge._load(graph, edge_data)._register() + return graph + + def _dump(self) -> Any: + return { + 'inputs': self.input_node.operation.io_names, + 'outputs': self.output_node.operation.io_names, + 'nodes': {node.name: node._dump() for node in self.hidden_nodes}, + 'edges': [edge._dump() for edge in self.edges] + } + + +class Node: + """ + An operation or an opaque subgraph inside a graph. + + Each node belongs to and only belongs to one `Graph`. + Nodes should never be created with constructor. Use `Graph.add_node()` instead. + + The node itself is for topology only. + Information of tensor calculation should all go inside `operation` attribute. + + TODO: parameter of subgraph (cell) + It's easy to assign parameters on cell node, but it's hard to "use" them. + We need to design a way to reference stored cell parameters in inner node operations. + e.g. `self.fc = Linear(self.units)` <- how to express `self.units` in IR? + + Attributes + ---------- + graph + The graph containing this node. + id + Unique ID in the model. + If two models have nodes with same ID, they are semantically the same node. + name + Mnemonic name. It should have an one-to-one mapping with ID. + python_name + The name of torch.nn.Module, should have one-to-one mapping with items in python model. + label + Optional. If two nodes have the same label, they are considered same by the mutator. + operation + ... + cell + Read only shortcut to get the referenced subgraph. + If this node is not a subgraph (is a primitive operation), accessing `cell` will raise an error. + predecessors + Predecessor nodes of this node in the graph. This is an optional mutation helper. + successors + Successor nodes of this node in the graph. This is an optional mutation helper. + incoming_edges + Incoming edges of this node in the graph. This is an optional mutation helper. + outgoing_edges + Outgoing edges of this node in the graph. This is an optional mutation helper. 
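+
+    For example, a mutator may rewrite a node in place with something like
+    ``node.update_operation('Conv2d', {'in_channels': 16, 'out_channels': 32})``
+    (the parameter names here are illustrative only).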
+ """ + + def __init__(self, graph, node_id, name, operation, _internal=False): + self.graph: Graph = graph + self.id: int = node_id + self.name: str = name or f'_generated_{node_id}' + # `python_name` is `None` by default. It should be set after initialization if it is needed. + self.python_name: Optional[str] = None + # TODO: the operation is likely to be considered editable by end-user and it will be hard to debug + # maybe we should copy it here or make Operation class immutable, in next release + self.operation: Operation = operation + self.label: Optional[str] = None + + def __repr__(self): + return f'Node(id={self.id}, name={self.name}, python_name={self.python_name}, label={self.label}, operation={self.operation})' + + @property + def predecessors(self) -> List['Node']: + return sorted(set(edge.head for edge in self.incoming_edges), key=(lambda node: node.id)) + + @property + def successors(self) -> List['Node']: + return sorted(set(edge.tail for edge in self.outgoing_edges), key=(lambda node: node.id)) + + @property + def successor_slots(self) -> List[Tuple['Node', Union[int, None]]]: + return set((edge.tail, edge.tail_slot) for edge in self.outgoing_edges) + + @property + def incoming_edges(self) -> List['Edge']: + return [edge for edge in self.graph.edges if edge.tail is self] + + @property + def outgoing_edges(self) -> List['Edge']: + return [edge for edge in self.graph.edges if edge.head is self] + + @property + def cell(self) -> Graph: + assert isinstance(self.operation, Cell) + return self.graph.model.graphs[self.operation.parameters['cell']] + + def update_label(self, label: str) -> None: + self.label = label + + @overload + def update_operation(self, operation: Operation) -> None: ... + @overload + def update_operation(self, type_name: str, parameters: Dict[str, Any] = None) -> None: ... + + def update_operation(self, operation_or_type, parameters=None): + if isinstance(operation_or_type, Operation): + self.operation = operation_or_type + else: + self.operation = Operation.new(operation_or_type, parameters) + + # mutation + def remove(self) -> None: + assert not self.incoming_edges and not self.outgoing_edges + self.graph.hidden_nodes.remove(self) + + # mutation + def specialize_cell(self) -> Graph: + """ + Only available if the operation is a cell. + Duplicate the cell template and let this node reference to newly created copy. 
+ """ + new_cell = self.cell._copy()._register() + self.operation = Cell(new_cell.name) + return new_cell + + def __eq__(self, other: object) -> bool: + return self is other + + def __hash__(self) -> int: + return hash(id(self)) + + def _register(self) -> 'Node': + self.graph.hidden_nodes.append(self) + return self + + @staticmethod + def _load(graph: Graph, name: str, ir: Any) -> 'Node': + if ir['operation']['type'] == '_cell': + op = Cell(ir['operation']['cell_name'], ir['operation'].get('parameters', {}), attributes=ir['operation'].get('attributes', {})) + else: + op = Operation.new(ir['operation']['type'], + ir['operation'].get('parameters', {}), + attributes=ir['operation'].get('attributes', {})) + node = Node(graph, uid(), name, op) + if 'label' in ir: + node.update_label(ir['label']) + return node + + def _dump(self) -> Any: + ret = {'operation': {'type': self.operation.type, 'parameters': self.operation.parameters, 'attributes': self.operation.attributes}} + if isinstance(self.operation, Cell): + ret['operation']['cell_name'] = self.operation.cell_name + if self.label is not None: + ret['label'] = self.label + if self.python_name is not None: + ret['python_name'] = self.python_name + return ret + + +class Edge: + """ + A tensor, or "data flow", between two nodes. + + Example forward code snippet: + ``` + a, b, c = split(x) + p = concat(a, c) + q = sum(b, p) + z = relu(q) + ``` + + Edges in above snippet: + + head: (split, 0), tail: (concat, 0) # a in concat + + head: (split, 2), tail: (concat, 1) # c in concat + + head: (split, 1), tail: (sum, -1 or 0) # b in sum + + head: (concat, null), tail: (sum, -1 or 1) # p in sum + + head: (sum, null), tail: (relu, null) # q in relu + + Attributes + ---------- + graph + ... + head + Head node. + tail + Tail node. + head_slot + Index of outputs in head node. + If the node has only one output, this should be `null`. + tail_slot + Index of inputs in tail node. + If the node has only one input, this should be `null`. + If the node does not care about order, this can be `-1`. + """ + + def __init__(self, head: EdgeEndpoint, tail: EdgeEndpoint, _internal: bool = False): + assert _internal, '`Edge()` is private' + self.graph: Graph = head[0].graph + self.head: Node = head[0] + self.tail: Node = tail[0] + self.head_slot: Optional[int] = head[1] + self.tail_slot: Optional[int] = tail[1] + + def __repr__(self): + return f'Edge(head=({self.head}, {self.head_slot}), tail=({self.tail}, {self.tail_slot}))' + + # mutation + def remove(self) -> None: + self.graph.edges.remove(self) + + def _register(self) -> 'Edge': + self.graph.edges.append(self) + return self + + @staticmethod + def _load(graph: Graph, ir: Any) -> 'Edge': + head = graph.get_node_by_name(ir['head'][0]) + tail = graph.get_node_by_name(ir['tail'][0]) + assert head is not None and tail is not None + return Edge((head, ir['head'][1]), (tail, ir['tail'][1]), _internal=True) + + def _dump(self) -> Any: + return { + 'head': [self.head.name, self.head_slot], + 'tail': [self.tail.name, self.tail_slot] + } + + +class Mutation: + """ + An execution of mutation, which consists of four parts: a mutator, a list of decisions (choices), + the model that it comes from, and the model that it becomes. + + In general cases, the mutation logs are not reliable and should not be replayed as the mutators can + be arbitrarily complex. However, for inline mutations, the labels correspond to mutator labels here, + this can be useful for metadata visualization and python execution mode. 
+
+    Attributes
+    ----------
+    mutator
+        Mutator.
+    samples
+        Decisions/choices.
+    from_
+        Model that it comes from.
+    to
+        Model that it becomes.
+    """
+
+    def __init__(self, mutator: 'Mutator', samples: List[Any], from_: Model, to: Model):  # noqa: F821
+        self.mutator: 'Mutator' = mutator  # noqa: F821
+        self.samples: List[Any] = samples
+        self.from_: Model = from_
+        self.to: Model = to
+
+    def __repr__(self):
+        return f'Mutation(mutator={self.mutator}, samples={self.samples}, from={self.from_}, to={self.to})'
+
+
+class IllegalGraphError(ValueError):
+    def __init__(self, graph, *args):
+        self._debug_dump_graph(graph)
+        super().__init__(*args)
+
+    @staticmethod
+    def _debug_dump_graph(graph):
+        if isinstance(graph, Graph):
+            graph = graph._dump()
+        with open('generated/debug.json', 'w') as dump_file:
+            json.dump(graph, dump_file, indent=4)
+
+
+class DebugEvaluator(Evaluator):
+    @staticmethod
+    def _load(ir: Any) -> 'DebugEvaluator':
+        return DebugEvaluator()
+
+    def _dump(self) -> Any:
+        return {'type': DebugEvaluator}
+
+    def _execute(self, model_cls: type) -> Any:
+        pass
+
+    def __eq__(self, other) -> bool:
+        return True
diff --git a/nni/retiarii/integration.py b/nni/retiarii/integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f2e3d0a1632cfd4b7593d001ce8fc17710474bc
--- /dev/null
+++ b/nni/retiarii/integration.py
@@ -0,0 +1,181 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import os
+from typing import Any, Callable
+
+import nni
+from nni.common.serializer import PayloadTooLarge
+from nni.runtime.msg_dispatcher_base import MsgDispatcherBase
+from nni.runtime.protocol import CommandType, send
+from nni.utils import MetricType
+
+from .graph import MetricData
+from .integration_api import register_advisor
+
+_logger = logging.getLogger(__name__)
+
+
+class RetiariiAdvisor(MsgDispatcherBase):
+    """
+    This class connects Retiarii components to the NNI backend.
+
+    It functions as the main thread when running a Retiarii experiment through NNI.
+    The strategy is launched as its thread, which calls APIs in the execution engine. The execution
+    engine will then find the advisor singleton and send payloads to the advisor.
+
+    When metrics are sent back, the advisor receives the payloads first and calls the callback
+    function (that is a member function in the graph listener).
+
+    The conversion the advisor provides is minimal. It is only a send/receive module, and the
+    execution engine needs to handle all the rest.
+
+    FIXME
+        How does the advisor exit when the strategy exits?
+ + Attributes + ---------- + send_trial_callback + + request_trial_jobs_callback + + trial_end_callback + + intermediate_metric_callback + + final_metric_callback + """ + + def __init__(self): + super(RetiariiAdvisor, self).__init__() + register_advisor(self) # register the current advisor as the "global only" advisor + self.search_space = None + + self.send_trial_callback: Callable[[dict], None] = None + self.request_trial_jobs_callback: Callable[[int], None] = None + self.trial_end_callback: Callable[[int, bool], None] = None + self.intermediate_metric_callback: Callable[[int, MetricData], None] = None + self.final_metric_callback: Callable[[int, MetricData], None] = None + + self.parameters_count = 0 + + def handle_initialize(self, data): + """callback for initializing the advisor + Parameters + ---------- + data: dict + search space + """ + self.handle_update_search_space(data) + send(CommandType.Initialized, '') + + def _validate_placement_constraint(self, placement_constraint): + if placement_constraint is None: + raise ValueError('placement_constraint is None') + if not 'type' in placement_constraint: + raise ValueError('placement_constraint must have `type`') + if not 'gpus' in placement_constraint: + raise ValueError('placement_constraint must have `gpus`') + if placement_constraint['type'] not in ['None', 'GPUNumber', 'Device']: + raise ValueError('placement_constraint.type must be either `None`,. `GPUNumber` or `Device`') + if placement_constraint['type'] == 'None' and len(placement_constraint['gpus']) > 0: + raise ValueError('placement_constraint.gpus must be an empty list when type == None') + if placement_constraint['type'] == 'GPUNumber': + if len(placement_constraint['gpus']) != 1: + raise ValueError('placement_constraint.gpus currently only support one host when type == GPUNumber') + for e in placement_constraint['gpus']: + if not isinstance(e, int): + raise ValueError('placement_constraint.gpus must be a list of number when type == GPUNumber') + if placement_constraint['type'] == 'Device': + for e in placement_constraint['gpus']: + if not isinstance(e, tuple): + raise ValueError('placement_constraint.gpus must be a list of tuple when type == Device') + if not (len(e) == 2 and isinstance(e[0], str) and isinstance(e[1], int)): + raise ValueError('placement_constraint.gpus`s tuple must be (str, int)') + + def send_trial(self, parameters, placement_constraint=None): + """ + Send parameters to NNI. + + Parameters + ---------- + parameters : Any + Any payload. + + Returns + ------- + int + Parameter ID that is assigned to this parameter, + which will be used for identification in future. + """ + self.parameters_count += 1 + if placement_constraint is None: + placement_constraint = { + 'type': 'None', + 'gpus': [] + } + self._validate_placement_constraint(placement_constraint) + new_trial = { + 'parameter_id': self.parameters_count, + 'parameters': parameters, + 'parameter_source': 'algorithm', + 'placement_constraint': placement_constraint + } + _logger.debug('New trial sent: %s', new_trial) + + try: + send_payload = nni.dump(new_trial, pickle_size_limit=int(os.getenv('PICKLE_SIZE_LIMIT', 64 * 1024))) + except PayloadTooLarge: + raise ValueError( + 'Serialization failed when trying to dump the model because payload too large (larger than 64 KB). ' + 'This is usually caused by pickling large objects (like datasets) by mistake. ' + 'See the full error traceback for details and https://nni.readthedocs.io/en/stable/NAS/Serialization.html ' + 'for how to resolve such issue. 
' + ) + + # trial parameters can be super large, disable pickle size limit here + # nevertheless, there could still be blocked by pipe / nni-manager + send(CommandType.NewTrialJob, send_payload) + + if self.send_trial_callback is not None: + self.send_trial_callback(parameters) # pylint: disable=not-callable + return self.parameters_count + + def mark_experiment_as_ending(self): + send(CommandType.NoMoreTrialJobs, '') + + def handle_request_trial_jobs(self, num_trials): + _logger.debug('Request trial jobs: %s', num_trials) + if self.request_trial_jobs_callback is not None: + self.request_trial_jobs_callback(num_trials) # pylint: disable=not-callable + + def handle_update_search_space(self, data): + _logger.debug('Received search space: %s', data) + self.search_space = data + + def handle_trial_end(self, data): + _logger.debug('Trial end: %s', data) + self.trial_end_callback(nni.load(data['hyper_params'])['parameter_id'], # pylint: disable=not-callable + data['event'] == 'SUCCEEDED') + + def handle_report_metric_data(self, data): + _logger.debug('Metric reported: %s', data) + if data['type'] == MetricType.REQUEST_PARAMETER: + raise ValueError('Request parameter not supported') + elif data['type'] == MetricType.PERIODICAL: + self.intermediate_metric_callback(data['parameter_id'], # pylint: disable=not-callable + self._process_value(data['value'])) + elif data['type'] == MetricType.FINAL: + self.final_metric_callback(data['parameter_id'], # pylint: disable=not-callable + self._process_value(data['value'])) + + @staticmethod + def _process_value(value) -> Any: # hopefully a float + value = nni.load(value) + if isinstance(value, dict): + if 'default' in value: + return value['default'] + else: + return value + return value diff --git a/nni/retiarii/integration_api.py b/nni/retiarii/integration_api.py new file mode 100644 index 0000000000000000000000000000000000000000..cceff57cabec4319212dbfc1acdc1980b1a347f8 --- /dev/null +++ b/nni/retiarii/integration_api.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from typing import NewType, Any + +import nni + +# NOTE: this is only for passing flake8, we cannot import RetiariiAdvisor +# because it would induce cycled import +RetiariiAdvisor = NewType('RetiariiAdvisor', Any) + +_advisor: 'RetiariiAdvisor' = None + + +def get_advisor() -> 'RetiariiAdvisor': + global _advisor + assert _advisor is not None + return _advisor + + +def register_advisor(advisor: 'RetiariiAdvisor'): + global _advisor + assert _advisor is None + _advisor = advisor + + +def send_trial(parameters: dict, placement_constraint=None) -> int: + """ + Send a new trial. Executed on tuner end. + Return a ID that is the unique identifier for this trial. + """ + return get_advisor().send_trial(parameters, placement_constraint) + +def receive_trial_parameters() -> dict: + """ + Received a new trial. Executed on trial end. + Reload with our json loads because NNI didn't use Retiarii serializer to load the data. + """ + params = nni.get_next_parameter() + return params + + +def get_experiment_id() -> str: + return nni.get_experiment_id() diff --git a/nni/retiarii/mutator.py b/nni/retiarii/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..abf69703fd228fb2a8e3b322bf7af33e6a8c0e38 --- /dev/null +++ b/nni/retiarii/mutator.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from typing import (Any, Iterable, List, Optional, Tuple) + +from .graph import Model, Mutation, ModelStatus + + +__all__ = ['Sampler', 'Mutator', 'InvalidMutation'] + + +Choice = Any + + +class Sampler: + """ + Handles `Mutator.choice()` calls. + """ + + def choice(self, candidates: List[Choice], mutator: 'Mutator', model: Model, index: int) -> Choice: + raise NotImplementedError() + + def mutation_start(self, mutator: 'Mutator', model: Model) -> None: + pass + + def mutation_end(self, mutator: 'Mutator', model: Model) -> None: + pass + + +class Mutator: + """ + Mutates graphs in model to generate new model. + `Mutator` class will be used in two places: + + 1. Inherit `Mutator` to implement graph mutation logic. + 2. Use `Mutator` subclass to implement NAS strategy. + + In scenario 1, the subclass should implement `Mutator.mutate()` interface with `Mutator.choice()`. + In scenario 2, strategy should use constructor or `Mutator.bind_sampler()` to initialize subclass, + and then use `Mutator.apply()` to mutate model. + For certain mutator subclasses, strategy or sampler can use `Mutator.dry_run()` to predict choice candidates. + # Method names are open for discussion. + + If mutator has a label, in most cases, it means that this mutator is applied to nodes with this label. + """ + + def __init__(self, sampler: Optional[Sampler] = None, label: Optional[str] = None): + self.sampler: Optional[Sampler] = sampler + self.label: Optional[str] = label + self._cur_model: Optional[Model] = None + self._cur_choice_idx: Optional[int] = None + + def bind_sampler(self, sampler: Sampler) -> 'Mutator': + """ + Set the sampler which will handle `Mutator.choice` calls. + """ + self.sampler = sampler + return self + + def apply(self, model: Model) -> Model: + """ + Apply this mutator on a model. + Returns mutated model. + The model will be copied before mutation and the original model will not be modified. + """ + assert self.sampler is not None + copy = model.fork() + self._cur_model = copy + self._cur_choice_idx = 0 + self._cur_samples = [] + self.sampler.mutation_start(self, copy) + self.mutate(copy) + self.sampler.mutation_end(self, copy) + copy.history.append(Mutation(self, self._cur_samples, model, copy)) + copy.status = ModelStatus.Frozen + self._cur_model = None + self._cur_choice_idx = None + return copy + + def dry_run(self, model: Model) -> Tuple[List[List[Choice]], Model]: + """ + Dry run mutator on a model to collect choice candidates. + If you invoke this method multiple times on same or different models, + it may or may not return identical results, depending on how the subclass implements `Mutator.mutate()`. + """ + sampler_backup = self.sampler + recorder = _RecorderSampler() + self.sampler = recorder + new_model = self.apply(model) + self.sampler = sampler_backup + return recorder.recorded_candidates, new_model + + def mutate(self, model: Model) -> None: + """ + Abstract method to be implemented by subclass. + Mutate a model in place. + """ + raise NotImplementedError() + + def choice(self, candidates: Iterable[Choice]) -> Choice: + """ + Ask sampler to make a choice. 
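+        The chosen value is also recorded into the samples of the current mutation.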
+ """ + assert self.sampler is not None and self._cur_model is not None and self._cur_choice_idx is not None + ret = self.sampler.choice(list(candidates), self, self._cur_model, self._cur_choice_idx) + self._cur_samples.append(ret) + self._cur_choice_idx += 1 + return ret + + +class _RecorderSampler(Sampler): + def __init__(self): + self.recorded_candidates: List[List[Choice]] = [] + + def choice(self, candidates: List[Choice], *args) -> Choice: + self.recorded_candidates.append(candidates) + return candidates[0] + + +class InvalidMutation(Exception): + pass diff --git a/nni/retiarii/nn/__init__.py b/nni/retiarii/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/retiarii/nn/pytorch/__init__.py b/nni/retiarii/nn/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bcc8c45f3f3ac13ac94ea474e058c348a5e3d6ef --- /dev/null +++ b/nni/retiarii/nn/pytorch/__init__.py @@ -0,0 +1,4 @@ +from .api import * +from .component import * +from .nn import * +from .hypermodule import * \ No newline at end of file diff --git a/nni/retiarii/nn/pytorch/api.py b/nni/retiarii/nn/pytorch/api.py new file mode 100644 index 0000000000000000000000000000000000000000..4249bc98c3646ccedaa23a0ef694692e3b7ca3e9 --- /dev/null +++ b/nni/retiarii/nn/pytorch/api.py @@ -0,0 +1,400 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import warnings +from typing import Any, List, Union, Dict, Optional + +import torch +import torch.nn as nn + +from nni.common.serializer import Translatable +from nni.retiarii.serializer import basic_unit +from .utils import Mutable, generate_new_label, get_fixed_value + + +__all__ = ['LayerChoice', 'InputChoice', 'ValueChoice', 'Placeholder', 'ChosenInputs'] + + +class LayerChoice(Mutable): + """ + Layer choice selects one of the ``candidates``, then apply it on inputs and return results. + + Layer choice does not allow itself to be nested. + + Parameters + ---------- + candidates : list of nn.Module or OrderedDict + A module list to be selected from. + prior : list of float + Prior distribution used in random sampling. + label : str + Identifier of the layer choice. + + Attributes + ---------- + length : int + Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended. + names : list of str + Names of candidates. + choices : list of Module + Deprecated. A list of all candidate modules in the layer choice module. + ``list(layer_choice)`` is recommended, which will serve the same purpose. + + Notes + ----- + ``candidates`` can be a list of modules or a ordered dict of named modules, for example, + + .. code-block:: python + + self.op_choice = LayerChoice(OrderedDict([ + ("conv3x3", nn.Conv2d(3, 16, 128)), + ("conv5x5", nn.Conv2d(5, 16, 128)), + ("conv7x7", nn.Conv2d(7, 16, 128)) + ])) + + Elements in layer choice can be modified or deleted. Use ``del self.op_choice["conv5x5"]`` or + ``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet. 
+ """ + + # FIXME: prior is designed but not supported yet + + @classmethod + def create_fixed_module(cls, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *, + label: Optional[str] = None, **kwargs): + chosen = get_fixed_value(label) + if isinstance(candidates, list): + return candidates[int(chosen)] + else: + return candidates[chosen] + + def __init__(self, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *, + prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs): + super(LayerChoice, self).__init__() + if 'key' in kwargs: + warnings.warn(f'"key" is deprecated. Assuming label.') + label = kwargs['key'] + if 'return_mask' in kwargs: + warnings.warn(f'"return_mask" is deprecated. Ignoring...') + if 'reduction' in kwargs: + warnings.warn(f'"reduction" is deprecated. Ignoring...') + self.candidates = candidates + self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))] + assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.' + self._label = generate_new_label(label) + + self.names = [] + if isinstance(candidates, dict): + for name, module in candidates.items(): + assert name not in ["length", "reduction", "return_mask", "_key", "key", "names"], \ + "Please don't use a reserved name '{}' for your module.".format(name) + self.add_module(name, module) + self.names.append(name) + elif isinstance(candidates, list): + for i, module in enumerate(candidates): + self.add_module(str(i), module) + self.names.append(str(i)) + else: + raise TypeError("Unsupported candidates type: {}".format(type(candidates))) + self._first_module = self._modules[self.names[0]] # to make the dummy forward meaningful + + @property + def key(self): + return self._key() + + @torch.jit.ignore + def _key(self): + warnings.warn('Using key to access the identifier of LayerChoice is deprecated. Please use label instead.', + category=DeprecationWarning) + return self._label + + @property + def label(self): + return self._label + + def __getitem__(self, idx): + if isinstance(idx, str): + return self._modules[idx] + return list(self)[idx] + + def __setitem__(self, idx, module): + key = idx if isinstance(idx, str) else self.names[idx] + return setattr(self, key, module) + + def __delitem__(self, idx): + if isinstance(idx, slice): + for key in self.names[idx]: + delattr(self, key) + else: + if isinstance(idx, str): + key, idx = idx, self.names.index(idx) + else: + key = self.names[idx] + delattr(self, key) + del self.names[idx] + + def __len__(self): + return len(self.names) + + def __iter__(self): + return map(lambda name: self._modules[name], self.names) + + @property + def choices(self): + return self._choices() + + @torch.jit.ignore + def _choices(self): + warnings.warn("layer_choice.choices is deprecated. Use `list(layer_choice)` instead.", category=DeprecationWarning) + return list(self) + + def forward(self, x): + warnings.warn('You should not run forward of this module directly.') + return self._first_module(x) + + def __repr__(self): + return f'LayerChoice({self.candidates}, label={repr(self.label)})' + + +class InputChoice(Mutable): + """ + Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys). + Use ``reduction`` to specify how chosen inputs are reduced into one output. A few options are: + + * ``none``: do nothing and return the list directly. + * ``sum``: summing all the chosen inputs. + * ``mean``: taking the average of all chosen inputs. + * ``concat``: concatenate all chosen inputs at dimension 1. 
+
+    We don't support customizing reduction yet.
+
+    Parameters
+    ----------
+    n_candidates : int
+        Number of inputs to choose from. It is required.
+    n_chosen : int
+        Number of inputs to choose. If None, the mutator is instructed to select any number of them.
+    reduction : str
+        ``mean``, ``concat``, ``sum`` or ``none``.
+    prior : list of float
+        Prior distribution used in random sampling.
+    label : str
+        Identifier of the input choice.
+    """
+
+    @classmethod
+    def create_fixed_module(cls, n_candidates: int, n_chosen: Optional[int] = 1, reduction: str = 'sum', *,
+                            prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
+        return ChosenInputs(get_fixed_value(label), reduction=reduction)
+
+    def __init__(self, n_candidates: int, n_chosen: Optional[int] = 1,
+                 reduction: str = 'sum', *,
+                 prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
+        super(InputChoice, self).__init__()
+        if 'key' in kwargs:
+            warnings.warn('"key" is deprecated. Assuming label.')
+            label = kwargs['key']
+        if 'return_mask' in kwargs:
+            warnings.warn('"return_mask" is deprecated. Ignoring...')
+        if 'choose_from' in kwargs:
+            warnings.warn('"choose_from" is deprecated. Ignoring...')
+        self.n_candidates = n_candidates
+        self.n_chosen = n_chosen
+        self.reduction = reduction
+        self.prior = prior or [1 / n_candidates for _ in range(n_candidates)]
+        assert self.reduction in ['mean', 'concat', 'sum', 'none']
+        self._label = generate_new_label(label)
+
+    @property
+    def key(self):
+        return self._key()
+
+    @torch.jit.ignore
+    def _key(self):
+        warnings.warn('Using key to access the identifier of InputChoice is deprecated. Please use label instead.',
+                      category=DeprecationWarning)
+        return self._label
+
+    @property
+    def label(self):
+        return self._label
+
+    def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:
+        warnings.warn('You should not run forward of this module directly.')
+        return candidate_inputs[0]
+
+    def __repr__(self):
+        return f'InputChoice(n_candidates={self.n_candidates}, n_chosen={self.n_chosen}, ' \
+               f'reduction={repr(self.reduction)}, label={repr(self.label)})'
+
+
+class ValueChoice(Translatable, Mutable):
+    """
+    ValueChoice is to choose one from ``candidates``.
+
+    In most use scenarios, ValueChoice should be passed to the init parameters of a serializable module. For example,
+
+    .. code-block:: python
+
+        class Net(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.conv = nn.Conv2d(3, nn.ValueChoice([32, 64]), kernel_size=nn.ValueChoice([3, 5, 7]))
+
+            def forward(self, x):
+                return self.conv(x)
+
+    In case you want to search over a parameter that is used repeatedly, this is also possible by sharing the same value choice instance.
+    (Sharing the label has the same effect.) For example,
+
+    .. code-block:: python
+
+        class Net(nn.Module):
+            def __init__(self):
+                super().__init__()
+                hidden_dim = nn.ValueChoice([128, 512])
+                self.fc = nn.Sequential(
+                    nn.Linear(64, hidden_dim),
+                    nn.Linear(hidden_dim, 10)
+                )
+
+                # the following code has the same effect.
+                # self.fc = nn.Sequential(
+                #     nn.Linear(64, nn.ValueChoice([128, 512], label='dim')),
+                #     nn.Linear(nn.ValueChoice([128, 512], label='dim'), 10)
+                # )
+
+            def forward(self, x):
+                return self.fc(x)
+
+    Note that ValueChoice should be used directly. Transformations like ``nn.Linear(32, nn.ValueChoice([64, 128]) * 2)``
+    are not supported.
+
+    Another common use case is to initialize the values to choose from in init and call the module in forward to get the chosen value.
+    Usually, this is used to pass a mutable value to a functional API like ``torch.xxx`` or ``nn.functional.xxx``.
+    For example,
+
+    .. code-block:: python
+
+        class Net(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.dropout_rate = nn.ValueChoice([0., 1.])
+
+            def forward(self, x):
+                return F.dropout(x, self.dropout_rate())
+
+    Parameters
+    ----------
+    candidates : list
+        List of values to choose from.
+    prior : list of float
+        Prior distribution to sample from.
+    label : str
+        Identifier of the value choice.
+    """
+
+    # FIXME: prior is designed but not supported yet
+
+    @classmethod
+    def create_fixed_module(cls, candidates: List[Any], *, label: Optional[str] = None, **kwargs):
+        return get_fixed_value(label)
+
+    def __init__(self, candidates: List[Any], *, prior: Optional[List[float]] = None, label: Optional[str] = None):
+        super().__init__()
+        self.candidates = candidates
+        self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]
+        assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'
+        self._label = generate_new_label(label)
+        self._accessor = []
+
+    @property
+    def label(self):
+        return self._label
+
+    def forward(self):
+        warnings.warn('You should not run forward of this module directly.')
+        return self.candidates[0]
+
+    def _translate(self):
+        # Will function as a value when used in serializer.
+        return self.access(self.candidates[0])
+
+    def __repr__(self):
+        return f'ValueChoice({self.candidates}, label={repr(self.label)})'
+
+    def access(self, value):
+        if not self._accessor:
+            return value
+        try:
+            v = value
+            for a in self._accessor:
+                v = v[a]
+        except KeyError:
+            raise KeyError(''.join([f'[{a}]' for a in self._accessor]) + f' does not work on {value}')
+        return v
+
+    def __copy__(self):
+        return self
+
+    def __deepcopy__(self, memo):
+        new_item = ValueChoice(self.candidates, label=self.label)
+        new_item._accessor = [*self._accessor]
+        return new_item
+
+    def __getitem__(self, item):
+        """
+        Get a sub-element of value choice.
+
+        The underlying implementation is to clone the current instance, and append item to "accessor", which records all
+        the history getitem calls. For example, when accessor is ``[a, b, c]``, the value choice will return ``vc[a][b][c]``
+        where ``vc`` is the original value choice.
+        """
+        access = copy.deepcopy(self)
+        access._accessor.append(item)
+        for candidate in self.candidates:
+            access.access(candidate)
+        return access
+
+
+@basic_unit
+class Placeholder(nn.Module):
+    # TODO: docstring
+
+    def __init__(self, label, **related_info):
+        self.label = label
+        self.related_info = related_info
+        super().__init__()
+
+    def forward(self, x):
+        return x
+
+
+class ChosenInputs(nn.Module):
+    """
+    A module that chooses from a tensor list and outputs a reduced tensor.
+    The already-chosen version of InputChoice.
+    """
+
+    def __init__(self, chosen: Union[List[int], int], reduction: str):
+        super().__init__()
+        self.chosen = chosen if isinstance(chosen, list) else [chosen]
+        self.reduction = reduction
+
+    def forward(self, candidate_inputs):
+        return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])
+
+    def _tensor_reduction(self, reduction_type, tensor_list):
+        if reduction_type == 'none':
+            return tensor_list
+        if not tensor_list:
+            return None  # empty; return None for now
+        if len(tensor_list) == 1:
+            return tensor_list[0]
+        if reduction_type == 'sum':
+            return sum(tensor_list)
+        if reduction_type == 'mean':
+            return sum(tensor_list) / len(tensor_list)
+        if reduction_type == 'concat':
+            return torch.cat(tensor_list, dim=1)
+        raise ValueError(f'Unrecognized reduction policy: "{reduction_type}"')
diff --git a/nni/retiarii/nn/pytorch/component.py b/nni/retiarii/nn/pytorch/component.py
new file mode 100644
index 0000000000000000000000000000000000000000..0092bc1ce3c06bbd23d56c45086d6dc9309a17ea
--- /dev/null
+++ b/nni/retiarii/nn/pytorch/component.py
@@ -0,0 +1,229 @@
+import copy
+from collections import OrderedDict
+from typing import Callable, List, Union, Tuple, Optional
+
+import torch
+import torch.nn as nn
+
+from .api import LayerChoice, InputChoice
+from .nn import ModuleList
+
+from .nasbench101 import NasBench101Cell, NasBench101Mutator
+from .utils import Mutable, generate_new_label, get_fixed_value
+
+
+__all__ = ['Repeat', 'Cell', 'NasBench101Cell', 'NasBench101Mutator', 'NasBench201Cell']
+
+
+class Repeat(Mutable):
+    """
+    Repeat a block by a variable number of times.
+
+    Parameters
+    ----------
+    blocks : function, list of function, module or list of module
+        The block to be repeated. If not a list, it will be replicated into a list.
+        If a list, it should be of length ``max_depth``; the modules will be instantiated in order and
+        only a prefix of them (the chosen depth) will be used.
+        If a function, it will be called (the argument is the index) to instantiate a module.
+        Otherwise the module will be deep-copied.
+    depth : int or tuple of int
+        If one number, the block will be repeated by a fixed number of times. If a tuple, it should be (min, max),
+        meaning that the block will be repeated at least ``min`` times and at most ``max`` times.
+    """
+
+    @classmethod
+    def create_fixed_module(cls,
+                            blocks: Union[Callable[[int], nn.Module],
+                                          List[Callable[[int], nn.Module]],
+                                          nn.Module,
+                                          List[nn.Module]],
+                            depth: Union[int, Tuple[int, int]], *, label: Optional[str] = None):
+        repeat = get_fixed_value(label)
+        return nn.Sequential(*cls._replicate_and_instantiate(blocks, repeat))
+
+    def __init__(self,
+                 blocks: Union[Callable[[int], nn.Module],
+                               List[Callable[[int], nn.Module]],
+                               nn.Module,
+                               List[nn.Module]],
+                 depth: Union[int, Tuple[int, int]], *, label: Optional[str] = None):
+        super().__init__()
+        self._label = generate_new_label(label)
+        self.min_depth = depth if isinstance(depth, int) else depth[0]
+        self.max_depth = depth if isinstance(depth, int) else depth[1]
+        assert self.max_depth >= self.min_depth > 0
+        self.blocks = nn.ModuleList(self._replicate_and_instantiate(blocks, self.max_depth))
+
+    @property
+    def label(self):
+        return self._label
+
+    def forward(self, x):
+        for block in self.blocks:
+            x = block(x)
+        return x
+
+    @staticmethod
+    def _replicate_and_instantiate(blocks, repeat):
+        if not isinstance(blocks, list):
+            if isinstance(blocks, nn.Module):
+                blocks = [blocks] + [copy.deepcopy(blocks) for _ in range(repeat - 1)]
+            else:
+                blocks = [blocks for _ in range(repeat)]
+        assert len(blocks) > 0
+        assert repeat <= len(blocks), f'Not enough blocks to be used. {repeat} expected, only found {len(blocks)}.'
+        blocks = blocks[:repeat]
+        if not isinstance(blocks[0], nn.Module):
+            blocks = [b(i) for i, b in enumerate(blocks)]
+        return blocks
+
+
+class Cell(nn.Module):
+    """
+    Cell structure [zophnas]_ [zophnasnet]_ that is popularly used in NAS literature.
+
+    A cell consists of multiple "nodes". Each node is a sum of multiple operators.
+    Each operator is chosen from ``op_candidates``, and takes one input from previous nodes or predecessors.
+    A predecessor is an input of the cell. The output of the cell is the concatenation of some of the nodes
+    in the cell (currently all of them).
+
+    Parameters
+    ----------
+    op_candidates : function or list of module
+        A list of modules to choose from, or a function that returns a list of modules.
+    num_nodes : int
+        Number of nodes in the cell.
+    num_ops_per_node: int
+        Number of operators in each node. The output of each node is the sum of all operators in the node. Default: 1.
+    num_predecessors : int
+        Number of inputs of the cell. The input to forward should be a list of tensors. Default: 1.
+    merge_op : str
+        Currently only ``all`` is supported, which differs slightly from the cell described in the references. Default: all.
+    label : str
+        Identifier of the cell. Cells sharing the same label will semantically share the same choice.
+
+    References
+    ----------
+    .. [zophnas] Barret Zoph, Quoc V. Le, "Neural Architecture Search with Reinforcement Learning". https://arxiv.org/abs/1611.01578
+    .. [zophnasnet] Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le,
+        "Learning Transferable Architectures for Scalable Image Recognition". https://arxiv.org/abs/1707.07012
+    """
+
+    # TODO:
+    # Support loose end concat (shape inference on the following cells)
+    # How to dynamically create convolution with stride as the first node
+
+    def __init__(self,
+                 op_candidates: Union[Callable, List[nn.Module]],
+                 num_nodes: int,
+                 num_ops_per_node: int = 1,
+                 num_predecessors: int = 1,
+                 merge_op: str = 'all',
+                 label: Optional[str] = None):
+        super().__init__()
+        self._label = generate_new_label(label)
+        self.ops = ModuleList()
+        self.inputs = ModuleList()
+        self.num_nodes = num_nodes
+        self.num_ops_per_node = num_ops_per_node
+        self.num_predecessors = num_predecessors
+        for i in range(num_nodes):
+            self.ops.append(ModuleList())
+            self.inputs.append(ModuleList())
+            for k in range(num_ops_per_node):
+                if isinstance(op_candidates, list):
+                    assert len(op_candidates) > 0 and isinstance(op_candidates[0], nn.Module)
+                    ops = copy.deepcopy(op_candidates)
+                else:
+                    ops = op_candidates()
+                self.ops[-1].append(LayerChoice(ops, label=f'{self.label}__op_{i}_{k}'))
+                self.inputs[-1].append(InputChoice(i + num_predecessors, 1, label=f'{self.label}/input_{i}_{k}'))
+        assert merge_op in ['all']  # TODO: loose_end
+        self.merge_op = merge_op
+
+    @property
+    def label(self):
+        return self._label
+
+    def forward(self, x: List[torch.Tensor]):
+        states = list(x)  # copy, so that the list passed in by the caller is not mutated
+        for ops, inps in zip(self.ops, self.inputs):
+            current_state = []
+            for op, inp in zip(ops, inps):
+                current_state.append(op(inp(states)))
+            current_state = torch.sum(torch.stack(current_state), 0)
+            states.append(current_state)
+        return torch.cat(states[self.num_predecessors:], 1)
+
+
+class NasBench201Cell(nn.Module):
+    """
+    Cell structure that is proposed in NAS-Bench-201 [nasbench201]_ .
+
+    This cell is a densely connected DAG with ``num_tensors`` nodes, where each node is a tensor.
+    For every i < j, there is an edge from the i-th node to the j-th node.
+    Each edge in this DAG is associated with an operation transforming the hidden state from the source node
+    to the target node. All possible operations are selected from a predefined operation set, defined in ``op_candidates``.
+    Each of the ``op_candidates`` should be a callable that accepts input dimension and output dimension,
+    and returns a ``Module``.
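+
+    For instance, a plausible operation candidate (a sketch for illustration; not part of the benchmark
+    definition) could be:
+
+    .. code-block:: python
+
+        def conv_bn_relu(in_features, out_features):
+            return nn.Sequential(
+                nn.Conv2d(in_features, out_features, 3, padding=1),
+                nn.BatchNorm2d(out_features),
+                nn.ReLU()
+            )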
+
+    Input of this cell should be of shape :math:`[N, C_{in}, *]`, while output should be :math:`[N, C_{out}, *]`.
+
+    The space size of this cell would be :math:`|op|^{N(N-1)/2}`, where :math:`|op|` is the number of operation candidates,
+    and :math:`N` is defined by ``num_tensors``.
+
+    Parameters
+    ----------
+    op_candidates : list of callable
+        Operation candidates. Each should be a function that accepts input and output features, returning ``nn.Module``.
+    in_features : int
+        Input dimension of cell.
+    out_features : int
+        Output dimension of cell.
+    num_tensors : int
+        Number of tensors in the cell (input included). Default: 4.
+    label : str
+        Identifier of the cell. Cells sharing the same label will semantically share the same choice.
+
+    References
+    ----------
+    .. [nasbench201] Dong, X. and Yang, Y., 2020. Nas-bench-201: Extending the scope of reproducible neural architecture search.
+        arXiv preprint arXiv:2001.00326.
+    """
+
+    @staticmethod
+    def _make_dict(x):
+        if isinstance(x, list):
+            return OrderedDict([(str(i), t) for i, t in enumerate(x)])
+        return OrderedDict(x)
+
+    def __init__(self, op_candidates: List[Callable[[int, int], nn.Module]],
+                 in_features: int, out_features: int, num_tensors: int = 4,
+                 label: Optional[str] = None):
+        super().__init__()
+        self._label = generate_new_label(label)
+
+        self.layers = nn.ModuleList()
+        self.in_features = in_features
+        self.out_features = out_features
+        self.num_tensors = num_tensors
+
+        op_candidates = self._make_dict(op_candidates)
+
+        for tid in range(1, num_tensors):
+            node_ops = nn.ModuleList()
+            for j in range(tid):
+                inp = in_features if j == 0 else out_features
+                op_choices = OrderedDict([(key, cls(inp, out_features))
+                                          for key, cls in op_candidates.items()])
+                node_ops.append(LayerChoice(op_choices, label=f'{self._label}__{j}_{tid}'))  # put __ here to be compatible with base engine
+            self.layers.append(node_ops)
+
+    def forward(self, inputs):
+        tensors = [inputs]
+        for layer in self.layers:
+            current_tensor = []
+            for i, op in enumerate(layer):
+                current_tensor.append(op(tensors[i]))
+            current_tensor = torch.sum(torch.stack(current_tensor), 0)
+            tensors.append(current_tensor)
+        return tensors[-1]
diff --git a/nni/retiarii/nn/pytorch/hypermodule.py b/nni/retiarii/nn/pytorch/hypermodule.py
new file mode 100644
index 0000000000000000000000000000000000000000..771d84162805c49de2da7902fd532eb3f632d348
--- /dev/null
+++ b/nni/retiarii/nn/pytorch/hypermodule.py
@@ -0,0 +1,255 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
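+
+# Search space of unary and binary operators from "Searching for Activation Functions"
+# (https://arxiv.org/abs/1710.05941). As an illustration, a Swish-like activation
+# x * sigmoid(x) is expressible here with identity unaries combined by BinarySigmoid.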
+ +import torch +import torch.nn as nn + +from nni.retiarii.serializer import basic_unit + +from .api import LayerChoice +from .utils import generate_new_label +from ...utils import version_larger_equal + +__all__ = ['AutoActivation'] + +TorchVersion = '1.5.0' + +# ============== unary function modules ============== + +@basic_unit +class UnaryIdentity(nn.Module): + def forward(self, x): + return x + +@basic_unit +class UnaryNegative(nn.Module): + def forward(self, x): + return -x + +@basic_unit +class UnaryAbs(nn.Module): + def forward(self, x): + return torch.abs(x) + +@basic_unit +class UnarySquare(nn.Module): + def forward(self, x): + return torch.square(x) + +@basic_unit +class UnaryPow(nn.Module): + def forward(self, x): + return torch.pow(x, 3) + +@basic_unit +class UnarySqrt(nn.Module): + def forward(self, x): + return torch.sqrt(x) + +@basic_unit +class UnaryMul(nn.Module): + def __init__(self): + super().__init__() + # element-wise for now, will change to per-channel trainable parameter + self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32)) # pylint: disable=not-callable + def forward(self, x): + return x * self.beta + +@basic_unit +class UnaryAdd(nn.Module): + def __init__(self): + super().__init__() + # element-wise for now, will change to per-channel trainable parameter + self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32)) # pylint: disable=not-callable + def forward(self, x): + return x + self.beta + +@basic_unit +class UnaryLogAbs(nn.Module): + def forward(self, x): + return torch.log(torch.abs(x) + 1e-7) + +@basic_unit +class UnaryExp(nn.Module): + def forward(self, x): + return torch.exp(x) + +@basic_unit +class UnarySin(nn.Module): + def forward(self, x): + return torch.sin(x) + +@basic_unit +class UnaryCos(nn.Module): + def forward(self, x): + return torch.cos(x) + +@basic_unit +class UnarySinh(nn.Module): + def forward(self, x): + return torch.sinh(x) + +@basic_unit +class UnaryCosh(nn.Module): + def forward(self, x): + return torch.cosh(x) + +@basic_unit +class UnaryTanh(nn.Module): + def forward(self, x): + return torch.tanh(x) + +if not version_larger_equal(torch.__version__, TorchVersion): + @basic_unit + class UnaryAsinh(nn.Module): + def forward(self, x): + return torch.asinh(x) + +@basic_unit +class UnaryAtan(nn.Module): + def forward(self, x): + return torch.atan(x) + +if not version_larger_equal(torch.__version__, TorchVersion): + @basic_unit + class UnarySinc(nn.Module): + def forward(self, x): + return torch.sinc(x) + +@basic_unit +class UnaryMax(nn.Module): + def forward(self, x): + return torch.max(x, torch.zeros_like(x)) + +@basic_unit +class UnaryMin(nn.Module): + def forward(self, x): + return torch.min(x, torch.zeros_like(x)) + +@basic_unit +class UnarySigmoid(nn.Module): + def forward(self, x): + return torch.sigmoid(x) + +@basic_unit +class UnaryLogExp(nn.Module): + def forward(self, x): + return torch.log(1 + torch.exp(x)) + +@basic_unit +class UnaryExpSquare(nn.Module): + def forward(self, x): + return torch.exp(-torch.square(x)) + +@basic_unit +class UnaryErf(nn.Module): + def forward(self, x): + return torch.erf(x) + +unary_modules = ['UnaryIdentity', 'UnaryNegative', 'UnaryAbs', 'UnarySquare', 'UnaryPow', + 'UnarySqrt', 'UnaryMul', 'UnaryAdd', 'UnaryLogAbs', 'UnaryExp', 'UnarySin', 'UnaryCos', + 'UnarySinh', 'UnaryCosh', 'UnaryTanh', 'UnaryAtan', 'UnaryMax', + 'UnaryMin', 'UnarySigmoid', 'UnaryLogExp', 'UnaryExpSquare', 'UnaryErf'] + +if not version_larger_equal(torch.__version__, TorchVersion): + 
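# keep the exported name list in sync with the version-guarded class definitions above
+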
unary_modules.append('UnaryAsinh') + unary_modules.append('UnarySinc') + +# ============== binary function modules ============== + +@basic_unit +class BinaryAdd(nn.Module): + def forward(self, x): + return x[0] + x[1] + +@basic_unit +class BinaryMul(nn.Module): + def forward(self, x): + return x[0] * x[1] + +@basic_unit +class BinaryMinus(nn.Module): + def forward(self, x): + return x[0] - x[1] + +@basic_unit +class BinaryDivide(nn.Module): + def forward(self, x): + return x[0] / (x[1] + 1e-7) + +@basic_unit +class BinaryMax(nn.Module): + def forward(self, x): + return torch.max(x[0], x[1]) + +@basic_unit +class BinaryMin(nn.Module): + def forward(self, x): + return torch.min(x[0], x[1]) + +@basic_unit +class BinarySigmoid(nn.Module): + def forward(self, x): + return torch.sigmoid(x[0]) * x[1] + +@basic_unit +class BinaryExpSquare(nn.Module): + def __init__(self): + super().__init__() + self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32)) # pylint: disable=not-callable + def forward(self, x): + return torch.exp(-self.beta * torch.square(x[0] - x[1])) + +@basic_unit +class BinaryExpAbs(nn.Module): + def __init__(self): + super().__init__() + self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32)) # pylint: disable=not-callable + def forward(self, x): + return torch.exp(-self.beta * torch.abs(x[0] - x[1])) + +@basic_unit +class BinaryParamAdd(nn.Module): + def __init__(self): + super().__init__() + self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32)) # pylint: disable=not-callable + def forward(self, x): + return self.beta * x[0] + (1 - self.beta) * x[1] + +binary_modules = ['BinaryAdd', 'BinaryMul', 'BinaryMinus', 'BinaryDivide', 'BinaryMax', + 'BinaryMin', 'BinarySigmoid', 'BinaryExpSquare', 'BinaryExpAbs', 'BinaryParamAdd'] + + +class AutoActivation(nn.Module): + """ + This module is an implementation of the paper "Searching for Activation Functions" + (https://arxiv.org/abs/1710.05941). + NOTE: current `beta` is not per-channel parameter + + Parameters + ---------- + unit_num : int + the number of core units + """ + def __init__(self, unit_num: int = 1, label: str = None): + super().__init__() + self._label = generate_new_label(label) + self.unaries = nn.ModuleList() + self.binaries = nn.ModuleList() + self.first_unary = LayerChoice([eval('{}()'.format(unary)) for unary in unary_modules], label = f'{self.label}__unary_0') + for i in range(unit_num): + one_unary = LayerChoice([eval('{}()'.format(unary)) for unary in unary_modules], label = f'{self.label}__unary_{i+1}') + self.unaries.append(one_unary) + for i in range(unit_num): + one_binary = LayerChoice([eval('{}()'.format(binary)) for binary in binary_modules], label = f'{self.label}__binary_{i}') + self.binaries.append(one_binary) + + @property + def label(self): + return self._label + + def forward(self, x): + out = self.first_unary(x) + for unary, binary in zip(self.unaries, self.binaries): + out = binary(torch.stack([out, unary(x)])) + return out diff --git a/nni/retiarii/nn/pytorch/mutator.py b/nni/retiarii/nn/pytorch/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..802b923221e93b88e30545b02f60e667b8e1db43 --- /dev/null +++ b/nni/retiarii/nn/pytorch/mutator.py @@ -0,0 +1,324 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
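+
+# Translates inline mutation APIs (LayerChoice, InputChoice, ValueChoice, Repeat, ...) found
+# in a model into Mutator objects that sample concrete choices and apply them to the model.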
+ +import inspect +from typing import Any, List, Optional, Tuple + +import torch.nn as nn + +from nni.retiarii.graph import Cell, Graph, Model, ModelStatus, Node +from nni.retiarii.mutator import Mutator +from nni.retiarii.serializer import is_basic_unit, is_model_wrapped +from nni.retiarii.utils import uid + +from .api import LayerChoice, InputChoice, ValueChoice, Placeholder +from .component import Repeat, NasBench101Cell, NasBench101Mutator + + +class LayerChoiceMutator(Mutator): + def __init__(self, nodes: List[Node]): + super().__init__(label=nodes[0].operation.parameters['label']) + self.nodes = nodes + + def mutate(self, model): + candidates = self.nodes[0].operation.parameters['candidates'] + chosen = self.choice(candidates) + for node in self.nodes: + # Each layer choice corresponds to a cell, which is unconnected in the base graph. + # We add the connections here in the mutation logic. + # Thus, the mutated model should not be mutated again. Everything should be based on the original base graph. + target = model.graphs[node.operation.cell_name] + chosen_node = target.get_node_by_name(chosen) + assert chosen_node is not None + target.add_edge((target.input_node, 0), (chosen_node, None)) + target.add_edge((chosen_node, None), (target.output_node, None)) + model.get_node_by_name(node.name).update_operation(Cell(node.operation.cell_name)) + + # remove redundant nodes + for rm_node in list(target.hidden_nodes): # remove from a list on the fly will cause issues + if rm_node.name != chosen_node.name: + rm_node.remove() + + +class InputChoiceMutator(Mutator): + def __init__(self, nodes: List[Node]): + super().__init__(label=nodes[0].operation.parameters['label']) + self.nodes = nodes + + def mutate(self, model): + n_candidates = self.nodes[0].operation.parameters['n_candidates'] + n_chosen = self.nodes[0].operation.parameters['n_chosen'] + candidates = list(range(n_candidates)) + if n_chosen is None: + chosen = [i for i in candidates if self.choice([False, True])] + # FIXME This is a hack to make choice align with the previous format + self._cur_samples = chosen + else: + chosen = [self.choice(candidates) for _ in range(n_chosen)] + for node in self.nodes: + target = model.get_node_by_name(node.name) + target.update_operation('__torch__.nni.retiarii.nn.pytorch.ChosenInputs', + {'chosen': chosen, 'reduction': node.operation.parameters['reduction']}) + + +class ValueChoiceMutator(Mutator): + def __init__(self, nodes: List[Node], candidates: List[Any]): + super().__init__(label=nodes[0].operation.parameters['label']) + self.nodes = nodes + self.candidates = candidates + + def mutate(self, model): + chosen = self.choice(self.candidates) + for node in self.nodes: + target = model.get_node_by_name(node.name) + target.update_operation('prim::Constant', {'type': type(chosen).__name__, 'value': chosen}) + + +class ParameterChoiceMutator(Mutator): + def __init__(self, nodes: List[Tuple[Node, str]], candidates: List[Any]): + node, argname = nodes[0] + super().__init__(label=node.operation.parameters[argname].label) + self.nodes = nodes + self.candidates = candidates + + def mutate(self, model): + chosen = self.choice(self.candidates) + for node, argname in self.nodes: + chosen_value = node.operation.parameters[argname].access(chosen) + target = model.get_node_by_name(node.name) + target.update_operation(target.operation.type, {**target.operation.parameters, argname: chosen_value}) + + +class RepeatMutator(Mutator): + def __init__(self, nodes: List[Node]): + # nodes is a subgraph consisting of 
repeated blocks.
+        super().__init__(label=nodes[0].operation.parameters['label'])
+        self.nodes = nodes
+
+    def _retrieve_chain_from_graph(self, graph: Graph) -> List[Node]:
+        u = graph.input_node
+        chain = []
+        while u != graph.output_node:
+            if u != graph.input_node:
+                chain.append(u)
+            assert len(u.successors) == 1, f'This graph is an illegal chain. {u} has output {u.successors}.'
+            u = u.successors[0]
+        return chain
+
+    def mutate(self, model):
+        min_depth = self.nodes[0].operation.parameters['min_depth']
+        max_depth = self.nodes[0].operation.parameters['max_depth']
+        if min_depth < max_depth:
+            chosen_depth = self.choice(list(range(min_depth, max_depth + 1)))
+            for node in self.nodes:
+                # the logic here is similar to layer choice. We find the cell attached to each node.
+                target: Graph = model.graphs[node.operation.cell_name]
+                chain = self._retrieve_chain_from_graph(target)
+                for edge in chain[chosen_depth - 1].outgoing_edges:
+                    edge.remove()
+                target.add_edge((chain[chosen_depth - 1], None), (target.output_node, None))
+                for rm_node in chain[chosen_depth:]:
+                    for edge in rm_node.outgoing_edges:
+                        edge.remove()
+                    rm_node.remove()
+                # to delete the unused parameters.
+                model.get_node_by_name(node.name).update_operation(Cell(node.operation.cell_name))
+
+
+def process_inline_mutation(model: Model) -> Optional[List[Mutator]]:
+    applied_mutators = []
+
+    ic_nodes = _group_by_label(model.get_nodes_by_type('__torch__.nni.retiarii.nn.pytorch.api.InputChoice'))
+    for node_list in ic_nodes:
+        assert _is_all_equal(map(lambda node: node.operation.parameters['n_candidates'], node_list)) and \
+            _is_all_equal(map(lambda node: node.operation.parameters['n_chosen'], node_list)), \
+            'Input choice with the same label must have the same number of candidates.'
+        mutator = InputChoiceMutator(node_list)
+        applied_mutators.append(mutator)
+
+    vc_nodes = _group_by_label(model.get_nodes_by_type('__torch__.nni.retiarii.nn.pytorch.api.ValueChoice'))
+    for node_list in vc_nodes:
+        assert _is_all_equal(map(lambda node: node.operation.parameters['candidates'], node_list)), \
+            'Value choice with the same label must have the same candidates.'
+        mutator = ValueChoiceMutator(node_list, node_list[0].operation.parameters['candidates'])
+        applied_mutators.append(mutator)
+
+    pc_nodes = []
+    for node in model.get_nodes():
+        for name, choice in node.operation.parameters.items():
+            if isinstance(choice, ValueChoice):
+                pc_nodes.append((node, name))
+    pc_nodes = _group_parameters_by_label(pc_nodes)
+    for node_list in pc_nodes:
+        assert _is_all_equal([node.operation.parameters[name].candidates for node, name in node_list]), \
+            'Value choice with the same label must have the same candidates.'
+        first_node, first_argname = node_list[0]
+        mutator = ParameterChoiceMutator(node_list, first_node.operation.parameters[first_argname].candidates)
+        applied_mutators.append(mutator)
+
+    # apply layer choice at last as it will delete some nodes
+    lc_nodes = _group_by_label(filter(lambda d: d.operation.parameters.get('mutation') == 'layerchoice',
+                                      model.get_nodes_by_type('_cell')))
+    for node_list in lc_nodes:
+        assert _is_all_equal(map(lambda node: len(node.operation.parameters['candidates']), node_list)), \
+            'Layer choice with the same label must have the same number of candidates.'
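+        # one mutator per label: every layer choice sharing this label receives the same decision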
+ mutator = LayerChoiceMutator(node_list) + applied_mutators.append(mutator) + + repeat_nodes = _group_by_label(filter(lambda d: d.operation.parameters.get('mutation') == 'repeat', + model.get_nodes_by_type('_cell'))) + for node_list in repeat_nodes: + assert _is_all_equal(map(lambda node: node.operation.parameters['max_depth'], node_list)) and \ + _is_all_equal(map(lambda node: node.operation.parameters['min_depth'], node_list)), \ + 'Repeat with the same label must have the same number of candidates.' + mutator = RepeatMutator(node_list) + applied_mutators.append(mutator) + + if applied_mutators: + return applied_mutators + return None + + +# The following are written for pure-python mode + + +class ManyChooseManyMutator(Mutator): + """ + Choose based on labels. Will not affect the model itself. + """ + + def __init__(self, label: Optional[str]): + super().__init__(label=label) + + @staticmethod + def candidates(node): + if 'n_candidates' in node.operation.parameters: + return list(range(node.operation.parameters['n_candidates'])) + else: + return node.operation.parameters['candidates'] + + @staticmethod + def number_of_chosen(node): + if 'n_chosen' in node.operation.parameters: + return node.operation.parameters['n_chosen'] + return 1 + + def mutate(self, model: Model): + # this mutate does not have any effect, but it is recorded in the mutation history + for node in model.get_nodes_by_label(self.label): + n_chosen = self.number_of_chosen(node) + if n_chosen is None: + candidates = [i for i in self.candidates(node) if self.choice([False, True])] + # FIXME This is a hack to make choice align with the previous format + # For example, it will convert [False, True, True] into [1, 2]. + self._cur_samples = candidates + else: + for _ in range(n_chosen): + self.choice(self.candidates(node)) + break + + +def extract_mutation_from_pt_module(pytorch_model: nn.Module) -> Tuple[Model, Optional[List[Mutator]]]: + model = Model(_internal=True) + graph = Graph(model, uid(), '_model', _internal=True)._register() + model.python_class = pytorch_model.__class__ + if len(inspect.signature(model.python_class.__init__).parameters) > 1: + if not is_model_wrapped(pytorch_model): + raise ValueError('Please annotate the model with @model_wrapper decorator in python execution mode ' + 'if your model has init parameters.') + model.python_init_params = pytorch_model.trace_kwargs + else: + model.python_init_params = {} + + for name, module in pytorch_model.named_modules(): + # tricky case: value choice that serves as parameters are stored in traced arguments + if is_basic_unit(module): + for key, value in module.trace_kwargs.items(): + if isinstance(value, ValueChoice): + node = graph.add_node(name + '.init.' 
+ key, 'ValueChoice', {'candidates': value.candidates}) + node.label = value.label + + if isinstance(module, (LayerChoice, InputChoice, ValueChoice)): + # TODO: check the label of module and warn if it's auto-generated + pass + if isinstance(module, LayerChoice): + node = graph.add_node(name, 'LayerChoice', {'candidates': module.names}) + node.label = module.label + if isinstance(module, InputChoice): + node = graph.add_node(name, 'InputChoice', + {'n_candidates': module.n_candidates, 'n_chosen': module.n_chosen}) + node.label = module.label + if isinstance(module, ValueChoice): + node = graph.add_node(name, 'ValueChoice', {'candidates': module.candidates}) + node.label = module.label + if isinstance(module, Repeat) and module.min_depth <= module.max_depth: + node = graph.add_node(name, 'Repeat', { + 'candidates': list(range(module.min_depth, module.max_depth + 1)) + }) + node.label = module.label + if isinstance(module, NasBench101Cell): + node = graph.add_node(name, 'NasBench101Cell', { + 'max_num_edges': module.max_num_edges + }) + node.label = module.label + if isinstance(module, Placeholder): + raise NotImplementedError('Placeholder is not supported in python execution mode.') + + model.status = ModelStatus.Frozen + if not graph.hidden_nodes: + return model, None + + mutators = [] + mutators_final = [] + for nodes in _group_by_label_and_type(graph.hidden_nodes): + assert _is_all_equal(map(lambda n: n.operation.type, nodes)), \ + f'Node with label "{nodes[0].label}" does not all have the same type.' + assert _is_all_equal(map(lambda n: n.operation.parameters, nodes)), \ + f'Node with label "{nodes[0].label}" does not agree on parameters.' + if nodes[0].operation.type == 'NasBench101Cell': + mutators_final.append(NasBench101Mutator(nodes[0].label)) + else: + mutators.append(ManyChooseManyMutator(nodes[0].label)) + return model, mutators + mutators_final + + +# utility functions + + +def _is_all_equal(lst): + last = None + for x in lst: + if last is not None and last != x: + return False + last = x + return True + + +def _group_by_label_and_type(nodes: List[Node]) -> List[List[Node]]: + result = {} + for node in nodes: + key = (node.label, node.operation.type) + if key not in result: + result[key] = [] + result[key].append(node) + return list(result.values()) + + +def _group_by_label(nodes: List[Node]) -> List[List[Node]]: + result = {} + for node in nodes: + label = node.operation.parameters['label'] + if label not in result: + result[label] = [] + result[label].append(node) + return list(result.values()) + + +def _group_parameters_by_label(nodes: List[Tuple[Node, str]]) -> List[List[Tuple[Node, str]]]: + result = {} + for node, argname in nodes: + label = node.operation.parameters[argname].label + if label not in result: + result[label] = [] + result[label].append((node, argname)) + return list(result.values()) diff --git a/nni/retiarii/nn/pytorch/nasbench101.py b/nni/retiarii/nn/pytorch/nasbench101.py new file mode 100644 index 0000000000000000000000000000000000000000..5671af1f623d52998eced6275295d021107d9239 --- /dev/null +++ b/nni/retiarii/nn/pytorch/nasbench101.py @@ -0,0 +1,398 @@ +import logging +from collections import OrderedDict +from typing import Callable, List, Optional, Union, Dict + +import numpy as np +import torch +import torch.nn as nn + +from nni.retiarii.mutator import InvalidMutation, Mutator +from nni.retiarii.graph import Model +from .api import InputChoice, ValueChoice, LayerChoice +from .utils import Mutable, generate_new_label, get_fixed_dict + +_logger = 
logging.getLogger(__name__)
+
+
+def compute_vertex_channels(input_channels, output_channels, matrix):
+    """
+    This is (almost) copied from the original NAS-Bench-101 implementation.
+
+    Computes the number of channels at every vertex.
+
+    Given the input channels and output channels, this calculates the number of channels at each interior vertex.
+    Interior vertices have the same number of channels as the max of the channels of the vertices they feed into.
+    The output channels are divided amongst the vertices that are directly connected to the output.
+    When the division is not even, some vertices may receive an extra channel to compensate.
+
+    Parameters
+    ----------
+    input_channels : int
+        Input channel count.
+    output_channels : int
+        Output channel count.
+    matrix : np.ndarray
+        Adjacency matrix for the module (pruned by model_spec).
+
+    Returns
+    -------
+    list of int
+        List of channel counts, in order of the vertices.
+    """
+
+    num_vertices = np.shape(matrix)[0]
+
+    vertex_channels = [0] * num_vertices
+    vertex_channels[0] = input_channels
+    vertex_channels[num_vertices - 1] = output_channels
+
+    if num_vertices == 2:
+        # Edge case where module only has input and output vertices
+        return vertex_channels
+
+    # Compute the in-degree ignoring input, axis 0 is the src vertex and axis 1 is
+    # the dst vertex. Summing over axis 0 gives the in-degree count of each vertex.
+    in_degree = np.sum(matrix[1:], axis=0)
+    interior_channels = output_channels // in_degree[num_vertices - 1]
+    correction = output_channels % in_degree[num_vertices - 1]  # Remainder to add
+
+    # Set channels of vertices that flow directly to output
+    for v in range(1, num_vertices - 1):
+        if matrix[v, num_vertices - 1]:
+            vertex_channels[v] = interior_channels
+            if correction:
+                vertex_channels[v] += 1
+                correction -= 1
+
+    # Set channels for all other vertices to the max of the out edges, going backwards.
+    # (num_vertices - 2) index skipped because it only connects to output.
+    for v in range(num_vertices - 3, 0, -1):
+        if not matrix[v, num_vertices - 1]:
+            for dst in range(v + 1, num_vertices - 1):
+                if matrix[v, dst]:
+                    vertex_channels[v] = max(vertex_channels[v], vertex_channels[dst])
+        assert vertex_channels[v] > 0
+
+    _logger.debug('vertex_channels: %s', str(vertex_channels))
+
+    # Sanity check, verify that channels never increase and final channels add up.
+    final_fan_in = 0
+    for v in range(1, num_vertices - 1):
+        if matrix[v, num_vertices - 1]:
+            final_fan_in += vertex_channels[v]
+        for dst in range(v + 1, num_vertices - 1):
+            if matrix[v, dst]:
+                assert vertex_channels[v] >= vertex_channels[dst]
+    assert final_fan_in == output_channels or num_vertices == 2
+    # num_vertices == 2 means only input/output nodes, so 0 fan-in
+
+    return vertex_channels
+
+
+def prune(matrix, ops):
+    """
+    Prune the extraneous parts of the graph.
+
+    General procedure:
+
+    1. Remove parts of graph not connected to input.
+    2. Remove parts of graph not connected to output.
+    3. Reorder the vertices so that they are consecutive after steps 1 and 2.
+
+    These 3 steps can be combined by deleting the rows and columns of the
+    vertices that are not reachable from both the input and output (in reverse).
+    """
+    num_vertices = np.shape(matrix)[0]
+
+    # Calculate the connection matrix within V steps.
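+    # (matrix + I)^V has a nonzero entry (i, j) exactly when j is reachable from i in at
+    # most V steps, so row 0 marks vertices reachable from the input and the last column
+    # marks vertices that can reach the output.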
+    connections = np.linalg.matrix_power(matrix + np.eye(num_vertices), num_vertices)
+
+    visited_from_input = set([i for i in range(num_vertices) if connections[0, i]])
+    visited_from_output = set([i for i in range(num_vertices) if connections[i, -1]])
+
+    # Any vertex that isn't connected to both input and output is extraneous to the computation graph.
+    extraneous = set(range(num_vertices)).difference(
+        visited_from_input.intersection(visited_from_output))
+
+    if len(extraneous) > num_vertices - 2:
+        raise InvalidMutation('Non-extraneous graph has fewer than 2 vertices, '
+                              'the input is not connected to the output and the spec is invalid.')
+
+    matrix = np.delete(matrix, list(extraneous), axis=0)
+    matrix = np.delete(matrix, list(extraneous), axis=1)
+    for index in sorted(extraneous, reverse=True):
+        del ops[index]
+    return matrix, ops
+
+
+def truncate(inputs, channels):
+    input_channels = inputs.size(1)
+    if input_channels < channels:
+        raise ValueError('input channels < output channels for truncate')
+    elif input_channels == channels:
+        return inputs  # No truncation necessary
+    else:
+        # Truncation should only be necessary when channel division leads to
+        # vertices with +1 channels. The input vertex should always be projected to
+        # the minimum channel count.
+        assert input_channels - channels == 1
+        return inputs[:, :channels]
+
+
+class _NasBench101CellFixed(nn.Module):
+    """
+    The fixed version of NAS-Bench-101 Cell, used in python-version execution engine.
+    """
+
+    def __init__(self, operations: List[Callable[[int], nn.Module]],
+                 adjacency_list: List[List[int]],
+                 in_features: int, out_features: int, num_nodes: int,
+                 projection: Callable[[int, int], nn.Module]):
+        super().__init__()
+
+        assert num_nodes == len(operations) + 2 == len(adjacency_list) + 1
+
+        self.operations = ['IN'] + operations + ['OUT']  # add pseudo nodes
+        self.connection_matrix = self.build_connection_matrix(adjacency_list, num_nodes)
+        del num_nodes  # raw number of nodes is no longer used
+
+        self.connection_matrix, self.operations = prune(self.connection_matrix, self.operations)
+
+        self.hidden_features = compute_vertex_channels(in_features, out_features, self.connection_matrix)
+
+        self.num_nodes = len(self.connection_matrix)
+        self.in_features = in_features
+        self.out_features = out_features
+        _logger.info('Pruned number of nodes: %d', self.num_nodes)
+        _logger.info('Pruned connection matrix: %s', str(self.connection_matrix))
+
+        self.projections = nn.ModuleList([nn.Identity()])
+        self.ops = nn.ModuleList([nn.Identity()])
+        for i in range(1, self.num_nodes):
+            self.projections.append(projection(in_features, self.hidden_features[i]))
+
+        for i in range(1, self.num_nodes - 1):
+            # index into the pruned operation list, so that operations stay aligned
+            # with the pruned connection matrix and hidden features
+            self.ops.append(self.operations[i](self.hidden_features[i]))
+
+    @staticmethod
+    def build_connection_matrix(adjacency_list, num_nodes):
+        adjacency_list = [[]] + adjacency_list  # add adjacency for first node
+        connections = np.zeros((num_nodes, num_nodes), dtype='int')
+        for i, lst in enumerate(adjacency_list):
+            assert all([0 <= k < i for k in lst])
+            for k in lst:
+                connections[k, i] = 1
+        return connections
+
+    def forward(self, inputs):
+        tensors = [inputs]
+        for t in range(1, self.num_nodes - 1):
+
+            # Create interior connections, truncating if necessary
+            add_in = [truncate(tensors[src], self.hidden_features[t])
+                      for src in range(1, t) if self.connection_matrix[src, t]]
+
+            # Create add connection from projected input
+            if self.connection_matrix[0, t]:
+                add_in.append(self.projections[t](tensors[0]))
+
+            if len(add_in) == 1:
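+                # single fan-in: no elementwise sum needed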
+                vertex_input = add_in[0]
+            else:
+                vertex_input = sum(add_in)
+
+            # Perform op at vertex t
+            vertex_out = self.ops[t](vertex_input)
+            tensors.append(vertex_out)
+
+        # Construct final output tensor by concatenating all fan-in and adding input.
+        if np.sum(self.connection_matrix[:, -1]) == 1:
+            src = np.where(self.connection_matrix[:, -1] == 1)[0][0]
+            return self.projections[-1](tensors[0]) if src == 0 else tensors[src]
+
+        outputs = torch.cat([tensors[src] for src in range(1, self.num_nodes - 1) if self.connection_matrix[src, -1]], 1)
+        if self.connection_matrix[0, -1]:
+            outputs += self.projections[-1](tensors[0])
+        assert outputs.size(1) == self.out_features
+        return outputs
+
+
+class NasBench101Cell(Mutable):
+    """
+    Cell structure that is proposed in NAS-Bench-101 [nasbench101]_ .
+
+    This cell is usually used in evaluation of NAS algorithms, because there is a "comprehensive analysis" of this search space
+    available, which includes a full architecture dataset that "maps 423k unique architectures to metrics
+    including run time and accuracy". You can also use the space in your own space design, in which scenario it should be possible
+    to leverage results in the benchmark to narrow the huge space down to a few efficient architectures.
+
+    The space of this cell architecture consists of all possible directed acyclic graphs on no more than ``max_num_nodes`` nodes,
+    where each possible node (other than IN and OUT) has one of ``op_candidates``, representing the corresponding operation.
+    Edges connecting the nodes can be no more than ``max_num_edges``.
+    To align with the paper settings, two vertices, specially labeled as operation IN and OUT, are also counted into
+    ``max_num_nodes`` in our implementation; the default value of ``max_num_nodes`` is 7 and ``max_num_edges`` is 9.
+
+    Input of this cell should be of shape :math:`[N, C_{in}, *]`, while output should be :math:`[N, C_{out}, *]`. The shape
+    of each hidden node will be computed automatically, depending on the cell structure. Each of the ``op_candidates``
+    should be a callable that accepts the computed ``num_features`` and returns a ``Module``. For example,
+
+    .. code-block:: python
+
+        def conv_bn_relu(num_features):
+            return nn.Sequential(
+                nn.Conv2d(num_features, num_features, 1),
+                nn.BatchNorm2d(num_features),
+                nn.ReLU()
+            )
+
+    The output of each node is the sum of its input nodes fed into its operation, except for the last node (the output node),
+    which is the concatenation of its input *hidden* nodes, adding the *IN* node (if IN and OUT are connected).
+
+    When the input tensor is added to any other tensor, there could be a shape mismatch. Therefore, a projection transformation
+    is needed to transform the input tensor. In the paper, this is simply a Conv1x1 followed by BN and ReLU. The ``projection``
+    parameter accepts ``in_features`` and ``out_features``, and returns a ``Module``. This parameter has no default value,
+    as we hold no assumption that users are dealing with images. An example for this parameter is,
+
+    .. code-block:: python
+
+        def projection_fn(in_features, out_features):
+            return nn.Conv2d(in_features, out_features, 1)
+
+    Parameters
+    ----------
+    op_candidates : list of callable
+        Operation candidates. Each should be a function that accepts the number of features, returning ``nn.Module``.
+    in_features : int
+        Input dimension of cell.
+    out_features : int
+        Output dimension of cell.
+    projection : callable
+        Projection module that is used to preprocess the input tensor of the whole cell.
+        A callable that accepts input and output features, returning ``nn.Module``.
+    max_num_nodes : int
+        Maximum number of nodes in the cell, input and output included. At least 2. Default: 7.
+    max_num_edges : int
+        Maximum number of edges in the cell. Default: 9.
+    label : str
+        Identifier of the cell. Cells sharing the same label will semantically share the same choice.
+
+    References
+    ----------
+    .. [nasbench101] Ying, Chris, et al. "Nas-bench-101: Towards reproducible neural architecture search."
+        International Conference on Machine Learning. PMLR, 2019.
+    """
+
+    @staticmethod
+    def _make_dict(x):
+        if isinstance(x, list):
+            return OrderedDict([(str(i), t) for i, t in enumerate(x)])
+        return OrderedDict(x)
+
+    @classmethod
+    def create_fixed_module(cls, op_candidates: Union[Dict[str, Callable[[int], nn.Module]], List[Callable[[int], nn.Module]]],
+                            in_features: int, out_features: int, projection: Callable[[int, int], nn.Module],
+                            max_num_nodes: int = 7, max_num_edges: int = 9, label: Optional[str] = None):
+        def make_list(x): return x if isinstance(x, list) else [x]
+
+        label, selected = get_fixed_dict(label)
+        op_candidates = cls._make_dict(op_candidates)
+        num_nodes = selected[f'{label}/num_nodes']
+        adjacency_list = [make_list(selected[f'{label}/input{i}']) for i in range(1, num_nodes)]
+        if sum([len(e) for e in adjacency_list]) > max_num_edges:
+            raise InvalidMutation(f'Expected at most {max_num_edges} edges, found: {adjacency_list}')
+        return _NasBench101CellFixed(
+            [op_candidates[selected[f'{label}/op{i}']] for i in range(1, num_nodes - 1)],
+            adjacency_list, in_features, out_features, num_nodes, projection)
+
+    def __init__(self, op_candidates: Union[Dict[str, Callable[[int], nn.Module]], List[Callable[[int], nn.Module]]],
+                 in_features: int, out_features: int, projection: Callable[[int, int], nn.Module],
+                 max_num_nodes: int = 7, max_num_edges: int = 9, label: Optional[str] = None):
+
+        super().__init__()
+        self._label = generate_new_label(label)
+        num_vertices_prior = [2 ** i for i in range(2, max_num_nodes + 1)]
+        num_vertices_prior = (np.array(num_vertices_prior) / sum(num_vertices_prior)).tolist()
+        self.num_nodes = ValueChoice(list(range(2, max_num_nodes + 1)),
+                                     prior=num_vertices_prior,
+                                     label=f'{self._label}/num_nodes')
+        self.max_num_nodes = max_num_nodes
+        self.max_num_edges = max_num_edges
+
+        op_candidates = self._make_dict(op_candidates)
+
+        # this is only for input validation and instantiating enough layer choice and input choice
+        self.hidden_features = out_features
+
+        self.projections = nn.ModuleList([nn.Identity()])
+        self.ops = nn.ModuleList([nn.Identity()])
+        self.inputs = nn.ModuleList([nn.Identity()])
+        for _ in range(1, max_num_nodes):
+            self.projections.append(projection(in_features, self.hidden_features))
+        for i in range(1, max_num_nodes):
+            if i < max_num_nodes - 1:
+                self.ops.append(LayerChoice(OrderedDict([(k, op(self.hidden_features)) for k, op in op_candidates.items()]),
+                                            label=f'{self._label}/op{i}'))
+            self.inputs.append(InputChoice(i, None, label=f'{self._label}/input{i}'))
+
+    @property
+    def label(self):
+        return self._label
+
+    def forward(self, x):
+        # This is a dummy forward and actually not used
+        tensors = [x]
+        for i in range(1, self.max_num_nodes):
+            node_input = self.inputs[i]([self.projections[i](tensors[0])] + [t for t in tensors[1:]])
+            if i < self.max_num_nodes - 1:
+                node_output = self.ops[i](node_input)
+            else:
+                node_output = node_input
+            tensors.append(node_output)
+        return tensors[-1]
+
+
+class NasBench101Mutator(Mutator):
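+    # Replays the samples recorded for this cell's inner choices, rebuilds the adjacency
+    # matrix and prunes it, raising InvalidMutation when the sampled cell is illegal.
+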
# for validation purposes + # for python execution engine + + def __init__(self, label: Optional[str]): + super().__init__(label=label) + + @staticmethod + def candidates(node): + if 'n_candidates' in node.operation.parameters: + return list(range(node.operation.parameters['n_candidates'])) + else: + return node.operation.parameters['candidates'] + + @staticmethod + def number_of_chosen(node): + if 'n_chosen' in node.operation.parameters: + return node.operation.parameters['n_chosen'] + return 1 + + def mutate(self, model: Model): + for node in model.get_nodes_by_label(self.label): + max_num_edges = node.operation.parameters['max_num_edges'] + break + mutation_dict = {mut.mutator.label: mut.samples for mut in model.history} + num_nodes = mutation_dict[f'{self.label}/num_nodes'][0] + adjacency_list = [mutation_dict[f'{self.label}/input{i}'] for i in range(1, num_nodes)] + if sum([len(e) for e in adjacency_list]) > max_num_edges: + raise InvalidMutation(f'Expected {max_num_edges} edges, found: {adjacency_list}') + matrix = _NasBench101CellFixed.build_connection_matrix(adjacency_list, num_nodes) + + operations = ['IN'] + [mutation_dict[f'{self.label}/op{i}'][0] for i in range(1, num_nodes - 1)] + ['OUT'] + assert len(operations) == len(matrix) + matrix, operations = prune(matrix, operations) # possible to raise InvalidMutation inside + + # NOTE: a hack to maintain a clean copy of what nasbench101 cell looks like + self._cur_samples = {} + for i in range(1, len(matrix)): + if i + 1 < len(matrix): + self._cur_samples[f'op{i}'] = operations[i] + self._cur_samples[f'input{i}'] = [k for k in range(i) if matrix[k, i]] + self._cur_samples = [self._cur_samples] # by design, _cur_samples is a list of samples + + def dry_run(self, model): + return [], model diff --git a/nni/retiarii/nn/pytorch/nn.py b/nni/retiarii/nn/pytorch/nn.py new file mode 100644 index 0000000000000000000000000000000000000000..6b79af636e484a82fdd82ad692ce00798d83be64 --- /dev/null +++ b/nni/retiarii/nn/pytorch/nn.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
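+
+# Serializable counterparts of ``torch.nn`` modules: each class below wraps the corresponding
+# torch module with ``basic_unit`` so that its init arguments are traced and recorded, which
+# the graph-based execution engine relies on when parsing models.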
+ +import torch +import torch.nn as nn + +from ...serializer import basic_unit +from ...utils import version_larger_equal + +# NOTE: support pytorch version >= 1.5.0 + +__all__ = [ + 'Module', 'Sequential', 'ModuleList', # TODO: 'ModuleDict', 'ParameterList', 'ParameterDict', + 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', + 'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6', + 'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink', + 'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin', + 'Tanhshrink', 'RReLU', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d', + 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', "FractionalMaxPool3d", + 'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d', + 'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm', + 'Dropout', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout', + 'ReflectionPad1d', 'ReflectionPad2d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d', + 'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', + 'LSTMCell', 'GRUCell', 'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d', + 'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d', + 'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d', + 'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold', + 'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder', + 'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer', + 'Flatten', 'Hardsigmoid' +] + +if version_larger_equal(torch.__version__, '1.6.0'): + __all__.append('Hardswish') + +if version_larger_equal(torch.__version__, '1.7.0'): + __all__.extend(['Unflatten', 'SiLU', 'TripletMarginWithDistanceLoss']) + + +Module = nn.Module + +Sequential = nn.Sequential +ModuleList = basic_unit(nn.ModuleList, basic_unit_tag=False) + +Identity = basic_unit(nn.Identity) +Linear = basic_unit(nn.Linear) +Conv1d = basic_unit(nn.Conv1d) +Conv2d = basic_unit(nn.Conv2d) +Conv3d = basic_unit(nn.Conv3d) +ConvTranspose1d = basic_unit(nn.ConvTranspose1d) +ConvTranspose2d = basic_unit(nn.ConvTranspose2d) +ConvTranspose3d = basic_unit(nn.ConvTranspose3d) +Threshold = basic_unit(nn.Threshold) +ReLU = basic_unit(nn.ReLU) +Hardtanh = basic_unit(nn.Hardtanh) +ReLU6 = basic_unit(nn.ReLU6) +Sigmoid = basic_unit(nn.Sigmoid) +Tanh = basic_unit(nn.Tanh) +Softmax = basic_unit(nn.Softmax) +Softmax2d = basic_unit(nn.Softmax2d) +LogSoftmax = basic_unit(nn.LogSoftmax) +ELU = basic_unit(nn.ELU) +SELU = basic_unit(nn.SELU) +CELU = basic_unit(nn.CELU) +GLU = basic_unit(nn.GLU) +GELU = basic_unit(nn.GELU) +Hardshrink = basic_unit(nn.Hardshrink) +LeakyReLU = basic_unit(nn.LeakyReLU) +LogSigmoid = basic_unit(nn.LogSigmoid) +Softplus = basic_unit(nn.Softplus) +Softshrink = basic_unit(nn.Softshrink) +MultiheadAttention = basic_unit(nn.MultiheadAttention) +PReLU = basic_unit(nn.PReLU) +Softsign = basic_unit(nn.Softsign) +Softmin = basic_unit(nn.Softmin) +Tanhshrink = basic_unit(nn.Tanhshrink) +RReLU = basic_unit(nn.RReLU) +AvgPool1d = basic_unit(nn.AvgPool1d) +AvgPool2d = basic_unit(nn.AvgPool2d) +AvgPool3d = basic_unit(nn.AvgPool3d) +MaxPool1d = basic_unit(nn.MaxPool1d) 
+MaxPool2d = basic_unit(nn.MaxPool2d) +MaxPool3d = basic_unit(nn.MaxPool3d) +MaxUnpool1d = basic_unit(nn.MaxUnpool1d) +MaxUnpool2d = basic_unit(nn.MaxUnpool2d) +MaxUnpool3d = basic_unit(nn.MaxUnpool3d) +FractionalMaxPool2d = basic_unit(nn.FractionalMaxPool2d) +FractionalMaxPool3d = basic_unit(nn.FractionalMaxPool3d) +LPPool1d = basic_unit(nn.LPPool1d) +LPPool2d = basic_unit(nn.LPPool2d) +LocalResponseNorm = basic_unit(nn.LocalResponseNorm) +BatchNorm1d = basic_unit(nn.BatchNorm1d) +BatchNorm2d = basic_unit(nn.BatchNorm2d) +BatchNorm3d = basic_unit(nn.BatchNorm3d) +InstanceNorm1d = basic_unit(nn.InstanceNorm1d) +InstanceNorm2d = basic_unit(nn.InstanceNorm2d) +InstanceNorm3d = basic_unit(nn.InstanceNorm3d) +LayerNorm = basic_unit(nn.LayerNorm) +GroupNorm = basic_unit(nn.GroupNorm) +SyncBatchNorm = basic_unit(nn.SyncBatchNorm) +Dropout = basic_unit(nn.Dropout) +Dropout2d = basic_unit(nn.Dropout2d) +Dropout3d = basic_unit(nn.Dropout3d) +AlphaDropout = basic_unit(nn.AlphaDropout) +FeatureAlphaDropout = basic_unit(nn.FeatureAlphaDropout) +ReflectionPad1d = basic_unit(nn.ReflectionPad1d) +ReflectionPad2d = basic_unit(nn.ReflectionPad2d) +ReplicationPad2d = basic_unit(nn.ReplicationPad2d) +ReplicationPad1d = basic_unit(nn.ReplicationPad1d) +ReplicationPad3d = basic_unit(nn.ReplicationPad3d) +CrossMapLRN2d = basic_unit(nn.CrossMapLRN2d) +Embedding = basic_unit(nn.Embedding) +EmbeddingBag = basic_unit(nn.EmbeddingBag) +RNNBase = basic_unit(nn.RNNBase) +RNN = basic_unit(nn.RNN) +LSTM = basic_unit(nn.LSTM) +GRU = basic_unit(nn.GRU) +RNNCellBase = basic_unit(nn.RNNCellBase) +RNNCell = basic_unit(nn.RNNCell) +LSTMCell = basic_unit(nn.LSTMCell) +GRUCell = basic_unit(nn.GRUCell) +PixelShuffle = basic_unit(nn.PixelShuffle) +Upsample = basic_unit(nn.Upsample) +UpsamplingNearest2d = basic_unit(nn.UpsamplingNearest2d) +UpsamplingBilinear2d = basic_unit(nn.UpsamplingBilinear2d) +PairwiseDistance = basic_unit(nn.PairwiseDistance) +AdaptiveMaxPool1d = basic_unit(nn.AdaptiveMaxPool1d) +AdaptiveMaxPool2d = basic_unit(nn.AdaptiveMaxPool2d) +AdaptiveMaxPool3d = basic_unit(nn.AdaptiveMaxPool3d) +AdaptiveAvgPool1d = basic_unit(nn.AdaptiveAvgPool1d) +AdaptiveAvgPool2d = basic_unit(nn.AdaptiveAvgPool2d) +AdaptiveAvgPool3d = basic_unit(nn.AdaptiveAvgPool3d) +TripletMarginLoss = basic_unit(nn.TripletMarginLoss) +ZeroPad2d = basic_unit(nn.ZeroPad2d) +ConstantPad1d = basic_unit(nn.ConstantPad1d) +ConstantPad2d = basic_unit(nn.ConstantPad2d) +ConstantPad3d = basic_unit(nn.ConstantPad3d) +Bilinear = basic_unit(nn.Bilinear) +CosineSimilarity = basic_unit(nn.CosineSimilarity) +Unfold = basic_unit(nn.Unfold) +Fold = basic_unit(nn.Fold) +AdaptiveLogSoftmaxWithLoss = basic_unit(nn.AdaptiveLogSoftmaxWithLoss) +TransformerEncoder = basic_unit(nn.TransformerEncoder) +TransformerDecoder = basic_unit(nn.TransformerDecoder) +TransformerEncoderLayer = basic_unit(nn.TransformerEncoderLayer) +TransformerDecoderLayer = basic_unit(nn.TransformerDecoderLayer) +Transformer = basic_unit(nn.Transformer) +Flatten = basic_unit(nn.Flatten) +Hardsigmoid = basic_unit(nn.Hardsigmoid) + +if version_larger_equal(torch.__version__, '1.6.0'): + Hardswish = basic_unit(nn.Hardswish) + +if version_larger_equal(torch.__version__, '1.7.0'): + SiLU = basic_unit(nn.SiLU) + Unflatten = basic_unit(nn.Unflatten) + TripletMarginWithDistanceLoss = basic_unit(nn.TripletMarginWithDistanceLoss) diff --git a/nni/retiarii/nn/pytorch/utils.py b/nni/retiarii/nn/pytorch/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..f277685856888bb855ab663b1354da1f9cedc7bd
--- /dev/null
+++ b/nni/retiarii/nn/pytorch/utils.py
@@ -0,0 +1,61 @@
+from typing import Any, Optional, Tuple, Union
+
+import torch.nn as nn
+from nni.retiarii.utils import NoContextError, ModelNamespace, get_current_context
+
+
+class Mutable(nn.Module):
+    """
+    This is just an implementation trick for now.
+
+    In the future, this could be the base class for all PyTorch mutables, including layer choice, input choice, etc.
+    This is not considered an interface, but rather a base class consisting of commonly used class/instance methods.
+    For API developers, it's not recommended to use ``isinstance(module, Mutable)`` to check for mutable modules either,
+    before the design is finalized.
+    """
+
+    def __new__(cls, *args, **kwargs):
+        if not args and not kwargs:
+            # this can be the case of copy/deepcopy
+            # attributes are assigned afterwards in __dict__
+            return super().__new__(cls)
+
+        try:
+            return cls.create_fixed_module(*args, **kwargs)
+        except NoContextError:
+            return super().__new__(cls)
+
+    @classmethod
+    def create_fixed_module(cls, *args, **kwargs) -> Union[nn.Module, Any]:
+        """
+        Try to create a fixed module from the fixed dict.
+        If the code is running in a trial, this method succeeds, and a concrete module instead of a mutable is created.
+        Raises ``NoContextError`` if the creation fails.
+        """
+        raise NotImplementedError
+
+
+def generate_new_label(label: Optional[str]):
+    if label is None:
+        return ModelNamespace.next_label()
+    return label
+
+
+def get_fixed_value(label: str) -> Any:
+    ret = get_current_context('fixed')
+    try:
+        return ret[generate_new_label(label)]
+    except KeyError:
+        raise KeyError(f'Fixed context with label {label} not found. Existing values are: {ret}')
+
+
+def get_fixed_dict(label_prefix: str) -> Tuple[str, Any]:
+    full_context = get_current_context('fixed')
+    try:
+        label_prefix = generate_new_label(label_prefix)
+        ret = {k: v for k, v in full_context.items() if k.startswith(label_prefix + '/')}
+        if not ret:
+            raise KeyError
+        return label_prefix, ret
+    except KeyError:
+        # report the full fixed context, not the (possibly empty) filtered dict
+        raise KeyError(f'Fixed context with prefix {label_prefix} not found. Existing values are: {full_context}')
diff --git a/nni/retiarii/oneshot/__init__.py b/nni/retiarii/oneshot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4e1dad8512bd03c9475d4546d2b86748222aff2
--- /dev/null
+++ b/nni/retiarii/oneshot/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .interface import BaseOneShotTrainer
diff --git a/nni/retiarii/oneshot/interface.py b/nni/retiarii/oneshot/interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..217bae9096efe518bdef9accc6ce55ef61fedc78
--- /dev/null
+++ b/nni/retiarii/oneshot/interface.py
@@ -0,0 +1,25 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import abc
+from typing import Any
+
+
+class BaseOneShotTrainer(abc.ABC):
+    """
+    Build many (possibly all) architectures into a full graph, search (while training) and export the best one.
+
+    A one-shot trainer has a ``fit`` function with no return value. Trainers should fit and search for the best architecture.
+    Currently, all the inputs of a trainer need to be manually set before ``fit`` (including the search space, data loader,
+    number of training epochs, etc.).
+
+    It has an extra ``export`` function that exports an object representing the final searched architecture.
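To make the ``fit``/``export`` contract concrete, here is a toy subclass (purely illustrative: the candidate list and "search" are made up; the real trainers below do actual weight training):

```python
# Hedged sketch of the BaseOneShotTrainer contract; CoinFlipTrainer is hypothetical.
import random
from nni.retiarii.oneshot import BaseOneShotTrainer

class CoinFlipTrainer(BaseOneShotTrainer):
    def __init__(self, candidates):
        self.candidates = candidates
        self.best = None

    def fit(self) -> None:
        # a real trainer would train shared weights and search here
        self.best = random.choice(self.candidates)

    def export(self):
        # return an object describing the chosen architecture
        return {'layer_1': self.best}

trainer = CoinFlipTrainer(['conv3x3', 'conv5x5'])
trainer.fit()
print(trainer.export())  # e.g. {'layer_1': 'conv5x5'}
```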
+ """ + + @abc.abstractmethod + def fit(self) -> None: + pass + + @abc.abstractmethod + def export(self) -> Any: + pass diff --git a/nni/retiarii/oneshot/pytorch/__init__.py b/nni/retiarii/oneshot/pytorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6e9456958af9e0b6f78450e8707f9d1ef046d009 --- /dev/null +++ b/nni/retiarii/oneshot/pytorch/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .darts import DartsTrainer +from .enas import EnasTrainer +from .proxyless import ProxylessTrainer +from .random import SinglePathTrainer, RandomTrainer +from .utils import replace_input_choice, replace_layer_choice diff --git a/nni/retiarii/oneshot/pytorch/darts.py b/nni/retiarii/oneshot/pytorch/darts.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e80df05cc371595260791c5a4e5ac5111dee73 --- /dev/null +++ b/nni/retiarii/oneshot/pytorch/darts.py @@ -0,0 +1,286 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import logging +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..interface import BaseOneShotTrainer +from .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device + + +_logger = logging.getLogger(__name__) + + +class DartsLayerChoice(nn.Module): + def __init__(self, layer_choice): + super(DartsLayerChoice, self).__init__() + self.name = layer_choice.label + self.op_choices = nn.ModuleDict(OrderedDict([(name, layer_choice[name]) for name in layer_choice.names])) + self.alpha = nn.Parameter(torch.randn(len(self.op_choices)) * 1e-3) + + def forward(self, *args, **kwargs): + op_results = torch.stack([op(*args, **kwargs) for op in self.op_choices.values()]) + alpha_shape = [-1] + [1] * (len(op_results.size()) - 1) + return torch.sum(op_results * F.softmax(self.alpha, -1).view(*alpha_shape), 0) + + def parameters(self): + for _, p in self.named_parameters(): + yield p + + def named_parameters(self): + for name, p in super(DartsLayerChoice, self).named_parameters(): + if name == 'alpha': + continue + yield name, p + + def export(self): + return list(self.op_choices.keys())[torch.argmax(self.alpha).item()] + + +class DartsInputChoice(nn.Module): + def __init__(self, input_choice): + super(DartsInputChoice, self).__init__() + self.name = input_choice.label + self.alpha = nn.Parameter(torch.randn(input_choice.n_candidates) * 1e-3) + self.n_chosen = input_choice.n_chosen or 1 + + def forward(self, inputs): + inputs = torch.stack(inputs) + alpha_shape = [-1] + [1] * (len(inputs.size()) - 1) + return torch.sum(inputs * F.softmax(self.alpha, -1).view(*alpha_shape), 0) + + def parameters(self): + for _, p in self.named_parameters(): + yield p + + def named_parameters(self): + for name, p in super(DartsInputChoice, self).named_parameters(): + if name == 'alpha': + continue + yield name, p + + def export(self): + return torch.argsort(-self.alpha).cpu().numpy().tolist()[:self.n_chosen] + + +class DartsTrainer(BaseOneShotTrainer): + """ + DARTS trainer. + + Parameters + ---------- + model : nn.Module + PyTorch model to be trained. + loss : callable + Receives logits and ground truth label, return a loss tensor. + metrics : callable + Receives logits and ground truth label, return a dict of metrics. + optimizer : Optimizer + The optimizer used for optimizing the model. + num_epochs : int + Number of epochs planned for training. 
+    dataset : Dataset
+        Dataset for training. Will be split for training weights and architecture weights.
+    grad_clip : float
+        Gradient clipping. Set to 0 to disable. Default: 5.
+    learning_rate : float
+        Learning rate to optimize the model. (Currently unused; configure the learning rate on ``optimizer`` instead.)
+    batch_size : int
+        Batch size.
+    workers : int
+        Workers for data loading.
+    device : torch.device
+        ``torch.device("cpu")`` or ``torch.device("cuda")``.
+    log_frequency : int
+        Step count per logging.
+    arc_learning_rate : float
+        Learning rate of architecture parameters.
+    unrolled : bool
+        ``True`` to use second-order optimization, otherwise first-order optimization.
+    """
+
+    def __init__(self, model, loss, metrics, optimizer,
+                 num_epochs, dataset, grad_clip=5.,
+                 learning_rate=2.5E-3, batch_size=64, workers=4,
+                 device=None, log_frequency=None,
+                 arc_learning_rate=3.0E-4, unrolled=False):
+        self.model = model
+        self.loss = loss
+        self.metrics = metrics
+        self.num_epochs = num_epochs
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self.workers = workers
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
+        self.log_frequency = log_frequency
+        self.model.to(self.device)
+
+        self.nas_modules = []
+        replace_layer_choice(self.model, DartsLayerChoice, self.nas_modules)
+        replace_input_choice(self.model, DartsInputChoice, self.nas_modules)
+        for _, module in self.nas_modules:
+            module.to(self.device)
+
+        self.model_optim = optimizer
+        # use the same architecture weight for modules with duplicated names
+        ctrl_params = {}
+        for _, m in self.nas_modules:
+            if m.name in ctrl_params:
+                assert m.alpha.size() == ctrl_params[m.name].size(), 'Sizes of parameters with the same label must match.'
+                m.alpha = ctrl_params[m.name]
+            else:
+                ctrl_params[m.name] = m.alpha
+        self.ctrl_optim = torch.optim.Adam(list(ctrl_params.values()), arc_learning_rate, betas=(0.5, 0.999),
+                                           weight_decay=1.0E-3)
+        self.unrolled = unrolled
+        self.grad_clip = grad_clip  # honor the constructor argument instead of hard-coding 5.
+
+        self._init_dataloader()
+
+    def _init_dataloader(self):
+        n_train = len(self.dataset)
+        split = n_train // 2
+        indices = list(range(n_train))
+        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
+        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])
+        self.train_loader = torch.utils.data.DataLoader(self.dataset,
+                                                        batch_size=self.batch_size,
+                                                        sampler=train_sampler,
+                                                        num_workers=self.workers)
+        self.valid_loader = torch.utils.data.DataLoader(self.dataset,
+                                                        batch_size=self.batch_size,
+                                                        sampler=valid_sampler,
+                                                        num_workers=self.workers)
+
+    def _train_one_epoch(self, epoch):
+        self.model.train()
+        meters = AverageMeterGroup()
+        for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(self.train_loader, self.valid_loader)):
+            trn_X, trn_y = to_device(trn_X, self.device), to_device(trn_y, self.device)
+            val_X, val_y = to_device(val_X, self.device), to_device(val_y, self.device)
+
+            # phase 1: architecture step
+            self.ctrl_optim.zero_grad()
+            if self.unrolled:
+                self._unrolled_backward(trn_X, trn_y, val_X, val_y)
+            else:
+                self._backward(val_X, val_y)
+            self.ctrl_optim.step()
+
+            # phase 2: child network step
+            self.model_optim.zero_grad()
+            logits, loss = self._logits_and_loss(trn_X, trn_y)
+            loss.backward()
+            if self.grad_clip > 0:
+                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)  # gradient clipping
+            self.model_optim.step()
+
+            metrics = self.metrics(logits, trn_y)
+            metrics['loss'] = loss.item()
+            meters.update(metrics)
+            if self.log_frequency is not None and step % self.log_frequency == 0:
+                _logger.info('Epoch [%s/%s] Step [%s/%s] %s', epoch + 1,
+                             self.num_epochs, step + 1, len(self.train_loader), meters)
+
+    def _logits_and_loss(self, X, y):
+        logits = self.model(X)
+        loss = self.loss(logits, y)
+        return logits, loss
+
+    def _backward(self, val_X, val_y):
+        """
+        Simple backward with gradient descent
+        """
+        _, loss = self._logits_and_loss(val_X, val_y)
+        loss.backward()
+
+    def _unrolled_backward(self, trn_X, trn_y, val_X, val_y):
+        """
+        Compute unrolled loss and backward its gradients
+        """
+        backup_params = copy.deepcopy(tuple(self.model.parameters()))
+
+        # do virtual step on training data
+        # assumes an SGD-like optimizer that exposes momentum and weight_decay
+        lr = self.model_optim.param_groups[0]["lr"]
+        momentum = self.model_optim.param_groups[0]["momentum"]
+        weight_decay = self.model_optim.param_groups[0]["weight_decay"]
+        self._compute_virtual_model(trn_X, trn_y, lr, momentum, weight_decay)
+
+        # calculate unrolled loss on validation data
+        # keep gradients for model here for compute hessian
+        _, loss = self._logits_and_loss(val_X, val_y)
+        w_model, w_ctrl = tuple(self.model.parameters()), tuple([c.alpha for _, c in self.nas_modules])
+        w_grads = torch.autograd.grad(loss, w_model + w_ctrl)
+        d_model, d_ctrl = w_grads[:len(w_model)], w_grads[len(w_model):]
+
+        # compute hessian and final gradients
+        hessian = self._compute_hessian(backup_params, d_model, trn_X, trn_y)
+        with torch.no_grad():
+            for param, d, h in zip(w_ctrl, d_ctrl, hessian):
+                # gradient = dalpha - lr * hessian
+                param.grad = d - lr * h
+
+        # restore weights
+        self._restore_weights(backup_params)
+
+    def _compute_virtual_model(self, X, y, lr, momentum, weight_decay):
+        """
+        Compute unrolled weights w`
+        """
+        # don't need zero_grad, using autograd to calculate gradients
+        _, loss = self._logits_and_loss(X, y)
+        gradients = torch.autograd.grad(loss, self.model.parameters())
+        with torch.no_grad():
+            for w, g in zip(self.model.parameters(), gradients):
+                m = self.model_optim.state[w].get('momentum_buffer', 0.)
+                # update the weight in place; a plain `w = w - ...` would only
+                # rebind the loop variable and leave the model unchanged
+                w -= lr * (momentum * m + g + weight_decay * w)
+
+    def _restore_weights(self, backup_params):
+        with torch.no_grad():
+            for param, backup in zip(self.model.parameters(), backup_params):
+                param.copy_(backup)
+
+    def _compute_hessian(self, backup_params, dw, trn_X, trn_y):
+        """
+        dw = dw` { L_val(w`, alpha) }
+        w+ = w + eps * dw
+        w- = w - eps * dw
+        hessian = (dalpha { L_trn(w+, alpha) } - dalpha { L_trn(w-, alpha) }) / (2*eps)
+        eps = 0.01 / ||dw||
+        """
+        self._restore_weights(backup_params)
+        norm = torch.cat([w.view(-1) for w in dw]).norm()
+        eps = 0.01 / norm
+        if norm < 1E-8:
+            _logger.warning('In computing hessian, norm is smaller than 1E-8, causing eps to be %.6f.', eps.item())
+
+        dalphas = []
+        for e in [eps, -2.
* eps]: + # w+ = w + eps*dw`, w- = w - eps*dw` + with torch.no_grad(): + for p, d in zip(self.model.parameters(), dw): + p += e * d + + _, loss = self._logits_and_loss(trn_X, trn_y) + dalphas.append(torch.autograd.grad(loss, [c.alpha for _, c in self.nas_modules])) + + dalpha_pos, dalpha_neg = dalphas # dalpha { L_trn(w+) }, # dalpha { L_trn(w-) } + hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)] + return hessian + + def fit(self): + for i in range(self.num_epochs): + self._train_one_epoch(i) + + @torch.no_grad() + def export(self): + result = dict() + for name, module in self.nas_modules: + if name not in result: + result[name] = module.export() + return result diff --git a/nni/retiarii/oneshot/pytorch/enas.py b/nni/retiarii/oneshot/pytorch/enas.py new file mode 100644 index 0000000000000000000000000000000000000000..7f03c1dd1015a23f6255ba05a221a179e8d37fde --- /dev/null +++ b/nni/retiarii/oneshot/pytorch/enas.py @@ -0,0 +1,336 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + +from ..interface import BaseOneShotTrainer +from .random import PathSamplingLayerChoice, PathSamplingInputChoice +from .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device + +_logger = logging.getLogger(__name__) + + +class StackedLSTMCell(nn.Module): + def __init__(self, layers, size, bias): + super().__init__() + self.lstm_num_layers = layers + self.lstm_modules = nn.ModuleList([nn.LSTMCell(size, size, bias=bias) + for _ in range(self.lstm_num_layers)]) + + def forward(self, inputs, hidden): + prev_h, prev_c = hidden + next_h, next_c = [], [] + for i, m in enumerate(self.lstm_modules): + curr_h, curr_c = m(inputs, (prev_h[i], prev_c[i])) + next_c.append(curr_c) + next_h.append(curr_h) + # current implementation only supports batch size equals 1, + # but the algorithm does not necessarily have this limitation + inputs = curr_h[-1].view(1, -1) + return next_h, next_c + + +class ReinforceField: + """ + A field with ``name``, with ``total`` choices. ``choose_one`` is true if one and only one is meant to be + selected. Otherwise, any number of choices can be chosen. + """ + + def __init__(self, name, total, choose_one): + self.name = name + self.total = total + self.choose_one = choose_one + + def __repr__(self): + return f'ReinforceField(name={self.name}, total={self.total}, choose_one={self.choose_one})' + + +class ReinforceController(nn.Module): + """ + A controller that mutates the graph with RL. + + Parameters + ---------- + fields : list of ReinforceField + List of fields to choose. + lstm_size : int + Controller LSTM hidden units. + lstm_num_layers : int + Number of layers for stacked LSTM. + tanh_constant : float + Logits will be equal to ``tanh_constant * tanh(logits)``. Don't use ``tanh`` if this value is ``None``. + skip_target : float + Target probability that skipconnect will appear. + temperature : float + Temperature constant that divides the logits. + entropy_reduction : str + Can be one of ``sum`` and ``mean``. How the entropy of multi-input-choice is reduced. 
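A hedged sketch of driving the controller described above (field names and sizes are invented; the import path is the module introduced in this diff):

```python
# Sample one sub-architecture with the RL controller. choose_one=True models a
# layer choice; choose_one=False models a multi-select input choice.
from nni.retiarii.oneshot.pytorch.enas import ReinforceController, ReinforceField

fields = [ReinforceField('op_1', total=3, choose_one=True),
          ReinforceField('skip_2', total=2, choose_one=False)]
controller = ReinforceController(fields)
sample = controller.resample()   # e.g. {'op_1': 2, 'skip_2': [0, 1]}
print(sample, controller.sample_log_prob.item())
```

The accumulated ``sample_log_prob``, ``sample_entropy`` and ``sample_skip_penalty`` are what the trainer later combines into the REINFORCE loss.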
+ """ + + def __init__(self, fields, lstm_size=64, lstm_num_layers=1, tanh_constant=1.5, + skip_target=0.4, temperature=None, entropy_reduction='sum'): + super(ReinforceController, self).__init__() + self.fields = fields + self.lstm_size = lstm_size + self.lstm_num_layers = lstm_num_layers + self.tanh_constant = tanh_constant + self.temperature = temperature + self.skip_target = skip_target + + self.lstm = StackedLSTMCell(self.lstm_num_layers, self.lstm_size, False) + self.attn_anchor = nn.Linear(self.lstm_size, self.lstm_size, bias=False) + self.attn_query = nn.Linear(self.lstm_size, self.lstm_size, bias=False) + self.v_attn = nn.Linear(self.lstm_size, 1, bias=False) + self.g_emb = nn.Parameter(torch.randn(1, self.lstm_size) * 0.1) + self.skip_targets = nn.Parameter(torch.tensor([1.0 - self.skip_target, self.skip_target]), # pylint: disable=not-callable + requires_grad=False) + assert entropy_reduction in ['sum', 'mean'], 'Entropy reduction must be one of sum and mean.' + self.entropy_reduction = torch.sum if entropy_reduction == 'sum' else torch.mean + self.cross_entropy_loss = nn.CrossEntropyLoss(reduction='none') + self.soft = nn.ModuleDict({ + field.name: nn.Linear(self.lstm_size, field.total, bias=False) for field in fields + }) + self.embedding = nn.ModuleDict({ + field.name: nn.Embedding(field.total, self.lstm_size) for field in fields + }) + + def resample(self): + self._initialize() + result = dict() + for field in self.fields: + result[field.name] = self._sample_single(field) + return result + + def _initialize(self): + self._inputs = self.g_emb.data + self._c = [torch.zeros((1, self.lstm_size), + dtype=self._inputs.dtype, + device=self._inputs.device) for _ in range(self.lstm_num_layers)] + self._h = [torch.zeros((1, self.lstm_size), + dtype=self._inputs.dtype, + device=self._inputs.device) for _ in range(self.lstm_num_layers)] + self.sample_log_prob = 0 + self.sample_entropy = 0 + self.sample_skip_penalty = 0 + + def _lstm_next_step(self): + self._h, self._c = self.lstm(self._inputs, (self._h, self._c)) + + def _sample_single(self, field): + self._lstm_next_step() + logit = self.soft[field.name](self._h[-1]) + if self.temperature is not None: + logit /= self.temperature + if self.tanh_constant is not None: + logit = self.tanh_constant * torch.tanh(logit) + if field.choose_one: + sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1) + log_prob = self.cross_entropy_loss(logit, sampled) + self._inputs = self.embedding[field.name](sampled) + else: + logit = logit.view(-1, 1) + logit = torch.cat([-logit, logit], 1) # pylint: disable=invalid-unary-operand-type + sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1) + skip_prob = torch.sigmoid(logit) + kl = torch.sum(skip_prob * torch.log(skip_prob / self.skip_targets)) + self.sample_skip_penalty += kl + log_prob = self.cross_entropy_loss(logit, sampled) + sampled = sampled.nonzero().view(-1) + if sampled.sum().item(): + self._inputs = (torch.sum(self.embedding[field.name](sampled.view(-1)), 0) / (1. 
+ torch.sum(sampled))).unsqueeze(0)
+            else:
+                self._inputs = torch.zeros(1, self.lstm_size, device=self.embedding[field.name].weight.device)
+
+        sampled = sampled.detach().numpy().tolist()
+        self.sample_log_prob += self.entropy_reduction(log_prob)
+        entropy = (log_prob * torch.exp(-log_prob)).detach()  # pylint: disable=invalid-unary-operand-type
+        self.sample_entropy += self.entropy_reduction(entropy)
+        if len(sampled) == 1:
+            sampled = sampled[0]
+        return sampled
+
+
+class EnasTrainer(BaseOneShotTrainer):
+    """
+    ENAS trainer.
+
+    Parameters
+    ----------
+    model : nn.Module
+        PyTorch model to be trained.
+    loss : callable
+        Receives logits and ground truth label, return a loss tensor.
+    metrics : callable
+        Receives logits and ground truth label, return a dict of metrics.
+    reward_function : callable
+        Receives logits and ground truth label, return a tensor, which will be fed to the RL controller as reward.
+    optimizer : Optimizer
+        The optimizer used for optimizing the model.
+    num_epochs : int
+        Number of epochs planned for training.
+    dataset : Dataset
+        Dataset for training. Will be split for training weights and architecture weights.
+    batch_size : int
+        Batch size.
+    workers : int
+        Workers for data loading.
+    device : torch.device
+        ``torch.device("cpu")`` or ``torch.device("cuda")``.
+    log_frequency : int
+        Step count per logging.
+    grad_clip : float
+        Gradient clipping. Set to 0 to disable. Default: 5.
+    entropy_weight : float
+        Weight of sample entropy loss.
+    skip_weight : float
+        Weight of skip penalty loss.
+    baseline_decay : float
+        Decay factor of baseline. New baseline will be equal to ``baseline_decay * baseline_old + reward * (1 - baseline_decay)``.
+    ctrl_lr : float
+        Learning rate for RL controller.
+    ctrl_steps_aggregate : int
+        Number of steps that will be aggregated into one mini-batch for RL controller.
+    ctrl_kwargs : dict
+        Optional kwargs that will be passed to :class:`ReinforceController`.
+    """
+
+    def __init__(self, model, loss, metrics, reward_function,
+                 optimizer, num_epochs, dataset,
+                 batch_size=64, workers=4, device=None, log_frequency=None,
+                 grad_clip=5., entropy_weight=0.0001, skip_weight=0.8, baseline_decay=0.999,
+                 ctrl_lr=0.00035, ctrl_steps_aggregate=20, ctrl_kwargs=None):
+        self.model = model
+        self.loss = loss
+        self.metrics = metrics
+        self.optimizer = optimizer
+        self.num_epochs = num_epochs
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self.workers = workers
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
+        self.log_frequency = log_frequency
+
+        self.nas_modules = []
+        replace_layer_choice(self.model, PathSamplingLayerChoice, self.nas_modules)
+        replace_input_choice(self.model, PathSamplingInputChoice, self.nas_modules)
+        for _, module in self.nas_modules:
+            module.to(self.device)
+        self.model.to(self.device)
+
+        self.nas_fields = [ReinforceField(name, len(module),
+                                          isinstance(module, PathSamplingLayerChoice) or module.n_chosen == 1)
+                           for name, module in self.nas_modules]
+        self.controller = ReinforceController(self.nas_fields, **(ctrl_kwargs or {}))
+
+        self.grad_clip = grad_clip
+        self.reward_function = reward_function
+        self.ctrl_optim = optim.Adam(self.controller.parameters(), lr=ctrl_lr)
+
+        self.entropy_weight = entropy_weight
+        self.skip_weight = skip_weight
+        self.baseline_decay = baseline_decay
+        self.baseline = 0.
+ self.ctrl_steps_aggregate = ctrl_steps_aggregate + + self.init_dataloader() + + def init_dataloader(self): + n_train = len(self.dataset) + split = n_train // 2 + indices = list(range(n_train)) + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:-split]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[-split:]) + self.train_loader = torch.utils.data.DataLoader(self.dataset, + batch_size=self.batch_size, + sampler=train_sampler, + num_workers=self.workers) + self.valid_loader = torch.utils.data.DataLoader(self.dataset, + batch_size=self.batch_size, + sampler=valid_sampler, + num_workers=self.workers) + + def _train_model(self, epoch): + self.model.train() + self.controller.eval() + meters = AverageMeterGroup() + for step, (x, y) in enumerate(self.train_loader): + x, y = to_device(x, self.device), to_device(y, self.device) + self.optimizer.zero_grad() + + self._resample() + logits = self.model(x) + metrics = self.metrics(logits, y) + loss = self.loss(logits, y) + loss.backward() + if self.grad_clip > 0: + nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip) + self.optimizer.step() + metrics['loss'] = loss.item() + meters.update(metrics) + + if self.log_frequency is not None and step % self.log_frequency == 0: + _logger.info('Model Epoch [%d/%d] Step [%d/%d] %s', epoch + 1, + self.num_epochs, step + 1, len(self.train_loader), meters) + + def _train_controller(self, epoch): + self.model.eval() + self.controller.train() + meters = AverageMeterGroup() + self.ctrl_optim.zero_grad() + for ctrl_step, (x, y) in enumerate(self.valid_loader): + x, y = to_device(x, self.device), to_device(y, self.device) + + self._resample() + with torch.no_grad(): + logits = self.model(x) + metrics = self.metrics(logits, y) + reward = self.reward_function(logits, y) + if self.entropy_weight: + reward += self.entropy_weight * self.controller.sample_entropy.item() + self.baseline = self.baseline * self.baseline_decay + reward * (1 - self.baseline_decay) + loss = self.controller.sample_log_prob * (reward - self.baseline) + if self.skip_weight: + loss += self.skip_weight * self.controller.sample_skip_penalty + metrics['reward'] = reward + metrics['loss'] = loss.item() + metrics['ent'] = self.controller.sample_entropy.item() + metrics['log_prob'] = self.controller.sample_log_prob.item() + metrics['baseline'] = self.baseline + metrics['skip'] = self.controller.sample_skip_penalty + + loss /= self.ctrl_steps_aggregate + loss.backward() + meters.update(metrics) + + if (ctrl_step + 1) % self.ctrl_steps_aggregate == 0: + if self.grad_clip > 0: + nn.utils.clip_grad_norm_(self.controller.parameters(), self.grad_clip) + self.ctrl_optim.step() + self.ctrl_optim.zero_grad() + + if self.log_frequency is not None and ctrl_step % self.log_frequency == 0: + _logger.info('RL Epoch [%d/%d] Step [%d/%d] %s', epoch + 1, self.num_epochs, + ctrl_step + 1, len(self.valid_loader), meters) + + def _resample(self): + result = self.controller.resample() + for name, module in self.nas_modules: + module.sampled = result[name] + + def fit(self): + for i in range(self.num_epochs): + self._train_model(i) + self._train_controller(i) + + def export(self): + self.controller.eval() + with torch.no_grad(): + return self.controller.resample() diff --git a/nni/retiarii/oneshot/pytorch/proxyless.py b/nni/retiarii/oneshot/pytorch/proxyless.py new file mode 100644 index 0000000000000000000000000000000000000000..358af3ae011c51dc4af93771f8fb00bf663f3377 --- /dev/null +++ 
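The baseline update used in ``_train_controller`` above is a plain exponential moving average, and the policy loss scales the sampled log-probability by the advantage. A standalone numeric sketch (values are illustrative):

```python
# EMA baseline reduces the variance of the REINFORCE gradient estimate.
baseline, baseline_decay = 0.0, 0.999
for reward in (0.52, 0.55, 0.61):                 # e.g. validation accuracy per step
    baseline = baseline * baseline_decay + reward * (1 - baseline_decay)

log_prob = -1.7                                   # log-probability of the sampled architecture
loss = log_prob * (0.61 - baseline)               # minimized w.r.t. controller parameters
print(round(baseline, 6), round(loss, 6))
```

With ``baseline_decay`` close to 1 the baseline warms up slowly from 0, so early rewards contribute almost their full value as advantage.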
b/nni/retiarii/oneshot/pytorch/proxyless.py @@ -0,0 +1,366 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..interface import BaseOneShotTrainer +from .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device + + +_logger = logging.getLogger(__name__) + + +class ArchGradientFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, x, binary_gates, run_func, backward_func): + ctx.run_func = run_func + ctx.backward_func = backward_func + + detached_x = x.detach() + detached_x.requires_grad = x.requires_grad + with torch.enable_grad(): + output = run_func(detached_x) + ctx.save_for_backward(detached_x, output) + return output.data + + @staticmethod + def backward(ctx, grad_output): + detached_x, output = ctx.saved_tensors + + grad_x = torch.autograd.grad(output, detached_x, grad_output, only_inputs=True) + # compute gradients w.r.t. binary_gates + binary_grads = ctx.backward_func(detached_x.data, output.data, grad_output.data) + + return grad_x[0], binary_grads, None, None + + +class ProxylessLayerChoice(nn.Module): + def __init__(self, ops): + super(ProxylessLayerChoice, self).__init__() + self.ops = nn.ModuleList(ops) + self.alpha = nn.Parameter(torch.randn(len(self.ops)) * 1E-3) + self._binary_gates = nn.Parameter(torch.randn(len(self.ops)) * 1E-3) + self.sampled = None + + def forward(self, *args): + def run_function(ops, active_id): + def forward(_x): + return ops[active_id](_x) + return forward + + def backward_function(ops, active_id, binary_gates): + def backward(_x, _output, grad_output): + binary_grads = torch.zeros_like(binary_gates.data) + with torch.no_grad(): + for k in range(len(ops)): + if k != active_id: + out_k = ops[k](_x.data) + else: + out_k = _output.data + grad_k = torch.sum(out_k * grad_output) + binary_grads[k] = grad_k + return binary_grads + return backward + + assert len(args) == 1 + x = args[0] + return ArchGradientFunction.apply( + x, self._binary_gates, run_function(self.ops, self.sampled), + backward_function(self.ops, self.sampled, self._binary_gates) + ) + + def resample(self): + probs = F.softmax(self.alpha, dim=-1) + sample = torch.multinomial(probs, 1)[0].item() + self.sampled = sample + with torch.no_grad(): + self._binary_gates.zero_() + self._binary_gates.grad = torch.zeros_like(self._binary_gates.data) + self._binary_gates.data[sample] = 1.0 + + def finalize_grad(self): + binary_grads = self._binary_gates.grad + with torch.no_grad(): + if self.alpha.grad is None: + self.alpha.grad = torch.zeros_like(self.alpha.data) + probs = F.softmax(self.alpha, dim=-1) + for i in range(len(self.ops)): + for j in range(len(self.ops)): + self.alpha.grad[i] += binary_grads[j] * probs[j] * (int(i == j) - probs[i]) + + def export(self): + return torch.argmax(self.alpha).item() + + def export_prob(self): + return F.softmax(self.alpha, dim=-1) + + +class ProxylessInputChoice(nn.Module): + def __init__(self, *args, **kwargs): + raise NotImplementedError('Input choice is not supported for ProxylessNAS.') + + +class HardwareLatencyEstimator: + def __init__(self, applied_hardware, model, dummy_input=(1, 3, 224, 224), dump_lat_table='data/latency_table.yaml'): + import nn_meter # pylint: disable=import-error + _logger.info(f'Load latency predictor for applied hardware: {applied_hardware}.') + self.predictor_name = applied_hardware + self.latency_predictor = nn_meter.load_latency_predictor(applied_hardware) + 
self.block_latency_table = self._form_latency_table(model, dummy_input, dump_lat_table=dump_lat_table)
+
+    def _form_latency_table(self, model, dummy_input, dump_lat_table):
+        latency_table = {}
+
+        from nni.retiarii.converter import convert_to_graph
+        from nni.retiarii.converter.graph_gen import GraphConverterWithShape
+        from nni.retiarii.converter.utils import flatten_model_graph_without_layerchoice, is_layerchoice_node
+        script_module = torch.jit.script(model)
+        base_model_ir = convert_to_graph(script_module, model,
+                                         converter=GraphConverterWithShape(), dummy_input=torch.randn(*dummy_input))
+
+        # form the latency of layerchoice blocks for the latency table
+        temp_ir_model = base_model_ir.fork()
+        cell_nodes = base_model_ir.get_cell_nodes()
+        layerchoice_nodes = [node for node in cell_nodes if is_layerchoice_node(node)]
+        for lc_node in layerchoice_nodes:
+            cand_lat = {}
+            for candidate in lc_node.operation.parameters['candidates']:
+                node_graph = base_model_ir.graphs.get(candidate)
+                if node_graph is not None:
+                    temp_ir_model._root_graph_name = node_graph.name
+                    latency = self.latency_predictor.predict(temp_ir_model, model_type='nni-ir')
+                else:
+                    _logger.warning(f"Could not find graph for layerchoice candidate {candidate}")
+                    latency = 0
+                cand_lat[candidate.split('_')[-1]] = float(latency)
+            latency_table[lc_node.operation.parameters['label']] = cand_lat
+
+        # form the latency of the stationary block in the latency table
+        temp_ir_model._root_graph_name = base_model_ir._root_graph_name
+        temp_ir_model = flatten_model_graph_without_layerchoice(temp_ir_model)
+        latency = self.latency_predictor.predict(temp_ir_model, model_type='nni-ir')
+        latency_table['stationary_block'] = {'root': float(latency)}
+
+        # save latency table
+        if dump_lat_table:
+            import os
+            import yaml
+            os.makedirs(os.path.dirname(dump_lat_table), exist_ok=True)
+            with open(dump_lat_table, 'a') as fp:
+                yaml.dump([{
+                    "applied_hardware": self.predictor_name,
+                    'latency_table': latency_table
+                }], fp)
+            _logger.info("Latency lookup table built")
+
+        return latency_table
+
+    def cal_expected_latency(self, current_architecture_prob):
+        lat = self.block_latency_table['stationary_block']['root']
+        for module_name, probs in current_architecture_prob.items():
+            assert len(probs) == len(self.block_latency_table[module_name])
+            lat += torch.sum(torch.tensor([probs[i] * self.block_latency_table[module_name][str(i)]
+                                           for i in range(len(probs))]))
+        return lat
+
+    def export_latency(self, current_architecture):
+        lat = self.block_latency_table['stationary_block']['root']
+        for module_name, selected_module in current_architecture.items():
+            lat += self.block_latency_table[module_name][str(selected_module)]
+        return lat
+
+
+class ProxylessTrainer(BaseOneShotTrainer):
+    """
+    Proxyless trainer.
+
+    Parameters
+    ----------
+    model : nn.Module
+        PyTorch model to be trained.
+    loss : callable
+        Receives logits and ground truth label, return a loss tensor.
+    metrics : callable
+        Receives logits and ground truth label, return a dict of metrics.
+    optimizer : Optimizer
+        The optimizer used for optimizing the model.
+    num_epochs : int
+        Number of epochs planned for training.
+    dataset : Dataset
+        Dataset for training. Will be split for training weights and architecture weights.
+    warmup_epochs : int
+        Number of epochs to warmup model parameters.
+    batch_size : int
+        Batch size.
+    workers : int
+        Workers for data loading.
+    device : torch.device
+        ``torch.device("cpu")`` or ``torch.device("cuda")``.
+    log_frequency : int
+        Step count per logging.
+    arc_learning_rate : float
+        Learning rate of architecture parameters.
+    grad_reg_loss_type : str
+        Regularization type for adding hardware-related loss; allowed types include
+        - ``"mul#log"``: ``regularized_loss = (torch.log(expected_latency) / math.log(self.ref_latency)) ** beta``
+        - ``"add#linear"``: ``regularized_loss = reg_lambda * (expected_latency - self.ref_latency) / self.ref_latency``
+        - None: do not apply loss regularization.
+    grad_reg_loss_params : dict
+        Regularization params; allowed params include
+        - ``"alpha"`` and ``"beta"``, which are required when ``grad_reg_loss_type == "mul#log"``
+        - ``"lambda"``, which is required when ``grad_reg_loss_type == "add#linear"``
+    applied_hardware : str
+        Target hardware to constrain the model's latency on. Latency is predicted by Microsoft
+        nn-Meter (https://github.com/microsoft/nn-Meter).
+    dummy_input : tuple
+        The dummy input shape when applied to the target hardware.
+    ref_latency : float
+        Reference latency value on the target hardware (ms).
+    """
+
+    def __init__(self, model, loss, metrics, optimizer,
+                 num_epochs, dataset, warmup_epochs=0,
+                 batch_size=64, workers=4, device=None, log_frequency=None,
+                 arc_learning_rate=1.0E-3,
+                 grad_reg_loss_type=None, grad_reg_loss_params=None,
+                 applied_hardware=None, dummy_input=(1, 3, 224, 224),
+                 ref_latency=65.0):
+        self.model = model
+        self.loss = loss
+        self.metrics = metrics
+        self.optimizer = optimizer
+        self.num_epochs = num_epochs
+        self.warmup_epochs = warmup_epochs
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self.workers = workers
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
+        self.log_frequency = log_frequency
+
+        # latency predictor
+        if applied_hardware:
+            self.latency_estimator = HardwareLatencyEstimator(applied_hardware, self.model, dummy_input)
+        else:
+            self.latency_estimator = None
+        self.reg_loss_type = grad_reg_loss_type
+        self.reg_loss_params = {} if grad_reg_loss_params is None else grad_reg_loss_params
+        self.ref_latency = ref_latency
+
+        self.model.to(self.device)
+        self.nas_modules = []
+        replace_layer_choice(self.model, ProxylessLayerChoice, self.nas_modules)
+        replace_input_choice(self.model, ProxylessInputChoice, self.nas_modules)
+        for _, module in self.nas_modules:
+            module.to(self.device)
+
+        # deduplicating architecture parameters with the same label (as DARTS does) is not supported yet
+ self.ctrl_optim = torch.optim.Adam([m.alpha for _, m in self.nas_modules], arc_learning_rate, + weight_decay=0, betas=(0, 0.999), eps=1e-8) + self._init_dataloader() + + def _init_dataloader(self): + n_train = len(self.dataset) + split = n_train // 2 + indices = list(range(n_train)) + train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) + valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) + self.train_loader = torch.utils.data.DataLoader(self.dataset, + batch_size=self.batch_size, + sampler=train_sampler, + num_workers=self.workers) + self.valid_loader = torch.utils.data.DataLoader(self.dataset, + batch_size=self.batch_size, + sampler=valid_sampler, + num_workers=self.workers) + + def _train_one_epoch(self, epoch): + self.model.train() + meters = AverageMeterGroup() + for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(self.train_loader, self.valid_loader)): + trn_X, trn_y = to_device(trn_X, self.device), to_device(trn_y, self.device) + val_X, val_y = to_device(val_X, self.device), to_device(val_y, self.device) + + if epoch >= self.warmup_epochs: + # 1) train architecture parameters + for _, module in self.nas_modules: + module.resample() + self.ctrl_optim.zero_grad() + logits, loss = self._logits_and_loss_for_arch_update(val_X, val_y) + loss.backward() + for _, module in self.nas_modules: + module.finalize_grad() + self.ctrl_optim.step() + + # 2) train model parameters + for _, module in self.nas_modules: + module.resample() + self.optimizer.zero_grad() + logits, loss = self._logits_and_loss_for_weight_update(trn_X, trn_y) + loss.backward() + self.optimizer.step() + metrics = self.metrics(logits, trn_y) + metrics["loss"] = loss.item() + if self.latency_estimator: + metrics["latency"] = self._export_latency() + meters.update(metrics) + if self.log_frequency is not None and step % self.log_frequency == 0: + _logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1, + self.num_epochs, step + 1, len(self.train_loader), meters) + + def _logits_and_loss_for_arch_update(self, X, y): + ''' return logits and loss for architecture parameter update ''' + logits = self.model(X) + ce_loss = self.loss(logits, y) + if not self.latency_estimator: + return logits, ce_loss + + current_architecture_prob = {} + for module_name, module in self.nas_modules: + probs = module.export_prob() + current_architecture_prob[module_name] = probs + expected_latency = self.latency_estimator.cal_expected_latency(current_architecture_prob) + + if self.reg_loss_type == 'mul#log': + import math + alpha = self.reg_loss_params.get('alpha', 1) + beta = self.reg_loss_params.get('beta', 0.6) + # noinspection PyUnresolvedReferences + reg_loss = (torch.log(expected_latency) / math.log(self.ref_latency)) ** beta + return logits, alpha * ce_loss * reg_loss + elif self.reg_loss_type == 'add#linear': + reg_lambda = self.reg_loss_params.get('lambda', 2e-1) + reg_loss = reg_lambda * (expected_latency - self.ref_latency) / self.ref_latency + return logits, ce_loss + reg_loss + elif self.reg_loss_type is None: + return logits, ce_loss + else: + raise ValueError(f'Do not support: {self.reg_loss_type}') + + def _logits_and_loss_for_weight_update(self, X, y): + ''' return logits and loss for weight parameter update ''' + logits = self.model(X) + loss = self.loss(logits, y) + return logits, loss + + def _export_latency(self): + current_architecture = {} + for module_name, module in self.nas_modules: + selected_module = module.export() + current_architecture[module_name] = 
selected_module
+        return self.latency_estimator.export_latency(current_architecture)
+
+    def fit(self):
+        for i in range(self.num_epochs):
+            self._train_one_epoch(i)
+
+    @torch.no_grad()
+    def export(self):
+        result = dict()
+        for name, module in self.nas_modules:
+            if name not in result:
+                result[name] = module.export()
+        return result
diff --git a/nni/retiarii/oneshot/pytorch/random.py b/nni/retiarii/oneshot/pytorch/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..794639c68dcadd609799a0180bdea577c3ada072
--- /dev/null
+++ b/nni/retiarii/oneshot/pytorch/random.py
@@ -0,0 +1,203 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import random
+
+import torch
+import torch.nn as nn
+
+from ..interface import BaseOneShotTrainer
+from .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device
+
+
+_logger = logging.getLogger(__name__)
+
+
+def _get_mask(sampled, total):
+    multihot = [i == sampled or (isinstance(sampled, list) and i in sampled) for i in range(total)]
+    return torch.tensor(multihot, dtype=torch.bool)  # pylint: disable=not-callable
+
+
+class PathSamplingLayerChoice(nn.Module):
+    """
+    Mixed module, in which the forward pass is decided by exactly one or multiple (sampled) modules.
+    If multiple modules are selected, their results are summed and returned.
+
+    Attributes
+    ----------
+    sampled : int or list of int
+        Sampled module indices.
+    mask : tensor
+        A multi-hot bool 1D-tensor representing the sampled mask.
+    """
+
+    def __init__(self, layer_choice):
+        super(PathSamplingLayerChoice, self).__init__()
+        self.op_names = []
+        for name, module in layer_choice.named_children():
+            self.add_module(name, module)
+            self.op_names.append(name)
+        assert self.op_names, 'There has to be at least one op to choose from.'
+        self.sampled = None  # sampled can be either a list of indices or an index
+
+    def forward(self, *args, **kwargs):
+        assert self.sampled is not None, 'At least one path needs to be sampled before fprop.'
+        if isinstance(self.sampled, list):
+            return sum([getattr(self, self.op_names[i])(*args, **kwargs) for i in self.sampled])  # pylint: disable=not-an-iterable
+        else:
+            return getattr(self, self.op_names[self.sampled])(*args, **kwargs)  # pylint: disable=invalid-sequence-index
+
+    def __len__(self):
+        return len(self.op_names)
+
+    @property
+    def mask(self):
+        return _get_mask(self.sampled, len(self))
+
+
+class PathSamplingInputChoice(nn.Module):
+    """
+    Mixed input. Takes a list of tensors as input, selects some of them and returns the sum.
+
+    Attributes
+    ----------
+    sampled : int or list of int
+        Sampled module indices.
+    mask : tensor
+        A multi-hot bool 1D-tensor representing the sampled mask.
+    """
+
+    def __init__(self, input_choice):
+        super(PathSamplingInputChoice, self).__init__()
+        self.n_candidates = input_choice.n_candidates
+        self.n_chosen = input_choice.n_chosen
+        self.sampled = None
+
+    def forward(self, input_tensors):
+        if isinstance(self.sampled, list):
+            return sum([input_tensors[t] for t in self.sampled])  # pylint: disable=not-an-iterable
+        else:
+            return input_tensors[self.sampled]
+
+    def __len__(self):
+        return self.n_candidates
+
+    @property
+    def mask(self):
+        return _get_mask(self.sampled, len(self))
+
+
+class SinglePathTrainer(BaseOneShotTrainer):
+    """
+    Single-path trainer. Samples a path every time and backpropagates on that path.
+
+    Parameters
+    ----------
+    model : nn.Module
+        Model with mutables.
+    loss : callable
+        Called with logits and targets. Returns a loss tensor.
+    metrics : callable
+        Returns a dict that maps metrics keys to metrics data.
+    optimizer : Optimizer
+        Optimizer that optimizes the model.
+    num_epochs : int
+        Number of epochs of training.
+    dataset_train : Dataset
+        Dataset for training.
+    dataset_valid : Dataset
+        Dataset for validation.
+    batch_size : int
+        Batch size.
+    workers : int
+        Number of workers for data loading.
+    device : torch.device
+        Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, the trainer
+        automatically detects a GPU and selects it first.
+    log_frequency : int
+        Number of mini-batches between metric logging.
+    """
+
+    def __init__(self, model, loss, metrics,
+                 optimizer, num_epochs, dataset_train, dataset_valid,
+                 batch_size=64, workers=4, device=None, log_frequency=None):
+        self.model = model
+        self.loss = loss
+        self.metrics = metrics
+        self.optimizer = optimizer
+        self.num_epochs = num_epochs
+        self.dataset_train = dataset_train
+        self.dataset_valid = dataset_valid
+        self.batch_size = batch_size
+        self.workers = workers
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
+        self.log_frequency = log_frequency
+        self.model.to(self.device)
+
+        self.nas_modules = []
+        replace_layer_choice(self.model, PathSamplingLayerChoice, self.nas_modules)
+        replace_input_choice(self.model, PathSamplingInputChoice, self.nas_modules)
+        for _, module in self.nas_modules:
+            module.to(self.device)
+
+        self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
+                                                        batch_size=batch_size,
+                                                        num_workers=workers)
+        self.valid_loader = torch.utils.data.DataLoader(self.dataset_valid,
+                                                        batch_size=batch_size,
+                                                        num_workers=workers)
+
+    def _resample(self):
+        result = {}
+        for name, module in self.nas_modules:
+            if name not in result:
+                result[name] = random.randint(0, len(module) - 1)
+            module.sampled = result[name]
+        return result
+
+    def _train_one_epoch(self, epoch):
+        self.model.train()
+        meters = AverageMeterGroup()
+        for step, (x, y) in enumerate(self.train_loader):
+            x, y = to_device(x, self.device), to_device(y, self.device)
+            self.optimizer.zero_grad()
+            self._resample()
+            logits = self.model(x)
+            loss = self.loss(logits, y)
+            loss.backward()
+            self.optimizer.step()
+
+            metrics = self.metrics(logits, y)
+            metrics["loss"] = loss.item()
+            meters.update(metrics)
+            if self.log_frequency is not None and step % self.log_frequency == 0:
+                _logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
+                             self.num_epochs, step + 1, len(self.train_loader), meters)
+
+    def _validate_one_epoch(self, epoch):
+        self.model.eval()
+        meters = AverageMeterGroup()
+        with torch.no_grad():
+            for step, (x, y) in enumerate(self.valid_loader):
+                x, y = to_device(x, self.device), to_device(y, self.device)
+                self._resample()
+                logits = self.model(x)
+                loss = self.loss(logits, y)
+                metrics = self.metrics(logits, y)
+                metrics["loss"] = loss.item()
+                meters.update(metrics)
+                if self.log_frequency is not None and step % self.log_frequency == 0:
+                    _logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1,
+                                 self.num_epochs, step + 1, len(self.valid_loader), meters)
+
+    def fit(self):
+        for i in range(self.num_epochs):
+            self._train_one_epoch(i)
+            self._validate_one_epoch(i)
+
+    def export(self):
+        return self._resample()
+
+
+RandomTrainer = SinglePathTrainer
diff --git a/nni/retiarii/oneshot/pytorch/utils.py b/nni/retiarii/oneshot/pytorch/utils.py
new file mode 100644
index
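The single-path idea that ``SinglePathTrainer`` implements above boils down to a few lines (hedged sketch, plain ``torch``, made-up candidate ops):

```python
# Sample one candidate per choice and forward only that path; this mirrors
# PathSamplingLayerChoice with an int `sampled`.
import random
import torch
import torch.nn as nn

ops = nn.ModuleList([nn.Conv2d(8, 8, 3, padding=1),   # candidate 0
                     nn.Conv2d(8, 8, 5, padding=2)])  # candidate 1
sampled = random.randint(0, len(ops) - 1)             # the _resample step
x = torch.randn(1, 8, 16, 16)
y = ops[sampled](x)                                   # only the sampled op runs
print(sampled, y.shape)
```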
0000000000000000000000000000000000000000..61a4f916ae626cfd5409064321ccd6915e2868cc --- /dev/null +++ b/nni/retiarii/oneshot/pytorch/utils.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +from collections import OrderedDict + +import numpy as np +import torch +import nni.retiarii.nn.pytorch as nn +from nni.nas.pytorch.mutables import InputChoice, LayerChoice + +_logger = logging.getLogger(__name__) + + +def to_device(obj, device): + """ + Move a tensor, tuple, list, or dict onto device. + """ + if torch.is_tensor(obj): + return obj.to(device) + if isinstance(obj, tuple): + return tuple(to_device(t, device) for t in obj) + if isinstance(obj, list): + return [to_device(t, device) for t in obj] + if isinstance(obj, dict): + return {k: to_device(v, device) for k, v in obj.items()} + if isinstance(obj, (int, float, str)): + return obj + raise ValueError("'%s' has unsupported type '%s'" % (obj, type(obj))) + + +def to_list(arr): + if torch.is_tensor(arr): + return arr.cpu().numpy().tolist() + if isinstance(arr, np.ndarray): + return arr.tolist() + if isinstance(arr, (list, tuple)): + return list(arr) + return arr + + +class AverageMeterGroup: + """ + Average meter group for multiple average meters. + """ + + def __init__(self): + self.meters = OrderedDict() + + def update(self, data): + """ + Update the meter group with a dict of metrics. + Non-exist average meters will be automatically created. + """ + for k, v in data.items(): + if k not in self.meters: + self.meters[k] = AverageMeter(k, ":4f") + self.meters[k].update(v) + + def __getattr__(self, item): + return self.meters[item] + + def __getitem__(self, item): + return self.meters[item] + + def __str__(self): + return " ".join(str(v) for v in self.meters.values()) + + def summary(self): + """ + Return a summary string of group data. + """ + return " ".join(v.summary() for v in self.meters.values()) + + +class AverageMeter: + """ + Computes and stores the average and current value. + + Parameters + ---------- + name : str + Name to display. + fmt : str + Format string to print the values. + """ + + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + """ + Reset the meter. + """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ + Update with value and weight. + + Parameters + ---------- + val : float or int + The new value to be accounted in. + n : int + The weight of the new value. + """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + def summary(self): + fmtstr = '{name}: {avg' + self.fmt + '}' + return fmtstr.format(**self.__dict__) + + +def _replace_module_with_type(root_module, init_fn, type_name, modules): + if modules is None: + modules = [] + + def apply(m): + for name, child in m.named_children(): + if isinstance(child, type_name): + setattr(m, name, init_fn(child)) + modules.append((child.key, getattr(m, name))) + else: + apply(child) + + apply(root_module) + return modules + + +def replace_layer_choice(root_module, init_fn, modules=None): + """ + Replace layer choice modules with modules that are initiated with init_fn. + + Parameters + ---------- + root_module : nn.Module + Root module to traverse. + init_fn : Callable + Initializing function. 
+    modules : list, optional
+        If provided, the replaced modules are appended to this list.
+
+    Returns
+    -------
+    List[Tuple[str, nn.Module]]
+        A list of tuples of layer choice keys (names) and replaced modules.
+    """
+    return _replace_module_with_type(root_module, init_fn, (LayerChoice, nn.LayerChoice), modules)
+
+
+def replace_input_choice(root_module, init_fn, modules=None):
+    """
+    Replace input choice modules with modules that are initiated with init_fn.
+
+    Parameters
+    ----------
+    root_module : nn.Module
+        Root module to traverse.
+    init_fn : Callable
+        Initializing function.
+    modules : list, optional
+        If provided, the replaced modules are appended to this list.
+
+    Returns
+    -------
+    List[Tuple[str, nn.Module]]
+        A list of tuples of input choice keys (names) and replaced modules.
+    """
+    return _replace_module_with_type(root_module, init_fn, (InputChoice, nn.InputChoice), modules)
diff --git a/nni/retiarii/operation.py b/nni/retiarii/operation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c2af1e3a422268000844a9d5723e26b3cad9213
--- /dev/null
+++ b/nni/retiarii/operation.py
@@ -0,0 +1,242 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from typing import (Any, Dict, List)
+
+from . import debug_configs
+
+__all__ = ['Operation', 'Cell']
+
+
+def _convert_name(name: str) -> str:
+    """
+    Convert a name using the separator '.' to a valid variable name in code
+    """
+    return name.replace('.', '__')
+
+
+class Operation:
+    """
+    Calculation logic of a graph node.
+
+    The constructor is private. Use `Operation.new()` to create an operation object.
+
+    `Operation` is a naive record.
+    Do not "mutate" its attributes or store information related to a specific node.
+    All complex logic should be implemented in the `Node` class.
+
+    Attributes
+    ----------
+    type
+        Operation type name (e.g. Conv2D).
+        If it starts with an underscore, the "operation" is a special one (e.g. subgraph, input/output).
+    parameters
+        Arbitrary key-value parameters (e.g. kernel_size).
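A hedged usage sketch of the record described above (parameter values are invented; this assumes the default PyTorch framework setting):

```python
# Operation.new dispatches to a framework-specific subclass; unknown type names
# fall back to the generic PyTorchOperation record.
from nni.retiarii.operation import Operation

op = Operation.new('Conv2d', parameters={'in_channels': 3,
                                         'out_channels': 16,
                                         'kernel_size': 3})
print(op.type, op.parameters)
```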
+ """ + + def __init__(self, type_name: str, parameters: Dict[str, Any] = {}, _internal: bool = False, attributes: Dict[str, Any] = {}): + assert _internal, '`Operation()` is private, use `Operation.new()` instead' + self.type: str = type_name + self.parameters: Dict[str, Any] = parameters + self.attributes: Dict[str, Any] = attributes + + def to_init_code(self, field: str) -> str: + raise NotImplementedError() + + def to_forward_code(self, field: str, output: str, inputs: List[str]) -> str: + raise NotImplementedError() + + def _to_class_name(self) -> str: + raise NotImplementedError() + + def __bool__(self) -> bool: + return True + + @staticmethod + def new(type_name: str, parameters: Dict[str, Any] = None, cell_name: str = None, + attributes: Dict[str, Any] = None) -> 'Operation': + parameters = parameters or {} + attributes = attributes or {} + if type_name == '_cell': + # NOTE: cell_name is the same as its Node's name, when the cell is wrapped within the node + return Cell(cell_name, parameters) + else: + if debug_configs.framework.lower() in ('torch', 'pytorch'): + from .operation_def import torch_op_def # pylint: disable=unused-import + cls = PyTorchOperation._find_subclass(type_name) + elif debug_configs.framework.lower() in ('tf', 'tensorflow'): + from .operation_def import tf_op_def # pylint: disable=unused-import + cls = TensorFlowOperation._find_subclass(type_name) + else: + raise ValueError(f'Unsupported framework: {debug_configs.framework}') + return cls(type_name, parameters, _internal=True, attributes=attributes) + + @classmethod + def _find_subclass(cls, subclass_name): + for subclass in cls.__subclasses__(): + if subclass.__name__ == subclass_name: + return subclass + return cls + + def __repr__(self): + type_name = type(self).__name__ + args = [f'{key}={repr(value)}' for key, value in self.parameters.items()] + if type_name != self.type: + args = [f'type="{self.type}"'] + args + return f'{type_name}({", ".join(args)})' + + def __eq__(self, other): + return type(other) is type(self) and other.type == self.type and other.parameters == self.parameters + + +class PyTorchOperation(Operation): + @classmethod + def _find_subclass(cls, subclass_name): + if cls.to_class_name(subclass_name) is not None: + subclass_name = 'ModuleOperator' + if cls.is_functional(subclass_name): + subclass_name = 'FunctionalOperator' + for subclass in cls.__subclasses__(): + if hasattr(subclass, '_ori_type_name') and \ + subclass_name in subclass._ori_type_name: + return subclass + for subclass in cls.__subclasses__(): + if hasattr(subclass, '_artificial_op_name') and \ + subclass_name in subclass._artificial_op_name: + return subclass + return cls + + @classmethod + def to_class_name(cls, type_name) -> str: + if type_name.startswith('__torch__.'): + return type_name[len('__torch__.'):] + elif type_name.startswith('__mutated__.'): + return type_name[len('__mutated__.'):] + else: + return None + + @classmethod + def is_functional(cls, type_name) -> bool: + return type_name.startswith('Function.') + + def _to_class_name(self) -> str: + if self.type.startswith('__torch__.'): + return self.type[len('__torch__.'):] + elif self.type.startswith('__mutated__.'): + return self.type[len('__mutated__.'):] + else: + return None + + def get_import_pkg(self) -> str: + if self.type.startswith('__torch__.'): + return self.type[len('__torch__.'):].split('.')[0] + elif self.type.startswith('__mutated__.'): + return self.type[len('__mutated__.'):].split('.')[0] + else: + return None + + def to_init_code(self, field: 
str) -> str: + if self._to_class_name() is not None: + assert 'positional_args' not in self.parameters + kw_params = ', '.join(f'{key}={repr(value)}' for key, value in self.parameters.items()) + return f'self.{field} = {self._to_class_name()}({kw_params})' + return None + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + """ + Parameters + ---------- + field : str + the name of member submodule + output : str + the output name (lvalue) of this line of code + inputs : List[str] + variables used in this line of code + inputs_value : List[Any] + some variables are actually constant, their real values are recorded in ```inputs_value```. + if not constant, we simply put None at the corresponding index + + Returns + ------- + str + generated code line + """ + if self.type == 'aten::slice': + raise RuntimeError('not supposed to have aten::slice operation') + else: + raise RuntimeError(f'unsupported operation type: {self.type} ? {self._to_class_name()}') + + +class TensorFlowOperation(Operation): + def _to_class_name(self) -> str: + return 'K.layers.' + self.type + + +class Cell(PyTorchOperation): + """ + TODO: this is pytorch cell + + An operation reference to a subgraph. + + Example code: + ``` + def __init__(...): + ... + self.cell = CustomCell(...) + self.relu = K.layers.ReLU() + ... + + def forward(...): + ... + x = self.cell(x) + ... + ``` + + In above example, node `self.cell`'s operation is `Cell(cell_name='CustomCell')`. + For comparison, `self.relu`'s operation is `Operation(type='ReLU')`. + + TODO: parameters of subgraph (see `Node` class) + + Attributes + ---------- + type + Always "_cell". + parameters + A dict with only one item; the key is "cell" and the value is cell's name. + framework + No real usage. Exists for compatibility with base class. + """ + + def __init__(self, cell_name: str, parameters: Dict[str, Any] = None, attributes: Dict[str, Any] = None): + self.type = '_cell' + self.cell_name = cell_name + self.parameters = parameters or {} + self.attributes = attributes or {} + + def _to_class_name(self): + # TODO: ugly, think about how to refactor this part + return _convert_name(self.cell_name) + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = self.{field}({", ".join(inputs)})' + +class _IOPseudoOperation(Operation): + """ + This is the pseudo operation used by I/O nodes. + The benefit is that users no longer need to verify `Node.operation is not None`, + especially in static type checking. + """ + + def __init__(self, type_name: str, io_names: List = None): + assert type_name.startswith('_') + super(_IOPseudoOperation, self).__init__(type_name, {}, True) + self.io_names = io_names + + def to_init_code(self, field: str) -> str: + raise ValueError(f'Cannot generate code for pseudo operation "{self.type}"') + + def to_forward_code(self, field: str, output: str, inputs: List[str]) -> str: + raise ValueError(f'Cannot generate code for pseudo operation "{self.type}"') + + def __bool__(self) -> bool: + return False diff --git a/nni/retiarii/operation_def/__init__.py b/nni/retiarii/operation_def/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0b9575cc55a0a035af3de2c1608b60bc7631a676 --- /dev/null +++ b/nni/retiarii/operation_def/__init__.py @@ -0,0 +1,7 @@ +""" +Definition of operation types. + +These are currently examples for overriding codegen. 
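A hedged sketch of the overriding pattern these files use (the op type ``aten::my_add`` is made up purely for illustration; the ``_ori_type_name``/``to_forward_code`` pattern mirrors ``torch_op_def.py`` below):

```python
# Registering a code-generation rule for a (hypothetical) operation type.
from typing import Any, List
from nni.retiarii.operation import Operation, PyTorchOperation

class MyAdd(PyTorchOperation):
    _ori_type_name = ['aten::my_add']  # made-up type name

    def to_forward_code(self, field: str, output: str, inputs: List[str],
                        inputs_value: List[Any] = None) -> str:
        # emit one line of forward code: out = a + b
        return f'{output} = {inputs[0]} + {inputs[1]}'

op = Operation.new('aten::my_add', {})           # dispatches to MyAdd via subclass lookup
print(type(op).__name__, op.to_forward_code('fc', 'out', ['a', 'b']))
```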
+
+Feel free to propose better package name or hierarchy.
+"""
diff --git a/nni/retiarii/operation_def/tf_op_def.py b/nni/retiarii/operation_def/tf_op_def.py
new file mode 100644
index 0000000000000000000000000000000000000000..d030364a335206aa04cba99368c246341024f0bf
--- /dev/null
+++ b/nni/retiarii/operation_def/tf_op_def.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from ..operation import TensorFlowOperation
+
+
+class Conv2D(TensorFlowOperation):
+    def __init__(self, type_name, parameters, _internal, attributes=None):
+        if 'padding' not in parameters:
+            parameters['padding'] = 'same'
+        super().__init__(type_name, parameters, _internal)
diff --git a/nni/retiarii/operation_def/torch_op_def.py b/nni/retiarii/operation_def/torch_op_def.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47949aa67cdefd93e11a5e8c7c476ee5c6c77f1
--- /dev/null
+++ b/nni/retiarii/operation_def/torch_op_def.py
@@ -0,0 +1,537 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from typing import (Any, Dict, List)
+
+import torch
+
+from ..operation import PyTorchOperation
+
+
+mem_format = [
+    'torch.contiguous_format',   # 0
+    'torch.preserve_format',     # 1
+    'torch.channels_last',       # 2
+]
+
+# this snippet is copied from torch/onnx/symbolic_helper.py,
+# the original definition is in c10/core/ScalarType.h
+# This maps each scalar type index to its corresponding PyTorch type string.
+scalar_type_to_pytorch_type = [
+    'torch.uint8',        # 0
+    'torch.int8',         # 1
+    'torch.short',        # 2
+    'torch.int',          # 3
+    'torch.int64',        # 4
+    'torch.half',         # 5
+    'torch.float',        # 6
+    'torch.double',       # 7
+    'torch.complex32',    # 8
+    'torch.complex64',    # 9
+    'torch.complex128',   # 10
+    'torch.bool',         # 11
+]
+
+
+class NoOpIdentity(PyTorchOperation):
+    """
+    This operator type is added by NNI; it simply forwards its inputs unchanged.
+    """
+    _ori_type_name = ['noop_identity']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        return f'{output} = {", ".join(inputs)}'
+
+
+class ModuleOperator(PyTorchOperation):
+    _ori_type_name = ['ModuleOperator', 'shared']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        return f'{output} = self.{field}({", ".join(inputs)})'
+
+
+class FunctionalOperator(PyTorchOperation):
+    _ori_type_name = ['FunctionalOperator']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        func_name = self.type[len('Function.'):]
+        if not hasattr(torch.nn.functional, func_name):
+            raise RuntimeError('For now, we only support calling independent functions from `torch.nn.functional`, '
+                               f'{func_name} is not in it.')
+        return f'{output} = F.{func_name}({", ".join(inputs)})'
+
+
+class PrimConstant(PyTorchOperation):
+    _ori_type_name = ['prim::Constant']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        # TODO: refactor this part, maybe we can remove the code gen of prim::Constant
+        # TODO: deal with all the types
+        if self.parameters['type'] in ['None', 'NoneType']:
+            return f'{output} = None'
+        elif self.parameters['type'] in ('int', 'float', 'bool', 'int[]'):  # 'Long()' ???
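+            # int/float/bool and int-list constants stringify to valid Python literals, so they can be inlined directly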
+ return f'{output} = {self.parameters["value"]}' + elif self.parameters['type'] == 'str': + str_val = self.parameters["value"] + return f'{output} = "{str_val}"' + elif self.parameters['type'] == 'Device': + value = self.parameters['value'] + return f'{output} = torch.device("{value}")' + elif self.parameters['type'] in ('dict', 'list', 'tuple'): + # TODO: prim::TupleIndex is not supported yet + return f'{output} = {repr(self.parameters["value"])}' + else: + raise RuntimeError(f'unsupported type of prim::Constant: {self.parameters["type"]}') + + +class PrimListConstruct(PyTorchOperation): + _ori_type_name = ['prim::ListConstruct'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = [{", ".join(inputs)}]' + + +class PrimListUnpack(PyTorchOperation): + _ori_type_name = ['prim::ListUnpack'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = {inputs[0]}' + + +class PrimTupleConstruct(PyTorchOperation): + _ori_type_name = ['prim::TupleConstruct'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = ({", ".join(inputs)})' + + +class PrimTupleUnpack(PyTorchOperation): + _ori_type_name = ['prim::TupleUnpack'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + # have single output here, because the following code uses index to access the unpacked values + assert len(inputs) == 1 + return f'{output} = {inputs[0]}' + + +class PrimGetAttr(PyTorchOperation): + _ori_type_name = ['prim::GetAttr'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + if self.parameters['value'] is not None: + return f"{output} = {self.parameters['value']}" + else: + return f"{output} = {self.parameters['input']}.{self.parameters['name']}" + + +class SimpleMember(PyTorchOperation): + _ori_type_name = ['prim::is_cuda', 'prim::data'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + member_name = self.type.split('::')[-1] + return f'{output} = {inputs[0]}.{member_name}' + + +class AtenContiguous(PyTorchOperation): + _ori_type_name = ['aten::contiguous'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + # defined in pytorch/c10/core/MemoryFormat.h + assert inputs_value[1] in [0, 1, 2] + return f'{output} = {inputs[0]}.contiguous(memory_format={mem_format[inputs_value[1]]})' + + +class AtenGetitem(PyTorchOperation): + _ori_type_name = ['aten::__getitem__'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + assert len(inputs) == 2 + return f'{output} = {inputs[0]}[{inputs[1]}]' + + +class AtenAppend(PyTorchOperation): + _ori_type_name = ['aten::append'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + assert len(inputs) == 2 + return f'_, {output} = {inputs[0]}.append({inputs[1]}), {inputs[0]}' + + +class MergedSlice(PyTorchOperation): + _ori_type_name = ['MergedSlice'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + if (len(inputs) - 1) % 4 == 0: + slices = [] + dim = int((len(inputs) - 1) / 4) + for i in range(dim): + 
slices.append(f'{inputs[i*4+2]}:{inputs[i*4+3]}:{inputs[i*4+4]}') + slice_str = ','.join(slices) + return f'{output} = {inputs[0]}[{slice_str}]' + elif len(inputs) == 4: + # this case is for simple list + return f'{output} = {inputs[0]}[{inputs[1]}:{inputs[2]}:{inputs[3]}]' + else: + raise RuntimeError('Unsupported slice pattern') + +# the following Aten classes means these aten ops are not in torch.Tensor + + +class AtenBool(PyTorchOperation): + _ori_type_name = ['aten::Bool'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = bool({inputs[0]})' + + +class AtenNot(PyTorchOperation): + _ori_type_name = ['aten::__not__'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = not {inputs[0]}' + + +class AtenCat(PyTorchOperation): + _ori_type_name = ['aten::cat'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + assert len(inputs) == 2 + return f'{output} = torch.cat({inputs[0]}, dim={inputs[1]})' + +# ==================================== + + +class AtenTensors(PyTorchOperation): + _ori_type_name = ['aten::full', 'aten::full_like', 'aten::empty_like', + 'aten::ones_like', 'aten::zeros_like', 'aten::rand', + 'aten::randn', 'aten::scalar_tensor', 'aten::new_full', + 'aten::new_empty', 'aten::new_zeros', 'aten::arange', + 'aten::tensor', 'aten::ones', 'aten::zeros', 'aten::as_tensor'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + schemas = torch._C._jit_get_schemas_for_operator(self.type) + # match number of inputs + overloaded_defs = [len(s.arguments) for s in schemas] + matched = overloaded_defs.index(len(inputs)) + args_list = [] + for idx, arg in enumerate(schemas[matched].arguments): + if arg.name == 'dtype': + arg_str = f'dtype={scalar_type_to_pytorch_type[inputs_value[idx]]}' if inputs_value[idx] is not None else '' + elif arg.name == 'layout': + if inputs_value[idx] is not None: + arg_str = f'layout=torch.strided' + print('Warning: only support `torch.strided` for now!!!') + else: + arg_str = '' + elif arg.name == 'device': + arg_str = f'device=torch.device({inputs[idx]})' if inputs_value[idx] is not None else '' + elif arg.name == 'memory_format': + arg_str = f'memory_format={mem_format[inputs_value[idx]]}' if inputs_value[idx] is not None else '' + elif arg.name == 'pin_memory': + # TODO: deal with this argument + continue + elif arg.name == 'requires_grad': + arg_str = f'requires_grad={inputs[idx]}' if inputs_value[idx] else '' + elif str(arg.type).startswith('Optional['): + arg_str = f'{arg.name}={inputs[idx]}' + else: + arg_str = f'{inputs[idx]}' + if arg_str != '': + args_list.append(arg_str) + op_name = self.type.split('::')[-1] + if hasattr(torch, op_name): + return f'{output} = torch.{op_name}({", ".join(args_list)})' + else: + return f'{output} = {inputs[0]}.{op_name}({", ".join(args_list[1:])})' + +# ==================================== + + +class AtenFloordiv(PyTorchOperation): + _ori_type_name = ['aten::floordiv'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = {inputs[0]} // {inputs[1]}' + + +class AtenMul(PyTorchOperation): + _ori_type_name = ['aten::mul'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = 
{inputs[0]} * {inputs[1]}' + + +class AtenLen(PyTorchOperation): + _ori_type_name = ['aten::len'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = len({inputs[0]})' + + +class AtenIntImplicit(PyTorchOperation): + _ori_type_name = ['aten::IntImplicit', 'aten::Float', 'aten::Int', 'aten::ScalarImplicit'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + if self.type.endswith('Implicit'): + return f'{output} = {inputs[0]}' + elif self.type == 'aten::Int': + return f'{output} = int({inputs[0]})' + elif self.type == 'aten::Float': + return f'{output} = float({inputs[0]})' + + +class AtenIndex(PyTorchOperation): + _ori_type_name = ['aten::index'] + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + return f'{output} = {inputs[0]}[{inputs[1]}]' + + +ManuallyChooseDef = { + 'aten::flatten': [('start_dim', 'int', '0'), ('end_dim', 'int', '-1')], + 'aten::split': [('split_size', 'int', 'None'), ('dim', 'int', '0')], + # in v1.9 dtype is supported as input argument for view, but torch script does not support it + 'aten::view': [('size', 'List[int]', 'None')], + # NOTE: dim supports different types: List[int], List[str], Optional[List[int]], now we only support the first two, refactor needed + # torch.std(input, dim, unbiased, keepdim=False, *, out=None) Tensor + # torch.std(input, unbiased) Tensor + 'aten::std': [('dim', 'List[int]', 'None'), ('unbiased', 'bool', 'True'), ('keepdim', 'bool', 'False')] +} + +TensorOpExceptions = { + 'aten::sub': lambda output, inputs: f'{output} = {inputs[0]} - {inputs[1]}', # example: x.size(1) - 3 + 'aten::add': lambda output, inputs: f'{output} = {inputs[0]} + {inputs[1]}' # example: input.shape[0] + 5 +} + +TorchOpExclude = ['aten::Size', 'aten::as_tensor', 'aten::device', + 'aten::manual_seed', 'aten::quantized_gru', 'aten::quantized_lstm', + 'aten::save', 'aten::tensor', 'aten::wait' + ] + + +def _hidden(name): + return name.startswith('_') and not name.startswith('__') + + +def _emit_args(args): + # filter out the `out` argument here + return [(arg.name, str(arg.type), str(arg.default_value)) for arg in args] # if arg.name != 'out' + + +def _get_tensor_ops(): + def is_tensor_method(schema): + if len(schema.arguments) == 0: + return False + self = schema.arguments[0] + if self.name != 'self': + return False + if not self.type.isSubtypeOf(torch._C.TensorType.get()): + return False + return True + + op_args = {} + # discover methods + for elem in dir(torch.Tensor): + if not _hidden(elem): + schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem) + for schema in schemas: + if is_tensor_method(schema): + op_name = 'aten::' + elem + args = _emit_args(schema.arguments[1:]) + if op_name in op_args: + op_args[op_name].append(args) + else: + op_args[op_name] = [args] + + return op_args.keys(), op_args + + +def _get_torch_ops(): + torch_op_args = {} + for mod in torch.jit._builtins._modules_containing_builtins: + name = mod.__name__ + if name == 'torch._C._nn': + continue + # only process 'torch.XXX' + for elem in dir(mod): + builtin = torch.jit._builtins._find_builtin(getattr(mod, elem)) + if builtin is not None: + schemas = torch._C._jit_get_schemas_for_operator(builtin) + for schema in schemas: + # remove _tan but not __and__ + if not _hidden(elem): + op_name = 'aten::' + elem + if len(schema.arguments) > 0 and schema.arguments[0].name == 'self': 
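+                            # schemas whose first argument is `self` are Tensor methods; _get_tensor_ops collects those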
+ continue + args = _emit_args(schema.arguments) + if op_name in torch_op_args: + torch_op_args[op_name].append(args) + else: + torch_op_args[op_name] = [args] + + return torch_op_args.keys(), torch_op_args + + +def _get_torch_ops_exclude_tensor_ops(): + tensor_op_names, _ = _get_tensor_ops() + torch_op_names, torch_ops = _get_torch_ops() + + torch_exclude_ops = {} + for name in torch_op_names: + if name not in tensor_op_names: + if name not in TorchOpExclude: + # exclude the ops that are not in + # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml + torch_exclude_ops[name] = torch_ops[name] + + return torch_exclude_ops.keys(), torch_exclude_ops + + +class TensorOps(PyTorchOperation): + """ + corresponding to _get_tensor_ops in torch.jit.supported_ops + """ + _ori_type_name, _op_args = _get_tensor_ops() + + comparison_ops = {'aten::eq': '==', 'aten::ne': '!=', 'aten::le': '<=', 'aten::ge': '>=', 'aten::lt': '<', 'aten::gt': '>'} + + @staticmethod + def _get_matched_args(_type, inputs): + def has_same_arg_name(matched): + concated_names = [] + for i, each in enumerate(matched): + name = ','.join([arg[0] for arg in each]) + concated_names.append(name) + for i in range(len(concated_names) - 1): + if concated_names[i] != concated_names[i + 1]: + return False + return True + + overloaded_defs = TensorOps._op_args[_type] + matched = [] + for each in overloaded_defs: + # plus 1 because we skip the first argument when generating tensor op def + if len(each) + 1 == len(inputs): + matched.append(each) + if len(matched) == 1: + return matched[0] + elif len(matched) > 1: + # TODO: match with arg's type. manually choose for now + if has_same_arg_name(matched): + # return any one is okay + return matched[0] + elif _type in ManuallyChooseDef: + return ManuallyChooseDef[_type] + else: + raise RuntimeError(f'tensor op type {_type} has more than one matched: {matched}') + else: + if _type in TensorOpExceptions: + return None + raise RuntimeError(f'tensor op type {_type} has no matched') + + def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str: + # TODO: deal with conditional ops + if self.type in TensorOps.comparison_ops: + return f'{output} = ({inputs[0]} {TensorOps.comparison_ops[self.type]} {inputs[1]})' + matched_args = TensorOps._get_matched_args(self.type, inputs) + if matched_args is None: + return TensorOpExceptions[self.type](output, inputs) + op_name = self.type.split('::')[-1] + args_str = ', '.join([f'{name}={inputs[i+1]}' for i, (name, t, default) in enumerate(matched_args)]) + return f'{output} = {inputs[0]}.{op_name}({args_str})' + + +class TorchOps(PyTorchOperation): + """ + corresponding to _get_nn_functional_ops in torch.jit.supported_ops + """ + _ori_type_name, _op_args = _get_torch_ops_exclude_tensor_ops() + # add 'aten::pixel_shuffle' + _op_args['aten::pixel_shuffle'] = [[('input', 'Tensor', 'None'), ('upscale_factor', 'Optional[int]', 'None')]] + _ori_type_name = _op_args.keys() + + @staticmethod + def _get_matched_args(_type, inputs): + def has_same_arg_name(matched): + concated_names = [] + for i, each in enumerate(matched): + name = ','.join([arg[0] for arg in each]) + concated_names.append(name) + for i in range(len(concated_names) - 1): + if concated_names[i] != concated_names[i + 1]: + return False + return True + + overloaded_defs = TorchOps._op_args[_type] + matched = [] + for each in overloaded_defs: + if len(each) == len(inputs): + matched.append(each) + if len(matched) == 1: + 
return matched[0]
+        elif len(matched) > 1:
+            # TODO: match with arg's type. manually choose for now
+            if has_same_arg_name(matched):
+                # return any one is okay
+                return matched[0]
+            else:
+                raise RuntimeError(f'torch op type {_type} has more than one matched: {matched}')
+        else:
+            raise RuntimeError(f'torch op type {_type} has no matched')
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        matched_args = TorchOps._get_matched_args(self.type, inputs)
+        op_name = self.type.split('::')[-1]
+        args_str = ', '.join([f'{name}={inputs[i]}' if t.startswith('Optional[') else f'{inputs[i]}'
+                              for i, (name, t, default) in enumerate(matched_args)])
+        return f'{output} = torch.{op_name}({args_str})'
+
+
+class AtenAvgpool2d(PyTorchOperation):
+    # NOTE: it is not included in the above aten ops for an unknown reason
+    _ori_type_name = ['aten::avg_pool2d']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        return f'{output} = F.avg_pool2d({", ".join(inputs)})'
+
+
+class ToDevice(PyTorchOperation):
+    _artificial_op_name = "ToDevice"
+
+    def __init__(self, type_name: str, parameters: Dict[str, Any], _internal: bool = False,
+                 attributes: Dict[str, Any] = None):
+        self.type = "ToDevice"
+        self.device = parameters['device']
+        self.overridden_device_repr = None
+        self.src = parameters['src']
+        self.dst = parameters['dst']
+
+    def override_device_repr(self, device_repr):
+        # CUDA GPUDevice may remap a GPU's physical ID to a CUDA ID, so the device repr can differ from
+        # GPUDevice.device_repr(). override_device_repr is called in pytorch.graph_to_pytorch_model to replace
+        # device_repr with the correct CUDA ID. For example, when a job uses physical GPU-1 and GPU-2, their CUDA IDs
+        # should be "cuda:0" and "cuda:1"; self.device.device_repr() would return "cuda:1" and "cuda:2", while
+        # override_device_repr should be "cuda:0" and "cuda:1".
+        self.overridden_device_repr = device_repr
+
+    def __repr__(self):
+        if self.overridden_device_repr is None:
+            return f'to("{self.device.device_repr()}")'
+        else:
+            return f'to("{self.overridden_device_repr}")'
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any]) -> str:
+        if self.overridden_device_repr is None:
+            forward_code = f'{output} = {inputs[0]}.to("{self.device.device_repr()}")'
+        else:
+            forward_code = f'{output} = {inputs[0]}.to("{self.overridden_device_repr}")'
+        return forward_code
+
+
+class AtenDet(PyTorchOperation):
+    # for torch 1.9
+    # NOTE: it is not included in the above aten ops, maybe because torch.det is an alias for torch.linalg.det
+    _ori_type_name = ['aten::linalg_det']
+
+    def to_forward_code(self, field: str, output: str, inputs: List[str], inputs_value: List[Any] = None) -> str:
+        return f'{output} = torch.det({inputs[0]})'
diff --git a/nni/retiarii/serializer.py b/nni/retiarii/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..933475628dac28a1fcffc3460471891f3e17718a
--- /dev/null
+++ b/nni/retiarii/serializer.py
@@ -0,0 +1,138 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
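Before moving on to the serializer, here is a minimal sketch of how the operation classes above take part in code generation. `AtenRelu6` and the emitted names are hypothetical, but the dispatch via `_ori_type_name` and the `to_forward_code` contract mirror the classes defined earlier.

```python
from typing import Any, List

from nni.retiarii.operation import PyTorchOperation


class AtenRelu6(PyTorchOperation):
    # hypothetical op definition: translate `aten::relu6` nodes into F.relu6 calls
    _ori_type_name = ['aten::relu6']

    def to_forward_code(self, field: str, output: str, inputs: List[str],
                        inputs_value: List[Any] = None) -> str:
        return f'{output} = F.relu6({", ".join(inputs)})'


# _find_subclass resolves the handler by matching the type string against _ori_type_name
op_cls = PyTorchOperation._find_subclass('aten::relu6')
op = op_cls('aten::relu6', {}, _internal=True)
print(op.to_forward_code('op1', 'out0', ['x']))   # -> out0 = F.relu6(x)
```

Generated lines like `out0 = F.relu6(x)` are what the code generator later assembles into a module's `forward` body.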
+
+import inspect
+import warnings
+from typing import Any, TypeVar, Union
+
+from nni.common.serializer import Traceable, is_traceable, trace, _copy_class_wrapper_attributes
+from .utils import ModelNamespace
+
+__all__ = ['get_init_parameters_or_fail', 'serialize', 'serialize_cls', 'basic_unit', 'model_wrapper',
+           'is_basic_unit', 'is_model_wrapped']
+
+T = TypeVar('T')
+
+
+def get_init_parameters_or_fail(obj: Any):
+    if is_traceable(obj):
+        return obj.trace_kwargs
+    raise ValueError(f'Object {obj} needs to be serializable but `trace_kwargs` is not available. '
+                     'If it is a built-in module (like Conv2d), please import it from retiarii.nn. '
+                     'If it is a customized module, please decorate it with @basic_unit. '
+                     'For other complex objects (e.g., trainer, optimizer, dataset, dataloader), '
+                     'try to use @nni.trace.')
+
+
+def serialize(cls, *args, **kwargs):
+    """
+    Create a serializable instance inline, without using the decorator. For example,
+
+    .. code-block:: python
+
+        self.op = serialize(MyCustomOp, hidden_units=128)
+    """
+    warnings.warn('nni.retiarii.serialize is deprecated and will be removed in a future release. ' +
+                  'Try to use nni.trace, e.g., nni.trace(torch.optim.Adam)(learning_rate=1e-4) instead.',
+                  category=DeprecationWarning)
+    return trace(cls)(*args, **kwargs)
+
+
+def serialize_cls(cls):
+    """
+    Create a serializable class.
+    """
+    warnings.warn('nni.retiarii.serialize_cls is deprecated and will be removed in a future release. ' +
+                  'Try to use nni.trace instead.', category=DeprecationWarning)
+    return trace(cls)
+
+
+def basic_unit(cls: T, basic_unit_tag: bool = True) -> Union[T, Traceable]:
+    """
+    Wrap a module as a basic unit, i.e., make it a primitive and stop the engine from digging deeper into it.
+
+    ``basic_unit_tag`` is true by default. If set to false, the module will not be explicitly marked as a basic unit, and
+    the graph parser will continue to parse into it. Currently, this is to handle a special case in ``nn.Sequential``.
+
+    Although ``basic_unit`` calls ``trace`` in its implementation, it is not for serialization. Rather, it is meant
+    to capture the initialization arguments for mutation. Also, the graph execution engine will stop digging into the inner
+    modules when it reaches a module that is decorated with ``basic_unit``.
+
+    .. code-block:: python
+
+        @basic_unit
+        class PrimitiveOp(nn.Module):
+            ...
+    """
+    _check_wrapped(cls)
+
+    import torch.nn as nn
+    assert issubclass(cls, nn.Module), 'When using @basic_unit, the class must be a subclass of nn.Module.'
+
+    cls = trace(cls)
+    cls._nni_basic_unit = basic_unit_tag
+
+    # HACK: for torch script
+    # https://github.com/pytorch/pytorch/pull/45261
+    # https://github.com/pytorch/pytorch/issues/54688
+    # I'm not sure whether there will be potential issues
+    import torch
+    cls._get_nni_attr = torch.jit.ignore(cls._get_nni_attr)
+    cls.trace_symbol = torch.jit.unused(cls.trace_symbol)
+    cls.trace_args = torch.jit.unused(cls.trace_args)
+    cls.trace_kwargs = torch.jit.unused(cls.trace_kwargs)
+
+    return cls
+
+
+def model_wrapper(cls: T) -> Union[T, Traceable]:
+    """
+    Wrap the base model (search space). For example,
+
+    .. code-block:: python
+
+        @model_wrapper
+        class MyModel(nn.Module):
+            ...
+
+    The wrapper serves two purposes:
+
+    1. Capture the init parameters of the Python class so that it can be re-instantiated in another process.
+    2. Reset the uid in the namespace so that the auto label counting in each model stably starts from zero.
+ + Currently, NNI might not complain in simple cases where ``@model_wrapper`` is actually not needed. + But in future, we might enforce ``@model_wrapper`` to be required for base model. + """ + _check_wrapped(cls) + + import torch.nn as nn + assert issubclass(cls, nn.Module) + + wrapper = trace(cls) + + class reset_wrapper(wrapper): + def __init__(self, *args, **kwargs): + with ModelNamespace(): + super().__init__(*args, **kwargs) + + _copy_class_wrapper_attributes(wrapper, reset_wrapper) + reset_wrapper.__wrapped__ = wrapper.__wrapped__ + reset_wrapper._nni_model_wrapper = True + return reset_wrapper + + +def is_basic_unit(cls_or_instance) -> bool: + if not inspect.isclass(cls_or_instance): + cls_or_instance = cls_or_instance.__class__ + return getattr(cls_or_instance, '_nni_basic_unit', False) + + +def is_model_wrapped(cls_or_instance) -> bool: + if not inspect.isclass(cls_or_instance): + cls_or_instance = cls_or_instance.__class__ + return getattr(cls_or_instance, '_nni_model_wrapper', False) + + +def _check_wrapped(cls: T) -> bool: + if getattr(cls, '_traced', False) or getattr(cls, '_nni_model_wrapper', False): + raise TypeError(f'{cls} is already wrapped with trace wrapper (basic_unit / model_wrapper / trace). Cannot wrap again.') diff --git a/nni/retiarii/strategy/__init__.py b/nni/retiarii/strategy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..04511eaa69b85666e9ebc69b643bc0c74fe6467d --- /dev/null +++ b/nni/retiarii/strategy/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .base import BaseStrategy +from .bruteforce import Random, GridSearch +from .evolution import RegularizedEvolution +from .tpe_strategy import TPEStrategy +from .local_debug_strategy import _LocalDebugStrategy +from .rl import PolicyBasedRL diff --git a/nni/retiarii/strategy/_rl_impl.py b/nni/retiarii/strategy/_rl_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..ce51487a773a95d7e6eeafbae8c8688881017287 --- /dev/null +++ b/nni/retiarii/strategy/_rl_impl.py @@ -0,0 +1,170 @@ +# This file might cause import error for those who didn't install RL-related dependencies + +import logging +import threading +from multiprocessing.pool import ThreadPool + +import gym +import numpy as np +import tianshou +import torch +import torch.nn as nn + +from gym import spaces +from tianshou.data import to_torch +from tianshou.env.worker import EnvWorker + +from .utils import get_targeted_model +from ..graph import ModelStatus +from ..execution import submit_models, wait_models + + +_logger = logging.getLogger(__name__) +_thread_lock = threading.Lock() + + +class MultiThreadEnvWorker(EnvWorker): + def __init__(self, env_fn): + self.env = env_fn() + self.pool = ThreadPool(processes=1) + super().__init__(env_fn) + + def get_env_attr(self, key): + return getattr(self.env, key) + + def set_env_attr(self, key, value): + return setattr(self.env, key, value) + + def __getattr__(self, key): + if tianshou.__version__ >= '0.4.5': # not a strict check here + return super().__getattr__(key) # https://github.com/thu-ml/tianshou/pull/478 + return getattr(self.env, key) + + def reset(self): + return self.env.reset() + + @staticmethod + def wait(*args, **kwargs): + raise NotImplementedError('Async collect is not supported yet.') + + def send_action(self, action) -> None: + # self.result is actually a handle + self.result = self.pool.apply_async(self.env.step, (action,)) + + def get_result(self): + return self.result.get() + + def 
seed(self, seed): + super().seed(seed) + return self.env.seed(seed) + + def render(self, **kwargs): + return self.env.render(**kwargs) + + def close_env(self) -> None: + self.pool.terminate() + return self.env.close() + + +class ModelEvaluationEnv(gym.Env): + def __init__(self, base_model, mutators, search_space): + self.base_model = base_model + self.mutators = mutators + self.search_space = search_space + self.ss_keys = list(self.search_space.keys()) + self.action_dim = max(map(lambda v: len(v), self.search_space.values())) + self.num_steps = len(self.search_space) + + @property + def observation_space(self): + return spaces.Dict({ + 'action_history': spaces.MultiDiscrete([self.action_dim] * self.num_steps), + 'cur_step': spaces.Discrete(self.num_steps + 1), + 'action_dim': spaces.Discrete(self.action_dim + 1) + }) + + @property + def action_space(self): + return spaces.Discrete(self.action_dim) + + def reset(self): + self.action_history = np.zeros(self.num_steps, dtype=np.int32) + self.cur_step = 0 + self.sample = {} + return { + 'action_history': self.action_history, + 'cur_step': self.cur_step, + 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]]) + } + + def step(self, action): + cur_key = self.ss_keys[self.cur_step] + assert action < len(self.search_space[cur_key]), \ + f'Current action {action} out of range {self.search_space[cur_key]}.' + self.action_history[self.cur_step] = action + self.sample[cur_key] = self.search_space[cur_key][action] + self.cur_step += 1 + obs = { + 'action_history': self.action_history, + 'cur_step': self.cur_step, + 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]]) \ + if self.cur_step < self.num_steps else self.action_dim + } + if self.cur_step == self.num_steps: + with _thread_lock: + model = get_targeted_model(self.base_model, self.mutators, self.sample) + _logger.info(f'New model created: {self.sample}') + submit_models(model) + wait_models(model) + if model.status == ModelStatus.Failed: + return self.reset(), 0., False, {} + rew = model.metric + _logger.info(f'Model metric received as reward: {rew}') + return obs, rew, True, {} + else: + + return obs, 0., False, {} + + +class Preprocessor(nn.Module): + def __init__(self, obs_space, hidden_dim=64, num_layers=1): + super().__init__() + self.action_dim = obs_space['action_history'].nvec[0] + self.hidden_dim = hidden_dim + # first token is [SOS] + self.embedding = nn.Embedding(self.action_dim + 1, hidden_dim) + self.rnn = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True) + + def forward(self, obs): + seq = nn.functional.pad(obs['action_history'] + 1, (1, 1)) # pad the start token and end token + # end token is used to avoid out-of-range of v_s_. Will not actually affect BP. 
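+        # ids were shifted by +1 above, so index 0 is free to act as the [SOS]/pad token in the embedding table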
+ seq = self.embedding(seq.long()) + feature, _ = self.rnn(seq) + return feature[torch.arange(len(feature), device=feature.device), obs['cur_step'].long() + 1] + + +class Actor(nn.Module): + def __init__(self, action_space, preprocess): + super().__init__() + self.preprocess = preprocess + self.action_dim = action_space.n + self.linear = nn.Linear(self.preprocess.hidden_dim, self.action_dim) + + def forward(self, obs, **kwargs): + obs = to_torch(obs, device=self.linear.weight.device) + out = self.linear(self.preprocess(obs)) + # to take care of choices with different number of options + mask = torch.arange(self.action_dim).expand(len(out), self.action_dim) >= obs['action_dim'].unsqueeze(1) + out[mask.to(out.device)] = float('-inf') + return nn.functional.softmax(out, dim=-1), kwargs.get('state', None) + + +class Critic(nn.Module): + def __init__(self, preprocess): + super().__init__() + self.preprocess = preprocess + self.linear = nn.Linear(self.preprocess.hidden_dim, 1) + + def forward(self, obs, **kwargs): + obs = to_torch(obs, device=self.linear.weight.device) + return self.linear(self.preprocess(obs)).squeeze(-1) diff --git a/nni/retiarii/strategy/base.py b/nni/retiarii/strategy/base.py new file mode 100644 index 0000000000000000000000000000000000000000..894da87b784b901346424e503067bae8e276de16 --- /dev/null +++ b/nni/retiarii/strategy/base.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import abc +from typing import List + +from ..graph import Model +from ..mutator import Mutator + + +class BaseStrategy(abc.ABC): + + @abc.abstractmethod + def run(self, base_model: Model, applied_mutators: List[Mutator]) -> None: + pass diff --git a/nni/retiarii/strategy/bruteforce.py b/nni/retiarii/strategy/bruteforce.py new file mode 100644 index 0000000000000000000000000000000000000000..ab0b8969021cd79f2bce8a50ef3f63a1d9b5a792 --- /dev/null +++ b/nni/retiarii/strategy/bruteforce.py @@ -0,0 +1,133 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import copy +import itertools +import logging +import random +import time +from typing import Any, Dict, List + +from .. import InvalidMutation, Sampler, submit_models, query_available_resources, budget_exhausted +from .base import BaseStrategy +from .utils import dry_run_for_search_space, get_targeted_model, filter_model + +_logger = logging.getLogger(__name__) + + +def grid_generator(search_space: Dict[Any, List[Any]], shuffle=True): + keys = list(search_space.keys()) + search_space_values = copy.deepcopy(list(search_space.values())) + if shuffle: + for values in search_space_values: + random.shuffle(values) + for values in itertools.product(*search_space_values): + yield {key: value for key, value in zip(keys, values)} + + +def random_generator(search_space: Dict[Any, List[Any]], dedup=True, retries=500): + keys = list(search_space.keys()) + history = set() + search_space_values = copy.deepcopy(list(search_space.values())) + while True: + for retry_count in range(retries): + selected = [random.choice(v) for v in search_space_values] + if not dedup: + break + selected = tuple(selected) + if selected not in history: + history.add(selected) + break + if retry_count + 1 == retries: + _logger.debug('Random generation has run out of patience. There is nothing to search. Exiting.') + return + yield {key: value for key, value in zip(keys, selected)} + + +class GridSearch(BaseStrategy): + """ + Traverse the search space and try all the possible combinations one by one. 
+ + Parameters + ---------- + shuffle : bool + Shuffle the order in a candidate list, so that they are tried in a random order. Default: true. + """ + + def __init__(self, shuffle=True): + self._polling_interval = 2. + self.shuffle = shuffle + + def run(self, base_model, applied_mutators): + search_space = dry_run_for_search_space(base_model, applied_mutators) + for sample in grid_generator(search_space, shuffle=self.shuffle): + _logger.debug('New model created. Waiting for resource. %s', str(sample)) + while query_available_resources() <= 0: + if budget_exhausted(): + return + time.sleep(self._polling_interval) + submit_models(get_targeted_model(base_model, applied_mutators, sample)) + + +class _RandomSampler(Sampler): + def choice(self, candidates, mutator, model, index): + return random.choice(candidates) + + +class Random(BaseStrategy): + """ + Random search on the search space. + + Parameters + ---------- + variational : bool + Do not dry run to get the full search space. Used when the search space has variational size or candidates. Default: false. + dedup : bool + Do not try the same configuration twice. When variational is true, deduplication is not supported. Default: true. + model_filter: Callable[[Model], bool] + Feed the model and return a bool. This will filter the models in search space and select which to submit. + """ + + def __init__(self, variational=False, dedup=True, model_filter=None): + self.variational = variational + self.dedup = dedup + if variational and dedup: + raise ValueError('Dedup is not supported in variational mode.') + self.random_sampler = _RandomSampler() + self._polling_interval = 2. + self.filter = model_filter + + def run(self, base_model, applied_mutators): + if self.variational: + _logger.info('Random search running in variational mode.') + sampler = _RandomSampler() + for mutator in applied_mutators: + mutator.bind_sampler(sampler) + while True: + avail_resource = query_available_resources() + if avail_resource > 0: + model = base_model + for mutator in applied_mutators: + model = mutator.apply(model) + _logger.debug('New model created. Applied mutators are: %s', str(applied_mutators)) + if filter_model(self.filter, model): + submit_models(model) + elif budget_exhausted(): + break + else: + time.sleep(self._polling_interval) + else: + _logger.info('Random search running in fixed size mode. Dedup: %s.', 'on' if self.dedup else 'off') + search_space = dry_run_for_search_space(base_model, applied_mutators) + for sample in random_generator(search_space, dedup=self.dedup): + _logger.debug('New model created. Waiting for resource. %s', str(sample)) + while query_available_resources() <= 0: + if budget_exhausted(): + return + time.sleep(self._polling_interval) + try: + model = get_targeted_model(base_model, applied_mutators, sample) + if filter_model(self.filter, model): + submit_models(model) + except InvalidMutation as e: + _logger.warning(f'Invalid mutation: {e}. Skip.') diff --git a/nni/retiarii/strategy/evolution.py b/nni/retiarii/strategy/evolution.py new file mode 100644 index 0000000000000000000000000000000000000000..1f7aa366ed599863bf38496eb93e842be9639ed1 --- /dev/null +++ b/nni/retiarii/strategy/evolution.py @@ -0,0 +1,169 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
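To make the bruteforce generators above concrete, a small usage sketch follows. The flat string keys here are hypothetical; `dry_run_for_search_space` actually produces `(mutator, index)` tuples as keys, but the generators accept any dict of candidate lists.

```python
from nni.retiarii.strategy.bruteforce import grid_generator, random_generator

# hypothetical flattened search space with two choice dimensions
space = {'kernel_size': [3, 5, 7], 'depth': [2, 4]}

for sample in grid_generator(space, shuffle=False):
    print(sample)       # 6 dicts in total, e.g. {'kernel_size': 3, 'depth': 2}

sampler = random_generator(space, dedup=True)
print(next(sampler))    # one random combination that has not been seen before
```

With `dedup=True` the random generator gives up after the configured number of retries, which is how `Random` terminates once a small space is exhausted.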
+ +import collections +import dataclasses +import logging +import random +import time + +from ..execution import query_available_resources, submit_models +from ..graph import ModelStatus +from .base import BaseStrategy +from .utils import dry_run_for_search_space, get_targeted_model, filter_model + + +_logger = logging.getLogger(__name__) + + +@dataclasses.dataclass +class Individual: + """ + A class that represents an individual. + Holds two attributes, where ``x`` is the model and ``y`` is the metric (e.g., accuracy). + """ + x: dict + y: float + + +class RegularizedEvolution(BaseStrategy): + """ + Algorithm for regularized evolution (i.e. aging evolution). + Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image Classifier Architecture Search". + + Parameters + ---------- + optimize_mode : str + Can be one of "maximize" and "minimize". Default: maximize. + population_size : int + The number of individuals to keep in the population. Default: 100. + cycles : int + The number of cycles (trials) the algorithm should run for. Default: 20000. + sample_size : int + The number of individuals that should participate in each tournament. Default: 25. + mutation_prob : float + Probability that mutation happens in each dim. Default: 0.05 + on_failure : str + Can be one of "ignore" and "worst". If "ignore", simply give up the model and find a new one. + If "worst", mark the model as -inf (if maximize, inf if minimize), so that the algorithm "learns" to avoid such model. + Default: ignore. + model_filter: Callable[[Model], bool] + Feed the model and return a bool. This will filter the models in search space and select which to submit. + """ + + def __init__(self, optimize_mode='maximize', population_size=100, sample_size=25, cycles=20000, + mutation_prob=0.05, on_failure='ignore', model_filter=None): + assert optimize_mode in ['maximize', 'minimize'] + assert on_failure in ['ignore', 'worst'] + assert sample_size < population_size + self.optimize_mode = optimize_mode + self.population_size = population_size + self.sample_size = sample_size + self.cycles = cycles + self.mutation_prob = mutation_prob + self.on_failure = on_failure + + self._worst = float('-inf') if self.optimize_mode == 'maximize' else float('inf') + + self._success_count = 0 + self._population = collections.deque() + self._running_models = [] + self._polling_interval = 2. + self.filter = model_filter + + def random(self, search_space): + return {k: random.choice(v) for k, v in search_space.items()} + + def mutate(self, parent, search_space): + child = {} + for k, v in parent.items(): + if random.uniform(0, 1) < self.mutation_prob: + # NOTE: we do not exclude the original choice here for simplicity, + # which is slightly different from the original paper. 
+ child[k] = random.choice(search_space[k]) + else: + child[k] = v + return child + + def best_parent(self): + samples = [p for p in self._population] # copy population + random.shuffle(samples) + samples = list(samples)[:self.sample_size] + if self.optimize_mode == 'maximize': + parent = max(samples, key=lambda sample: sample.y) + else: + parent = min(samples, key=lambda sample: sample.y) + return parent.x + + def run(self, base_model, applied_mutators): + search_space = dry_run_for_search_space(base_model, applied_mutators) + # Run the first population regardless concurrency + _logger.info('Initializing the first population.') + while len(self._population) + len(self._running_models) <= self.population_size: + # try to submit new models + while len(self._population) + len(self._running_models) < self.population_size: + config = self.random(search_space) + self._submit_config(config, base_model, applied_mutators) + # collect results + self._move_succeeded_models_to_population() + self._remove_failed_models_from_running_list() + time.sleep(self._polling_interval) + + if len(self._population) >= self.population_size: + break + + # Resource-aware mutation of models + _logger.info('Running mutations.') + while self._success_count + len(self._running_models) <= self.cycles: + # try to submit new models + while query_available_resources() > 0 and self._success_count + len(self._running_models) < self.cycles: + config = self.mutate(self.best_parent(), search_space) + self._submit_config(config, base_model, applied_mutators) + # collect results + self._move_succeeded_models_to_population() + self._remove_failed_models_from_running_list() + time.sleep(self._polling_interval) + + if self._success_count >= self.cycles: + break + + def _submit_config(self, config, base_model, mutators): + _logger.debug('Model submitted to running queue: %s', config) + model = get_targeted_model(base_model, mutators, config) + if not filter_model(self.filter, model): + if self.on_failure == "worst": + model.status = ModelStatus.Failed + self._running_models.append((config, model)) + else: + submit_models(model) + self._running_models.append((config, model)) + return model + + def _move_succeeded_models_to_population(self): + completed_indices = [] + for i, (config, model) in enumerate(self._running_models): + metric = None + if self.on_failure == 'worst' and model.status == ModelStatus.Failed: + metric = self._worst + elif model.status == ModelStatus.Trained: + metric = model.metric + if metric is not None: + individual = Individual(config, metric) + _logger.debug('Individual created: %s', str(individual)) + self._population.append(individual) + if len(self._population) > self.population_size: + self._population.popleft() + completed_indices.append(i) + for i in completed_indices[::-1]: + # delete from end to start so that the index number will not be affected. + self._success_count += 1 + self._running_models.pop(i) + + def _remove_failed_models_from_running_list(self): + # This is only done when on_failure policy is set to "ignore". + # Otherwise, failed models will be treated as inf when processed. + if self.on_failure == 'ignore': + number_of_failed_models = len([g for g in self._running_models if g[1].status == ModelStatus.Failed]) + self._running_models = [g for g in self._running_models if g[1].status != ModelStatus.Failed] + if number_of_failed_models > 0: + _logger.info('%d failed models are ignored. 
Will retry.', number_of_failed_models) diff --git a/nni/retiarii/strategy/local_debug_strategy.py b/nni/retiarii/strategy/local_debug_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..dd842babcf4a2d275ed3034f269ea878f17b2648 --- /dev/null +++ b/nni/retiarii/strategy/local_debug_strategy.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import random +import string + +from .. import Sampler, codegen, utils +from ..execution.base import BaseGraphData +from ..execution.utils import get_mutation_summary +from .base import BaseStrategy + +_logger = logging.getLogger(__name__) + +class ChooseFirstSampler(Sampler): + def choice(self, candidates, mutator, model, index): + return candidates[0] + +class _LocalDebugStrategy(BaseStrategy): + """ + This class is supposed to be used internally, for debugging trial mutation + """ + + def run_one_model(self, model): + mutation_summary = get_mutation_summary(model) + graph_data = BaseGraphData(codegen.model_to_pytorch_script(model), model.evaluator, mutation_summary) + random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + file_name = f'_generated_model/{random_str}.py' + os.makedirs(os.path.dirname(file_name), exist_ok=True) + with open(file_name, 'w') as f: + f.write(graph_data.model_script) + model_cls = utils.import_(f'_generated_model.{random_str}._model') + graph_data.evaluator._execute(model_cls) + os.remove(file_name) + + def run(self, base_model, applied_mutators): + _logger.info('local debug strategy has been started.') + model = base_model + _logger.debug('New model created. Applied mutators: %s', str(applied_mutators)) + choose_first_sampler = ChooseFirstSampler() + for mutator in applied_mutators: + mutator.bind_sampler(choose_first_sampler) + model = mutator.apply(model) + # directly run models + self.run_one_model(model) diff --git a/nni/retiarii/strategy/rl.py b/nni/retiarii/strategy/rl.py new file mode 100644 index 0000000000000000000000000000000000000000..23a05ed1ed9ad20d7421b82c0e820482c71aef34 --- /dev/null +++ b/nni/retiarii/strategy/rl.py @@ -0,0 +1,78 @@ +import logging +from typing import Optional, Callable + +from .base import BaseStrategy +from .utils import dry_run_for_search_space +from ..execution import query_available_resources + +try: + has_tianshou = True + import torch + from tianshou.data import Collector, VectorReplayBuffer + from tianshou.env import BaseVectorEnv + from tianshou.policy import BasePolicy, PPOPolicy # pylint: disable=unused-import + from ._rl_impl import ModelEvaluationEnv, MultiThreadEnvWorker, Preprocessor, Actor, Critic +except ImportError: + has_tianshou = False + + +_logger = logging.getLogger(__name__) + + +class PolicyBasedRL(BaseStrategy): + """ + Algorithm for policy-based reinforcement learning. + This is a wrapper of algorithms provided in tianshou (PPO by default), + and can be easily customized with other algorithms that inherit ``BasePolicy`` (e.g., REINFORCE [1]_). + + Parameters + ---------- + max_collect : int + How many times collector runs to collect trials for RL. Default 100. + trial_per_collect : int + How many trials (trajectories) each time collector collects. + After each collect, trainer will sample batch from replay buffer and do the update. Default: 20. + policy_fn : function + Takes ``ModelEvaluationEnv`` as input and return a policy. See ``_default_policy_fn`` for an example. + + References + ---------- + + .. 
[1] Barret Zoph and Quoc V. Le, "Neural Architecture Search with Reinforcement Learning". + https://arxiv.org/abs/1611.01578 + """ + + def __init__(self, max_collect: int = 100, trial_per_collect = 20, + policy_fn: Optional[Callable[['ModelEvaluationEnv'], 'BasePolicy']] = None): + if not has_tianshou: + raise ImportError('`tianshou` is required to run RL-based strategy. ' + 'Please use "pip install tianshou" to install it beforehand.') + + self.policy_fn = policy_fn or self._default_policy_fn + self.max_collect = max_collect + self.trial_per_collect = trial_per_collect + + @staticmethod + def _default_policy_fn(env): + net = Preprocessor(env.observation_space) + actor = Actor(env.action_space, net) + critic = Critic(net) + optim = torch.optim.Adam(set(actor.parameters()).union(critic.parameters()), lr=1e-4) + return PPOPolicy(actor, critic, optim, torch.distributions.Categorical, + discount_factor=1., action_space=env.action_space) + + def run(self, base_model, applied_mutators): + search_space = dry_run_for_search_space(base_model, applied_mutators) + concurrency = query_available_resources() + + env_fn = lambda: ModelEvaluationEnv(base_model, applied_mutators, search_space) + policy = self.policy_fn(env_fn()) + + env = BaseVectorEnv([env_fn for _ in range(concurrency)], MultiThreadEnvWorker) + collector = Collector(policy, env, VectorReplayBuffer(20000, len(env))) + + for cur_collect in range(1, self.max_collect + 1): + _logger.info('Collect [%d] Running...', cur_collect) + result = collector.collect(n_episode=self.trial_per_collect) + _logger.info('Collect [%d] Result: %s', cur_collect, str(result)) + policy.update(0, collector.buffer, batch_size=64, repeat=5) diff --git a/nni/retiarii/strategy/tpe_strategy.py b/nni/retiarii/strategy/tpe_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..7f55ad302e6ecfe356a242ae92db04b74ad30e27 --- /dev/null +++ b/nni/retiarii/strategy/tpe_strategy.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import time + +from nni.algorithms.hpo.hyperopt_tuner import HyperoptTuner + +from .. import Sampler, submit_models, query_available_resources, is_stopped_exec, budget_exhausted +from .base import BaseStrategy + +_logger = logging.getLogger(__name__) + + +class TPESampler(Sampler): + def __init__(self, optimize_mode='minimize'): + self.tpe_tuner = HyperoptTuner('tpe', optimize_mode) + self.cur_sample = None + self.index = None + self.total_parameters = {} + + def update_sample_space(self, sample_space): + search_space = {} + for i, each in enumerate(sample_space): + search_space[str(i)] = {'_type': 'choice', '_value': each} + self.tpe_tuner.update_search_space(search_space) + + def generate_samples(self, model_id): + self.cur_sample = self.tpe_tuner.generate_parameters(model_id) + self.total_parameters[model_id] = self.cur_sample + self.index = 0 + + def receive_result(self, model_id, result): + self.tpe_tuner.receive_trial_result(model_id, self.total_parameters[model_id], result) + + def choice(self, candidates, mutator, model, index): + chosen = self.cur_sample[str(self.index)] + self.index += 1 + return chosen + + +class TPEStrategy(BaseStrategy): + """ + The Tree-structured Parzen Estimator (TPE) [bergstrahpo]_ is a sequential model-based optimization (SMBO) approach. 
+ SMBO methods sequentially construct models to approximate the performance of hyperparameters based on historical measurements, + and then subsequently choose new hyperparameters to test based on this model. + + References + ---------- + + .. [bergstrahpo] Bergstra et al., "Algorithms for Hyper-Parameter Optimization". + https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf + """ + + def __init__(self): + self.tpe_sampler = TPESampler() + self.model_id = 0 + self.running_models = {} + + def run(self, base_model, applied_mutators): + sample_space = [] + new_model = base_model + for mutator in applied_mutators: + recorded_candidates, new_model = mutator.dry_run(new_model) + sample_space.extend(recorded_candidates) + self.tpe_sampler.update_sample_space(sample_space) + + _logger.info('TPE strategy has been started.') + while not budget_exhausted(): + avail_resource = query_available_resources() + if avail_resource > 0: + model = base_model + _logger.debug('New model created. Applied mutators: %s', str(applied_mutators)) + self.tpe_sampler.generate_samples(self.model_id) + for mutator in applied_mutators: + mutator.bind_sampler(self.tpe_sampler) + model = mutator.apply(model) + # run models + submit_models(model) + self.running_models[self.model_id] = model + self.model_id += 1 + else: + time.sleep(2) + + _logger.debug('num of running models: %d', len(self.running_models)) + to_be_deleted = [] + for _id, _model in self.running_models.items(): + if is_stopped_exec(_model): + if _model.metric is not None: + self.tpe_sampler.receive_result(_id, _model.metric) + _logger.debug('tpe receive results: %d, %s', _id, _model.metric) + to_be_deleted.append(_id) + for _id in to_be_deleted: + del self.running_models[_id] diff --git a/nni/retiarii/strategy/utils.py b/nni/retiarii/strategy/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4262674f8613731c3e6893380565cbf6c9c8e687 --- /dev/null +++ b/nni/retiarii/strategy/utils.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
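As a rough sketch of the sampler mechanics above, assuming the `HyperoptTuner` dependency behaves as in NNI's HPO module: `update_sample_space` flattens the candidate lists recorded by mutator dry runs into a choice-based search space, and each `choice` call consumes the next entry of the generated parameter set.

```python
from nni.retiarii.strategy.tpe_strategy import TPESampler

sampler = TPESampler()
# two recorded candidate lists, e.g. from two choice mutations
sampler.update_sample_space([[16, 32, 64], ['relu', 'gelu']])

sampler.generate_samples(model_id=0)                     # ask TPE for one parameter set
width = sampler.choice([16, 32, 64], None, None, 0)      # consumes cur_sample['0']
act = sampler.choice(['relu', 'gelu'], None, None, 1)    # consumes cur_sample['1']

# after the trial finishes, feed the metric back so TPE can learn from it
sampler.receive_result(0, 0.93)
```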
+ + +import collections +import logging +from typing import Dict, Any, List +from ..graph import Model +from ..mutator import Mutator, Sampler + +_logger = logging.getLogger(__name__) + + +class _FixedSampler(Sampler): + def __init__(self, sample): + self.sample = sample + + def choice(self, candidates, mutator, model, index): + return self.sample[(mutator, index)] + + +def dry_run_for_search_space(model: Model, mutators: List[Mutator]) -> Dict[Any, List[Any]]: + search_space = collections.OrderedDict() + for mutator in mutators: + recorded_candidates, model = mutator.dry_run(model) + for i, candidates in enumerate(recorded_candidates): + search_space[(mutator, i)] = candidates + return search_space + +def dry_run_for_formatted_search_space(model: Model, mutators: List[Mutator]) -> Dict[Any, Dict[Any, Any]]: + search_space = collections.OrderedDict() + for mutator in mutators: + recorded_candidates, model = mutator.dry_run(model) + if len(recorded_candidates) == 1: + search_space[mutator.label] = {'_type': 'choice', '_value': recorded_candidates[0]} + else: + for i, candidate in enumerate(recorded_candidates): + search_space[f'{mutator.label}_{i}'] = {'_type': 'choice', '_value': candidate} + return search_space + +def get_targeted_model(base_model: Model, mutators: List[Mutator], sample: dict) -> Model: + sampler = _FixedSampler(sample) + model = base_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + return model + + +def filter_model(model_filter, ir_model): + if model_filter is not None: + _logger.debug(f'Check if model satisfies constraints.') + if model_filter(ir_model): + _logger.debug(f'Model satisfied. Submit the model.') + return True + else: + _logger.debug(f'Model unsatisfied. Discard the model.') + return False + else: + return True diff --git a/nni/retiarii/trial_entry.py b/nni/retiarii/trial_entry.py new file mode 100644 index 0000000000000000000000000000000000000000..144536a4177f24f8c25d8737e5a9c7a54211f6e1 --- /dev/null +++ b/nni/retiarii/trial_entry.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Entrypoint for trials. + +Assuming execution engine is BaseExecutionEngine. +""" +import argparse + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('exec', choices=['base', 'py', 'cgo', 'benchmark']) + args = parser.parse_args() + if args.exec == 'base': + from .execution.base import BaseExecutionEngine + engine = BaseExecutionEngine + elif args.exec == 'cgo': + from .execution.cgo_engine import CGOExecutionEngine + engine = CGOExecutionEngine + elif args.exec == 'py': + from .execution.python import PurePythonExecutionEngine + engine = PurePythonExecutionEngine + elif args.exec == 'benchmark': + from .execution.benchmark import BenchmarkExecutionEngine + engine = BenchmarkExecutionEngine + engine.trial_execute_graph() diff --git a/nni/retiarii/utils.py b/nni/retiarii/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..425d707ec137b0b52d7c08aa45cbffa0bce279a2 --- /dev/null +++ b/nni/retiarii/utils.py @@ -0,0 +1,156 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
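A short sketch of how these helpers compose; `base_model` and `mutators` are assumed to come from an existing Retiarii experiment:

```python
from nni.retiarii.strategy.utils import dry_run_for_search_space, get_targeted_model

# keys of the returned dict are (mutator, index) tuples, values are candidate lists
space = dry_run_for_search_space(base_model, mutators)

# fix every dimension to its first candidate and materialize that concrete model
sample = {key: candidates[0] for key, candidates in space.items()}
model = get_targeted_model(base_model, mutators, sample)
```

Internally, `get_targeted_model` binds a `_FixedSampler` so that every `choice` call returns exactly the candidate recorded in `sample`.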
+
+import inspect
+import warnings
+from collections import defaultdict
+from typing import Any, List, Dict
+from pathlib import Path
+
+
+def import_(target: str, allow_none: bool = False) -> Any:
+    if target is None:
+        return None
+    path, identifier = target.rsplit('.', 1)
+    module = __import__(path, globals(), locals(), [identifier])
+    return getattr(module, identifier)
+
+
+def version_larger_equal(a: str, b: str) -> bool:
+    # TODO: refactor later
+    a = a.split('+')[0]
+    b = b.split('+')[0]
+    return tuple(map(int, a.split('.'))) >= tuple(map(int, b.split('.')))
+
+
+_last_uid = defaultdict(int)
+
+_DEFAULT_MODEL_NAMESPACE = 'model'
+
+
+def uid(namespace: str = 'default') -> int:
+    _last_uid[namespace] += 1
+    return _last_uid[namespace]
+
+
+def reset_uid(namespace: str = 'default') -> None:
+    _last_uid[namespace] = 0
+
+
+def get_module_name(cls_or_func):
+    module_name = cls_or_func.__module__
+    if module_name == '__main__':
+        # infer the module name with inspect
+        for frm in inspect.stack():
+            if inspect.getmodule(frm[0]).__name__ == '__main__':
+                # main module found
+                main_file_path = Path(inspect.getsourcefile(frm[0]))
+                if not Path().samefile(main_file_path.parent):
+                    raise RuntimeError(f'You are using "{main_file_path}" to launch your experiment, '
+                                       f'please launch the experiment under the directory where "{main_file_path.name}" is located.')
+                module_name = main_file_path.stem
+                break
+        if module_name == '__main__':
+            warnings.warn('Callstack exhausted but main module still not found. This will probably cause issues where the '
+                          'function/class cannot be imported.')
+
+    # NOTE: this is hacky. As torchscript retrieves LSTM's source code when scripting,
+    # we should assign the original LSTM's __module__ to the wrapped LSTM's __module__
+    # so that LSTM's source code can be found.
+    # TODO: find out all the modules that have the same requirement as LSTM
+    if f'{cls_or_func.__module__}.{cls_or_func.__name__}' == 'torch.nn.modules.rnn.LSTM':
+        module_name = cls_or_func.__module__
+
+    return module_name
+
+
+def get_importable_name(cls, relocate_module=False):
+    module_name = get_module_name(cls) if relocate_module else cls.__module__
+    return module_name + '.' + cls.__name__
+
+
+class NoContextError(Exception):
+    pass
+
+
+class ContextStack:
+    """
+    This is to maintain a globally-accessible context environment that is visible everywhere.
+
+    Use ``with ContextStack(namespace, value):`` to initiate, and use ``get_current_context(namespace)`` to
+    get the corresponding value in the namespace.
+
+    Note that this is not multi-processing safe. Also, the values will get cleared for a new process.
+    """
+
+    _stack: Dict[str, List[Any]] = defaultdict(list)
+
+    def __init__(self, key: str, value: Any):
+        self.key = key
+        self.value = value
+
+    def __enter__(self):
+        self.push(self.key, self.value)
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.pop(self.key)
+
+    @classmethod
+    def push(cls, key: str, value: Any):
+        cls._stack[key].append(value)
+
+    @classmethod
+    def pop(cls, key: str) -> None:
+        cls._stack[key].pop()
+
+    @classmethod
+    def top(cls, key: str) -> Any:
+        if not cls._stack[key]:
+            raise NoContextError('Context is empty.')
+        return cls._stack[key][-1]
+
+
+class ModelNamespace:
+    """
+    To create an individual namespace for models to enable automatic numbering.
+ """ + + def __init__(self, key: str = _DEFAULT_MODEL_NAMESPACE): + # for example, key: "model_wrapper" + self.key = key + + def __enter__(self): + # For example, currently the top of stack is [1, 2, 2], and [1, 2, 2, 3] is used, + # the next thing up is [1, 2, 2, 4]. + # `reset_uid` to count from zero for "model_wrapper_1_2_2_4" + try: + current_context = ContextStack.top(self.key) + next_uid = uid(self._simple_name(self.key, current_context)) + ContextStack.push(self.key, current_context + [next_uid]) + reset_uid(self._simple_name(self.key, current_context + [next_uid])) + except NoContextError: + ContextStack.push(self.key, []) + reset_uid(self._simple_name(self.key, [])) + + def __exit__(self, *args, **kwargs): + ContextStack.pop(self.key) + + @staticmethod + def next_label(key: str = _DEFAULT_MODEL_NAMESPACE) -> str: + try: + current_context = ContextStack.top(key) + except NoContextError: + # fallback to use "default" namespace + return ModelNamespace._simple_name('default', [uid()]) + + next_uid = uid(ModelNamespace._simple_name(key, current_context)) + return ModelNamespace._simple_name(key, current_context + [next_uid]) + + @staticmethod + def _simple_name(key: str, lst: List[Any]) -> str: + return key + ''.join(['_' + str(k) for k in lst]) + + +def get_current_context(key: str) -> Any: + return ContextStack.top(key) diff --git a/nni/runtime/__init__.py b/nni/runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/runtime/common.py b/nni/runtime/common.py new file mode 100644 index 0000000000000000000000000000000000000000..537a35b55c697dd722f20747e6d2b6c19f0fbc54 --- /dev/null +++ b/nni/runtime/common.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +_multi_thread = False +_multi_phase = False + +def enable_multi_thread(): + global _multi_thread + _multi_thread = True + +def multi_thread_enabled(): + return _multi_thread + +def enable_multi_phase(): + global _multi_phase + _multi_phase = True + +def multi_phase_enabled(): + return _multi_phase diff --git a/nni/runtime/config.py b/nni/runtime/config.py new file mode 100644 index 0000000000000000000000000000000000000000..8c353c8f20b63af6d03cda16884aefbb32f5706f --- /dev/null +++ b/nni/runtime/config.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +from pathlib import Path +import shutil +import sys + +import nni + +def get_config_directory() -> Path: + """ + Get NNI config directory. + Create it if not exist. + """ + if os.getenv('NNI_CONFIG_DIR') is not None: + config_dir = Path(os.getenv('NNI_CONFIG_DIR')) + elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir(): + config_dir = Path(sys.prefix, 'nni') + elif sys.platform == 'win32': + config_dir = Path(os.environ['APPDATA'], 'nni') + else: + config_dir = Path.home() / '.config/nni' + config_dir.mkdir(parents=True, exist_ok=True) + return config_dir + +def get_config_file(name: str) -> Path: + """ + Get an NNI config file. + Copy from `nni/runtime/default_config` if not exist. + """ + config_file = get_config_directory() / name + if not config_file.exists(): + default = get_builtin_config_file(name) + shutil.copyfile(default, config_file) + return config_file + +def get_builtin_config_file(name: str) -> Path: + """ + Get a readonly builtin config file. 
+ """ + return Path(nni.__path__[0], 'runtime/default_config', name) diff --git a/nni/runtime/default_config/builtin_algorithms.yml b/nni/runtime/default_config/builtin_algorithms.yml new file mode 100644 index 0000000000000000000000000000000000000000..b6e3b811aae966238575bab87463123ad94b805c --- /dev/null +++ b/nni/runtime/default_config/builtin_algorithms.yml @@ -0,0 +1,82 @@ +advisors: +- builtinName: Hyperband + classArgsValidator: nni.algorithms.hpo.hyperband_advisor.HyperbandClassArgsValidator + className: nni.algorithms.hpo.hyperband_advisor.Hyperband + source: nni +- builtinName: BOHB + classArgsValidator: nni.algorithms.hpo.bohb_advisor.BOHBClassArgsValidator + className: nni.algorithms.hpo.bohb_advisor.BOHB + source: nni +assessors: +- builtinName: Medianstop + classArgsValidator: nni.algorithms.hpo.medianstop_assessor.MedianstopClassArgsValidator + className: nni.algorithms.hpo.medianstop_assessor.MedianstopAssessor + source: nni +- builtinName: Curvefitting + classArgsValidator: nni.algorithms.hpo.curvefitting_assessor.CurvefittingClassArgsValidator + className: nni.algorithms.hpo.curvefitting_assessor.CurvefittingAssessor + source: nni +tuners: +- builtinName: PPOTuner + classArgsValidator: nni.algorithms.hpo.ppo_tuner.PPOClassArgsValidator + className: nni.algorithms.hpo.ppo_tuner.PPOTuner + source: nni +- builtinName: SMAC + classArgsValidator: nni.algorithms.hpo.smac_tuner.SMACClassArgsValidator + className: nni.algorithms.hpo.smac_tuner.SMACTuner + source: nni +- builtinName: TPE + className: nni.algorithms.hpo.tpe_tuner.TpeTuner + source: nni +- builtinName: TPE_legacy + classArgs: + algorithm_name: tpe + classArgsValidator: nni.algorithms.hpo.hyperopt_tuner.HyperoptClassArgsValidator + className: nni.algorithms.hpo.hyperopt_tuner.HyperoptTuner + source: nni +- builtinName: Random + className: nni.algorithms.hpo.random_tuner.RandomTuner + classArgsValidator: nni.algorithms.hpo.random_tuner.RandomClassArgsValidator + source: nni +- builtinName: Anneal + classArgs: + algorithm_name: anneal + classArgsValidator: nni.algorithms.hpo.hyperopt_tuner.HyperoptClassArgsValidator + className: nni.algorithms.hpo.hyperopt_tuner.HyperoptTuner + source: nni +- builtinName: Evolution + classArgsValidator: nni.algorithms.hpo.evolution_tuner.EvolutionClassArgsValidator + className: nni.algorithms.hpo.evolution_tuner.EvolutionTuner + source: nni +- acceptClassArgs: false + builtinName: BatchTuner + className: nni.algorithms.hpo.batch_tuner.BatchTuner + source: nni +- acceptClassArgs: false + builtinName: GridSearch + className: nni.algorithms.hpo.gridsearch_tuner.GridSearchTuner + source: nni +- builtinName: NetworkMorphism + classArgsValidator: nni.algorithms.hpo.networkmorphism_tuner.NetworkMorphismClassArgsValidator + className: nni.algorithms.hpo.networkmorphism_tuner.NetworkMorphismTuner + source: nni +- builtinName: MetisTuner + classArgsValidator: nni.algorithms.hpo.metis_tuner.MetisClassArgsValidator + className: nni.algorithms.hpo.metis_tuner.MetisTuner + source: nni +- builtinName: GPTuner + classArgsValidator: nni.algorithms.hpo.gp_tuner.GPClassArgsValidator + className: nni.algorithms.hpo.gp_tuner.GPTuner + source: nni +- builtinName: PBTTuner + classArgsValidator: nni.algorithms.hpo.pbt_tuner.PBTClassArgsValidator + className: nni.algorithms.hpo.pbt_tuner.PBTTuner + source: nni +- builtinName: RegularizedEvolutionTuner + classArgsValidator: nni.algorithms.hpo.regularized_evolution_tuner.EvolutionClassArgsValidator + className: 
nni.algorithms.hpo.regularized_evolution_tuner.RegularizedEvolutionTuner + source: nni +- builtinName: DNGOTuner + classArgsValidator: nni.algorithms.hpo.dngo_tuner.DNGOClassArgsValidator + className: nni.algorithms.hpo.dngo_tuner.DNGOTuner + source: nni diff --git a/nni/runtime/default_config/registered_algorithms.yml b/nni/runtime/default_config/registered_algorithms.yml new file mode 100644 index 0000000000000000000000000000000000000000..0967ef424bce6791893e9a57bb952f80fd536e93 --- /dev/null +++ b/nni/runtime/default_config/registered_algorithms.yml @@ -0,0 +1 @@ +{} diff --git a/nni/runtime/default_config/training_services.json b/nni/runtime/default_config/training_services.json new file mode 100644 index 0000000000000000000000000000000000000000..0967ef424bce6791893e9a57bb952f80fd536e93 --- /dev/null +++ b/nni/runtime/default_config/training_services.json @@ -0,0 +1 @@ +{} diff --git a/nni/runtime/env_vars.py b/nni/runtime/env_vars.py new file mode 100644 index 0000000000000000000000000000000000000000..810ab2f4f6db96d4ab195d44643f1deaab5f6528 --- /dev/null +++ b/nni/runtime/env_vars.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +from collections import namedtuple + + +_trial_env_var_names = [ + 'NNI_PLATFORM', + 'NNI_EXP_ID', + 'NNI_TRIAL_JOB_ID', + 'NNI_SYS_DIR', + 'NNI_OUTPUT_DIR', + 'NNI_TRIAL_SEQ_ID', + 'MULTI_PHASE', + 'REUSE_MODE' +] + +_dispatcher_env_var_names = [ + 'SDK_PROCESS', + 'NNI_MODE', + 'NNI_CHECKPOINT_DIRECTORY', + 'NNI_LOG_DIRECTORY', + 'NNI_LOG_LEVEL', + 'NNI_INCLUDE_INTERMEDIATE_RESULTS' +] + +def _load_env_vars(env_var_names): + env_var_dict = {k: os.environ.get(k) for k in env_var_names} + return namedtuple('EnvVars', env_var_names)(**env_var_dict) + +trial_env_vars = _load_env_vars(_trial_env_var_names) + +dispatcher_env_vars = _load_env_vars(_dispatcher_env_var_names) diff --git a/nni/runtime/log.py b/nni/runtime/log.py new file mode 100644 index 0000000000000000000000000000000000000000..afe583eaec783afdbdc4b9b65624f8154e3532cf --- /dev/null +++ b/nni/runtime/log.py @@ -0,0 +1,180 @@ +import logging +import sys +from datetime import datetime +from io import TextIOBase +from logging import FileHandler, Formatter, Handler, StreamHandler +from pathlib import Path +from typing import Optional + +import colorama + +from .env_vars import dispatcher_env_vars, trial_env_vars + +handlers = {} + +log_format = '[%(asctime)s] %(levelname)s (%(name)s/%(threadName)s) %(message)s' +time_format = '%Y-%m-%d %H:%M:%S' +formatter = Formatter(log_format, time_format) + + +def init_logger() -> None: + """ + This function will (and should only) get invoked on the first time of importing nni (no matter which submodule). + It will try to detect the running environment and setup logger accordingly. + + The detection should work in most cases but for `nnictl` and `nni.experiment`. + They will be identified as "standalone" mode and must configure the logger by themselves. + """ + colorama.init() + + if dispatcher_env_vars.SDK_PROCESS == 'dispatcher': + _init_logger_dispatcher() + return + + trial_platform = trial_env_vars.NNI_PLATFORM + + if trial_platform == 'unittest': + return + + if trial_platform and not trial_env_vars.REUSE_MODE: + _init_logger_trial() + return + + _init_logger_standalone() + + logging.getLogger('filelock').setLevel(logging.WARNING) + +_cli_log_initialized = False + +def init_logger_for_command_line() -> None: + """ + Initialize logger for command line usage. 
+ This means that NNI is used as "main function" rather than underlying library or background service, + so it should print log to stdout. + + It is used by nnictl and `nni.experiment.Experiment`. + + This function will get invoked after `init_logger()`. + """ + global _cli_log_initialized + if not _cli_log_initialized: + _cli_log_initialized = True + colorful_formatter = Formatter(log_format, time_format) + colorful_formatter.format = _colorful_format + handlers['_default_'].setFormatter(colorful_formatter) + +def start_experiment_log(experiment_id: str, log_directory: Path, debug: bool) -> None: + log_path = _prepare_log_dir(log_directory) / 'dispatcher.log' + log_level = logging.DEBUG if debug else logging.INFO + _register_handler(FileHandler(log_path), log_level, experiment_id) + +def stop_experiment_log(experiment_id: str) -> None: + if experiment_id in handlers: + handler = handlers.pop(experiment_id, None) + if handler is not None: + logging.getLogger().removeHandler(handler) + + +def _init_logger_dispatcher() -> None: + log_level_map = { + 'fatal': logging.CRITICAL, + 'error': logging.ERROR, + 'warning': logging.WARNING, + 'info': logging.INFO, + 'debug': logging.DEBUG, + 'trace': 0 + } + + log_path = _prepare_log_dir(dispatcher_env_vars.NNI_LOG_DIRECTORY) / 'dispatcher.log' + log_level = log_level_map.get(dispatcher_env_vars.NNI_LOG_LEVEL, logging.INFO) + _register_handler(FileHandler(log_path), log_level) + + +def _init_logger_trial() -> None: + log_path = _prepare_log_dir(trial_env_vars.NNI_OUTPUT_DIR) / 'trial.log' + log_file = open(log_path, 'a') + _register_handler(StreamHandler(log_file), logging.INFO) + + if trial_env_vars.NNI_PLATFORM == 'local': + sys.stdout = _LogFileWrapper(log_file) + + +def _init_logger_standalone() -> None: + _register_handler(StreamHandler(sys.stdout), logging.INFO) + + +def _prepare_log_dir(path: Optional[str]) -> Path: + if path is None: + return Path() + ret = Path(path) + ret.mkdir(parents=True, exist_ok=True) + return ret + +def _register_handler(handler: Handler, level: int, tag: str = '_default_') -> None: + assert tag not in handlers + handlers[tag] = handler + handler.setFormatter(formatter) + logger = logging.getLogger() + logger.addHandler(handler) + logger.setLevel(level) + +def _colorful_format(record): + time = formatter.formatTime(record, time_format) + if not record.name.startswith('nni.'): + return '[{}] ({}) {}'.format(time, record.name, record.msg % record.args) + if record.levelno >= logging.ERROR: + color = colorama.Fore.RED + level = 'ERROR: ' + elif record.levelno >= logging.WARNING: + color = colorama.Fore.YELLOW + level = 'WARNING: ' + elif record.levelno >= logging.INFO: + color = colorama.Fore.GREEN + level = '' + else: + color = colorama.Fore.BLUE + level = '' + msg = color + level + (record.msg % record.args) + colorama.Style.RESET_ALL + if record.levelno < logging.INFO: + return '[{}] {}:{} {}'.format(time, record.threadName, record.name, msg) + else: + return '[{}] {}'.format(time, msg) + +class _LogFileWrapper(TextIOBase): + # wrap the logger file so that anything written to it will automatically get formatted + + def __init__(self, log_file: TextIOBase): + self.file: TextIOBase = log_file + self.line_buffer: Optional[str] = None + self.line_start_time: Optional[datetime] = None + + def write(self, s: str) -> int: + cur_time = datetime.now() + if self.line_buffer and (cur_time - self.line_start_time).total_seconds() > 0.1: + self.flush() + + if self.line_buffer: + self.line_buffer += s + else: + self.line_buffer = s + 
self.line_start_time = cur_time
+
+        if '\n' not in s:
+            return len(s)
+
+        time_str = cur_time.strftime(time_format)
+        lines = self.line_buffer.split('\n')
+        for line in lines[:-1]:
+            self.file.write(f'[{time_str}] PRINT {line}\n')
+        self.file.flush()
+
+        self.line_buffer = lines[-1]
+        self.line_start_time = cur_time
+        return len(s)
+
+    def flush(self) -> None:
+        if self.line_buffer:
+            time_str = self.line_start_time.strftime(time_format)
+            self.file.write(f'[{time_str}] PRINT {self.line_buffer}\n')
+            self.file.flush()
+            self.line_buffer = None
diff --git a/nni/runtime/msg_dispatcher.py b/nni/runtime/msg_dispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f8481daffebcb7445a24350acdba6e1d918cc3f
--- /dev/null
+++ b/nni/runtime/msg_dispatcher.py
@@ -0,0 +1,243 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from collections import defaultdict
+
+from nni import NoMoreTrialError
+from nni.assessor import AssessResult
+
+from .common import multi_thread_enabled, multi_phase_enabled
+from .env_vars import dispatcher_env_vars
+from .msg_dispatcher_base import MsgDispatcherBase
+from .protocol import CommandType, send
+from ..common.serializer import dump, load
+from ..utils import MetricType
+
+_logger = logging.getLogger(__name__)
+
+# Assessor global variables
+_trial_history = defaultdict(dict)
+'''key: trial job ID; value: intermediate results, mapping from sequence number to data'''
+
+_ended_trials = set()
+'''trial_job_id of all ended trials.
+We need this because NNI manager may send metrics after reporting a trial ended.
+TODO: move this logic to NNI manager
+'''
+
+
+def _sort_history(history):
+    ret = []
+    for i, _ in enumerate(history):
+        if i in history:
+            ret.append(history[i])
+        else:
+            break
+    return ret
+
+
+# Tuner global variables
+_next_parameter_id = 0
+_trial_params = {}
+'''key: parameter ID; value: parameters'''
+_customized_parameter_ids = set()
+
+
+def _create_parameter_id():
+    global _next_parameter_id
+    _next_parameter_id += 1
+    return _next_parameter_id - 1
+
+
+def _pack_parameter(parameter_id, params, customized=False, trial_job_id=None, parameter_index=None):
+    _trial_params[parameter_id] = params
+    ret = {
+        'parameter_id': parameter_id,
+        'parameter_source': 'customized' if customized else 'algorithm',
+        'parameters': params
+    }
+    if trial_job_id is not None:
+        ret['trial_job_id'] = trial_job_id
+    if parameter_index is not None:
+        ret['parameter_index'] = parameter_index
+    else:
+        ret['parameter_index'] = 0
+    return dump(ret)
+
+
+class MsgDispatcher(MsgDispatcherBase):
+    def __init__(self, tuner, assessor=None):
+        super(MsgDispatcher, self).__init__()
+        self.tuner = tuner
+        self.assessor = assessor
+        if assessor is None:
+            _logger.debug('Assessor is not configured')
+
+    def load_checkpoint(self):
+        self.tuner.load_checkpoint()
+        if self.assessor is not None:
+            self.assessor.load_checkpoint()
+
+    def save_checkpoint(self):
+        self.tuner.save_checkpoint()
+        if self.assessor is not None:
+            self.assessor.save_checkpoint()
+
+    def handle_initialize(self, data):
+        """Data is the search space
+        """
+        self.tuner.update_search_space(data)
+        send(CommandType.Initialized, '')
+
+    def send_trial_callback(self, id_, params):
+        """Callback for the tuner to issue a trial config as soon as it is generated
+        """
+        send(CommandType.NewTrialJob, _pack_parameter(id_, params))
+
+    def handle_request_trial_jobs(self, data):
+        # data: number of trial jobs
+        ids = [_create_parameter_id() for _ in range(data)]
+        _logger.debug('Requesting to generate parameters for ids: %s', ids)
+        params_list = self.tuner.generate_multiple_parameters(ids, st_callback=self.send_trial_callback)
+
+        for i, _ in enumerate(params_list):
+            send(CommandType.NewTrialJob, _pack_parameter(ids[i], params_list[i]))
+        # if the tuner returns fewer parameter sets than requested, there are no more trials
+        if len(params_list) < len(ids):
+            send(CommandType.NoMoreTrialJobs, _pack_parameter(ids[0], ''))
+
+    def handle_update_search_space(self, data):
+        self.tuner.update_search_space(data)
+
+    def handle_import_data(self, data):
+        """Import additional data for tuning
+        data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
+        """
+        for entry in data:
+            entry['value'] = entry['value'] if type(entry['value']) is str else dump(entry['value'])
+            entry['value'] = load(entry['value'])
+        self.tuner.import_data(data)
+
+    def handle_add_customized_trial(self, data):
+        # data: parameters
+        id_ = _create_parameter_id()
+        _customized_parameter_ids.add(id_)
+
+    def handle_report_metric_data(self, data):
+        """
+        data: a dict received from nni_manager, which contains:
+        - 'parameter_id': id of the parameter set used by the trial
+        - 'value': metric value reported by nni.report_final_result()
+        - 'type': report type, support {'FINAL', 'PERIODICAL'}
+        """
+        # metric values are dumped as JSON strings in trials, so we need to decode them here
+        if 'value' in data:
+            data['value'] = load(data['value'])
+        if data['type'] == MetricType.FINAL:
+            self._handle_final_metric_data(data)
+        elif data['type'] == MetricType.PERIODICAL:
+            if self.assessor is not None:
+                self._handle_intermediate_metric_data(data)
+        elif data['type'] == MetricType.REQUEST_PARAMETER:
+            assert multi_phase_enabled()
+            assert data['trial_job_id'] is not None
+            assert data['parameter_index'] is not None
+            param_id = _create_parameter_id()
+            try:
+                param = self.tuner.generate_parameters(param_id, trial_job_id=data['trial_job_id'])
+            except NoMoreTrialError:
+                param = None
+            send(CommandType.SendTrialJobParameter, _pack_parameter(param_id, param, trial_job_id=data['trial_job_id'],
+                                                                    parameter_index=data['parameter_index']))
+        else:
+            raise ValueError('Data type not supported: {}'.format(data['type']))
+
+    def handle_trial_end(self, data):
+        """
+        data: it has three keys: trial_job_id, event, hyper_params
+        - trial_job_id: the id generated by training service
+        - event: the job's state
+        - hyper_params: the hyperparameters generated and returned by tuner
+        """
+        trial_job_id = data['trial_job_id']
+        _ended_trials.add(trial_job_id)
+        if trial_job_id in _trial_history:
+            _trial_history.pop(trial_job_id)
+            if self.assessor is not None:
+                self.assessor.trial_end(trial_job_id, data['event'] == 'SUCCEEDED')
+        if self.tuner is not None:
+            self.tuner.trial_end(load(data['hyper_params'])['parameter_id'], data['event'] == 'SUCCEEDED')
+
+    def _handle_final_metric_data(self, data):
+        """Call tuner to process final results
+        """
+        id_ = data['parameter_id']
+        value = data['value']
+        if id_ is None or id_ in _customized_parameter_ids:
+            if not hasattr(self.tuner, '_accept_customized'):
+                self.tuner._accept_customized = False
+            if not self.tuner._accept_customized:
+                _logger.info('Customized trial job %s ignored by tuner', id_)
+                return
+            customized = True
+        else:
+            customized = False
+        if id_ in _trial_params:
+            self.tuner.receive_trial_result(id_, _trial_params[id_], value, customized=customized,
+                                            trial_job_id=data.get('trial_job_id'))
+        else:
+            _logger.warning('Received final result for unknown parameter id %s; something may have gone wrong.', id_)
+
+    def _handle_intermediate_metric_data(self, data):
+        """Call assessor to process intermediate results
+        """
+        if data['type'] != MetricType.PERIODICAL:
+            return
+        if self.assessor is None:
+            return
+
+        trial_job_id = data['trial_job_id']
+        if trial_job_id in _ended_trials:
+            return
+
+        history = _trial_history[trial_job_id]
+        history[data['sequence']] = data['value']
+        ordered_history = _sort_history(history)
+        if len(ordered_history) < data['sequence']:  # no user-visible update since last time
+            return
+
+        try:
+            result = self.assessor.assess_trial(trial_job_id, ordered_history)
+        except Exception:
+            _logger.exception('Assessor error')
+            return
+
+        if isinstance(result, bool):
+            result = AssessResult.Good if result else AssessResult.Bad
+        elif not isinstance(result, AssessResult):
+            msg = 'Result of Assessor.assess_trial must be an object of AssessResult, not %s'
+            raise RuntimeError(msg % type(result))
+
+        if result is AssessResult.Bad:
+            _logger.debug('BAD, kill %s', trial_job_id)
+            send(CommandType.KillTrialJob, dump(trial_job_id))
+            # notify tuner
+            _logger.debug('env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]',
+                          dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS)
+            if dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS == 'true':
+                self._earlystop_notify_tuner(data)
+        else:
+            _logger.debug('GOOD')
+
+    def _earlystop_notify_tuner(self, data):
+        """Send last intermediate result as final result to tuner in case the
+        trial is early stopped.
+        """
+        _logger.debug('Early stop notify tuner data: [%s]', data)
+        data['type'] = MetricType.FINAL
+        if multi_thread_enabled():
+            self._handle_final_metric_data(data)
+        else:
+            data['value'] = dump(data['value'])
+            self.enqueue_command(CommandType.ReportMetricData, data)
diff --git a/nni/runtime/msg_dispatcher_base.py b/nni/runtime/msg_dispatcher_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..890ea30e57a05ff9a778061c6e796cbf02010a93
--- /dev/null
+++ b/nni/runtime/msg_dispatcher_base.py
@@ -0,0 +1,245 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import threading
+import logging
+from multiprocessing.dummy import Pool as ThreadPool
+from queue import Queue, Empty
+
+from .common import multi_thread_enabled
+from .env_vars import dispatcher_env_vars
+from ..common import load
+from ..recoverable import Recoverable
+from .protocol import CommandType, receive
+
+
+_logger = logging.getLogger(__name__)
+
+QUEUE_LEN_WARNING_MARK = 20
+_worker_fast_exit_on_terminate = True
+
+
+class MsgDispatcherBase(Recoverable):
+    """Base class in which tuner and assessor are not yet defined.
+    Inherit this class to implement your own advisor.
+    """
+
+    def __init__(self):
+        self.stopping = False
+        if multi_thread_enabled():
+            self.pool = ThreadPool()
+            self.thread_results = []
+        else:
+            self.default_command_queue = Queue()
+            self.assessor_command_queue = Queue()
+            self.default_worker = threading.Thread(target=self.command_queue_worker, args=(self.default_command_queue,))
+            self.assessor_worker = threading.Thread(target=self.command_queue_worker,
+                                                    args=(self.assessor_command_queue,))
+            self.default_worker.start()
+            self.assessor_worker.start()
+            self.worker_exceptions = []
+
+    def run(self):
+        """Run the dispatcher.
+        This function never returns unless an exception is raised.
+        """
+        _logger.info('Dispatcher started')
+        if dispatcher_env_vars.NNI_MODE == 'resume':
+            self.load_checkpoint()
+
+        while not self.stopping:
+            command, data = receive()
+            if data:
+                data = load(data)
+
+            if command is None or command is CommandType.Terminate:
+                break
+            if multi_thread_enabled():
+                result = self.pool.map_async(self.process_command_thread, [(command, data)])
+                self.thread_results.append(result)
+                if any([thread_result.ready() and not thread_result.successful() for thread_result in
+                        self.thread_results]):
+                    _logger.debug('Caught thread exception')
+                    break
+            else:
+                self.enqueue_command(command, data)
+                if self.worker_exceptions:
+                    break
+
+        _logger.info('Dispatcher exiting...')
+        self.stopping = True
+        if multi_thread_enabled():
+            self.pool.close()
+            self.pool.join()
+        else:
+            self.default_worker.join()
+            self.assessor_worker.join()
+
+        _logger.info('Dispatcher terminated')
+
+    def command_queue_worker(self, command_queue):
+        """Process commands in command queues.
+        """
+        while True:
+            try:
+                # set timeout to ensure self.stopping is checked periodically
+                command, data = command_queue.get(timeout=3)
+                try:
+                    self.process_command(command, data)
+                except Exception as e:
+                    _logger.exception(e)
+                    self.worker_exceptions.append(e)
+                    break
+            except Empty:
+                pass
+            if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()):
+                break
+
+    def enqueue_command(self, command, data):
+        """Enqueue command into command queues.
+        """
+        if command == CommandType.TrialEnd or (
+                command == CommandType.ReportMetricData and data['type'] == 'PERIODICAL'):
+            self.assessor_command_queue.put((command, data))
+        else:
+            self.default_command_queue.put((command, data))
+
+        qsize = self.default_command_queue.qsize()
+        if qsize >= QUEUE_LEN_WARNING_MARK:
+            _logger.warning('default queue length: %d', qsize)
+
+        qsize = self.assessor_command_queue.qsize()
+        if qsize >= QUEUE_LEN_WARNING_MARK:
+            _logger.warning('assessor queue length: %d', qsize)
+
+    def process_command_thread(self, request):
+        """Worker thread to process a command.
+        """
+        command, data = request
+        if multi_thread_enabled():
+            try:
+                self.process_command(command, data)
+            except Exception as e:
+                _logger.exception(str(e))
+                raise
+        else:
+            pass
+
+    def process_command(self, command, data):
+        _logger.debug('process_command: command: [%s], data: [%s]', command, data)
+
+        command_handlers = {
+            # Tuner commands:
+            CommandType.Initialize: self.handle_initialize,
+            CommandType.RequestTrialJobs: self.handle_request_trial_jobs,
+            CommandType.UpdateSearchSpace: self.handle_update_search_space,
+            CommandType.ImportData: self.handle_import_data,
+            CommandType.AddCustomizedTrialJob: self.handle_add_customized_trial,
+
+            # Tuner/Assessor commands:
+            CommandType.ReportMetricData: self.handle_report_metric_data,
+
+            CommandType.TrialEnd: self.handle_trial_end,
+            CommandType.Ping: self.handle_ping,
+        }
+        if command not in command_handlers:
+            raise AssertionError('Unsupported command: {}'.format(command))
+        command_handlers[command](data)
+
+    def handle_ping(self, data):
+        pass
+
+    def handle_initialize(self, data):
+        """Initialize search space and tuner, if any.
+        This method is meant to be called only once for each experiment. After it is called,
+        the dispatcher should `send(CommandType.Initialized, '')` to set the experiment status to "INITIALIZED".
+
+        Parameters
+        ----------
+        data: dict
+            search space
+        """
+        raise NotImplementedError('handle_initialize not implemented')
+
+    def handle_request_trial_jobs(self, data):
+        """The message dispatcher is requested to generate ``data`` trial jobs.
+        These trial jobs should be sent via ``send(CommandType.NewTrialJob, nni.dump(parameter))``,
+        where ``parameter`` will be received by NNI Manager and eventually accessible to trial jobs as "next parameter".
+        Semantically, the message dispatcher should perform this ``send`` exactly ``data`` times.
+
+        The JSON sent by this method should follow the format of
+
+        ::
+
+            {
+                "parameter_id": 42,
+                "parameters": {
+                    // this will be received by trial
+                },
+                "parameter_source": "algorithm" // optional
+            }
+
+        Parameters
+        ----------
+        data: int
+            number of trial jobs
+        """
+        raise NotImplementedError('handle_request_trial_jobs not implemented')
+
+    def handle_update_search_space(self, data):
+        """This method will be called when the search space is updated.
+        It's recommended to call this method in `handle_initialize` to initialize the search space.
+        *No need to* notify NNI Manager when this update is done.
+        Parameters
+        ----------
+        data: dict
+            search space
+        """
+        raise NotImplementedError('handle_update_search_space not implemented')
+
+    def handle_import_data(self, data):
+        """Import previous data when the experiment is resumed.
+        Parameters
+        ----------
+        data: list
+            a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
+        """
+        raise NotImplementedError('handle_import_data not implemented')
+
+    def handle_add_customized_trial(self, data):
+        """Experimental API. Not recommended for usage.
+        """
+        raise NotImplementedError('handle_add_customized_trial not implemented')
+
+    def handle_report_metric_data(self, data):
+        """Called when metric data is reported or new parameters are requested (for multiphase).
+        When new parameters are requested, this method should send a new parameter.
+
+        Parameters
+        ----------
+        data: dict
+            a dict which contains 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
+            type: can be `MetricType.REQUEST_PARAMETER`, `MetricType.FINAL` or `MetricType.PERIODICAL`.
+            `REQUEST_PARAMETER` is used to request new parameters for a multiphase trial job. In this case,
+            the dict will contain additional keys: `trial_job_id`, `parameter_index`. Refer to `msg_dispatcher.py`
+            as an example.
+
+        Raises
+        ------
+        ValueError
+            Data type is not supported
+        """
+        raise NotImplementedError('handle_report_metric_data not implemented')
+
+    def handle_trial_end(self, data):
+        """Called when the state of one of the trials changes.
+
+        Parameters
+        ----------
+        data: dict
+            a dict with keys: trial_job_id, event, hyper_params.
+            trial_job_id: the id generated by training service.
+            event: the job's state.
+            hyper_params: the string sent by the message dispatcher during trial creation.
+
+        """
+        raise NotImplementedError('handle_trial_end not implemented')
\ No newline at end of file
diff --git a/nni/runtime/platform/__init__.py b/nni/runtime/platform/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f42a16f4351ac1105bdcba0d1d60647f608b476
--- /dev/null
+++ b/nni/runtime/platform/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
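Editor's note: to make the ``handle_request_trial_jobs`` contract above concrete, a minimal sketch of one payload follows (illustration only, not part of this diff; the parameter values are invented, and plain ``json.dumps`` stands in here for ``nni.dump``):

```python
import json

# Invented example values; only the key layout follows the docstring above.
new_trial_job = {
    'parameter_id': 42,
    'parameters': {'learning_rate': 0.01, 'batch_size': 32},  # received by the trial
    'parameter_source': 'algorithm',  # optional
}
payload = json.dumps(new_trial_job)
# A dispatcher implementation would then call:
#     send(CommandType.NewTrialJob, payload)
# exactly `data` times per request.
```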
+ +from ..env_vars import trial_env_vars, dispatcher_env_vars + +assert dispatcher_env_vars.SDK_PROCESS != 'dispatcher' + +if trial_env_vars.NNI_PLATFORM is None: + from .standalone import * +elif trial_env_vars.NNI_PLATFORM == 'unittest': + from .test import * +else: + from .local import * diff --git a/nni/runtime/platform/local.py b/nni/runtime/platform/local.py new file mode 100644 index 0000000000000000000000000000000000000000..2cabc003cf43d793ca04ce12f998e62b54ee21c1 --- /dev/null +++ b/nni/runtime/platform/local.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import sys +import time +import subprocess + +from nni.common import dump, load +from ..env_vars import trial_env_vars + +_sysdir = trial_env_vars.NNI_SYS_DIR +if not os.path.exists(os.path.join(_sysdir, '.nni')): + os.makedirs(os.path.join(_sysdir, '.nni')) +_metric_file = open(os.path.join(_sysdir, '.nni', 'metrics'), 'ab') + +_outputdir = trial_env_vars.NNI_OUTPUT_DIR +if not os.path.exists(_outputdir): + os.makedirs(_outputdir) + +_reuse_mode = trial_env_vars.REUSE_MODE +_nni_platform = trial_env_vars.NNI_PLATFORM + +_multiphase = trial_env_vars.MULTI_PHASE + +_param_index = 0 + +def request_next_parameter(): + metric = dump({ + 'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID, + 'type': 'REQUEST_PARAMETER', + 'sequence': 0, + 'parameter_index': _param_index + }) + send_metric(metric) + +def get_next_parameter(): + global _param_index + params_file_name = '' + if _multiphase in ('true', 'True'): + params_file_name = ('parameter_{}.cfg'.format(_param_index), 'parameter.cfg')[_param_index == 0] + else: + if _param_index > 0: + return None + elif _param_index == 0: + params_file_name = 'parameter.cfg' + else: + raise AssertionError('_param_index value ({}) should >=0'.format(_param_index)) + + params_filepath = os.path.join(_sysdir, params_file_name) + if not os.path.isfile(params_filepath): + request_next_parameter() + while not (os.path.isfile(params_filepath) and os.path.getsize(params_filepath) > 0): + time.sleep(3) + params_file = open(params_filepath, 'r') + params = load(fp=params_file) + _param_index += 1 + return params + +def send_metric(string): + if _nni_platform != 'local' or _reuse_mode in ('true', 'True'): + assert len(string) < 1000000, 'Metric too long' + print("NNISDK_MEb'%s'" % (string), flush=True) + else: + data = (string + '\n').encode('utf8') + assert len(data) < 1000000, 'Metric too long' + _metric_file.write(b'ME%06d%b' % (len(data), data)) + _metric_file.flush() + if sys.platform == "win32": + file = open(_metric_file.name) + file.close() + else: + subprocess.run(['touch', _metric_file.name], check=True) + +def get_experiment_id(): + return trial_env_vars.NNI_EXP_ID + +def get_trial_id(): + return trial_env_vars.NNI_TRIAL_JOB_ID + +def get_sequence_id(): + return int(trial_env_vars.NNI_TRIAL_SEQ_ID) diff --git a/nni/runtime/platform/standalone.py b/nni/runtime/platform/standalone.py new file mode 100644 index 0000000000000000000000000000000000000000..f8926822b1f843d588dbf6e146e481ffef0450c9 --- /dev/null +++ b/nni/runtime/platform/standalone.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
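Editor's note: the record framing used by ``send_metric`` in ``local.py`` above can be hard to read off the format string. A minimal sketch, assuming an invented payload: each record written to the metrics file is ``ME``, a six-digit length, then the newline-terminated JSON payload.

```python
# Invented payload; the framing mirrors _metric_file.write(b'ME%06d%b' % ...).
data = ('{"type": "FINAL", "value": "0.93"}' + '\n').encode('utf8')
record = b'ME%06d%b' % (len(data), data)
assert record == b'ME000035{"type": "FINAL", "value": "0.93"}\n'
```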
+ +import logging +import warnings + +import colorama +from nni.common import load + + +__all__ = [ + 'get_next_parameter', + 'get_experiment_id', + 'get_trial_id', + 'get_sequence_id', + 'send_metric', +] + +_logger = logging.getLogger('nni') + + +def get_next_parameter(): + warning_message = ''.join([ + colorama.Style.BRIGHT, + colorama.Fore.RED, + 'Running NNI code without runtime. ', + 'Check the following tutorial if you are new to NNI: ', + colorama.Fore.YELLOW, + 'https://nni.readthedocs.io/en/stable/Tutorial/QuickStart.html#id1', + colorama.Style.RESET_ALL + ]) + warnings.warn(warning_message, RuntimeWarning) + return { + 'parameter_id': None, + 'parameters': {} + } + +def get_experiment_id(): + return 'STANDALONE' + +def get_trial_id(): + return 'STANDALONE' + +def get_sequence_id(): + return 0 + +def send_metric(string): + metric = load(string) + if metric['type'] == 'FINAL': + _logger.info('Final result: %s', metric['value']) + elif metric['type'] == 'PERIODICAL': + _logger.info('Intermediate result: %s (Index %s)', metric['value'], metric['sequence']) + else: + _logger.error('Unexpected metric: %s', string) diff --git a/nni/runtime/platform/test.py b/nni/runtime/platform/test.py new file mode 100644 index 0000000000000000000000000000000000000000..05133522b895f5e7d8756bc52c02c7470969a962 --- /dev/null +++ b/nni/runtime/platform/test.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +# pylint: skip-file + +import copy +from nni.common import load + + +_params = None +_last_metric = None + + +def get_next_parameter(): + return _params + + +def get_experiment_id(): + return 'fakeidex' + + +def get_trial_id(): + return 'fakeidtr' + + +def get_sequence_id(): + return 0 + + +def send_metric(string): + global _last_metric + _last_metric = string + + +def init_params(params): + global _params + _params = copy.deepcopy(params) + + +def get_last_metric(): + metrics = load(_last_metric) + metrics['value'] = load(metrics['value']) + + return metrics diff --git a/nni/runtime/protocol.py b/nni/runtime/protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..9c5222f0970a71ac6bc7f6fd45e397ebfac2680b --- /dev/null +++ b/nni/runtime/protocol.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import os +import threading +from enum import Enum + +_logger = logging.getLogger(__name__) + + +class CommandType(Enum): + # in + Initialize = b'IN' + RequestTrialJobs = b'GE' + ReportMetricData = b'ME' + UpdateSearchSpace = b'SS' + ImportData = b'FD' + AddCustomizedTrialJob = b'AD' + TrialEnd = b'EN' + Terminate = b'TE' + Ping = b'PI' + + # out + Initialized = b'ID' + NewTrialJob = b'TR' + SendTrialJobParameter = b'SP' + NoMoreTrialJobs = b'NO' + KillTrialJob = b'KI' + +_lock = threading.Lock() +try: + if os.environ.get('NNI_PLATFORM') != 'unittest': + _in_file = open(3, 'rb') + _out_file = open(4, 'wb') +except OSError: + _logger.debug('IPC pipeline not exists') + + +def send(command, data): + """Send command to Training Service. + command: CommandType object. + data: string payload. + """ + global _lock + try: + _lock.acquire() + data = data.encode('utf8') + msg = b'%b%014d%b' % (command.value, len(data), data) + _logger.debug('Sending command, data: [%s]', msg) + _out_file.write(msg) + _out_file.flush() + finally: + _lock.release() + + +def receive(): + """Receive a command from Training Service. 
+ Returns a tuple of command (CommandType) and payload (str) + """ + header = _in_file.read(16) + _logger.debug('Received command, header: [%s]', header) + if header is None or len(header) < 16: + # Pipe EOF encountered + _logger.debug('Pipe EOF encountered') + return None, None + length = int(header[2:]) + data = _in_file.read(length) + command = CommandType(header[:2]) + data = data.decode('utf8') + _logger.debug('Received command, data: [%s]', data) + return command, data diff --git a/nni/smartparam.py b/nni/smartparam.py new file mode 100644 index 0000000000000000000000000000000000000000..dde0ac2bd64e8b22e2948909b116c33aefda7561 --- /dev/null +++ b/nni/smartparam.py @@ -0,0 +1,148 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np + +from .runtime.env_vars import trial_env_vars +from . import trial +from . import parameter_expressions as param_exp +from .common.nas_utils import classic_mode, enas_mode, oneshot_mode, darts_mode + + +__all__ = [ + 'choice', + 'randint', + 'uniform', + 'quniform', + 'loguniform', + 'qloguniform', + 'normal', + 'qnormal', + 'lognormal', + 'qlognormal', + 'function_choice', + 'mutable_layer' +] + + +if trial_env_vars.NNI_PLATFORM is None: + def choice(*options, name=None): + return param_exp.choice(options, np.random.RandomState()) + + def randint(lower, upper, name=None): + return param_exp.randint(lower, upper, np.random.RandomState()) + + def uniform(low, high, name=None): + return param_exp.uniform(low, high, np.random.RandomState()) + + def quniform(low, high, q, name=None): + assert high > low, 'Upper bound must be larger than lower bound' + return param_exp.quniform(low, high, q, np.random.RandomState()) + + def loguniform(low, high, name=None): + assert low > 0, 'Lower bound must be positive' + return param_exp.loguniform(low, high, np.random.RandomState()) + + def qloguniform(low, high, q, name=None): + return param_exp.qloguniform(low, high, q, np.random.RandomState()) + + def normal(mu, sigma, name=None): + return param_exp.normal(mu, sigma, np.random.RandomState()) + + def qnormal(mu, sigma, q, name=None): + return param_exp.qnormal(mu, sigma, q, np.random.RandomState()) + + def lognormal(mu, sigma, name=None): + return param_exp.lognormal(mu, sigma, np.random.RandomState()) + + def qlognormal(mu, sigma, q, name=None): + return param_exp.qlognormal(mu, sigma, q, np.random.RandomState()) + + def function_choice(*funcs, name=None): + return param_exp.choice(funcs, np.random.RandomState())() + + def mutable_layer(): + raise RuntimeError('Cannot call nni.mutable_layer in this mode') + +else: + + def choice(options, name=None, key=None): + return options[_get_param(key)] + + def randint(lower, upper, name=None, key=None): + return _get_param(key) + + def uniform(low, high, name=None, key=None): + return _get_param(key) + + def quniform(low, high, q, name=None, key=None): + return _get_param(key) + + def loguniform(low, high, name=None, key=None): + return _get_param(key) + + def qloguniform(low, high, q, name=None, key=None): + return _get_param(key) + + def normal(mu, sigma, name=None, key=None): + return _get_param(key) + + def qnormal(mu, sigma, q, name=None, key=None): + return _get_param(key) + + def lognormal(mu, sigma, name=None, key=None): + return _get_param(key) + + def qlognormal(mu, sigma, q, name=None, key=None): + return _get_param(key) + + def function_choice(funcs, name=None, key=None): + return funcs[_get_param(key)]() + + def mutable_layer( + mutable_id, + mutable_layer_id, + funcs, + 
funcs_args, + fixed_inputs, + optional_inputs, + optional_input_size, + mode='classic_mode', + tf=None): + '''execute the chosen function and inputs. + Below is an example of chosen function and inputs: + { + "mutable_id": { + "mutable_layer_id": { + "chosen_layer": "pool", + "chosen_inputs": ["out1", "out3"] + } + } + } + Parameters: + --------------- + mutable_id: the name of this mutable_layer block (which could have multiple mutable layers) + mutable_layer_id: the name of a mutable layer in this block + funcs: dict of function calls + funcs_args: + fixed_inputs: + optional_inputs: dict of optional inputs + optional_input_size: number of candidate inputs to be chosen + tf: tensorflow module + ''' + args = (mutable_id, mutable_layer_id, funcs, funcs_args, fixed_inputs, optional_inputs, optional_input_size) + if mode == 'classic_mode': + return classic_mode(*args) + assert tf is not None, 'Internal Error: Tensorflow should not be None in modes other than classic_mode' + if mode == 'enas_mode': + return enas_mode(*args, tf) + if mode == 'oneshot_mode': + return oneshot_mode(*args, tf) + if mode == 'darts_mode': + return darts_mode(*args, tf) + raise RuntimeError('Unrecognized mode: %s' % mode) + + def _get_param(key): + if trial.get_current_parameter() is None: + trial.get_next_parameter() + return trial.get_current_parameter(key) diff --git a/nni/tools/__init__.py b/nni/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/tools/annotation/.gitignore b/nni/tools/annotation/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..36e264cf443988bf3101b2467e83b259bcd5a4ad --- /dev/null +++ b/nni/tools/annotation/.gitignore @@ -0,0 +1 @@ +_generated diff --git a/nni/tools/annotation/__init__.py b/nni/tools/annotation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ba405c8844591ec083921c90ba2a1d67d52cf8 --- /dev/null +++ b/nni/tools/annotation/__init__.py @@ -0,0 +1,141 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import sys +import shutil +import json + +from . import code_generator +from . import search_space_generator +from . import specific_code_generator + + +__all__ = ['generate_search_space', 'expand_annotations'] + +slash = '/' +if sys.platform == "win32": + slash = '\\' + +def generate_search_space(code_dir): + """Generate search space from Python source code. + Return a serializable search space object. + code_dir: directory path of source files (str) + """ + code_dir = str(code_dir) + search_space = {} + + if code_dir.endswith(slash): + code_dir = code_dir[:-1] + + for subdir, _, files in os.walk(code_dir): + # generate module name from path + if subdir == code_dir: + package = '' + else: + assert subdir.startswith(code_dir + slash), subdir + prefix_len = len(code_dir) + 1 + package = subdir[prefix_len:].replace(slash, '.') + '.' 
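# --- Editor's illustration (not part of this diff) ---------------------------
# How the module name is derived at this step, with invented example paths
# (on POSIX, `slash` is '/'):
#     code_dir = 'proj', subdir = 'proj/models/cnn'
package = 'proj/models/cnn'[len('proj') + 1:].replace('/', '.') + '.'  # -> 'models.cnn.'
module = package + 'net.py'[:-3]                                       # -> 'models.cnn.net'
# ------------------------------------------------------------------------------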
+ + for file_name in files: + if file_name.endswith('.py'): + path = os.path.join(subdir, file_name) + module = package + file_name[:-3] + search_space.update(_generate_file_search_space(path, module)) + + return search_space + +def _generate_file_search_space(path, module): + with open(path) as src: + try: + search_space, code = search_space_generator.generate(module, src.read()) + except Exception as exc: # pylint: disable=broad-except + if exc.args: + raise RuntimeError(path + ' ' + '\n'.join(exc.args)) + else: + raise RuntimeError('Failed to generate search space for %s: %r' % (path, exc)) + with open(path, 'w') as dst: + dst.write(code) + return search_space + + +def expand_annotations(src_dir, dst_dir, exp_id='', trial_id='', nas_mode=None): + """Expand annotations in user code. + Return dst_dir if annotation detected; return src_dir if not. + src_dir: directory path of user code (str) + dst_dir: directory to place generated files (str) + nas_mode: the mode of NAS given that NAS interface is used + """ + src_dir, dst_dir = str(src_dir), str(dst_dir) + + if src_dir[-1] == slash: + src_dir = src_dir[:-1] + + if dst_dir[-1] == slash: + dst_dir = dst_dir[:-1] + + annotated = False + + for src_subdir, dirs, files in os.walk(src_dir): + assert src_subdir.startswith(src_dir) + dst_subdir = src_subdir.replace(src_dir, dst_dir, 1) + os.makedirs(dst_subdir, exist_ok=True) + + # generate module name from path + if src_subdir == src_dir: + package = '' + else: + assert src_subdir.startswith(src_dir + slash), src_subdir + prefix_len = len(src_dir) + 1 + package = src_subdir[prefix_len:].replace(slash, '.') + '.' + + for file_name in files: + src_path = os.path.join(src_subdir, file_name) + dst_path = os.path.join(dst_subdir, file_name) + if file_name.endswith('.py'): + if trial_id == '': + annotated |= _expand_file_annotations(src_path, dst_path, nas_mode) + else: + module = package + file_name[:-3] + annotated |= _generate_specific_file(src_path, dst_path, exp_id, trial_id, module) + else: + shutil.copyfile(src_path, dst_path) + + for dir_name in dirs: + os.makedirs(os.path.join(dst_subdir, dir_name), exist_ok=True) + + return dst_dir if annotated else src_dir + +def _expand_file_annotations(src_path, dst_path, nas_mode): + with open(src_path) as src, open(dst_path, 'w') as dst: + try: + annotated_code = code_generator.parse(src.read(), nas_mode) + if annotated_code is None: + shutil.copyfile(src_path, dst_path) + return False + dst.write(annotated_code) + return True + + except Exception as exc: # pylint: disable=broad-except + if exc.args: + raise RuntimeError(src_path + ' ' + '\n'.join(str(arg) for arg in exc.args)) + else: + raise RuntimeError('Failed to expand annotations for %s: %r' % (src_path, exc)) + +def _generate_specific_file(src_path, dst_path, exp_id, trial_id, module): + with open(src_path) as src, open(dst_path, 'w') as dst: + try: + with open(os.path.expanduser('~/nni-experiments/%s/trials/%s/parameter.cfg'%(exp_id, trial_id))) as fd: + para_cfg = json.load(fd) + annotated_code = specific_code_generator.parse(src.read(), para_cfg["parameters"], module) + if annotated_code is None: + shutil.copyfile(src_path, dst_path) + return False + dst.write(annotated_code) + return True + + except Exception as exc: # pylint: disable=broad-except + if exc.args: + raise RuntimeError(src_path + ' ' + '\n'.join(str(arg) for arg in exc.args)) + else: + raise RuntimeError('Failed to expand annotations for %s: %r' % (src_path, exc)) diff --git a/nni/tools/annotation/code_generator.py 
b/nni/tools/annotation/code_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..c924e4195072eeff3e112b9ca5e01cb1df41c178 --- /dev/null +++ b/nni/tools/annotation/code_generator.py @@ -0,0 +1,369 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import ast +import astor + +from .utils import ast_Num, ast_Str, lineno + +# pylint: disable=unidiomatic-typecheck + +def parse_annotation_mutable_layers(code, lineno, nas_mode): + """Parse the string of mutable layers in annotation. + Return a list of AST Expr nodes + code: annotation string (excluding '@') + nas_mode: the mode of NAS + """ + module = ast.parse(code) + assert type(module) is ast.Module, 'internal error #1' + assert len(module.body) == 1, 'Annotation mutable_layers contains more than one expression' + assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' + call = module.body[0].value + nodes = [] + mutable_id = 'mutable_block_' + str(lineno) + mutable_layer_cnt = 0 + for arg in call.args: + fields = {'layer_choice': False, + 'fixed_inputs': False, + 'optional_inputs': False, + 'optional_input_size': False, + 'layer_output': False} + for k, value in zip(arg.keys, arg.values): + if k.id == 'layer_choice': + assert not fields['layer_choice'], 'Duplicated field: layer_choice' + assert type(value) is ast.List, 'Value of layer_choice should be a list' + call_funcs_keys = [] + call_funcs_values = [] + call_kwargs_values = [] + for call in value.elts: + assert type(call) is ast.Call, 'Element in layer_choice should be function call' + call_name = astor.to_source(call).strip() + call_funcs_keys.append(ast_Str(s=call_name)) + call_funcs_values.append(call.func) + assert not call.args, 'Number of args without keyword should be zero' + kw_args = [] + kw_values = [] + for kw in call.keywords: + kw_args.append(ast_Str(s=kw.arg)) + kw_values.append(kw.value) + call_kwargs_values.append(ast.Dict(keys=kw_args, values=kw_values)) + call_funcs = ast.Dict(keys=call_funcs_keys, values=call_funcs_values) + call_kwargs = ast.Dict(keys=call_funcs_keys, values=call_kwargs_values) + fields['layer_choice'] = True + elif k.id == 'fixed_inputs': + assert not fields['fixed_inputs'], 'Duplicated field: fixed_inputs' + assert type(value) is ast.List, 'Value of fixed_inputs should be a list' + fixed_inputs = value + fields['fixed_inputs'] = True + elif k.id == 'optional_inputs': + assert not fields['optional_inputs'], 'Duplicated field: optional_inputs' + assert type(value) is ast.List, 'Value of optional_inputs should be a list' + var_names = [ast_Str(s=astor.to_source(var).strip()) for var in value.elts] + optional_inputs = ast.Dict(keys=var_names, values=value.elts) + fields['optional_inputs'] = True + elif k.id == 'optional_input_size': + assert not fields['optional_input_size'], 'Duplicated field: optional_input_size' + assert type(value) is ast_Num or type(value) is ast.List, \ + 'Value of optional_input_size should be a number or list' + optional_input_size = value + fields['optional_input_size'] = True + elif k.id == 'layer_output': + assert not fields['layer_output'], 'Duplicated field: layer_output' + assert type(value) is ast.Name, 'Value of layer_output should be ast.Name type' + layer_output = value + fields['layer_output'] = True + else: + raise AssertionError('Unexpected field in mutable layer') + # make call for this mutable layer + assert fields['layer_choice'], 'layer_choice must exist' + assert fields['layer_output'], 'layer_output must exist' + mutable_layer_id = 
'mutable_layer_' + str(mutable_layer_cnt) + mutable_layer_cnt += 1 + target_call_attr = ast.Attribute(value=ast.Name(id='nni', ctx=ast.Load()), attr='mutable_layer', ctx=ast.Load()) + target_call_args = [ast_Str(s=mutable_id), + ast_Str(s=mutable_layer_id), + call_funcs, + call_kwargs] + if fields['fixed_inputs']: + target_call_args.append(fixed_inputs) + else: + target_call_args.append(ast.List(elts=[])) + if fields['optional_inputs']: + target_call_args.append(optional_inputs) + assert fields['optional_input_size'], 'optional_input_size must exist when optional_inputs exists' + target_call_args.append(optional_input_size) + else: + target_call_args.append(ast.Dict(keys=[], values=[])) + target_call_args.append(ast_Num(n=0)) + target_call_args.append(ast_Str(s=nas_mode)) + if nas_mode in ['enas_mode', 'oneshot_mode', 'darts_mode']: + target_call_args.append(ast.Name(id='tensorflow')) + target_call = ast.Call(func=target_call_attr, args=target_call_args, keywords=[]) + node = ast.Assign(targets=[layer_output], value=target_call) + nodes.append(node) + return nodes + + +def parse_annotation(code): + """Parse an annotation string. + Return an AST Expr node. + code: annotation string (excluding '@') + """ + module = ast.parse(code) + assert type(module) is ast.Module, 'internal error #1' + assert len(module.body) == 1, 'Annotation contains more than one expression' + assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' + return module.body[0] + + +def parse_annotation_function(code, func_name): + """Parse an annotation function. + Return the value of `name` keyword argument and the AST Call node. + func_name: expected function name + """ + expr = parse_annotation(code) + call = expr.value + assert type(call) is ast.Call, 'Annotation is not a function call' + + assert type(call.func) is ast.Attribute, 'Unexpected annotation function' + assert type(call.func.value) is ast.Name, 'Invalid annotation function name' + assert call.func.value.id == 'nni', 'Annotation is not a NNI function' + assert call.func.attr == func_name, 'internal error #2' + + assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument' + assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"' + name = call.keywords[0].value + + return name, call + + +def parse_nni_variable(code): + """Parse `nni.variable` expression. + Return the name argument and AST node of annotated expression. + code: annotation string + """ + name, call = parse_annotation_function(code, 'variable') + + assert len(call.args) == 1, 'nni.variable contains more than one arguments' + arg = call.args[0] + assert type(arg) is ast.Call, 'Value of nni.variable is not a function call' + assert type(arg.func) is ast.Attribute, 'nni.variable value is not a NNI function' + assert type(arg.func.value) is ast.Name, 'nni.variable value is not a NNI function' + assert arg.func.value.id == 'nni', 'nni.variable value is not a NNI function' + + name_str = astor.to_source(name).strip() + keyword_arg = ast.keyword(arg='name', value=ast_Str(s=name_str)) + arg.keywords.append(keyword_arg) + if arg.func.attr == 'choice': + convert_args_to_dict(arg) + + return name, arg + + +def parse_nni_function(code): + """Parse `nni.function_choice` expression. + Return the AST node of annotated expression and a list of dumped function call expressions. 
+ code: annotation string + """ + name, call = parse_annotation_function(code, 'function_choice') + funcs = [ast.dump(func, False) for func in call.args] + convert_args_to_dict(call, with_lambda=True) + + name_str = astor.to_source(name).strip() + call.keywords[0].value = ast_Str(s=name_str) + + return call, funcs + + +def convert_args_to_dict(call, with_lambda=False): + """Convert all args to a dict such that every key and value in the dict is the same as the value of the arg. + Return the AST Call node with only one arg that is the dictionary + """ + keys, values = list(), list() + for arg in call.args: + if type(arg) in [ast_Str, ast_Num]: + arg_value = arg + else: + # if arg is not a string or a number, we use its source code as the key + arg_value = astor.to_source(arg).strip('\n"') + arg_value = ast_Str(str(arg_value)) + arg = make_lambda(arg) if with_lambda else arg + keys.append(arg_value) + values.append(arg) + del call.args[:] + call.args.append(ast.Dict(keys=keys, values=values)) + + return call + + +def make_lambda(call): + """Wrap an AST Call node to lambda expression node. + call: ast.Call node + """ + empty_args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[]) + return ast.Lambda(args=empty_args, body=call) + + +def test_variable_equal(node1, node2): + """Test whether two variables are the same.""" + if type(node1) is not type(node2): + return False + if isinstance(node1, ast.AST): + for k, v in vars(node1).items(): + if k in ('lineno', 'col_offset', 'ctx', 'end_lineno', 'end_col_offset'): + continue + if not test_variable_equal(v, getattr(node2, k)): + return False + return True + if isinstance(node1, list): + if len(node1) != len(node2): + return False + return all(test_variable_equal(n1, n2) for n1, n2 in zip(node1, node2)) + + return node1 == node2 + + +def replace_variable_node(node, annotation): + """Replace a node annotated by `nni.variable`. + node: the AST node to replace + annotation: annotation string + """ + assert type(node) is ast.Assign, 'nni.variable is not annotating assignment expression' + assert len(node.targets) == 1, 'Annotated assignment has more than one left-hand value' + name, expr = parse_nni_variable(annotation) + assert test_variable_equal(node.targets[0], name), 'Annotated variable has wrong name' + node.value = expr + return node + + +def replace_function_node(node, annotation): + """Replace a node annotated by `nni.function_choice`. + node: the AST node to replace + annotation: annotation string + """ + target, funcs = parse_nni_function(annotation) + FuncReplacer(funcs, target).visit(node) + return node + + +class FuncReplacer(ast.NodeTransformer): + """To replace target function call expressions in a node annotated by `nni.function_choice`""" + + def __init__(self, funcs, target): + """Constructor. 
+ funcs: list of dumped function call expressions to replace + target: use this AST node to replace matching expressions + """ + self.funcs = set(funcs) + self.target = target + + def visit_Call(self, node): # pylint: disable=invalid-name + if ast.dump(node, False) in self.funcs: + return self.target + return node + + +class Transformer(ast.NodeTransformer): + """Transform original code to annotated code""" + + def __init__(self, nas_mode=None): + self.stack = [] + self.last_line = 0 + self.annotated = False + self.nas_mode = nas_mode + + def visit(self, node): + if isinstance(node, (ast.expr, ast.stmt)): + self.last_line = lineno(node) + + # do nothing for root + if not self.stack: + return self._visit_children(node) + + annotation = self.stack[-1] + + # this is a standalone string, may be an annotation + if type(node) is ast.Expr and type(node.value) is ast_Str: + # must not annotate an annotation string + assert annotation is None, 'Annotating an annotation' + return self._visit_string(node) + + if annotation is not None: # this expression is annotated + self.stack[-1] = None # so next expression is not + if annotation.startswith('nni.variable'): + return replace_variable_node(node, annotation) + if annotation.startswith('nni.function_choice'): + return replace_function_node(node, annotation) + + return self._visit_children(node) + + def _visit_string(self, node): + string = node.value.s + if string.startswith('@nni.'): + self.annotated = True + else: + return node # not an annotation, ignore it + + if string.startswith('@nni.training_update'): + expr = parse_annotation(string[1:]) + call_node = expr.value + call_node.args.insert(0, ast_Str(s=self.nas_mode)) + return expr + + if string.startswith('@nni.report_intermediate_result') \ + or string.startswith('@nni.report_final_result') \ + or string.startswith('@nni.get_next_parameter'): + return parse_annotation(string[1:]) # expand annotation string to code + + if string.startswith('@nni.mutable_layers'): + nodes = parse_annotation_mutable_layers(string[1:], lineno(node), self.nas_mode) + return nodes + + if string.startswith('@nni.variable') \ + or string.startswith('@nni.function_choice'): + self.stack[-1] = string[1:] # mark that the next expression is annotated + return None + + raise AssertionError('Unexpected annotation function') + + def _visit_children(self, node): + self.stack.append(None) + self.generic_visit(node) + annotation = self.stack.pop() + assert annotation is None, 'Annotation has no target' + return node + + +def parse(code, nas_mode=None): + """Annotate user code. + Return annotated code (str) if annotation detected; return None if not. 
+ code: original user code (str) + nas_mode: the NAS mode, given when the NAS interface is used + """ + try: + ast_tree = ast.parse(code) + except Exception: + raise RuntimeError('Bad Python code') + + transformer = Transformer(nas_mode) + try: + transformer.visit(ast_tree) + except AssertionError as exc: + raise RuntimeError('%d: %s' % (transformer.last_line, exc.args[0])) + + if not transformer.annotated: + return None + + last_future_import = -1 + import_nni = ast.Import(names=[ast.alias(name='nni', asname=None)]) + nodes = ast_tree.body + for i, _ in enumerate(nodes): + if type(nodes[i]) is ast.ImportFrom and nodes[i].module == '__future__': + last_future_import = i + nodes.insert(last_future_import + 1, import_nni) + # enas, oneshot and darts modes for tensorflow need the tensorflow module, so we import it here + if nas_mode in ['enas_mode', 'oneshot_mode', 'darts_mode']: + import_tf = ast.Import(names=[ast.alias(name='tensorflow', asname=None)]) + nodes.insert(last_future_import + 1, import_tf) + + return astor.to_source(ast_tree) diff --git a/nni/tools/annotation/search_space_generator.py b/nni/tools/annotation/search_space_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a19f53dff601faa682f48b3f1ee574f21b9acf --- /dev/null +++ b/nni/tools/annotation/search_space_generator.py @@ -0,0 +1,135 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import ast +import numbers + +import astor + +from .utils import ast_Num, ast_Str, lineno + +# pylint: disable=unidiomatic-typecheck + + +# list of functions related to search space generation +_ss_funcs = [ + 'choice', + 'randint', + 'uniform', + 'quniform', + 'loguniform', + 'qloguniform', + 'normal', + 'qnormal', + 'lognormal', + 'qlognormal', + 'function_choice', + 'mutable_layer' +] + + +class SearchSpaceGenerator(ast.NodeTransformer): + """Generate search space from smart parameter APIs""" + + def __init__(self, module_name): + self.module_name = module_name + self.search_space = {} + self.last_line = 0 # last parsed line, useful for error reporting + + def generate_mutable_layer_search_space(self, args): + mutable_block = args[0].s + mutable_layer = args[1].s + key = self.module_name + '/' + mutable_block + args[0].s = key + if key not in self.search_space: + self.search_space[key] = {'_type': 'mutable_layer', '_value': {}} + self.search_space[key]['_value'][mutable_layer] = { + 'layer_choice': [k.s for k in args[2].keys], + 'optional_inputs': [k.s for k in args[5].keys], + 'optional_input_size': args[6].n if isinstance(args[6], ast_Num) else [args[6].elts[0].n, args[6].elts[1].n] + } + + def visit_Call(self, node): # pylint: disable=invalid-name + self.generic_visit(node) + + # ignore if the function is not 'nni.*' + if type(node.func) is not ast.Attribute: + return node + if type(node.func.value) is not ast.Name: + return node + if node.func.value.id != 'nni': + return node + + # ignore if it's not a search space function (e.g.
`report_final_result`) + func = node.func.attr + if func not in _ss_funcs: + return node + + self.last_line = lineno(node) + + if func == 'mutable_layer': + self.generate_mutable_layer_search_space(node.args) + return node + + if node.keywords: + # there is a `name` argument + assert len(node.keywords) == 1, 'Smart parameter has keyword argument other than "name"' + assert node.keywords[0].arg == 'name', 'Smart parameter\'s keyword argument is not "name"' + assert type(node.keywords[0].value) is ast_Str, 'Smart parameter\'s name must be string literal' + name = node.keywords[0].value.s + specified_name = True + else: + # generate the missing name automatically + name = '__line' + str(node.args[-1].lineno) + specified_name = False + node.keywords = list() + + if func in ('choice', 'function_choice'): + # use the keys of the dict as the choices; the dict is generated by code_generator from the args given by the user + assert len(node.args) == 1, 'Smart parameter has arguments other than dict' + # check if it is a number or a string and get its value accordingly + args = [key.n if type(key) is ast_Num else key.s for key in node.args[0].keys] + else: + # arguments of other functions must be literal numbers + assert all(isinstance(ast.literal_eval(astor.to_source(arg)), numbers.Real) for arg in node.args), \ + 'Smart parameter\'s arguments must be number literals' + args = [ast.literal_eval(astor.to_source(arg)) for arg in node.args] + + key = self.module_name + '/' + name + '/' + func + # store key in ast.Call + node.keywords.append(ast.keyword(arg='key', value=ast_Str(s=key))) + + if func == 'function_choice': + func = 'choice' + value = {'_type': func, '_value': args} + + if specified_name: + # multiple functions with the same name must have identical arguments + old = self.search_space.get(key) + assert old is None or old == value, 'Different smart parameters have the same name' + else: + # generated names must not duplicate + assert key not in self.search_space, 'Only one smart parameter is allowed in a line' + + self.search_space[key] = value + + return node + + +def generate(module_name, code): + """Generate search space. + Return a serializable search space object. + module_name: name of the module (str) + code: user code (str) + """ + try: + ast_tree = ast.parse(code) + except Exception: + raise RuntimeError('Bad Python code') + + visitor = SearchSpaceGenerator(module_name) + try: + visitor.visit(ast_tree) + except AssertionError as exc: + raise RuntimeError('%d: %s' % (visitor.last_line, exc.args[0])) + return visitor.search_space, astor.to_source(ast_tree) diff --git a/nni/tools/annotation/specific_code_generator.py b/nni/tools/annotation/specific_code_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..5ddd362205dfa909d0077315ebe5dd9b8ade239f --- /dev/null +++ b/nni/tools/annotation/specific_code_generator.py @@ -0,0 +1,354 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import ast +import astor +from nni.tools.nnictl.common_utils import print_warning + +from .utils import ast_Num, ast_Str, lineno + +# pylint: disable=unidiomatic-typecheck + +para_cfg = None +prefix_name = None + + +def parse_annotation_mutable_layers(code, lineno): + """Parse the string of mutable layers in annotation.
+ Return a list of AST Expr nodes + code: annotation string (excluding '@') + """ + module = ast.parse(code) + assert type(module) is ast.Module, 'internal error #1' + assert len(module.body) == 1, 'Annotation mutable_layers contains more than one expression' + assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' + call = module.body[0].value + nodes = [] + mutable_id = prefix_name + '/mutable_block_' + str(lineno) + mutable_layer_cnt = 0 + for arg in call.args: + fields = {'layer_choice': False, + 'fixed_inputs': False, + 'optional_inputs': False, + 'optional_input_size': False, + 'layer_output': False} + mutable_layer_id = 'mutable_layer_' + str(mutable_layer_cnt) + mutable_layer_cnt += 1 + func_call = None + for k, value in zip(arg.keys, arg.values): + if k.id == 'layer_choice': + assert not fields['layer_choice'], 'Duplicated field: layer_choice' + assert type(value) is ast.List, 'Value of layer_choice should be a list' + for call in value.elts: + assert type(call) is ast.Call, 'Element in layer_choice should be function call' + call_name = astor.to_source(call).strip() + if call_name == para_cfg[mutable_id][mutable_layer_id]['chosen_layer']: + func_call = call + assert not call.args, 'Number of args without keyword should be zero' + break + fields['layer_choice'] = True + elif k.id == 'fixed_inputs': + assert not fields['fixed_inputs'], 'Duplicated field: fixed_inputs' + assert type(value) is ast.List, 'Value of fixed_inputs should be a list' + fixed_inputs = value + fields['fixed_inputs'] = True + elif k.id == 'optional_inputs': + assert not fields['optional_inputs'], 'Duplicated field: optional_inputs' + assert type(value) is ast.List, 'Value of optional_inputs should be a list' + var_names = [astor.to_source(var).strip() for var in value.elts] + chosen_inputs = para_cfg[mutable_id][mutable_layer_id]['chosen_inputs'] + elts = [] + for i in chosen_inputs: + index = var_names.index(i) + elts.append(value.elts[index]) + optional_inputs = ast.List(elts=elts) + fields['optional_inputs'] = True + elif k.id == 'optional_input_size': + pass + elif k.id == 'layer_output': + assert not fields['layer_output'], 'Duplicated field: layer_output' + assert type(value) is ast.Name, 'Value of layer_output should be ast.Name type' + layer_output = value + fields['layer_output'] = True + else: + raise AssertionError('Unexpected field in mutable layer') + # make call for this mutable layer + assert fields['layer_choice'], 'layer_choice must exist' + assert fields['layer_output'], 'layer_output must exist' + + if not fields['fixed_inputs']: + fixed_inputs = ast.List(elts=[]) + if not fields['optional_inputs']: + optional_inputs = ast.List(elts=[]) + inputs = ast.List(elts=[fixed_inputs, optional_inputs]) + + func_call.args.append(inputs) + node = ast.Assign(targets=[layer_output], value=func_call) + nodes.append(node) + return nodes + + +def parse_annotation(code): + """Parse an annotation string. + Return an AST Expr node. + code: annotation string (excluding '@') + """ + module = ast.parse(code) + assert type(module) is ast.Module, 'internal error #1' + assert len(module.body) == 1, 'Annotation contains more than one expression' + assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' + return module.body[0] + + +def parse_annotation_function(code, func_name): + """Parse an annotation function. + Return the value of `name` keyword argument and the AST Call node. 
+ func_name: expected function name + """ + expr = parse_annotation(code) + call = expr.value + assert type(call) is ast.Call, 'Annotation is not a function call' + + assert type(call.func) is ast.Attribute, 'Unexpected annotation function' + assert type(call.func.value) is ast.Name, 'Invalid annotation function name' + assert call.func.value.id == 'nni', 'Annotation is not an NNI function' + assert call.func.attr == func_name, 'internal error #2' + + assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument' + assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"' + name = call.keywords[0].value + + return name, call + + +def parse_nni_variable(code): + """Parse `nni.variable` expression. + Return the name argument and AST node of annotated expression. + code: annotation string + """ + name, call = parse_annotation_function(code, 'variable') + + assert len(call.args) == 1, 'nni.variable contains more than one argument' + arg = call.args[0] + assert type(arg) is ast.Call, 'Value of nni.variable is not a function call' + assert type(arg.func) is ast.Attribute, 'nni.variable value is not an NNI function' + assert type(arg.func.value) is ast.Name, 'nni.variable value is not an NNI function' + assert arg.func.value.id == 'nni', 'nni.variable value is not an NNI function' + + name_str = astor.to_source(name).strip() + keyword_arg = ast.keyword(arg='name', value=ast_Str(s=name_str)) + arg.keywords.append(keyword_arg) + if arg.func.attr == 'choice': + convert_args_to_dict(arg) + + return name, arg + + +def parse_nni_function(code): + """Parse `nni.function_choice` expression. + Return the AST node of annotated expression and a list of dumped function call expressions. + code: annotation string + """ + name, call = parse_annotation_function(code, 'function_choice') + funcs = [ast.dump(func, False) for func in call.args] + convert_args_to_dict(call, with_lambda=True) + + name_str = astor.to_source(name).strip() + call.keywords[0].value = ast_Str(s=name_str) + + return call, funcs + + +def convert_args_to_dict(call, with_lambda=False): + """Convert all positional args of a call into a single dict, mapping each arg's literal value (or source text) to the arg itself. + Return the AST Call node whose only arg is that dictionary. + """ + keys, values = list(), list() + for arg in call.args: + if type(arg) in [ast_Str, ast_Num]: + arg_value = arg + else: + # if arg is not a string or a number, we use its source code as the key + arg_value = astor.to_source(arg).strip('\n"') + arg_value = ast_Str(str(arg_value)) + arg = make_lambda(arg) if with_lambda else arg + keys.append(arg_value) + values.append(arg) + del call.args[:] + call.args.append(ast.Dict(keys=keys, values=values)) + + return call + + +def make_lambda(call): + """Wrap an AST Call node in a lambda expression node.
+ call: ast.Call node + """ + empty_args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[]) + return ast.Lambda(args=empty_args, body=call) + + +def test_variable_equal(node1, node2): + """Test whether two variables are the same.""" + if type(node1) is not type(node2): + return False + if isinstance(node1, ast.AST): + for k, v in vars(node1).items(): + if k in ('lineno', 'col_offset', 'ctx', 'end_lineno', 'end_col_offset'): + continue + if not test_variable_equal(v, getattr(node2, k)): + return False + return True + if isinstance(node1, list): + if len(node1) != len(node2): + return False + return all(test_variable_equal(n1, n2) for n1, n2 in zip(node1, node2)) + + return node1 == node2 + + +def replace_variable_node(node, annotation): + """Replace a node annotated by `nni.variable`. + node: the AST node to replace + annotation: annotation string + """ + assert type(node) is ast.Assign, 'nni.variable is not annotating an assignment expression' + assert len(node.targets) == 1, 'Annotated assignment has more than one left-hand value' + name, expr = parse_nni_variable(annotation) + assert test_variable_equal(node.targets[0], name), 'Annotated variable has wrong name' + node.value = expr + return node + + +def replace_function_node(node, annotation): + """Replace a node annotated by `nni.function_choice`. + node: the AST node to replace + annotation: annotation string + """ + target, funcs = parse_nni_function(annotation) + FuncReplacer(funcs, target).visit(node) + return node + + +class FuncReplacer(ast.NodeTransformer): + """To replace target function call expressions in a node annotated by `nni.function_choice`""" + + def __init__(self, funcs, target): + """Constructor. + funcs: list of dumped function call expressions to replace + target: use this AST node to replace matching expressions + """ + self.funcs = set(funcs) + self.target = target + + def visit_Call(self, node): # pylint: disable=invalid-name + if ast.dump(node, False) in self.funcs: + return self.target + return node + + +class Transformer(ast.NodeTransformer): + """Transform original code to annotated code""" + + def __init__(self): + self.stack = [] + self.last_line = 0 + self.annotated = False + + def visit(self, node): + if isinstance(node, (ast.expr, ast.stmt)): + self.last_line = lineno(node) + + # do nothing for root + if not self.stack: + return self._visit_children(node) + + annotation = self.stack[-1] + + # this is a standalone string, may be an annotation + if type(node) is ast.Expr and type(node.value) is ast_Str: + # must not annotate an annotation string + assert annotation is None, 'Annotating an annotation' + return self._visit_string(node) + + if annotation is not None: # this expression is annotated + self.stack[-1] = None # so next expression is not + if annotation.startswith('nni.variable'): + return replace_variable_node(node, annotation) + if annotation.startswith('nni.function_choice'): + return replace_function_node(node, annotation) + + return self._visit_children(node) + + def _visit_string(self, node): + string = node.value.s + if string.startswith('@nni.'): + self.annotated = True + else: + return node # not an annotation, ignore it + + if string.startswith('@nni.get_next_parameter'): + deprecated_message = "'@nni.get_next_parameter' is deprecated in annotation due to inconvenience. " \ + "Please remove this line from the trial code."
+ print_warning(deprecated_message) + return ast.Expr(value=ast.Call(func=ast.Name(id='print', ctx=ast.Load()), + args=[ast_Str(s='Get next parameter here...')], keywords=[])) + + if string.startswith('@nni.training_update'): + return ast.Expr(value=ast.Call(func=ast.Name(id='print', ctx=ast.Load()), + args=[ast_Str(s='Training update here...')], keywords=[])) + + if string.startswith('@nni.report_intermediate_result'): + module = ast.parse(string[1:]) + arg = module.body[0].value.args[0] + return ast.Expr(value=ast.Call(func=ast.Name(id='print', ctx=ast.Load()), + args=[ast_Str(s='nni.report_intermediate_result: '), arg], keywords=[])) + + if string.startswith('@nni.report_final_result'): + module = ast.parse(string[1:]) + arg = module.body[0].value.args[0] + return ast.Expr(value=ast.Call(func=ast.Name(id='print', ctx=ast.Load()), + args=[ast_Str(s='nni.report_final_result: '), arg], keywords=[])) + + if string.startswith('@nni.mutable_layers'): + return parse_annotation_mutable_layers(string[1:], lineno(node)) + + if string.startswith('@nni.variable') \ + or string.startswith('@nni.function_choice'): + self.stack[-1] = string[1:] # mark that the next expression is annotated + return None + + raise AssertionError('Unexpected annotation function') + + def _visit_children(self, node): + self.stack.append(None) + self.generic_visit(node) + annotation = self.stack.pop() + assert annotation is None, 'Annotation has no target' + return node + + +def parse(code, para, module): + """Annotate user code. + Return annotated code (str) if annotation detected; return None if not. + code: original user code (str) + para: chosen parameters for the mutable layers (dict) + module: module name, used as the prefix of mutable block ids (str) + """ + global para_cfg + global prefix_name + para_cfg = para + prefix_name = module + try: + ast_tree = ast.parse(code) + except Exception: + raise RuntimeError('Bad Python code') + + transformer = Transformer() + try: + transformer.visit(ast_tree) + except AssertionError as exc: + raise RuntimeError('%d: %s' % (transformer.last_line, exc.args[0])) + + if not transformer.annotated: + return None + + return astor.to_source(ast_tree) diff --git a/nni/tools/annotation/utils.py b/nni/tools/annotation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..176621c2cf3d9211d626b182277ef61f170880f9 --- /dev/null +++ b/nni/tools/annotation/utils.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import ast +from sys import version_info + + +if version_info >= (3, 8): + ast_Num = ast_Str = ast_Bytes = ast_NameConstant = ast_Ellipsis = ast.Constant + + def lineno(ast_node): + return ast_node.end_lineno + +else: + ast_Num = ast.Num + ast_Str = ast.Str + ast_Bytes = ast.Bytes + ast_NameConstant = ast.NameConstant + ast_Ellipsis = ast.Ellipsis + + def lineno(ast_node): + return ast_node.lineno diff --git a/nni/tools/gpu_tool/__init__.py b/nni/tools/gpu_tool/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/tools/gpu_tool/gpu_metrics_collector.py b/nni/tools/gpu_tool/gpu_metrics_collector.py new file mode 100644 index 0000000000000000000000000000000000000000..d56ec7f9a71e08d91ea15f0dbc74746d1cd83b61 --- /dev/null +++ b/nni/tools/gpu_tool/gpu_metrics_collector.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license.
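+# Usage sketch (an assumed invocation, for illustration only): this module is meant to run as a +# long-lived polling script with METRIC_OUTPUT_DIR pointing to a writable directory, e.g. +# `METRIC_OUTPUT_DIR=/tmp/nni-metrics python -m nni.tools.gpu_tool.gpu_metrics_collector`; +# it appends one JSON line per sample to $METRIC_OUTPUT_DIR/gpu_metrics.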
+ +import json +import os +import subprocess +import sys +import time +import traceback + + +def main(argv): + metrics_output_dir = os.environ['METRIC_OUTPUT_DIR'] + + cmd = 'rocm-smi --showuse --showmemuse --json'.split() + + while True: + try: + smi_output = subprocess.check_output(cmd) + except Exception: + traceback.print_exc() + gen_empty_gpu_metric(metrics_output_dir) + break + parse_nvidia_smi_result(smi_output, metrics_output_dir) + # TODO: make the sleep time configurable via arguments + time.sleep(5) + + +def parse_nvidia_smi_result(smi, outputDir): + # despite its name, this function parses the JSON output of rocm-smi + try: + old_umask = os.umask(0) + gpuList = json.loads(smi) + with open(os.path.join(outputDir, "gpu_metrics"), 'a') as outputFile: + outPut = {} + outPut["Timestamp"] = time.asctime(time.localtime()) + outPut["gpuCount"] = len(gpuList) + outPut["gpuInfos"] = [] + for gpuIndex, gpu in enumerate(gpuList): + gpuInfo = {} + gpuInfo['index'] = gpuIndex + gpuInfo['gpuUtil'] = gpuList[gpu][list(gpuList[gpu].keys())[0]] + "%" + gpuInfo['gpuMemUtil'] = gpuList[gpu][list(gpuList[gpu].keys())[1]] + "%" + # rocm-smi does not report the number of running processes, so default to 1 + runningProNumber = 1 + gpuInfo['activeProcessNum'] = runningProNumber + + outPut["gpuInfos"].append(gpuInfo) + print(outPut) + outputFile.write("{}\n".format(json.dumps(outPut, sort_keys=True))) + outputFile.flush() + except Exception as error: + print('gpu_metrics_collector error: %s' % error) + finally: + os.umask(old_umask) + + +def gen_empty_gpu_metric(outputDir): + try: + old_umask = os.umask(0) + with open(os.path.join(outputDir, "gpu_metrics"), 'a') as outputFile: + outPut = {} + outPut["Timestamp"] = time.asctime(time.localtime()) + outPut["gpuCount"] = 0 + outPut["gpuInfos"] = [] + print(outPut) + outputFile.write("{}\n".format(json.dumps(outPut, sort_keys=True))) + outputFile.flush() + except Exception: + traceback.print_exc() + finally: + os.umask(old_umask) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/nni/tools/jupyter_extension/__init__.py b/nni/tools/jupyter_extension/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..df86a70a855c0647420205e570d06c53e2f8bdd3 --- /dev/null +++ b/nni/tools/jupyter_extension/__init__.py @@ -0,0 +1,4 @@ +from .
import proxy + +load_jupyter_server_extension = proxy.setup +_load_jupyter_server_extension = proxy.setup diff --git a/nni/tools/jupyter_extension/management.py b/nni/tools/jupyter_extension/management.py new file mode 100644 index 0000000000000000000000000000000000000000..66235d8ee6bd9c2ebc918462040edc2b9bfd01f9 --- /dev/null +++ b/nni/tools/jupyter_extension/management.py @@ -0,0 +1,61 @@ +import json +from pathlib import Path +import shutil +import os + +from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir + +import nni_node + +def _get_jupyter_lab_version(): + try: + import jupyterlab + return jupyterlab.__version__ + except ImportError: + return '3.x' # assume JupyterLab 3.x when the version cannot be determined + +jupyter_lab_major_version = _get_jupyter_lab_version().split('.')[0] + +_backend_config_file = Path(jupyter_config_dir(), 'jupyter_server_config.d', 'nni.json') +_backend_config_content = { + 'ServerApp': { + 'jpserver_extensions': { + 'nni.tools.jupyter_extension': True + } + } +} +_v2_backend_config_file = Path(jupyter_config_dir(), 'jupyter_notebook_config.d', 'nni.json') +_v2_backend_config_content = { + "NotebookApp": { + "nbserver_extensions": { + "nni.tools.jupyter_extension": True + } + } +} + +_frontend_src = Path(nni_node.__path__[0], 'jupyter-extension') +_frontend_dst = Path(jupyter_data_dir(), 'labextensions', 'nni-jupyter-extension') + +def install(): + _backend_config_file.parent.mkdir(parents=True, exist_ok=True) + _backend_config_file.write_text(json.dumps(_backend_config_content)) + + _frontend_dst.parent.mkdir(parents=True, exist_ok=True) + + if jupyter_lab_major_version == '2': + _v2_backend_config_file.parent.mkdir(parents=True, exist_ok=True) + _v2_backend_config_file.write_text(json.dumps(_v2_backend_config_content)) + + if _frontend_src.is_symlink(): + linkto = os.path.realpath(_frontend_src) + os.symlink(linkto, _frontend_dst) + else: + shutil.copytree(_frontend_src, _frontend_dst) + else: + shutil.copytree(_frontend_src, _frontend_dst) + +def uninstall(): + _backend_config_file.unlink() + if jupyter_lab_major_version == '2': + _v2_backend_config_file.unlink() + shutil.rmtree(_frontend_dst) diff --git a/nni/tools/jupyter_extension/proxy.py b/nni/tools/jupyter_extension/proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..f5457379dc809c9217035d4f1c5085f6ecc88f5d --- /dev/null +++ b/nni/tools/jupyter_extension/proxy.py @@ -0,0 +1,43 @@ +import json +from pathlib import Path + +import requests +from tornado.web import RequestHandler + +def setup(server): + base_url = server.web_app.settings['base_url'] + url_pattern = base_url.rstrip('/') + '/nni/(.*)' + server.web_app.add_handlers('.*$', [(url_pattern, NniProxyHandler)]) + +class NniProxyHandler(RequestHandler): + def get(self, path): + ports = _get_experiment_ports() + if not ports: + self.set_status(404) + return + + if path == 'index': + if len(ports) > 1: # if there is more than one running experiment, show the experiment list + self.redirect('experiment') + else: # if there is only one running experiment, show that experiment + self.redirect('oview') + return + + r = requests.get(f'http://localhost:{ports[0]}/{path}') + self.set_status(r.status_code) + for key, value in r.headers.items(): + self.add_header(key, value) + self.finish(r.content) + + # TODO: post, put, etc + + def set_default_headers(self): + self.clear_header('Content-Type') + self.clear_header('Date') + +def _get_experiment_ports(): + experiment_list_path = Path.home() / 'nni-experiments/.experiment' + if not experiment_list_path.exists(): + return None
+ experiments = json.load(open(experiment_list_path)) + return [exp['port'] for exp in experiments.values() if exp['status'] != 'STOPPED'] diff --git a/nni/tools/nnictl/__init__.py b/nni/tools/nnictl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/tools/nnictl/algo_management.py b/nni/tools/nnictl/algo_management.py new file mode 100644 index 0000000000000000000000000000000000000000..27ea331538f0a5a36f3fbaa49a37a2e1e10517ed --- /dev/null +++ b/nni/tools/nnictl/algo_management.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import importlib +import json +import nni +from nni.tools import package_utils +from .common_utils import print_error, print_green, get_yml_content + +def read_reg_meta_list(meta_path): + content = get_yml_content(meta_path) + if content.get('algorithms'): + meta_list = content.get('algorithms') + else: + meta_list = [content] + for meta in meta_list: + assert 'algoType' in meta + assert meta['algoType'] in ['tuner', 'assessor', 'advisor'] + assert 'builtinName' in meta + assert 'className' in meta + meta['nniVersion'] = nni.__version__ + return [package_utils.AlgoMeta.load(meta) for meta in meta_list] + +def verify_algo_import(meta): + def _do_verify_import(full_name): + module_name, class_name = full_name.rsplit('.', 1) + class_module = importlib.import_module(module_name) + getattr(class_module, class_name) + + _do_verify_import(meta.class_name) + + if meta.validator_class_name is not None: + _do_verify_import(meta.validator_class_name) + +def algo_reg(args): + meta_list = read_reg_meta_list(args.meta_path) + for meta in meta_list: + old = package_utils.get_algo_meta(meta.name) + if old is not None and old.is_builtin: + print_error(f'Cannot overwrite builtin algorithm {meta.name}') + continue + + verify_algo_import(meta) + if old is not None: + print_green('Updating existing algorithm') + package_utils.register_algo_meta(meta) + print_green(f'{meta.name} registered successfully!') + +def algo_unreg(args): + name = args.name[0] + meta = package_utils.get_algo_meta(name) + if meta is None: + print_error('builtin algorithm {} not found!'.format(name)) + return + if meta.is_builtin: + print_error('{} is provided by nni and cannot be unregistered!'.format(name)) + return + package_utils.unregister_algo_meta(name) + print_green('{} unregistered successfully!'.format(name)) + +def algo_show(args): + builtin_name = args.name[0] + meta = package_utils.get_algo_meta(builtin_name) + if meta is not None: + print(json.dumps(meta.dump(), indent=4)) + else: + print_error('package {} not found'.format(builtin_name)) + +def algo_list(args): + print('+-----------------+------------+-----------+----------------------+------------------------------------------+') + print('| Name | Type | Source | Class Name | Module Name |') + print('+-----------------+------------+-----------+----------------------+------------------------------------------+') + MAX_MODULE_NAME = 38 + for meta in package_utils.get_all_algo_meta(): + module_name, class_name = meta.class_name.rsplit('.', 1) + if len(module_name) > MAX_MODULE_NAME: + module_name = module_name[:MAX_MODULE_NAME-3] + '...'
+ fields = [ + meta.name, + meta.algo_type, + 'nni' if meta.is_builtin else 'user', + class_name, + module_name + ] + print('| {:15s} | {:10s} | {:9s} | {:20s} | {:40s} |'.format(*fields)) + print('+-----------------+------------+-----------+----------------------+------------------------------------------+') diff --git a/nni/tools/nnictl/command_utils.py b/nni/tools/nnictl/command_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2bbcc883d1cef450fef9b723dc8ca71d34daa095 --- /dev/null +++ b/nni/tools/nnictl/command_utils.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from subprocess import call, check_output +import sys +import os +import signal +import psutil +from .common_utils import print_error + + +def check_output_command(file_path, head=None, tail=None): + """call check_output command to read content from a file""" + if os.path.exists(file_path): + if sys.platform == 'win32': + cmds = ['powershell.exe', 'type', file_path] + if head: + cmds += ['|', 'select', '-first', str(head)] + elif tail: + cmds += ['|', 'select', '-last', str(tail)] + return check_output(cmds, shell=True).decode('utf-8') + else: + cmds = ['cat', file_path] + if head: + cmds = ['head', '-' + str(head), file_path] + elif tail: + cmds = ['tail', '-' + str(tail), file_path] + return check_output(cmds, shell=False).decode('utf-8') + else: + print_error('{0} does not exist!'.format(file_path)) + exit(1) + + +def kill_command(pid): + """kill command""" + if sys.platform == 'win32': + process = psutil.Process(pid=pid) + process.send_signal(signal.CTRL_BREAK_EVENT) + else: + cmds = ['kill', str(pid)] + call(cmds) + + +def install_package_command(package_name): + """ + Install python package from pip. + + Parameters + ---------- + package_name: str + The name of package to be installed. + """ + call(_get_pip_install() + [package_name], shell=False) + + +def install_requirements_command(requirements_path): + """ + Install packages from `requirements.txt` in `requirements_path`. + + Parameters + ---------- + requirements_path: str + Path to the directory that contains `requirements.txt`. + """ + return call(_get_pip_install() + ["-r", requirements_path], shell=False) + + +def _get_pip_install(): + python = "python" if sys.platform == "win32" else "python3" + ret = [python, "-m", "pip", "install"] + if "CONDA_DEFAULT_ENV" not in os.environ and "VIRTUAL_ENV" not in os.environ and \ + (sys.platform != "win32" and os.getuid() != 0): # on unix and not running in root + ret.append("--user") # not in virtualenv or conda + return ret + +def call_pip_install(source): + return call(_get_pip_install() + [source]) + +def call_pip_uninstall(module_name): + python = "python" if sys.platform == "win32" else "python3" + cmd = [python, "-m", "pip", "uninstall", module_name] + return call(cmd) diff --git a/nni/tools/nnictl/common_utils.py b/nni/tools/nnictl/common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..25df67cf2e11297ccbf02223432a72c01768868c --- /dev/null +++ b/nni/tools/nnictl/common_utils.py @@ -0,0 +1,125 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
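+# Quick reference for command_utils above (a sketch under stated assumptions: Linux, no active +# conda/virtualenv, not running as root): _get_pip_install() then evaluates to +# ['python3', '-m', 'pip', 'install', '--user'], so install_package_command('scikit-learn') +# runs `python3 -m pip install --user scikit-learn` ('scikit-learn' is just an illustrative package name).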
+ +import os +import sys +import json +import tempfile +import time +import socket +import string +import random +import glob +from colorama import Fore +import filelock +import psutil +import yaml + +from .constants import ERROR_INFO, NORMAL_INFO, WARNING_INFO + +def get_yml_content(file_path): + '''Load yaml file content''' + try: + with open(file_path, 'r') as file: + return yaml.safe_load(file) + except yaml.scanner.ScannerError as err: + print_error('yaml file format error!') + print_error(err) + exit(1) + except Exception as exception: + print_error(exception) + exit(1) + +def get_json_content(file_path): + '''Load json file content''' + try: + with open(file_path, 'r') as file: + return json.load(file) + except TypeError as err: + print_error('json file format error!') + print_error(err) + return None + + +def print_error(*content): + '''Print error information to screen''' + print(Fore.RED + ERROR_INFO + ' '.join([str(c) for c in content]) + Fore.RESET) + +def print_green(*content): + '''Print information to screen in green''' + print(Fore.GREEN + ' '.join([str(c) for c in content]) + Fore.RESET) + +def print_normal(*content): + '''Print normal information to screen''' + print(NORMAL_INFO, *content) + +def print_warning(*content): + '''Print warning information to screen''' + print(Fore.YELLOW + WARNING_INFO + ' '.join([str(c) for c in content]) + Fore.RESET) + +def detect_process(pid): + '''Detect if a process is alive''' + try: + process = psutil.Process(pid) + return process.is_running() + except Exception: + return False + +def detect_port(port): + '''Detect if the port is used''' + socket_test = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + socket_test.connect(('127.0.0.1', int(port))) + socket_test.close() + return True + except Exception: + return False + +def get_user(): + if sys.platform == 'win32': + return os.environ['USERNAME'] + else: + return os.environ['USER'] + +def generate_temp_dir(): + '''generate a temp folder''' + def generate_folder_name(): + return os.path.join(tempfile.gettempdir(), 'nni', ''.join(random.sample(string.ascii_letters + string.digits, 8))) + temp_dir = generate_folder_name() + while os.path.exists(temp_dir): + temp_dir = generate_folder_name() + os.makedirs(temp_dir) + return temp_dir + +class SimplePreemptiveLock(filelock.SoftFileLock): + '''A file lock that supports expiration checking; if you do not need expiration checks, use SoftFileLock instead.''' + def __init__(self, lock_file, stale=-1): + super(__class__, self).__init__(lock_file, timeout=-1) + self._lock_file_name = '{}.{}'.format(self._lock_file, os.getpid()) + self._stale = stale + + def _acquire(self): + open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC + try: + lock_file_names = glob.glob(self._lock_file + '.*') + for file_name in lock_file_names: + if os.path.exists(file_name) and (self._stale < 0 or time.time() - os.stat(file_name).st_mtime < self._stale): + return None + fd = os.open(self._lock_file_name, open_mode) + except (IOError, OSError): + pass + else: + self._lock_file_fd = fd + return None + + def _release(self): + os.close(self._lock_file_fd) + self._lock_file_fd = None + try: + os.remove(self._lock_file_name) + except OSError: + pass + return None + +def get_file_lock(path: str, stale=-1): + return SimplePreemptiveLock(path + '.lock', stale=stale) diff --git a/nni/tools/nnictl/config_schema.py b/nni/tools/nnictl/config_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..997ad1fa363c4062381fd47d3678e2247a73ef49 --- /dev/null +++
b/nni/tools/nnictl/config_schema.py @@ -0,0 +1,637 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import logging +import os + +from schema import And, Optional, Or, Regex, Schema, SchemaError +from nni.tools.package_utils.tuner_factory import ( + create_validator_instance, + get_all_builtin_names, + get_registered_algo_meta, +) + +from .common_utils import get_yml_content, print_warning +from .constants import SCHEMA_PATH_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_TYPE_ERROR + + +def setType(key, valueType): + '''check key type''' + return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__)) + + +def setChoice(key, *args): + '''check choice''' + return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args))) + + +def setNumberRange(key, keyType, start, end): + '''check number range''' + return And( + And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)), + And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))), + ) + + +def setPathCheck(key): + '''check if path exists''' + return And(os.path.exists, error=SCHEMA_PATH_ERROR % key) + + +class AlgoSchema: + """ + This class is the schema of the 'tuner', 'assessor' and 'advisor' sections of the experiment configuration file. + For example: + AlgoSchema('tuner') creates the schema of the tuner section. + """ + + def __init__(self, algo_type): + """ + Parameters: + ----------- + algo_type: str + One of ['tuner', 'assessor', 'advisor']. + 'tuner': This AlgoSchema class creates the schema of the tuner section. + 'assessor': This AlgoSchema class creates the schema of the assessor section. + 'advisor': This AlgoSchema class creates the schema of the advisor section. + """ + assert algo_type in ['tuner', 'assessor', 'advisor'] + self.algo_type = algo_type + self.algo_schema = { + Optional('codeDir'): setPathCheck('codeDir'), + Optional('classFileName'): setType('classFileName', str), + Optional('className'): setType('className', str), + Optional('classArgs'): dict, + Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool), + Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'), + } + self.builtin_keys = { + 'tuner': 'builtinTunerName', + 'assessor': 'builtinAssessorName', + 'advisor': 'builtinAdvisorName' + } + self.builtin_name_schema = {} + for k, n in self.builtin_keys.items(): + self.builtin_name_schema[k] = {Optional(n): setChoice(n, *get_all_builtin_names(k+'s'))} + + self.customized_keys = set(['codeDir', 'classFileName', 'className']) + + def validate_class_args(self, class_args, algo_type, builtin_name): + if not builtin_name or not class_args: + return + meta = get_registered_algo_meta(builtin_name, algo_type+'s') + if meta and 'acceptClassArgs' in meta and meta['acceptClassArgs'] is False: + raise SchemaError('classArgs is not allowed.') + + logging.getLogger('nni.protocol').setLevel(logging.ERROR) # we know IPC is not there, don't complain + validator = create_validator_instance(algo_type+'s', builtin_name) + if validator: + try: + validator.validate_class_args(**class_args) + except Exception as e: + raise SchemaError(str(e)) + + def missing_customized_keys(self, data): + return self.customized_keys - set(data.keys()) + + def validate_extras(self, data, algo_type): + builtin_key = self.builtin_keys[algo_type] + if (builtin_key in data) and (set(data.keys()) & self.customized_keys): + raise SchemaError('{} and {} cannot be specified at the same
time.'.format( + builtin_key, set(data.keys()) & self.customized_keys + )) + + if self.missing_customized_keys(data) and builtin_key not in data: + raise SchemaError('Either customized {} ({}) or builtin {} ({}) must be set.'.format( + algo_type, self.customized_keys, algo_type, builtin_key)) + + if not self.missing_customized_keys(data): + class_file_name = os.path.join(data['codeDir'], data['classFileName']) + if not os.path.isfile(class_file_name): + raise SchemaError('classFileName {} not found.'.format(class_file_name)) + + builtin_name = data.get(builtin_key) + class_args = data.get('classArgs') + self.validate_class_args(class_args, algo_type, builtin_name) + + def validate(self, data): + self.algo_schema.update(self.builtin_name_schema[self.algo_type]) + Schema(self.algo_schema).validate(data) + self.validate_extras(data, self.algo_type) + + +common_schema = { + 'authorName': setType('authorName', str), + 'experimentName': setType('experimentName', str), + Optional('description'): setType('description', str), + 'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999), + Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')), + Optional('maxTrialDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxTrialDuration format is [digit]{s,m,h,d}')), + Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999), + 'trainingServicePlatform': setChoice( + 'trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller', 'dlts', 'aml', 'adl', 'hybrid'), + Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'), + Optional('multiPhase'): setType('multiPhase', bool), + Optional('multiThread'): setType('multiThread', bool), + Optional('nniManagerIp'): setType('nniManagerIp', str), + Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'), + Optional('debug'): setType('debug', bool), + Optional('versionCheck'): setType('versionCheck', bool), + Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'), + Optional('logCollection'): setChoice('logCollection', 'http', 'none'), + 'useAnnotation': setType('useAnnotation', bool), + Optional('tuner'): AlgoSchema('tuner'), + Optional('advisor'): AlgoSchema('advisor'), + Optional('assessor'): AlgoSchema('assessor'), + Optional('localConfig'): { + Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'), + Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int), + Optional('useActiveGpu'): setType('useActiveGpu', bool) + }, + Optional('sharedStorage'): { + 'storageType': setChoice('storageType', 'NFS', 'AzureBlob'), + Optional('localMountPoint'): setType('localMountPoint', str), + Optional('remoteMountPoint'): setType('remoteMountPoint', str), + Optional('nfsServer'): setType('nfsServer', str), + Optional('exportedDirectory'): setType('exportedDirectory', str), + Optional('storageAccountName'): setType('storageAccountName', str), + Optional('storageAccountKey'): setType('storageAccountKey', str), + Optional('containerName'): setType('containerName', str), + Optional('localMounted'): setChoice('localMounted', 'usermount', 'nnimount', 'nomount') + } +} + +common_trial_schema = { + 'trial': { + 'command': setType('command', str), + 'codeDir': setPathCheck('codeDir'), + Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + Optional('nasMode'): 
setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode') + } +} + +pai_yarn_trial_schema = { + 'trial': { + 'command': setType('command', str), + 'codeDir': setPathCheck('codeDir'), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'cpuNum': setNumberRange('cpuNum', int, 0, 99999), + 'memoryMB': setType('memoryMB', int), + 'image': setType('image', str), + Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'), + Optional('shmMB'): setType('shmMB', int), + Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'), + error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'), + Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'), + error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'), + Optional('virtualCluster'): setType('virtualCluster', str), + Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'), + Optional('portList'): [{ + 'label': setType('label', str), + 'beginAt': setType('beginAt', int), + 'portNumber': setType('portNumber', int) + }] + } +} + + +pai_trial_schema = { + 'trial': { + 'codeDir': setPathCheck('codeDir'), + 'nniManagerNFSMountPath': setPathCheck('nniManagerNFSMountPath'), + 'containerNFSMountPath': setType('containerNFSMountPath', str), + Optional('command'): setType('command', str), + Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + Optional('cpuNum'): setNumberRange('cpuNum', int, 0, 99999), + Optional('memoryMB'): setType('memoryMB', int), + Optional('image'): setType('image', str), + Optional('virtualCluster'): setType('virtualCluster', str), + Optional('paiStorageConfigName'): setType('paiStorageConfigName', str), + Optional('paiConfigPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'paiConfigPath') + } +} + +pai_config_schema = { + Optional('paiConfig'): { + 'userName': setType('userName', str), + Or('passWord', 'token', only_one=True): str, + 'host': setType('host', str), + Optional('reuse'): setType('reuse', bool), + Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + Optional('cpuNum'): setNumberRange('cpuNum', int, 0, 99999), + Optional('memoryMB'): setType('memoryMB', int), + Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int), + Optional('useActiveGpu'): setType('useActiveGpu', bool), + } +} + +dlts_trial_schema = { + 'trial': { + 'command': setType('command', str), + 'codeDir': setPathCheck('codeDir'), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'image': setType('image', str), + } +} + +dlts_config_schema = { + 'dltsConfig': { + 'dashboard': setType('dashboard', str), + + Optional('cluster'): setType('cluster', str), + Optional('team'): setType('team', str), + + Optional('email'): setType('email', str), + Optional('password'): setType('password', str), + } +} + +aml_trial_schema = { + 'trial': { + 'codeDir': setPathCheck('codeDir'), + 'command': setType('command', str), + 'image': setType('image', str), + Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + } +} + +aml_config_schema = { + Optional('amlConfig'): { + 'subscriptionId': setType('subscriptionId', str), + 'resourceGroup': setType('resourceGroup', str), + 'workspaceName': setType('workspaceName', str), + 'computeTarget': setType('computeTarget', str), + Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int), + Optional('useActiveGpu'): setType('useActiveGpu', bool), + } +} + +hybrid_trial_schema
= { + 'trial': { + 'codeDir': setPathCheck('codeDir'), + Optional('nniManagerNFSMountPath'): setPathCheck('nniManagerNFSMountPath'), + Optional('containerNFSMountPath'): setType('containerNFSMountPath', str), + Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'), + 'command': setType('command', str), + Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + Optional('cpuNum'): setNumberRange('cpuNum', int, 0, 99999), + Optional('memoryMB'): setType('memoryMB', int), + Optional('image'): setType('image', str), + Optional('virtualCluster'): setType('virtualCluster', str), + Optional('paiStorageConfigName'): setType('paiStorageConfigName', str), + Optional('paiConfigPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'paiConfigPath') + } +} + +hybrid_config_schema = { + 'hybridConfig': { + 'trainingServicePlatforms': ['local', 'remote', 'pai', 'aml'] + } +} + +adl_trial_schema = { + 'trial':{ + 'codeDir': setType('codeDir', str), + 'command': setType('command', str), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'image': setType('image', str), + Optional('namespace'): setType('namespace', str), + Optional('imagePullSecrets'): [{ + 'name': setType('name', str) + }], + Optional('nfs'): { + 'server': setType('server', str), + 'path': setType('path', str), + 'containerMountPath': setType('containerMountPath', str) + }, + Optional('adaptive'): setType('adaptive', bool), + Optional('checkpoint'): { + 'storageClass': setType('storageClass', str), + 'storageSize': setType('storageSize', str) + }, + Optional('cpuNum'): setNumberRange('cpuNum', int, 0, 99999), + Optional('memorySize'): setType('memorySize', str) + } +} + +kubeflow_trial_schema = { + 'trial': { + 'codeDir': setPathCheck('codeDir'), + Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'), + Optional('ps'): { + 'replicas': setType('replicas', int), + 'command': setType('command', str), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'cpuNum': setNumberRange('cpuNum', int, 0, 99999), + 'memoryMB': setType('memoryMB', int), + 'image': setType('image', str), + Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath') + }, + Optional('master'): { + 'replicas': setType('replicas', int), + 'command': setType('command', str), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'cpuNum': setNumberRange('cpuNum', int, 0, 99999), + 'memoryMB': setType('memoryMB', int), + 'image': setType('image', str), + Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath') + }, + Optional('worker'): { + 'replicas': setType('replicas', int), + 'command': setType('command', str), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'cpuNum': setNumberRange('cpuNum', int, 0, 99999), + 'memoryMB': setType('memoryMB', int), + 'image': setType('image', str), + Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath') + } + } +} + +kubeflow_config_schema = { + 'kubeflowConfig': Or({ + 'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'), + 'apiVersion': setType('apiVersion', str), + Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'), + 'nfs': { + 'server': setType('server', str), + 'path': setType('path', str) + }, + Optional('reuse'): setType('reuse', bool), + }, { + 'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'), + 'apiVersion': 
setType('apiVersion', str), + Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'), + 'keyVault': { + 'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'), + error='ERROR: vaultName format error, vaultName supports (0-9|a-z|A-Z|-)'), + 'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'), + error='ERROR: name format error, name supports (0-9|a-z|A-Z|-)') + }, + 'azureStorage': { + 'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'), + error='ERROR: accountName format error, accountName supports (0-9|a-z|A-Z|-)'), + 'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'), + error='ERROR: azureShare format error, azureShare supports (0-9|a-z|A-Z|-)') + }, + Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999), + Optional('reuse'): setType('reuse', bool), + }) +} + +frameworkcontroller_trial_schema = { + 'trial': { + 'codeDir': setPathCheck('codeDir'), + Optional('taskRoles'): [{ + 'name': setType('name', str), + 'taskNum': setType('taskNum', int), + 'frameworkAttemptCompletionPolicy': { + 'minFailedTaskCount': setType('minFailedTaskCount', int), + 'minSucceededTaskCount': setType('minSucceededTaskCount', int), + }, + 'command': setType('command', str), + 'gpuNum': setNumberRange('gpuNum', int, 0, 99999), + 'cpuNum': setNumberRange('cpuNum', int, 0, 99999), + 'memoryMB': setType('memoryMB', int), + 'image': setType('image', str), + Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath') + }] + } +} + +frameworkcontroller_config_schema = { + 'frameworkcontrollerConfig': Or({ + Optional('storage'): setChoice('storage', 'nfs', 'azureStorage', 'pvc'), + Optional('serviceAccountName'): setType('serviceAccountName', str), + 'nfs': { + 'server': setType('server', str), + 'path': setType('path', str) + }, + Optional('namespace'): setType('namespace', str), + Optional('configPath'): setType('configPath', str), + Optional('reuse'): setType('reuse', bool), + }, { + Optional('storage'): setChoice('storage', 'nfs', 'azureStorage', 'pvc'), + Optional('serviceAccountName'): setType('serviceAccountName', str), + 'configPath': setType('configPath', str), + 'pvc': {'path': setType('path', str)}, + Optional('namespace'): setType('namespace', str), + Optional('reuse'): setType('reuse', bool), + }, { + Optional('storage'): setChoice('storage', 'nfs', 'azureStorage', 'pvc'), + Optional('serviceAccountName'): setType('serviceAccountName', str), + 'keyVault': { + 'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'), + error='ERROR: vaultName format error, vaultName supports (0-9|a-z|A-Z|-)'), + 'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'), + error='ERROR: name format error, name supports (0-9|a-z|A-Z|-)') + }, + 'azureStorage': { + 'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'), + error='ERROR: accountName format error, accountName supports (0-9|a-z|A-Z|-)'), + 'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'), + error='ERROR: azureShare format error, azureShare supports (0-9|a-z|A-Z|-)') + }, + Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999), + Optional('namespace'): setType('namespace', str), + Optional('configPath'): setType('configPath', str), + Optional('reuse'): setType('reuse', bool), + }) +} + +remote_config_schema = { + Optional('remoteConfig'): { + 'reuse': setType('reuse', bool) + } +} + +machine_list_schema = { + Optional('machineList'): [Or( + { + 'ip': setType('ip', str), + Optional('port'): setNumberRange('port',
int, 1, 65535), + 'username': setType('username', str), + 'sshKeyPath': setPathCheck('sshKeyPath'), + Optional('passphrase'): setType('passphrase', str), + Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'), + Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int), + Optional('useActiveGpu'): setType('useActiveGpu', bool), + Optional('pythonPath'): setType('pythonPath', str) + }, + { + 'ip': setType('ip', str), + Optional('port'): setNumberRange('port', int, 1, 65535), + 'username': setType('username', str), + 'passwd': setType('passwd', str), + Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'), + Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int), + Optional('useActiveGpu'): setType('useActiveGpu', bool), + Optional('pythonPath'): setType('pythonPath', str) + })] +} + +training_service_schema_dict = { + 'adl': Schema({**common_schema, **adl_trial_schema}), + 'local': Schema({**common_schema, **common_trial_schema}), + 'remote': Schema({**common_schema, **common_trial_schema, **machine_list_schema, **remote_config_schema}), + 'pai': Schema({**common_schema, **pai_trial_schema, **pai_config_schema}), + 'kubeflow': Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema}), + 'frameworkcontroller': Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema}), + 'aml': Schema({**common_schema, **aml_trial_schema, **aml_config_schema}), + 'dlts': Schema({**common_schema, **dlts_trial_schema, **dlts_config_schema}), + 'hybrid': Schema({**common_schema, **hybrid_trial_schema, **hybrid_config_schema, **machine_list_schema, + **pai_config_schema, **aml_config_schema, **remote_config_schema}), +} + + +class NNIConfigSchema: + def validate(self, data): + train_service = data['trainingServicePlatform'] + Schema(common_schema['trainingServicePlatform']).validate(train_service) + train_service_schema = training_service_schema_dict[train_service] + train_service_schema.validate(data) + self.validate_extras(data) + + def validate_extras(self, experiment_config): + self.validate_tuner_advisor_assessor(experiment_config) + self.validate_pai_trial_conifg(experiment_config) + self.validate_kubeflow_operators(experiment_config) + self.validate_hybrid_platforms(experiment_config) + self.validate_frameworkcontroller_trial_config(experiment_config) + + def validate_tuner_advisor_assessor(self, experiment_config): + if experiment_config.get('advisor'): + if experiment_config.get('assessor') or experiment_config.get('tuner'): + raise SchemaError('advisor cannot be set together with assessor or tuner!') + self.validate_annotation_content(experiment_config, 'advisor', 'builtinAdvisorName') + else: + if not experiment_config.get('tuner'): + raise SchemaError('Please provide tuner spec!') + self.validate_annotation_content(experiment_config, 'tuner', 'builtinTunerName') + + def validate_search_space_content(self, experiment_config): + '''Validate search space content: + if the search space file is not in JSON format, or any of its values lacks the required _type and _value fields, + it is not a valid search space file''' + try: + search_space_content = json.load(open(experiment_config.get('searchSpacePath'), 'r')) + for value in search_space_content.values(): + if not value.get('_type') or not value.get('_value'): + raise SchemaError('please use _type and _value to specify
+
+    def validate_kubeflow_operators(self, experiment_config):
+        '''Validate whether the kubeflow operator settings are valid'''
+        if experiment_config.get('kubeflowConfig'):
+            if experiment_config.get('kubeflowConfig').get('operator') == 'tf-operator':
+                if experiment_config.get('trial').get('master') is not None:
+                    raise SchemaError('kubeflow with tf-operator cannot set master')
+                if experiment_config.get('trial').get('worker') is None:
+                    raise SchemaError('kubeflow with tf-operator must set worker')
+            elif experiment_config.get('kubeflowConfig').get('operator') == 'pytorch-operator':
+                if experiment_config.get('trial').get('ps') is not None:
+                    raise SchemaError('kubeflow with pytorch-operator cannot set ps')
+                if experiment_config.get('trial').get('master') is None:
+                    raise SchemaError('kubeflow with pytorch-operator must set master')
+
+            if experiment_config.get('kubeflowConfig').get('storage') == 'nfs':
+                if experiment_config.get('kubeflowConfig').get('nfs') is None:
+                    raise SchemaError('please set nfs configuration!')
+            elif experiment_config.get('kubeflowConfig').get('storage') == 'azureStorage':
+                if experiment_config.get('kubeflowConfig').get('azureStorage') is None:
+                    raise SchemaError('please set azureStorage configuration!')
+            elif experiment_config.get('kubeflowConfig').get('storage') is None:
+                if experiment_config.get('kubeflowConfig').get('azureStorage'):
+                    raise SchemaError('please set storage type!')
+
+    def validate_annotation_content(self, experiment_config, spec_key, builtin_name):
+        '''
+        Validate that useAnnotation and searchSpacePath do not coexist.
+        spec_key: 'advisor' or 'tuner'
+        builtin_name: 'builtinAdvisorName' or 'builtinTunerName'
+        '''
+        if experiment_config.get('useAnnotation'):
+            if experiment_config.get('searchSpacePath'):
+                raise SchemaError('If you set useAnnotation=true, please leave searchSpacePath empty')
+        else:
+            # validate searchSpaceFile
+            if experiment_config[spec_key].get(builtin_name) == 'NetworkMorphism':
+                return
+            if experiment_config[spec_key].get(builtin_name):
+                if experiment_config.get('searchSpacePath') is None:
+                    raise SchemaError('Please set searchSpacePath!')
+                self.validate_search_space_content(experiment_config)
+
+    def validate_pai_config_path(self, experiment_config):
+        '''Validate the paiConfigPath field'''
+        if experiment_config.get('trainingServicePlatform') == 'pai':
+            if experiment_config.get('trial', {}).get('paiConfigPath'):
+                # validate the content of the additional pai config file
+                pai_config = get_yml_content(experiment_config['trial']['paiConfigPath'])
+                taskRoles_dict = pai_config.get('taskRoles')
+                if not taskRoles_dict:
+                    raise SchemaError('Please set taskRoles in paiConfigPath config file!')
+            else:
+                pai_trial_fields_required_list = ['image', 'paiStorageConfigName', 'command']
+                for trial_field in pai_trial_fields_required_list:
+                    if experiment_config['trial'].get(trial_field) is None:
+                        raise SchemaError('Please set {0} in trial configuration, '
+                                          'or set an additional pai configuration file path in paiConfigPath!'.format(trial_field))
+                pai_resource_fields_required_list = ['gpuNum', 'cpuNum', 'memoryMB']
+                for required_field in pai_resource_fields_required_list:
+                    if experiment_config['trial'].get(required_field) is None and \
+                            experiment_config['paiConfig'].get(required_field) is None:
+                        raise SchemaError('Please set {0} in trial or paiConfig configuration, '
+                                          'or set an additional pai configuration file path in paiConfigPath!'.format(required_field))
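+
+    # For reference, a minimal search space file that passes validate_search_space_content
+    # above; this is an illustrative sketch (the parameter names are made up), and every
+    # top-level value must carry both _type and _value:
+    #
+    #     {
+    #         "lr": {"_type": "loguniform", "_value": [0.00001, 0.1]},
+    #         "batch_size": {"_type": "choice", "_value": [16, 32, 64]}
+    #     }
+    #
+    # Omitting either key raises SchemaError('please use _type and _value to specify searchspace!').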
+
+    def validate_pai_trial_config(self, experiment_config):
+        '''Validate the trial config on the pai platform'''
+        if experiment_config.get('trainingServicePlatform') in ['pai']:
+            if experiment_config.get('trial').get('shmMB') and \
+                    experiment_config['trial']['shmMB'] > experiment_config['trial']['memoryMB']:
+                raise SchemaError('shmMB should be no more than memoryMB!')
+            # backward compatibility
+            warning_information = '{0} is no longer supported in NNI, please remove the field from your config file! ' \
+                                  'Please refer to https://github.com/microsoft/nni/blob/master/docs/en_US/TrainingService/PaiMode.md#run-an-experiment ' \
+                                  'for how to get data and output models in trial code'
+            if experiment_config.get('trial').get('dataDir'):
+                print_warning(warning_information.format('dataDir'))
+            if experiment_config.get('trial').get('outputDir'):
+                print_warning(warning_information.format('outputDir'))
+            self.validate_pai_config_path(experiment_config)
+
+    def validate_hybrid_platforms(self, experiment_config):
+        required_config_name_map = {
+            'remote': 'machineList',
+            'aml': 'amlConfig',
+            'pai': 'paiConfig'
+        }
+        if experiment_config.get('trainingServicePlatform') == 'hybrid':
+            for platform in experiment_config['hybridConfig']['trainingServicePlatforms']:
+                config_name = required_config_name_map.get(platform)
+                if config_name and not experiment_config.get(config_name):
+                    raise SchemaError('Need to set {0} for {1} in hybrid mode!'.format(config_name, platform))
+
+    def validate_frameworkcontroller_trial_config(self, experiment_config):
+        if experiment_config.get('trainingServicePlatform') == 'frameworkcontroller':
+            if not experiment_config.get('trial').get('taskRoles'):
+                if not experiment_config.get('frameworkcontrollerConfig').get('configPath'):
+                    raise SchemaError("""If no taskRoles are specified, a valid custom frameworkcontroller config should
+                        be set using the configPath attribute in frameworkcontrollerConfig!""")
+                config_content = get_yml_content(experiment_config.get('frameworkcontrollerConfig').get('configPath'))
+                if not config_content.get('spec') or not config_content.get('spec').get('taskRoles'):
+                    raise SchemaError('Invalid frameworkcontroller config! No taskRoles were specified!')
+                if not config_content.get('spec').get('taskRoles')[0].get('task'):
+                    raise SchemaError('Invalid frameworkcontroller config! No task was specified for taskRole!')
+                names = []
+                for taskRole in config_content.get('spec').get('taskRoles'):
+                    if 'name' not in taskRole:
+                        raise SchemaError('Invalid frameworkcontroller config! Name is missing for taskRole!')
+                    names.append(taskRole.get('name'))
+                if len(names) > len(set(names)):
+                    raise SchemaError('Invalid frameworkcontroller config! Duplicate taskRole names!')
+                if not config_content.get('metadata').get('name'):
+                    raise SchemaError('Invalid frameworkcontroller config! No experiment name was specified!')
+
diff --git a/nni/tools/nnictl/config_utils.py b/nni/tools/nnictl/config_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6abdc8a49cd0059c9cf4fa2e7095e6dc38a8d138
--- /dev/null
+++ b/nni/tools/nnictl/config_utils.py
@@ -0,0 +1,175 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
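+#
+# The config_v0_to_v1 converter below inverts the flat `clusterMetaData` list stored by
+# legacy (v0) experiments back into a v1-style config. A rough sketch of the transformation
+# on a remote-mode config (all field values here are made up for illustration):
+#
+#     before: {'trainingServicePlatform': 'remote',
+#              'clusterMetaData': [
+#                  {'key': 'machine_list', 'value': [{'ip': '10.0.0.1', 'username': 'nni'}]},
+#                  {'key': 'trial_config', 'value': {'command': 'python3 mnist.py'}}]}
+#
+#     after:  {'trainingServicePlatform': 'remote',
+#              'machineList': [{'ip': '10.0.0.1', 'username': 'nni'}],
+#              'trial': {'command': 'python3 mnist.py'}}
+#
+# (`clusterMetaData` itself is popped from the result.)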
+ +import os +import sqlite3 +import nni +from .constants import NNI_HOME_DIR +from .common_utils import get_file_lock + +def config_v0_to_v1(config: dict) -> dict: + if 'clusterMetaData' not in config: + return config + elif 'trainingServicePlatform' in config: + import copy + experiment_config = copy.deepcopy(config) + if experiment_config['trainingServicePlatform'] == 'hybrid': + inverse_config = {'hybridConfig': experiment_config['clusterMetaData']['hybrid_config']} + platform_list = inverse_config['hybridConfig']['trainingServicePlatforms'] + for platform in platform_list: + inverse_config.update(_inverse_cluster_metadata(platform, experiment_config['clusterMetaData'])) + experiment_config.update(inverse_config) + else: + inverse_config = _inverse_cluster_metadata(experiment_config['trainingServicePlatform'], experiment_config['clusterMetaData']) + experiment_config.update(inverse_config) + experiment_config.pop('clusterMetaData') + return experiment_config + else: + raise RuntimeError('experiment config key `trainingServicePlatform` not found') + +def _inverse_cluster_metadata(platform: str, metadata_config: list) -> dict: + inverse_config = {} + if platform == 'local': + inverse_config['trial'] = {} + for kv in metadata_config: + if kv['key'] == 'local_config': + inverse_config['localConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'remote': + for kv in metadata_config: + if kv['key'] == 'machine_list': + inverse_config['machineList'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif kv['key'] == 'remote_config': + inverse_config['remoteConfig'] = kv['value'] + elif platform == 'pai': + for kv in metadata_config: + if kv['key'] == 'pai_config': + inverse_config['paiConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'kubeflow': + for kv in metadata_config: + if kv['key'] == 'kubeflow_config': + inverse_config['kubeflowConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'frameworkcontroller': + for kv in metadata_config: + if kv['key'] == 'frameworkcontroller_config': + inverse_config['frameworkcontrollerConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'aml': + for kv in metadata_config: + if kv['key'] == 'aml_config': + inverse_config['amlConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'dlc': + for kv in metadata_config: + if kv['key'] == 'dlc_config': + inverse_config['dlcConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + elif platform == 'adl': + for kv in metadata_config: + if kv['key'] == 'adl_config': + inverse_config['adlConfig'] = kv['value'] + elif kv['key'] == 'trial_config': + inverse_config['trial'] = kv['value'] + else: + raise RuntimeError('training service platform {} not found'.format(platform)) + return inverse_config + +class Config: + '''a util class to load and save config''' + def __init__(self, experiment_id: str, log_dir: str): + self.experiment_id = experiment_id + self.conn = sqlite3.connect(os.path.join(log_dir, experiment_id, 'db', 'nni.sqlite')) + self.refresh_config() + + def refresh_config(self): + '''refresh to get latest config''' + sql = 'select params from ExperimentProfile where id=? 
order by revision DESC'
+        args = (self.experiment_id,)
+        self.config = config_v0_to_v1(nni.load(self.conn.cursor().execute(sql, args).fetchone()[0]))
+
+    def get_config(self):
+        '''return the whole config'''
+        return self.config
+
+class Experiments:
+    '''Maintain the experiment list'''
+    def __init__(self, home_dir=NNI_HOME_DIR):
+        os.makedirs(home_dir, exist_ok=True)
+        self.experiment_file = os.path.join(home_dir, '.experiment')
+        self.lock = get_file_lock(self.experiment_file, stale=2)
+        with self.lock:
+            self.experiments = self.read_file()
+
+    def add_experiment(self, expId, port, startTime, platform, experiment_name, endTime='N/A', status='INITIALIZED',
+                       tag=None, pid=None, webuiUrl=None, logDir='', prefixUrl=None):
+        '''add an experiment entry to self.experiments'''
+        with self.lock:
+            self.experiments = self.read_file()
+            self.experiments[expId] = {}
+            self.experiments[expId]['id'] = expId
+            self.experiments[expId]['port'] = port
+            self.experiments[expId]['startTime'] = startTime
+            self.experiments[expId]['endTime'] = endTime
+            self.experiments[expId]['status'] = status
+            self.experiments[expId]['platform'] = platform
+            self.experiments[expId]['experimentName'] = experiment_name
+            # avoid mutable default arguments: fall back to fresh lists here
+            self.experiments[expId]['tag'] = tag if tag is not None else []
+            self.experiments[expId]['pid'] = pid
+            self.experiments[expId]['webuiUrl'] = webuiUrl if webuiUrl is not None else []
+            self.experiments[expId]['logDir'] = str(logDir)
+            self.experiments[expId]['prefixUrl'] = prefixUrl
+            self.write_file()
+
+    def update_experiment(self, expId, key, value):
+        '''update one field of an experiment'''
+        with self.lock:
+            self.experiments = self.read_file()
+            if expId not in self.experiments:
+                return False
+            if value is None:
+                self.experiments[expId].pop(key, None)
+            else:
+                self.experiments[expId][key] = value
+            self.write_file()
+            return True
+
+    def remove_experiment(self, expId):
+        '''remove an experiment by id'''
+        with self.lock:
+            self.experiments = self.read_file()
+            if expId in self.experiments:
+                self.experiments.pop(expId)
+            self.write_file()
+
+    def get_all_experiments(self):
+        '''return all experiments'''
+        return self.experiments
+
+    def write_file(self):
+        '''save the experiment list to the local file'''
+        try:
+            with open(self.experiment_file, 'w') as file:
+                nni.dump(self.experiments, file, indent=4)
+        except IOError as error:
+            print('Error:', error)
+            return ''
+
+    def read_file(self):
+        '''load the experiment list from the local file'''
+        if os.path.exists(self.experiment_file):
+            try:
+                with open(self.experiment_file, 'r') as file:
+                    return nni.load(fp=file)
+            except ValueError:
+                return {}
+        return {}
diff --git a/nni/tools/nnictl/constants.py b/nni/tools/nnictl/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..120674bc5f3481b6474f85107aa99b81044bdd4f
--- /dev/null
+++ b/nni/tools/nnictl/constants.py
@@ -0,0 +1,79 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
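+#
+# Note: several of the message constants below are old-style '%' templates that callers
+# format at the call site; for example, legacy_launcher.py does (values here are
+# hypothetical):
+#
+#     print_normal(EXPERIMENT_SUCCESS_INFO % ('GgGx9mED', 'http://127.0.0.1:8080'))
+#     raise Exception(ERROR_INFO % 'Rest server stopped!')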
+
+import os
+from colorama import Fore
+
+NNI_HOME_DIR = os.path.join(os.path.expanduser('~'), 'nni-experiments')
+
+ERROR_INFO = 'ERROR: %s'  # '%' template: callers format it, e.g. ERROR_INFO % 'Rest server stopped!'
+NORMAL_INFO = 'INFO: '
+WARNING_INFO = 'WARNING: '
+
+DEFAULT_REST_PORT = 8080
+REST_TIME_OUT = 20
+
+EXPERIMENT_SUCCESS_INFO = Fore.GREEN + 'Successfully started experiment!\n' + Fore.RESET + \
+                          '------------------------------------------------------------------------------------\n' \
+                          'The experiment id is %s\n'\
+                          'The Web UI urls are: %s\n' \
+                          '------------------------------------------------------------------------------------\n\n' \
+                          'You can use these commands to get more information about the experiment\n' \
+                          '------------------------------------------------------------------------------------\n' \
+                          '         commands                       description\n' \
+                          '1. nnictl experiment show        show the information of experiments\n' \
+                          '2. nnictl trial ls               list all of trial jobs\n' \
+                          '3. nnictl top                    monitor the status of running experiments\n' \
+                          '4. nnictl log stderr             show stderr log content\n' \
+                          '5. nnictl log stdout             show stdout log content\n' \
+                          '6. nnictl stop                   stop an experiment\n' \
+                          '7. nnictl trial kill             kill a trial job by id\n' \
+                          '8. nnictl --help                 get help information about nnictl\n' \
+                          '------------------------------------------------------------------------------------\n' \
+                          'Command reference document https://nni.readthedocs.io/en/latest/Tutorial/Nnictl.html\n' \
+                          '------------------------------------------------------------------------------------\n'
+
+LOG_HEADER = '-----------------------------------------------------------------------\n' \
+             '                Experiment start time %s\n' \
+             '-----------------------------------------------------------------------\n'
+
+EXPERIMENT_START_FAILED_INFO = 'There is an experiment running on port %d, please stop it first or set another port!\n' \
+                               'You could use the \'nnictl stop --port [PORT]\' command to stop an experiment!\nOr you could ' \
+                               'use \'nnictl create --config [CONFIG_PATH] --port [PORT]\' to set another port!\n'
+
+EXPERIMENT_INFORMATION_FORMAT = '----------------------------------------------------------------------------------------\n' \
+                                '                Experiment information\n' \
+                                '%s\n' \
+                                '----------------------------------------------------------------------------------------\n'
+
+EXPERIMENT_DETAIL_FORMAT = 'Id: %s    Name: %s    Status: %s    Port: %s    Platform: %s    StartTime: %s    EndTime: %s\n'
+
+EXPERIMENT_MONITOR_INFO = 'Id: %s    Status: %s    Port: %s    Platform: %s    \n' \
+                          'StartTime: %s    Duration: %s'
+
+TRIAL_MONITOR_HEAD = '-------------------------------------------------------------------------------------\n' + \
+                     '%-15s %-25s %-25s %-15s \n' % ('trialId', 'startTime', 'endTime', 'status') + \
+                     '-------------------------------------------------------------------------------------'
+
+TRIAL_MONITOR_CONTENT = '%-15s %-25s %-25s %-15s'
+
+TRIAL_MONITOR_TAIL = '-------------------------------------------------------------------------------------\n\n\n'
+
+TUNERS_SUPPORTING_IMPORT_DATA = {
+    'TPE',
+    'Anneal',
+    'GridSearch',
+    'MetisTuner',
+    'BOHB',
+    'SMAC',
+    'BatchTuner'
+}
+
+TUNERS_NO_NEED_TO_IMPORT_DATA = {
+    'Random',
+    'Hyperband'
+}
+
+SCHEMA_TYPE_ERROR = '%s should be of %s type!'
+SCHEMA_RANGE_ERROR = '%s should be in the range of %s!'
+SCHEMA_PATH_ERROR = '%s path does not exist!'
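+
+# A quick sketch of how the SCHEMA_* templates above are consumed by the schema helpers
+# in config_schema.py; for example, the privateRegistryAuthPath rule there is
+#
+#     And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
+#
+# so a failed check reads: "privateRegistryAuthPath path does not exist!"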
diff --git a/nni/tools/nnictl/launcher.py b/nni/tools/nnictl/launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..dde00c3e3fd27bd5c002e4adeea931f3d0b2a808 --- /dev/null +++ b/nni/tools/nnictl/launcher.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from getpass import getuser +import logging +from pathlib import Path +import tempfile + +from colorama import Fore +import yaml + +from nni.experiment import Experiment, RunMode +from nni.experiment.config import ExperimentConfig, convert, utils +from nni.runtime.log import init_logger_for_command_line +from nni.tools.annotation import expand_annotations, generate_search_space + +# used for v1-only legacy setup, remove them later +from nni.experiment.launcher import get_stopped_experiment_config_json +from . import legacy_launcher + +_logger = logging.getLogger(__name__) + +def create_experiment(args): + # to make it clear what are inside args + config_file = Path(args.config) + port = args.port + debug = args.debug + url_prefix = args.url_prefix + foreground = args.foreground + + # it should finally be done in nnictl main function + # but for now don't break routines without logging support + init_logger_for_command_line() + logging.getLogger('nni').setLevel(logging.INFO) + + if not config_file.is_file(): + _logger.error(f'"{config_file}" is not a valid file.') + exit(1) + + with config_file.open() as config: + config_content = yaml.safe_load(config) + + v1_platform = config_content.get('trainingServicePlatform') + if v1_platform: + can_convert = True + if v1_platform == 'adl': + can_convert = False + if v1_platform in ['kubeflow', 'frameworkcontroller']: + reuse = config_content.get(v1_platform + 'Config', {}).get('reuse') + can_convert = (reuse != False) # if user does not explicitly specify it, convert to reuse mode + + if not can_convert: + legacy_launcher.create_experiment(args) + exit() + + try: + v2_config = convert.to_v2(config_content) + except Exception: + _logger.error( + 'You are using legacy config format with incorrect fields or values, ' + 'to get more accurate error message please update it to the new format.' 
+            )
+            _logger.error('Reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html')
+            exit(1)
+        _logger.warning('You are using a legacy config file, please update it to the latest format:')
+        # use `print` here because logging would add a timestamp and make it hard to copy-paste
+        print(Fore.YELLOW + '=' * 80 + Fore.RESET)
+        print(yaml.dump(v2_config, sort_keys=False).strip())
+        print(Fore.YELLOW + '=' * 80 + Fore.RESET)
+        print(Fore.YELLOW + 'Reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html' + Fore.RESET)
+
+        utils.set_base_path(config_file.parent)
+        config = ExperimentConfig(**v2_config)
+        utils.unset_base_path()
+
+    else:
+        config = ExperimentConfig.load(config_file)
+
+    if config.use_annotation:
+        path = Path(tempfile.gettempdir(), getuser(), 'nni', 'annotation')
+        path.mkdir(parents=True, exist_ok=True)
+        path = tempfile.mkdtemp(dir=path)
+        code_dir = expand_annotations(config.trial_code_directory, path)
+        config.trial_code_directory = code_dir
+        config.search_space = generate_search_space(code_dir)
+        assert config.search_space, 'ERROR: Generated search space is empty'
+        config.use_annotation = False
+
+    exp = Experiment(config)
+    exp.url_prefix = url_prefix
+    run_mode = RunMode.Foreground if foreground else RunMode.Detach
+    exp.start(port, debug, run_mode)
+
+    _logger.info(f'To stop experiment run "nnictl stop {exp.id}" or "nnictl stop --all"')
+    _logger.info('Reference: https://nni.readthedocs.io/en/stable/Tutorial/Nnictl.html')
+
+def resume_experiment(args):
+    exp_id = args.id
+    port = args.port
+    debug = args.debug
+    foreground = args.foreground
+    exp_dir = args.experiment_dir
+
+    init_logger_for_command_line()
+    logging.getLogger('nni').setLevel(logging.INFO)
+
+    config_json = get_stopped_experiment_config_json(exp_id, exp_dir)
+    if config_json.get('trainingServicePlatform'):
+        legacy_launcher.resume_experiment(args)
+        exit()
+
+    exp = Experiment._resume(exp_id, exp_dir)
+    run_mode = RunMode.Foreground if foreground else RunMode.Detach
+    exp.start(port, debug, run_mode)
+
+def view_experiment(args):
+    exp_id = args.id
+    port = args.port
+    exp_dir = args.experiment_dir
+
+    init_logger_for_command_line()
+    logging.getLogger('nni').setLevel(logging.INFO)
+
+    config_json = get_stopped_experiment_config_json(exp_id, exp_dir)
+    if config_json.get('trainingServicePlatform'):
+        legacy_launcher.view_experiment(args)
+        exit()
+
+    exp = Experiment._view(exp_id, exp_dir)
+    exp.start(port, run_mode=RunMode.Detach)
diff --git a/nni/tools/nnictl/launcher_utils.py b/nni/tools/nnictl/launcher_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef4f3c35fc9c4e8c3c215412d66a5c048691a2d6
--- /dev/null
+++ b/nni/tools/nnictl/launcher_utils.py
@@ -0,0 +1,136 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
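+#
+# The parse_time helper below converts NNI duration strings into seconds; a few
+# illustrative inputs and outputs:
+#
+#     parse_time('30s')   # -> 30
+#     parse_time('2h')    # -> 7200
+#     parse_time('999d')  # -> 86313600 (the default maxExecDuration)
+#     parse_time('10x')   # -> SchemaError: the unit of time must be one of {s, m, h, d}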
+
+import os
+from schema import SchemaError
+from .config_schema import NNIConfigSchema
+from .common_utils import print_normal
+
+def expand_path(experiment_config, key):
+    '''Expand '~' to the user home directory'''
+    if experiment_config.get(key):
+        experiment_config[key] = os.path.expanduser(experiment_config[key])
+
+def parse_relative_path(root_path, experiment_config, key):
+    '''Change a relative path to an absolute path'''
+    if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
+        absolute_path = os.path.join(root_path, experiment_config.get(key))
+        print_normal('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
+        experiment_config[key] = absolute_path

+def parse_time(time):
+    '''Convert a duration string such as '2h' to seconds'''
+    unit = time[-1]
+    if unit not in ['s', 'm', 'h', 'd']:
+        raise SchemaError('the unit of time must be one of {s, m, h, d}')
+    time = time[:-1]
+    if not time.isdigit():
+        raise SchemaError('time format error!')
+    parse_dict = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
+    return int(time) * parse_dict[unit]
+
+def parse_path(experiment_config, config_path):
+    '''Parse the paths in the config file'''
+    expand_path(experiment_config, 'searchSpacePath')
+    if experiment_config.get('logDir'):
+        expand_path(experiment_config, 'logDir')
+    if experiment_config.get('trial'):
+        expand_path(experiment_config['trial'], 'codeDir')
+        if experiment_config['trial'].get('authFile'):
+            expand_path(experiment_config['trial'], 'authFile')
+        if experiment_config['trial'].get('ps'):
+            if experiment_config['trial']['ps'].get('privateRegistryAuthPath'):
+                expand_path(experiment_config['trial']['ps'], 'privateRegistryAuthPath')
+        if experiment_config['trial'].get('master'):
+            if experiment_config['trial']['master'].get('privateRegistryAuthPath'):
+                expand_path(experiment_config['trial']['master'], 'privateRegistryAuthPath')
+        if experiment_config['trial'].get('worker'):
+            if experiment_config['trial']['worker'].get('privateRegistryAuthPath'):
+                expand_path(experiment_config['trial']['worker'], 'privateRegistryAuthPath')
+        if experiment_config['trial'].get('taskRoles'):
+            for index in range(len(experiment_config['trial']['taskRoles'])):
+                if experiment_config['trial']['taskRoles'][index].get('privateRegistryAuthPath'):
+                    expand_path(experiment_config['trial']['taskRoles'][index], 'privateRegistryAuthPath')
+    if experiment_config.get('tuner'):
+        expand_path(experiment_config['tuner'], 'codeDir')
+    if experiment_config.get('assessor'):
+        expand_path(experiment_config['assessor'], 'codeDir')
+    if experiment_config.get('advisor'):
+        expand_path(experiment_config['advisor'], 'codeDir')
+        if experiment_config['advisor'].get('classArgs') and experiment_config['advisor']['classArgs'].get('config_space'):
+            expand_path(experiment_config['advisor']['classArgs'], 'config_space')
+    if experiment_config.get('machineList'):
+        for index in range(len(experiment_config['machineList'])):
+            expand_path(experiment_config['machineList'][index], 'sshKeyPath')
+    if experiment_config['trial'].get('paiConfigPath'):
+        expand_path(experiment_config['trial'], 'paiConfigPath')
+
+    # If users use relative paths, convert them to absolute paths.
+ root_path = os.path.dirname(config_path) + if experiment_config.get('searchSpacePath'): + parse_relative_path(root_path, experiment_config, 'searchSpacePath') + if experiment_config.get('logDir'): + parse_relative_path(root_path, experiment_config, 'logDir') + if experiment_config.get('trial'): + # In AdaptDL mode, 'codeDir' shouldn't be parsed because it points to the path in the container. + if experiment_config.get('trainingServicePlatform') != 'adl': + parse_relative_path(root_path, experiment_config['trial'], 'codeDir') + if experiment_config['trial'].get('authFile'): + parse_relative_path(root_path, experiment_config['trial'], 'authFile') + if experiment_config['trial'].get('ps'): + if experiment_config['trial']['ps'].get('privateRegistryAuthPath'): + parse_relative_path(root_path, experiment_config['trial']['ps'], 'privateRegistryAuthPath') + if experiment_config['trial'].get('master'): + if experiment_config['trial']['master'].get('privateRegistryAuthPath'): + parse_relative_path(root_path, experiment_config['trial']['master'], 'privateRegistryAuthPath') + if experiment_config['trial'].get('worker'): + if experiment_config['trial']['worker'].get('privateRegistryAuthPath'): + parse_relative_path(root_path, experiment_config['trial']['worker'], 'privateRegistryAuthPath') + if experiment_config['trial'].get('taskRoles'): + for index in range(len(experiment_config['trial']['taskRoles'])): + if experiment_config['trial']['taskRoles'][index].get('privateRegistryAuthPath'): + parse_relative_path(root_path, experiment_config['trial']['taskRoles'][index], 'privateRegistryAuthPath') + if experiment_config.get('tuner'): + parse_relative_path(root_path, experiment_config['tuner'], 'codeDir') + if experiment_config.get('assessor'): + parse_relative_path(root_path, experiment_config['assessor'], 'codeDir') + if experiment_config.get('advisor'): + parse_relative_path(root_path, experiment_config['advisor'], 'codeDir') + # for BOHB when delivering a ConfigSpace file directly + if experiment_config['advisor'].get('classArgs') and experiment_config['advisor']['classArgs'].get('config_space'): + parse_relative_path(root_path, experiment_config['advisor']['classArgs'], 'config_space') + + if experiment_config.get('machineList'): + for index in range(len(experiment_config['machineList'])): + parse_relative_path(root_path, experiment_config['machineList'][index], 'sshKeyPath') + if experiment_config['trial'].get('paiConfigPath'): + parse_relative_path(root_path, experiment_config['trial'], 'paiConfigPath') + + # For frameworkcontroller a custom configuration path may be specified + if experiment_config.get('frameworkcontrollerConfig'): + if experiment_config['frameworkcontrollerConfig'].get('configPath'): + parse_relative_path(root_path, experiment_config['frameworkcontrollerConfig'], 'configPath') + +def set_default_values(experiment_config): + if experiment_config.get('maxExecDuration') is None: + experiment_config['maxExecDuration'] = '999d' + if experiment_config.get('maxTrialNum') is None: + experiment_config['maxTrialNum'] = 99999 + if experiment_config.get('maxTrialDuration') is None: + experiment_config['maxTrialDuration'] = '999d' + if experiment_config['trainingServicePlatform'] == 'remote' or \ + experiment_config['trainingServicePlatform'] == 'hybrid' and \ + 'remote' in experiment_config['hybridConfig']['trainingServicePlatforms']: + for index in range(len(experiment_config['machineList'])): + if experiment_config['machineList'][index].get('port') is None: + 
experiment_config['machineList'][index]['port'] = 22 + +def validate_all_content(experiment_config, config_path): + '''Validate whether experiment_config is valid''' + parse_path(experiment_config, config_path) + set_default_values(experiment_config) + + NNIConfigSchema().validate(experiment_config) + + if 'maxExecDuration' in experiment_config: + experiment_config['maxExecDuration'] = parse_time(experiment_config['maxExecDuration']) diff --git a/nni/tools/nnictl/legacy_launcher.py b/nni/tools/nnictl/legacy_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..73342884af4bbc6f7b2c793593397aa431e4952c --- /dev/null +++ b/nni/tools/nnictl/legacy_launcher.py @@ -0,0 +1,619 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import os +from pathlib import Path +import sys +import string +import random +import time +import tempfile +import re +from subprocess import Popen, check_call, CalledProcessError, PIPE, STDOUT +from nni.experiment.config import ExperimentConfig, convert +from nni.tools.annotation import expand_annotations, generate_search_space +from nni.tools.package_utils.tuner_factory import get_builtin_module_class_name +import nni_node # pylint: disable=import-error, wrong-import-order +from .launcher_utils import validate_all_content +from .rest_utils import rest_put, rest_post, check_rest_server, check_response +from .url_utils import cluster_metadata_url, experiment_url, get_local_urls, set_prefix_url +from .config_utils import Config, Experiments +from .common_utils import get_yml_content, get_json_content, print_error, print_normal, detect_port, get_user + +from .constants import NNI_HOME_DIR, ERROR_INFO, REST_TIME_OUT, EXPERIMENT_SUCCESS_INFO, LOG_HEADER +from .command_utils import check_output_command, kill_command +from .nnictl_utils import update_experiment + +k8s_training_services = ['kubeflow', 'frameworkcontroller', 'adl'] + +def get_log_path(experiment_id): + '''generate stdout and stderr log path''' + os.makedirs(os.path.join(NNI_HOME_DIR, experiment_id, 'log'), exist_ok=True) + stdout_full_path = os.path.join(NNI_HOME_DIR, experiment_id, 'log', 'nnictl_stdout.log') + stderr_full_path = os.path.join(NNI_HOME_DIR, experiment_id, 'log', 'nnictl_stderr.log') + return stdout_full_path, stderr_full_path + +def print_log_content(config_file_name): + '''print log information''' + stdout_full_path, stderr_full_path = get_log_path(config_file_name) + print_normal(' Stdout:') + print(check_output_command(stdout_full_path)) + print('\n\n') + print_normal(' Stderr:') + print(check_output_command(stderr_full_path)) + +def start_rest_server(port, platform, mode, experiment_id, foreground=False, log_dir=None, log_level=None, url_prefix=None): + '''Run nni manager process''' + if detect_port(port): + print_error('Port %s is used by another process, please reset the port!\n' \ + 'You could use \'nnictl create --help\' to get help information' % port) + exit(1) + + if (platform not in ['local', 'aml']) and detect_port(int(port) + 1): + print_error('%s mode need an additional adjacent port %d, and the port %d is used by another process!\n' \ + 'You could set another port to start experiment!\n' \ + 'You could use \'nnictl create --help\' to get help information' % (platform, (int(port) + 1), (int(port) + 1))) + exit(1) + + print_normal('Starting restful server...') + + entry_dir = nni_node.__path__[0] + if (not entry_dir) or (not os.path.exists(entry_dir)): + print_error('Fail to find nni under python library') + 
exit(1) + entry_file = os.path.join(entry_dir, 'main.js') + + if sys.platform == 'win32': + node_command = os.path.join(entry_dir, 'node.exe') + else: + node_command = os.path.join(entry_dir, 'node') + cmds = [node_command, '--max-old-space-size=4096', entry_file, '--port', str(port), '--mode', platform, \ + '--experiment_id', experiment_id] + if mode == 'view': + cmds += ['--start_mode', 'resume'] + cmds += ['--readonly', 'true'] + else: + cmds += ['--start_mode', mode] + if log_dir is not None: + cmds += ['--log_dir', log_dir] + if log_level is not None: + cmds += ['--log_level', log_level] + if foreground: + cmds += ['--foreground', 'true'] + if url_prefix: + _validate_prefix_path(url_prefix) + set_prefix_url(url_prefix) + cmds += ['--url_prefix', url_prefix] + + stdout_full_path, stderr_full_path = get_log_path(experiment_id) + with open(stdout_full_path, 'a+') as stdout_file, open(stderr_full_path, 'a+') as stderr_file: + start_time = time.time() + time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time)) + #add time information in the header of log files + log_header = LOG_HEADER % str(time_now) + stdout_file.write(log_header) + stderr_file.write(log_header) + if sys.platform == 'win32': + from subprocess import CREATE_NEW_PROCESS_GROUP + if foreground: + process = Popen(cmds, cwd=entry_dir, stdout=PIPE, stderr=STDOUT, creationflags=CREATE_NEW_PROCESS_GROUP) + else: + process = Popen(cmds, cwd=entry_dir, stdout=stdout_file, stderr=stderr_file, creationflags=CREATE_NEW_PROCESS_GROUP) + else: + if foreground: + process = Popen(cmds, cwd=entry_dir, stdout=PIPE, stderr=PIPE) + else: + process = Popen(cmds, cwd=entry_dir, stdout=stdout_file, stderr=stderr_file) + return process, int(start_time * 1000) + +def set_trial_config(experiment_config, port, config_file_name): + '''set trial configuration''' + request_data = dict() + request_data['trial_config'] = experiment_config['trial'] + response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) + if check_response(response): + return True + else: + print('Error message is {}'.format(response.text)) + _, stderr_full_path = get_log_path(config_file_name) + if response: + with open(stderr_full_path, 'a+') as fout: + fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) + return False + +def set_adl_config(experiment_config, port, config_file_name): + '''set adl configuration''' + adl_config_data = dict() + # hack for supporting v2 config, need refactor + adl_config_data['adl_config'] = {} + response = rest_put(cluster_metadata_url(port), json.dumps(adl_config_data), REST_TIME_OUT) + err_message = None + if not response or not response.status_code == 200: + if response is not None: + err_message = response.text + _, stderr_full_path = get_log_path(config_file_name) + with open(stderr_full_path, 'a+') as fout: + fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':'))) + return False, err_message + set_V1_common_config(experiment_config, port, config_file_name) + result, message = setNNIManagerIp(experiment_config, port, config_file_name) + if not result: + return result, message + #set trial_config + return set_trial_config(experiment_config, port, config_file_name), None + +def validate_response(response, config_file_name): + err_message = None + if not response or not response.status_code == 200: + if response is not None: + err_message = response.text + _, stderr_full_path = get_log_path(config_file_name) + with 
open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':')))
+        print_error('Error:' + err_message)
+        exit(1)
+
+# hack to fix v1 version_check and log_collection bug, need refactor
+def set_V1_common_config(experiment_config, port, config_file_name):
+    version_check = True
+    # debug mode should disable version check
+    if experiment_config.get('debug') is not None:
+        version_check = not experiment_config.get('debug')
+    # validate version check
+    if experiment_config.get('versionCheck') is not None:
+        version_check = experiment_config.get('versionCheck')
+    response = rest_put(cluster_metadata_url(port), json.dumps({'version_check': version_check}), REST_TIME_OUT)
+    validate_response(response, config_file_name)
+    if experiment_config.get('logCollection'):
+        data = json.dumps({'log_collection': experiment_config.get('logCollection')})
+        response = rest_put(cluster_metadata_url(port), data, REST_TIME_OUT)
+        validate_response(response, config_file_name)
+
+def setNNIManagerIp(experiment_config, port, config_file_name):
+    '''set nniManagerIp'''
+    if experiment_config.get('nniManagerIp') is None:
+        return True, None
+    ip_config_dict = dict()
+    ip_config_dict['nni_manager_ip'] = {'nniManagerIp': experiment_config['nniManagerIp']}
+    response = rest_put(cluster_metadata_url(port), json.dumps(ip_config_dict), REST_TIME_OUT)
+    err_message = None
+    if not response or not response.status_code == 200:
+        if response is not None:
+            err_message = response.text
+            _, stderr_full_path = get_log_path(config_file_name)
+            with open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':')))
+        return False, err_message
+    return True, None
+
+def set_kubeflow_config(experiment_config, port, config_file_name):
+    '''set kubeflow configuration'''
+    kubeflow_config_data = dict()
+    kubeflow_config_data['kubeflow_config'] = experiment_config['kubeflowConfig']
+    response = rest_put(cluster_metadata_url(port), json.dumps(kubeflow_config_data), REST_TIME_OUT)
+    err_message = None
+    if not response or not response.status_code == 200:
+        if response is not None:
+            err_message = response.text
+            _, stderr_full_path = get_log_path(config_file_name)
+            with open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':')))
+        return False, err_message
+    set_V1_common_config(experiment_config, port, config_file_name)
+    result, message = setNNIManagerIp(experiment_config, port, config_file_name)
+    if not result:
+        return result, message
+    # set trial_config
+    return set_trial_config(experiment_config, port, config_file_name), err_message
+
+def set_frameworkcontroller_config(experiment_config, port, config_file_name):
+    '''set frameworkcontroller configuration'''
+    frameworkcontroller_config_data = dict()
+    frameworkcontroller_config_data['frameworkcontroller_config'] = experiment_config['frameworkcontrollerConfig']
+    response = rest_put(cluster_metadata_url(port), json.dumps(frameworkcontroller_config_data), REST_TIME_OUT)
+    err_message = None
+    if not response or not response.status_code == 200:
+        if response is not None:
+            err_message = response.text
+            _, stderr_full_path = get_log_path(config_file_name)
+            with open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':')))
+        return False, err_message
+    set_V1_common_config(experiment_config, port,
config_file_name) + result, message = setNNIManagerIp(experiment_config, port, config_file_name) + if not result: + return result, message + #set trial_config + return set_trial_config(experiment_config, port, config_file_name), err_message + +def set_shared_storage(experiment_config, port, config_file_name): + if 'sharedStorage' in experiment_config: + data = json.dumps({'shared_storage_config': experiment_config['sharedStorage']}) + response = rest_put(cluster_metadata_url(port), data, REST_TIME_OUT) + err_message = None + if not response or not response.status_code == 200: + if response is not None: + err_message = response.text + _, stderr_full_path = get_log_path(config_file_name) + with open(stderr_full_path, 'a+') as fout: + fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':'))) + return False, err_message + return True, None + +def set_experiment_v1(experiment_config, mode, port, config_file_name): + '''Call startExperiment (rest POST /experiment) with yaml file content''' + request_data = dict() + request_data['authorName'] = experiment_config['authorName'] + request_data['experimentName'] = experiment_config['experimentName'] + request_data['trialConcurrency'] = experiment_config['trialConcurrency'] + request_data['maxExecDuration'] = experiment_config['maxExecDuration'] + request_data['maxExperimentDuration'] = str(experiment_config['maxExecDuration']) + 's' + request_data['maxTrialNum'] = experiment_config['maxTrialNum'] + request_data['maxTrialDuration'] = experiment_config['maxTrialDuration'] + request_data['maxTrialNumber'] = experiment_config['maxTrialNum'] + request_data['searchSpace'] = experiment_config.get('searchSpace') + request_data['trainingServicePlatform'] = experiment_config.get('trainingServicePlatform') + # hack for hotfix, fix config.trainingService undefined error, need refactor + request_data['trainingService'] = {'platform': experiment_config.get('trainingServicePlatform')} + if experiment_config.get('description'): + request_data['description'] = experiment_config['description'] + if experiment_config.get('multiPhase'): + request_data['multiPhase'] = experiment_config.get('multiPhase') + if experiment_config.get('multiThread'): + request_data['multiThread'] = experiment_config.get('multiThread') + if experiment_config.get('nniManagerIp'): + request_data['nniManagerIp'] = experiment_config.get('nniManagerIp') + if experiment_config.get('advisor'): + request_data['advisor'] = experiment_config['advisor'] + if request_data['advisor'].get('gpuNum'): + print_error('gpuNum is deprecated, please use gpuIndices instead.') + if request_data['advisor'].get('gpuIndices') and isinstance(request_data['advisor'].get('gpuIndices'), int): + request_data['advisor']['gpuIndices'] = str(request_data['advisor'].get('gpuIndices')) + else: + request_data['tuner'] = experiment_config['tuner'] + if request_data['tuner'].get('gpuNum'): + print_error('gpuNum is deprecated, please use gpuIndices instead.') + if request_data['tuner'].get('gpuIndices') and isinstance(request_data['tuner'].get('gpuIndices'), int): + request_data['tuner']['gpuIndices'] = str(request_data['tuner'].get('gpuIndices')) + if 'assessor' in experiment_config: + request_data['assessor'] = experiment_config['assessor'] + if request_data['assessor'].get('gpuNum'): + print_error('gpuNum is deprecated, please remove it from your config file.') + #debug mode should disable version check + if experiment_config.get('debug') is not None: + request_data['versionCheck'] = not 
experiment_config.get('debug')
+    # validate version check
+    if experiment_config.get('versionCheck') is not None:
+        request_data['versionCheck'] = experiment_config.get('versionCheck')
+    if experiment_config.get('logCollection'):
+        request_data['logCollection'] = experiment_config.get('logCollection')
+    request_data['clusterMetaData'] = []
+    if experiment_config['trainingServicePlatform'] == 'kubeflow':
+        request_data['clusterMetaData'].append(
+            {'key': 'kubeflow_config', 'value': experiment_config['kubeflowConfig']})
+        request_data['clusterMetaData'].append(
+            {'key': 'trial_config', 'value': experiment_config['trial']})
+    elif experiment_config['trainingServicePlatform'] == 'frameworkcontroller':
+        request_data['clusterMetaData'].append(
+            {'key': 'frameworkcontroller_config', 'value': experiment_config['frameworkcontrollerConfig']})
+        request_data['clusterMetaData'].append(
+            {'key': 'trial_config', 'value': experiment_config['trial']})
+    elif experiment_config['trainingServicePlatform'] == 'adl':
+        request_data['clusterMetaData'].append(
+            {'key': 'trial_config', 'value': experiment_config['trial']})
+    response = rest_post(experiment_url(port), json.dumps(request_data), REST_TIME_OUT, show_error=True)
+    if check_response(response):
+        return response
+    else:
+        _, stderr_full_path = get_log_path(config_file_name)
+        if response is not None:
+            with open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':')))
+            print_error('Failed to set experiment, the error message is {}'.format(response.text))
+        return None
+
+def set_experiment_v2(experiment_config, mode, port, config_file_name):
+    '''Call startExperiment (rest POST /experiment) with the yaml file content'''
+    response = rest_post(experiment_url(port), json.dumps(experiment_config), REST_TIME_OUT, show_error=True)
+    if check_response(response):
+        return response
+    else:
+        _, stderr_full_path = get_log_path(config_file_name)
+        if response is not None:
+            with open(stderr_full_path, 'a+') as fout:
+                fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':')))
+            print_error('Failed to set experiment, the error message is {}'.format(response.text))
+        return None
+
+def set_platform_config(platform, experiment_config, port, config_file_name, rest_process):
+    '''call set_cluster_metadata for a specific platform'''
+    print_normal('Setting {0} config...'.format(platform))
+    config_result, err_msg = None, None
+    if platform == 'adl':
+        config_result, err_msg = set_adl_config(experiment_config, port, config_file_name)
+    elif platform == 'kubeflow':
+        config_result, err_msg = set_kubeflow_config(experiment_config, port, config_file_name)
+    elif platform == 'frameworkcontroller':
+        config_result, err_msg = set_frameworkcontroller_config(experiment_config, port, config_file_name)
+    else:
+        raise Exception(ERROR_INFO % 'Unsupported platform!')
+    if config_result:
+        config_result, err_msg = set_shared_storage(experiment_config, port, config_file_name)
+    if config_result:
+        print_normal('Successfully set {0} config!'.format(platform))
+    else:
+        print_error('Failed! Error is: {}'.format(err_msg))
+        try:
+            kill_command(rest_process.pid)
+        except Exception:
+            raise Exception(ERROR_INFO % 'Rest server stopped!')
+        exit(1)
+
+def launch_experiment(args, experiment_config, mode, experiment_id, config_version):
+    '''follow the steps to start the rest server and the experiment'''
+    # check packages for tuner
+    package_name, module_name = None, None
+    if experiment_config.get('tuner') and experiment_config['tuner'].get('builtinTunerName'):
+        package_name = experiment_config['tuner']['builtinTunerName']
+        module_name, _ = get_builtin_module_class_name('tuners', package_name)
+    elif experiment_config.get('advisor') and experiment_config['advisor'].get('builtinAdvisorName'):
+        package_name = experiment_config['advisor']['builtinAdvisorName']
+        module_name, _ = get_builtin_module_class_name('advisors', package_name)
+    if package_name and module_name:
+        try:
+            stdout_full_path, stderr_full_path = get_log_path(experiment_id)
+            with open(stdout_full_path, 'a+') as stdout_file, open(stderr_full_path, 'a+') as stderr_file:
+                check_call([sys.executable, '-c', 'import %s' % (module_name)], stdout=stdout_file, stderr=stderr_file)
+        except CalledProcessError:
+            print_error('Failed to import package %s.' % (package_name))
+            print_log_content(experiment_id)
+            if package_name in ['SMAC', 'BOHB', 'PPOTuner']:
+                print_error(f'The dependencies for {package_name} can be installed through pip install nni[{package_name}]')
+            raise
+    if config_version == 1:
+        log_dir = experiment_config['logDir'] if experiment_config.get('logDir') else NNI_HOME_DIR
+    else:
+        log_dir = experiment_config['experimentWorkingDirectory'] if experiment_config.get('experimentWorkingDirectory') else NNI_HOME_DIR
+    log_level = experiment_config['logLevel'] if experiment_config.get('logLevel') else 'info'
+    # view mode does not need the debug function: viewing an experiment creates no new logs
+    foreground = False
+    if mode != 'view':
+        foreground = args.foreground
+        if log_level not in ['trace', 'debug'] and (args.debug or experiment_config.get('debug') is True):
+            log_level = 'debug'
+    # start rest server
+    if config_version == 1:
+        platform = experiment_config['trainingServicePlatform']
+    elif isinstance(experiment_config['trainingService'], list):
+        platform = 'hybrid'
+    else:
+        platform = experiment_config['trainingService']['platform']
+
+    rest_process, start_time = start_rest_server(args.port, platform, \
+        mode, experiment_id, foreground, log_dir, log_level, args.url_prefix)
+    # save experiment information
+    Experiments().add_experiment(experiment_id, args.port, start_time,
+                                 platform,
+                                 experiment_config.get('experimentName', 'N/A'),
+                                 pid=rest_process.pid, logDir=log_dir, prefixUrl=args.url_prefix)
+    # Deal with annotation
+    if experiment_config.get('useAnnotation'):
+        path = os.path.join(tempfile.gettempdir(), get_user(), 'nni', 'annotation')
+        if not os.path.isdir(path):
+            os.makedirs(path)
+        path = tempfile.mkdtemp(dir=path)
+        if config_version == 1:
+            nas_mode = experiment_config['trial'].get('nasMode', 'classic_mode')
+            code_dir = expand_annotations(experiment_config['trial']['codeDir'], path, nas_mode=nas_mode)
+            experiment_config['trial']['codeDir'] = code_dir
+        else:
+            code_dir = expand_annotations(experiment_config['trialCodeDirectory'], path)
+            experiment_config['trialCodeDirectory'] = code_dir
+        search_space = generate_search_space(code_dir)
+        experiment_config['searchSpace'] = search_space
+        assert search_space, ERROR_INFO % 'Generated search space is empty'
elif config_version == 1: + if experiment_config.get('searchSpacePath'): + search_space = get_json_content(experiment_config.get('searchSpacePath')) + experiment_config['searchSpace'] = search_space + else: + experiment_config['searchSpace'] = '' + + # check rest server + running, _ = check_rest_server(args.port) + if running: + print_normal('Successfully started Restful server!') + else: + print_error('Restful server start failed!') + print_log_content(experiment_id) + try: + kill_command(rest_process.pid) + except Exception: + raise Exception(ERROR_INFO % 'Rest server stopped!') + exit(1) + if config_version == 1 and mode != 'view': + # set platform configuration + set_platform_config(experiment_config['trainingServicePlatform'], experiment_config, args.port,\ + experiment_id, rest_process) + + # start a new experiment + print_normal('Starting experiment...') + # set debug configuration + if mode != 'view' and experiment_config.get('debug') is None: + experiment_config['debug'] = args.debug + if config_version == 1: + response = set_experiment_v1(experiment_config, mode, args.port, experiment_id) + else: + response = set_experiment_v2(experiment_config, mode, args.port, experiment_id) + if response: + if experiment_id is None: + experiment_id = json.loads(response.text).get('experiment_id') + else: + print_error('Start experiment failed!') + print_log_content(experiment_id) + try: + kill_command(rest_process.pid) + except Exception: + raise Exception(ERROR_INFO % 'Restful server stopped!') + exit(1) + url_prefix_format = '' if args.url_prefix is None else '/{0}'.format(args.url_prefix) + if experiment_config.get('nniManagerIp'): + web_ui_url_list = ['http://{0}:{1}{2}'.format(experiment_config['nniManagerIp'], str(args.port), url_prefix_format)] + else: + web_ui_url_list = get_local_urls(args.port, url_prefix_format) + Experiments().update_experiment(experiment_id, 'webuiUrl', web_ui_url_list) + + print_normal(EXPERIMENT_SUCCESS_INFO % (experiment_id, ' '.join(web_ui_url_list))) + if mode != 'view' and args.foreground: + try: + while True: + log_content = rest_process.stdout.readline().strip().decode('utf-8') + print(log_content) + except KeyboardInterrupt: + kill_command(rest_process.pid) + print_normal('Stopping experiment...') + +def _validate_v1(config, path): + try: + validate_all_content(config, path) + except Exception as e: + print_error(f'Config V1 validation failed: {repr(e)}') + exit(1) + +def _validate_v2(config, path): + base_path = Path(path).parent + try: + conf = ExperimentConfig(_base_path=base_path, **config) + return conf.json() + except Exception as e: + print_error(f'Config V2 validation failed: {repr(e)}') + +def _validate_prefix_path(path): + assert not path.startswith('/'), 'URL prefix should not start with "/".' + parts = path.split('/') + valid = all(re.match('^[A-Za-z0-9_-]*$', part) for part in parts) + assert valid, 'URL prefix should only contain letter, number, underscore, and hyphen.' 
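+
+# A few illustrative inputs for _validate_prefix_path above (the prefixes are made up):
+#
+#     _validate_prefix_path('nni/exp-1')   # ok: letters, digits, '_' and '-' per segment
+#     _validate_prefix_path('/nni')        # AssertionError: must not start with '/'
+#     _validate_prefix_path('nni?x')       # AssertionError: '?' is not an allowed character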
+ +def create_experiment(args): + '''start a new experiment''' + experiment_id = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + config_path = os.path.abspath(args.config) + if not os.path.exists(config_path): + print_error('Please set correct config path!') + exit(1) + config_yml = get_yml_content(config_path) + + if 'trainingServicePlatform' in config_yml: + _validate_v1(config_yml, config_path) + platform = config_yml['trainingServicePlatform'] + if platform in k8s_training_services: + schema = 1 + config_v1 = config_yml + else: + schema = 2 + config_v2 = convert.to_v2(config_yml).json() + else: + config_v2 = _validate_v2(config_yml, config_path) + schema = 2 + + try: + if schema == 1: + launch_experiment(args, config_v1, 'new', experiment_id, 1) + else: + launch_experiment(args, config_v2, 'new', experiment_id, 2) + except Exception as exception: + restServerPid = Experiments().get_all_experiments().get(experiment_id, {}).get('pid') + if restServerPid: + kill_command(restServerPid) + print_error(exception) + exit(1) + +def manage_stopped_experiment(args, mode): + '''view a stopped experiment''' + update_experiment() + experiments_config = Experiments() + experiments_dict = experiments_config.get_all_experiments() + experiment_id = None + #find the latest stopped experiment + if not args.id: + print_error('Please set experiment id! \nYou could use \'nnictl {0} id\' to {0} a stopped experiment!\n' \ + 'You could use \'nnictl experiment list --all\' to show all experiments!\n' \ + 'If your experiment is not started in current machine, you could specify experiment folder using ' \ + '--experiment_dir argument'.format(mode)) + exit(1) + else: + if experiments_dict.get(args.id) is None: + print_error('Id %s not exist!' % args.id) + exit(1) + if experiments_dict[args.id]['status'] != 'STOPPED': + print_error('Only stopped experiments can be {0}ed!'.format(mode)) + exit(1) + experiment_id = args.id + print_normal('{0} experiment {1}...'.format(mode, experiment_id)) + experiment_config = Config(experiment_id, experiments_dict[args.id]['logDir']).get_config() + experiments_config.update_experiment(args.id, 'port', args.port) + args.url_prefix = experiments_dict[args.id]['prefixUrl'] + assert 'trainingService' in experiment_config or 'trainingServicePlatform' in experiment_config + try: + if 'trainingServicePlatform' in experiment_config: + experiment_config['logDir'] = experiments_dict[args.id]['logDir'] + launch_experiment(args, experiment_config, mode, experiment_id, 1) + else: + experiment_config['experimentWorkingDirectory'] = experiments_dict[args.id]['logDir'] + launch_experiment(args, experiment_config, mode, experiment_id, 2) + except Exception as exception: + restServerPid = Experiments().get_all_experiments().get(experiment_id, {}).get('pid') + if restServerPid: + kill_command(restServerPid) + print_error(exception) + exit(1) + +def view_experiment(args): + '''view a stopped experiment''' + if args.experiment_dir: + manage_external_experiment(args, 'view') + else: + manage_stopped_experiment(args, 'view') + +def resume_experiment(args): + '''resume an experiment''' + '''view a stopped experiment''' + if args.experiment_dir: + manage_external_experiment(args, 'resume') + else: + manage_stopped_experiment(args, 'resume') + +def manage_external_experiment(args, mode): + '''view a experiment from external path''' + # validate arguments + if not os.path.exists(args.experiment_dir): + print_error('Folder %s does not exist!' 
% args.experiment_dir) + exit(1) + if not os.path.isdir(args.experiment_dir): + print_error('Path %s is not folder directory!' % args.experiment_dir) + exit(1) + if args.id: + experiment_id = args.id + log_dir = args.experiment_dir + else: + print_normal('NNI can not detect experiment id in argument, will use last folder name as experiment id in experiment_dir argument.') + experiment_id = Path(args.experiment_dir).name + log_dir = str(Path(args.experiment_dir).parent) + if not experiment_id: + print_error("Please set experiment id argument, or add id as the last folder name in experiment_dir argument.") + exit(1) + args.url_prefix = None + experiment_config = Config(experiment_id, log_dir).get_config() + assert 'trainingService' in experiment_config or 'trainingServicePlatform' in experiment_config + try: + if 'trainingServicePlatform' in experiment_config: + experiment_config['logDir'] = log_dir + launch_experiment(args, experiment_config, mode, experiment_id, 1) + else: + experiment_config['experimentWorkingDirectory'] = log_dir + launch_experiment(args, experiment_config, mode, experiment_id, 2) + except Exception as exception: + print_error(exception) + exit(1) diff --git a/nni/tools/nnictl/nnictl.py b/nni/tools/nnictl/nnictl.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0d128bdf0b283ca18528d0f40396e00caa8202 --- /dev/null +++ b/nni/tools/nnictl/nnictl.py @@ -0,0 +1,299 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import argparse +import logging +import os +import pkg_resources +from colorama import init +from .common_utils import print_error +from .launcher import create_experiment, resume_experiment, view_experiment +from .updater import update_searchspace, update_concurrency, update_duration, update_trialnum, import_data +from .nnictl_utils import stop_experiment, trial_ls, trial_kill, list_experiment, experiment_status,\ + log_trial, experiment_clean, platform_clean, experiment_list, \ + monitor_experiment, export_trials_data, trial_codegen, webui_url, \ + get_config, log_stdout, log_stderr, search_space_auto_gen, \ + save_experiment, load_experiment +from .algo_management import algo_reg, algo_unreg, algo_show, algo_list +from .constants import DEFAULT_REST_PORT +from .import ts_management + +init(autoreset=True) + +if os.environ.get('COVERAGE_PROCESS_START'): + import coverage + coverage.process_startup() + +def nni_info(*args): + if args[0].version: + try: + print(pkg_resources.get_distribution('nni').version) + except pkg_resources.ResolutionError: + print_error('Get version failed, please use `pip3 list | grep nni` to check nni version!') + else: + print('please run "nnictl {positional argument} --help" to see nnictl guidance') + +def parse_args(): + logging.getLogger().setLevel(logging.ERROR) + + '''Definite the arguments users need to follow and input''' + parser = argparse.ArgumentParser(prog='nnictl', description='use nnictl command to control nni experiments') + parser.add_argument('--version', '-v', action='store_true') + parser.set_defaults(func=nni_info) + + # create subparsers for args with sub values + subparsers = parser.add_subparsers() + + # parse the command of auto generating search space + parser_start = subparsers.add_parser('ss_gen', help='automatically generate search space file from trial code') + parser_start.add_argument('--trial_command', '-t', required=True, dest='trial_command', help='the command for running trial code') + parser_start.add_argument('--trial_dir', '-d', default='./', 
+    parser_start.add_argument('--file', '-f', default='nni_auto_gen_search_space.json', dest='file', help='the path of search space file')
+    parser_start.set_defaults(func=search_space_auto_gen)
+
+    # parse start command
+    parser_start = subparsers.add_parser('create', help='create a new experiment')
+    parser_start.add_argument('--config', '-c', required=True, dest='config', help='the path of yaml config file')
+    parser_start.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', type=int, help='the port of restful server')
+    parser_start.add_argument('--debug', '-d', action='store_true', help='set debug mode')
+    parser_start.add_argument('--url_prefix', '-u', dest='url_prefix', help='set the prefix url')
+    parser_start.add_argument('--foreground', '-f', action='store_true', help='set foreground mode, print log content to terminal')
+    parser_start.set_defaults(func=create_experiment)
+
+    # parse resume command
+    parser_resume = subparsers.add_parser('resume', help='resume a stopped experiment')
+    parser_resume.add_argument('id', help='The id of the experiment you want to resume')
+    parser_resume.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', type=int, help='the port of restful server')
+    parser_resume.add_argument('--debug', '-d', action='store_true', help='set debug mode')
+    parser_resume.add_argument('--foreground', '-f', action='store_true', help='set foreground mode, print log content to terminal')
+    parser_resume.add_argument('--experiment_dir', '-e', help='resume the experiment from an external folder, specify the full path of ' \
+                               'the experiment folder')
+    parser_resume.set_defaults(func=resume_experiment)
+
+    # parse view command
+    parser_view = subparsers.add_parser('view', help='view a stopped experiment')
+    parser_view.add_argument('id', help='The id of the experiment you want to view')
+    parser_view.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', type=int, help='the port of restful server')
+    parser_view.add_argument('--experiment_dir', '-e', help='view the experiment from an external folder, specify the full path of ' \
+                             'the experiment folder')
+    parser_view.set_defaults(func=view_experiment)
+
+    # parse update command
+    parser_updater = subparsers.add_parser('update', help='update the experiment')
+    #add subparsers for parser_updater
+    parser_updater_subparsers = parser_updater.add_subparsers()
+    parser_updater_searchspace = parser_updater_subparsers.add_parser('searchspace', help='update searchspace')
+    parser_updater_searchspace.add_argument('id', nargs='?', help='the id of experiment')
+    parser_updater_searchspace.add_argument('--filename', '-f', required=True)
+    parser_updater_searchspace.set_defaults(func=update_searchspace)
+    parser_updater_concurrency = parser_updater_subparsers.add_parser('concurrency', help='update concurrency')
+    parser_updater_concurrency.add_argument('id', nargs='?', help='the id of experiment')
+    parser_updater_concurrency.add_argument('--value', '-v', required=True)
+    parser_updater_concurrency.set_defaults(func=update_concurrency)
+    parser_updater_duration = parser_updater_subparsers.add_parser('duration', help='update duration')
+    parser_updater_duration.add_argument('id', nargs='?', help='the id of experiment')
+    parser_updater_duration.add_argument('--value', '-v', required=True, help='the unit of time should be in {\'s\', \'m\', \'h\', \'d\'}')
+    parser_updater_duration.set_defaults(func=update_duration)
+    parser_updater_trialnum = parser_updater_subparsers.add_parser('trialnum', help='update maxtrialnum')
+    parser_updater_trialnum.add_argument('id', nargs='?', help='the id of experiment')
+    parser_updater_trialnum.add_argument('--value', '-v', required=True)
+    parser_updater_trialnum.set_defaults(func=update_trialnum)
+
+    #parse stop command
+    parser_stop = subparsers.add_parser('stop', help='stop the experiment')
+    parser_stop.add_argument('id', nargs='?', help='the id of experiment, use \'all\' to stop all running experiments')
+    parser_stop.add_argument('--port', '-p', dest='port', type=int, help='the port of restful server')
+    parser_stop.add_argument('--all', '-a', action='store_true', help='stop all running experiments')
+    parser_stop.set_defaults(func=stop_experiment)
+
+    #parse trial command
+    parser_trial = subparsers.add_parser('trial', help='get trial information')
+    #add subparsers for parser_trial
+    parser_trial_subparsers = parser_trial.add_subparsers()
+    parser_trial_ls = parser_trial_subparsers.add_parser('ls', help='list trial jobs')
+    parser_trial_ls.add_argument('id', nargs='?', help='the id of experiment')
+    parser_trial_ls.add_argument('--head', type=int, help='list the top trials ranked by the default metric')
+    parser_trial_ls.add_argument('--tail', type=int, help='list the bottom trials ranked by the default metric')
+    parser_trial_ls.set_defaults(func=trial_ls)
+    parser_trial_kill = parser_trial_subparsers.add_parser('kill', help='kill trial jobs')
+    parser_trial_kill.add_argument('id', nargs='?', help='the id of experiment')
+    parser_trial_kill.add_argument('--trial_id', '-T', required=True, dest='trial_id', help='the id of trial to be killed')
+    parser_trial_kill.set_defaults(func=trial_kill)
+    parser_trial_codegen = parser_trial_subparsers.add_parser('codegen', help='generate trial code for a specific trial')
+    parser_trial_codegen.add_argument('id', nargs='?', help='the id of experiment')
+    parser_trial_codegen.add_argument('--trial_id', '-T', required=True, dest='trial_id', help='the id of trial to do code generation')
+    parser_trial_codegen.set_defaults(func=trial_codegen)
+
+    #parse experiment command
+    parser_experiment = subparsers.add_parser('experiment', help='get experiment information')
+    #add subparsers for parser_experiment
+    parser_experiment_subparsers = parser_experiment.add_subparsers()
+    parser_experiment_show = parser_experiment_subparsers.add_parser('show', help='show the information of experiment')
+    parser_experiment_show.add_argument('id', nargs='?', help='the id of experiment')
+    parser_experiment_show.set_defaults(func=list_experiment)
+    parser_experiment_status = parser_experiment_subparsers.add_parser('status', help='show the status of experiment')
+    parser_experiment_status.add_argument('id', nargs='?', help='the id of experiment')
+    parser_experiment_status.set_defaults(func=experiment_status)
+    parser_experiment_list = parser_experiment_subparsers.add_parser('list', help='list the ids of all running experiments')
+    parser_experiment_list.add_argument('--all', action='store_true', default=False, help='list all experiments, including stopped ones')
+    parser_experiment_list.set_defaults(func=experiment_list)
+    parser_experiment_clean = parser_experiment_subparsers.add_parser('delete', help='clean up the experiment data')
+    parser_experiment_clean.add_argument('id', nargs='?', help='the id of experiment')
+    parser_experiment_clean.add_argument('--all', action='store_true', default=False, help='delete all experiments')
+    parser_experiment_clean.set_defaults(func=experiment_clean)
+    #import tuning data
+    parser_import_data = parser_experiment_subparsers.add_parser('import', help='import additional data')
+    parser_import_data.add_argument('id', nargs='?', help='the id of experiment')
+    parser_import_data.add_argument('--filename', '-f', required=True)
+    parser_import_data.set_defaults(func=import_data)
+    #export trial data
+    parser_trial_export = parser_experiment_subparsers.add_parser('export', help='export trial job results to csv or json')
+    parser_trial_export.add_argument('id', nargs='?', help='the id of experiment')
+    parser_trial_export.add_argument('--type', '-t', choices=['json', 'csv'], required=True, dest='type', help='target file type')
+    parser_trial_export.add_argument('--filename', '-f', required=True, dest='path', help='target file path')
+    parser_trial_export.add_argument('--intermediate', '-i', action='store_true',
+                                     default=False, help='include intermediate results in the export')
+    parser_trial_export.set_defaults(func=export_trials_data)
+    #save an NNI experiment
+    parser_save_experiment = parser_experiment_subparsers.add_parser('save', help='save an experiment')
+    parser_save_experiment.add_argument('id', nargs='?', help='the id of experiment')
+    parser_save_experiment.add_argument('--path', '-p', required=False, help='the folder path to store nni experiment data, \
+                                        defaults to the current working directory')
+    parser_save_experiment.add_argument('--saveCodeDir', '-s', action='store_true', default=False, help='save the codeDir data \
+                                        of the experiment')
+    parser_save_experiment.set_defaults(func=save_experiment)
+    #load an NNI experiment
+    parser_load_experiment = parser_experiment_subparsers.add_parser('load', help='load an experiment')
+    parser_load_experiment.add_argument('--path', '-p', required=True, help='the path of nni package file')
+    parser_load_experiment.add_argument('--codeDir', '-c', required=True, help='the path of codeDir for the loaded experiment, \
+                                        the code in the loaded experiment package will also be extracted to this path')
+    parser_load_experiment.add_argument('--logDir', '-l', required=False, help='the path of logDir for the loaded experiment')
+    parser_load_experiment.add_argument('--searchSpacePath', '-s', required=False, help='the path of the search space file for the \
+                                        loaded experiment, including the file name; defaults to $codeDir/search_space.json')
+    parser_load_experiment.set_defaults(func=load_experiment)
+
+    #parse platform command
+    parser_platform = subparsers.add_parser('platform', help='get platform information')
+    #add subparsers for parser_platform
+    parser_platform_subparsers = parser_platform.add_subparsers()
+    parser_platform_clean = parser_platform_subparsers.add_parser('clean', help='clean up the platform data')
+    parser_platform_clean.add_argument('--config', '-c', required=True, dest='config', help='the path of yaml config file')
+    parser_platform_clean.set_defaults(func=platform_clean)
+
+    #TODO:finish webui function
+    #parse board command
+    parser_webui = subparsers.add_parser('webui', help='get web ui information')
+    #add subparsers for parser_board
+    parser_webui_subparsers = parser_webui.add_subparsers()
+    parser_webui_url = parser_webui_subparsers.add_parser('url', help='show the url of web ui')
+    parser_webui_url.add_argument('id', nargs='?', help='the id of experiment')
+    parser_webui_url.set_defaults(func=webui_url)
+
+    #parse config command
+    parser_config = subparsers.add_parser('config', help='get config information')
+    parser_config_subparsers = parser_config.add_subparsers()
+    parser_config_show = parser_config_subparsers.add_parser('show', help='show the information of config')
+    parser_config_show.add_argument('id', nargs='?', help='the id of experiment')
+    parser_config_show.set_defaults(func=get_config)
+
+    #parse log command
+    parser_log = subparsers.add_parser('log', help='get log information')
+    # add subparsers for parser_log
+    parser_log_subparsers = parser_log.add_subparsers()
+    parser_log_stdout = parser_log_subparsers.add_parser('stdout', help='get stdout information')
+    parser_log_stdout.add_argument('id', nargs='?', help='the id of experiment')
+    parser_log_stdout.add_argument('--tail', '-T', dest='tail', type=int, help='show the last N lines of stdout')
+    parser_log_stdout.add_argument('--head', '-H', dest='head', type=int, help='show the first N lines of stdout')
+    parser_log_stdout.add_argument('--path', action='store_true', default=False, help='get the path of stdout file')
+    parser_log_stdout.set_defaults(func=log_stdout)
+    parser_log_stderr = parser_log_subparsers.add_parser('stderr', help='get stderr information')
+    parser_log_stderr.add_argument('id', nargs='?', help='the id of experiment')
+    parser_log_stderr.add_argument('--tail', '-T', dest='tail', type=int, help='show the last N lines of stderr')
+    parser_log_stderr.add_argument('--head', '-H', dest='head', type=int, help='show the first N lines of stderr')
+    parser_log_stderr.add_argument('--path', action='store_true', default=False, help='get the path of stderr file')
+    parser_log_stderr.set_defaults(func=log_stderr)
+    parser_log_trial = parser_log_subparsers.add_parser('trial', help='get trial log path')
+    parser_log_trial.add_argument('id', nargs='?', help='the id of experiment')
+    parser_log_trial.add_argument('--trial_id', '-T', dest='trial_id', help='find trial log path by id')
+    parser_log_trial.set_defaults(func=log_trial)
+
+    #parse algo command
+    parser_algo = subparsers.add_parser('algo', help='control nni builtin tuner, assessor and advisor algorithms')
+    # add subparsers for parser_algo
+    parser_algo_subparsers = parser_algo.add_subparsers()
+    parser_algo_reg = parser_algo_subparsers.add_parser(
+        'register',
+        aliases=('reg',),
+        help='''register algorithms as nni builtin algorithm, for example:
+            nnictl algo reg --meta_path <path_to_meta_file>
+            where <path_to_meta_file> is the path to a meta data file in yml format,
+            reference the nni document and the examples/tuners/customized_tuner example
+            for the format of the yml file.'''
+    )
+    parser_algo_reg.add_argument('--meta_path', '-m', dest='meta_path', help='path to the meta file', required=True)
+    parser_algo_reg.set_defaults(func=algo_reg)
+
+    parser_algo_unreg = parser_algo_subparsers.add_parser('unregister', aliases=('unreg',), help='unregister algorithm')
+    parser_algo_unreg.add_argument('name', nargs=1, help='builtin name of the algorithm')
+    parser_algo_unreg.set_defaults(func=algo_unreg)
+
+    parser_algo_show = parser_algo_subparsers.add_parser('show', help='show the information of algorithm')
+    parser_algo_show.add_argument('name', nargs=1, help='builtin name of the algorithm')
+    parser_algo_show.set_defaults(func=algo_show)
+
+    parser_algo_list = parser_algo_subparsers.add_parser('list', help='list registered algorithms')
+    parser_algo_list.set_defaults(func=algo_list)
+
+    #parse trainingservice command
+    parser_ts = subparsers.add_parser('trainingservice', help='control training service')
+    # add subparsers for parser_ts
+    parser_ts_subparsers = parser_ts.add_subparsers()
+
+    parser_ts_reg = parser_ts_subparsers.add_parser('register', help='register training service')
+    parser_ts_reg.add_argument('--package', dest='package', help='package name', required=True)
+    parser_ts_reg.set_defaults(func=ts_management.register)
+
+    parser_ts_unreg = parser_ts_subparsers.add_parser('unregister', help='unregister training service')
+    parser_ts_unreg.add_argument('--package', dest='package', help='package name', required=True)
+    parser_ts_unreg.set_defaults(func=ts_management.unregister)
+
+    parser_ts_list = parser_ts_subparsers.add_parser('list', help='list custom training services')
+    parser_ts_list.set_defaults(func=ts_management.list_services)
+
+    # Show a message that the nnictl package command is replaced by nnictl algo; to be removed in a future release.
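+    # Note: prefix_chars='\n' on the stub parser below effectively disables option
+    # parsing ('\n' never appears in argv tokens), so any `nnictl package ...`
+    # invocation, including -h, falls through to the replacement hint message.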
+    def show_message_for_nnictl_package(args):
+        print_error('nnictl package command is replaced by nnictl algo, please run nnictl algo -h to show the usage')
+
+    parser_package_subparsers = subparsers.add_parser('package', help='this argument is replaced by algo', prefix_chars='\n')
+    parser_package_subparsers.add_argument('args', nargs=argparse.REMAINDER)
+    parser_package_subparsers.set_defaults(func=show_message_for_nnictl_package)
+
+    #parse top command
+    parser_top = subparsers.add_parser('top', help='monitor the experiment')
+    parser_top.add_argument('--time', '-t', dest='time', type=int, default=3, help='the time interval to refresh the experiment status, ' \
+                            'in seconds')
+    parser_top.set_defaults(func=monitor_experiment)
+
+    # jupyter-extension command
+    jupyter_parser = subparsers.add_parser('jupyter-extension', help='install or uninstall JupyterLab extension (internal preview)')
+    jupyter_subparsers = jupyter_parser.add_subparsers()
+    jupyter_install_parser = jupyter_subparsers.add_parser('install', help='install JupyterLab extension')
+    jupyter_install_parser.set_defaults(func=_jupyter_install)
+    jupyter_uninstall_parser = jupyter_subparsers.add_parser('uninstall', help='uninstall JupyterLab extension')
+    jupyter_uninstall_parser.set_defaults(func=_jupyter_uninstall)
+
+    args = parser.parse_args()
+    args.func(args)
+
+def _jupyter_install(_args):
+    import nni.tools.jupyter_extension.management as jupyter_management
+    jupyter_management.install()
+    print('Successfully installed JupyterLab extension')
+
+def _jupyter_uninstall(_args):
+    import nni.tools.jupyter_extension.management as jupyter_management
+    jupyter_management.uninstall()
+    print('Successfully uninstalled JupyterLab extension')
+
+if __name__ == '__main__':
+    parse_args()
diff --git a/nni/tools/nnictl/nnictl_utils.py b/nni/tools/nnictl/nnictl_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f7d0f6895f5e1d9a2664f8c83b063a5dddab77a
--- /dev/null
+++ b/nni/tools/nnictl/nnictl_utils.py
@@ -0,0 +1,960 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
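+
+# Helper implementations for the nnictl sub-commands (experiment, trial, log,
+# platform and monitor management); each handler receives the parsed argparse namespace.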
+ +import csv +import os +import sys +import json +import time +import shutil +import subprocess +from functools import cmp_to_key +import traceback +from datetime import datetime, timezone +from subprocess import Popen +from nni.tools.annotation import expand_annotations +from .rest_utils import rest_get, rest_delete, check_rest_server_quick, check_response +from .url_utils import trial_jobs_url, experiment_url, trial_job_id_url, export_data_url, metric_data_url +from .config_utils import Config, Experiments +from .constants import NNI_HOME_DIR, EXPERIMENT_INFORMATION_FORMAT, EXPERIMENT_DETAIL_FORMAT, EXPERIMENT_MONITOR_INFO, \ + TRIAL_MONITOR_HEAD, TRIAL_MONITOR_CONTENT, TRIAL_MONITOR_TAIL, REST_TIME_OUT +from .common_utils import print_normal, print_error, print_warning, detect_process, get_yml_content, generate_temp_dir +from .common_utils import print_green +from .command_utils import check_output_command, kill_command +from .ssh_utils import create_ssh_sftp_client, remove_remote_directory + +def get_experiment_time(port): + '''get the startTime and endTime of an experiment''' + response = rest_get(experiment_url(port), REST_TIME_OUT) + if response and check_response(response): + content = json.loads(response.text) + return content.get('startTime'), content.get('endTime') + return None, None + +def get_experiment_status(port): + '''get the status of an experiment''' + result, response = check_rest_server_quick(port) + if result: + return json.loads(response.text).get('status') + return None + +def update_experiment(): + '''Update the experiment status in config file''' + experiments_config = Experiments() + experiments_dict = experiments_config.get_all_experiments() + if not experiments_dict: + return None + for key in experiments_dict.keys(): + if isinstance(experiments_dict[key], dict): + if experiments_dict[key].get('status') != 'STOPPED': + rest_pid = experiments_dict[key].get('pid') + if not detect_process(rest_pid): + experiments_config.update_experiment(key, 'status', 'STOPPED') + continue + +def check_experiment_id(args, update=True): + '''check if the id is valid + ''' + if update: + update_experiment() + experiments_config = Experiments() + experiments_dict = experiments_config.get_all_experiments() + if not experiments_dict: + print_normal('There is no experiment running...') + return None + if not args.id: + running_experiment_list = [] + for key in experiments_dict.keys(): + if isinstance(experiments_dict[key], dict): + if experiments_dict[key].get('status') != 'STOPPED': + running_experiment_list.append(key) + elif isinstance(experiments_dict[key], list): + # if the config file is old version, remove the configuration from file + experiments_config.remove_experiment(key) + if len(running_experiment_list) > 1: + print_error('There are multiple experiments, please set the experiment id...') + experiment_information = "" + for key in running_experiment_list: + experiment_information += EXPERIMENT_DETAIL_FORMAT % ( + key, + experiments_dict[key].get('experimentName', 'N/A'), + experiments_dict[key]['status'], + experiments_dict[key].get('port', 'N/A'), + experiments_dict[key].get('platform'), + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \ + if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'], + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \ + if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime']) + 
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
+            exit(1)
+        elif not running_experiment_list:
+            print_error('There is no experiment running.')
+            return None
+        else:
+            return running_experiment_list[0]
+    if experiments_dict.get(args.id):
+        return args.id
+    else:
+        print_error('Id is not correct.')
+        return None
+
+def parse_ids(args):
+    '''Parse the arguments for nnictl stop
+    1. If a port is provided and an id is not specified, return the id that owns the port
+    2. If both a port and an id are provided, return the id if it owns the port, otherwise fail
+    3. If an id is specified, return the corresponding id
+    4. If no id is specified and exactly one experiment is running, return its id, otherwise report an error
+    5. If the id matches an experiment, nnictl will return that id
+    6. If the id ends with *, nnictl will return all ids that match the prefix before *
+    7. If the id does not exist but matches the prefix of one experiment id, nnictl will return the matched id
+    8. If the id does not exist but matches the prefix of multiple experiment ids, nnictl will print the candidate ids
+    '''
+    update_experiment()
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    if not experiments_dict:
+        print_normal('Experiment is not running...')
+        return None
+    result_list = []
+    running_experiment_list = []
+    for key in experiments_dict.keys():
+        if isinstance(experiments_dict[key], dict):
+            if experiments_dict[key].get('status') != 'STOPPED':
+                running_experiment_list.append(key)
+        elif isinstance(experiments_dict[key], list):
+            # if the config file is an old version, remove the configuration from the file
+            experiments_config.remove_experiment(key)
+    if args.all:
+        return running_experiment_list
+    if args.port is not None:
+        for key in running_experiment_list:
+            if experiments_dict[key].get('port') == args.port:
+                result_list.append(key)
+        if args.id and result_list and args.id != result_list[0]:
+            print_error('Experiment id and restful server port do not match')
+            exit(1)
+    elif not args.id:
+        if len(running_experiment_list) > 1:
+            print_error('There are multiple experiments, please set the experiment id...')
+            experiment_information = ""
+            for key in running_experiment_list:
+                experiment_information += EXPERIMENT_DETAIL_FORMAT % (
+                    key,
+                    experiments_dict[key].get('experimentName', 'N/A'),
+                    experiments_dict[key]['status'],
+                    experiments_dict[key].get('port', 'N/A'),
+                    experiments_dict[key].get('platform'),
+                    time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
+                    if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
+                    time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \
+                    if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
+            print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
+            exit(1)
+        else:
+            result_list = running_experiment_list
+    elif args.id.endswith('*'):
+        for expId in running_experiment_list:
+            if expId.startswith(args.id[:-1]):
+                result_list.append(expId)
+    elif args.id in running_experiment_list:
+        result_list.append(args.id)
+    else:
+        for expId in running_experiment_list:
+            if expId.startswith(args.id):
+                result_list.append(expId)
+        if len(result_list) > 1:
+            print_error(args.id + ' is ambiguous, please choose ' + ' '.join(result_list))
+            return None
+    if not result_list and (args.id or args.port):
+        print_error('No experiment matched, please set a correct experiment id or restful server port')
+    elif not result_list:
+        print_error('There is no experiment running...')
+    return result_list
+
+def get_config_filename(args):
+    '''check the experiment id and return it as the config file name'''
+    experiment_id = check_experiment_id(args)
+    if experiment_id is None:
+        print_error('Please set a correct experiment id.')
+        exit(1)
+    return experiment_id
+
+def get_experiment_port(args):
+    '''get the port of an experiment'''
+    experiment_id = check_experiment_id(args)
+    if experiment_id is None:
+        print_error('Please set a correct experiment id.')
+        exit(1)
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    return experiments_dict[experiment_id].get('port')
+
+def convert_time_stamp_to_date(content):
+    '''Convert timestamps to date time format'''
+    start_time_stamp = content.get('startTime')
+    end_time_stamp = content.get('endTime')
+    if start_time_stamp:
+        start_time = datetime.fromtimestamp(start_time_stamp // 1000, timezone.utc).astimezone().strftime("%Y/%m/%d %H:%M:%S")
+        content['startTime'] = str(start_time)
+    if end_time_stamp:
+        end_time = datetime.fromtimestamp(end_time_stamp // 1000, timezone.utc).astimezone().strftime("%Y/%m/%d %H:%M:%S")
+        content['endTime'] = str(end_time)
+    return content
+
+def check_rest(args):
+    '''check if the restful server is running'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    rest_port = experiments_dict.get(get_config_filename(args)).get('port')
+    running, _ = check_rest_server_quick(rest_port)
+    if running:
+        print_normal('Restful server is running...')
+    else:
+        print_normal('Restful server is not running...')
+    return running
+
+def stop_experiment(args):
+    '''Stop the running experiment'''
+    if args.id and args.id == 'all':
+        print_warning('\'nnictl stop all\' is no longer supported, please use \'nnictl stop --all\' to stop all experiments!')
+        exit(1)
+    experiment_id_list = parse_ids(args)
+    if experiment_id_list:
+        for experiment_id in experiment_id_list:
+            print_normal('Stopping experiment %s' % experiment_id)
+            experiments_config = Experiments()
+            experiments_dict = experiments_config.get_all_experiments()
+            rest_pid = experiments_dict.get(experiment_id).get('pid')
+            if rest_pid:
+                kill_command(rest_pid)
+            print_normal('Stop experiment success.')
+
+def trial_ls(args):
+    '''List trials'''
+    def final_metric_data_cmp(lhs, rhs):
+        # finalMetricData is double-encoded JSON, hence the nested json.loads
+        metric_l = json.loads(json.loads(lhs['finalMetricData'][0]['data']))
+        metric_r = json.loads(json.loads(rhs['finalMetricData'][0]['data']))
+        if isinstance(metric_l, float):
+            return metric_l - metric_r
+        elif isinstance(metric_l, dict):
+            return metric_l['default'] - metric_r['default']
+        else:
+            print_error('Unexpected data format. Please check your data.')
+            raise ValueError
+
+    if args.head and args.tail:
+        print_error('Head and tail cannot be set at the same time.')
+        return
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = get_config_filename(args)
+    rest_port = experiments_dict.get(experiment_id).get('port')
+    rest_pid = experiments_dict.get(experiment_id).get('pid')
+    if not detect_process(rest_pid):
+        print_error('Experiment is not running...')
+        return
+    running, response = check_rest_server_quick(rest_port)
+    if running:
+        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
+        if response and check_response(response):
+            content = json.loads(response.text)
+            if args.head:
+                assert args.head > 0, 'The number of requested data must be greater than 0.'
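+                # keep only trials that reported a final metric, rank by that metric (descending) and take the first N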
+                content = sorted(filter(lambda x: 'finalMetricData' in x, content),
+                                 key=cmp_to_key(final_metric_data_cmp), reverse=True)[:args.head]
+            elif args.tail:
+                assert args.tail > 0, 'The number of requested data must be greater than 0.'
+                content = sorted(filter(lambda x: 'finalMetricData' in x, content),
+                                 key=cmp_to_key(final_metric_data_cmp))[:args.tail]
+            for index, value in enumerate(content):
+                content[index] = convert_time_stamp_to_date(value)
+            print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
+            return content
+        else:
+            print_error('List trial failed...')
+    else:
+        print_error('Restful server is not running...')
+    return None
+
+def trial_kill(args):
+    '''Kill a trial job'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = get_config_filename(args)
+    rest_port = experiments_dict.get(experiment_id).get('port')
+    rest_pid = experiments_dict.get(experiment_id).get('pid')
+    if not detect_process(rest_pid):
+        print_error('Experiment is not running...')
+        return
+    running, _ = check_rest_server_quick(rest_port)
+    if running:
+        response = rest_delete(trial_job_id_url(rest_port, args.trial_id), REST_TIME_OUT)
+        if response and check_response(response):
+            print(response.text)
+            return True
+        else:
+            print_error('Kill trial job failed...')
+    else:
+        print_error('Restful server is not running...')
+    return False
+
+def trial_codegen(args):
+    '''Generate code for a specific trial'''
+    print_warning('Currently, this command is only for the nni nas programming interface.')
+    exp_id = get_config_filename(args)
+    experiment_config = Config(exp_id, Experiments().get_all_experiments()[exp_id]['logDir']).get_config()
+    if not experiment_config.get('useAnnotation'):
+        print_error('The experiment is not using annotation')
+        exit(1)
+    code_dir = experiment_config['trial']['codeDir']
+    expand_annotations(code_dir, './exp_%s_trial_%s_code'%(exp_id, args.trial_id), exp_id, args.trial_id)
+
+def list_experiment(args):
+    '''Get experiment information'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = get_config_filename(args)
+    rest_port = experiments_dict.get(experiment_id).get('port')
+    rest_pid = experiments_dict.get(experiment_id).get('pid')
+    if not detect_process(rest_pid):
+        print_error('Experiment is not running...')
+        return
+    running, _ = check_rest_server_quick(rest_port)
+    if running:
+        response = rest_get(experiment_url(rest_port), REST_TIME_OUT)
+        if response and check_response(response):
+            content = convert_time_stamp_to_date(json.loads(response.text))
+            print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
+            return content
+        else:
+            print_error('List experiment failed...')
+    else:
+        print_error('Restful server is not running...')
+    return None
+
+def experiment_status(args):
+    '''Show the status of an experiment'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    rest_port = experiments_dict.get(get_config_filename(args)).get('port')
+    result, response = check_rest_server_quick(rest_port)
+    if not result:
+        print_normal('Restful server is not running...')
+    else:
+        print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':')))
+    return result
+
+def log_internal(args, filetype):
+    '''internal function to call get_log_content'''
+    file_name = get_config_filename(args)
+    if filetype == 'stdout':
+        file_full_path = os.path.join(NNI_HOME_DIR, file_name, 'log', 'nnictl_stdout.log')
+    else:
+        file_full_path = os.path.join(NNI_HOME_DIR, file_name, 'log', 'nnictl_stderr.log')
+    print(check_output_command(file_full_path, head=args.head, tail=args.tail))
+
+def log_stdout(args):
+    '''get stdout log'''
+    log_internal(args, 'stdout')
+
+def log_stderr(args):
+    '''get stderr log'''
+    log_internal(args, 'stderr')
+
+def log_trial_adl_helper(args, experiment_id):
+    # adljob_id format should be consistent with the one in "adlTrainingService.ts":
+    # const adlJobName: string = `nni-exp-${this.experimentId}-trial-${trialJobId}`.toLowerCase();
+    adlJobName = "nni-exp-{}-trial-{}".format(experiment_id, args.trial_id).lower()
+    print_warning('Note that no log will show while the trial is pending or done (succeeded or failed). '
+                  'You can retry the command.')
+    print_green('>>> Trial log streaming:')
+    try:
+        subprocess.run(
+            [
+                "kubectl", "logs",
+                "-l", "adaptdl/job=%s" % adlJobName,
+                "-f"  # Follow the stream
+            ],  # TODO: support remaining argument, uncomment the lines in nnictl.py
+        )  # TODO: emulate tee behaviors, not necessary tho.
+    except KeyboardInterrupt:
+        pass
+    except Exception:
+        print_error('Error! Please check kubectl:')
+        traceback.print_exc()
+        exit(1)
+    finally:
+        print_green('<<< [adlJobName:%s]' % adlJobName)
+    nni_manager_collection_path = os.path.expanduser('~/nni-experiments/%s/trials/%s/stdout_log_collection.log' %
+                                                     (experiment_id, args.trial_id))
+    print_green('>>> (Optional) How to persist the complete trial log locally:')
+    print(
+        'Please ensure `logCollection: http` '
+        'exists in the experiment configuration yaml. '
+        'After the trial is done, you can check it in the file below: \n  %s'
+        % nni_manager_collection_path
+    )
+
+
+def log_trial(args):
+    '''get trial log path'''
+    trial_id_path_dict = {}
+    trial_id_list = []
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = get_config_filename(args)
+    rest_port = experiments_dict.get(experiment_id).get('port')
+    rest_pid = experiments_dict.get(experiment_id).get('pid')
+    experiment_config = Config(experiment_id, experiments_dict.get(experiment_id).get('logDir')).get_config()
+    if not detect_process(rest_pid):
+        print_error('Experiment is not running...')
+        return
+    running, response = check_rest_server_quick(rest_port)
+    if running:
+        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
+        if response and check_response(response):
+            content = json.loads(response.text)
+            for trial in content:
+                trial_id_list.append(trial.get('trialJobId'))
+                if trial.get('logPath'):
+                    trial_id_path_dict[trial.get('trialJobId')] = trial['logPath']
+    else:
+        print_error('Restful server is not running...')
+        exit(1)
+    is_adl = experiment_config.get('trainingServicePlatform') == 'adl'
+    if is_adl and not args.trial_id:
+        print_error('Trial ID is required to retrieve the log for adl. Please specify it with "--trial_id".')
+        exit(1)
+    if args.trial_id:
+        if args.trial_id not in trial_id_list:
+            print_error('Trial id {0} is not correct, please check your command!'.format(args.trial_id))
+            exit(1)
+        if is_adl:
+            log_trial_adl_helper(args, experiment_id)
+            # adl has its own way to log trials, so it returns right after the helper returns
+            return
+        if trial_id_path_dict.get(args.trial_id):
+            print_normal('id:' + args.trial_id + ' path:' + trial_id_path_dict[args.trial_id])
+        else:
+            print_error('Log path is not available yet, please wait...')
+            exit(1)
+    else:
+        print_normal('All trial log info:')
+        for key in trial_id_path_dict:
+            print_normal('id:' + key + ' path:' + trial_id_path_dict[key])
+        if not trial_id_path_dict:
+            print_normal('None')
+
+def get_config(args):
+    '''get config info'''
+    experiment_id = get_config_filename(args)
+    experiment_config = Config(experiment_id, Experiments().get_all_experiments()[experiment_id]['logDir']).get_config()
+    print(json.dumps(experiment_config, indent=4))
+
+def webui_url(args):
+    '''show the url of web ui'''
+    experiment_id = get_config_filename(args)
+    experiments_dict = Experiments().get_all_experiments()
+    print_normal('{0} {1}'.format('Web UI url:', ' '.join(experiments_dict[experiment_id].get('webuiUrl'))))
+
+def local_clean(directory):
+    '''clean up local data'''
+    print_normal('removing folder {0}'.format(directory))
+    try:
+        shutil.rmtree(directory)
+    except FileNotFoundError:
+        print_error('{0} does not exist.'.format(directory))
+
+def remote_clean(machine_list, experiment_id=None):
+    '''clean up remote data'''
+    for machine in machine_list:
+        passwd = machine.get('passwd')
+        userName = machine.get('username')
+        host = machine.get('ip')
+        port = machine.get('port')
+        sshKeyPath = machine.get('sshKeyPath')
+        passphrase = machine.get('passphrase')
+        if experiment_id:
+            remote_dir = '/' + '/'.join(['tmp', 'nni-experiments', experiment_id])
+        else:
+            remote_dir = '/' + '/'.join(['tmp', 'nni-experiments'])
+        sftp = create_ssh_sftp_client(host, port, userName, passwd, sshKeyPath, passphrase)
+        print_normal('removing folder {0}'.format(host + ':' + str(port) + remote_dir))
+        remove_remote_directory(sftp, remote_dir)
+
+def experiment_clean(args):
+    '''clean up the experiment data'''
+    experiment_id_list = []
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    if args.all:
+        experiment_id_list = list(experiments_dict.keys())
+    else:
+        if args.id is None:
+            print_error('Please set the experiment id.')
+            exit(1)
+        if args.id not in experiments_dict:
+            print_error('Cannot find experiment {0}.'.format(args.id))
+            exit(1)
+        experiment_id_list.append(args.id)
+    while True:
+        print('INFO: This action will delete experiment {0}, and it\'s not recoverable.'.format(' '.join(experiment_id_list)))
+        inputs = input('INFO: do you want to continue?[y/N]:')
+        if not inputs.lower() or inputs.lower() in ['n', 'no']:
+            exit(0)
+        elif inputs.lower() not in ['y', 'n', 'yes', 'no']:
+            print_warning('Please input Y or N.')
+        else:
+            break
+    for experiment_id in experiment_id_list:
+        experiment_config = Config(experiment_id, Experiments().get_all_experiments()[experiment_id]['logDir']).get_config()
+        platform = experiment_config.get('trainingServicePlatform') or experiment_config.get('trainingService', {}).get('platform')
+        if platform == 'remote':
+            machine_list = experiment_config.get('machineList')
+            remote_clean(machine_list, experiment_id)
+        elif platform != 'local':
+            # TODO: support all platforms
+            print_warning('platform {0} clean up is not supported yet.'.format(platform))
+            exit(0)
+        # clean local data
+        local_base_dir = experiments_config.experiments[experiment_id]['logDir']
+        if not local_base_dir:
+            local_base_dir = NNI_HOME_DIR
+        local_experiment_dir = os.path.join(local_base_dir, experiment_id)
+        experiment_folder_name_list = ['checkpoint', 'db', 'log', 'trials']
+        for folder_name in experiment_folder_name_list:
+            local_clean(os.path.join(local_experiment_dir, folder_name))
+        if not os.listdir(local_experiment_dir):
+            local_clean(local_experiment_dir)
+        print_normal('removing metadata of experiment {0}'.format(experiment_id))
+        experiments_config.remove_experiment(experiment_id)
+    print_normal('Done.')
+
+def get_platform_dir(config_content):
+    '''get the list of directories to be deleted'''
+    platform = config_content.get('trainingServicePlatform')
+    dir_list = []
+    if platform == 'remote':
+        machine_list = config_content.get('machineList')
+        for machine in machine_list:
+            host = machine.get('ip')
+            port = machine.get('port')
+            dir_list.append(host + ':' + str(port) + '/tmp/nni')
+    elif platform == 'pai':
+        host = config_content.get('paiConfig').get('host')
+        user_name = config_content.get('paiConfig').get('userName')
+        output_dir = config_content.get('trial').get('outputDir')
+        dir_list.append('server: {0}, path: {1}/nni'.format(host, user_name))
+        if output_dir:
+            dir_list.append(output_dir)
+    return dir_list
+
+def platform_clean(args):
+    '''clean up the platform data'''
+    config_path = os.path.abspath(args.config)
+    if not os.path.exists(config_path):
+        print_error('Please set a correct config path.')
+        exit(1)
+    config_content = get_yml_content(config_path)
+    platform = config_content.get('trainingServicePlatform')
+    if platform == 'local':
+        print_normal('The local platform does not need cleaning.')
+        exit(0)
+    if platform not in ['remote', 'pai']:
+        print_normal('platform {0} is not supported.'.format(platform))
+        exit(0)
+    update_experiment()
+    dir_list = get_platform_dir(config_content)
+    if not dir_list:
+        print_normal('No folder of NNI caches is found.')
+        exit(1)
+    while True:
+        print_normal('This command will remove the NNI cache folders listed below. ' \
+                     'If other users are running experiments on these hosts, their experiments will break.')
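+        # list every directory that would be removed before asking for confirmation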
+        for value in dir_list:
+            print('       ' + value)
+        inputs = input('INFO: do you want to continue?[y/N]:')
+        if not inputs.lower() or inputs.lower() in ['n', 'no']:
+            exit(0)
+        elif inputs.lower() not in ['y', 'n', 'yes', 'no']:
+            print_warning('Please input Y or N.')
+        else:
+            break
+    if platform == 'remote':
+        machine_list = config_content.get('machineList')
+        remote_clean(machine_list)
+    print_normal('Done.')
+
+def experiment_list(args):
+    '''get the information of all experiments'''
+    update_experiment()
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    if not experiments_dict:
+        print_normal('Cannot find experiments.')
+        exit(1)
+    experiment_id_list = []
+    if args.all:
+        for key in experiments_dict.keys():
+            experiment_id_list.append(key)
+    else:
+        for key in experiments_dict.keys():
+            if experiments_dict[key]['status'] != 'STOPPED':
+                experiment_id_list.append(key)
+        if not experiment_id_list:
+            print_warning('There is no experiment running...\nYou can use \'nnictl experiment list --all\' to list all experiments.')
+    experiment_information = ""
+    for key in experiment_id_list:
+        experiment_information += EXPERIMENT_DETAIL_FORMAT % (
+            key,
+            experiments_dict[key].get('experimentName', 'N/A'),
+            experiments_dict[key]['status'],
+            experiments_dict[key].get('port', 'N/A'),
+            experiments_dict[key].get('platform'),
+            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
+            if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
+            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \
+            if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
+    print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
+    return experiment_id_list
+
+def get_time_interval(time1, time2):
+    '''get the interval between two times'''
+    try:
+        seconds = int((time2 - time1) / 1000)
+        # convert seconds to day:hour:minute:second
+        days = seconds // 86400
+        seconds %= 86400
+        hours = seconds // 3600
+        seconds %= 3600
+        minutes = seconds // 60
+        seconds %= 60
+        return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
+    except Exception:
+        return 'N/A'
+
+def show_experiment_info():
+    '''show experiment information in monitor'''
+    update_experiment()
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    if not experiments_dict:
+        print('There is no experiment running...')
+        exit(1)
+    experiment_id_list = []
+    for key in experiments_dict.keys():
+        if experiments_dict[key]['status'] != 'STOPPED':
+            experiment_id_list.append(key)
+    if not experiment_id_list:
+        print_warning('There is no experiment running...')
+        return
+    for key in experiment_id_list:
+        print(EXPERIMENT_MONITOR_INFO % (
+            key, experiments_dict[key]['status'], experiments_dict[key]['port'],
+            experiments_dict[key].get('platform'),
+            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
+            if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
+            get_time_interval(experiments_dict[key]['startTime'], experiments_dict[key]['endTime'])))
+        print(TRIAL_MONITOR_HEAD)
+        running, response = check_rest_server_quick(experiments_dict[key]['port'])
+        if running:
+            response = rest_get(trial_jobs_url(experiments_dict[key]['port']), REST_TIME_OUT)
+            if response and check_response(response):
+                content = json.loads(response.text)
+                for index, value in enumerate(content):
+                    content[index] = convert_time_stamp_to_date(value)
+                    print(TRIAL_MONITOR_CONTENT % (content[index].get('trialJobId'), content[index].get('startTime'), \
+                          content[index].get('endTime'), content[index].get('status')))
+        print(TRIAL_MONITOR_TAIL)
+
+def set_monitor(auto_exit, time_interval, port=None, pid=None):
+    '''set the experiment monitor engine'''
+    while True:
+        try:
+            if sys.platform == 'win32':
+                os.system('cls')
+            else:
+                os.system('clear')
+            update_experiment()
+            show_experiment_info()
+            if auto_exit:
+                status = get_experiment_status(port)
+                if status in ['DONE', 'ERROR', 'STOPPED']:
+                    print_normal('Experiment status is {0}.'.format(status))
+                    print_normal('Stopping experiment...')
+                    kill_command(pid)
+                    print_normal('Stop experiment success.')
+                    exit(0)
+            time.sleep(time_interval)
+        except KeyboardInterrupt:
+            if auto_exit:
+                print_normal('Stopping experiment...')
+                kill_command(pid)
+                print_normal('Stop experiment success.')
+            else:
+                print_normal('Exiting...')
+            exit(0)
+        except Exception as exception:
+            print_error(exception)
+            exit(1)
+
+def monitor_experiment(args):
+    '''monitor the experiment'''
+    if args.time <= 0:
+        print_error('Please input a positive integer as the time interval, the unit is second.')
+        exit(1)
+    set_monitor(False, args.time)
+
+def export_trials_data(args):
+    '''export experiment metadata and intermediate results to json or csv
+    '''
+    def groupby_trial_id(intermediate_results):
+        # sort by timestamp so each trial's intermediate results stay in reporting order
+        intermediate_results = sorted(intermediate_results, key=lambda x: x['timestamp'])
+        groupby = dict()
+        for content in intermediate_results:
+            groupby.setdefault(content['trialJobId'], []).append(json.loads(content['data']))
+        return groupby
+
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = get_config_filename(args)
+    rest_port = experiments_dict.get(experiment_id).get('port')
+    rest_pid = experiments_dict.get(experiment_id).get('pid')
+
+    if not detect_process(rest_pid):
+        print_error('Experiment is not running...')
+        return
+    running, response = check_rest_server_quick(rest_port)
+    if not running:
+        print_error('Restful server is not running')
+        return
+    response = rest_get(export_data_url(rest_port), 20)
+    if response is not None and check_response(response):
+        content = json.loads(response.text)
+        if args.intermediate:
+            intermediate_results_response = rest_get(metric_data_url(rest_port), REST_TIME_OUT)
+            if not intermediate_results_response or not check_response(intermediate_results_response):
+                print_error('Error getting intermediate results.')
+                return
+            intermediate_results = groupby_trial_id(json.loads(intermediate_results_response.text))
+            for record in content:
+                record['intermediate'] = intermediate_results[record['trialJobId']]
+        if args.type == 'json':
+            with open(args.path, 'w') as file:
+                file.write(json.dumps(content))
+        elif args.type == 'csv':
+            trial_records = []
+            for record in content:
+                formatted_record = dict()
+                if args.intermediate:
+                    formatted_record['intermediate'] = '[' + ','.join(record['intermediate']) + ']'
+                record_value = json.loads(record['value'])
+                if not isinstance(record_value, (float, int)):
+                    formatted_record.update({**record['parameter'], **record_value, **{'trialJobId': record['trialJobId']}})
+                else:
+                    formatted_record.update({**record['parameter'], **{'reward': record_value, 'trialJobId': record['trialJobId']}})
+                trial_records.append(formatted_record)
+            if not trial_records:
+                print_error('No trial results collected! Please check your trial log...')
+                exit(0)
+            with open(args.path, 'w', newline='') as file:
+                writer = csv.DictWriter(file, set.union(*[set(r.keys()) for r in trial_records]))
+                writer.writeheader()
+                writer.writerows(trial_records)
+        else:
+            print_error('Unknown type: %s' % args.type)
+            return
+    else:
+        print_error('Export failed...')
+
+def search_space_auto_gen(args):
+    '''dry run the trial code to generate the search space file'''
+    trial_dir = os.path.expanduser(args.trial_dir)
+    file_path = os.path.expanduser(args.file)
+    if not os.path.isabs(file_path):
+        file_path = os.path.join(os.getcwd(), file_path)
+    assert os.path.exists(trial_dir)
+    if os.path.exists(file_path):
+        print_warning('%s already exists, will be overwritten.' % file_path)
+    print_normal('Dry run to generate search space...')
+    Popen(args.trial_command, cwd=trial_dir, env=dict(os.environ, NNI_GEN_SEARCH_SPACE=file_path), shell=True).wait()
+    if not os.path.exists(file_path):
+        print_warning('Expected search space file \'{}\' to be generated, but it was not found.'.format(file_path))
+    else:
+        print_normal('Generate search space done: \'{}\'.'.format(file_path))
+
+def save_experiment(args):
+    '''save experiment data to a zip file'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    if args.id is None:
+        print_error('Please set the experiment id.')
+        exit(1)
+    if args.id not in experiments_dict:
+        print_error('Cannot find experiment {0}.'.format(args.id))
+        exit(1)
+    if experiments_dict[args.id].get('status') != 'STOPPED':
+        print_error('Can only save a stopped experiment!')
+        exit(1)
+    print_normal('Saving...')
+    experiment_config = Config(args.id, experiments_dict[args.id]['logDir']).get_config()
+    logDir = os.path.join(experiments_dict[args.id]['logDir'], args.id)
+    temp_root_dir = generate_temp_dir()
+
+    # Step1. Copy logDir to the temp folder
+    if not os.path.exists(logDir):
+        print_error('logDir: %s does not exist!' % logDir)
+        exit(1)
+    temp_experiment_dir = os.path.join(temp_root_dir, 'experiment')
+    shutil.copytree(logDir, temp_experiment_dir)
+
+    # Step2. Copy nnictl metadata to the temp folder
+    temp_nnictl_dir = os.path.join(temp_root_dir, 'nnictl')
+    os.makedirs(temp_nnictl_dir, exist_ok=True)
+    try:
+        with open(os.path.join(temp_nnictl_dir, '.experiment'), 'w') as file:
+            experiments_dict[args.id]['id'] = args.id
+            json.dump(experiments_dict[args.id], file)
+    except IOError:
+        print_error('Write file to %s failed!' % os.path.join(temp_nnictl_dir, '.experiment'))
+        exit(1)
+    nnictl_log_dir = os.path.join(NNI_HOME_DIR, args.id, 'log')
+    shutil.copytree(nnictl_log_dir, os.path.join(temp_nnictl_dir, args.id, 'log'))
+
+    # Step3. Copy the code dir
+    if args.saveCodeDir:
+        temp_code_dir = os.path.join(temp_root_dir, 'code')
+        shutil.copytree(experiment_config['trial']['codeDir'], temp_code_dir)
+
+    # Step4. Copy the searchSpace file
+    search_space_path = experiment_config.get('searchSpacePath')
+    if search_space_path:
+        if not os.path.exists(search_space_path):
+            print_warning('Search space file %s does not exist!' % search_space_path)
+        else:
+            temp_search_space_dir = os.path.join(temp_root_dir, 'searchSpace')
+            os.makedirs(temp_search_space_dir, exist_ok=True)
+            search_space_name = os.path.basename(search_space_path)
+            shutil.copyfile(search_space_path, os.path.join(temp_search_space_dir, search_space_name))
+
+    # Step5. Archive the folder
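+    # shutil.make_archive appends the '.zip' suffix to zip_package_name by itself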
+    zip_package_name = 'nni_experiment_%s' % args.id
+    if args.path:
+        os.makedirs(args.path, exist_ok=True)
+        zip_package_name = os.path.join(args.path, zip_package_name)
+    shutil.make_archive(zip_package_name, 'zip', temp_root_dir)
+    print_normal('Save to %s.zip success!' % zip_package_name)
+
+    # Step6. Clean up the temp data
+    shutil.rmtree(temp_root_dir)
+
+def load_experiment(args):
+    '''load experiment data'''
+    package_path = os.path.expanduser(args.path)
+    if not os.path.exists(package_path):
+        print_error('File path %s does not exist!' % args.path)
+        exit(1)
+    if args.searchSpacePath and os.path.isdir(args.searchSpacePath):
+        print_error('search space path should be a full path with the file name, not a directory!')
+        exit(1)
+    temp_root_dir = generate_temp_dir()
+    shutil.unpack_archive(package_path, temp_root_dir)
+    print_normal('Loading...')
+    # Step1. Validation
+    if not os.path.exists(args.codeDir):
+        print_error('Invalid: codeDir path does not exist!')
+        exit(1)
+    if args.logDir:
+        if not os.path.exists(args.logDir):
+            print_error('Invalid: logDir path does not exist!')
+            exit(1)
+    experiment_temp_dir = os.path.join(temp_root_dir, 'experiment')
+    if not os.path.exists(os.path.join(experiment_temp_dir, 'db')):
+        print_error('Invalid archive file: db file does not exist!')
+        shutil.rmtree(temp_root_dir)
+        exit(1)
+    nnictl_temp_dir = os.path.join(temp_root_dir, 'nnictl')
+    if not os.path.exists(os.path.join(nnictl_temp_dir, '.experiment')):
+        print_error('Invalid archive file: nnictl metadata file does not exist!')
+        shutil.rmtree(temp_root_dir)
+        exit(1)
+    try:
+        with open(os.path.join(nnictl_temp_dir, '.experiment'), 'r') as file:
+            experiment_metadata = json.load(file)
+    except ValueError as err:
+        print_error('Invalid nnictl metadata file: %s' % err)
+        shutil.rmtree(temp_root_dir)
+        exit(1)
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    experiment_id = experiment_metadata.get('id')
+    if experiment_id in experiments_dict:
+        print_error('Invalid: experiment id already exists!')
+        shutil.rmtree(temp_root_dir)
+        exit(1)
+    if not os.path.exists(os.path.join(nnictl_temp_dir, experiment_id)):
+        print_error('Invalid: experiment metadata does not exist!')
+        shutil.rmtree(temp_root_dir)
+        exit(1)
+
+    # Step2. Copy nnictl metadata
+    src_path = os.path.join(nnictl_temp_dir, experiment_id)
+    dest_path = os.path.join(NNI_HOME_DIR, experiment_id)
+    if os.path.exists(dest_path):
+        shutil.rmtree(dest_path)
+    shutil.copytree(src_path, dest_path)
+
+    # Step3. Copy experiment data
+    os.rename(os.path.join(temp_root_dir, 'experiment'), os.path.join(temp_root_dir, experiment_id))
+    src_path = os.path.join(temp_root_dir, experiment_id)
+    experiment_config = Config(experiment_id, temp_root_dir).get_config()
+    if args.logDir:
+        logDir = args.logDir
+        experiment_config['logDir'] = logDir
+    else:
+        if experiment_config.get('logDir'):
+            logDir = experiment_config['logDir']
+        else:
+            logDir = NNI_HOME_DIR
+
+    dest_path = os.path.join(logDir, experiment_id)
+    if os.path.exists(dest_path):
+        shutil.rmtree(dest_path)
+    shutil.copytree(src_path, dest_path)
+
+    # Step4. Copy the code dir
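+    # files from the archived code folder are merged into codeDir below; existing files are kept, never overwritten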
+    codeDir = os.path.expanduser(args.codeDir)
+    if not os.path.isabs(codeDir):
+        codeDir = os.path.join(os.getcwd(), codeDir)
+        print_normal('Expand codeDir to %s' % codeDir)
+    experiment_config['trial']['codeDir'] = codeDir
+    archive_code_dir = os.path.join(temp_root_dir, 'code')
+    if os.path.exists(archive_code_dir):
+        file_list = os.listdir(archive_code_dir)
+        for file_name in file_list:
+            src_path = os.path.join(archive_code_dir, file_name)
+            target_path = os.path.join(codeDir, file_name)
+            if os.path.exists(target_path):
+                print_error('Copy %s failed, %s exists!' % (file_name, target_path))
+                continue
+            if os.path.isdir(src_path):
+                shutil.copytree(src_path, target_path)
+            else:
+                shutil.copy(src_path, target_path)
+
+    # Step5. Create experiment metadata
+    experiments_config.add_experiment(experiment_id,
+                                      experiment_metadata.get('port'),
+                                      experiment_metadata.get('startTime'),
+                                      experiment_metadata.get('platform'),
+                                      experiment_metadata.get('experimentName'),
+                                      experiment_metadata.get('endTime'),
+                                      experiment_metadata.get('status'),
+                                      experiment_metadata.get('tag'),
+                                      experiment_metadata.get('pid'),
+                                      experiment_metadata.get('webUrl'),
+                                      logDir)
+    print_normal('Load experiment %s success!' % experiment_id)
+
+    # Step6. Clean up the temp data
+    shutil.rmtree(temp_root_dir)
diff --git a/nni/tools/nnictl/rest_utils.py b/nni/tools/nnictl/rest_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..74a836bdae8bc5f2c9fa7ef0dfaf2a5e08d5a3b5
--- /dev/null
+++ b/nni/tools/nnictl/rest_utils.py
@@ -0,0 +1,89 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import time
+import requests
+from .url_utils import check_status_url
+from .constants import REST_TIME_OUT
+from .common_utils import print_error
+
+def rest_put(url, data, timeout, show_error=False):
+    '''Call rest put method'''
+    try:
+        response = requests.put(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\
+                                data=data, timeout=timeout)
+        return response
+    except requests.exceptions.Timeout:
+        print_error("Connect %s timeout." % url)
+        return None
+    except Exception as exception:
+        if show_error:
+            print_error(exception)
+        return None
+
+def rest_post(url, data, timeout, show_error=False):
+    '''Call rest post method'''
+    try:
+        response = requests.post(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\
+                                 data=data, timeout=timeout)
+        return response
+    except requests.exceptions.Timeout:
+        print_error("Connect %s timeout." % url)
+        return None
+    except Exception as exception:
+        if show_error:
+            print_error(exception)
+        return None
+
+def rest_get(url, timeout, show_error=False):
+    '''Call rest get method'''
+    try:
+        response = requests.get(url, timeout=timeout)
+        return response
+    except requests.exceptions.Timeout:
+        print_error("Connect %s timeout." % url)
+        return None
+    except Exception as exception:
+        if show_error:
+            print_error(exception)
+        return None
+
+def rest_delete(url, timeout, show_error=False):
+    '''Call rest delete method'''
+    try:
+        response = requests.delete(url, timeout=timeout)
+        return response
+    except requests.exceptions.Timeout:
+        print_error("Connect %s timeout." % url)
+        return None
+    except Exception as exception:
+        if show_error:
+            print_error(exception)
+        return None
+
+def check_rest_server(rest_port):
+    '''Check if the restful server is ready'''
+    retry_count = 20
+    for _ in range(retry_count):
+        response = rest_get(check_status_url(rest_port), REST_TIME_OUT)
+        if response:
+            if response.status_code == 200:
+                return True, response
+            else:
+                return False, response
+        else:
+            time.sleep(1)
+    return False, response
+
+def check_rest_server_quick(rest_port):
+    '''Check if the restful server is ready, only check once'''
+    response = rest_get(check_status_url(rest_port), 5)
+    if response and response.status_code == 200:
+        return True, response
+    return False, None
+
+def check_response(response):
+    '''Check if a response is successful according to its status_code'''
+    if response and response.status_code == 200:
+        return True
+    return False
diff --git a/nni/tools/nnictl/ssh_utils.py b/nni/tools/nnictl/ssh_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f26a8e24c1be67bf86cfa0192481b245c93f97
--- /dev/null
+++ b/nni/tools/nnictl/ssh_utils.py
@@ -0,0 +1,60 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+from .common_utils import print_error
+from .command_utils import install_package_command
+
+def check_environment():
+    '''check if paramiko is installed, and install it on demand'''
+    try:
+        import paramiko
+    except ImportError:
+        install_package_command('paramiko')
+        import paramiko
+    return paramiko
+
+def copy_remote_directory_to_local(sftp, remote_path, local_path):
+    '''copy a remote directory to the local machine'''
+    try:
+        os.makedirs(local_path, exist_ok=True)
+        files = sftp.listdir(remote_path)
+        for file in files:
+            remote_full_path = os.path.join(remote_path, file)
+            local_full_path = os.path.join(local_path, file)
+            try:
+                # listdir succeeds only for directories; recurse into them
+                if sftp.listdir(remote_full_path):
+                    copy_remote_directory_to_local(sftp, remote_full_path, local_full_path)
+            except Exception:
+                # not a directory, download it as a file
+                sftp.get(remote_full_path, local_full_path)
+    except Exception:
+        pass
+
+def create_ssh_sftp_client(host_ip, port, username, password, ssh_key_path, passphrase):
+    '''create an ssh sftp client'''
+    try:
+        paramiko = check_environment()
+        conn = paramiko.Transport(host_ip, port)
+        if ssh_key_path is not None:
+            ssh_key = paramiko.RSAKey.from_private_key_file(ssh_key_path, password=passphrase)
+            conn.connect(username=username, pkey=ssh_key)
+        else:
+            conn.connect(username=username, password=password)
+        sftp = paramiko.SFTPClient.from_transport(conn)
+        return sftp
+    except Exception as exception:
+        print_error('Create ssh client error %s\n' % exception)
+
+def remove_remote_directory(sftp, directory):
+    '''remove a directory on the remote machine'''
+    try:
+        files = sftp.listdir(directory)
+        for file in files:
+            filepath = '/'.join([directory, file])
+            try:
+                sftp.remove(filepath)
+            except IOError:
+                # removal failed, so it is a directory; recurse into it
+                remove_remote_directory(sftp, filepath)
+        sftp.rmdir(directory)
+    except IOError as err:
+        print_error(err)
diff --git a/nni/tools/nnictl/ts_management.py b/nni/tools/nnictl/ts_management.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dffe21ebf744cdac9a0c0aa770666e978fdfe43
--- /dev/null
+++ b/nni/tools/nnictl/ts_management.py
@@ -0,0 +1,78 @@
+import importlib
+import json
+
+from nni.runtime.config import get_config_file
+from .common_utils import print_error, print_green
+
+_builtin_training_services = [
+    'local',
+    'remote',
+    'openpai', 'pai',
+    'aml',
+    'dlc',
+    'kubeflow',
+    'frameworkcontroller',
+    'adl',
+]
+
+def register(args):
+    if args.package in _builtin_training_services:
_builtin_training_services:
+        print_error(f'{args.package} is a builtin training service')
+        return
+
+    try:
+        module = importlib.import_module(args.package)
+    except Exception:
+        print_error(f'Cannot import package {args.package}')
+        return
+
+    try:
+        info = module.nni_training_service_info
+    except Exception:
+        print_error(f'Cannot read nni_training_service_info from {args.package}')
+        return
+
+    try:
+        info.config_class()
+    except Exception:
+        print_error('Bad experiment config class')
+        return
+
+    try:
+        service_config = {
+            'nodeModulePath': str(info.node_module_path),
+            'nodeClassName': info.node_class_name,
+        }
+        json.dumps(service_config)
+    except Exception:
+        print_error('Bad node_module_path or bad node_class_name')
+        return
+
+    config = _load()
+    update = args.package in config
+
+    config[args.package] = service_config
+    _save(config)
+
+    if update:
+        print_green(f'Successfully updated {args.package}')
+    else:
+        print_green(f'Successfully registered {args.package}')
+
+def unregister(args):
+    config = _load()
+    if args.package not in config:
+        print_error(f'{args.package} is not a registered training service')
+        return
+    config.pop(args.package, None)
+    _save(config)
+    print_green(f'Successfully unregistered {args.package}')
+
+def list_services(_):
+    print('\n'.join(_load().keys()))
+
+def _load():
+    return json.load(get_config_file('training_services.json').open())
+
+def _save(config):
+    json.dump(config, get_config_file('training_services.json').open('w'), indent=4)
diff --git a/nni/tools/nnictl/updater.py b/nni/tools/nnictl/updater.py
new file mode 100644
index 0000000000000000000000000000000000000000..aec6c0c3d64703d9d30dc671e9e6b0f90ff7e6f1
--- /dev/null
+++ b/nni/tools/nnictl/updater.py
@@ -0,0 +1,158 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
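As an illustration of the contract register() above checks before writing training_services.json: the named package must expose an nni_training_service_info object whose config_class() constructs without arguments and whose node_module_path/node_class_name serialize to JSON. The sketch below is hypothetical; my_training_service, MyServiceConfig and dist/my_service are invented names, not part of NNI:

# my_training_service/__init__.py -- a hypothetical registrable package
from dataclasses import dataclass
from pathlib import Path

@dataclass
class MyServiceConfig:
    # register() instantiates this with no arguments to validate it
    worker_count: int = 1

class _TrainingServiceInfo:
    config_class = MyServiceConfig
    node_module_path = Path('dist/my_service')  # str()-ed and stored in training_services.json
    node_class_name = 'MyTrainingService'

nni_training_service_info = _TrainingServiceInfo()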
+
+import json
+import os
+from .rest_utils import rest_put, rest_post, rest_get, check_rest_server_quick, check_response
+from .url_utils import experiment_url, import_data_url
+from .config_utils import Config, Experiments
+from .common_utils import get_json_content, print_normal, print_error, print_warning
+from .nnictl_utils import get_experiment_port, get_config_filename, detect_process
+from .launcher_utils import parse_time
+from .constants import REST_TIME_OUT, TUNERS_SUPPORTING_IMPORT_DATA, TUNERS_NO_NEED_TO_IMPORT_DATA
+
+def validate_digit(value, start, end):
+    '''validate that a value is a digit within the range [start, end]'''
+    if not str(value).isdigit() or int(value) < start or int(value) > end:
+        raise ValueError('value (%s) must be a digit from %s to %s' % (value, start, end))
+
+def validate_file(path):
+    '''validate that a file exists'''
+    if not os.path.exists(path):
+        raise FileNotFoundError('%s is not a valid file path' % path)
+
+def validate_dispatcher(args):
+    '''validate that the dispatcher of the experiment supports importing data'''
+    experiment_id = get_config_filename(args)
+    experiment_config = Config(experiment_id, Experiments().get_all_experiments()[experiment_id]['logDir']).get_config()
+    if experiment_config.get('tuner') and experiment_config['tuner'].get('builtinTunerName'):
+        dispatcher_name = experiment_config['tuner']['builtinTunerName']
+    elif experiment_config.get('advisor') and experiment_config['advisor'].get('builtinAdvisorName'):
+        dispatcher_name = experiment_config['advisor']['builtinAdvisorName']
+    else: # otherwise it should be a customized one
+        return
+    if dispatcher_name not in TUNERS_SUPPORTING_IMPORT_DATA:
+        if dispatcher_name in TUNERS_NO_NEED_TO_IMPORT_DATA:
+            print_warning("There is no need to import data for %s" % dispatcher_name)
+            exit(0)
+        else:
+            print_error("%s does not support importing additional data" % dispatcher_name)
+            exit(1)
+
+def load_search_space(path):
+    '''load search space content'''
+    content = get_json_content(path)
+    if not content:
+        raise ValueError('searchSpace file should not be empty')
+    return content
+
+def get_query_type(key):
+    '''get update query type'''
+    if key == 'trialConcurrency':
+        return '?update_type=TRIAL_CONCURRENCY'
+    if key == 'maxExecDuration':
+        return '?update_type=MAX_EXEC_DURATION'
+    if key == 'searchSpace':
+        return '?update_type=SEARCH_SPACE'
+    if key == 'maxTrialNum':
+        return '?update_type=MAX_TRIAL_NUM'
+
+def update_experiment_profile(args, key, value):
+    '''call restful server to update experiment profile'''
+    experiments_config = Experiments()
+    experiments_dict = experiments_config.get_all_experiments()
+    rest_port = experiments_dict.get(get_config_filename(args)).get('port')
+    running, _ = check_rest_server_quick(rest_port)
+    if running:
+        response = rest_get(experiment_url(rest_port), REST_TIME_OUT)
+        if response and check_response(response):
+            experiment_profile = json.loads(response.text)
+            experiment_profile['params'][key] = value
+            response = rest_put(experiment_url(rest_port)+get_query_type(key), json.dumps(experiment_profile), REST_TIME_OUT)
+            if response and check_response(response):
+                return response
+    else:
+        print_error('Restful server is not running...')
+    return None
+
+def update_searchspace(args):
+    validate_file(args.filename)
+    content = load_search_space(args.filename)
+    args.port = get_experiment_port(args)
+    if args.port is not None:
+        if update_experiment_profile(args, 'searchSpace', content):
+            print_normal('Update %s success!' % 'searchSpace')
+        else:
+            print_error('Update %s failed!'
% 'searchSpace') + + +def update_concurrency(args): + validate_digit(args.value, 1, 1000) + args.port = get_experiment_port(args) + if args.port is not None: + if update_experiment_profile(args, 'trialConcurrency', int(args.value)): + print_normal('Update %s success!' % 'concurrency') + else: + print_error('Update %s failed!' % 'concurrency') + +def update_duration(args): + #parse time, change time unit to seconds + args.value = parse_time(args.value) + args.port = get_experiment_port(args) + if args.port is not None: + if update_experiment_profile(args, 'maxExecDuration', int(args.value)): + print_normal('Update %s success!' % 'duration') + else: + print_error('Update %s failed!' % 'duration') + +def update_trialnum(args): + validate_digit(args.value, 1, 999999999) + if update_experiment_profile(args, 'maxTrialNum', int(args.value)): + print_normal('Update %s success!' % 'trialnum') + else: + print_error('Update %s failed!' % 'trialnum') + +def load_imported_data(path): + '''load the trial data that will be imported''' + content = json.dumps(get_json_content(path)) + if not content: + raise ValueError('Imported data should not be empty') + return content + +def import_data(args): + '''import additional data to the experiment''' + validate_file(args.filename) + validate_dispatcher(args) + content = load_imported_data(args.filename) + + experiments_dict = Experiments().get_all_experiments() + experiment_id = get_config_filename(args) + rest_port = experiments_dict.get(experiment_id).get('port') + rest_pid = experiments_dict.get(experiment_id).get('pid') + if not detect_process(rest_pid): + print_error('Experiment is not running...') + return + running, _ = check_rest_server_quick(rest_port) + if not running: + print_error('Restful server is not running') + return + + args.port = rest_port + if args.port is not None: + if import_data_to_restful_server(args, content): + pass + else: + print_error('Import data failed!') + +def import_data_to_restful_server(args, content): + '''call restful server to import data to the experiment''' + experiments_dict = Experiments().get_all_experiments() + rest_port = experiments_dict.get(get_config_filename(args)).get('port') + running, _ = check_rest_server_quick(rest_port) + if running: + response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT) + if response and check_response(response): + return response + else: + print_error('Restful server is not running...') + return None diff --git a/nni/tools/nnictl/url_utils.py b/nni/tools/nnictl/url_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f1b807d2d5784e95d2e38ad4b390a1b4306bbe --- /dev/null +++ b/nni/tools/nnictl/url_utils.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
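Every update_* helper above follows the same pattern: GET the current experiment profile, patch one key under 'params', then PUT it back with the matching update_type query string. A minimal sketch of that round-trip; the port value and the helper name bump_concurrency are illustrative:

import json
from nni.tools.nnictl.rest_utils import rest_get, rest_put, check_response
from nni.tools.nnictl.url_utils import experiment_url
from nni.tools.nnictl.constants import REST_TIME_OUT

def bump_concurrency(port, new_value):
    # fetch the current profile from the REST server
    response = rest_get(experiment_url(port), REST_TIME_OUT)
    if not (response and check_response(response)):
        return None
    profile = json.loads(response.text)
    # patch a single field and push it back with the matching query type
    profile['params']['trialConcurrency'] = new_value
    return rest_put(experiment_url(port) + '?update_type=TRIAL_CONCURRENCY',
                    json.dumps(profile), REST_TIME_OUT)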
+ +import socket +import psutil + +BASE_URL = 'http://localhost' + +API_ROOT_URL = '/api/v1/nni' + +EXPERIMENT_API = '/experiment' + +CLUSTER_METADATA_API = '/experiment/cluster-metadata' + +IMPORT_DATA_API = '/experiment/import-data' + +CHECK_STATUS_API = '/check-status' + +TRIAL_JOBS_API = '/trial-jobs' + +EXPORT_DATA_API = '/export-data' + +TENSORBOARD_API = '/tensorboard' + +METRIC_DATA_API = '/metric-data' + +def format_url_path(path): + return API_ROOT_URL if path is None else f'/{path}{API_ROOT_URL}' + +def set_prefix_url(prefix_path): + global API_ROOT_URL + API_ROOT_URL = format_url_path(prefix_path) + +def metric_data_url(port): + '''get metric_data url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, METRIC_DATA_API) + +def check_status_url(port): + '''get check_status url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, CHECK_STATUS_API) + + +def cluster_metadata_url(port): + '''get cluster_metadata_url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, CLUSTER_METADATA_API) + + +def import_data_url(port): + '''get import_data_url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, IMPORT_DATA_API) + + +def experiment_url(port): + '''get experiment_url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, EXPERIMENT_API) + + +def trial_jobs_url(port): + '''get trial_jobs url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, TRIAL_JOBS_API) + + +def trial_job_id_url(port, job_id): + '''get trial_jobs with id url''' + return '{0}:{1}{2}{3}/{4}'.format(BASE_URL, port, API_ROOT_URL, TRIAL_JOBS_API, job_id) + + +def export_data_url(port): + '''get export_data url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, EXPORT_DATA_API) + + +def tensorboard_url(port): + '''get tensorboard url''' + return '{0}:{1}{2}{3}'.format(BASE_URL, port, API_ROOT_URL, TENSORBOARD_API) + + +def get_local_urls(port,prefix): + '''get urls of local machine''' + url_list = [] + for _, info in psutil.net_if_addrs().items(): + for addr in info: + if socket.AddressFamily.AF_INET == addr.family: + url_list.append('http://{0}:{1}{2}'.format(addr.address, port, prefix)) + return url_list diff --git a/nni/tools/package_utils/__init__.py b/nni/tools/package_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..955ba7b74fd7e0f4a6767823350b1555b9c06560 --- /dev/null +++ b/nni/tools/package_utils/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .common import * +from .config_manager import * +from .tuner_factory import * diff --git a/nni/tools/package_utils/common.py b/nni/tools/package_utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..f75978147ba8329c9c327e0bebf1b2e06585a2f9 --- /dev/null +++ b/nni/tools/package_utils/common.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
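The URL helpers above are plain string concatenations of BASE_URL, the port, API_ROOT_URL and an endpoint suffix, with set_prefix_url() rewriting the root for deployments behind a reverse proxy. For example (the port, job id and prefix are illustrative values):

from nni.tools.nnictl.url_utils import experiment_url, trial_job_id_url, set_prefix_url

print(experiment_url(8080))             # http://localhost:8080/api/v1/nni/experiment
print(trial_job_id_url(8080, 'Ab3Cd'))  # http://localhost:8080/api/v1/nni/trial-jobs/Ab3Cd

set_prefix_url('proxy')                 # API_ROOT_URL becomes /proxy/api/v1/nni
print(experiment_url(8080))             # http://localhost:8080/proxy/api/v1/nni/experiment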
+
+__all__ = ['AlgoMeta']
+
+from typing import Dict, NamedTuple, Optional
+
+class AlgoMeta(NamedTuple):
+    name: str
+    class_name: Optional[str]
+    accept_class_args: bool
+    class_args: Optional[dict]
+    validator_class_name: Optional[str]
+    algo_type: str  # 'tuner' | 'assessor' | 'advisor'
+    is_builtin: bool
+    nni_version: Optional[str]
+
+    @staticmethod
+    def load(meta: Dict, algo_type: Optional[str] = None) -> 'AlgoMeta':
+        if algo_type is None:
+            algo_type = meta['algoType']
+        return AlgoMeta(
+            name=meta['builtinName'],
+            class_name=meta['className'],
+            accept_class_args=meta.get('acceptClassArgs', True),
+            class_args=meta.get('classArgs'),
+            validator_class_name=meta.get('classArgsValidator'),
+            algo_type=algo_type,
+            is_builtin=(meta.get('source') == 'nni'),
+            nni_version=meta.get('nniVersion')
+        )
+
+    def dump(self) -> Dict:
+        ret = {}
+        ret['builtinName'] = self.name
+        ret['className'] = self.class_name
+        if not self.accept_class_args:
+            ret['acceptClassArgs'] = False
+        if self.class_args is not None:
+            ret['classArgs'] = self.class_args
+        if self.validator_class_name is not None:
+            ret['classArgsValidator'] = self.validator_class_name
+        ret['source'] = 'nni' if self.is_builtin else 'user'
+        if self.nni_version is not None:
+            ret['nniVersion'] = self.nni_version
+        return ret
diff --git a/nni/tools/package_utils/config_manager.py b/nni/tools/package_utils/config_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa4c89c93ea801e8cc06c37dcf9389d54cb17eeb
--- /dev/null
+++ b/nni/tools/package_utils/config_manager.py
@@ -0,0 +1,75 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+__all__ = [
+    'get_algo_meta',
+    'get_all_algo_meta',
+    'register_algo_meta',
+    'unregister_algo_meta',
+]
+
+from collections import defaultdict
+from typing import List, Optional
+
+import yaml
+
+from nni.runtime.config import get_builtin_config_file, get_config_file
+from .common import AlgoMeta
+
+def get_algo_meta(name: str) -> Optional[AlgoMeta]:
+    """
+    Get meta information of a built-in or registered algorithm.
+    Return None if not found.
+    """
+    for algo in get_all_algo_meta():
+        if algo.name == name:
+            return algo
+    return None
+
+def get_all_algo_meta() -> List[AlgoMeta]:
+    """
+    Get meta information of all built-in and registered algorithms.
+    """
+    return _load_builtin_config() + _load_custom_config()
+
+def register_algo_meta(algo_meta: AlgoMeta) -> None:
+    """
+    Register a custom algorithm.
+    If it already exists, overwrite it.
+    """
+    algos = {algo.name: algo for algo in _load_custom_config()}
+    algos[algo_meta.name] = algo_meta
+    _save_custom_config(algos.values())
+
+def unregister_algo_meta(algo_name: str) -> None:
+    """
+    Unregister a custom algorithm.
+    If it does not exist, do nothing.
+ """ + algos = [algo for algo in _load_custom_config() if algo.name != algo_name] + _save_custom_config(algos) + +def _load_builtin_config(): + path = get_builtin_config_file('builtin_algorithms.yml') + return _load_config_file(path) + +def _load_custom_config(): + path = get_config_file('registered_algorithms.yml') + # for backward compatibility, NNI v2.5- stores all algorithms in this file + return [algo for algo in _load_config_file(path) if not algo.is_builtin] + +def _load_config_file(path): + with open(path) as f: + config = yaml.safe_load(f) + algos = [] + for algo_type in ['tuner', 'assessor', 'advisor']: + for algo in config.get(algo_type + 's', []): + algos.append(AlgoMeta.load(algo, algo_type)) + return algos + +def _save_custom_config(custom_algos): + config = defaultdict(list) + for algo in custom_algos: + config[algo.algo_type + 's'].append(algo.dump()) + text = yaml.dump(dict(config), default_flow_style=False) + get_config_file('registered_algorithms.yml').write_text(text) diff --git a/nni/tools/package_utils/tuner_factory.py b/nni/tools/package_utils/tuner_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..eea02b8432229d1df26c41b0c89ad7b4cd55ef09 --- /dev/null +++ b/nni/tools/package_utils/tuner_factory.py @@ -0,0 +1,198 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +__all__ = [ + 'create_builtin_class_instance', + 'create_customized_class_instance', +] + +import importlib +import os +import sys + +from . import config_manager + +ALGO_TYPES = ['tuners', 'assessors', 'advisors'] + +def get_all_builtin_names(algo_type): + """Get all builtin names of registered algorithms of specified type + + Parameters + ---------- + algo_type: str + can be one of 'tuners', 'assessors' or 'advisors' + + Returns: list of string + ------- + All builtin names of specified type, for example, if algo_type is 'tuners', returns + all builtin tuner names. + """ + algos = config_manager.get_all_algo_meta() + return [meta.name for meta in algos if meta.algo_type == algo_type.rstrip('s')] + +def get_registered_algo_meta(builtin_name, algo_type=None): + """ Get meta information of registered algorithms. + + Parameters + ---------- + builtin_name: str + builtin name. + algo_type: str | None + can be one of 'tuners', 'assessors', 'advisors' or None + + Returns: dict | None + ------- + Returns meta information of speicified builtin alogorithms, for example: + { + 'classArgsValidator': 'nni.smac_tuner.SMACClassArgsValidator', + 'className': 'nni.smac_tuner.SMACTuner', + 'builtinName': 'SMAC' + } + """ + algo = config_manager.get_algo_meta(builtin_name) + if algo is None: + return None + if algo_type is not None and algo.algo_type != algo_type.rstrip('s'): + return None + return algo.dump() + +def parse_full_class_name(full_class_name): + if not full_class_name: + return None, None + parts = full_class_name.split('.') + module_name, class_name = '.'.join(parts[:-1]), parts[-1] + return module_name, class_name + +def get_builtin_module_class_name(algo_type, builtin_name): + """Get module name and class name of all builtin algorithms + + Parameters + ---------- + algo_type: str + can be one of 'tuners', 'assessors', 'advisors' + builtin_name: str + builtin name. 
+ + Returns: tuple + ------- + tuple of (module name, class name) + """ + assert algo_type in ALGO_TYPES + assert builtin_name is not None + meta = get_registered_algo_meta(builtin_name, algo_type) + if not meta: + return None, None + return parse_full_class_name(meta['className']) + +def create_validator_instance(algo_type, builtin_name): + """Create instance of validator class + + Parameters + ---------- + algo_type: str + can be one of 'tuners', 'assessors', 'advisors' + builtin_name: str + builtin name. + + Returns: object | None + ------- + Returns validator class instance. + If specified validator class does not exist, returns None. + """ + assert algo_type in ALGO_TYPES + assert builtin_name is not None + meta = get_registered_algo_meta(builtin_name, algo_type) + if not meta or 'classArgsValidator' not in meta: + return None + module_name, class_name = parse_full_class_name(meta['classArgsValidator']) + class_module = importlib.import_module(module_name) + class_constructor = getattr(class_module, class_name) + + return class_constructor() + +def create_builtin_class_instance(builtin_name, input_class_args, algo_type): + """Create instance of builtin algorithms + + Parameters + ---------- + builtin_name: str + builtin name. + input_class_args: dict + kwargs for builtin class constructor + algo_type: str + can be one of 'tuners', 'assessors', 'advisors' + + Returns: object + ------- + Returns builtin class instance. + """ + assert algo_type in ALGO_TYPES + if builtin_name not in get_all_builtin_names(algo_type): + raise RuntimeError('Builtin name is not found: {}'.format(builtin_name)) + + def parse_algo_meta(algo_meta, input_class_args): + """ + 1. parse class_name field in meta data into module name and class name, + for example: + parse class_name 'nni.hyperopt_tuner.hyperopt_tuner.HyperoptTuner' in meta data into: + module name: nni.hyperopt_tuner.hyperopt_tuner + class name: HyperoptTuner + 2. merge user specified class args together with builtin class args. + """ + assert algo_meta + module_name, class_name = parse_full_class_name(algo_meta['className']) + + class_args = {} + if 'classArgs' in algo_meta: + class_args = algo_meta['classArgs'] + if input_class_args is not None: + class_args.update(input_class_args) + + return module_name, class_name, class_args + + algo_meta = get_registered_algo_meta(builtin_name, algo_type) + module_name, class_name, class_args = parse_algo_meta(algo_meta, input_class_args) + + if importlib.util.find_spec(module_name) is None: + raise RuntimeError('Builtin module can not be loaded: {}'.format(module_name)) + + class_module = importlib.import_module(module_name) + class_constructor = getattr(class_module, class_name) + + instance = class_constructor(**class_args) + + return instance + +def create_customized_class_instance(class_params): + """Create instance of customized algorithms + + Parameters + ---------- + class_params: dict + class_params should contains following keys: + codeDirectory: code directory + className: qualified class name + classArgs (optional): kwargs pass to class constructor + + Returns: object + ------- + Returns customized class instance. 
+ """ + + code_dir = class_params.get('codeDirectory') + qualified_class_name = class_params.get('className') + class_args = class_params.get('classArgs') + + if code_dir and not os.path.isdir(code_dir): + raise ValueError(f'Directory not found: {code_dir}') + + sys.path.append(code_dir) + module_name, class_name = qualified_class_name.rsplit('.', 1) + class_module = importlib.import_module(module_name) + class_constructor = getattr(class_module, class_name) + + if class_args is None: + class_args = {} + instance = class_constructor(**class_args) + + return instance diff --git a/nni/tools/trial_tool/__init__.py b/nni/tools/trial_tool/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nni/tools/trial_tool/aml_channel.py b/nni/tools/trial_tool/aml_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..c8e1d7484a427b80b657d1c889c3a1cbe4aafecc --- /dev/null +++ b/nni/tools/trial_tool/aml_channel.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from azureml.core.run import Run # pylint: disable=import-error +from .base_channel import BaseChannel +from .log_utils import LogType, nni_log + + +class AMLChannel(BaseChannel): + def __init__(self, args): + self.args = args + self.run = Run.get_context() + super(AMLChannel, self).__init__(args) + self.current_message_index = -1 + + def _inner_open(self): + pass + + def _inner_close(self): + pass + + def _inner_send(self, message): + try: + self.run.log('trial_runner', message.decode('utf8')) + except Exception as exception: + nni_log(LogType.Error, 'meet unhandled exception when send message: %s' % exception) + + def _inner_receive(self): + messages = [] + message_dict = self.run.get_metrics() + if 'nni_manager' not in message_dict: + return [] + message_list = message_dict['nni_manager'] + if not message_list: + return messages + if type(message_list) is list: + if self.current_message_index < len(message_list) - 1: + messages = message_list[self.current_message_index + 1 : len(message_list)] + self.current_message_index = len(message_list) - 1 + elif self.current_message_index == -1: + messages = [message_list] + self.current_message_index += 1 + newMessage = [] + for message in messages: + # receive message is string, to get consistent result, encode it here. + newMessage.append(message.encode('utf8')) + return newMessage diff --git a/nni/tools/trial_tool/base_channel.py b/nni/tools/trial_tool/base_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..b9d3392abc0b15ce7eb90b07df4107902fb65d9d --- /dev/null +++ b/nni/tools/trial_tool/base_channel.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import json +import threading +import time +from abc import ABC, abstractmethod +from queue import Empty, Queue + +from .log_utils import LogType, nni_log +from .commands import CommandType + +INTERVAL_SECONDS = 0.5 + + +class BaseChannel(ABC): + def __init__(self, args): + self.is_keep_parsed = args.node_count > 1 + self.args = args + self.node_id = self.args.node_id + + @abstractmethod + def _inner_send(self, message): + pass + + @abstractmethod + def _inner_receive(self): + return [] + + @abstractmethod + def _inner_open(self): + pass + + @abstractmethod + def _inner_close(self): + pass + + def open(self): + # initialize receive, send threads. 
+ self.is_running = True + self.receive_queue = Queue() + self.receive_thread = threading.Thread(target=self._receive_loop) + self.receive_thread.start() + self.send_queue = Queue() + self.send_thread = threading.Thread(target=self._send_loop) + self.send_thread.start() + + self._inner_open() + + client_info = { + "isReady": True, + "runnerId": self.args.runner_id, + "expId": self.args.exp_id, + } + nni_log(LogType.Info, 'Channel: send ready information %s' % client_info) + self.send(CommandType.Initialized, client_info) + + def close(self): + self.is_running = False + try: + self._inner_close() + except Exception as err: + # ignore any error on closing + print("error on closing channel: %s" % err) + + def send(self, command, data): + """Send command to Training Service. + command: CommandType object. + data: string payload. + the message is sent synchronized. + """ + data["node"] = self.node_id + data = json.dumps(data) + data = data.encode('utf8') + message = b'%b%014d%b' % (command.value, len(data), data) + self.send_queue.put(message) + + def sent(self): + return self.send_queue.qsize() == 0 + + def received(self): + return self.receive_queue.qsize() > 0 + + def receive(self): + """Receive a command from Training Service. + Returns a tuple of command (CommandType) and payload (str) + """ + command = None + data = None + + try: + command_content = self.receive_queue.get(False) + if command_content is not None: + if (len(command_content) < 16): + # invalid header + nni_log(LogType.Error, 'incorrect command is found, command must be greater than 16 bytes!') + return None, None + header = command_content[:16] + command = CommandType(header[:2]) + length = int(header[2:]) + if (len(command_content)-16 != length): + nni_log(LogType.Error, 'incorrect command length, length {}, actual data length is {}, header {}.' + .format(length, len(command_content)-16, header)) + return None, None + data = command_content[16:16+length] + data = json.loads(data.decode('utf8')) + if self.node_id is None: + nni_log(LogType.Info, 'Received command, header: [%s], data: [%s]' % (header, data)) + else: + nni_log(LogType.Info, 'Received command(%s), header: [%s], data: [%s]' % (self.node_id, header, data)) + except Empty: + # do nothing, if no command received. + pass + except Exception as identifier: + nni_log(LogType.Error, 'meet unhandled exception in base_channel: %s' % identifier) + return command, data + + def _fetch_message(self, buffer, has_new_line=False): + messages = [] + while(len(buffer)) >= 16: + header = buffer[:16] + length = int(header[2:]) + + message_length = length+16 + total_length = message_length + if has_new_line: + total_length += 1 + + # break, if buffer is too short. + if len(buffer) < total_length: + break + data = buffer[16:message_length] + if has_new_line and 10 != buffer[total_length-1]: + nni_log(LogType.Error, 'end of message should be \\n, but got {}'.format(self.in_cache[total_length-1])) + buffer = buffer[total_length:] + messages.append(header + data) + + return messages, buffer + + def _receive_loop(self): + while (self.is_running): + messages = self._inner_receive() + if messages is not None: + for message in messages: + self.receive_queue.put(message) + time.sleep(INTERVAL_SECONDS) + + def _send_loop(self): + while (self.is_running): + message = None + try: + # no sleep, since it's a block call with INTERVAL_SECONDS second timeout + message = self.send_queue.get(True, INTERVAL_SECONDS) + except Empty: + # do nothing, if no command received. 
+                pass
+            if message is not None:
+                self._inner_send(message)
diff --git a/nni/tools/trial_tool/commands.py b/nni/tools/trial_tool/commands.py
new file mode 100644
index 0000000000000000000000000000000000000000..86b10a2fe9a85d17bdfb4d9ec57b0b6cceb250da
--- /dev/null
+++ b/nni/tools/trial_tool/commands.py
@@ -0,0 +1,25 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from enum import Enum
+
+
+class CommandType(Enum):
+    Initialize = b'IN'
+    RequestTrialJobs = b'GE'
+    ReportMetricData = b'ME'
+    ReportGpuInfo = b'GI'
+    UpdateSearchSpace = b'SS'
+    ImportData = b'FD'
+    AddCustomizedTrialJob = b'AD'
+    TrialEnd = b'EN'
+    Terminate = b'TE'
+    Ping = b'PI'
+
+    Initialized = b'ID'
+    NewTrialJob = b'TR'
+    SendTrialJobParameter = b'SP'
+    NoMoreTrialJobs = b'NO'
+    KillTrialJob = b'KI'
+    StdOut = b'SO'
+    VersionCheck = b'VC'
diff --git a/nni/tools/trial_tool/constants.py b/nni/tools/trial_tool/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..bef401337039d46aa23fc1e5816b58978d07e104
--- /dev/null
+++ b/nni/tools/trial_tool/constants.py
@@ -0,0 +1,24 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+
+API_ROOT_URL = '/api/v1/nni-pai'
+
+BASE_URL = 'http://{}'
+
+LOG_DIR = os.environ['NNI_OUTPUT_DIR']
+
+NNI_PLATFORM = os.environ['NNI_PLATFORM']
+
+STDOUT_FULL_PATH = os.path.join(LOG_DIR, 'stdout')
+
+STDERR_FULL_PATH = os.path.join(LOG_DIR, 'stderr')
+
+STDOUT_API = '/stdout'
+VERSION_API = '/version'
+PARAMETER_META_API = '/parameter-file-meta'
+NNI_SYS_DIR = os.environ['NNI_SYS_DIR']
+NNI_TRIAL_JOB_ID = os.environ['NNI_TRIAL_JOB_ID']
+NNI_EXP_ID = os.environ['NNI_EXP_ID']
+MULTI_PHASE = os.environ['MULTI_PHASE']
diff --git a/nni/tools/trial_tool/file_channel.py b/nni/tools/trial_tool/file_channel.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a431d25f7ee3a4eeff973667d345ce2ddada1cb
--- /dev/null
+++ b/nni/tools/trial_tool/file_channel.py
@@ -0,0 +1,75 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
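BaseChannel above frames every message as a 2-byte CommandType value, a zero-padded 14-digit payload length, and the JSON payload (see send() and _fetch_message()). A standalone sketch of that framing; note the real send() also injects a 'node' field into the payload first:

import json
from nni.tools.trial_tool.commands import CommandType

def pack(command, payload):
    data = json.dumps(payload).encode('utf8')
    return b'%b%014d%b' % (command.value, len(data), data)  # 16-byte header + body

def unpack(message):
    header = message[:16]              # 2 bytes command type + 14 digits of length
    command = CommandType(header[:2])
    length = int(header[2:])
    payload = json.loads(message[16:16 + length].decode('utf8'))
    return command, payload

msg = pack(CommandType.ReportGpuInfo, {'gpuCount': 0})
assert unpack(msg) == (CommandType.ReportGpuInfo, {'gpuCount': 0})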
+ +import os + +from .base_channel import BaseChannel + +command_path = "./commands" +runner_commands_file_name_prefix = "runner_commands" +manager_commands_file_name = "manager_commands.txt" + + +class FileChannel(BaseChannel): + + def __init__(self, args): + self.node_id = args.node_id + self.out_file = None + self.in_file = None + self.in_offset = 0 + self.in_cache = b"" + + super(FileChannel, self).__init__(args) + + def _inner_open(self): + pass + + def _inner_close(self): + if self.out_file is not None: + self.out_file.close() + self.out_file = None + if self.in_file is not None: + self.in_file.close() + self.in_file = None + + def _inner_send(self, message): + if self.out_file is None: + if not os.path.exists(command_path): + os.makedirs(command_path, exist_ok=True) + + if self.node_id is None: + file_name = os.path.join(command_path, "%s.txt" % runner_commands_file_name_prefix) + else: + file_name = os.path.join(command_path, "%s_%s.txt" % ( + runner_commands_file_name_prefix, self.node_id)) + self.out_file = open(file_name, "ab") + + self.out_file.write(message) + self.out_file.write(b'\n') + self.out_file.flush() + + def _open_manager_command(self): + full_name = os.path.join(command_path, manager_commands_file_name) + + if self.in_file is not None and self.in_file.closed: + self.in_file = None + + if self.in_file is None and os.path.exists(full_name): + self.in_file = open(full_name, "rb") + self.in_file.seek(self.in_offset) + + def _inner_receive(self): + messages = [] + + if self.in_file is None: + self._open_manager_command() + if self.in_file is not None: + self.in_file.seek(0, os.SEEK_END) + new_offset = self.in_file.tell() + self.in_file.seek(self.in_offset, os.SEEK_SET) + count = new_offset - self.in_offset + if count > 0: + self.in_cache += self.in_file.read(count) + self.in_offset = new_offset + messages, self.in_cache = self._fetch_message(self.in_cache, True) + return messages diff --git a/nni/tools/trial_tool/gpu.py b/nni/tools/trial_tool/gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..8d5dffe82e5403a184ff25717df9b475ea977b50 --- /dev/null +++ b/nni/tools/trial_tool/gpu.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
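FileChannel above exchanges these framed messages through plain files: the runner appends newline-terminated messages to commands/runner_commands*.txt and tails commands/manager_commands.txt from its last read offset. A sketch of producing a manager_commands.txt entry the runner could consume; the trial id is an illustrative value:

import json
import os

def append_manager_command(command_value, payload, path='./commands'):
    os.makedirs(path, exist_ok=True)
    data = json.dumps(payload).encode('utf8')
    # trailing \n is required because _fetch_message() is called with has_new_line=True
    message = b'%b%014d%b\n' % (command_value, len(data), data)
    with open(os.path.join(path, 'manager_commands.txt'), 'ab') as f:
        f.write(message)

append_manager_command(b'KI', 'Ab3Cd')  # KillTrialJob carries a bare trial id string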
+ +import subprocess +import time +import traceback +from xml.dom import minidom + + +def collect_gpu_usage(node_id): + cmd = 'rocm-smi --showuse --showmemuse --showmeminfo vis_vram --showid --json'.split() + info = None + try: + smi_output = subprocess.check_output(cmd) + info = parse_nvidia_smi_result(smi_output) + except Exception: + traceback.print_exc() + info = gen_empty_gpu_metric() + return info + + +def parse_nvidia_smi_result(smi): + try: + output = {} + gpuList = eval(smi) + output["Timestamp"] = time.asctime(time.localtime()) + output["gpuCount"] = len(gpuList) + output["gpuInfos"] = [] + for gpuIndex, gpu in enumerate(gpuList): + gpuInfo = {} + gpuInfo['index'] = gpuIndex + gpuInfo['gpuUtil'] = gpuList[gpu][list(gpuList[gpu].keys())[1]] + "%" + gpuInfo['gpuMemUtil'] = gpuList[gpu][list(gpuList[gpu].keys())[2]] + "%" + runningProNumber = 1 + gpuInfo['activeProcessNum'] = runningProNumber + + gpuInfo['gpuType'] = gpuList[gpu][list(gpuList[gpu].keys())[0]] + gpuInfo['gpuMemTotal'] = round(float(gpuList[gpu][list(gpuList[gpu].keys())[3]])/1048576, 2) + gpuInfo['gpuMemUsed'] = round(float(gpuList[gpu][list(gpuList[gpu].keys())[4]])/1048576, 2) + gpuInfo['gpuMemFree'] = str(gpuInfo['gpuMemTotal'] - gpuInfo['gpuMemUsed']) + gpuInfo['gpuMemTotal'] = str(gpuInfo['gpuMemTotal']) + "MB" + gpuInfo['gpuMemUsed'] = str(gpuInfo['gpuMemUsed']) + "MB" + gpuInfo['gpuMemFree'] = str(gpuInfo['gpuMemFree']) + "MB" + + output["gpuInfos"].append(gpuInfo) + except Exception: + traceback.print_exc() + output = {} + return output + + +def gen_empty_gpu_metric(): + try: + output = {} + output["Timestamp"] = time.asctime(time.localtime()) + output["gpuCount"] = 0 + output["gpuInfos"] = [] + except Exception: + traceback.print_exc() + output = {} + return output diff --git a/nni/tools/trial_tool/hdfsClientUtility.py b/nni/tools/trial_tool/hdfsClientUtility.py new file mode 100644 index 0000000000000000000000000000000000000000..05d4ea0d85675b5c763bf27f29db50b7900e1d03 --- /dev/null +++ b/nni/tools/trial_tool/hdfsClientUtility.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
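Note that collect_gpu_usage above shells out to rocm-smi even though the parser keeps its nvidia-derived name, and parse_nvidia_smi_result indexes each card's dict positionally rather than by key. The sample below sketches the shape that positional indexing assumes; the key names and values are assumptions for illustration, not captured from a real device:

from nni.tools.trial_tool.gpu import parse_nvidia_smi_result

sample = str({
    'card0': {
        'Card series': 'Radeon VII',                 # index 0 -> gpuType
        'GPU use (%)': '12',                         # index 1 -> gpuUtil
        'GPU memory use (%)': '34',                  # index 2 -> gpuMemUtil
        'VRAM Total Memory (B)': '17163091968',      # index 3 -> gpuMemTotal (bytes to MB)
        'VRAM Total Used Memory (B)': '1048576000',  # index 4 -> gpuMemUsed
    }
})
info = parse_nvidia_smi_result(sample)    # eval()s the text, then derives MB strings
print(info['gpuInfos'][0]['gpuMemUsed'])  # 1000.0MB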
+
+import os
+import posixpath
+from .log_utils import LogType, nni_log
+
+def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
+    '''Copy directory from HDFS to local'''
+    if not os.path.exists(localDirectory):
+        os.makedirs(localDirectory)
+    try:
+        listing = hdfsClient.list_status(hdfsDirectory)
+    except Exception as exception:
+        nni_log(LogType.Error, 'List hdfs directory {0} error: {1}'.format(hdfsDirectory, str(exception)))
+        raise exception
+
+    for f in listing:
+        if f.type == 'DIRECTORY':
+            subHdfsDirectory = posixpath.join(hdfsDirectory, f.pathSuffix)
+            subLocalDirectory = os.path.join(localDirectory, f.pathSuffix)
+            copyHdfsDirectoryToLocal(subHdfsDirectory, subLocalDirectory, hdfsClient)
+        elif f.type == 'FILE':
+            hdfsFilePath = posixpath.join(hdfsDirectory, f.pathSuffix)
+            localFilePath = os.path.join(localDirectory, f.pathSuffix)
+            copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient)
+        else:
+            raise AssertionError('unexpected type {}'.format(f.type))
+
+def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True):
+    '''Copy file from HDFS to local'''
+    if not hdfsClient.exists(hdfsFilePath):
+        raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath))
+    try:
+        file_status = hdfsClient.get_file_status(hdfsFilePath)
+        if file_status.type != 'FILE':
+            raise Exception('HDFS file path {} is not a file'.format(hdfsFilePath))
+    except Exception as exception:
+        nni_log(LogType.Error, 'Get hdfs file {0} status error: {1}'.format(hdfsFilePath, str(exception)))
+        raise exception
+
+    if os.path.exists(localFilePath) and override:
+        os.remove(localFilePath)
+    try:
+        hdfsClient.copy_to_local(hdfsFilePath, localFilePath)
+    except Exception as exception:
+        nni_log(LogType.Error, 'Copy hdfs file {0} to {1} error: {2}'.format(hdfsFilePath, localFilePath, str(exception)))
+        raise exception
+    nni_log(LogType.Info, 'Successfully copied hdfs file {0} to {1}, {2} bytes'.format(hdfsFilePath, localFilePath, file_status.length))
+
+def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
+    '''Copy directory from local to HDFS'''
+    if not os.path.exists(localDirectory):
+        raise Exception('Local directory does not exist!')
+    hdfsClient.mkdirs(hdfsDirectory)
+    result = True
+    for file in os.listdir(localDirectory):
+        file_path = os.path.join(localDirectory, file)
+        if os.path.isdir(file_path):
+            hdfs_directory = os.path.join(hdfsDirectory, file)
+            try:
+                result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
+            except Exception as exception:
+                nni_log(LogType.Error,
+                        'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
+                result = False
+        else:
+            hdfs_file_path = os.path.join(hdfsDirectory, file)
+            try:
+                result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
+            except Exception as exception:
+                nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
+                result = False
+    return result
+
+def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):
+    '''Copy a local file to HDFS directory'''
+    if not os.path.exists(localFilePath):
+        raise Exception('Local file path does not exist!')
+    if os.path.isdir(localFilePath):
+        raise Exception('localFile should not be a directory!')
+    if hdfsClient.exists(hdfsFilePath):
+        if override:
+            hdfsClient.delete(hdfsFilePath)
+        else:
+            return False
+    try:
+        hdfsClient.copy_from_local(localFilePath, hdfsFilePath)
+        return True
+    except
Exception as exception: + nni_log(LogType.Error, 'Copy local file {0} to hdfs file {1} error: {2}'.format(localFilePath, hdfsFilePath, str(exception))) + return False diff --git a/nni/tools/trial_tool/log_utils.py b/nni/tools/trial_tool/log_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8d5b3d94c0b60d60b978b94d967f092bfa9a1c56 --- /dev/null +++ b/nni/tools/trial_tool/log_utils.py @@ -0,0 +1,219 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import sys +import json +import logging +import logging.handlers +import time +import threading +import re + +from datetime import datetime +from enum import Enum, unique +from logging import StreamHandler + +from queue import Queue + +from .rest_utils import rest_post +from .url_utils import gen_send_stdout_url +from .commands import CommandType + + +@unique +class LogType(Enum): + Trace = 'TRACE' + Debug = 'DEBUG' + Info = 'INFO' + Warning = 'WARNING' + Error = 'ERROR' + Fatal = 'FATAL' + + +@unique +class StdOutputType(Enum): + Stdout = 'stdout', + Stderr = 'stderr' + + +def nni_log(log_type, log_message): + '''Log message into stdout''' + dt = datetime.now() + print('[{0}] {1} {2}'.format(dt, log_type.value, log_message), flush=True) + + +class NNIRestLogHanlder(StreamHandler): + def __init__(self, host, port, tag, trial_id, channel, std_output_type=StdOutputType.Stdout): + StreamHandler.__init__(self) + self.host = host + self.port = port + self.tag = tag + self.std_output_type = std_output_type + self.trial_id = trial_id + self.channel = channel + self.orig_stdout = sys.__stdout__ + self.orig_stderr = sys.__stderr__ + + def emit(self, record): + log_entry = {} + log_entry['tag'] = self.tag + log_entry['stdOutputType'] = self.std_output_type.name + log_entry['msg'] = self.format(record) + + try: + if self.channel is None: + rest_post(gen_send_stdout_url(self.host, self.port), json.dumps(log_entry), 10, True) + else: + if self.trial_id is not None: + log_entry["trial"] = self.trial_id + self.channel.send(CommandType.StdOut, log_entry) + except Exception as e: + self.orig_stderr.write(str(e) + '\n') + self.orig_stderr.flush() + + +class RemoteLogger(object): + """ + NNI remote logger + """ + + def __init__(self, syslog_host, syslog_port, tag, std_output_type, log_collection, trial_id=None, channel=None, log_level=logging.INFO): + ''' + constructor + ''' + logger_name = 'nni_syslog_{}'.format(tag) + # to prevent multiple trial logged in same logger + if trial_id is not None: + logger_name = '{}_{}'.format(logger_name, trial_id) + self.logger = logging.getLogger(logger_name) + self.log_level = log_level + self.logger.setLevel(self.log_level) + self.pipeReader = None + self.handler = NNIRestLogHanlder(syslog_host, syslog_port, tag, trial_id, channel) + self.logger.addHandler(self.handler) + if std_output_type == StdOutputType.Stdout: + self.orig_stdout = sys.__stdout__ + else: + self.orig_stdout = sys.__stderr__ + self.log_collection = log_collection + + def get_pipelog_reader(self): + ''' + Get pipe for remote logger + ''' + self.pipeReader = PipeLogReader(self.logger, self.log_collection, logging.INFO) + return self.pipeReader + + def flush(self): + ''' + Add flush in handler + ''' + for handler in self.logger.handlers: + handler.flush() + + def write(self, buf): + ''' + Write buffer data into logger/stdout + ''' + for line in buf.rstrip().splitlines(): + self.orig_stdout.write(line.rstrip() + '\n') + self.orig_stdout.flush() + try: + self.logger.log(self.log_level, 
line.rstrip())
+            except Exception:
+                pass
+
+    def close(self):
+        '''
+        Close handlers and resources
+        '''
+        if self.pipeReader is not None:
+            self.pipeReader.set_process_exit()
+        for handler in self.logger.handlers:
+            handler.close()
+            self.logger.removeHandler(handler)
+
+
+class PipeLogReader(threading.Thread):
+    """
+    The reader thread reads log data from pipe
+    """
+
+    def __init__(self, logger, log_collection, log_level=logging.INFO):
+        """Setup the object with a logger and a loglevel
+        and start the thread
+        """
+        threading.Thread.__init__(self)
+        self.queue = Queue()
+        self.logger = logger
+        self.daemon = False
+        self.log_level = log_level
+        self.fdRead, self.fdWrite = os.pipe()
+        self.pipeReader = os.fdopen(self.fdRead)
+        self.orig_stdout = sys.__stdout__
+        self._is_read_completed = False
+        self.process_exit = False
+        self.log_collection = log_collection
+        self.log_pattern = re.compile(r'NNISDK_MEb\'.*\'$')
+
+        def _populateQueue(stream, queue):
+            '''
+            Collect lines from 'stream' and put them in 'queue'.
+            '''
+            time.sleep(1)
+            while True:
+                cur_process_exit = self.process_exit
+                try:
+                    line = self.queue.get(True, 5)
+                    try:
+                        self.logger.log(self.log_level, line.rstrip())
+                    except Exception:
+                        pass
+                except Exception:
+                    if cur_process_exit == True:
+                        self._is_read_completed = True
+                        break
+
+        self.pip_log_reader_thread = threading.Thread(target=_populateQueue, args=(self.pipeReader, self.queue))
+        self.pip_log_reader_thread.daemon = True
+        self.start()
+        self.pip_log_reader_thread.start()
+
+    def fileno(self):
+        """Return the write file descriptor of the pipe
+        """
+        return self.fdWrite
+
+    def run(self):
+        """Run the thread, logging everything.
+        If the log_collection is 'none', the log content will not be enqueued
+        """
+        for line in iter(self.pipeReader.readline, ''):
+            self.orig_stdout.write(line.rstrip() + '\n')
+            self.orig_stdout.flush()
+
+            if self.log_collection == 'none':
+                search_result = self.log_pattern.search(line)
+                if search_result:
+                    metrics = search_result.group(0)
+                    self.queue.put(metrics+'\n')
+            else:
+                self.queue.put(line)
+
+        self.pipeReader.close()
+
+    def close(self):
+        """Close the write end of the pipe.
+        """
+        os.close(self.fdWrite)
+
+    @property
+    def is_read_completed(self):
+        """Return if read is completed
+        """
+        return self._is_read_completed
+
+    def set_process_exit(self):
+        self.process_exit = True
+        return self.process_exit
diff --git a/nni/tools/trial_tool/rest_utils.py b/nni/tools/trial_tool/rest_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..959209c7470bdb5dfe1ca32af91ba85757dd51e0
--- /dev/null
+++ b/nni/tools/trial_tool/rest_utils.py
@@ -0,0 +1,44 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
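RemoteLogger above is file-like (it implements write() and flush()), so redirecting a process's output through it is a single assignment. A minimal sketch, assuming a manager is reachable at the illustrative address below; any log_collection value other than 'none' keeps full lines (see PipeLogReader.run):

import sys
from nni.tools.trial_tool.log_utils import RemoteLogger, StdOutputType

logger = RemoteLogger('127.0.0.1', 8081, 'trial', StdOutputType.Stdout, 'full')
sys.stdout = logger           # print() now also posts each line to the NNI manager
print('accuracy: 0.93')       # forwarded via NNIRestLogHanlder.emit()
sys.stdout = sys.__stdout__   # restore the real stdout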
+ +import requests + +def rest_get(url, timeout): + '''Call rest get method''' + try: + response = requests.get(url, timeout=timeout) + return response + except Exception as e: + print('Get exception {0} when sending http get to url {1}'.format(str(e), url)) + return None + +def rest_post(url, data, timeout, rethrow_exception=False): + '''Call rest post method''' + try: + response = requests.post(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\ + data=data, timeout=timeout) + return response + except Exception as e: + if rethrow_exception is True: + raise + print('Get exception {0} when sending http post to url {1}'.format(str(e), url)) + return None + +def rest_put(url, data, timeout): + '''Call rest put method''' + try: + response = requests.put(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\ + data=data, timeout=timeout) + return response + except Exception as e: + print('Get exception {0} when sending http put to url {1}'.format(str(e), url)) + return None + +def rest_delete(url, timeout): + '''Call rest delete method''' + try: + response = requests.delete(url, timeout=timeout) + return response + except Exception as e: + print('Get exception {0} when sending http delete to url {1}'.format(str(e), url)) + return None diff --git a/nni/tools/trial_tool/trial.py b/nni/tools/trial_tool/trial.py new file mode 100644 index 0000000000000000000000000000000000000000..1d84ee8b824ac1b77dc2d05448751f0277f12e8b --- /dev/null +++ b/nni/tools/trial_tool/trial.py @@ -0,0 +1,163 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import ctypes +import os +import sys +import shlex +import tarfile +import time +from datetime import datetime +from subprocess import Popen + +import psutil + +from .log_utils import LogType, RemoteLogger, StdOutputType, nni_log +from .commands import CommandType + +trial_output_path_name = ".nni" + + +class Trial: + def __init__(self, args, data): + self.process = None + self.data = data + self.args = args + self.command_channel = args.command_channel + self.trial_syslogger_stdout = None + + global NNI_TRIAL_JOB_ID + self.id = data["trialId"] + if self.id is None: + raise Exception("trial_id is not found in %s" % data) + os.environ['NNI_TRIAL_JOB_ID'] = self.id + NNI_TRIAL_JOB_ID = self.id + + # for multiple nodes. If it's None, it means single node. 
+ self.node_id = args.node_id + if self.node_id is None: + self.name = self.id + else: + self.name = "%s_%s" % (self.id, self.node_id) + + def run(self): + # redirect trial's stdout and stderr to syslog + self.trial_syslogger_stdout = RemoteLogger(self.args.nnimanager_ip, self.args.nnimanager_port, 'trial', StdOutputType.Stdout, + self.args.log_collection, self.id, self.args.command_channel) + + nni_log(LogType.Info, "%s: start to run trial" % self.name) + + trial_working_dir = os.path.realpath(os.path.join(os.curdir, "..", "..", "trials", self.id)) + self.trial_output_dir = os.path.join(trial_working_dir, trial_output_path_name) + trial_code_dir = os.path.join(trial_working_dir, "code") + trial_nnioutput_dir = os.path.join(trial_working_dir, "nnioutput") + + environ = os.environ.copy() + environ['NNI_TRIAL_SEQ_ID'] = str(self.data["sequenceId"]) + environ['NNI_OUTPUT_DIR'] = os.path.join(trial_working_dir, "nnioutput") + environ['NNI_SYS_DIR'] = trial_working_dir + self.working_dir = trial_working_dir + + # prepare code and parameters + prepared_flag_file_name = os.path.join(trial_working_dir, "trial_prepared") + if not os.path.exists(trial_working_dir): + os.makedirs(trial_working_dir, exist_ok=True) + + os.makedirs(self.trial_output_dir, exist_ok=True) + os.makedirs(trial_nnioutput_dir, exist_ok=True) + # prepare code + os.makedirs(trial_code_dir, exist_ok=True) + with tarfile.open(os.path.join("..", "nni-code.tar.gz"), "r:gz") as tar: + tar.extractall(trial_code_dir) + + # save parameters + nni_log(LogType.Info, '%s: saving parameter %s' % (self.name, self.data["parameter"]["value"])) + parameter_file_name = os.path.join(trial_working_dir, "parameter.cfg") + with open(parameter_file_name, "w") as parameter_file: + parameter_file.write(self.data["parameter"]["value"]) + + # ready flag + with open(prepared_flag_file_name, "w") as prepared_flag_file: + prepared_flag_file.write("%s" % (int(datetime.now().timestamp() * 1000))) + + # make sure code prepared by other node. + if self.node_id is not None: + while True: + if os.path.exists(prepared_flag_file_name): + break + time.sleep(0.1) + + trial_command = self.args.trial_command + + gpuIndices = self.data.get("gpuIndices") + if (gpuIndices is not None): + if sys.platform == "win32": + trial_command = 'set HIP_VISIBLE_DEVICES="%s " && call %s' % (gpuIndices, trial_command) + else: + trial_command = 'HIP_VISIBLE_DEVICES="%s " %s' % (gpuIndices, trial_command) + + self.log_pipe_stdout = self.trial_syslogger_stdout.get_pipelog_reader() + self.process = Popen(trial_command, shell=True, stdout=self.log_pipe_stdout, + stderr=self.log_pipe_stdout, cwd=trial_code_dir, env=dict(environ)) + nni_log(LogType.Info, '{0}: spawns a subprocess (pid {1}) to run command: {2}'. 
+                format(self.name, self.process.pid, shlex.split(trial_command)))
+
+    def save_parameter_file(self, command_data):
+        parameters = command_data["parameters"]
+        file_index = int(parameters["index"])
+        if file_index == 0:
+            parameter_file_name = "parameter.cfg"
+        else:
+            parameter_file_name = "parameter_{}.cfg".format(file_index)
+        parameter_file_name = os.path.join(self.working_dir, parameter_file_name)
+        with open(parameter_file_name, "w") as parameter_file:
+            nni_log(LogType.Info, '%s: saving parameter %s' % (self.name, parameters["value"]))
+            parameter_file.write(parameters["value"])
+
+    def is_running(self):
+        if (self.process is None):
+            return False
+
+        retCode = self.process.poll()
+        # child worker process exits and all stdout data is read
+        if retCode is not None and self.log_pipe_stdout.set_process_exit() and self.log_pipe_stdout.is_read_completed == True:
+            # In Windows, the retCode -1 is 4294967295. It's larger than c_long, and raises OverflowError.
+            # So convert it to int32.
+            retCode = ctypes.c_long(retCode).value
+            nni_log(LogType.Info, '{0}: subprocess terminated. Exit code is {1}.'.format(self.name, retCode))
+
+            end_time = int(datetime.now().timestamp() * 1000)
+            end_message = {
+                "code": retCode,
+                "time": end_time,
+                "trial": self.id,
+            }
+            self.command_channel.send(CommandType.TrialEnd, end_message)
+            self.cleanup()
+            return False
+        else:
+            return True
+
+    def kill(self, trial_id=None):
+        if trial_id == self.id or trial_id is None:
+            if self.process is not None:
+                try:
+                    nni_log(LogType.Info, "%s: killing trial" % self.name)
+                    for child in psutil.Process(self.process.pid).children(True):
+                        child.kill()
+                    self.process.kill()
+                except psutil.NoSuchProcess:
+                    nni_log(LogType.Info, "kill trial %s failed: %s does not exist!" % (trial_id, self.process.pid))
+                except Exception as ex:
+                    nni_log(LogType.Error, "kill trial %s failed: %s " % (trial_id, str(ex)))
+            self.cleanup()
+
+    def cleanup(self):
+        nni_log(LogType.Info, "%s: clean up trial" % self.name)
+        self.process = None
+        if self.log_pipe_stdout is not None:
+            self.log_pipe_stdout.set_process_exit()
+            self.log_pipe_stdout = None
+        if self.trial_syslogger_stdout is not None:
+            self.trial_syslogger_stdout.close()
+            self.trial_syslogger_stdout = None
diff --git a/nni/tools/trial_tool/trial_keeper.py b/nni/tools/trial_tool/trial_keeper.py
new file mode 100644
index 0000000000000000000000000000000000000000..536c669d00cd007c356f56ef7f9f5e698a8a7343
--- /dev/null
+++ b/nni/tools/trial_tool/trial_keeper.py
@@ -0,0 +1,264 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
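Trial.run above scopes GPUs by prefixing the user command with HIP_VISIBLE_DEVICES (this tree targets ROCm; upstream NNI sets CUDA_VISIBLE_DEVICES here). The wrapping reduces to the sketch below; the command and indices are illustrative, and the trailing space inside the quotes mirrors the code above:

import sys

def scope_gpus(trial_command, gpu_indices):
    # mirror the gpuIndices handling in Trial.run
    if gpu_indices is None:
        return trial_command
    if sys.platform == 'win32':
        return 'set HIP_VISIBLE_DEVICES="%s " && call %s' % (gpu_indices, trial_command)
    return 'HIP_VISIBLE_DEVICES="%s " %s' % (gpu_indices, trial_command)

print(scope_gpus('python train.py', '0,1'))
# HIP_VISIBLE_DEVICES="0,1 " python train.py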
+
+import argparse
+import ctypes
+import json
+import logging
+import os
+import re
+import shlex
+import sys
+import threading
+import time
+from subprocess import Popen
+
+import pkg_resources
+from pyhdfs import HdfsClient
+
+from .constants import (LOG_DIR, MULTI_PHASE, NNI_EXP_ID, NNI_PLATFORM,
+                        NNI_SYS_DIR, NNI_TRIAL_JOB_ID)
+from .hdfsClientUtility import (copyDirectoryToHdfs, copyHdfsDirectoryToLocal,
+                                copyHdfsFileToLocal)
+from .log_utils import LogType, RemoteLogger, StdOutputType, nni_log
+from .rest_utils import rest_get, rest_post
+from .url_utils import gen_parameter_meta_url, gen_send_version_url
+
+logger = logging.getLogger('trial_keeper')
+regular = re.compile('v?(?P<version>[0-9](\.[0-9]){0,1}).*')
+
+_hdfs_client = None
+_trial_process = None
+
+
+def get_hdfs_client(args):
+    global _hdfs_client
+
+    if _hdfs_client is not None:
+        return _hdfs_client
+    # backward compatibility
+    hdfs_host = None
+
+    if args.hdfs_host:
+        hdfs_host = args.hdfs_host
+    elif args.pai_hdfs_host:
+        hdfs_host = args.pai_hdfs_host
+    else:
+        return None
+
+    if hdfs_host is not None and args.nni_hdfs_exp_dir is not None:
+        try:
+            if args.webhdfs_path:
+                _hdfs_client = HdfsClient(hosts='{0}:80'.format(hdfs_host), user_name=args.pai_user_name,
+                                          webhdfs_path=args.webhdfs_path, timeout=5)
+            else:
+                # backward compatibility
+                _hdfs_client = HdfsClient(hosts='{0}:{1}'.format(hdfs_host, '50070'), user_name=args.pai_user_name,
+                                          timeout=5)
+        except Exception as e:
+            nni_log(LogType.Error, 'Create HDFS client error: ' + str(e))
+            raise e
+    return _hdfs_client
+
+
+def main_loop(args):
+    '''main loop logic for trial keeper'''
+    global _trial_process
+
+    if not os.path.exists(LOG_DIR):
+        os.makedirs(LOG_DIR)
+
+    trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper',
+                                          StdOutputType.Stdout, args.log_collection)
+    # redirect trial keeper's stdout and stderr to syslog
+    trial_syslogger_stdout = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial', StdOutputType.Stdout,
+                                          args.log_collection)
+    sys.stdout = sys.stderr = trial_keeper_syslogger
+    hdfs_output_dir = None
+
+    if args.hdfs_output_dir:
+        hdfs_output_dir = args.hdfs_output_dir
+    elif args.pai_hdfs_output_dir:
+        hdfs_output_dir = args.pai_hdfs_output_dir
+
+    hdfs_client = get_hdfs_client(args)
+
+    if hdfs_client is not None:
+        copyHdfsDirectoryToLocal(args.nni_hdfs_exp_dir, os.getcwd(), hdfs_client)
+
+    if args.job_id_file:
+        with open(args.job_id_file, 'w') as job_file:
+            job_file.write("%d" % os.getpid())
+
+    # Notice: we don't pass env here, which means the subprocess will inherit the current environment, and that is the expected behavior
+    log_pipe_stdout = trial_syslogger_stdout.get_pipelog_reader()
+    if sys.platform == 'win32':
+        _trial_process = Popen(args.trial_command, shell=True, stdout=log_pipe_stdout, stderr=log_pipe_stdout)
+    else:
+        _trial_process = Popen(args.trial_command, shell=True, stdout=log_pipe_stdout, stderr=log_pipe_stdout, preexec_fn=os.setsid)
+    nni_log(LogType.Info, 'Trial keeper spawns a subprocess (pid {0}) to run command: {1}'.format(_trial_process.pid,
+                                                                                                  shlex.split(
+                                                                                                      args.trial_command)))
+
+    while True:
+        retCode = _trial_process.poll()
+        # child worker process exits and all stdout data is read
+        if retCode is not None and log_pipe_stdout.set_process_exit() and log_pipe_stdout.is_read_completed == True:
+            # In Windows, the retCode -1 is 4294967295. It's larger than c_long, and raises OverflowError.
+            # So convert it to int32.
+ retCode = ctypes.c_long(retCode).value + nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode)) + if hdfs_output_dir is not None: + # Copy local directory to hdfs for OpenPAI + nni_local_output_dir = os.environ['NNI_OUTPUT_DIR'] + try: + if copyDirectoryToHdfs(nni_local_output_dir, hdfs_output_dir, hdfs_client): + nni_log(LogType.Info, + 'copy directory from {0} to {1} success!'.format(nni_local_output_dir, hdfs_output_dir)) + else: + nni_log(LogType.Info, + 'copy directory from {0} to {1} failed!'.format(nni_local_output_dir, hdfs_output_dir)) + except Exception as e: + nni_log(LogType.Error, 'HDFS copy directory got exception: ' + str(e)) + raise e + + # Exit as the retCode of subprocess(trial) + exit(retCode) + break + + time.sleep(2) + + +def trial_keeper_help_info(*args): + print('please run --help to see guidance') + + +def check_version(args): + try: + trial_keeper_version = pkg_resources.get_distribution('nni').version + except pkg_resources.ResolutionError: + # package nni does not exist, try nni-tool package + nni_log(LogType.Error, 'Package nni does not exist!') + os._exit(1) + if not args.nni_manager_version: + # skip version check + nni_log(LogType.Warning, 'Skipping version check!') + else: + try: + trial_keeper_version = regular.search(trial_keeper_version).group('version') + nni_log(LogType.Info, 'trial_keeper_version is {0}'.format(trial_keeper_version)) + nni_manager_version = regular.search(args.nni_manager_version).group('version') + nni_log(LogType.Info, 'nni_manager_version is {0}'.format(nni_manager_version)) + log_entry = {} + if trial_keeper_version != nni_manager_version: + nni_log(LogType.Warning, 'Version does not match!') + error_message = 'NNIManager version is {0}, TrialKeeper version is {1}, NNI version does not match!'.format( + nni_manager_version, trial_keeper_version) + log_entry['tag'] = 'VCFail' + log_entry['msg'] = error_message + rest_post(gen_send_version_url(args.nnimanager_ip, args.nnimanager_port), json.dumps(log_entry), 10, + False) + else: + nni_log(LogType.Info, 'Version match!') + log_entry['tag'] = 'VCSuccess' + rest_post(gen_send_version_url(args.nnimanager_ip, args.nnimanager_port), json.dumps(log_entry), 10, + False) + except AttributeError as err: + nni_log(LogType.Error, err) + + +def is_multi_phase(): + return MULTI_PHASE and (MULTI_PHASE in ['True', 'true']) + + +def download_parameter(meta_list, args): + """ + Download parameter file to local working directory. 
+ meta_list format is defined in paiJobRestServer.ts + example meta_list: + [ + {"experimentId":"yWFJarYa","trialId":"UpPkl","filePath":"/chec/nni/experiments/yWFJarYa/trials/UpPkl/parameter_1.cfg"}, + {"experimentId":"yWFJarYa","trialId":"aIUMA","filePath":"/chec/nni/experiments/yWFJarYa/trials/aIUMA/parameter_1.cfg"} + ] + """ + nni_log(LogType.Debug, str(meta_list)) + nni_log(LogType.Debug, + 'NNI_SYS_DIR: {}, trial Id: {}, experiment ID: {}'.format(NNI_SYS_DIR, NNI_TRIAL_JOB_ID, NNI_EXP_ID)) + nni_log(LogType.Debug, 'NNI_SYS_DIR files: {}'.format(os.listdir(NNI_SYS_DIR))) + for meta in meta_list: + if meta['experimentId'] == NNI_EXP_ID and meta['trialId'] == NNI_TRIAL_JOB_ID: + param_fp = os.path.join(NNI_SYS_DIR, os.path.basename(meta['filePath'])) + if not os.path.exists(param_fp): + hdfs_client = get_hdfs_client(args) + copyHdfsFileToLocal(meta['filePath'], param_fp, hdfs_client, override=False) + + +def fetch_parameter_file(args): + class FetchThread(threading.Thread): + def __init__(self, args): + super(FetchThread, self).__init__() + self.args = args + + def run(self): + uri = gen_parameter_meta_url(self.args.nnimanager_ip, self.args.nnimanager_port) + nni_log(LogType.Info, uri) + + while True: + res = rest_get(uri, 10) + nni_log(LogType.Debug, 'status code: {}'.format(res.status_code)) + if res.status_code == 200: + meta_list = res.json() + download_parameter(meta_list, self.args) + else: + nni_log(LogType.Warning, 'rest response: {}'.format(str(res))) + time.sleep(5) + + fetch_file_thread = FetchThread(args) + fetch_file_thread.start() + + +def _set_adaptdl_signal_handler(): + import signal + global _trial_process + def _handler(signum, frame): + nni_log(LogType.Info, "RECEIVED SIGNAL {}".format(signum)) + nni_log(LogType.Debug, "TRIAL PROCESS ID {}".format(_trial_process.pid)) + if _trial_process and (signum == signal.SIGTERM or signum == signal.SIGINT): + os.killpg(os.getpgid(_trial_process.pid), signal.SIGINT) + os.waitpid(_trial_process.pid, 0) + exit(1) + signal.signal(signal.SIGTERM, _handler) + signal.signal(signal.SIGINT, _handler) + + +if __name__ == '__main__': + '''NNI Trial Keeper main function''' + PARSER = argparse.ArgumentParser() + PARSER.set_defaults(func=trial_keeper_help_info) + PARSER.add_argument('--trial_command', type=str, help='Command to launch trial process') + PARSER.add_argument('--nnimanager_ip', type=str, default='localhost', help='NNI manager rest server IP') + PARSER.add_argument('--nnimanager_port', type=str, default='8081', help='NNI manager rest server port') + PARSER.add_argument('--pai_hdfs_output_dir', type=str, help='the output dir of pai_hdfs') # backward compatibility + PARSER.add_argument('--hdfs_output_dir', type=str, help='the output dir of hdfs') + PARSER.add_argument('--pai_hdfs_host', type=str, help='the host of pai_hdfs') # backward compatibility + PARSER.add_argument('--hdfs_host', type=str, help='the host of hdfs') + PARSER.add_argument('--pai_user_name', type=str, help='the username of hdfs') + PARSER.add_argument('--nni_hdfs_exp_dir', type=str, help='nni experiment directory in hdfs') + PARSER.add_argument('--webhdfs_path', type=str, help='the webhdfs path used in webhdfs URL') + PARSER.add_argument('--nni_manager_version', type=str, help='the nni version transmitted from nniManager') + PARSER.add_argument('--log_collection', type=str, help='set the way to collect log in trialkeeper') + PARSER.add_argument('--job_id_file', type=str, help='set job id file for operating and monitoring job.') + args, unknown = 
PARSER.parse_known_args()
+    if args.trial_command is None:
+        exit(1)
+    check_version(args)
+    try:
+        if NNI_PLATFORM == 'adl':
+            _set_adaptdl_signal_handler()
+        main_loop(args)
+    except SystemExit as se:
+        nni_log(LogType.Info, 'NNI trial keeper exit with code {}'.format(se.code))
+        os._exit(se.code)
+    except Exception as e:
+        nni_log(LogType.Error, 'Exit trial keeper with code 1 because of exception: {}'.format(str(e)))
+        os._exit(1)
diff --git a/nni/tools/trial_tool/trial_runner.py b/nni/tools/trial_tool/trial_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef56b47485cd19cc3932b03c0a6e00798be9e139
--- /dev/null
+++ b/nni/tools/trial_tool/trial_runner.py
@@ -0,0 +1,254 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import argparse
+import json
+import os
+import random
+import re
+import sys
+import time
+import traceback
+from datetime import datetime, timedelta
+
+import pkg_resources
+
+from .gpu import collect_gpu_usage
+
+idle_timeout_seconds = 10 * 60
+gpu_refresh_interval_seconds = 5
+regular = re.compile(r'v?(?P<version>[0-9](\.[0-9]){0,1}).*')
+trial_runner_syslogger = None
+
+
+def main_loop(args):
+    '''main loop logic for trial runner'''
+    idle_last_time = datetime.now()
+    gpu_refresh_last_time = datetime.now() - timedelta(minutes=1)
+    try:
+        if args.job_pid_file:
+            with open(args.job_pid_file, 'w') as job_file:
+                job_file.write("%d" % os.getpid())
+
+        trials = dict()
+
+        command_channel = args.command_channel
+        # command loop
+        while True:
+            command_type, command_data = command_channel.receive()
+            if command_type == CommandType.NewTrialJob:
+                trial_id = command_data["trialId"]
+                if trial_id in trials.keys():
+                    trial = trials[trial_id]
+                    if trial.is_running():
+                        raise Exception('trial %s is running already, cannot start a new one' % trial.id)
+                    else:
+                        del trials[trial_id]
+                trial = Trial(args, command_data)
+                trial.run()
+                trials[trial_id] = trial
+            elif command_type == CommandType.KillTrialJob:
+                trial_id = command_data
+                if trial_id in trials.keys():
+                    trial = trials[trial_id]
+                    trial.kill(command_data)
+            elif command_type == CommandType.SendTrialJobParameter:
+                trial_id = command_data["trialId"]
+                if trial_id in trials.keys():
+                    trial = trials[trial_id]
+                    trial.save_parameter_file(command_data)
+            elif command_type is not None:
+                raise Exception("unknown command %s" % command_type)
+
+            trial_list = list(trials.values())
+            for trial in trial_list:
+                if trial is not None and trial.is_running():
+                    idle_last_time = datetime.now()
+                else:
+                    del trials[trial.id]
+
+            if (datetime.now() - idle_last_time).seconds > idle_timeout_seconds:
+                nni_log(LogType.Info, "trial runner has been idle for more than {0} seconds, exiting.".format(
+                    idle_timeout_seconds))
+                break
+
+            if args.enable_gpu_collect and (datetime.now() - gpu_refresh_last_time).seconds > gpu_refresh_interval_seconds:
+                # collect gpu information
+                gpu_info = collect_gpu_usage(args.node_id)
+                command_channel.send(CommandType.ReportGpuInfo, gpu_info)
+                gpu_refresh_last_time = datetime.now()
+            time.sleep(0.5)
+    except Exception as ex:
+        traceback.print_exc()
+        raise ex
+    finally:
+        nni_log(LogType.Info, "main_loop exits.")
+
+        trial_list = list(trials.values())
+        for trial in trial_list:
+            trial.kill()
+            del trials[trial.id]
+        # wait to send commands
+        for _ in range(10):
+            if command_channel.sent():
+                break
+            time.sleep(1)
+        command_channel.close()
+
+
+def trial_runner_help_info(*args):
+    print('please run --help to see guidance')
+
+
+def check_version(args):
+    try:
+        
trial_runner_version = pkg_resources.get_distribution('nni').version + except pkg_resources.ResolutionError: + # package nni does not exist, try nni-tool package + nni_log(LogType.Error, 'Package nni does not exist!') + os._exit(1) + if not args.nni_manager_version: + # skip version check + nni_log(LogType.Warning, 'Skipping version check!') + else: + try: + command_channel = args.command_channel + trial_runner_version = regular.search(trial_runner_version).group('version') + nni_log(LogType.Info, '{0}: runner_version is {1}'.format(args.node_id, trial_runner_version)) + nni_manager_version = regular.search(args.nni_manager_version).group('version') + nni_log(LogType.Info, '{0}: nni_manager_version is {1}'.format(args.node_id, nni_manager_version)) + log_entry = {} + if trial_runner_version != nni_manager_version: + nni_log(LogType.Warning, '{0}: Version does not match!'.format(args.node_id)) + error_message = '{0}: NNIManager version is {1}, Trial runner version is {2}, NNI version does not match!'.format( + args.node_id, nni_manager_version, trial_runner_version) + log_entry['tag'] = 'VCFail' + log_entry['msg'] = error_message + command_channel.send(CommandType.VersionCheck, log_entry) + while not command_channel.sent(): + time.sleep(1) + else: + nni_log(LogType.Info, '{0}: Version match!'.format(args.node_id)) + log_entry['tag'] = 'VCSuccess' + command_channel.send(CommandType.VersionCheck, log_entry) + except AttributeError as err: + nni_log(LogType.Error, '{0}: {1}'.format(args.node_id, err)) + +if __name__ == '__main__': + + '''NNI Trial Runner main function''' + PARSER = argparse.ArgumentParser() + PARSER.set_defaults(func=trial_runner_help_info) + PARSER.add_argument('--trial_command', type=str, help='Command to launch trial process') + PARSER.add_argument('--nnimanager_ip', type=str, help='NNI manager rest server IP') + PARSER.add_argument('--nnimanager_port', type=str, help='NNI manager rest server port') + PARSER.add_argument('--nni_manager_version', type=str, help='the nni version transmitted from nniManager') + PARSER.add_argument('--log_collection', type=str, help='set the way to collect log in trial runner') + PARSER.add_argument('--node_count', type=int, help='number of nodes, it determines how to consume command and save code file') + PARSER.add_argument('--job_pid_file', type=str, help='save trial runner process pid') + args, unknown = PARSER.parse_known_args() + + setting_file = "settings.json" + if not os.path.exists(setting_file): + setting_file = "../{}".format(setting_file) + if os.path.exists(setting_file): + with open(setting_file, 'r') as fp: + settings = json.load(fp) + print("setting is {}".format(settings)) + else: + print("not found setting file") + + args.exp_id = settings["experimentId"] + args.platform = settings["platform"] + # runner_id is unique runner in experiment + args.runner_id = os.path.basename(os.path.realpath(os.path.curdir)) + args.runner_name = "runner_"+args.runner_id + args.enable_gpu_collect = settings["enableGpuCollector"] + args.command_channel = settings["commandChannel"] + + if args.trial_command is None: + args.trial_command = settings["command"] + if args.nnimanager_ip is None: + args.nnimanager_ip = settings["nniManagerIP"] + if args.nnimanager_port is None: + args.nnimanager_port = settings["nniManagerPort"] + if args.nni_manager_version is None: + args.nni_manager_version = settings["nniManagerVersion"] + if args.log_collection is None: + args.log_collection = settings["logCollection"] + if args.node_count is None: + # default has 
only one node.
+        args.node_count = 1
+
+    os.environ['NNI_OUTPUT_DIR'] = os.curdir + "/nnioutput"
+    os.environ['NNI_PLATFORM'] = args.platform
+    os.environ['NNI_SYS_DIR'] = os.curdir
+    os.environ['NNI_EXP_ID'] = args.exp_id
+    os.environ['MULTI_PHASE'] = "true"
+    os.environ['NNI_TRIAL_JOB_ID'] = "runner"
+    os.environ['REUSE_MODE'] = "true"
+
+    from .log_utils import LogType, RemoteLogger, StdOutputType, nni_log
+    from .trial import Trial
+    from .file_channel import FileChannel
+    from .web_channel import WebChannel
+    from .commands import CommandType
+
+    is_multi_node = args.node_count > 1
+
+    if is_multi_node:
+        # for multiple nodes, create a file to get a unique id.
+        while True:
+            node_id = random.randint(0, 10000)
+            unique_check_file_name = "node_%s" % (node_id)
+            if not os.path.exists(unique_check_file_name):
+                break
+        with open(unique_check_file_name, "w") as unique_check_file:
+            unique_check_file.write("%s" % (int(datetime.now().timestamp() * 1000)))
+        args.node_id = node_id
+    else:
+        # a single-node runner does not need a node id
+        args.node_id = None
+
+    # init command channel
+    command_channel = None
+    if args.command_channel == "file":
+        command_channel = FileChannel(args)
+    elif args.command_channel == 'aml':
+        from .aml_channel import AMLChannel
+        command_channel = AMLChannel(args)
+    else:
+        command_channel = WebChannel(args)
+    command_channel.open()
+
+    nni_log(LogType.Info, "command channel is {}, actual type is {}".format(args.command_channel, type(command_channel)))
+    args.command_channel = command_channel
+
+    trial_runner_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'runner',
+                                          StdOutputType.Stdout, args.log_collection, args.runner_name, command_channel)
+    sys.stdout = sys.stderr = trial_runner_syslogger
+    nni_log(LogType.Info, "{}: merged args is {}".format(args.node_id, args))
+
+    if args.trial_command is None:
+        nni_log(LogType.Error, "{}: no command is found.".format(args.node_id))
+        os._exit(1)
+    check_version(args)
+    try:
+        main_loop(args)
+    except SystemExit as se:
+        nni_log(LogType.Info, '{}: NNI trial runner exit with code {}'.format(args.node_id, se.code))
+
+        # try best to send latest errors to server
+        timeout = 10
+        while not command_channel.sent() and timeout > 0:
+            timeout -= 1
+            time.sleep(1)
+        os._exit(se.code)
+    finally:
+        if trial_runner_syslogger is not None:
+            if trial_runner_syslogger.pipeReader is not None:
+                trial_runner_syslogger.pipeReader.set_process_exit()
+            trial_runner_syslogger.close()
+
+    # the process doesn't exit even when the main loop exits, so exit it explicitly.
+    os._exit(0)
diff --git a/nni/tools/trial_tool/url_utils.py b/nni/tools/trial_tool/url_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7942c62fb5b4d8d3734ddf8c23a21b0f379d76ec
--- /dev/null
+++ b/nni/tools/trial_tool/url_utils.py
@@ -0,0 +1,19 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
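+
+# The helpers below compose NNI manager REST endpoints from the constants in
+# .constants. As an illustration only (the real values live in .constants):
+# assuming BASE_URL is 'http://{}' and API_ROOT_URL is '/api/v1/nni',
+# gen_send_version_url('10.0.0.1', '8081') would yield something like
+# 'http://10.0.0.1:8081/api/v1/nni' + VERSION_API + '/<expId>/<trialId>'.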
+
+from .constants import API_ROOT_URL, BASE_URL, STDOUT_API, NNI_TRIAL_JOB_ID, NNI_EXP_ID, VERSION_API, PARAMETER_META_API
+
+
+def gen_send_stdout_url(ip, port):
+    '''Generate send stdout url'''
+    return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
+
+
+def gen_send_version_url(ip, port):
+    '''Generate version check url'''
+    return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, VERSION_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
+
+
+def gen_parameter_meta_url(ip, port):
+    '''Generate parameter meta url'''
+    return '{0}:{1}{2}{3}'.format(BASE_URL.format(ip), port, API_ROOT_URL, PARAMETER_META_API)
diff --git a/nni/tools/trial_tool/web_channel.py b/nni/tools/trial_tool/web_channel.py
new file mode 100644
index 0000000000000000000000000000000000000000..87901e1163a5fbcf82a2bb05c701735a346333fc
--- /dev/null
+++ b/nni/tools/trial_tool/web_channel.py
@@ -0,0 +1,57 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import asyncio
+import os
+import websockets
+
+from .base_channel import BaseChannel
+from .log_utils import LogType, nni_log
+
+
+class WebChannel(BaseChannel):
+
+    def __init__(self, args):
+        self.node_id = args.node_id
+        self.args = args
+        self.client = None
+        self.in_cache = b""
+        self.timeout = 10
+
+        super(WebChannel, self).__init__(args)
+
+        self._event_loop = None
+
+    def _inner_open(self):
+        url = "ws://{}:{}".format(self.args.nnimanager_ip, self.args.nnimanager_port)
+        try:
+            connect = asyncio.wait_for(websockets.connect(url), self.timeout)
+            self._event_loop = asyncio.get_event_loop()
+            client = self._event_loop.run_until_complete(connect)
+            self.client = client
+            nni_log(LogType.Info, 'WebChannel: connected to %s' % url)
+        except asyncio.TimeoutError:
+            nni_log(LogType.Error, 'connecting to %s timed out! Please make sure NNIManagerIP is configured correctly and accessible.' % url)
+            os._exit(1)
+
+    def _inner_close(self):
+        if self.client is not None:
+            self.client.close()
+            self.client = None
+            if self._event_loop.is_running():
+                self._event_loop.stop()
+            self._event_loop = None
+
+    def _inner_send(self, message):
+        loop = asyncio.new_event_loop()
+        loop.run_until_complete(self.client.send(message))
+
+    def _inner_receive(self):
+        messages = []
+        if self.client is not None:
+            received = self._event_loop.run_until_complete(self.client.recv())
+            # the received message is a string; encode it here so downstream handling is consistent.
+            self.in_cache += received.encode("utf8")
+            messages, self.in_cache = self._fetch_message(self.in_cache)
+
+        return messages
diff --git a/nni/trial.py b/nni/trial.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8eec281b9ef0b8f7b66f47f05b5d7ef0fd01de4
--- /dev/null
+++ b/nni/trial.py
@@ -0,0 +1,156 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from .common.serializer import dump
+from .runtime.env_vars import trial_env_vars
+from .runtime import platform
+
+
+__all__ = [
+    'get_next_parameter',
+    'get_current_parameter',
+    'report_intermediate_result',
+    'report_final_result',
+    'get_experiment_id',
+    'get_trial_id',
+    'get_sequence_id'
+]
+
+
+_params = None
+_experiment_id = platform.get_experiment_id()
+_trial_id = platform.get_trial_id()
+_sequence_id = platform.get_sequence_id()
+
+
+def get_next_parameter():
+    """
+    Get the hyper parameters generated by the tuner. For a multiphase experiment, it returns a new group of hyper
+    parameters at each call of get_next_parameter.
For a non-multiphase (multiPhase is not configured or set to False)
+    experiment, it returns hyper parameters only on the first call of each trial job, and returns None from the
+    second call on. This API should be called only once in each trial job of an experiment which is not specified
+    as multiphase.
+
+    Returns
+    -------
+    dict
+        A dict containing the hyper parameters generated by the tuner; the keys of the dict are defined in the
+        search space. Returns None if no more hyper parameters can be generated by the tuner.
+    """
+    global _params
+    _params = platform.get_next_parameter()
+    if _params is None:
+        return None
+    return _params['parameters']
+
+def get_current_parameter(tag=None):
+    """
+    Get current hyper parameters generated by the tuner. It returns the same group of hyper parameters as returned
+    by the last call of get_next_parameter.
+
+    Parameters
+    ----------
+    tag: str
+        hyper parameter key
+    """
+    global _params
+    if _params is None:
+        return None
+    if tag is None:
+        return _params['parameters']
+    return _params['parameters'][tag]
+
+def get_experiment_id():
+    """
+    Get experiment ID.
+
+    Returns
+    -------
+    str
+        Identifier of current experiment
+    """
+    return _experiment_id
+
+def get_trial_id():
+    """
+    Get trial job ID, which is the string identifier of a trial job, for example 'MoXrp'. In one experiment, each
+    trial job has a unique string ID.
+
+    Returns
+    -------
+    str
+        Identifier of current trial job which is calling this API.
+    """
+    return _trial_id
+
+def get_sequence_id():
+    """
+    Get trial job sequence number. A sequence number is an integer assigned to each trial job based on the order
+    in which they are submitted, starting from 0. In one experiment, both the trial job ID and the sequence number
+    are unique for each trial job, but they are of different data types.
+
+    Returns
+    -------
+    int
+        Sequence number of current trial job which is calling this API.
+    """
+    return _sequence_id
+
+_intermediate_seq = 0
+
+
+def overwrite_intermediate_seq(value):
+    """
+    Overwrite the intermediate sequence value.
+
+    Parameters
+    ----------
+    value: int
+    """
+    assert isinstance(value, int)
+    global _intermediate_seq
+    _intermediate_seq = value
+
+
+def report_intermediate_result(metric):
+    """
+    Reports intermediate result to NNI.
+
+    Parameters
+    ----------
+    metric: serializable object
+    """
+    global _intermediate_seq
+    assert _params or trial_env_vars.NNI_PLATFORM is None, \
+        'nni.get_next_parameter() needs to be called before report_intermediate_result'
+    metric = dump({
+        'parameter_id': _params['parameter_id'] if _params else None,
+        'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
+        'type': 'PERIODICAL',
+        'sequence': _intermediate_seq,
+        'value': dump(metric)
+    })
+    _intermediate_seq += 1
+    platform.send_metric(metric)
+
+def report_final_result(metric):
+    """
+    Reports final result to NNI.
+
+    Parameters
+    ----------
+    metric: serializable object
+        Usually (for built-in tuners to work), it should be a number, or
+        a dict with key "default" (a number), and any other extra keys.
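+
+    Examples
+    --------
+    A typical trial body (``train_and_evaluate`` is a hypothetical user-defined function)::
+
+        params = nni.get_next_parameter()
+        accuracy = train_and_evaluate(params)
+        nni.report_final_result({'default': accuracy})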
+    """
+    assert _params or trial_env_vars.NNI_PLATFORM is None, \
+        'nni.get_next_parameter() needs to be called before report_final_result'
+    metric = dump({
+        'parameter_id': _params['parameter_id'] if _params else None,
+        'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
+        'type': 'FINAL',
+        'sequence': 0,
+        'value': dump(metric)
+    })
+    platform.send_metric(metric)
diff --git a/nni/tuner.py b/nni/tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fbcc011d0c55676e9bc9c7e906d7655bb5badeb
--- /dev/null
+++ b/nni/tuner.py
@@ -0,0 +1,223 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Tuner is an AutoML algorithm, which generates a new configuration for the next try.
+A new trial will run with this configuration.
+
+See :class:`Tuner`'s specification and ``docs/en_US/tuners.rst`` for details.
+"""
+
+import logging
+
+import nni
+
+from .recoverable import Recoverable
+
+__all__ = ['Tuner']
+
+_logger = logging.getLogger(__name__)
+
+
+class Tuner(Recoverable):
+    """
+    Tuner is an AutoML algorithm, which generates a new configuration for the next try.
+    A new trial will run with this configuration.
+
+    This is the abstract base class for all tuners.
+    Tuning algorithms should inherit this class and override :meth:`update_search_space`, :meth:`receive_trial_result`,
+    as well as :meth:`generate_parameters` or :meth:`generate_multiple_parameters`.
+
+    After initializing, NNI will first call :meth:`update_search_space` to tell the tuner the feasible region,
+    and then call :meth:`generate_parameters` one or more times to request hyper-parameter configurations.
+
+    The framework will train several models with the given configurations.
+    When one of them finishes, its final accuracy will be reported to :meth:`receive_trial_result`.
+    Then another configuration will be requested and trained, until the whole experiment finishes.
+
+    If a tuner wants to know when a trial ends, it can also override :meth:`trial_end`.
+
+    Tuners use *parameter ID* to track trials.
+    In tuner context, there is a one-to-one mapping between parameter ID and trial.
+    When the framework asks the tuner to generate hyper-parameters for a new trial,
+    an ID has already been assigned and can be recorded in :meth:`generate_parameters`.
+    Later when the trial ends, the ID will be reported to :meth:`trial_end`,
+    and to :meth:`receive_trial_result` if it has a final result.
+    Parameter IDs are unique integers.
+
+    The type/format of search space and hyper-parameters are not limited,
+    as long as they are JSON-serializable and in sync with trial code.
+    For HPO tuners, however, there is a widely shared common interface,
+    which supports ``choice``, ``randint``, ``uniform``, and so on.
+    See ``docs/en_US/Tutorial/SearchSpaceSpec.md`` for details of this interface.
+
+    [WIP] For advanced tuners which take advantage of trials' intermediate results,
+    an ``Advisor`` interface is under development.
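+
+    As a sketch of the interface (not a built-in tuner; the search-space handling
+    here is hypothetical and deliberately minimal), a tuner could look like::
+
+        import random
+
+        class RandomChoiceTuner(Tuner):
+            def update_search_space(self, search_space):
+                # expect {'param': {'_type': 'choice', '_value': [...]}, ...}
+                self.space = search_space
+
+            def generate_parameters(self, parameter_id, **kwargs):
+                # sample one value for every 'choice' entry in the search space
+                return {key: random.choice(spec['_value']) for key, spec in self.space.items()}
+
+            def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+                _logger.info('trial %s finished with result %s', parameter_id, value)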
+
+    See Also
+    --------
+    Builtin tuners:
+    :class:`~nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptTuner`
+    :class:`~nni.algorithms.hpo.evolution_tuner.evolution_tuner.EvolutionTuner`
+    :class:`~nni.algorithms.hpo.smac_tuner.SMACTuner`
+    :class:`~nni.algorithms.hpo.gridsearch_tuner.GridSearchTuner`
+    :class:`~nni.algorithms.hpo.networkmorphism_tuner.networkmorphism_tuner.NetworkMorphismTuner`
+    :class:`~nni.algorithms.hpo.metis_tuner.metis_tuner.MetisTuner`
+    :class:`~nni.algorithms.hpo.ppo_tuner.PPOTuner`
+    :class:`~nni.algorithms.hpo.gp_tuner.gp_tuner.GPTuner`
+    """
+
+    def generate_parameters(self, parameter_id, **kwargs):
+        """
+        Abstract method which provides a set of hyper-parameters.
+
+        This method will get called when the framework is about to launch a new trial,
+        if the user does not override :meth:`generate_multiple_parameters`.
+
+        The return value of this method will be received by trials via :func:`nni.get_next_parameter`.
+        It should fit in the search space, though the framework will not verify this.
+
+        User code must override either this method or :meth:`generate_multiple_parameters`.
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Unstable parameters which should be ignored by normal users.
+
+        Returns
+        -------
+        any
+            The hyper-parameters, a dict in most cases, but could be any JSON-serializable type when needed.
+
+        Raises
+        ------
+        nni.NoMoreTrialError
+            If the search space is fully explored, the tuner can raise this exception.
+        """
+        # FIXME: some tuners raise NoMoreTrialError when they are waiting for more trial results
+        # we need to design a new exception for this purpose
+        raise NotImplementedError('Tuner: generate_parameters not implemented')
+
+    def generate_multiple_parameters(self, parameter_id_list, **kwargs):
+        """
+        Callback method which provides multiple sets of hyper-parameters.
+
+        This method will get called when the framework is about to launch one or more new trials.
+
+        If the user does not override this method, it will invoke :meth:`generate_parameters` on each parameter ID.
+
+        See :meth:`generate_parameters` for details.
+
+        User code must override either this method or :meth:`generate_parameters`.
+
+        Parameters
+        ----------
+        parameter_id_list : list of int
+            Unique identifiers for each set of requested hyper-parameters.
+            These will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Unstable parameters which should be ignored by normal users.
+
+        Returns
+        -------
+        list
+            List of hyper-parameters. An empty list indicates there are no more trials.
+        """
+        result = []
+        for parameter_id in parameter_id_list:
+            try:
+                _logger.debug("generating param for %s", parameter_id)
+                res = self.generate_parameters(parameter_id, **kwargs)
+            except nni.NoMoreTrialError:
+                return result
+            result.append(res)
+        return result
+
+    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
+        """
+        Abstract method invoked when a trial reports its final result. Must override.
+
+        This method only listens to results of algorithm-generated hyper-parameters.
+        Currently customized trials added from the web UI will not report results to this method.
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier of used hyper-parameters, same as in :meth:`generate_parameters`.
+        parameters
+            Hyper-parameters generated by :meth:`generate_parameters`.
+        value
+            Result from trial (the return value of :func:`nni.report_final_result`).
+        **kwargs
+            Unstable parameters which should be ignored by normal users.
+        """
+        raise NotImplementedError('Tuner: receive_trial_result not implemented')
+
+    def _accept_customized_trials(self, accept=True):
+        # FIXME: because Tuner is designed as interface, this API should not be here
+
+        # Enable or disable receiving results of user-added hyper-parameters.
+        # By default `receive_trial_result()` will only receive results of algorithm-generated hyper-parameters.
+        # If tuners want to receive those of customized parameters as well, they can call this function in `__init__()`.
+
+        # pylint: disable=attribute-defined-outside-init
+        self._accept_customized = accept
+
+    def trial_end(self, parameter_id, success, **kwargs):
+        """
+        Abstract method invoked when a trial is completed or terminated. Do nothing by default.
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier for hyper-parameters used by this trial.
+        success : bool
+            True if the trial successfully completed; False if failed or terminated.
+        **kwargs
+            Unstable parameters which should be ignored by normal users.
+        """
+
+    def update_search_space(self, search_space):
+        """
+        Abstract method for updating the search space. Must override.
+
+        Tuners are advised to support updating the search space at run-time.
+        If a tuner can only set the search space once before generating the first hyper-parameters,
+        it should explicitly document this behaviour.
+
+        Parameters
+        ----------
+        search_space
+            JSON object defined by experiment owner.
+        """
+        raise NotImplementedError('Tuner: update_search_space not implemented')
+
+    def load_checkpoint(self):
+        """
+        Internal API under revising, not recommended for end users.
+        """
+        checkpoint_path = self.get_checkpoint_path()
+        _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoint_path)
+
+    def save_checkpoint(self):
+        """
+        Internal API under revising, not recommended for end users.
+        """
+        checkpoint_path = self.get_checkpoint_path()
+        _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoint_path)
+
+    def import_data(self, data):
+        """
+        Internal API under revising, not recommended for end users.
+        """
+        # Import additional data for tuning
+        # data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
+        pass
+
+    def _on_exit(self):
+        pass
+
+    def _on_error(self):
+        pass
diff --git a/nni/utils.py b/nni/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..58d53baefe700c5857ac0534e0fe0055410cf5bf
--- /dev/null
+++ b/nni/utils.py
@@ -0,0 +1,310 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import copy
+from enum import Enum, unique
+from pathlib import Path
+from schema import And
+
+from . import parameter_expressions
+
+
+@unique
+class OptimizeMode(Enum):
+    """Optimize Mode class
+
+    If OptimizeMode is 'minimize', the tuner needs to minimize the reward
+    received from the trial.
+
+    If OptimizeMode is 'maximize', the tuner needs to maximize the reward
+    received from the trial.
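+
+    Example (a common tuner pattern; ``optimize_mode`` would come from ``classArgs``)::
+
+        mode = OptimizeMode(optimize_mode)
+        if mode is OptimizeMode.Minimize:
+            reward = -reward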
+    """
+    Minimize = 'minimize'
+    Maximize = 'maximize'
+
+
+class NodeType:
+    """Node Type class
+    """
+    ROOT = 'root'
+    TYPE = '_type'
+    VALUE = '_value'
+    INDEX = '_index'
+    NAME = '_name'
+
+
+class MetricType:
+    """The types of metric data
+    """
+    FINAL = 'FINAL'
+    PERIODICAL = 'PERIODICAL'
+    REQUEST_PARAMETER = 'REQUEST_PARAMETER'
+
+
+def split_index(params):
+    """
+    Delete index information from params
+    """
+    if isinstance(params, dict):
+        if NodeType.INDEX in params.keys():
+            return split_index(params[NodeType.VALUE])
+        result = {}
+        for key in params:
+            result[key] = split_index(params[key])
+        return result
+    else:
+        return params
+
+
+def extract_scalar_reward(value, scalar_key='default'):
+    """
+    Extract scalar reward from trial result.
+
+    Parameters
+    ----------
+    value : int, float, dict
+        the reported final metric data
+    scalar_key : str
+        the key name that indicates the numeric value
+
+    Raises
+    ------
+    RuntimeError
+        Incorrect final result: the final result should be float/int,
+        or a dict which has a key named "default" whose value is float/int.
+    """
+    if isinstance(value, (float, int)):
+        reward = value
+    elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
+        reward = value[scalar_key]
+    else:
+        raise RuntimeError('Incorrect final result: the final result should be float/int, ' \
+                           'or a dict which has a key named "default" whose value is float/int.')
+    return reward
+
+
+def extract_scalar_history(trial_history, scalar_key='default'):
+    """
+    Extract scalar values from a list of intermediate results.
+
+    Parameters
+    ----------
+    trial_history : list
+        accumulated intermediate results of a trial
+    scalar_key : str
+        the key name that indicates the numeric value
+
+    Raises
+    ------
+    RuntimeError
+        Incorrect final result: the final result should be float/int,
+        or a dict which has a key named "default" whose value is float/int.
+    """
+    return [extract_scalar_reward(ele, scalar_key) for ele in trial_history]
+
+
+def convert_dict2tuple(value):
+    """
+    Convert dict to tuple to solve the unhashability problem.
+    NOTE: this function will change the original data.
+    """
+    if isinstance(value, dict):
+        for _keys in value:
+            value[_keys] = convert_dict2tuple(value[_keys])
+        return tuple(sorted(value.items()))
+    return value
+
+
+def json2space(x, oldy=None, name=NodeType.ROOT):
+    """
+    Change search space from json format to hyperopt format
+
+    """
+    y = list()
+    if isinstance(x, dict):
+        if NodeType.TYPE in x.keys():
+            _type = x[NodeType.TYPE]
+            name = name + '-' + _type
+            if _type == 'choice':
+                if oldy is not None:
+                    _index = oldy[NodeType.INDEX]
+                    y += json2space(x[NodeType.VALUE][_index],
+                                    oldy[NodeType.VALUE], name=name+'[%d]' % _index)
+                else:
+                    y += json2space(x[NodeType.VALUE], None, name=name)
+            y.append(name)
+        else:
+            for key in x.keys():
+                y += json2space(x[key], oldy[key] if oldy else None, name+"[%s]" % str(key))
+    elif isinstance(x, list):
+        for i, x_i in enumerate(x):
+            if isinstance(x_i, dict):
+                if NodeType.NAME not in x_i.keys():
+                    raise RuntimeError('\'_name\' key is not found in this nested search space.')
+            y += json2space(x_i, oldy[i] if oldy else None, name + "[%d]" % i)
+    return y
+
+
+def json2parameter(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.ROOT):
+    """
+    Sample parameters from a json-format search space.
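+
+    For a 'choice' entry, the result records both the sampled index and the
+    sampled value, e.g. (illustrative)::
+
+        {'_index': 1, '_value': 'relu'}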
+
+    """
+    if isinstance(x, dict):
+        if NodeType.TYPE in x.keys():
+            _type = x[NodeType.TYPE]
+            _value = x[NodeType.VALUE]
+            name = name + '-' + _type
+            Rand |= is_rand[name]
+            if Rand is True:
+                if _type == 'choice':
+                    _index = random_state.randint(len(_value))
+                    y = {
+                        NodeType.INDEX: _index,
+                        NodeType.VALUE: json2parameter(
+                            x[NodeType.VALUE][_index],
+                            is_rand,
+                            random_state,
+                            None,
+                            Rand,
+                            name=name+"[%d]" % _index
+                        )
+                    }
+                else:
+                    y = getattr(parameter_expressions, _type)(*(_value + [random_state]))
+            else:
+                y = copy.deepcopy(oldy)
+        else:
+            y = dict()
+            for key in x.keys():
+                y[key] = json2parameter(
+                    x[key],
+                    is_rand,
+                    random_state,
+                    oldy[key] if oldy else None,
+                    Rand,
+                    name + "[%s]" % str(key)
+                )
+    elif isinstance(x, list):
+        y = list()
+        for i, x_i in enumerate(x):
+            if isinstance(x_i, dict):
+                if NodeType.NAME not in x_i.keys():
+                    raise RuntimeError('\'_name\' key is not found in this nested search space.')
+            y.append(json2parameter(
+                x_i,
+                is_rand,
+                random_state,
+                oldy[i] if oldy else None,
+                Rand,
+                name + "[%d]" % i
+            ))
+    else:
+        y = copy.deepcopy(x)
+    return y
+
+def merge_parameter(base_params, override_params):
+    """
+    Update the parameters in ``base_params`` with ``override_params``.
+    Can be useful to override parsed command line arguments.
+
+    Parameters
+    ----------
+    base_params : namespace or dict
+        Base parameters. A key-value mapping.
+    override_params : dict or None
+        Parameters to override. Usually the parameters obtained from ``get_next_parameter()``.
+        When it is None, nothing will happen.
+
+    Returns
+    -------
+    namespace or dict
+        The updated ``base_params``. Note that ``base_params`` will be updated inplace. The return value is
+        only for convenience.
+    """
+    if override_params is None:
+        return base_params
+    is_dict = isinstance(base_params, dict)
+    for k, v in override_params.items():
+        if is_dict:
+            if k not in base_params:
+                raise ValueError('Key \'%s\' not found in base parameters.' % k)
+            if type(base_params[k]) != type(v) and base_params[k] is not None:
+                raise TypeError('Expected \'%s\' in override parameters to have type \'%s\', but found \'%s\'.' %
+                                (k, type(base_params[k]), type(v)))
+            base_params[k] = v
+        else:
+            if not hasattr(base_params, k):
+                raise ValueError('Key \'%s\' not found in base parameters.' % k)
+            if type(getattr(base_params, k)) != type(v) and getattr(base_params, k) is not None:
+                raise TypeError('Expected \'%s\' in override parameters to have type \'%s\', but found \'%s\'.' %
+                                (k, type(getattr(base_params, k)), type(v)))
+            setattr(base_params, k, v)
+    return base_params
+
+class ClassArgsValidator(object):
+    """
+    NNI tuners/assessors/advisors accept a `classArgs` parameter in the experiment configuration file.
+    This ClassArgsValidator interface is used to validate the classArgs section in the experiment
+    configuration file.
+    """
+    def validate_class_args(self, **kwargs):
+        """
+        Validate the classArgs configuration in the experiment configuration file.
+
+        Parameters
+        ----------
+        kwargs: dict
+            kwargs passed to tuner/assessor/advisor constructor
+
+        Raises
+        ------
+        Exception
+            Raise an exception if the kwargs is invalid.
+        """
+        pass
+
+    def choices(self, key, *args):
+        """
+        Utility method to create a schema to check whether the `key` is one of the `args`.
+
+        Parameters
+        ----------
+        key: str
+            key name of the data to be validated
+        args: list of str
+            list of the choices
+
+        Returns
+        -------
+        Schema
+            A schema to check whether the `key` is one of the `args`.
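+
+        Example (hypothetical use inside ``validate_class_args``, with ``Schema``
+        imported from the ``schema`` package)::
+
+            Schema({'optimize_mode': self.choices('optimize_mode', 'maximize', 'minimize')}).validate(kwargs)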
+ """ + return And(lambda n: n in args, error='%s should be in [%s]!' % (key, str(args))) + + def range(self, key, keyType, start, end): + """ + Utility method to create a schema to check whether the `key` is in the range of [start, end]. + + Parameters: + ---------- + key: str + key name of the data to be validated + keyType: type + python data type, such as int, float + start: type is specified by keyType + start of the range + end: type is specified by keyType + end of the range + + Returns: Schema + -------- + A scheme to check whether the `key` is in the range of [start, end]. + """ + return And( + And(keyType, error='%s should be %s type!' % (key, keyType.__name__)), + And(lambda n: start <= n <= end, error='%s should be in range of (%s, %s)!' % (key, start, end)) + ) + + def path(self, key): + return And( + And(str, error='%s should be a string!' % key), + And(lambda p: Path(p).exists(), error='%s path does not exist!' % (key)) + ) diff --git a/pipelines/fast-test.yml b/pipelines/fast-test.yml new file mode 100644 index 0000000000000000000000000000000000000000..a1d8a141188c99b63a650257bddb5cc875da6075 --- /dev/null +++ b/pipelines/fast-test.yml @@ -0,0 +1,470 @@ +# To reduce debug cost, steps are sorted differently on each platform, +# so that a bug in any module will cause at least one platform to fail quickly. + +stages: +- stage: lint + jobs: + - job: docs + pool: + vmImage: ubuntu-latest + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + - script: | + sudo apt-get install -y pandoc + sudo apt-get remove swig -y + sudo apt-get install swig3.0 -y + sudo ln -s /usr/bin/swig3.0 /usr/bin/swig + displayName: Install apt packages + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + python -m pip install -r dependencies/required.txt + python -m pip install -r dependencies/recommended.txt + python -m pip install -r dependencies/required_extra.txt + displayName: Install requirements + - script: | + python test/vso_tools/interim_patch.py + displayName: Apply patch + - script: | + cd docs + python tools/chineselink.py check + displayName: Translation up-to-date + - script: | + cd docs/en_US + sphinx-build -M html . _build -W --keep-going -T + displayName: Sphinx # TODO: rstcheck + + - job: python + pool: + vmImage: ubuntu-latest + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + - script: | + sudo apt-get remove swig -y + sudo apt-get install swig3.0 -y + sudo ln -s /usr/bin/swig3.0 /usr/bin/swig + displayName: Install apt packages + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + python -m pip install -r dependencies/required.txt + python -m pip install -r dependencies/recommended.txt + python -m pip install -r dependencies/required_extra.txt + python -m pip install "typing-extensions>=3.10" # pylint requires newer typing extension. 
Override requirements in tensorflow + displayName: Install requirements + - script: python -m pylint --rcfile pylintrc nni + displayName: pylint + - script: | + set -e + python -m flake8 nni --count --select=E9,F63,F72,F82 --show-source --statistics + EXCLUDES=examples/trials/mnist-nas/*/mnist*.py,examples/trials/nas_cifar10/src/cifar10/general_child.py + python -m flake8 examples --count --exclude=$EXCLUDES --select=E9,F63,F72,F82 --show-source --statistics + displayName: flake8 + + - job: typescript + pool: + vmImage: ubuntu-latest + variables: + YARN_CACHE_FOLDER: $(Pipeline.Workspace)/.yarn + steps: + - task: NodeTool@0 + inputs: + versionSpec: 16.3.0 + displayName: Configure Node.js version + - task: Cache@2 + inputs: + key: 'yarn | "$(Agent.OS)" | ts/**/yarn.lock, !**/node_modules/**' + restoreKeys: | + yarn | "$(Agent.OS)" + path: $(YARN_CACHE_FOLDER) + displayName: Cache yarn packages + - script: | + set -e + cd ts/nni_manager + yarn + yarn eslint + displayName: ESLint (NNI Manager) + - script: | + set -e + cd ts/webui + yarn + yarn eslint + displayName: ESLint (WebUI) + + +- stage: test + jobs: + - job: ubuntu_latest + pool: + vmImage: ubuntu-latest + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + YARN_CACHE_FOLDER: $(Pipeline.Workspace)/.yarn + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.9 + displayName: Configure Python version + + - task: NodeTool@0 + inputs: + versionSpec: 16.3.0 + displayName: Configure Node.js version + + - script: | + sudo apt-get install -y pandoc + sudo apt-get remove swig -y + sudo apt-get install swig3.0 -y + sudo ln -s /usr/bin/swig3.0 /usr/bin/swig + displayName: Install apt packages + + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | latest | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + + - task: Cache@2 + inputs: + key: 'yarn | "$(Agent.OS)" | latest | ts/**/yarn.lock, !**/node_modules/**' + restoreKeys: | + yarn | "$(Agent.OS)" + path: $(YARN_CACHE_FOLDER) + displayName: Cache yarn packages + + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + echo "##vso[task.setvariable variable=PATH]${HOME}/.local/bin:${PATH}" + displayName: Install Python tools + + - script: | + python setup.py develop + mkdir -p coverage + displayName: Install NNI + + - script: | + set -e + python -m pip install -r dependencies/recommended.txt + python -m pip install -e .[PPOTuner,DNGO] + displayName: Install extra dependencies + + # Need del later + - script: | + python test/vso_tools/interim_patch.py + displayName: Torch utils tensorboard interim patch + + - script: | + set -e + cd test + python -m pytest ut --cov-config=.coveragerc \ + --ignore=ut/compression/v1/test_pruners.py \ + --ignore=ut/compression/v1/test_compressor_tf.py \ + --ignore=ut/compression/v1/test_compressor_torch.py \ + --ignore=ut/compression/v1/test_model_speedup.py + python -m pytest ut/compression/v1/test_pruners.py --cov-config=.coveragerc --cov-append + python -m pytest ut/compression/v1/test_compressor_tf.py --cov-config=.coveragerc --cov-append + python -m pytest ut/compression/v1/test_compressor_torch.py --cov-config=.coveragerc --cov-append + python -m pytest ut/compression/v1/test_model_speedup.py --cov-config=.coveragerc --cov-append + cp coverage.xml ../coverage/python.xml + displayName: Python unit test + + - script: | + set -e + cd ts/nni_manager + yarn test + cp 
coverage/cobertura-coverage.xml ../../coverage/typescript.xml + displayName: TypeScript unit test + + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '$(System.DefaultWorkingDirectory)/**/test-*.xml' + testRunTitle: 'Publish test results for Python $(python.version)' + displayName: Publish test results + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: coverage/* + displayName: Publish code coverage results + + - script: | + cd test + python nni_test/nnitest/run_tests.py --config config/pr_tests.yml + displayName: Simple integration test + + - job: ubuntu_legacy + pool: + vmImage: ubuntu-18.04 + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + YARN_CACHE_FOLDER: $(Pipeline.Workspace)/.yarn + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.6 + displayName: Configure Python version + + - task: NodeTool@0 + inputs: + versionSpec: 16.3.0 + displayName: Configure Node.js version + + - script: | + sudo apt-get install -y pandoc + sudo apt-get remove swig -y + sudo apt-get install swig3.0 -y + sudo ln -s /usr/bin/swig3.0 /usr/bin/swig + displayName: Install apt packages + + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | legacy | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + + - task: Cache@2 + inputs: + key: 'yarn | "$(Agent.OS)" | legacy | ts/**/yarn.lock, !**/node_modules/**' + restoreKeys: | + yarn | "$(Agent.OS)" + path: $(YARN_CACHE_FOLDER) + displayName: Cache yarn packages + + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + echo "##vso[task.setvariable variable=PATH]${HOME}/.local/bin:${PATH}" + displayName: Install Python tools + + - script: | + python setup.py develop + displayName: Install NNI + + - script: | + set -e + python -m pip install -r dependencies/recommended_legacy.txt + python -m pip install -e .[SMAC,BOHB,PPOTuner,DNGO] + displayName: Install extra dependencies + + # Need del later + - script: | + set -e + python test/vso_tools/interim_patch.py + displayName: Torch utils tensorboard interim patch + + + - script: | + cd test + python nni_test/nnitest/run_tests.py --config config/pr_tests.yml + displayName: Simple integration test + + - script: | + cd test + python -m pytest ut --ignore=ut/retiarii/test_convert_basic.py \ + --ignore=ut/retiarii/test_convert_operators.py \ + --ignore=ut/retiarii/test_convert_pytorch.py + displayName: Python unit test + + - script: | + set -e + cd ts/nni_manager + yarn test + displayName: TypeScript unit test + + - job: macos + pool: + vmImage: macOS-10.15 + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + YARN_CACHE_FOLDER: $(Pipeline.Workspace)/.yarn + timeoutInMinutes: 90 # macos test need extra time + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + + - task: NodeTool@0 + inputs: + versionSpec: 16.3.0 + displayName: Configure Node.js version + + - script: | + brew install swig@3 + rm -f /usr/local/bin/swig + ln -s /usr/local/opt/swig\@3/bin/swig /usr/local/bin/swig + displayName: Install brew packages + + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + + - task: Cache@2 + inputs: + key: 'yarn | "$(Agent.OS)" | ts/**/yarn.lock, 
!**/node_modules/**' + restoreKeys: | + yarn | "$(Agent.OS)" + path: $(YARN_CACHE_FOLDER) + displayName: Cache yarn packages + + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + displayName: Install Python tools + + - script: | + python setup.py develop + displayName: Install NNI + + - script: | + set -e + export CI=true + (cd ts/nni_manager && yarn test --exclude test/core/nnimanager.test.ts) + displayName: TypeScript unit test + + - script: | + set -e + python -m pip install -r dependencies/recommended.txt + python -m pip install -e .[SMAC,BOHB,PPOTuner,DNGO] + displayName: Install extra dependencies + + # Need del later + - script: | + set -e + python test/vso_tools/interim_patch.py + displayName: Torch utils tensorboard interim patch + + - script: | + cd test + python -m pytest ut + displayName: Python unit test + + - script: | + cd test + python nni_test/nnitest/run_tests.py --config config/pr_tests.yml + displayName: Simple integration test + + - job: windows + pool: + vmImage: windows-2019 + variables: + PIP_CACHE_DIR: $(Pipeline.Workspace)/.pip + YARN_CACHE_FOLDER: $(Pipeline.Workspace)/.yarn + timeoutInMinutes: 120 # windows test need extra time + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + + - task: NodeTool@0 + inputs: + versionSpec: 16.3.0 + displayName: Configure Node.js version + + - task: Cache@2 + inputs: + key: 'python | "$(Agent.OS)" | dependencies/*.txt' + restoreKeys: | + python | "$(Agent.OS)" + python + path: $(PIP_CACHE_DIR) + displayName: Cache pip packages + + - task: Cache@2 + inputs: + key: 'yarn | "$(Agent.OS)" | ts/**/yarn.lock, !**/node_modules/**' + restoreKeys: | + yarn | "$(Agent.OS)" + path: $(YARN_CACHE_FOLDER) + displayName: Cache yarn packages + + - script: | + set -e + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + displayName: Install Python tools + + - script: | + python setup.py develop --no-user + displayName: Install NNI + + - script: | + python -m pip install -r dependencies/recommended.txt + python -m pip install -e .[DNGO] + displayName: Install extra dependencies + + # Need del later + - script: | + set -e + python test/vso_tools/interim_patch.py + displayName: Torch utils tensorboard interim patch + + - script: | + cd test + python -m pytest ut + displayName: Python unit test + + - script: | + cd ts/nni_manager + yarn test + displayName: TypeScript unit test + + - script: | + cd test + python nni_test/nnitest/run_tests.py --config config/pr_tests.yml + displayName: Simple integration test + + +trigger: + branches: + exclude: [ l10n_master ] diff --git a/pipelines/full-test-linux.yml b/pipelines/full-test-linux.yml new file mode 100644 index 0000000000000000000000000000000000000000..d9e0b39f43713868f41cea7bbb5714356552385d --- /dev/null +++ b/pipelines/full-test-linux.yml @@ -0,0 +1,67 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: linux + pool: nni-ci-gpu-local + timeoutInMinutes: 120 + + steps: + - script: | + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]999.$(date -u +%Y%m%d%H%M%S)" + + python3 -m pip install -U -r dependencies/setup.txt + python3 -m pip install -r dependencies/develop.txt + displayName: Prepare + + - script: | + 
set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB,PPOTuner] + displayName: Install NNI + + - script: | + set -e + sudo apt-get install swig -y + python3 -m pip install -r dependencies/recommended_gpu.txt + python3 -m pip install -e .[SMAC,BOHB,PPOTuner,DNGO] + displayName: Install extra dependencies + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + (cd test && python3 -m pytest ut) + export PATH=$PATH:$PWD/toolchain/yarn/bin + export CI=true + (cd ts/nni_manager && yarn test) + displayName: Unit test + continueOnError: true + + - script: | + cd test + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts local + displayName: Integration test + continueOnError: true + + - script: | + cd test + source scripts/nas.sh + displayName: NAS test + continueOnError: true + + - script: | + cd test + source scripts/model_compression.sh + displayName: Model compression test diff --git a/pipelines/full-test-windows.yml b/pipelines/full-test-windows.yml new file mode 100644 index 0000000000000000000000000000000000000000..35c707dd573696669a3d76715687ca68e2edd336 --- /dev/null +++ b/pipelines/full-test-windows.yml @@ -0,0 +1,44 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: local_windows + pool: NNI CI WINDOWS FULL TEST + timeoutInMinutes: 120 + + steps: + - script: | + python -m pip install -U -r dependencies/setup.txt + python -m pip install -r dependencies/develop.txt + python -m pip install -r dependencies/recommended.txt + displayName: Install Python tools + + - script: | + python -m pip uninstall nni --yes + set NNI_RELEASE=999.0 + python setup.py build_ts + python setup.py bdist_wheel -p win_amd64 + python -m pip install dist/nni-999.0-py3-none-win_amd64.whl[PPOTuner,DNGO] + displayName: Install NNI + + - script: | + cd examples/tuners/customized_tuner + python setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install example customized tuner + + - script: | + cd test + python -m pytest ut + echo "TODO: TypeScript UT" + displayName: Unit test + continueOnError: true + + - script: | + cd test + python nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts local + displayName: Integration test diff --git a/pipelines/integration-test-adl.yml b/pipelines/integration-test-adl.yml new file mode 100644 index 0000000000000000000000000000000000000000..2fe9802489faf921069dd01a8fba3ba6c900d644 --- /dev/null +++ b/pipelines/integration-test-adl.yml @@ -0,0 +1,63 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: adl + pool: NNI CI KUBE CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + echo "Build docker image: $(build_docker_image)" + + python3 -m pip install --upgrade pip setuptools + displayName: Prepare + + - script: | + set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install 
dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB] + displayName: Build and install NNI + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + docker login -u nnidev -p $(docker_hub_password) + sed -i '$a RUN python3 -m pip install adaptdl tensorboard' Dockerfile + sed -i '$a COPY examples /examples' Dockerfile + sed -i '$a COPY test /test' Dockerfile + echo '## Build docker image ##' + docker build --build-arg NNI_RELEASE=${NNI_RELEASE} -t nnidev/nni-nightly . + echo '## Upload docker image ##' + docker push nnidev/nni-nightly + condition: eq(variables['build_docker_image'], 'true') + displayName: Build and upload docker image + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts adl \ + --nni_docker_image nnidev/nni-nightly \ + --checkpoint_storage_class $(checkpoint_storage_class) \ + --checkpoint_storage_size $(checkpoint_storage_size) \ + --nni_manager_ip $(nni_manager_ip) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts adl --exclude multi-phase,multi-thread + displayName: Integration test diff --git a/pipelines/integration-test-aml.yml b/pipelines/integration-test-aml.yml new file mode 100644 index 0000000000000000000000000000000000000000..d16899d0fca12670fe995a682a03846b0d41f2b7 --- /dev/null +++ b/pipelines/integration-test-aml.yml @@ -0,0 +1,62 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: aml + pool: NNI CI REMOTE CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + echo "Build docker image: $(build_docker_image)" + displayName: Prepare + + - script: | + set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB] + displayName: Build and install NNI + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + docker login -u nnidev -p $(docker_hub_password) + echo '## Build docker image ##' + docker build --build-arg NNI_RELEASE=${NNI_RELEASE} -t nnidev/nni-nightly . 
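+      # the push below publishes nnidev/nni-nightly so the AML compute target can pull it for trial jobs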
+ echo '## Upload docker image ##' + docker push nnidev/nni-nightly + condition: eq(variables['build_docker_image'], 'true') + displayName: Build and upload docker image + + - script: | + set -e + cd test + az login --service-principal -u $(client_id) -p $(client_secret) --tenant $(tenant_id) + python3 nni_test/nnitest/generate_ts_config.py \ + --ts aml \ + --subscription_id $(subscriptionId) \ + --resource_group $(resourceGroup) \ + --workspace_name $(workspaceName) \ + --compute_target $(computeTarget) \ + --nni_manager_ip $(manager_ip) \ + --nni_docker_image nnidev/nni-nightly + + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts aml + displayName: Integration test diff --git a/pipelines/integration-test-frameworkcontroller.yml b/pipelines/integration-test-frameworkcontroller.yml new file mode 100644 index 0000000000000000000000000000000000000000..9854a40aec6fbb0599e73e61a128c2894b4727f1 --- /dev/null +++ b/pipelines/integration-test-frameworkcontroller.yml @@ -0,0 +1,68 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +# variables set on VSO: (mostly for security concern) +# manager_ip +# docker_hub_password + +jobs: +- job: frameworkcontroller + pool: NNI CI KUBE CLI + timeoutInMinutes: 120 + + steps: + - script: | + echo "Working directory: ${PWD}" + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + python3 test/vso_tools/generate_nni_version.py + python3 -m pip install --upgrade pip setuptools + displayName: Prepare + + - script: | + set -e + python3 test/vso_tools/install_nni.py $(NNI_RELEASE) SMAC,BOHB + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install NNI + + - script: | + set -e + docker login -u nnidev -p $(docker_hub_password) + docker build --build-arg NNI_RELEASE=$(NNI_RELEASE) -t nnidev/nni-nightly . 
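+      # Unlike the adl/aml pipelines, this step has no build_docker_image
+      # condition, so the image is rebuilt and pushed on every scheduled run.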
+ docker push nnidev/nni-nightly + displayName: Build and upload docker image + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts frameworkcontroller \ + --keyvault_vaultname $(keyvault_vaultname) \ + --keyvault_name $(keyvault_name) \ + --azs_account $(azs_account) \ + --azs_share $(azs_share) \ + --nni_docker_image nnidev/nni-nightly \ + --nni_manager_ip $(manager_ip) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts frameworkcontroller --exclude multi-phase,multi-thread + displayName: Integration test + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts frameworkcontroller \ + --keyvault_vaultname $(keyvault_vaultname) \ + --keyvault_name $(keyvault_name) \ + --azs_account $(azs_account) \ + --azs_share $(azs_share) \ + --nni_docker_image nnidev/nni-nightly \ + --nni_manager_ip $(manager_ip) \ + --reuse_mode True \ + --config_version v2 + python3 nni_test/nnitest/run_tests.py --config config/integration_tests_config_v2.yml --ts frameworkcontroller --reuse_mode True --exclude multi-phase,multi-thread + displayName: Integration test (reuse mode) diff --git a/pipelines/integration-test-hybrid.yml b/pipelines/integration-test-hybrid.yml new file mode 100644 index 0000000000000000000000000000000000000000..862e695a1738393947defaac82ae254da3bb3869 --- /dev/null +++ b/pipelines/integration-test-hybrid.yml @@ -0,0 +1,100 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: hybrid + pool: NNI CI REMOTE CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + displayName: Prepare + + - script: | + set -e + python3 test/vso_tools/install_nni.py $(NNI_RELEASE) SMAC,BOHB + + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install NNI + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + sourceFolder: dist + targetFolder: /tmp/nnitest/$(Build.BuildId)/dist + overwrite: true + displayName: Copy wheel to remote machine + timeoutInMinutes: 10 + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + contents: Dockerfile + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy dockerfile to remote machine + timeoutInMinutes: 10 + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + sourceFolder: test + targetFolder: /tmp/nnitest/$(Build.BuildId)/test + overwrite: true + displayName: Copy test scripts to remote machine + timeoutInMinutes: 10 + + # Need del later + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + contents: test/vso_tools/interim_patch.py + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy torch patch to remote machine + timeoutInMinutes: 10 + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/start_docker.py $(NNI_RELEASE) $(Build.BuildId) $(password_in_docker) + displayName: Install NNI and run docker on Linux worker + + - script: | + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts hybrid \ + --remote_reuse true \ + --remote_user nni \ + --remote_host $(worker_ip) \ + 
--remote_pwd $(password_in_docker) \ + --remote_port $(docker_port) \ + --nni_manager_ip $(manager_ip) \ + --subscription_id $(subscription_id) \ + --resource_group $(resource_group) \ + --workspace_name $(workspace_name) \ + --compute_target $(compute_target) \ + --config_version v2 + python3 nni_test/nnitest/run_tests.py --config config/integration_tests_config_v2.yml --ts hybrid + displayName: Integration test + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/stop_docker.py $(Build.BuildId) + condition: always() + displayName: Stop docker diff --git a/pipelines/integration-test-kubeflow.yml b/pipelines/integration-test-kubeflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..6068d7998bdb70db0319931ce442a055d3855abf --- /dev/null +++ b/pipelines/integration-test-kubeflow.yml @@ -0,0 +1,78 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +# variables set on VSO: (mostly for security concern) +# manager_ip +# docker_hub_password + +jobs: +- job: kubeflow + pool: NNI CI KUBE CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + displayName: Prepare + + - script: | + set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-$(NNI_RELEASE)-py3-none-manylinux1_x86_64.whl[SMAC,BOHB] + displayName: Build and install NNI + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + docker login -u nnidev -p $(docker_hub_password) + docker build --build-arg NNI_RELEASE=$(NNI_RELEASE) -t nnidev/nni-nightly . 
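+      # NNI_RELEASE is passed as a build arg; presumably the Dockerfile uses
+      # it to install the wheel built earlier in this job rather than the
+      # release from PyPI.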
+ docker push nnidev/nni-nightly + displayName: Build and upload docker image + + - script: | + set -e + cd test + az login --service-principal -u $(client_id) -p $(client_secret) --tenant $(tenant_id) + python3 nni_test/nnitest/generate_ts_config.py \ + --ts kubeflow \ + --keyvault_vaultname $(keyvault_vaultname) \ + --keyvault_name $(keyvault_name) \ + --azs_account $(azs_account) \ + --azs_share $(azs_share) \ + --nni_docker_image nnidev/nni-nightly \ + --nni_manager_ip $(manager_ip) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts kubeflow --exclude multi-phase,multi-thread + displayName: Integration test + + - script: | + set -e + cd test + az login --service-principal -u $(client_id) -p $(client_secret) --tenant $(tenant_id) + python3 nni_test/nnitest/generate_ts_config.py \ + --ts kubeflow \ + --keyvault_vaultname $(keyvault_vaultname) \ + --keyvault_name $(keyvault_name) \ + --azs_account $(azs_account) \ + --azs_share $(azs_share) \ + --nni_docker_image nnidev/nni-nightly \ + --nni_manager_ip $(manager_ip) \ + --reuse_mode True \ + --config_version v2 + python3 nni_test/nnitest/run_tests.py --config config/integration_tests_config_v2.yml --ts kubeflow --reuse_mode True --exclude multi-phase,multi-thread + displayName: Integration test (reuse mode) diff --git a/pipelines/integration-test-openpai-linux.yml b/pipelines/integration-test-openpai-linux.yml new file mode 100644 index 0000000000000000000000000000000000000000..08733b6a5c3fed9bb40fbf3c21b1fb28cfdbe87a --- /dev/null +++ b/pipelines/integration-test-openpai-linux.yml @@ -0,0 +1,89 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +# variables set on VSO: (mostly for security concern) +# pai_user +# pai_token +# manager_ip +# docker_hub_password + +jobs: +- job: pai + pool: NNI CI PAI LINUX CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + echo "Build docker image: $(build_docker_image)" + displayName: Prepare + + - script: | + set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB] + displayName: Build and install NNI + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + docker login -u nnidev -p $(docker_hub_password) + echo '## Build docker image ##' + docker build --build-arg NNI_RELEASE=${NNI_RELEASE} -t nnidev/nni-nightly . 
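+      # This whole step is gated on the build_docker_image variable (see the
+      # condition below), so runs can presumably reuse an image pushed earlier.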
+ echo '## Upload docker image ##' + docker push nnidev/nni-nightly + condition: eq(variables['build_docker_image'], 'true') + displayName: Build and upload docker image + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts pai \ + --pai_reuse false \ + --pai_host https://ne.openpai.org \ + --pai_user $(pai_user) \ + --nni_docker_image nnidev/nni-nightly \ + --pai_storage_config_name confignfs-data \ + --pai_token $(pai_token) \ + --nni_manager_nfs_mount_path $(nni_manager_nfs_mount_path) \ + --container_nfs_mount_path $(container_nfs_mount_path) \ + --nni_manager_ip $(manager_ip) \ + --vc nni \ + --debug true + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts pai + displayName: Integration test + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts pai \ + --pai_reuse true \ + --pai_host https://ne.openpai.org \ + --pai_user $(pai_user) \ + --nni_docker_image nnidev/nni-nightly \ + --pai_storage_config_name confignfs-data \ + --pai_token $(pai_token) \ + --nni_manager_nfs_mount_path $(nni_manager_nfs_mount_path) \ + --container_nfs_mount_path $(container_nfs_mount_path) \ + --nni_manager_ip $(manager_ip) \ + --vc nni + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts pai + displayName: Integration test (reuse mode) diff --git a/pipelines/integration-test-remote-l2l.yml b/pipelines/integration-test-remote-l2l.yml new file mode 100644 index 0000000000000000000000000000000000000000..65a2d3a67e02a21d600f4e72076b0bff58498bb7 --- /dev/null +++ b/pipelines/integration-test-remote-l2l.yml @@ -0,0 +1,119 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +# variables set on VSO: (for security concern) +# manager_ip +# worker_ip +# password_in_docker + +jobs: +- job: remote_linux2linux + pool: NNI CI REMOTE CLI + timeoutInMinutes: 140 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + displayName: Prepare + + - script: | + set -e + python3 test/vso_tools/install_nni.py $(NNI_RELEASE) SMAC,BOHB + + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install NNI + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + sourceFolder: dist + targetFolder: /tmp/nnitest/$(Build.BuildId)/dist + overwrite: true + displayName: Copy wheel to remote machine + timeoutInMinutes: 10 + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + contents: Dockerfile + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy dockerfile to remote machine + timeoutInMinutes: 10 + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + sourceFolder: test + targetFolder: /tmp/nnitest/$(Build.BuildId)/test + overwrite: true + displayName: Copy test scripts to remote machine + timeoutInMinutes: 10 + + # Need del later + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + contents: test/vso_tools/interim_patch.py + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy torch patch to remote machine + timeoutInMinutes: 10 + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: python3 
/tmp/nnitest/$(Build.BuildId)/test/vso_tools/start_docker.py $(NNI_RELEASE) $(Build.BuildId) $(password_in_docker) + displayName: Install NNI and run docker on Linux worker + + - script: | + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts remote \ + --remote_reuse true \ + --remote_user nni \ + --remote_host $(worker_ip) \ + --remote_port $(docker_port) \ + --remote_pwd $(password_in_docker) \ + --nni_manager_ip $(manager_ip) \ + --azurestoragetoken $(azureblob_token_test) \ + --nfs_server $(NFS_IP) \ + --local_mount_point $(LOCAL_MOUNT_POINT) \ + --remote_mount_point $(REMOTE_MOUNT_POINT) \ + --exported_directory $(Exported_Directory) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote + displayName: Integration test (reuse mode) + + - script: | + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts remote \ + --remote_reuse false \ + --remote_user nni \ + --remote_host $(worker_ip) \ + --remote_port $(docker_port) \ + --remote_pwd $(password_in_docker) \ + --nni_manager_ip $(manager_ip) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote + displayName: Integration test + + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/stop_docker.py $(Build.BuildId) + condition: always() + displayName: Stop docker diff --git a/pipelines/integration-test-remote-l2w.yml b/pipelines/integration-test-remote-l2w.yml new file mode 100644 index 0000000000000000000000000000000000000000..1cd2af74923e125eccaf303e6b5197a1ce8ab568 --- /dev/null +++ b/pipelines/integration-test-remote-l2w.yml @@ -0,0 +1,83 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +variables: + worker: remote_nni-ci-gpu-04-w + +# variables set on VSO: (for security concern) +# manager_ip +# worker_ip +# worker_port +# worker_password + +jobs: +- job: remote_linux2windows + pool: NNI CI EAST US REMOTE CLI + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + echo "Build ID: $(Build.BuildId)" + python3 setup.py clean --all + displayName: Prepare on Linux manager + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy source files to Windows worker + timeoutInMinutes: 10 + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: | + conda activate l2w & python /tmp/nnitest/$(Build.BuildId)/test/vso_tools/install_nni.py $(NNI_RELEASE) + failOnStdErr: false + displayName: Install NNI on Windows worker + + - script: | + set -e + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB] + displayName: Install NNI on Linux manager + + - script: | + set -e + cd examples/tuners/customized_tuner + python3 setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install customized tuner + + - script: | + set -e + cd test + python3 nni_test/nnitest/generate_ts_config.py \ + --ts remote \ + --remote_user AzureUser \ + --remote_host $(worker_ip) \ + --remote_port $(worker_port) \ + --remote_pwd 
$(worker_password) \ + --nni_manager_ip $(manager_ip) + python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote + displayName: Integration test + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: rm -rf /tmp/nnitest/$(Build.BuildId) + condition: always() + displayName: Clean up on Windows worker diff --git a/pipelines/integration-test-remote-w2l.yml b/pipelines/integration-test-remote-w2l.yml new file mode 100644 index 0000000000000000000000000000000000000000..662960fcfca9416aa4c2089545450abb1d8f3eef --- /dev/null +++ b/pipelines/integration-test-remote-w2l.yml @@ -0,0 +1,73 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +variables: + worker: remote-nni-ci-gpu-04 + +# variables set on VSO: +# manager_ip +# worker_ip +# password_in_docker + +jobs: +- job: remote_windows2linux + pool: NNI CI WINDOWS REMOTE CLI + timeoutInMinutes: 120 + + steps: + - script: + python test/vso_tools/generate_nni_version.py + python -m pip install --upgrade pip setuptools + python setup.py clean --all + displayName: Prepare on Windows manager + + - task: CopyFilesOverSSH@0 + inputs: + sshEndpoint: $(worker) + targetFolder: /tmp/nnitest/$(Build.BuildId) + overwrite: true + displayName: Copy source files to Linux worker + timeoutInMinutes: 10 + + - script: | + python test/vso_tools/install_nni.py $(NNI_RELEASE) + + cd examples/tuners/customized_tuner + python setup.py develop --user + nnictl algo register --meta meta_file.yml + displayName: Install NNI on Windows manager + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: | + python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/build_wheel.py $(NNI_RELEASE) + python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/start_docker.py $(NNI_RELEASE) $(Build.BuildId) $(password_in_docker) + failOnStdErr: false + displayName: Install NNI and run docker on Linux worker + + - powershell: | + cd test + python nni_test/nnitest/generate_ts_config.py ` + --ts remote ` + --remote_reuse false ` + --remote_user nni ` + --remote_host $(worker_ip) ` + --remote_port $(docker_port) ` + --remote_pwd $(password_in_docker) ` + --nni_manager_ip $(manager_ip) + Get-Content config/training_service.yml + python nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote --exclude cifar10 + displayName: Integration test + + - task: SSH@0 + inputs: + sshEndpoint: $(worker) + runOptions: commands + commands: python3 /tmp/nnitest/$(Build.BuildId)/test/vso_tools/stop_docker.py $(Build.BuildId) + displayName: Stop docker diff --git a/pipelines/integration-test-trt.yml b/pipelines/integration-test-trt.yml new file mode 100644 index 0000000000000000000000000000000000000000..5f73b34d8c3e218dd11e76d093086bcbb54f7442 --- /dev/null +++ b/pipelines/integration-test-trt.yml @@ -0,0 +1,35 @@ +trigger: none +pr: none +schedules: +- cron: 0 16 * * * + branches: + include: [ master ] + +jobs: +- job: trt + pool: NNI CI TENSORRT + timeoutInMinutes: 120 + + steps: + - script: | + export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S) + echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/ENTER/bin" + echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}" + + echo "Working directory: ${PWD}" + echo "NNI version: ${NNI_RELEASE}" + displayName: Prepare + + - script: | + set -e + export PATH="$PATH:/home/nni-test/ENTER/bin" + python3 setup.py build_ts + python3 setup.py bdist_wheel -p manylinux1_x86_64 + python3 -m pip install 
dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl[SMAC,BOHB]
+    displayName: Build and install NNI
+
+  - script: |
+      set -e
+      cd test
+      python3 nni_test/nnitest/test_quantize_model_speedup.py
+    displayName: Quantize model speedup test
diff --git a/pipelines/release.yml b/pipelines/release.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9874760cfdfc1ac750f8325fad13247722858e40
--- /dev/null
+++ b/pipelines/release.yml
@@ -0,0 +1,145 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+trigger: none
+pr: none
+
+jobs:
+- job: validate_version_number
+  pool:
+    vmImage: Ubuntu 18.04
+
+  steps:
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: 3.8
+    displayName: Configure Python version
+
+  - script: |
+      echo $(build_type)
+      echo $(NNI_RELEASE)
+      export BRANCH_TAG=`git describe --tags --abbrev=0`
+      echo $BRANCH_TAG
+      if [[ $BRANCH_TAG == v$(NNI_RELEASE) && $(NNI_RELEASE) =~ ^[0-9](.[0-9])+$ ]]; then
+        echo 'Build version matches branch tag'
+      else
+        echo 'Build version does not match branch tag'
+        exit 1
+      fi
+    condition: eq( variables['build_type'], 'release' )
+    displayName: Validate release version number and branch tag
+
+  - script: |
+      echo $(build_type)
+      echo $(NNI_RELEASE)
+      if [[ $(NNI_RELEASE) =~ ^[0-9](.[0-9])+(a|b|rc)[0-9]$ ]]; then
+        echo 'Valid prerelease version $(NNI_RELEASE)'
+        echo `git describe --tags --abbrev=0`
+      else
+        echo 'Invalid build version $(NNI_RELEASE)'
+        exit 1
+      fi
+    condition: ne( variables['build_type'], 'release' )
+    displayName: Validate prerelease version number
+
+- job: linux
+  dependsOn: validate_version_number
+  condition: succeeded()
+  pool:
+    vmImage: Ubuntu 18.04
+
+  steps:
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: 3.8
+    displayName: Configure Python version
+
+  - script: |
+      python -m pip install --upgrade pip setuptools wheel twine
+      python -m pip install jupyter jupyterlab
+      python test/vso_tools/build_wheel.py $(NNI_RELEASE)
+    displayName: Build wheel
+
+  - script: |
+      if [[ $(build_type) == 'release' || $(build_type) == 'rc' ]]; then
+        echo 'uploading to pypi...'
+        python -m twine upload -u nni -p $(pypi_password) dist/*
+      else
+        echo 'uploading to testpypi...'
+        python -m twine upload -u nni -p $(pypi_password) --repository-url https://test.pypi.org/legacy/ dist/*
+      fi
+    displayName: Upload wheel
+
+  - script: |
+      if [[ $(build_type) == 'release' || $(build_type) == 'rc' ]]; then
+        docker login -u msranni -p $(docker_hub_password)
+        export IMAGE_NAME=msranni/nni
+      else
+        docker login -u nnidev -p $(docker_hub_password)
+        export IMAGE_NAME=nnidev/nni-test
+      fi
+
+      echo "## Building ${IMAGE_NAME}:$(NNI_RELEASE) ##"
+      docker build --build-arg NNI_RELEASE=$(NNI_RELEASE) -t ${IMAGE_NAME} .
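+      # The versioned tag is always pushed; the bare (latest) tag is pushed
+      # further below only when build_type is not 'rc'.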
+ docker tag ${IMAGE_NAME} ${IMAGE_NAME}:v$(NNI_RELEASE) + docker push ${IMAGE_NAME}:v$(NNI_RELEASE) + if [[ $(build_type) != 'rc' ]]; then + docker push ${IMAGE_NAME} + fi + displayName: Build and upload docker image + +- job: macos + dependsOn: validate_version_number + condition: succeeded() + pool: + vmImage: macOS-10.15 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + + - script: | + python -m pip install --upgrade pip setuptools wheel twine + python -m pip install jupyter jupyterlab + python test/vso_tools/build_wheel.py $(NNI_RELEASE) + displayName: Build wheel + + - script: | + if [[ $(build_type) == 'release' || $(build_type) == 'rc' ]]; then + echo '## uploading to pypi ##' + python -m twine upload -u nni -p $(pypi_password) dist/* + else + echo '## uploading to testpypi ##' + python -m twine upload -u nni -p $(pypi_password) --repository-url https://test.pypi.org/legacy/ dist/* + fi + displayName: Upload wheel + +- job: windows + dependsOn: validate_version_number + condition: succeeded() + pool: + vmImage: windows-2019 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: 3.8 + displayName: Configure Python version + + - powershell: | + python -m pip install --upgrade pip setuptools wheel twine + python -m pip install jupyter jupyterlab + python test/vso_tools/build_wheel.py $(NNI_RELEASE) + displayName: Build wheel + + - powershell: | + if ($env:BUILD_TYPE -eq 'release' -Or $env:BUILD_TYPE -eq 'rc') { + Write-Host '## uploading to pypi ##' + python -m twine upload -u nni -p $(pypi_password) dist/* + } else { + Write-Host '## uploading to testpypi ##' + python -m twine upload -u nni -p $(pypi_password) --repository-url https://test.pypi.org/legacy/ dist/* + } + displayName: Upload wheel diff --git a/pylintrc b/pylintrc new file mode 100644 index 0000000000000000000000000000000000000000..ef30bcb2e25b3defef3f85e6d1d479a6abfa96c3 --- /dev/null +++ b/pylintrc @@ -0,0 +1,50 @@ +# Usage: +# python3 -m pylint --rcfile=PATH_TO_THIS_FILE PACKAGE_NAME +# or +# python3 -m pylint --rcfile=PATH_TO_THIS_FILE SOURCE_FILE.py + +[SETTINGS] + +max-line-length=140 + +max-args=8 +max-locals=15 +max-statements=50 +max-attributes=15 + +const-naming-style=any + +# based on pylint 2.1.1 +disable=W,C,R,I,no-member + +# will be enforced on CI Pipeline +enable= unused-wildcard-import, + bad-whitespace, + unused-import, + bad-continuation, + wrong-import-order, + trailing-whitespace, + logging-not-lazy, + line-too-long, + unused-variable, + wildcard-import, +# useless-super-delegation, + len-as-condition, + logging-format-interpolation, + redefined-builtin, + deprecated-method + +# will change to `enable` one day +# disable= missing-docstring + +# will not be enforced on CI but highly recommend contributor fixing it +# enable=no-member, +# too-many-branches, +# protected-access + +ignore-patterns=test* + +# List of members which are set dynamically and missed by pylint inference +generated-members=numpy.*,torch.*,tensorflow.*,pycuda.*,tensorrt.* + +ignored-modules=tensorflow,_winapi,msvcrt,tensorrt,pycuda,nni_node diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..67e9c46ffcd328044b2455aa9f59de4a151b7057 --- /dev/null +++ b/setup.py @@ -0,0 +1,287 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Script for installation and distribution. + +You can use environment variable `NNI_RELEASE` to set release version. 
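+For example, `NNI_RELEASE=2.0 python setup.py bdist_wheel -p manylinux1_x86_64` produces a wheel whose version is 2.0 (the same commands appear under "Release" below).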
+
+If the release version is not set, this defaults to a development build whose version string will be `999.dev0`.
+
+
+## Prepare Environment ##
+
+Install development dependencies:
+
+  $ pip install -U -r dependencies/setup.txt
+  $ pip install -r dependencies/develop.txt
+
+
+## Development ##
+
+Build and install for development:
+
+  $ python setup.py develop
+
+Uninstall:
+
+  $ pip uninstall nni
+
+Remove generated files (use "--all" to also remove the toolchain and built wheel):
+
+  $ python setup.py clean [--all]
+
+Compile TypeScript modules without re-installing:
+
+  $ python setup.py build_ts
+
+
+## Release ##
+
+Build wheel package:
+
+  $ NNI_RELEASE=2.0 python setup.py build_ts
+  $ NNI_RELEASE=2.0 python setup.py bdist_wheel -p manylinux1_x86_64
+
+For a JupyterLab 2.x package:
+
+  $ JUPYTER_LAB_VERSION=2.3.1 NNI_RELEASE=2.0 python setup.py build_ts
+  $ JUPYTER_LAB_VERSION=2.3.1 NNI_RELEASE=2.0 python setup.py bdist_wheel -p manylinux1_x86_64
+
+Here "2.0" is the version string and "manylinux1_x86_64" is the platform tag.
+The platform may also be "macosx_10_9_x86_64" or "win_amd64".
+
+`build_ts` must be manually invoked before `bdist_wheel`,
+or setuptools cannot locate the JS files that should be packed into the wheel.
+"""
+
+from distutils.cmd import Command
+from distutils.command.build import build
+from distutils.command.clean import clean
+import glob
+import os
+import shutil
+import sys
+
+import setuptools
+from setuptools.command.develop import develop
+
+import setup_ts
+
+release = os.environ.get('NNI_RELEASE')
+
+def _get_jupyter_lab_version():
+    try:
+        import jupyterlab
+        return jupyterlab.__version__
+    except ImportError:
+        return '3.x'
+
+jupyter_lab_major_version = _get_jupyter_lab_version().split('.')[0]
+
+def check_jupyter_lab_version():
+    environ_version = os.environ.get('JUPYTER_LAB_VERSION')
+
+    jupyter_lab_version = _get_jupyter_lab_version()
+
+    if environ_version:
+        if jupyter_lab_version.split('.')[0] != environ_version.split('.')[0]:
+            sys.exit(f'ERROR: To build a JupyterLab extension, JUPYTER_LAB_VERSION must match the installed version ({jupyter_lab_version}); current value: {environ_version}')
+    elif jupyter_lab_version.split('.')[0] != '3':
+        sys.exit(f'ERROR: Installed JupyterLab is {jupyter_lab_version}; to build an extension for a version other than the default (3.x), set JUPYTER_LAB_VERSION={jupyter_lab_version} first')
+
+def _setup():
+    setuptools.setup(
+        name = 'nni',
+        version = release or '999.dev0',
+        description = 'Neural Network Intelligence project',
+        long_description = open('README.md', encoding='utf-8').read(),
+        long_description_content_type = 'text/markdown',
+        url = 'https://github.com/Microsoft/nni',
+        author = 'Microsoft NNI Team',
+        author_email = 'nni@microsoft.com',
+        license = 'MIT',
+        classifiers = [
+            'License :: OSI Approved :: MIT License',
+            'Operating System :: MacOS :: MacOS X',
+            'Operating System :: Microsoft :: Windows :: Windows 10',
+            'Operating System :: POSIX :: Linux',
+            'Programming Language :: Python :: 3 :: Only',
+            'Topic :: Scientific/Engineering :: Artificial Intelligence',
+        ],
+
+        packages = _find_python_packages(),
+        package_data = {
+            'nni': _find_requirements_txt() + _find_default_config(),  # setuptools issue #1806
+            'nni_node': _find_node_files()  # note: this does not work before building
+        },
+
+        data_files = _get_data_files(),
+
+        python_requires = '>=3.6',
+        install_requires = _read_requirements_txt('dependencies/required.txt'),
+        extras_require = {
+            'SMAC': _read_requirements_txt('dependencies/required_extra.txt', 'SMAC'),
+            'BOHB': _read_requirements_txt('dependencies/required_extra.txt', 'BOHB'),
+
'PPOTuner': _read_requirements_txt('dependencies/required_extra.txt', 'PPOTuner'), + 'DNGO': _read_requirements_txt('dependencies/required_extra.txt', 'DNGO'), + }, + setup_requires = ['requests'], + + entry_points = { + 'console_scripts' : [ + 'nnictl = nni.tools.nnictl.nnictl:parse_args' + ] + }, + + cmdclass = { + 'build': Build, + 'build_ts': BuildTs, + 'clean': Clean, + 'develop': Develop, + } + ) + +def _get_data_files(): + data_files = [] + if jupyter_lab_major_version == '2': + extension_file = glob.glob("nni_node/jupyter-extension/extensions/nni-jupyter-extension*.tgz") + data_files = [('share/jupyter/lab/extensions', extension_file)] + return data_files + +def _find_python_packages(): + packages = [] + for dirpath, dirnames, filenames in os.walk('nni'): + if '/__pycache__' not in dirpath and '/.mypy_cache' not in dirpath and '/default_config' not in dirpath: + packages.append(dirpath.replace('/', '.')) + return sorted(packages) + ['nni_node'] + +def _find_requirements_txt(): + requirement_files = [] + for dirpath, dirnames, filenames in os.walk('nni'): + if 'requirements.txt' in filenames: + requirement_files.append(os.path.join(dirpath[len('nni/'):], 'requirements.txt')) + return requirement_files + +def _find_default_config(): + return ['runtime/default_config/' + name for name in os.listdir('nni/runtime/default_config')] + +def _find_node_files(): + if not os.path.exists('nni_node'): + if release and 'build_ts' not in sys.argv and 'clean' not in sys.argv: + sys.exit('ERROR: To build a release version, run "python setup.py build_ts" first') + return [] + files = [] + for dirpath, dirnames, filenames in os.walk('nni_node'): + for filename in filenames: + files.append(os.path.join(dirpath[len('nni_node/'):], filename)) + if '__init__.py' in files: + files.remove('__init__.py') + return sorted(files) + +def _read_requirements_txt(file_path, section=None): + with open(file_path) as f: + lines = [line.strip() for line in f.readlines() if line.strip()] # remove whitespaces and empty lines + if section is None: + return [line for line in lines if not line.startswith('#')] + selected_lines = [] + started = False + for line in lines: + if started: + if line.startswith('#'): + return selected_lines + else: + selected_lines.append(line) + elif line.startswith('# ' + section): + started = True + return selected_lines + +def _using_conda_or_virtual_environment(): + return sys.prefix != sys.base_prefix or os.path.isdir(os.path.join(sys.prefix, 'conda-meta')) + +class BuildTs(Command): + description = 'build TypeScript modules' + + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + check_jupyter_lab_version() + setup_ts.build(release) + +class Build(build): + def run(self): + if not release: + sys.exit('Please set environment variable "NNI_RELEASE="') + + check_jupyter_lab_version() + + if os.path.islink('nni_node/main.js'): + sys.exit('A development build already exists. 
Please uninstall NNI and run "python3 setup.py clean --all".')
+        open('nni/version.py', 'w').write(f"__version__ = '{release}'")
+        super().run()
+
+class Develop(develop):
+    user_options = develop.user_options + [
+        ('no-user', None, 'Prevent automatically adding "--user"'),
+        ('skip-ts', None, 'Prevent building TypeScript modules')
+    ]
+
+    boolean_options = develop.boolean_options + ['no-user', 'skip-ts']
+
+    def initialize_options(self):
+        super().initialize_options()
+        self.no_user = None
+        self.skip_ts = None
+
+    def finalize_options(self):
+        # if `--user` or `--no-user` is explicitly set, do nothing
+        # otherwise activate `--user` if using system python
+        if not self.user and not self.no_user:
+            self.user = not _using_conda_or_virtual_environment()
+        super().finalize_options()
+
+    def run(self):
+        open('nni/version.py', 'w').write("__version__ = '999.dev0'")
+        if not self.skip_ts:
+            setup_ts.build(release=None)
+        super().run()
+
+class Clean(clean):
+    def finalize_options(self):
+        self._all = self.all
+        self.all = True  # always use `clean --all`
+        super().finalize_options()
+
+    def run(self):
+        super().run()
+        setup_ts.clean(self._all)
+        _clean_temp_files()
+        shutil.rmtree('nni.egg-info', ignore_errors=True)
+        if self._all:
+            shutil.rmtree('dist', ignore_errors=True)
+
+
+def _clean_temp_files():
+    for pattern in _temp_files:
+        for path in glob.glob(pattern):
+            if os.path.islink(path) or os.path.isfile(path):
+                os.remove(path)
+            else:
+                shutil.rmtree(path)
+
+_temp_files = [
+    # unit test
+    'test/model_path/',
+    'test/temp.json',
+    'test/ut/sdk/*.pth',
+    'test/ut/tools/annotation/_generated/'
+]
+
+
+if __name__ == '__main__':
+    _setup()
diff --git a/setup_ts.py b/setup_ts.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfbc513803acb5441bf9ba61fe764db7f9817d68
--- /dev/null
+++ b/setup_ts.py
@@ -0,0 +1,290 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+"""
+Script for building TypeScript modules.
+This script is called by `setup.py`, and ordinary users should avoid using it directly.
+
+It compiles TypeScript source files in the `ts` directory,
+and copies (or links) the JavaScript output as well as dependencies to `nni_node`.
+
+You can set the environment variable `GLOBAL_TOOLCHAIN=1` to use the globally installed node and yarn, if you know what you are doing.
+"""
+
+from io import BytesIO
+import json
+import os
+from pathlib import Path
+import shutil
+import subprocess
+import sys
+import tarfile
+import traceback
+from zipfile import ZipFile
+
+
+node_version = 'v16.3.0'
+yarn_version = 'v1.22.10'
+
+def _get_jupyter_lab_version():
+    try:
+        import jupyterlab
+        return jupyterlab.__version__
+    except ImportError:
+        return '3.x'
+
+jupyter_lab_major_version = _get_jupyter_lab_version().split('.')[0]
+
+def build(release):
+    """
+    Compile TypeScript modules and copy or symlink to nni_node directory.
+
+    `release` is the version number without the leading letter "v".
+
+    If `release` is None or empty, this is a development build and uses symlinks on Linux/macOS;
+    otherwise this is a release build and copies files instead.
+    On Windows it always copies files because creating a symlink requires extra privilege.
+    """
+    if release or not os.environ.get('GLOBAL_TOOLCHAIN'):
+        download_toolchain()
+    prepare_nni_node()
+    update_package()
+    compile_ts(release)
+    if release or sys.platform == 'win32':
+        copy_nni_node(release)
+    else:
+        symlink_nni_node()
+    restore_package()
+
+def clean(clean_all=False):
+    """
+    Remove TypeScript-related intermediate files.
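+    (Concretely: the `nni_node` directory and the paths listed in `generated_files` at the bottom of this file.)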
+    Python intermediate files are not touched here.
+    """
+    shutil.rmtree('nni_node', ignore_errors=True)
+
+    for file_or_dir in generated_files:
+        path = Path(file_or_dir)
+        if path.is_symlink() or path.is_file():
+            path.unlink()
+        elif path.is_dir():
+            shutil.rmtree(path)
+
+    if clean_all:
+        shutil.rmtree('toolchain', ignore_errors=True)
+
+
+if sys.platform == 'linux' or sys.platform == 'darwin':
+    node_executable = 'node'
+    node_spec = f'node-{node_version}-{sys.platform}-x64'
+    node_download_url = f'https://nodejs.org/dist/{node_version}/{node_spec}.tar.xz'
+    node_extractor = lambda data: tarfile.open(fileobj=BytesIO(data), mode='r:xz')
+    node_executable_in_tarball = 'bin/node'
+
+    yarn_executable = 'yarn'
+    yarn_download_url = f'https://github.com/yarnpkg/yarn/releases/download/{yarn_version}/yarn-{yarn_version}.tar.gz'
+
+    path_env_seperator = ':'
+
+elif sys.platform == 'win32':
+    node_executable = 'node.exe'
+    node_spec = f'node-{node_version}-win-x64'
+    node_download_url = f'https://nodejs.org/dist/{node_version}/{node_spec}.zip'
+    node_extractor = lambda data: ZipFile(BytesIO(data))
+    node_executable_in_tarball = 'node.exe'
+
+    yarn_executable = 'yarn.cmd'
+    yarn_download_url = f'https://github.com/yarnpkg/yarn/releases/download/{yarn_version}/yarn-{yarn_version}.tar.gz'
+
+    path_env_seperator = ';'
+
+else:
+    raise RuntimeError('Unsupported system')
+
+
+def download_toolchain():
+    """
+    Download and extract node and yarn.
+    """
+    if Path('toolchain/node', node_executable_in_tarball).is_file():
+        return
+
+    Path('toolchain').mkdir(exist_ok=True)
+    import requests  # place it here so setup.py can install it before importing
+
+    _print(f'Downloading node.js from {node_download_url}')
+    resp = requests.get(node_download_url)
+    resp.raise_for_status()
+    _print('Extracting node.js')
+    tarball = node_extractor(resp.content)
+    tarball.extractall('toolchain')
+    shutil.rmtree('toolchain/node', ignore_errors=True)
+    Path('toolchain', node_spec).rename('toolchain/node')
+
+    _print(f'Downloading yarn from {yarn_download_url}')
+    resp = requests.get(yarn_download_url)
+    resp.raise_for_status()
+    _print('Extracting yarn')
+    tarball = tarfile.open(fileobj=BytesIO(resp.content), mode='r:gz')
+    tarball.extractall('toolchain')
+    shutil.rmtree('toolchain/yarn', ignore_errors=True)
+    Path(f'toolchain/yarn-{yarn_version}').rename('toolchain/yarn')
+
+def update_package():
+    if jupyter_lab_major_version == '2':
+        package_json = json.load(open('ts/jupyter_extension/package.json'))
+        json.dump(package_json, open('ts/jupyter_extension/.package_default.json', 'w'), indent=2)
+
+        package_json['scripts']['build'] = 'tsc && jupyter labextension link .'
+        package_json['dependencies']['@jupyterlab/application'] = '^2.3.0'
+        package_json['dependencies']['@jupyterlab/launcher'] = '^2.3.0'
+
+        package_json['jupyterlab']['outputDir'] = 'build'
+        json.dump(package_json, open('ts/jupyter_extension/package.json', 'w'), indent=2)
+        print(f'updated package.json with {json.dumps(package_json, indent=2)}')
+
+def restore_package():
+    if jupyter_lab_major_version == '2':
+        package_json = json.load(open('ts/jupyter_extension/.package_default.json'))
+        print(f'restored package.json with {json.dumps(package_json, indent=2)}')
+        json.dump(package_json, open('ts/jupyter_extension/package.json', 'w'), indent=2)
+        os.remove('ts/jupyter_extension/.package_default.json')
+
+def prepare_nni_node():
+    """
+    Create a clean nni_node directory, then copy the node runtime to it.
+ """ + shutil.rmtree('nni_node', ignore_errors=True) + Path('nni_node').mkdir() + + Path('nni_node/__init__.py').write_text('"""NNI node.js modules."""\n') + + node_src = Path('toolchain/node', node_executable_in_tarball) + node_dst = Path('nni_node', node_executable) + shutil.copy(node_src, node_dst) + + +def compile_ts(release): + """ + Use yarn to download dependencies and compile TypeScript code. + """ + _print('Building NNI manager') + _yarn('ts/nni_manager') + _yarn('ts/nni_manager', 'build') + # todo: I don't think these should be here + shutil.rmtree('ts/nni_manager/dist/config', ignore_errors=True) + shutil.copytree('ts/nni_manager/config', 'ts/nni_manager/dist/config') + + _print('Building web UI') + _yarn('ts/webui') + _yarn('ts/webui', 'build') + + _print('Building JupyterLab extension') + if release: + _yarn('ts/jupyter_extension') + _yarn('ts/jupyter_extension', 'build') + else: + try: + _yarn('ts/jupyter_extension') + _yarn('ts/jupyter_extension', 'build') + except Exception: + _print('Failed to build JupyterLab extension, skip for develop mode', color='yellow') + _print(traceback.format_exc(), color='yellow') + + +def symlink_nni_node(): + """ + Create symlinks to compiled JS files. + If you manually modify and compile TS source files you don't need to install again. + """ + _print('Creating symlinks') + + for path in Path('ts/nni_manager/dist').iterdir(): + _symlink(path, Path('nni_node', path.name)) + _symlink('ts/nni_manager/package.json', 'nni_node/package.json') + _symlink('ts/nni_manager/node_modules', 'nni_node/node_modules') + + _symlink('ts/webui/build', 'nni_node/static') + + if jupyter_lab_major_version == '2': + _symlink('ts/jupyter_extension/build', 'nni_node/jupyter-extension') + _symlink(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions') + elif Path('ts/jupyter_extension/dist').exists(): + _symlink('ts/jupyter_extension/dist', 'nni_node/jupyter-extension') + + +def copy_nni_node(version): + """ + Copy compiled JS files to nni_node. + This is meant for building release package, so you need to provide version string. + The version will written to `package.json` in nni_node directory, + while `package.json` in ts directory will be left unchanged. 
+ """ + _print('Copying files') + + # copytree(..., dirs_exist_ok=True) is not supported by Python 3.6 + for path in Path('ts/nni_manager/dist').iterdir(): + if path.is_dir(): + shutil.copytree(path, Path('nni_node', path.name)) + elif path.name != 'nni_manager.tsbuildinfo': + shutil.copyfile(path, Path('nni_node', path.name)) + + package_json = json.load(open('ts/nni_manager/package.json')) + if version: + while len(version.split('.')) < 3: # node.js semver requires at least three parts + version = version + '.0' + package_json['version'] = version + json.dump(package_json, open('nni_node/package.json', 'w'), indent=2) + + # reinstall without development dependencies + _yarn('ts/nni_manager', '--prod', '--cwd', str(Path('nni_node').resolve())) + + shutil.copytree('ts/webui/build', 'nni_node/static') + + if jupyter_lab_major_version == '2': + shutil.copytree('ts/jupyter_extension/build', 'nni_node/jupyter-extension/build') + shutil.copytree(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions') + elif version or Path('ts/jupyter_extension/dist').exists(): + shutil.copytree('ts/jupyter_extension/dist', 'nni_node/jupyter-extension') + + +_yarn_env = dict(os.environ) +# `Path('nni_node').resolve()` does not work on Windows if the directory not exists +_yarn_env['PATH'] = str(Path().resolve() / 'nni_node') + path_env_seperator + os.environ['PATH'] +_yarn_path = Path().resolve() / 'toolchain/yarn/bin' / yarn_executable + +def _yarn(path, *args): + if os.environ.get('GLOBAL_TOOLCHAIN'): + subprocess.run(['yarn', *args], cwd=path, check=True) + else: + subprocess.run([str(_yarn_path), *args], cwd=path, check=True, env=_yarn_env) + + +def _symlink(target_file, link_location): + target = Path(target_file) + link = Path(link_location) + relative = os.path.relpath(target, link.parent) + link.symlink_to(relative, target.is_dir()) + + +def _print(*args, color='cyan'): + color_code = {'yellow': 33, 'cyan': 36}[color] + if sys.platform == 'win32': + print(*args, flush=True) + else: + print(f'\033[1;{color_code}m#', *args, '\033[0m', flush=True) + + +generated_files = [ + 'ts/nni_manager/dist', + 'ts/nni_manager/node_modules', + 'ts/webui/build', + 'ts/webui/node_modules', + + # unit test + 'ts/nni_manager/.nyc_output', + 'ts/nni_manager/coverage', + 'ts/nni_manager/exp_profile.json', + 'ts/nni_manager/metrics.json', + 'ts/nni_manager/trial_jobs.json', +] diff --git a/test/.coveragerc b/test/.coveragerc new file mode 100644 index 0000000000000000000000000000000000000000..2d76ac1916c9b88bc5af29d0d80be92b776aab77 --- /dev/null +++ b/test/.coveragerc @@ -0,0 +1,25 @@ +# .coveragerc to control coverage.py +[run] +branch = True + +concurrency = multiprocessing + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + +ignore_errors = True diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..35133a80630d4dff2d3b46b45616a486395eb769 --- /dev/null +++ b/test/.gitignore @@ -0,0 +1,13 @@ +__pycache__ + +tuner_search_space.json +tuner_result.txt +assessor_result.txt + +_generated_model.py +_generated_model_*.py +_generated_model +data 
+generated +lightning_logs +model.onnx diff --git a/test/async_sharing_test/config.yml b/test/async_sharing_test/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..6897698093f016793bf319fa6dfdbfc11e956847 --- /dev/null +++ b/test/async_sharing_test/config.yml @@ -0,0 +1,25 @@ +authorName: default +experimentName: example_weight_sharing +trialConcurrency: 3 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: remote +#choice: true, false +useAnnotation: false +multiThread: true +tuner: + codeDir: . + classFileName: simple_tuner.py + className: SimpleTuner +trial: + command: python3 main.py + codeDir: . + gpuNum: 0 +machineList: + - ip: 10.10.10.10 + username: bob + passwd: bob123 + - ip: 10.10.10.11 + username: bob + passwd: bob123 diff --git a/test/async_sharing_test/main.py b/test/async_sharing_test/main.py new file mode 100644 index 0000000000000000000000000000000000000000..afa2bddf25bffd3e91181527a0bece8317565eb3 --- /dev/null +++ b/test/async_sharing_test/main.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Test code for weight sharing +need NFS setup and mounted as `/mnt/nfs/nni` +""" + +import hashlib +import os +import random +import time + +import nni + + +def generate_rand_file(fl_name): + """ + generate random file and write to `fl_name` + """ + fl_size = random.randint(1024, 102400) + fl_dir = os.path.split(fl_name)[0] + if not os.path.exists(fl_dir): + os.makedirs(fl_dir) + with open(fl_name, 'wb') as fout: + fout.write(os.urandom(fl_size)) + + +def check_sum(fl_name, tid=None): + """ + compute checksum for generated file of `fl_name` + """ + hasher = hashlib.md5() + with open(fl_name, 'rb') as fin: + for chunk in iter(lambda: fin.read(4096), b""): + hasher.update(chunk) + ret = hasher.hexdigest() + if tid is not None: + ret = ret + str(tid) + return ret + + +if __name__ == '__main__': + nfs_path = '/mnt/nfs/nni/test' + params = nni.get_next_parameter() + print(params) + if params['id'] == 0: + model_file = os.path.join(nfs_path, str(params['id']), 'model.dat') + generate_rand_file(model_file) + time.sleep(10) + nni.report_final_result({ + 'checksum': check_sum(model_file, tid=params['id']), + 'path': model_file + }) + else: + model_file = params['prev_path'] + time.sleep(10) + nni.report_final_result({ + 'checksum': check_sum(model_file, tid=params['prev_id']) + }) diff --git a/test/async_sharing_test/simple_tuner.py b/test/async_sharing_test/simple_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..ec49c3832217bbc17e4f5cbae51c360b0d83cc4f --- /dev/null +++ b/test/async_sharing_test/simple_tuner.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
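+
+# Overview, inferred from main.py above: trial 0 writes a random file to the
+# NFS mount and reports its md5 checksum and path; each later trial receives
+# that path from this tuner, re-hashes the file, and receive_trial_result()
+# raises ValueError if the checksums diverge, which would mean weight sharing
+# across machines is broken.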
+ +""" +SimpleTuner for Weight Sharing +""" + +import logging + +from threading import Event, Lock +from nni.tuner import Tuner + +_logger = logging.getLogger('WeightSharingTuner') + + +class SimpleTuner(Tuner): + """ + simple tuner, test for weight sharing + """ + + def __init__(self): + super(SimpleTuner, self).__init__() + self.trial_meta = {} + self.f_id = None # father + self.sig_event = Event() + self.thread_lock = Lock() + + def generate_parameters(self, parameter_id, **kwargs): + if self.f_id is None: + self.thread_lock.acquire() + self.f_id = parameter_id + self.trial_meta[parameter_id] = { + 'prev_id': 0, + 'id': parameter_id, + 'checksum': None, + 'path': '', + } + _logger.info('generate parameter for father trial %s', parameter_id) + self.thread_lock.release() + return { + 'prev_id': 0, + 'id': parameter_id, + } + else: + self.sig_event.wait() + self.thread_lock.acquire() + self.trial_meta[parameter_id] = { + 'id': parameter_id, + 'prev_id': self.f_id, + 'prev_path': self.trial_meta[self.f_id]['path'] + } + self.thread_lock.release() + return self.trial_meta[parameter_id] + + def receive_trial_result(self, parameter_id, parameters, reward, **kwargs): + self.thread_lock.acquire() + if parameter_id == self.f_id: + self.trial_meta[parameter_id]['checksum'] = reward['checksum'] + self.trial_meta[parameter_id]['path'] = reward['path'] + self.sig_event.set() + else: + if reward['checksum'] != self.trial_meta[self.f_id]['checksum']: + raise ValueError("Inconsistency in weight sharing: {} != {}".format( + reward['checksum'], self.trial_meta[self.f_id]['checksum'])) + self.thread_lock.release() + + def update_search_space(self, search_space): + pass diff --git a/test/config/assessors/curvefitting-v2.yml b/test/config/assessors/curvefitting-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..ce17bbb6556e6b431b3e747bc2f582b065e55cf5 --- /dev/null +++ b/test/config/assessors/curvefitting-v2.yml @@ -0,0 +1,20 @@ +experimentName: default_test +searchSpaceFile: ../naive_trial/search_space.json +trialCommand: python3 trial.py +trialCodeDirectory: ../naive_trial +trialGpuNumber: 0 +trialConcurrency: 8 +maxExperimentDuration: 15m +maxTrialNumber: 8 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: + platform: local +assessor: + name: Curvefitting + classArgs: + epoch_num: 20 + start_step: 6 + threshold: 0.95 diff --git a/test/config/assessors/curvefitting.yml b/test/config/assessors/curvefitting.yml new file mode 100644 index 0000000000000000000000000000000000000000..ab6f4283ce8b1a98f1190ed5cb4f79d9c88cb05b --- /dev/null +++ b/test/config/assessors/curvefitting.yml @@ -0,0 +1,27 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 10m +maxTrialNum: 8 +trialConcurrency: 8 +searchSpacePath: ../naive_trial/search_space.json + +tuner: + builtinTunerName: TPE + classArgs: + optimize_mode: maximize +assessor: + builtinAssessorName: Curvefitting + classArgs: + epoch_num: 20 + start_step: 6 + threshold: 0.95 +trial: + codeDir: ../naive_trial + command: python3 trial.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/assessors/medianstop-v2.yml b/test/config/assessors/medianstop-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..b59a594304c3c6f368ccd03e2bae2c63bc5e187f --- /dev/null +++ b/test/config/assessors/medianstop-v2.yml @@ -0,0 +1,18 @@ +experimentName: default_test +searchSpaceFile: ../naive_trial/search_space.json 
+trialCommand: python3 trial.py +trialCodeDirectory: ../naive_trial +trialGpuNumber: 0 +trialConcurrency: 8 +maxExperimentDuration: 15m +maxTrialNumber: 8 +tuner: + name: TPE + classArgs: + optimize_mode: maximize +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize \ No newline at end of file diff --git a/test/config/assessors/medianstop.yml b/test/config/assessors/medianstop.yml new file mode 100644 index 0000000000000000000000000000000000000000..672b8e4cb4049cd61679dd591d1ce0db7fe9ff09 --- /dev/null +++ b/test/config/assessors/medianstop.yml @@ -0,0 +1,25 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 8 +trialConcurrency: 8 +searchSpacePath: ../naive_trial/search_space.json + +tuner: + builtinTunerName: TPE + classArgs: + optimize_mode: maximize + +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize + +trial: + codeDir: ../naive_trial + command: python3 trial.py + gpuNum: 0 + +useAnnotation: false + +trainingServicePlatform: local diff --git a/test/config/customized_tuners/demotuner-sklearn-classification-v2.yml b/test/config/customized_tuners/demotuner-sklearn-classification-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c8a5bfb35137ea0621ef1de76bfcdb486e03018 --- /dev/null +++ b/test/config/customized_tuners/demotuner-sklearn-classification-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/sklearn/classification/search_space.json +trialCommand: python3 main.py +trialCodeDirectory: ../../../examples/trials/sklearn/classification +trialGpuNumber: 0 +trialConcurrency: 4 +maxExperimentDuration: 15m +maxTrialNumber: 2 +tuner: + name: demotuner +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/customized_tuners/demotuner-sklearn-classification.yml b/test/config/customized_tuners/demotuner-sklearn-classification.yml new file mode 100644 index 0000000000000000000000000000000000000000..38aba2d1aad0e2dbe89fbb87ba28eeedc7dab409 --- /dev/null +++ b/test/config/customized_tuners/demotuner-sklearn-classification.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/sklearn/classification/search_space.json + +tuner: + builtinTunerName: demotuner +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/sklearn/classification + command: python3 main.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/cifar10-pytorch-adl.yml b/test/config/examples/cifar10-pytorch-adl.yml new file mode 100644 index 0000000000000000000000000000000000000000..07635fb514dd52daa642a5d757fa6695fcb02960 --- /dev/null +++ b/test/config/examples/cifar10-pytorch-adl.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: ./cifar10_adl_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: /examples/trials/cifar10_pytorch + command: python3 main_adl.py --epochs 1 + gpuNum: 1 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: adl diff --git 
a/test/config/examples/cifar10-pytorch-v2.yml b/test/config/examples/cifar10-pytorch-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..1273f9456767aa5737873e2291082d377d4581bb --- /dev/null +++ b/test/config/examples/cifar10-pytorch-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: cifar10_search_space.json +trialCommand: python3 main.py --epochs 1 --batches 1 +trialCodeDirectory: ../../../examples/trials/cifar10_pytorch +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/cifar10-pytorch.yml b/test/config/examples/cifar10-pytorch.yml new file mode 100644 index 0000000000000000000000000000000000000000..be7d7ac61868f2bb7d8fe20b80b01998a32e0d7e --- /dev/null +++ b/test/config/examples/cifar10-pytorch.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./cifar10_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/cifar10_pytorch + command: python3 main.py --epochs 1 --batches 1 + gpuNum: 1 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/cifar10_adl_search_space.json b/test/config/examples/cifar10_adl_search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..0dadf05f6a55a4514e4e31372f5cc917476b818a --- /dev/null +++ b/test/config/examples/cifar10_adl_search_space.json @@ -0,0 +1,5 @@ +{ + "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001]}, + "bs":{"_type":"choice","_value":[64, 96, 128]}, + "model":{"_type":"choice", "_value":["ResNet18", "SENet18", "MobileNet"]} +} diff --git a/test/config/examples/cifar10_search_space.json b/test/config/examples/cifar10_search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..ca1c0d20348acc2e0c1a2717b6696fdab0e53e5a --- /dev/null +++ b/test/config/examples/cifar10_search_space.json @@ -0,0 +1,5 @@ +{ + "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001, 0.0001]}, + "optimizer":{"_type":"choice", "_value":["SGD", "Adadelta", "Adagrad", "Adam", "Adamax"]}, + "model":{"_type":"choice", "_value":["vgg", "resnet18"]} +} diff --git a/test/config/examples/classic-nas-pytorch-v2.yml b/test/config/examples/classic-nas-pytorch-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..585cccbda343b3e435b17a64d55665c8b3a32b92 --- /dev/null +++ b/test/config/examples/classic-nas-pytorch-v2.yml @@ -0,0 +1,18 @@ +experimentName: default_test +searchSpaceFile: ni-nas-search-space.json +trialCommand: python3 main.py --epochs 1 --batches 1 +trialCodeDirectory: ../../../examples/nas/legacy/classic_nas +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: PPOTuner + classArgs: + optimize_mode: maximize +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/classic-nas-pytorch.yml b/test/config/examples/classic-nas-pytorch.yml new file mode 100644 index 0000000000000000000000000000000000000000..3414c9b936b23f1eb254d5c5f651fd71d039a22d --- /dev/null +++ b/test/config/examples/classic-nas-pytorch.yml @@ -0,0 +1,21 @@ +authorName: 
nni +experimentName: default_test +maxExecDuration: 10m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: nni-nas-search-space.json + +tuner: + builtinTunerName: PPOTuner + classArgs: + optimize_mode: maximize +trial: + command: python3 mnist.py --epochs 1 + codeDir: ../../../examples/nas/legacy/classic_nas + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local \ No newline at end of file diff --git a/test/config/examples/classic-nas-tf2.yml b/test/config/examples/classic-nas-tf2.yml new file mode 100644 index 0000000000000000000000000000000000000000..c43c113497c26230879169e0118291bf4107f23a --- /dev/null +++ b/test/config/examples/classic-nas-tf2.yml @@ -0,0 +1,21 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 10m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: nni-nas-search-space-tf2.json + +tuner: + builtinTunerName: PPOTuner + classArgs: + optimize_mode: maximize +trial: + command: python3 train.py --epochs 1 + codeDir: ../../../examples/nas/legacy/classic_nas-tf + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local \ No newline at end of file diff --git a/test/config/examples/mnist-annotation-v2.yml b/test/config/examples/mnist-annotation-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..0601ffedba49cbbf19e943fdd493376d5f06ec63 --- /dev/null +++ b/test/config/examples/mnist-annotation-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/mnist-keras/search_space.json +trialCommand: python3 mnist.py --batch_num 10 +trialCodeDirectory: ../../../examples/trials/mnist-annotation +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 2 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/mnist-annotation.yml b/test/config/examples/mnist-annotation.yml new file mode 100644 index 0000000000000000000000000000000000000000..330400570dc220890be7acafbce7defb203f6807 --- /dev/null +++ b/test/config/examples/mnist-annotation.yml @@ -0,0 +1,21 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-annotation + command: python3 mnist.py --batch_num 10 + +useAnnotation: true +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-keras-v2.yml b/test/config/examples/mnist-keras-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..7d1313d239033115d90e48686e558537c7f0fbd9 --- /dev/null +++ b/test/config/examples/mnist-keras-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/mnist-keras/search_space.json +trialCommand: python3 mnist-keras.py --num_train 200 --epochs 1 +trialCodeDirectory: ../../../examples/trials/mnist-keras +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 2 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/mnist-keras.yml b/test/config/examples/mnist-keras.yml new file mode 100644 index 
0000000000000000000000000000000000000000..6bb9e0e999ea6c0ed06133b3cf1028a38636274c --- /dev/null +++ b/test/config/examples/mnist-keras.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/mnist-keras/search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-keras + command: python3 mnist-keras.py --num_train 200 --epochs 1 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-nested-search-space-v2.yml b/test/config/examples/mnist-nested-search-space-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..e5099daf9009cd537da4fe5286b0eaf7b2c3aadb --- /dev/null +++ b/test/config/examples/mnist-nested-search-space-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/mnist-nested-search-space/search_space.json +trialCommand: python3 mnist.py --batch_num 10 +trialCodeDirectory: ../../../examples/trials/mnist-nested-search-space +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 2 +tuner: + name: TPE +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/mnist-nested-search-space.yml b/test/config/examples/mnist-nested-search-space.yml new file mode 100644 index 0000000000000000000000000000000000000000..89e51a180a832da79922279abd2070f85f154e81 --- /dev/null +++ b/test/config/examples/mnist-nested-search-space.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/mnist-nested-search-space/search_space.json + +tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: TPE +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-nested-search-space + command: python3 mnist.py --batch_num 10 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-pytorch-gpu.yml b/test/config/examples/mnist-pytorch-gpu.yml new file mode 100644 index 0000000000000000000000000000000000000000..b8784c1c3a21b82f89b8fd14c4f07ef1947968f4 --- /dev/null +++ b/test/config/examples/mnist-pytorch-gpu.yml @@ -0,0 +1,26 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./mnist_pytorch_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-pytorch + command: python3 mnist.py --epochs 1 --batch_num 10 + gpuNum: 1 +localConfig: + useActiveGpu: true + maxTrialNumPerGpu: 2 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-pytorch-v2.yml b/test/config/examples/mnist-pytorch-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..e481507d745684b381cda6448aecaa153b0f091c --- /dev/null +++ b/test/config/examples/mnist-pytorch-v2.yml @@ -0,0 +1,17 @@ +experimentName: default_test +searchSpaceFile: ./mnist_pytorch_search_space.json +trialCommand: 
python3 mnist.py --epochs 1 --batch_num 10 +trialCodeDirectory: ../../../examples/trials/mnist-pytorch +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize + diff --git a/test/config/examples/mnist-pytorch.yml b/test/config/examples/mnist-pytorch.yml new file mode 100644 index 0000000000000000000000000000000000000000..570d9de81f43858a2be54e9c7491bebfa856748b --- /dev/null +++ b/test/config/examples/mnist-pytorch.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./mnist_pytorch_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-pytorch + command: python3 mnist.py --epochs 1 --batch_num 10 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-tfv1-v2.yml b/test/config/examples/mnist-tfv1-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..49f3a79e9dba592670510496ef33340a03819d4b --- /dev/null +++ b/test/config/examples/mnist-tfv1-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ./mnist_search_space.json +trialCommand: python3 mnist.py --batch_num 10 +trialCodeDirectory: ../../../examples/trials/mnist-tfv1 +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/mnist-tfv1.yml b/test/config/examples/mnist-tfv1.yml new file mode 100644 index 0000000000000000000000000000000000000000..f8393918ad67fb81e1adc2ade011ad1420f1763b --- /dev/null +++ b/test/config/examples/mnist-tfv1.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ./mnist_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-tfv1 + command: python3 mnist.py --batch_num 10 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist-tfv2-v2.yml b/test/config/examples/mnist-tfv2-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..089ce672e870138b35807590aa7bad733eec3a2d --- /dev/null +++ b/test/config/examples/mnist-tfv2-v2.yml @@ -0,0 +1,17 @@ +experimentName: default_test +searchSpaceFile: ./mnist_search_space.json +trialCommand: python3 mnist.py +trialCodeDirectory: ../../../examples/trials/mnist-tfv2 +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 4 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize + diff --git a/test/config/examples/mnist-tfv2.yml b/test/config/examples/mnist-tfv2.yml new file mode 100644 index 0000000000000000000000000000000000000000..b0b7913607f6d9b7412e32c7984160d8cdcb77e5 --- /dev/null +++ b/test/config/examples/mnist-tfv2.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: 
./mnist_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-tfv2 + command: python3 mnist.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/mnist_pytorch_search_space.json b/test/config/examples/mnist_pytorch_search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c26cdce369fa13e3fdf7c34f10b9cd89a6fc931e --- /dev/null +++ b/test/config/examples/mnist_pytorch_search_space.json @@ -0,0 +1,6 @@ +{ + "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]}, + "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]}, + "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}, + "momentum":{"_type":"uniform","_value":[0, 1]} +} diff --git a/test/config/examples/mnist_search_space.json b/test/config/examples/mnist_search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..dd05405e27b14bc815ab564bf9d6a1ff9b42d809 --- /dev/null +++ b/test/config/examples/mnist_search_space.json @@ -0,0 +1,7 @@ +{ + "dropout_rate":{"_type":"uniform","_value":[0.5, 0.9]}, + "conv_size":{"_type":"choice","_value":[2,3,5,7]}, + "hidden_size":{"_type":"choice","_value":[124, 512, 1024]}, + "batch_size": {"_type":"choice", "_value": [16, 32]}, + "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]} +} diff --git a/test/config/examples/sklearn-classification-v2.yml b/test/config/examples/sklearn-classification-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c7f566cd6ce99da504929852b068de56fb0a634 --- /dev/null +++ b/test/config/examples/sklearn-classification-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/sklearn/classification/search_space.json +trialCommand: python3 main.py +trialCodeDirectory: ../../../examples/trials/sklearn/classification +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 4 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/examples/sklearn-classification.yml b/test/config/examples/sklearn-classification.yml new file mode 100644 index 0000000000000000000000000000000000000000..cb1e7d41589d6a4ab4106c05f5de9eb644e5a456 --- /dev/null +++ b/test/config/examples/sklearn-classification.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/sklearn/classification/search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/sklearn/classification + command: python3 main.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/examples/sklearn-regression-v2.yml b/test/config/examples/sklearn-regression-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..2d128cf061e7cd738955f35412ec24d709e2f17b --- /dev/null +++ b/test/config/examples/sklearn-regression-v2.yml @@ -0,0 +1,17 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/sklearn/regression/search_space.json +trialCommand: python3 main.py +trialCodeDirectory: 
../../../examples/trials/sklearn/regression +trialGpuNumber: 0 +trialConcurrency: 2 +maxExperimentDuration: 15m +maxTrialNumber: 4 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize + diff --git a/test/config/examples/sklearn-regression.yml b/test/config/examples/sklearn-regression.yml new file mode 100644 index 0000000000000000000000000000000000000000..b102212252c2db761fd22bea3f0315c6fa3b194b --- /dev/null +++ b/test/config/examples/sklearn-regression.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/sklearn/regression/search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/sklearn/regression + command: python3 main.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/integration_tests.yml b/test/config/integration_tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..dd34cb1e52311d73bf0e4dc0663e8b6dd1486b0b --- /dev/null +++ b/test/config/integration_tests.yml @@ -0,0 +1,257 @@ + +defaultTestCaseConfig: + launchCommand: nnictl create --config $configFile --debug + stopCommand: nnictl stop + experimentStatusCheck: True + platform: linux darwin win32 + trainingService: all + +testCases: +####################################################################### +# nni examples test +####################################################################### +- name: sklearn-classification + # test case config yml file, relative to the nni source code directory + configFile: test/config/examples/sklearn-classification.yml + + # test case specific config; the content of configFile will be overridden + # by the config section + config: + + # validator is called after the experiment is done + # validator class needs to be implemented in nni_test/nnitest/validators.py + validator: + + # launch command, default launch command is 'nnictl create --config $configFile' + launchCommand: nnictl create --config $configFile --debug + + # stop command, default stop command is 'nnictl stop', empty means no stop command + stopCommand: nnictl stop + + # set the experiment ID into a variable; the variable name should start with $, such as $expId + setExperimentIdtoVar: $expId + + # check the status of the experiment before calling the validator + experimentStatusCheck: True + +- name: shared-storage-remote-azureblob + configFile: test/config/sharedstorage_test/config_sharedstorage_remote_azureblob.yml + config: + sharedStorage: + localMountPoint: /tmp/nnimount/testlocalrootpath + remoteMountPoint: /tmp/nnimount/testremoterootpath + storageAccountName: nennistorage + storageAccountKey: $(azureblob_token_test) + containerName: sharedstorage + trainingService: remote + validator: + class: FileExistValidator + kwargs: + rootpath: /tmp/nnimount/testlocalrootpath + +- name: sklearn-regression + configFile: test/config/examples/sklearn-regression.yml + +# The local mount point may be slow to unmount when the shared-storage-remote-azureblob experiment stops, +# so keep the two shared storage tests away from each other. 
+- name: shared-storage-remote-nfs + configFile: test/config/sharedstorage_test/config_sharedstorage_remote_nfs.yml + trainingService: remote + validator: + class: FileExistValidator + kwargs: + rootpath: /tmp/nnimount/testlocalrootpath + +- name: mnist-tensorflow + configFile: test/config/examples/mnist-tfv2.yml + config: + maxExecDuration: 10m # This example takes longer in remote mode; set maxExecDuration to 10m to avoid timeout errors. + maxTrialNum: 1 + trialConcurrency: 1 + trainingService: local remote # FIXME: timeout on pai, looks like tensorflow failed to link CUDA + +- name: mnist-pytorch-local + configFile: test/config/examples/mnist-pytorch.yml + # download the data first, to prevent concurrent download issues. + launchCommand: python3 ../examples/trials/mnist-pytorch/mnist.py --epochs 1 --batch_num 0 --data_dir ../examples/trials/mnist-pytorch/data && nnictl create --config $configFile --debug + trainingService: local + +- name: mnist-pytorch-local-gpu + configFile: test/config/examples/mnist-pytorch-gpu.yml + # download the data first, to prevent concurrent download issues. + launchCommand: python3 ../examples/trials/mnist-pytorch/mnist.py --epochs 1 --batch_num 0 --data_dir ../examples/trials/mnist-pytorch/data && nnictl create --config $configFile --debug + trainingService: local + +- name: mnist-pytorch-non-local + configFile: test/config/examples/mnist-pytorch.yml + launchCommand: nnictl create --config $configFile --debug + trainingService: remote pai kubeflow frameworkcontroller dlts + +# TODO: move this and the following commented test cases to pytorch or tf2 +#- name: mnist-annotation +# configFile: test/config/examples/mnist-annotation.yml +# config: +# maxTrialNum: 1 +# trialConcurrency: 1 + +- name: cifar10-pytorch + configFile: test/config/examples/cifar10-pytorch.yml + config: + # this example downloads large pretrained model weights, + # so run only 1 trial to save time + maxExecDuration: 10m + maxTrialNum: 1 + trialConcurrency: 1 + trial: + command: python3 main.py --epochs 1 --batches 1 + gpuNum: 0 + +- name: cifar10-pytorch-adl + configFile: test/config/examples/cifar10-pytorch-adl.yml + trainingService: adl + +#- name: nested-ss +# configFile: test/config/examples/mnist-nested-search-space.yml + +- name: classic-nas-gen-ss + configFile: test/config/examples/classic-nas-pytorch.yml + launchCommand: nnictl ss_gen --trial_command="python3 mnist.py --epochs 1" --trial_dir=../examples/nas/legacy/classic_nas --file=config/examples/nni-nas-search-space.json + stopCommand: + experimentStatusCheck: False + trainingService: local + +- name: classic-nas-pytorch + configFile: test/config/examples/classic-nas-pytorch.yml + # remove search space file + stopCommand: nnictl stop + onExitCommand: python3 -c "import os; os.remove('config/examples/nni-nas-search-space.json')" + trainingService: local + +######################################################################### +# nni features test +######################################################################### +- name: metrics-float + configFile: test/config/metrics_test/config.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics.json + +- name: export-float + configFile: test/config/metrics_test/config.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: ExportValidator + +- name: metrics-dict + configFile: test/config/metrics_test/config_dict_metrics.yml + config: + maxTrialNum: 1 + 
trialConcurrency: 1 + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics_dict.json + +- name: export-dict + configFile: test/config/metrics_test/config_dict_metrics.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: ExportValidator + +- name: experiment-import + configFile: test/config/nnictl_experiment/sklearn-classification.yml + validator: + class: ImportValidator + kwargs: + import_data_file_path: config/nnictl_experiment/test_import.json + +- name: foreground + configFile: test/config/examples/sklearn-regression.yml + launchCommand: python3 nni_test/nnitest/foreground.py --config $configFile --timeout 45 + stopCommand: + experimentStatusCheck: False + platform: linux darwin + +# Experiment resume test part 1 +- name: nnictl-resume-1 + configFile: test/config/examples/sklearn-regression.yml + setExperimentIdtoVar: $resumeExpId + # create a subfolder in codeDir, to test handling of subfolders in codeDir + launchCommand: python3 -c "import os; os.makedirs('../examples/trials/sklearn/regression/subfolder', exist_ok=True); open('../examples/trials/sklearn/regression/subfolder/subfile', 'a').close()" && nnictl create --config $configFile --debug + +# Experiment resume test part 2 +- name: nnictl-resume-2 + configFile: test/config/examples/sklearn-regression.yml + launchCommand: nnictl resume $resumeExpId + +# Experiment view test +- name: nnictl-view + configFile: test/config/examples/sklearn-regression.yml + launchCommand: nnictl view $resumeExpId + experimentStatusCheck: False + + +######################################################################### +# nni assessor test +######################################################################### +- name: assessor-curvefitting + configFile: test/config/assessors/curvefitting.yml + +- name: assessor-medianstop + configFile: test/config/assessors/medianstop.yml + +######################################################################### +# nni tuners test +######################################################################### +#- name: tuner-anneal +# configFile: test/config/tuners/anneal.yml + +#- name: tuner-evolution +# configFile: test/config/tuners/evolution.yml + +#- name: tuner-random +# configFile: test/config/tuners/random.yml + +#- name: tuner-smac +# configFile: test/config/tuners/smac.yml +# platform: linux darwin + +#- name: tuner-tpe +# configFile: test/config/tuners/tpe.yml + +#- name: tuner-batch +# configFile: test/config/tuners/batch.yml + +#- name: tuner-bohb +# configFile: test/config/tuners/bohb.yml +# platform: linux darwin + +#- name: tuner-gp +# configFile: test/config/tuners/gp.yml + +#- name: tuner-grid +# configFile: test/config/tuners/gridsearch.yml + +#- name: tuner-hyperband +# configFile: test/config/tuners/hyperband.yml + +#- name: tuner-metis +# configFile: test/config/tuners/metis.yml + +- name: tuner-regularized_evolution + configFile: test/config/tuners/regularized_evolution_tuner.yml + +######################################################################### +# nni customized-tuners test +######################################################################### +- name: customized-tuners-demotuner + configFile: test/config/customized_tuners/demotuner-sklearn-classification.yml diff --git a/test/config/integration_tests_config_v2.yml b/test/config/integration_tests_config_v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..ef72ac31f762cc8a175bf8aa64c2ce675ca7e0cb --- /dev/null +++ b/test/config/integration_tests_config_v2.yml @@ -0,0 +1,135 @@ + 
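+# This file mirrors integration_tests.yml, but each test case points at the *-v2.yml
+# experiment configs, which use the new (V2) config schema
+# (trialCommand / trialCodeDirectory / trainingService instead of
+# trial.command / trial.codeDir / trainingServicePlatform).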
+defaultTestCaseConfig: + launchCommand: nnictl create --config $configFile --debug + stopCommand: nnictl stop + experimentStatusCheck: True + platform: linux darwin win32 + trainingService: all + +testCases: +####################################################################### +# nni examples test +####################################################################### +- name: sklearn-classification + # test case config yml file, relative to the nni source code directory + configFile: test/config/examples/sklearn-classification-v2.yml + +- name: sklearn-regression + configFile: test/config/examples/sklearn-regression-v2.yml + +- name: mnist-tensorflow + configFile: test/config/examples/mnist-tfv2-v2.yml + trainingService: local remote + +- name: mnist-pytorch-local + configFile: test/config/examples/mnist-pytorch-v2.yml + # download the data first, to prevent concurrent download issues. + launchCommand: python3 ../examples/trials/mnist-pytorch/mnist.py --epochs 1 --batch_num 0 --data_dir ../examples/trials/mnist-pytorch/data && nnictl create --config $configFile --debug + trainingService: local + +- name: mnist-pytorch-non-local + configFile: test/config/examples/mnist-pytorch-v2.yml + trainingService: remote pai kubeflow frameworkcontroller dlts hybrid + +- name: cifar10-pytorch + configFile: test/config/examples/cifar10-pytorch-v2.yml + +- name: cifar10-pytorch-adl + configFile: test/config/examples/cifar10-pytorch-adl.yml + trainingService: adl + +- name: classic-nas-gen-ss + configFile: test/config/examples/classic-nas-pytorch-v2.yml + launchCommand: nnictl ss_gen --trial_command="python3 mnist.py --epochs 1" --trial_dir=../examples/nas/legacy/classic_nas --file=config/examples/nni-nas-search-space.json + stopCommand: + experimentStatusCheck: False + trainingService: local + +- name: classic-nas-pytorch + configFile: test/config/examples/classic-nas-pytorch-v2.yml + # remove search space file + stopCommand: nnictl stop + onExitCommand: python3 -c "import os; os.remove('config/examples/nni-nas-search-space.json')" + trainingService: local + +######################################################################### +# nni features test +######################################################################### +- name: metrics-float + configFile: test/config/metrics_test/config-v2.yml + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics.json + +- name: export-float + configFile: test/config/metrics_test/config-v2.yml + validator: + class: ExportValidator + +- name: metrics-dict + configFile: test/config/metrics_test/config_dict_metrics-v2.yml + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics_dict.json + +- name: export-dict + configFile: test/config/metrics_test/config_dict_metrics-v2.yml + validator: + class: ExportValidator + +- name: experiment-import + configFile: test/config/nnictl_experiment/sklearn-classification-v2.yml + validator: + class: ImportValidator + kwargs: + import_data_file_path: config/nnictl_experiment/test_import.json + +- name: foreground + configFile: test/config/examples/sklearn-regression-v2.yml + launchCommand: python3 nni_test/nnitest/foreground.py --config $configFile --timeout 45 + stopCommand: + experimentStatusCheck: False + platform: linux darwin + +# Experiment resume test part 1 +- name: nnictl-resume-1 + configFile: test/config/examples/sklearn-regression-v2.yml + setExperimentIdtoVar: $resumeExpId + # create a subfolder in codeDir, to test handling of subfolders in codeDir + launchCommand: python3 -c "import os; 
os.makedirs('../examples/trials/sklearn/regression/subfolder', exist_ok=True); open('../examples/trials/sklearn/regression/subfolder/subfile', 'a').close()" && nnictl create --config $configFile --debug + +# Experiment resume test part 2 +- name: nnictl-resume-2 + configFile: test/config/examples/sklearn-regression-v2.yml + launchCommand: nnictl resume $resumeExpId + +# Experiment view test +- name: nnictl-view + configFile: test/config/examples/sklearn-regression-v2.yml + launchCommand: nnictl view $resumeExpId + experimentStatusCheck: False + + +######################################################################### +# nni assessor test +######################################################################### +- name: assessor-curvefitting + configFile: test/config/assessors/curvefitting-v2.yml + +- name: assessor-medianstop + configFile: test/config/assessors/medianstop-v2.yml + +######################################################################### +# nni tuners test +######################################################################### +- name: tuner-regularized_evolution + configFile: test/config/tuners/regularized_evolution_tuner-v2.yml + +######################################################################### +# nni customized-tuners test +######################################################################### +- name: customized-tuners-demotuner + configFile: test/config/customized_tuners/demotuner-sklearn-classification-v2.yml + diff --git a/test/config/integration_tests_tf2.yml b/test/config/integration_tests_tf2.yml new file mode 100644 index 0000000000000000000000000000000000000000..4c36b2af620a76197e8de88f4225f6e33597d15e --- /dev/null +++ b/test/config/integration_tests_tf2.yml @@ -0,0 +1,142 @@ + +defaultTestCaseConfig: + launchCommand: nnictl create --config $configFile --debug + stopCommand: nnictl stop + experimentStatusCheck: True + platform: linux darwin win32 + trainingService: all + +testCases: +####################################################################### +# nni examples test +####################################################################### +- name: sklearn-classification + # test case config yml file, relative to the nni source code directory + configFile: test/config/examples/sklearn-classification.yml + + # test case specific config; the content of configFile will be overridden + # by the config section + config: + + # validator is called after the experiment is done + # validator class needs to be implemented in nni_test/nnitest/validators.py + validator: + + # launch command, default launch command is 'nnictl create --config $configFile' + launchCommand: nnictl create --config $configFile --debug + + # stop command, default stop command is 'nnictl stop', empty means no stop command + stopCommand: nnictl stop + + # set the experiment ID into a variable; the variable name should start with $, such as $expId + setExperimentIdtoVar: $expId + + # check the status of the experiment before calling the validator + experimentStatusCheck: True + +- name: sklearn-regression + configFile: test/config/examples/sklearn-regression.yml + +- name: mnist-pytorch + configFile: test/config/examples/mnist-pytorch.yml + +- name: cifar10-pytorch + configFile: test/config/examples/cifar10-pytorch.yml + config: + # this example downloads large pretrained model weights, + # so run only 1 trial to save time + maxExecDuration: 10m + maxTrialNum: 1 + trialConcurrency: 1 + trial: + command: python3 main.py --epochs 1 --batches 1 + gpuNum: 0 + +- name: cifar10-pytorch-adl + configFile: 
test/config/examples/cifar10-pytorch-adl.yml + trainingService: adl + +- name: classic-nas-gen-ss + configFile: test/config/examples/classic-nas-tf2.yml + launchCommand: nnictl ss_gen --trial_command="python3 train.py --epochs 1" --trial_dir=../examples/nas/legacy/classic_nas-tf --file=config/examples/nni-nas-search-space-tf2.json + stopCommand: + experimentStatusCheck: False + trainingService: local + +- name: classic-nas-tensorflow2 + configFile: test/config/examples/classic-nas-tf2.yml + # remove search space file + stopCommand: nnictl stop + onExitCommand: python3 -c 'import os; os.remove("config/examples/nni-nas-search-space-tf2.json")' + trainingService: local + +######################################################################### +# nni features test +######################################################################### +- name: metrics-float + configFile: test/config/metrics_test/config.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics.json + +- name: export-float + configFile: test/config/metrics_test/config.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: ExportValidator + +- name: metrics-dict + configFile: test/config/metrics_test/config_dict_metrics.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: MetricsValidator + kwargs: + expected_result_file: expected_metrics_dict.json + +- name: export-dict + configFile: test/config/metrics_test/config_dict_metrics.yml + config: + maxTrialNum: 1 + trialConcurrency: 1 + validator: + class: ExportValidator + +- name: foreground + configFile: test/config/examples/sklearn-regression.yml + launchCommand: python3 nni_test/nnitest/foreground.py --config $configFile --timeout 45 + stopCommand: + experimentStatusCheck: False + platform: linux darwin + +# Experiment resume test part 1 +- name: nnictl-resume-1 + configFile: test/config/examples/sklearn-regression.yml + setExperimentIdtoVar: $resumeExpId + +# Experiment resume test part 2 +- name: nnictl-resume-2 + configFile: test/config/examples/sklearn-regression.yml + launchCommand: nnictl resume $resumeExpId + +# Experiment view test +- name: nnictl-view + configFile: test/config/examples/sklearn-regression.yml + launchCommand: nnictl view $resumeExpId + experimentStatusCheck: False + +######################################################################### +# nni assessor test +######################################################################### +- name: assessor-curvefitting + configFile: test/config/assessors/curvefitting.yml + +- name: assessor-medianstop + configFile: test/config/assessors/medianstop.yml diff --git a/test/config/metrics_test/config-v2.yml b/test/config/metrics_test/config-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..d553c8c87822ec92914faf0f681412946fab0d32 --- /dev/null +++ b/test/config/metrics_test/config-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ./search_space.json +trialCommand: python3 trial.py +trialCodeDirectory: . 
+trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/metrics_test/config.yml b/test/config/metrics_test/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e5f43d4cb80c679b884caf329d5b63f557fc79e --- /dev/null +++ b/test/config/metrics_test/config.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 3m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random + +trial: + codeDir: . + command: python3 trial.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/metrics_test/config_dict_metrics-v2.yml b/test/config/metrics_test/config_dict_metrics-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..f3874870484f116cbaf1d3f60ce8a92f8a21df99 --- /dev/null +++ b/test/config/metrics_test/config_dict_metrics-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ./search_space.json +trialCommand: python3 trial.py --dict_metrics +trialCodeDirectory: . +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize diff --git a/test/config/metrics_test/config_dict_metrics.yml b/test/config/metrics_test/config_dict_metrics.yml new file mode 100644 index 0000000000000000000000000000000000000000..286363dffe82ca908b3c5099adb7b4ba37d0eef3 --- /dev/null +++ b/test/config/metrics_test/config_dict_metrics.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 3m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random + +trial: + codeDir: . + command: python3 trial.py --dict_metrics + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/metrics_test/config_failure-v2.yml b/test/config/metrics_test/config_failure-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..28cc8a639839ba54b1719a3d6593d05dedb05445 --- /dev/null +++ b/test/config/metrics_test/config_failure-v2.yml @@ -0,0 +1,17 @@ +experimentName: default_test +searchSpaceFile: ./search_space.json +trialCommand: python3 not_exist.py +trialCodeDirectory: . +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: Random +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize + diff --git a/test/config/metrics_test/config_failure.yml b/test/config/metrics_test/config_failure.yml new file mode 100644 index 0000000000000000000000000000000000000000..f5301b67251d2a9677df731d816dc827369b7ccb --- /dev/null +++ b/test/config/metrics_test/config_failure.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 3m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random + +trial: + codeDir: . 
+ command: python3 not_exist.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/metrics_test/expected_metrics.json b/test/config/metrics_test/expected_metrics.json new file mode 100644 index 0000000000000000000000000000000000000000..dfa01d74809a83aba3afefc68758c35f952c5ad5 --- /dev/null +++ b/test/config/metrics_test/expected_metrics.json @@ -0,0 +1,4 @@ +{ + "intermediate_result": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], + "final_result": 1.0 +} \ No newline at end of file diff --git a/test/config/metrics_test/expected_metrics_dict.json b/test/config/metrics_test/expected_metrics_dict.json new file mode 100644 index 0000000000000000000000000000000000000000..c3d57f88afe481a215cf4ec9100fa78d92fb6b53 --- /dev/null +++ b/test/config/metrics_test/expected_metrics_dict.json @@ -0,0 +1,11 @@ +{ + "intermediate_result": [ + {"default": 0.1, "loss": 0.11, "other": 0.111}, + {"default": 0.2, "loss": 0.22, "other": 0.222}, + {"default": 0.3, "loss": 0.33, "other": 0.333}, + {"default": 0.4, "loss": 0.44, "other": 0.444}, + {"default": 0.5, "loss": 0.55, "other": 0.555} + + ], + "final_result": {"default": 0.6, "loss": 0.66, "other": 0.666} +} diff --git a/test/config/metrics_test/search_space.json b/test/config/metrics_test/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..3c3a59251042a0537476d00daedb0532cd4ceabc --- /dev/null +++ b/test/config/metrics_test/search_space.json @@ -0,0 +1,7 @@ +{ + "test": + { + "_type" : "choice", + "_value" : [1, 100] + } +} \ No newline at end of file diff --git a/test/config/metrics_test/trial.py b/test/config/metrics_test/trial.py new file mode 100644 index 0000000000000000000000000000000000000000..43e3ac1b4d66f7bd96f307c7314cbfb226ab1cdc --- /dev/null +++ b/test/config/metrics_test/trial.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import time +import json +import argparse +import nni + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--dict_metrics", action='store_true') + args = parser.parse_args() + + if args.dict_metrics: + result_file = 'expected_metrics_dict.json' + else: + result_file = 'expected_metrics.json' + + nni.get_next_parameter() + with open(result_file, 'r') as f: + m = json.load(f) + time.sleep(5) + for v in m['intermediate_result']: + time.sleep(1) + print('report_intermediate_result:', v) + nni.report_intermediate_result(v) + time.sleep(1) + print('report_final_result:', m['final_result']) + nni.report_final_result(m['final_result']) + print('done') diff --git a/test/config/multi_phase/batch.yml b/test/config/multi_phase/batch.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a488d368aa45dacd00b81d02cacdda4bf5b7321 --- /dev/null +++ b/test/config/multi_phase/batch.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: BatchTuner + +trial: + codeDir: . 
+ command: python3 multi_phase.py + gpuNum: 0 + +useAnnotation: false +multiPhase: true +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/multi_phase/evolution.yml b/test/config/multi_phase/evolution.yml new file mode 100644 index 0000000000000000000000000000000000000000..bc06b8a256770a79156a939d1978a0579485e883 --- /dev/null +++ b/test/config/multi_phase/evolution.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 8 +trialConcurrency: 4 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Evolution + classArgs: + optimize_mode: maximize + +trial: + codeDir: . + command: python3 multi_phase.py + gpuNum: 0 + +useAnnotation: false +multiPhase: true +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/multi_phase/grid.yml b/test/config/multi_phase/grid.yml new file mode 100644 index 0000000000000000000000000000000000000000..aeb0a0103dcd0189ca27db2411c844cd3f9a40bf --- /dev/null +++ b/test/config/multi_phase/grid.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: GridSearch + +trial: + codeDir: . + command: python3 multi_phase.py + gpuNum: 0 + +useAnnotation: false +multiPhase: true +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/multi_phase/metis.yml b/test/config/multi_phase/metis.yml new file mode 100644 index 0000000000000000000000000000000000000000..3198b480edb330404ce6f265a57b2c5a47edc619 --- /dev/null +++ b/test/config/multi_phase/metis.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 4 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: MetisTuner + classArgs: + optimize_mode: maximize + +trial: + codeDir: . + command: python3 multi_phase.py + gpuNum: 0 + +useAnnotation: false +multiPhase: true +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/multi_phase/multi_phase.py b/test/config/multi_phase/multi_phase.py new file mode 100644 index 0000000000000000000000000000000000000000..84034a7e303d8db60fc8baa13f2c3e64e71e198c --- /dev/null +++ b/test/config/multi_phase/multi_phase.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import time +import nni + +if __name__ == '__main__': + for i in range(5): + hyper_params = nni.get_next_parameter() + print('hyper_params:[{}]'.format(hyper_params)) + if hyper_params is None: + break + nni.report_final_result(0.1*i) + time.sleep(3) diff --git a/test/config/multi_phase/search_space.json b/test/config/multi_phase/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..3c3a59251042a0537476d00daedb0532cd4ceabc --- /dev/null +++ b/test/config/multi_phase/search_space.json @@ -0,0 +1,7 @@ +{ + "test": + { + "_type" : "choice", + "_value" : [1, 100] + } +} \ No newline at end of file diff --git a/test/config/multi_phase/tpe.yml b/test/config/multi_phase/tpe.yml new file mode 100644 index 0000000000000000000000000000000000000000..2e259eeafe7e2eee71300374717e873e3234848e --- /dev/null +++ b/test/config/multi_phase/tpe.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 4 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: TPE + classArgs: + optimize_mode: maximize + +trial: + codeDir: . 
+ command: python3 multi_phase.py + gpuNum: 0 + +useAnnotation: false +multiPhase: true +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/multi_thread/config.yml b/test/config/multi_thread/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..f12c19ebc93a67d27991407719c70c814e1c4a96 --- /dev/null +++ b/test/config/multi_thread/config.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + codeDir: . + classFileName: multi_thread_tuner.py + className: MultiThreadTuner + +trial: + codeDir: . + command: python3 multi_thread_trial.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: true + +trainingServicePlatform: local diff --git a/test/config/multi_thread/multi_thread_trial.py b/test/config/multi_thread/multi_thread_trial.py new file mode 100644 index 0000000000000000000000000000000000000000..3351953492115edb4ed6980aaad68a7da71da2c1 --- /dev/null +++ b/test/config/multi_thread/multi_thread_trial.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import nni +import time + +if __name__ == '__main__': + nni.get_next_parameter() + time.sleep(3) + nni.report_final_result(0.5) diff --git a/test/config/multi_thread/multi_thread_tuner.py b/test/config/multi_thread/multi_thread_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..a519916003ac93cf0fe0f739ad49b29775e5f462 --- /dev/null +++ b/test/config/multi_thread/multi_thread_tuner.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import time +from nni.tuner import Tuner + + +class MultiThreadTuner(Tuner): + def __init__(self): + self.parent_done = False + + def generate_parameters(self, parameter_id, **kwargs): + logging.debug('generate_parameters: %s %s', parameter_id, kwargs) + if parameter_id == 0: + return {'x': 0} + else: + while not self.parent_done: + logging.debug('parameter_id %s sleeping', parameter_id) + time.sleep(2) + logging.debug('parameter_id %s woke up', parameter_id) + return {'x': 1} + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + logging.debug('receive_trial_result: %s %s %s %s', parameter_id, parameters, value, kwargs) + if parameter_id == 0: + self.parent_done = True + + def update_search_space(self, search_space): + pass diff --git a/test/config/multi_thread/search_space.json b/test/config/multi_thread/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..3c3a59251042a0537476d00daedb0532cd4ceabc --- /dev/null +++ b/test/config/multi_thread/search_space.json @@ -0,0 +1,7 @@ +{ + "test": + { + "_type" : "choice", + "_value" : [1, 100] + } +} \ No newline at end of file diff --git a/test/config/naive_test/README.md b/test/config/naive_test/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2985756a7aaa5e1c3dba4baf90f61e0f58fc4f9d --- /dev/null +++ b/test/config/naive_test/README.md @@ -0,0 +1,20 @@ +## Usage + +* To test before installing: +`python3 run.py --preinstall` +* To test the integrity of an installation: +`python3 run.py` +* If everything works, it eventually prints `PASS` in green. + +## Details +* This test case exercises the communication between trials and the tuner/assessor. +* The naive trials receive an integer `x` as a parameter and report `x`, `x²`, `x³`, ... , `x¹⁰` as metrics. 
+* The naive tuner simply generates the sequence of natural numbers, and prints the received metrics to `tuner_result.txt`. +* The naive assessor kills a trial when `sum(metrics) % 11 == 1`, and prints the killed trials to `assessor_result.txt`. +* If the tuner or assessor exits with an exception, it appends `ERROR` to its result file. +* When the experiment completes successfully, `Experiment done` can be found in the nni_manager.log file. + +## Issues +* Private APIs are used to detect whether the tuner and assessor have terminated successfully. +* The output of the REST server is not tested. +* The remote machine training service is not tested. \ No newline at end of file diff --git a/test/config/naive_test/README_zh_CN.md b/test/config/naive_test/README_zh_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..036cecf8689b0e49cbfbb1c48c1abe607ddb67b5 --- /dev/null +++ b/test/config/naive_test/README_zh_CN.md @@ -0,0 +1,18 @@ +## 用法 + +* 安装前测试: `python3 run.py --preinstall` +* 安装的集成测试: `python3 run.py` +* 如果没有问题,最终会打印绿色的 `PASS`。 + +## 详细说明 +* 这是测试 Trial 和 Tuner、Assessor 之间通信的测试用例。 +* Trial 会收到整数 `x` 作为参数,并返回 `x`, `x²`, `x³`, ... , `x¹⁰` 作为指标。 +* Tuner 会简单的生成自然数序列,并将收到的指标输出到 `tuner_result.txt`。 +* 当 `sum(metrics) % 11 == 1` 时,Assessor 会终止 Trial,并将终止的 Trial 输出到 `assessor_result.txt`。 +* 当 Tuner 和 Assessor 发生异常时,会在相应的文件中输出 `ERROR`。 +* 当 Experiment 结束时,也表示用例成功执行,可以在 nni_manager.log 文件中找到 `Experiment done`。 + +## 问题 +* 使用了私有 API 来检测是否 Tuner 和 Assessor 成功结束。 +* RESTful 服务的输出未测试。 +* 远程计算机训练平台没有被测试。 \ No newline at end of file diff --git a/test/config/naive_test/expected_assessor_result.txt b/test/config/naive_test/expected_assessor_result.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c28700db5536b4bdab9338b669d73347fcefc97 --- /dev/null +++ b/test/config/naive_test/expected_assessor_result.txt @@ -0,0 +1,6 @@ +1 1 +2 7 +3 2 +5 3 +7 2 +8 3 diff --git a/test/config/naive_test/expected_tuner_result.txt b/test/config/naive_test/expected_tuner_result.txt new file mode 100644 index 0000000000000000000000000000000000000000..a2b43fb2b20b140329fd816946d83ab236b45595 --- /dev/null +++ b/test/config/naive_test/expected_tuner_result.txt @@ -0,0 +1,4 @@ +4 1048576 +6 60466176 +9 3486784401 +10 10000000000 diff --git a/test/config/naive_test/local.yml b/test/config/naive_test/local.yml new file mode 100644 index 0000000000000000000000000000000000000000..edf2a50322cc3bc3b69a9b75ad207c30b07c4309 --- /dev/null +++ b/test/config/naive_test/local.yml @@ -0,0 +1,26 @@ +authorName: nni +experimentName: naive +trialConcurrency: 3 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote +trainingServicePlatform: local +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + codeDir: . + classFileName: naive_tuner.py + className: NaiveTuner + classArgs: + optimize_mode: maximize +assessor: + codeDir: . + classFileName: naive_assessor.py + className: NaiveAssessor + classArgs: + optimize_mode: maximize +trial: + command: python3 naive_trial.py + codeDir: . + gpuNum: 0 diff --git a/test/config/naive_test/naive_assessor.py b/test/config/naive_test/naive_assessor.py new file mode 100644 index 0000000000000000000000000000000000000000..54468f6e99fd09de5407af6e849a3df74b1c546a --- /dev/null +++ b/test/config/naive_test/naive_assessor.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
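+# The naive assessor treats the first reported metric as the trial's parameter x
+# (the trial reports x, x**2, x**3, ..., so trial_history[0] == x) and uses it as an id.
+# A trial is judged Bad as soon as the running sum of its metrics satisfies sum % 11 == 1;
+# each kill is appended to assessor_result.txt (expected_assessor_result.txt holds the
+# reference output for this test).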
+ +import logging +import os + +from nni.assessor import Assessor, AssessResult + +_logger = logging.getLogger('NaiveAssessor') +_logger.info('start') + +_pwd = os.path.dirname(__file__) +_result = open(os.path.join(_pwd, 'assessor_result.txt'), 'w') + +class NaiveAssessor(Assessor): + def __init__(self, optimize_mode): + self._killed = set() + _logger.info('init') + + def assess_trial(self, trial_job_id, trial_history): + _logger.info('assess trial %s %s', trial_job_id, trial_history) + + id_ = trial_history[0] + if id_ in self._killed: + return AssessResult.Bad + + s = 0 + for i, val in enumerate(trial_history): + s += val + if s % 11 == 1: + self._killed.add(id_) + _result.write('%d %d\n' % (id_, i + 1)) + _result.flush() + return AssessResult.Bad + + return AssessResult.Good + + def _on_exit(self): + _result.close() + + def _on_error(self): + _result.write('ERROR\n') + _result.close() diff --git a/test/config/naive_test/naive_trial.py b/test/config/naive_test/naive_trial.py new file mode 100644 index 0000000000000000000000000000000000000000..b35b52d4bc490a1bebfa6409d3ce240b8929b01d --- /dev/null +++ b/test/config/naive_test/naive_trial.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import time + +import nni + +params = nni.get_next_parameter() +print('params:', params) +x = params['x'] + +time.sleep(1) +for i in range(1, 10): + nni.report_intermediate_result(x ** i) + time.sleep(0.5) + +nni.report_final_result(x ** 10) diff --git a/test/config/naive_test/naive_tuner.py b/test/config/naive_test/naive_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..28a052050f0f77062ef15802bb97a191187ef31b --- /dev/null +++ b/test/config/naive_test/naive_tuner.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
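+# The naive tuner emits the natural numbers 1, 2, 3, ... as parameter x, one per trial,
+# and appends each received (x, reward) pair to tuner_result.txt. Since a trial's final
+# metric is x**10, expected_tuner_result.txt lists x**10 for each trial that is not
+# killed by the assessor (e.g. 4 1048576, i.e. 4**10).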
+ +import json +import logging +import os + +from nni.tuner import Tuner +from nni.utils import extract_scalar_reward + +_logger = logging.getLogger('NaiveTuner') +_logger.info('start') + +_pwd = os.path.dirname(__file__) +_result = open(os.path.join(_pwd, 'tuner_result.txt'), 'w') + +class NaiveTuner(Tuner): + def __init__(self, optimize_mode): + self.cur = 0 + _logger.info('init') + + def generate_parameters(self, parameter_id, **kwargs): + self.cur += 1 + _logger.info('generate parameters: %s', self.cur) + return { 'x': self.cur } + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + reward = extract_scalar_reward(value) + _logger.info('receive trial result: %s, %s, %s', parameter_id, parameters, reward) + _result.write('%d %d\n' % (parameters['x'], reward)) + _result.flush() + + def update_search_space(self, search_space): + _logger.info('update_search_space: %s', search_space) + with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_: + json.dump(search_space, file_) + + def _on_exit(self): + _result.close() + + def _on_error(self): + _result.write('ERROR\n') + _result.close() diff --git a/test/config/naive_test/search_space.json b/test/config/naive_test/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..cf736d71527ff97275035a13b5906f2d8bf3c32d --- /dev/null +++ b/test/config/naive_test/search_space.json @@ -0,0 +1 @@ +{ "x": [1, 100] } diff --git a/test/config/naive_trial/naive_trial.py b/test/config/naive_trial/naive_trial.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c3e3aecc7e17b4110a7412017b496ddba0df3c --- /dev/null +++ b/test/config/naive_trial/naive_trial.py @@ -0,0 +1,14 @@ +import time +import nni + +if __name__ == '__main__': + print('trial start') + params = nni.get_next_parameter() + print('params:', params) + epochs = 2 + + for i in range(epochs): + nni.report_intermediate_result(0.1 * (i+1)) + time.sleep(1) + nni.report_final_result(0.8) + print('trial done') diff --git a/test/config/naive_trial/search_space.json b/test/config/naive_trial/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..5c2084f310affe05608789d1ffa5d8be0d04dc2e --- /dev/null +++ b/test/config/naive_trial/search_space.json @@ -0,0 +1,17 @@ +{ + "k": + { + "_type" : "randint", + "_value" : [0, 4] + }, + "d": + { + "_type" : "choice", + "_value" : [-1, 1] + }, + "n": + { + "_type" : "uniform", + "_value" : [0, 0.2] + } +} diff --git a/test/config/naive_trial/search_space_choices.json b/test/config/naive_trial/search_space_choices.json new file mode 100644 index 0000000000000000000000000000000000000000..6262095eeff962095dbeef0d43c2300eebff228f --- /dev/null +++ b/test/config/naive_trial/search_space_choices.json @@ -0,0 +1,7 @@ +{ + "p": + { + "_type" : "choice", + "_value" : [1, 2, 3, 4] + } +} diff --git a/test/config/naive_trial/trial.py b/test/config/naive_trial/trial.py new file mode 100644 index 0000000000000000000000000000000000000000..1d83fcacf63c8ab9f11b8ae36dec187530754596 --- /dev/null +++ b/test/config/naive_trial/trial.py @@ -0,0 +1,29 @@ +import random +import time +import math +import nni + +curve_func = { + 0: lambda x: x, + 1: lambda x: x * x, + 2: lambda x: math.pow(x, 0.5), + 3: lambda x: math.tanh(x) +} + +if __name__ == '__main__': + print('trial start') + + params = nni.get_next_parameter() + print('params:', params) + epochs = 20 + + for i in range(epochs): + v = curve_func[params['k']](i / epochs) + v += v * (random.random() * params['n']) + v 
*= params['d'] + nni.report_intermediate_result(v) + + if i % 5 == 0: + time.sleep(1) + nni.report_final_result(v) + print('trial done') diff --git a/test/config/naive_trial/trial_choices.py b/test/config/naive_trial/trial_choices.py new file mode 100644 index 0000000000000000000000000000000000000000..878c4ef41e7559602aafceeb03fd30540d8133fd --- /dev/null +++ b/test/config/naive_trial/trial_choices.py @@ -0,0 +1,13 @@ +import random +import nni + +if __name__ == '__main__': + print('trial start') + + params = nni.get_next_parameter() + print('params:', params) + + nni.report_intermediate_result(random.random()) + nni.report_final_result(random.random()) + + print('trial done') diff --git a/test/config/nnictl_experiment/sklearn-classification-v2.yml b/test/config/nnictl_experiment/sklearn-classification-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d8b5757769ec6a4371cc709ad3ec3fb8c015005 --- /dev/null +++ b/test/config/nnictl_experiment/sklearn-classification-v2.yml @@ -0,0 +1,16 @@ +experimentName: default_test +searchSpaceFile: ../../../examples/trials/sklearn/classification/search_space.json +trialCommand: python3 main.py +trialCodeDirectory: ../../../examples/trials/sklearn/classification +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: TPE +trainingService: + platform: local +assessor: + name: Medianstop + classArgs: + optimize_mode: maximize \ No newline at end of file diff --git a/test/config/nnictl_experiment/sklearn-classification.yml b/test/config/nnictl_experiment/sklearn-classification.yml new file mode 100644 index 0000000000000000000000000000000000000000..5a803e40d2c07f431164bdf41630b2083f44e8ce --- /dev/null +++ b/test/config/nnictl_experiment/sklearn-classification.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 4 +trialConcurrency: 2 +searchSpacePath: ../../../examples/trials/sklearn/classification/search_space.json + +tuner: + builtinTunerName: TPE +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/sklearn/classification + command: python3 main.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/nnictl_experiment/test_import.json b/test/config/nnictl_experiment/test_import.json new file mode 100644 index 0000000000000000000000000000000000000000..6a8c396b9eb9f5a7d1e0f413bc9d4a2c192ad67f --- /dev/null +++ b/test/config/nnictl_experiment/test_import.json @@ -0,0 +1,4 @@ +[ + {"parameter": {"C": 0.15940134774738896, "kernel": "sigmoid", "degree": 3, "gamma": 0.07295826917955316, "coef0": 0.0978204758732429}, "value": 0.6}, + {"parameter": {"C": 0.5556430724708544, "kernel": "linear", "degree": 3, "gamma": 0.04957496655414671, "coef0": 0.08520868779907687}, "value": 0.7} +] diff --git a/test/config/pr_tests.yml b/test/config/pr_tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..11bb431ffc65ab1b0c38b7a4efd52e50365c0825 --- /dev/null +++ b/test/config/pr_tests.yml @@ -0,0 +1,144 @@ + +defaultTestCaseConfig: + launchCommand: nnictl create --config $configFile + stopCommand: nnictl stop + experimentStatusCheck: True + platform: linux darwin win32 + trainingService: all + +testCases: + +######################################################################### +# naive test +######################################################################### + +- name: 
naive-test
+  configFile: test/config/naive_test/local.yml
+  launchCommand: python3 nni_test/nnitest/naive_test.py --config $configFile
+  experimentStatusCheck: False
+  stopCommand:
+  platform: linux darwin
+
+#########################################################################
+# nni features test
+#########################################################################
+- name: metrics-float
+  configFile: test/config/metrics_test/config.yml
+  config:
+    maxTrialNum: 1
+    trialConcurrency: 1
+  validator:
+    class: MetricsValidator
+    kwargs:
+      expected_result_file: expected_metrics.json
+
+- name: metrics-dict
+  configFile: test/config/metrics_test/config_dict_metrics.yml
+  config:
+    maxTrialNum: 1
+    trialConcurrency: 1
+  validator:
+    class: MetricsValidator
+    kwargs:
+      expected_result_file: expected_metrics_dict.json
+
+#########################################################################
+# nni assessor test
+#########################################################################
+- name: assessor-curvefitting
+  configFile: test/config/assessors/curvefitting.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: assessor-medianstop
+  configFile: test/config/assessors/medianstop.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+#########################################################################
+# nni tuners test
+#########################################################################
+- name: tuner-anneal
+  configFile: test/config/tuners/anneal.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: tuner-evolution
+  configFile: test/config/tuners/evolution.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: tuner-random
+  configFile: test/config/tuners/random.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: tuner-tpe
+  configFile: test/config/tuners/tpe.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: tuner-batch
+  configFile: test/config/tuners/batch.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space_choices.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 trial_choices.py
+
+- name: tuner-gp
+  configFile: test/config/tuners/gp.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 naive_trial.py
+
+- name: tuner-grid
+  configFile: test/config/tuners/gridsearch.yml
+  config:
+    maxTrialNum: 2
+    trialConcurrency: 2
+    useAnnotation: False
+    searchSpacePath: ../naive_trial/search_space_choices.json
+    trial:
+      codeDir: ../naive_trial
+      command: python3 trial_choices.py
diff --git a/test/config/sharedstorage_test/config_sharedstorage_remote_azureblob.yml b/test/config/sharedstorage_test/config_sharedstorage_remote_azureblob.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ae75e0a284b6904334ae368afb8107f35ad098ad
--- /dev/null
+++ b/test/config/sharedstorage_test/config_sharedstorage_remote_azureblob.yml
@@ -0,0 +1,41 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 1
+trainingServicePlatform: remote
+searchSpacePath: config_sharedstorage_search_space.json
+#choice: true, false
+useAnnotation: false
+nniManagerIp: 127.0.0.1
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 config_sharedstorage_trial.py
+  codeDir: .
+  gpuNum: 0
+sharedStorage:
+  storageType: AzureBlob
+  localMountPoint: ${your/local/mount/point}
+  remoteMountPoint: ${your/remote/mount/point}
+  storageAccountName: ${replace_to_your_storageAccountName}
+  storageAccountKey: ${replace_to_your_storageAccountKey}
+  containerName: ${replace_to_your_containerName}
+  # usermount means you have already mounted this storage on localMountPoint
+  # nnimount means nni will try to mount this storage on localMountPoint
+  # nomount means the storage will not be mounted on the local machine; support for partial storage will be added in the future
+  localMounted: nnimount
+#machineList can be empty if the platform is local
+machineList:
+  - ip: 10.1.1.1
+    username: bob
+    passwd: bob123
+    #port can be skipped if using the default ssh port 22
+    #port: 22
+remoteConfig:
+  reuse: true
\ No newline at end of file
diff --git a/test/config/sharedstorage_test/config_sharedstorage_remote_nfs.yml b/test/config/sharedstorage_test/config_sharedstorage_remote_nfs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7bf458b43808cea54005b0f8837e1987002e39b8
--- /dev/null
+++ b/test/config/sharedstorage_test/config_sharedstorage_remote_nfs.yml
@@ -0,0 +1,40 @@
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 1
+trainingServicePlatform: remote
+searchSpacePath: config_sharedstorage_search_space.json
+#choice: true, false
+useAnnotation: false
+nniManagerIp: 127.0.0.1
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+trial:
+  command: python3 config_sharedstorage_trial.py
+  codeDir: .
+  gpuNum: 0
+sharedStorage:
+  storageType: NFS
+  localMountPoint: ${your/local/mount/point}
+  remoteMountPoint: ${your/remote/mount/point}
+  nfsServer: ${nfs-server-ip}
+  exportedDirectory: ${nfs/exported/directory}
+  # usermount means you have already mounted this storage on localMountPoint
+  # nnimount means nni will try to mount this storage on localMountPoint
+  # nomount means the storage will not be mounted on the local machine; support for partial storage will be added in the future
+  localMounted: nnimount
+#machineList can be empty if the platform is local
+machineList:
+  - ip: 10.1.1.1
+    username: bob
+    passwd: bob123
+    #port can be skipped if using the default ssh port 22
+    #port: 22
+remoteConfig:
+  reuse: true
\ No newline at end of file
diff --git a/test/config/sharedstorage_test/config_sharedstorage_search_space.json b/test/config/sharedstorage_test/config_sharedstorage_search_space.json
new file mode 100644
index 0000000000000000000000000000000000000000..dd05405e27b14bc815ab564bf9d6a1ff9b42d809
--- /dev/null
+++ b/test/config/sharedstorage_test/config_sharedstorage_search_space.json
@@ -0,0 +1,7 @@
+{
+    "dropout_rate":{"_type":"uniform","_value":[0.5, 0.9]},
+    "conv_size":{"_type":"choice","_value":[2,3,5,7]},
+    "hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
+    "batch_size": {"_type":"choice", "_value": [16, 32]},
+    "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}
+}
diff --git a/test/config/sharedstorage_test/config_sharedstorage_trial.py b/test/config/sharedstorage_test/config_sharedstorage_trial.py
new file mode 100644
index 0000000000000000000000000000000000000000..adfc46cba42f5e7cc93fa60ea80f71544483e18b
--- /dev/null
+++ b/test/config/sharedstorage_test/config_sharedstorage_trial.py
@@ -0,0 +1,24 @@
+"""
+A deep MNIST classifier using convolutional layers.
+
+This file is a modification of the official pytorch mnist example:
+https://github.com/pytorch/examples/blob/master/mnist/main.py
+"""
+import os
+import logging
+import nni
+logger = logging.getLogger('mnist_AutoML')
+if __name__ == '__main__':
+    try:
+        logger.debug(os.environ.get('NNI_OUTPUT_DIR'))
+        filename = os.path.join(os.environ.get('NNI_OUTPUT_DIR'), 'checkingfile.txt')
+        f = open(filename, "a")
+
+        tuner_params = nni.get_next_parameter()
+        f.write(str(tuner_params))
+        nni.report_final_result(1)
+
+        f.close()
+    except Exception as exception:
+        logger.exception(exception)
+        raise
diff --git a/test/config/training_service.yml b/test/config/training_service.yml
new file mode 100644
index 0000000000000000000000000000000000000000..07e809414f04d7f697deb96a574b5ba50e0be26a
--- /dev/null
+++ b/test/config/training_service.yml
@@ -0,0 +1,145 @@
+all:
+  logCollection: http
+
+kubeflow:
+  maxExecDuration: 15m
+  nniManagerIp:
+  # use a small trial number to make IT faster
+  maxTrialNum: 2
+  trialConcurrency: 2
+
+  kubeflowConfig:
+    operator: tf-operator
+    apiVersion: v1
+    storage: azureStorage
+    keyVault:
+      vaultName:
+      name:
+    azureStorage:
+      accountName:
+      azureShare:
+  trial:
+    worker:
+      replicas: 1
+      command:
+      gpuNum: 1
+      cpuNum: 1
+      memoryMB: 8192
+      image:
+  trainingServicePlatform: kubeflow
+
+frameworkcontroller:
+  maxExecDuration: 15m
+  nniManagerIp:
+  # use a small trial number to make IT faster
+  maxTrialNum: 2
+  trialConcurrency: 2
+  frameworkcontrollerConfig:
+    serviceAccountName: frameworkbarrier
+    storage: azureStorage
+    keyVault:
+      vaultName:
+      name:
+    azureStorage:
+      accountName:
+      azureShare:
+  trial:
+    taskRoles:
+      - name: worker
+        taskNum: 1
+        command:
+        gpuNum: 1
+        cpuNum: 1
+        memoryMB: 8192
+        image:
+        frameworkAttemptCompletionPolicy:
+          minFailedTaskCount: 1
+          minSucceededTaskCount: 1
+  trainingServicePlatform: frameworkcontroller
+
+local:
+  trainingServicePlatform: local
+pai:
+  nniManagerIp:
+  maxExecDuration: 15m
+  # PAI limits job submissions, so set maxTrialNum=1 to control the number of trial jobs on PAI
+  maxTrialNum: 1
+  trialConcurrency: 1
+  paiConfig:
+    host:
+    userName:
+  trainingServicePlatform: pai
+  trial:
+    gpuNum: 1
+    cpuNum: 1
+    image:
+    memoryMB: 8192
+    virtualCluster: default
+    nniManagerNFSMountPath:
+    containerNFSMountPath:
+    paiStorageConfigName:
+remote:
+  remoteConfig:
+    reuse: false
+  machineList:
+    - ip:
+      passwd:
+      port:
+      username:
+  trainingServicePlatform: remote
+  sharedStorage:
+    storageAccountKey:
+    nfsServer:
+hybrid:
+  maxExecDuration: 15m
+  nniManagerIp:
+  maxTrialNum: 2
+  trialConcurrency: 2
+  trial:
+    gpuNum: 0
+  trainingServicePlatform: hybrid
+  hybridConfig:
+    # TODO: Add more platforms
+    trainingServicePlatforms:
+      - remote
+      - local
+  machineList:
+    - ip:
+      passwd:
+      port:
+      username:
+  remoteConfig:
+    reuse: true
+adl:
+  maxExecDuration: 15m
+  nniManagerIp:
+  # use a small trial number to make IT faster
+  maxTrialNum: 2
+  trialConcurrency: 2
+  trial:
+    namespace: default
+    command:
+    codeDir:
+    gpuNum: 1
+    cpuNum: 1
+    image:
+    memorySize: 1Gi
+    checkpoint:
+      storageClass:
+      storageSize:
+  trainingServicePlatform: adl
+aml:
+  nniManagerIp:
+  maxExecDuration: 15m
+  # use a small trial number to make IT faster
+  maxTrialNum: 2
+  trialConcurrency: 2
+  trainingServicePlatform: aml
+  trial:
+    gpuNum: 1
+    image:
+  amlConfig:
+    subscriptionId:
+    resourceGroup:
+    workspaceName:
+    computeTarget:
diff --git a/test/config/training_service_v2.yml b/test/config/training_service_v2.yml
new file mode 100644
index 0000000000000000000000000000000000000000..85ee6378175419b6c9770b53f2197987e4d9da7b
--- /dev/null
+++ b/test/config/training_service_v2.yml
@@ -0,0 +1,64 @@
+hybrid:
+  trainingService:
+    - platform: remote
+      machineList:
+        - host:
+          user:
+          password:
+          port:
+    - platform: local
+    - platform: aml
+      subscriptionId:
+      resourceGroup:
+      workspaceName:
+      computeTarget:
+kubeflow:
+  trialGpuNumber: 1
+  trialConcurrency: 2
+  maxTrialNumber: 2
+  nniManagerIp:
+  trainingService:
+    reuseMode: true
+    platform: kubeflow
+    worker:
+      command:
+      code_directory:
+      dockerImage:
+      cpuNumber: 1
+      gpuNumber: 0
+      memorySize: 8192
+      replicas: 1
+    operator: tf-operator
+    storage:
+      storageType: azureStorage
+      azureAccount:
+      azureShare:
+      keyVaultName:
+      keyVaultKey:
+    apiVersion: v1
+frameworkcontroller:
+  trialGpuNumber: 1
+  trialConcurrency: 2
+  maxTrialNumber: 2
+  nniManagerIp:
+  trainingService:
+    reuseMode: true
+    platform: frameworkcontroller
+    serviceAccountName: frameworkcontroller
+    taskRoles:
+      - name: worker
+        dockerImage:
+        taskNumber: 1
+        command:
+        gpuNumber: 0
+        cpuNumber: 1
+        memorySize: 8192
+        frameworkAttemptCompletionPolicy:
+          minFailedTaskCount: 1
+          minSucceededTaskCount: 1
+    storage:
+      storageType: azureStorage
+      azureAccount:
+      azureShare:
+      keyVaultName:
+      keyVaultKey:
\ No newline at end of file
diff --git a/test/config/tuners/anneal.yml b/test/config/tuners/anneal.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f01cd1f48883e6e994d7b0ea459f0f4e80d52d05
--- /dev/null
+++ b/test/config/tuners/anneal.yml
@@ -0,0 +1,24 @@
+authorName: nni
+experimentName: default_test
+maxExecDuration: 5m
+maxTrialNum: 2
+trialConcurrency: 1
+
+tuner:
+  builtinTunerName: Anneal
+  classArgs:
+    optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+trial:
+  codeDir: ../../../examples/trials/mnist-annotation
+  command: python3 mnist.py --batch_num 100
+  gpuNum: 0
+
+useAnnotation: true
+multiPhase: false
+multiThread: false
+
+trainingServicePlatform: local
diff --git a/test/config/tuners/batch.yml b/test/config/tuners/batch.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6073065cefe55aa610beb540653912f13aa98c9c
--- /dev/null
+++ b/test/config/tuners/batch.yml
@@ -0,0 +1,24 @@
+authorName: nni
+experimentName: default_test
+maxExecDuration: 5m
+maxTrialNum: 2
+trialConcurrency: 1
+searchSpacePath: search_space_batchtuner.json
+
+tuner:
+  builtinTunerName: BatchTuner
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+trial:
+  codeDir: ../../../examples/trials/mnist-batch-tune-keras
+  command: python3 mnist-keras.py --epochs 1
+  gpuNum: 0
+
+useAnnotation: false
+multiPhase: false
+multiThread: false
+
+trainingServicePlatform: local
+
diff --git a/test/config/tuners/bohb.yml b/test/config/tuners/bohb.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d71bf237294c1892b3c4487e5bc4c88222d196a
--- /dev/null
+++ b/test/config/tuners/bohb.yml
@@ -0,0 +1,25 @@
+authorName: nni
+experimentName: default_test
+maxExecDuration: 5m
+maxTrialNum: 2
+trialConcurrency: 1
+searchSpacePath: search_space_advisor.json
+
+advisor:
+  builtinAdvisorName: BOHB
+  classArgs:
+    max_budget: 27
+    min_budget: 1
+    eta: 3
+    optimize_mode: maximize
+trial:
+  codeDir: ../../../examples/trials/mnist-advisor
+  command: python3 mnist.py
+  gpuNum: 0
+
+useAnnotation: false
+multiPhase: false
+multiThread: false
+
+trainingServicePlatform: local
+
diff --git
a/test/config/tuners/evolution.yml b/test/config/tuners/evolution.yml new file mode 100644 index 0000000000000000000000000000000000000000..f2fc2008880d5289d7ad6c292f9ce2e4523e7f07 --- /dev/null +++ b/test/config/tuners/evolution.yml @@ -0,0 +1,24 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 + +tuner: + builtinTunerName: Evolution + classArgs: + optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-annotation + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: true +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/gp.yml b/test/config/tuners/gp.yml new file mode 100644 index 0000000000000000000000000000000000000000..30835223bfd37402c15ab8a01165a2bb44cf15c9 --- /dev/null +++ b/test/config/tuners/gp.yml @@ -0,0 +1,33 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 +searchSpacePath: search_space.json + +tuner: + builtinTunerName: GPTuner + classArgs: + optimize_mode: maximize + utility: 'ei' + kappa: 5.0 + xi: 0.0 + nu: 2.5 + alpha: 1.0e-6 + cold_start_num: 10 + selection_num_warm_up: 100000 + selection_num_starting_points: 250 +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-tfv1 + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/gridsearch.yml b/test/config/tuners/gridsearch.yml new file mode 100644 index 0000000000000000000000000000000000000000..341315c2bdec4bffb3cc821c3f410476d733291b --- /dev/null +++ b/test/config/tuners/gridsearch.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 +searchSpacePath: search_space.json + +tuner: + builtinTunerName: GridSearch +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-tfv1 + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/hyperband.yml b/test/config/tuners/hyperband.yml new file mode 100644 index 0000000000000000000000000000000000000000..980dfc747bf1a9ccdaa6fccf14233598eec1a698 --- /dev/null +++ b/test/config/tuners/hyperband.yml @@ -0,0 +1,25 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 +searchSpacePath: search_space_advisor.json + +advisor: + builtinAdvisorName: Hyperband + classArgs: + optimize_mode: maximize + R: 60 + eta: 3 + exec_mode: parallelism +trial: + codeDir: ../../../examples/trials/mnist-advisor + command: python3 mnist.py + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local + diff --git a/test/config/tuners/metis.yml b/test/config/tuners/metis.yml new file mode 100644 index 0000000000000000000000000000000000000000..cc6244abb2bdf9b0e6d8f7398d91bf59efaf855d --- /dev/null +++ b/test/config/tuners/metis.yml @@ -0,0 +1,25 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 +searchSpacePath: search_space.json + +tuner: + builtinTunerName: MetisTuner + classArgs: + 
optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-tfv1 + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/random.yml b/test/config/tuners/random.yml new file mode 100644 index 0000000000000000000000000000000000000000..86916603f3555d17c53ada1d709bdf6944a76671 --- /dev/null +++ b/test/config/tuners/random.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-annotation + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: true +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/regularized_evolution_tuner-v2.yml b/test/config/tuners/regularized_evolution_tuner-v2.yml new file mode 100644 index 0000000000000000000000000000000000000000..a2a923b2209a6a10cb4a71722b2f31e99d27c18b --- /dev/null +++ b/test/config/tuners/regularized_evolution_tuner-v2.yml @@ -0,0 +1,14 @@ +experimentName: default_test +searchSpaceFile: seach_space_classic_nas.json +trialCommand: python3 mnist.py --epochs 1 +trialCodeDirectory: ../../../examples/nas/legacy/classic_nas +trialGpuNumber: 0 +trialConcurrency: 1 +maxExperimentDuration: 15m +maxTrialNumber: 1 +tuner: + name: RegularizedEvolutionTuner + classArgs: + optimize_mode: maximize +trainingService: + platform: local diff --git a/test/config/tuners/regularized_evolution_tuner.yml b/test/config/tuners/regularized_evolution_tuner.yml new file mode 100644 index 0000000000000000000000000000000000000000..05fb46665a0632163e957709f65bc54cd55188bf --- /dev/null +++ b/test/config/tuners/regularized_evolution_tuner.yml @@ -0,0 +1,20 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 10m +maxTrialNum: 1 +trialConcurrency: 1 +searchSpacePath: seach_space_classic_nas.json +tuner: + builtinTunerName: RegularizedEvolutionTuner + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/nas/legacy/classic_nas + command: python3 mnist.py --epochs 1 + gpuNum: 0 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/seach_space_classic_nas.json b/test/config/tuners/seach_space_classic_nas.json new file mode 100644 index 0000000000000000000000000000000000000000..bac470b72f33dffbe5c5532840e0a506c10d9dd3 --- /dev/null +++ b/test/config/tuners/seach_space_classic_nas.json @@ -0,0 +1,26 @@ +{ + "first_conv": { + "_type": "layer_choice", + "_value": [ + "conv5x5", + "conv3x3" + ] + }, + "mid_conv": { + "_type": "layer_choice", + "_value": [ + "0", + "1" + ] + }, + "skip": { + "_type": "input_choice", + "_value": { + "candidates": [ + "", + "" + ], + "n_chosen": 1 + } + } +} diff --git a/test/config/tuners/search_space.json b/test/config/tuners/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c081d144d18adf3a2a9a780375334caefa2b42f0 --- /dev/null +++ b/test/config/tuners/search_space.json @@ -0,0 +1,7 @@ +{ + "dropout_rate":{"_type":"quniform","_value":[0.5, 0.9, 2]}, + "conv_size":{"_type":"choice","_value":[2,3,5,7]}, + "hidden_size":{"_type":"choice","_value":[124, 512, 1024]}, + "batch_size": 
{"_type":"choice", "_value": [1, 4, 8, 16, 32]}, + "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]} +} diff --git a/test/config/tuners/search_space_advisor.json b/test/config/tuners/search_space_advisor.json new file mode 100644 index 0000000000000000000000000000000000000000..540f2708cb888baa2a9700436f9879724e049328 --- /dev/null +++ b/test/config/tuners/search_space_advisor.json @@ -0,0 +1,7 @@ +{ + "dropout_rate":{"_type":"uniform","_value":[0.5,0.9]}, + "conv_size":{"_type":"choice","_value":[2,3,5,7]}, + "hidden_size":{"_type":"choice","_value":[124, 512, 1024]}, + "batch_size": {"_type":"choice","_value":[8, 16, 32, 64]}, + "learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]} +} diff --git a/test/config/tuners/search_space_batchtuner.json b/test/config/tuners/search_space_batchtuner.json new file mode 100644 index 0000000000000000000000000000000000000000..a802136a273cc45c7b546915ca819d832f9db6a6 --- /dev/null +++ b/test/config/tuners/search_space_batchtuner.json @@ -0,0 +1,12 @@ +{ + "combine_params": + { + "_type" : "choice", + "_value" : [{"optimizer": "Adam", "learning_rate": 0.00001}, + {"optimizer": "Adam", "learning_rate": 0.0001}, + {"optimizer": "Adam", "learning_rate": 0.001}, + {"optimizer": "SGD", "learning_rate": 0.01}, + {"optimizer": "SGD", "learning_rate": 0.005}, + {"optimizer": "SGD", "learning_rate": 0.0002}] + } +} \ No newline at end of file diff --git a/test/config/tuners/smac.yml b/test/config/tuners/smac.yml new file mode 100644 index 0000000000000000000000000000000000000000..d943c9918c8a1913903c132be0ffa7eca34ecd30 --- /dev/null +++ b/test/config/tuners/smac.yml @@ -0,0 +1,24 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 + +tuner: + builtinTunerName: SMAC + classArgs: + optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-annotation + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: true +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/config/tuners/tpe.yml b/test/config/tuners/tpe.yml new file mode 100644 index 0000000000000000000000000000000000000000..8e4a8877051adfce7d48110617536e2dff25666a --- /dev/null +++ b/test/config/tuners/tpe.yml @@ -0,0 +1,24 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 5m +maxTrialNum: 2 +trialConcurrency: 1 + +tuner: + builtinTunerName: TPE + classArgs: + optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ../../../examples/trials/mnist-annotation + command: python3 mnist.py --batch_num 100 + gpuNum: 0 + +useAnnotation: true +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/nni_test/nnitest/__init__.py b/test/nni_test/nnitest/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/nni_test/nnitest/foreground.py b/test/nni_test/nnitest/foreground.py new file mode 100644 index 0000000000000000000000000000000000000000..4bfe6c173b00be0d01e828df9a6304693ba57a65 --- /dev/null +++ b/test/nni_test/nnitest/foreground.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import subprocess +import argparse +import time +import shlex +import signal + +def test_foreground(args): + launch_command = 'nnictl create --config {} --foreground'.format(args.config) + print('nnictl foreground launch command: ', launch_command, flush=True) + + proc = subprocess.Popen(shlex.split(launch_command)) + + time.sleep(args.timeout) + proc.send_signal(signal.SIGINT) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, required=True) + parser.add_argument("--timeout", type=int, default=45) + args = parser.parse_args() + + test_foreground(args) diff --git a/test/nni_test/nnitest/generate_ts_config.py b/test/nni_test/nnitest/generate_ts_config.py new file mode 100644 index 0000000000000000000000000000000000000000..9019bb6a54a4784c41b514eddf89e7e67de12843 --- /dev/null +++ b/test/nni_test/nnitest/generate_ts_config.py @@ -0,0 +1,203 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import sys +import os +import glob +import argparse +from utils import get_yml_content, dump_yml_content + +TRAINING_SERVICE_FILE = os.path.join('config', 'training_service.yml') +TRAINING_SERVICE_FILE_V2 = os.path.join('config', 'training_service_v2.yml') + +def update_training_service_config(args): + config = get_yml_content(TRAINING_SERVICE_FILE) + if args.nni_manager_ip is not None and args.config_version == 'v1': + config[args.ts]['nniManagerIp'] = args.nni_manager_ip + if args.ts == 'pai': + if args.pai_user is not None: + config[args.ts]['paiConfig']['userName'] = args.pai_user + if args.pai_host is not None: + config[args.ts]['paiConfig']['host'] = args.pai_host + if args.pai_token is not None: + config[args.ts]['paiConfig']['token'] = args.pai_token + if args.pai_reuse is not None: + config[args.ts]['paiConfig']['reuse'] = args.pai_reuse.lower() == 'true' + if args.nni_docker_image is not None: + config[args.ts]['trial']['image'] = args.nni_docker_image + if args.nni_manager_nfs_mount_path is not None: + config[args.ts]['trial']['nniManagerNFSMountPath'] = args.nni_manager_nfs_mount_path + if args.container_nfs_mount_path is not None: + config[args.ts]['trial']['containerNFSMountPath'] = args.container_nfs_mount_path + if args.pai_storage_config_name is not None: + config[args.ts]['trial']['paiStorageConfigName'] = args.pai_storage_config_name + if args.vc is not None: + config[args.ts]['trial']['virtualCluster'] = args.vc + if args.debug is not None: + config[args.ts]['debug'] = args.debug.lower() == 'true' + elif args.ts == 'kubeflow' and args.reuse_mode == 'False': + if args.nfs_server is not None: + config[args.ts]['kubeflowConfig']['nfs']['server'] = args.nfs_server + if args.nfs_path is not None: + config[args.ts]['kubeflowConfig']['nfs']['path'] = args.nfs_path + if args.keyvault_vaultname is not None: + config[args.ts]['kubeflowConfig']['keyVault']['vaultName'] = args.keyvault_vaultname + if args.keyvault_name is not None: + config[args.ts]['kubeflowConfig']['keyVault']['name'] = args.keyvault_name + if args.azs_account is not None: + config[args.ts]['kubeflowConfig']['azureStorage']['accountName'] = args.azs_account + if args.azs_share is not None: + config[args.ts]['kubeflowConfig']['azureStorage']['azureShare'] = args.azs_share + if args.nni_docker_image is not None: + config[args.ts]['trial']['worker']['image'] = args.nni_docker_image + config[args.ts]['kubeflowConfig']['reuse'] = False + elif args.ts == 'kubeflow' and args.reuse_mode == 'True': + config = 
get_yml_content(TRAINING_SERVICE_FILE_V2) + config[args.ts]['trainingService']['worker']['dockerImage'] = args.nni_docker_image + config[args.ts]['trainingService']['storage']['azureAccount'] = args.azs_account + config[args.ts]['trainingService']['storage']['azureShare'] = args.azs_share + config[args.ts]['trainingService']['storage']['keyVaultName'] = args.keyvault_vaultname + config[args.ts]['trainingService']['storage']['keyVaultKey'] = args.keyvault_name + config[args.ts]['nni_manager_ip'] = args.nni_manager_ip + dump_yml_content(TRAINING_SERVICE_FILE_V2, config) + elif args.ts == 'frameworkcontroller' and args.reuse_mode == 'False': + if args.nfs_server is not None: + config[args.ts]['frameworkcontrollerConfig']['nfs']['server'] = args.nfs_server + if args.nfs_path is not None: + config[args.ts]['frameworkcontrollerConfig']['nfs']['path'] = args.nfs_path + if args.keyvault_vaultname is not None: + config[args.ts]['frameworkcontrollerConfig']['keyVault']['vaultName'] = args.keyvault_vaultname + if args.keyvault_name is not None: + config[args.ts]['frameworkcontrollerConfig']['keyVault']['name'] = args.keyvault_name + if args.azs_account is not None: + config[args.ts]['frameworkcontrollerConfig']['azureStorage']['accountName'] = args.azs_account + if args.azs_share is not None: + config[args.ts]['frameworkcontrollerConfig']['azureStorage']['azureShare'] = args.azs_share + if args.nni_docker_image is not None: + config[args.ts]['trial']['taskRoles'][0]['image'] = args.nni_docker_image + config[args.ts]['frameworkcontrollerConfig']['reuse'] = False + elif args.ts == 'frameworkcontroller' and args.reuse_mode == 'True': + config = get_yml_content(TRAINING_SERVICE_FILE_V2) + config[args.ts]['trainingService']['taskRoles'][0]['dockerImage'] = args.nni_docker_image + config[args.ts]['trainingService']['storage']['azureAccount'] = args.azs_account + config[args.ts]['trainingService']['storage']['azureShare'] = args.azs_share + config[args.ts]['trainingService']['storage']['keyVaultName'] = args.keyvault_vaultname + config[args.ts]['trainingService']['storage']['keyVaultKey'] = args.keyvault_name + config[args.ts]['nni_manager_ip'] = args.nni_manager_ip + dump_yml_content(TRAINING_SERVICE_FILE_V2, config) + elif args.ts == 'remote': + if args.remote_user is not None: + config[args.ts]['machineList'][0]['username'] = args.remote_user + if args.remote_host is not None: + config[args.ts]['machineList'][0]['ip'] = args.remote_host + if args.remote_port is not None: + config[args.ts]['machineList'][0]['port'] = args.remote_port + if args.remote_pwd is not None: + config[args.ts]['machineList'][0]['passwd'] = args.remote_pwd + if args.remote_reuse is not None: + config[args.ts]['remoteConfig']['reuse'] = args.remote_reuse.lower() == 'true' + if args.azurestoragetoken is not None: + config[args.ts]['sharedStorage']['storageAccountKey'] = args.azurestoragetoken + if args.nfs_server is not None: + config[args.ts]['sharedStorage']['nfsServer'] = args.nfs_server + if args.local_mount_point is not None: + config[args.ts]['sharedStorage']['localMountPoint'] = args.local_mount_point + if args.remote_mount_point is not None: + config[args.ts]['sharedStorage']['remoteMountPoint'] = args.remote_mount_point + if args.exported_directory is not None: + config[args.ts]['sharedStorage']['exportedDirectory'] = args.exported_directory + elif args.ts == 'adl': + if args.nni_docker_image is not None: + config[args.ts]['trial']['image'] = args.nni_docker_image + if args.checkpoint_storage_class is not None: + 
config[args.ts]['trial']['checkpoint']['storageClass'] = args.checkpoint_storage_class
+        if args.checkpoint_storage_size is not None:
+            config[args.ts]['trial']['checkpoint']['storageSize'] = args.checkpoint_storage_size
+        if args.adaptive is not None:
+            config[args.ts]['trial']['adaptive'] = args.adaptive
+        if args.adl_nfs_server is not None and args.adl_nfs_path is not None and args.adl_nfs_container_mount_path is not None:
+            # the nfs section is empty by default, so initialize it first
+            config[args.ts]['trial']['nfs'] = {}
+            config[args.ts]['trial']['nfs']['server'] = args.adl_nfs_server
+            config[args.ts]['trial']['nfs']['path'] = args.adl_nfs_path
+            config[args.ts]['trial']['nfs']['container_mount_path'] = args.adl_nfs_container_mount_path
+    elif args.ts == 'aml':
+        if args.nni_docker_image is not None:
+            config[args.ts]['trial']['image'] = args.nni_docker_image
+        if args.subscription_id is not None:
+            config[args.ts]['amlConfig']['subscriptionId'] = args.subscription_id
+        if args.resource_group is not None:
+            config[args.ts]['amlConfig']['resourceGroup'] = args.resource_group
+        if args.workspace_name is not None:
+            config[args.ts]['amlConfig']['workspaceName'] = args.workspace_name
+        if args.compute_target is not None:
+            config[args.ts]['amlConfig']['computeTarget'] = args.compute_target
+    dump_yml_content(TRAINING_SERVICE_FILE, config)
+
+    if args.ts == 'hybrid':
+        config = get_yml_content(TRAINING_SERVICE_FILE_V2)
+        config[args.ts]['trainingService'][0]['machineList'][0]['user'] = args.remote_user
+        config[args.ts]['trainingService'][0]['machineList'][0]['host'] = args.remote_host
+        config[args.ts]['trainingService'][0]['machineList'][0]['password'] = args.remote_pwd
+        config[args.ts]['trainingService'][0]['machineList'][0]['port'] = args.remote_port
+        config[args.ts]['trainingService'][2]['subscriptionId'] = args.subscription_id
+        config[args.ts]['trainingService'][2]['resourceGroup'] = args.resource_group
+        config[args.ts]['trainingService'][2]['workspaceName'] = args.workspace_name
+        config[args.ts]['trainingService'][2]['computeTarget'] = args.compute_target
+        config[args.ts]['nni_manager_ip'] = args.nni_manager_ip
+        dump_yml_content(TRAINING_SERVICE_FILE_V2, config)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--ts", type=str, choices=['pai', 'kubeflow', 'remote', 'local', 'frameworkcontroller', 'adl', 'aml', 'hybrid'], default='pai')
+    parser.add_argument("--config_version", type=str, choices=['v1', 'v2'], default='v1')
+    parser.add_argument("--nni_docker_image", type=str)
+    parser.add_argument("--nni_manager_ip", type=str)
+    parser.add_argument("--reuse_mode", type=str, default='False')
+    # args for remote with shared storage
+    parser.add_argument("--azurestoragetoken", type=str)
+    parser.add_argument("--nfs_server", type=str)
+    parser.add_argument("--local_mount_point", type=str)
+    parser.add_argument("--remote_mount_point", type=str)
+    parser.add_argument("--exported_directory", type=str)
+    # args for PAI
+    parser.add_argument("--pai_user", type=str)
+    parser.add_argument("--pai_pwd", type=str)
+    parser.add_argument("--pai_host", type=str)
+    parser.add_argument("--data_dir", type=str)
+    parser.add_argument("--output_dir", type=str)
+    parser.add_argument("--vc", type=str)
+    parser.add_argument("--pai_token", type=str)
+    parser.add_argument("--pai_reuse", type=str)
+    parser.add_argument("--pai_storage_config_name", type=str)
+    parser.add_argument("--nni_manager_nfs_mount_path", type=str)
+    parser.add_argument("--container_nfs_mount_path", type=str)
+    parser.add_argument("--debug", type=str)
+    # args for kubeflow and frameworkController
+    parser.add_argument("--nfs_path", type=str)
+    parser.add_argument("--keyvault_vaultname", type=str)
+    parser.add_argument("--keyvault_name", type=str)
+    parser.add_argument("--azs_account", type=str)
+    parser.add_argument("--azs_share", type=str)
+    # args for remote
+    parser.add_argument("--remote_user", type=str)
+    parser.add_argument("--remote_pwd", type=str)
+    parser.add_argument("--remote_host", type=str)
+    parser.add_argument("--remote_port", type=int)
+    parser.add_argument("--remote_reuse", type=str)
+    # args for adl
+    parser.add_argument("--checkpoint_storage_class", type=str)
+    parser.add_argument("--checkpoint_storage_size", type=str)
+    parser.add_argument("--adaptive", type=str)
+    parser.add_argument("--adl_nfs_server", type=str)
+    parser.add_argument("--adl_nfs_path", type=str)
+    parser.add_argument("--adl_nfs_container_mount_path", type=str)
+    # args for aml
+    parser.add_argument("--subscription_id", type=str)
+    parser.add_argument("--resource_group", type=str)
+    parser.add_argument("--workspace_name", type=str)
+    parser.add_argument("--compute_target", type=str)
+    args = parser.parse_args()
+
+    update_training_service_config(args)
diff --git a/test/nni_test/nnitest/naive_test.py b/test/nni_test/nnitest/naive_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..b998686960113a45e19eaa1cc89e187d1cde1ec3
--- /dev/null
+++ b/test/nni_test/nnitest/naive_test.py
@@ -0,0 +1,121 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import sys
+import os.path as osp
+import argparse
+import json
+import subprocess
+import time
+import traceback
+
+from utils import is_experiment_done, get_experiment_id, get_nni_log_path, read_last_line, remove_files, setup_experiment, detect_port, wait_for_port_available
+from utils import GREEN, RED, CLEAR, EXPERIMENT_URL
+
+NNI_SOURCE_DIR = '..'
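+# NOTE: the relative paths below assume this script is launched from the test/
+# directory (as pr_tests.yml does via `python3 nni_test/nnitest/naive_test.py`),
+# so NNI_SOURCE_DIR ('..') resolves to the repository root.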
+NAIVE_TEST_CONFIG_DIR = osp.join(NNI_SOURCE_DIR, 'test', 'config', 'naive_test')
+
+def naive_test(args):
+    '''run naive integration test'''
+    to_remove = ['tuner_search_space.json', 'tuner_result.txt', 'assessor_result.txt']
+    to_remove = list(map(lambda file: osp.join(NAIVE_TEST_CONFIG_DIR, file), to_remove))
+    remove_files(to_remove)
+
+    proc = subprocess.run(['nnictl', 'create', '--config', args.config])
+    assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode
+
+    print('Spawning trials...')
+
+    nnimanager_log_path = get_nni_log_path(EXPERIMENT_URL)
+    current_trial = 0
+
+    for _ in range(120):
+        time.sleep(1)
+
+        tuner_status = read_last_line(osp.join(NAIVE_TEST_CONFIG_DIR, 'tuner_result.txt'))
+        assessor_status = read_last_line(osp.join(NAIVE_TEST_CONFIG_DIR, 'assessor_result.txt'))
+        experiment_status = is_experiment_done(nnimanager_log_path)
+
+        assert tuner_status != 'ERROR', 'Tuner exited with error'
+        assert assessor_status != 'ERROR', 'Assessor exited with error'
+
+        if experiment_status:
+            break
+
+        if tuner_status is not None:
+            for line in open(osp.join(NAIVE_TEST_CONFIG_DIR, 'tuner_result.txt')):
+                if line.strip() == 'ERROR':
+                    break
+                trial = int(line.split(' ')[0])
+                if trial > current_trial:
+                    current_trial = trial
+                    print('Trial #%d done' % trial)
+
+    assert experiment_status, 'Failed to finish in 2 min'
+
+    ss1 = json.load(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'search_space.json')))
+    ss2 = json.load(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'tuner_search_space.json')))
+    assert ss1 == ss2, 'Tuner got wrong search space'
+
+    tuner_result = set(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'tuner_result.txt')))
+    expected = set(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'expected_tuner_result.txt')))
+    # Trials may complete before NNI gets the assessor's result,
+    # so there may be more final results than expected
+    print('Tuner result:', tuner_result)
+    print('Expected tuner result:', expected)
+    assert tuner_result.issuperset(expected), 'Bad tuner result'
+
+    assessor_result = set(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'assessor_result.txt')))
+    expected = set(open(osp.join(NAIVE_TEST_CONFIG_DIR, 'expected_assessor_result.txt')))
+    assert assessor_result == expected, 'Bad assessor result'
+
+    subprocess.run(['nnictl', 'stop'])
+    wait_for_port_available(8080, 10)
+
+def stop_experiment_test(args):
+    '''Test the `nnictl stop` command, including `nnictl stop exp_id` and `nnictl stop --all`.
+    A plain `nnictl stop` is not tested here since it is used in all other test code.'''
+    config_file = args.config
+    subprocess.run(['nnictl', 'create', '--config', config_file, '--port', '8080'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', config_file, '--port', '8888'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', config_file, '--port', '8989'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', config_file, '--port', '8990'], check=True)
+
+    # test cmd `nnictl stop id`
+    experiment_id = get_experiment_id(EXPERIMENT_URL)
+    proc = subprocess.run(['nnictl', 'stop', experiment_id])
+    assert proc.returncode == 0, '`nnictl stop %s` failed with code %d' % (experiment_id, proc.returncode)
+    wait_for_port_available(8080, 10)
+    assert not detect_port(8080), '`nnictl stop %s` failed to stop experiments' % experiment_id
+
+    # test cmd `nnictl stop --port`
+    proc = subprocess.run(['nnictl', 'stop', '--port', '8990'])
+    assert proc.returncode == 0, '`nnictl stop --port 8990` failed with code %d' % proc.returncode
+    wait_for_port_available(8990, 10)
+    assert not detect_port(8990), '`nnictl stop --port 8990` failed to stop experiments'
+
+    # test cmd `nnictl stop --all`
+    proc = subprocess.run(['nnictl', 'stop', '--all'])
+    assert proc.returncode == 0, '`nnictl stop --all` failed with code %d' % proc.returncode
+    wait_for_port_available(8888, 10)
+    wait_for_port_available(8989, 10)
+    assert not detect_port(8888) and not detect_port(8989), '`nnictl stop --all` failed to stop experiments'
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--config", type=str, required=True)
+    parser.add_argument("--preinstall", action='store_true')
+    args = parser.parse_args()
+    setup_experiment(not args.preinstall)
+    try:
+        naive_test(args)
+        stop_experiment_test(args)
+        # TODO: check the output of rest server
+        print(GREEN + 'PASS' + CLEAR)
+    except Exception as error:
+        print(RED + 'FAIL' + CLEAR)
+        print('%r' % error)
+        traceback.print_exc()
+        sys.exit(1)
diff --git a/test/nni_test/nnitest/remote_docker.py b/test/nni_test/nnitest/remote_docker.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c89c34374f852f8e1d48ced8dd2c8df8e3d836b
--- /dev/null
+++ b/test/nni_test/nnitest/remote_docker.py
@@ -0,0 +1,84 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
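+# NOTE: a sketch of the expected usage (container name and image are
+# illustrative); /tmp/nnitest/<name> must be prepared beforehand, and the chosen
+# SSH port is written to /tmp/nnitest/<name>/port for the caller to read:
+#
+#     python3 remote_docker.py --mode start --name nni-it --image ubuntu:18.04 --os unix
+#     python3 remote_docker.py --mode stop --name nni-it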
+
+import os
+import argparse
+import sys
+from subprocess import check_output, check_call
+import socket
+import random
+import re
+
+def detect_port(port):
+    '''Detect if the port is used, return True if the port is used'''
+    socket_test = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    try:
+        socket_test.connect(('127.0.0.1', int(port)))
+        socket_test.close()
+        return True
+    except OSError:
+        return False
+
+def find_port():
+    '''Find a port which is free'''
+    port = random.randint(10000, 20000)
+    while detect_port(port):
+        port = random.randint(10000, 20000)
+    return port
+
+def find_wheel_package(dir):
+    '''Find the wheel package uploaded to this machine'''
+    regular = re.compile(r'^nni-.*\.whl$')
+    for file_name in os.listdir(dir):
+        if regular.search(file_name):
+            return file_name
+    return None
+
+def start_container(image, name, nnimanager_os):
+    '''Start docker container, generate a port in /tmp/nnitest/{name}/port file'''
+    port = find_port()
+    source_dir = '/tmp/nnitest/' + name
+    run_cmds = ['docker', 'run', '-d', '-t', '-p', str(port) + ':22', '--name', name, '--mount', 'type=bind,source=' + source_dir + ',target=/tmp/nni', image]
+    # `docker run -d` prints the container id on stdout; it is not needed here,
+    # we only rely on check_output to raise if the container fails to start
+    check_output(run_cmds)
+
+    if nnimanager_os == 'windows':
+        wheel_name = find_wheel_package(os.path.join(source_dir, 'nni-remote/deployment/pypi/dist'))
+    else:
+        wheel_name = find_wheel_package(os.path.join(source_dir, 'dist'))
+
+    if not wheel_name:
+        print('Error: could not find wheel package in {0}'.format(source_dir))
+        sys.exit(1)
+
+    def get_dist(wheel_name):
+        '''get the wheel package path'''
+        if nnimanager_os == 'windows':
+            return '/tmp/nni/nni-remote/deployment/pypi/dist/{0}'.format(wheel_name)
+        else:
+            return '/tmp/nni/dist/{0}'.format(wheel_name)
+
+    pip_cmds = ['docker', 'exec', name, 'python3', '-m', 'pip', 'install', '--upgrade', 'pip', 'setuptools==41.0.0']
+    check_call(pip_cmds)
+    sdk_cmds = ['docker', 'exec', name, 'python3', '-m', 'pip', 'install', get_dist(wheel_name)]
+    check_call(sdk_cmds)
+    with open(source_dir + '/port', 'w') as file:
+        file.write(str(port))
+
+def stop_container(name):
+    '''Stop docker container'''
+    stop_cmds = ['docker', 'container', 'stop', name]
+    check_call(stop_cmds)
+    rm_cmds = ['docker', 'container', 'rm', name]
+    check_call(rm_cmds)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--mode', required=True, choices=['start', 'stop'], dest='mode', help='start or stop a container')
+    parser.add_argument('--name', required=True, dest='name', help='the name of container to be used')
+    parser.add_argument('--image', dest='image', help='the image to be used')
+    parser.add_argument('--os', dest='os', default='unix', choices=['unix', 'windows'], help='nniManager os version')
+    args = parser.parse_args()
+    if args.mode == 'start':
+        start_container(args.image, args.name, args.os)
+    else:
+        stop_container(args.name)
diff --git a/test/nni_test/nnitest/run_tests.py b/test/nni_test/nnitest/run_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f979bda68940c2710235227c01c77b4d1df62a9
--- /dev/null
+++ b/test/nni_test/nnitest/run_tests.py
@@ -0,0 +1,328 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
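+# NOTE: a sketch of typical invocations (assuming the working directory is
+# test/); --cases and --exclude filter test case names by substring match:
+#
+#     python3 nni_test/nnitest/run_tests.py --config config/pr_tests.yml --ts local
+#     python3 nni_test/nnitest/run_tests.py --config config/pr_tests.yml --ts remote --exclude tuner-gp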
+ +import argparse +import datetime +import json +import os +import subprocess +import sys +import time + +import yaml + +import validators +from utils import (CLEAR, EXPERIMENT_URL, GREEN, RED, REST_ENDPOINT, + STATUS_URL, TRIAL_JOBS_URL, deep_update, dump_yml_content, + get_experiment_dir, get_experiment_id, + get_experiment_status, get_failed_trial_jobs, + get_trial_stats, get_yml_content, parse_max_duration_time, + print_experiment_log, print_trial_job_log, + wait_for_port_available) + +it_variables = {} + + +def update_training_service_config(config, training_service, config_file_path, nni_source_dir, reuse_mode='False'): + it_ts_config = get_yml_content(os.path.join('config', 'training_service.yml')) + # hack for kubeflow trial config + if training_service == 'kubeflow' and reuse_mode == 'False': + it_ts_config[training_service]['trial']['worker']['command'] = config['trial']['command'] + config['trial'].pop('command') + if 'gpuNum' in config['trial']: + config['trial'].pop('gpuNum') + elif training_service == 'kubeflow' and reuse_mode == 'True': + it_ts_config = get_yml_content(os.path.join('config', 'training_service_v2.yml')) + print(it_ts_config) + it_ts_config[training_service]['trainingService']['worker']['command'] = config['trialCommand'] + it_ts_config[training_service]['trainingService']['worker']['code_directory'] = config['trialCodeDirectory'] + + if training_service == 'frameworkcontroller' and reuse_mode == 'False': + it_ts_config[training_service]['trial']['taskRoles'][0]['command'] = config['trial']['command'] + config['trial'].pop('command') + if 'gpuNum' in config['trial']: + config['trial'].pop('gpuNum') + elif training_service == 'frameworkcontroller' and reuse_mode == 'True': + it_ts_config = get_yml_content(os.path.join('config', 'training_service_v2.yml')) + it_ts_config[training_service]['trainingService']['taskRoles'][0]['command'] = config['trialCommand'] + + if training_service == 'adl': + # hack for adl trial config, codeDir in adl mode refers to path in container + containerCodeDir = config['trial']['codeDir'] + # replace metric test folders to container folder + if config['trial']['codeDir'] == '.': + containerCodeDir = '/' + config_file_path[:config_file_path.rfind('/')] + elif config['trial']['codeDir'] == '../naive_trial': + containerCodeDir = '/test/config/naive_trial' + elif '../../../' in config['trial']['codeDir']: + # replace example folders to container folder + containerCodeDir = config['trial']['codeDir'].replace('../../../', '/') + it_ts_config[training_service]['trial']['codeDir'] = containerCodeDir + it_ts_config[training_service]['trial']['command'] = 'cd {0} && {1}'.format(containerCodeDir, config['trial']['command']) + + if training_service == 'remote': + testcase_config = get_yml_content(nni_source_dir + config_file_path) + sharedStorage = testcase_config.get('sharedStorage') + if sharedStorage is None: + it_ts_config[training_service].pop('sharedStorage') + elif str(sharedStorage.get('storageType')).lower() == 'nfs': + it_ts_config[training_service].get('sharedStorage').pop('storageAccountKey') + elif str(sharedStorage.get('storageType')).lower() == 'azureblob': + it_ts_config[training_service].get('sharedStorage').pop('nfsServer') + it_ts_config[training_service].get('sharedStorage').pop('exportedDirectory') + else: + it_ts_config[training_service].pop('sharedStorage') + + if training_service == 'hybrid': + it_ts_config = get_yml_content(os.path.join('config', 'training_service_v2.yml')) + elif reuse_mode != 'True': + 
deep_update(config, it_ts_config['all']) + deep_update(config, it_ts_config[training_service]) + + +def prepare_config_file(test_case_config, it_config, args): + config_path = args.nni_source_dir + test_case_config['configFile'] + test_yml_config = get_yml_content(config_path) + + # apply test case specific config + if test_case_config.get('config') is not None: + deep_update(test_yml_config, test_case_config['config']) + + # hack for windows + if sys.platform == 'win32' and args.ts == 'local': + test_yml_config['trial']['command'] = test_yml_config['trial']['command'].replace('python3', 'python') + + # apply training service config + # user's gpuNum, logCollection config is overwritten by the config in training_service.yml + # the hack for kubeflow should be applied at last step + update_training_service_config(test_yml_config, args.ts, test_case_config['configFile'], args.nni_source_dir, args.reuse_mode) + + # generate temporary config yml file to launch experiment + new_config_file = config_path + '.tmp' + dump_yml_content(new_config_file, test_yml_config) + print(yaml.safe_dump(test_yml_config, default_flow_style=False), flush=True) + + return new_config_file + + +def run_test_case(test_case_config, it_config, args): + new_config_file = prepare_config_file(test_case_config, it_config, args) + # set configFile variable + it_variables['$configFile'] = new_config_file + + try: + launch_test(new_config_file, args.ts, test_case_config) + invoke_validator(test_case_config, args.nni_source_dir, args.ts) + finally: + stop_command = get_command(test_case_config, 'stopCommand') + print('Stop command:', stop_command, flush=True) + if stop_command: + subprocess.run(stop_command, shell=True) + exit_command = get_command(test_case_config, 'onExitCommand') + print('Exit command:', exit_command, flush=True) + if exit_command: + subprocess.run(exit_command, shell=True, check=True) + # remove tmp config file + if os.path.exists(new_config_file): + os.remove(new_config_file) + + +def invoke_validator(test_case_config, nni_source_dir, training_service): + validator_config = test_case_config.get('validator') + if validator_config is None or validator_config.get('class') is None: + return + + validator = validators.__dict__[validator_config.get('class')]() + kwargs = validator_config.get('kwargs', {}) + print('kwargs:', kwargs) + experiment_id = get_experiment_id(EXPERIMENT_URL) + try: + validator(REST_ENDPOINT, get_experiment_dir(EXPERIMENT_URL), nni_source_dir, **kwargs) + except: + print_experiment_log(experiment_id=experiment_id) + print_trial_job_log(training_service, TRIAL_JOBS_URL) + raise + + +def get_max_values(config_file): + experiment_config = get_yml_content(config_file) + if experiment_config.get('maxExecDuration'): + return parse_max_duration_time(experiment_config['maxExecDuration']), experiment_config['maxTrialNum'] + else: + return parse_max_duration_time(experiment_config['maxExperimentDuration']), experiment_config['maxTrialNumber'] + + +def get_command(test_case_config, commandKey): + command = test_case_config.get(commandKey) + if commandKey == 'launchCommand': + assert command is not None + if command is None: + return None + + # replace variables + for k in it_variables: + command = command.replace(k, it_variables[k]) + + # hack for windows, not limited to local training service + if sys.platform == 'win32': + command = command.replace('python3', 'python') + + return command + + +def launch_test(config_file, training_service, test_case_config): + launch_command = 
get_command(test_case_config, 'launchCommand') + print('launch command: ', launch_command, flush=True) + + proc = subprocess.run(launch_command, shell=True) + + assert proc.returncode == 0, 'launch command failed with code %d' % proc.returncode + + # set experiment ID into variable + exp_var_name = test_case_config.get('setExperimentIdtoVar') + if exp_var_name is not None: + assert exp_var_name.startswith('$') + it_variables[exp_var_name] = get_experiment_id(EXPERIMENT_URL) + print('variables:', it_variables) + + max_duration, max_trial_num = get_max_values(config_file) + print('max_duration:', max_duration, ' max_trial_num:', max_trial_num) + + if not test_case_config.get('experimentStatusCheck'): + return + + bg_time = time.time() + print(str(datetime.datetime.now()), ' waiting ...', flush=True) + try: + # wait restful server to be ready + time.sleep(3) + experiment_id = get_experiment_id(EXPERIMENT_URL) + while True: + waited_time = time.time() - bg_time + if waited_time > max_duration + 10: + print('waited: {}, max_duration: {}'.format(waited_time, max_duration)) + break + status = get_experiment_status(STATUS_URL) + if status in ['DONE', 'ERROR']: + print('experiment status:', status) + break + num_failed = len(get_failed_trial_jobs(TRIAL_JOBS_URL)) + if num_failed > 0: + print('failed jobs: ', num_failed) + break + time.sleep(1) + except: + print_experiment_log(experiment_id=experiment_id) + raise + print(str(datetime.datetime.now()), ' waiting done', flush=True) + if get_experiment_status(STATUS_URL) == 'ERROR': + print_experiment_log(experiment_id=experiment_id) + + trial_stats = get_trial_stats(TRIAL_JOBS_URL) + print(json.dumps(trial_stats, indent=4), flush=True) + if status != 'DONE' or trial_stats['SUCCEEDED'] + trial_stats['EARLY_STOPPED'] < max_trial_num: + print_experiment_log(experiment_id=experiment_id) + print_trial_job_log(training_service, TRIAL_JOBS_URL) + raise AssertionError('Failed to finish in maxExecDuration') + + +def case_excluded(name, excludes): + if name is None: + return False + if excludes is not None: + excludes = excludes.split(',') + for e in excludes: + if name in e or e in name: + return True + return False + + +def case_included(name, cases): + assert cases is not None + for case in cases.split(','): + if case in name: + return True + return False + + +def match_platform(test_case_config): + return sys.platform in test_case_config['platform'].split(' ') + + +def match_training_service(test_case_config, cur_training_service): + case_ts = test_case_config['trainingService'] + assert case_ts is not None + if case_ts == 'all': + return True + if cur_training_service in case_ts.split(' '): + return True + return False + +def match_remoteConfig(test_case_config, nni_source_dir): + trainingservice_config = get_yml_content(os.path.join('config', 'training_service.yml')) + trainingservice_config_reuse_value = str(trainingservice_config['remote']['remoteConfig']['reuse']).lower() + testcase_config = get_yml_content(nni_source_dir + test_case_config['configFile']) + if testcase_config.get('remoteConfig') is not None: + if testcase_config['remoteConfig'].get('reuse') is not None: + return str(testcase_config['remoteConfig']['reuse']).lower() == trainingservice_config_reuse_value + return True + + +def run(args): + it_config = get_yml_content(args.config) + + for test_case_config in it_config['testCases']: + name = test_case_config['name'] + if case_excluded(name, args.exclude): + print('{} excluded'.format(name)) + continue + if args.cases and not 
case_included(name, args.cases): + continue + + # fill test case default config + for k in it_config['defaultTestCaseConfig']: + if k not in test_case_config: + test_case_config[k] = it_config['defaultTestCaseConfig'][k] + print(json.dumps(test_case_config, indent=4)) + + if not match_platform(test_case_config): + print('skipped {}, platform {} not match [{}]'.format(name, sys.platform, test_case_config['platform'])) + continue + + if not match_training_service(test_case_config, args.ts): + print('skipped {}, training service {} not match [{}]'.format( + name, args.ts, test_case_config['trainingService'])) + continue + + # remote mode need more time to cleanup + if args.ts == 'remote' or args.ts == 'hybrid': + if args.ts == 'remote': + if not match_remoteConfig(test_case_config, args.nni_source_dir): + print('skipped {}, remoteConfig not match.'.format(name)) + continue + wait_for_port_available(8080, 240) + else: + wait_for_port_available(8080, 60) + + # adl mode need more time to cleanup PVC + if args.ts == 'adl' and name == 'nnictl-resume-2': + time.sleep(30) + print('## {}Testing: {}{} ##'.format(GREEN, name, CLEAR)) + begin_time = time.time() + + run_test_case(test_case_config, it_config, args) + print('{}Test {}: TEST PASS IN {} SECONDS{}'.format(GREEN, name, int(time.time()-begin_time), CLEAR), flush=True) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, required=True) + parser.add_argument("--nni_source_dir", type=str, default='../') + parser.add_argument("--cases", type=str, default=None) + parser.add_argument("--exclude", type=str, default=None) + parser.add_argument("--reuse_mode", type=str, default='False') + parser.add_argument("--ts", type=str, choices=['local', 'remote', 'pai', + 'kubeflow', 'frameworkcontroller', 'adl', 'aml', 'hybrid'], default='local') + args = parser.parse_args() + + run(args) diff --git a/test/nni_test/nnitest/test_quantize_model_speedup.py b/test/nni_test/nnitest/test_quantize_model_speedup.py new file mode 100644 index 0000000000000000000000000000000000000000..31ab762acb81de15edcb19d9eea95a43e1dfc556 --- /dev/null +++ b/test/nni_test/nnitest/test_quantize_model_speedup.py @@ -0,0 +1,218 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
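+# Integration tests for quantized-model speedup: quantize a small MNIST CNN
+# (post-training and with QAT), build a TensorRT engine via ModelSpeedupTensorRT,
+# and check accuracy and inference time against the PyTorch model.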
+
+import os
+import torch
+import torch.nn.functional as F
+from torchvision.models.vgg import vgg16
+from torchvision import datasets, transforms
+from unittest import TestCase, main
+
+from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
+from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT
+
+torch.manual_seed(0)
+
+class BackboneModel(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = torch.nn.Conv2d(1, 20, 5, 1)
+        self.conv2 = torch.nn.Conv2d(20, 50, 5, 1)
+        self.fc1 = torch.nn.Linear(4 * 4 * 50, 500)
+        self.fc2 = torch.nn.Linear(500, 10)
+        self.relu1 = torch.nn.ReLU6()
+        self.relu2 = torch.nn.ReLU6()
+        self.relu3 = torch.nn.ReLU6()
+        self.max_pool1 = torch.nn.MaxPool2d(2, 2)
+        self.max_pool2 = torch.nn.MaxPool2d(2, 2)
+
+    def forward(self, x):
+        x = self.relu1(self.conv1(x))
+        x = self.max_pool1(x)
+        x = self.relu2(self.conv2(x))
+        x = self.max_pool2(x)
+        x = x.view(-1, x.size()[1:].numel())
+        x = self.relu3(self.fc1(x))
+        x = self.fc2(x)
+        return F.log_softmax(x, dim=1)
+
+class QuantizationSpeedupTestCase(TestCase):
+    def __init__(self, methodName: str) -> None:
+        super().__init__(methodName=methodName)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+        self.train_loader = torch.utils.data.DataLoader(
+            datasets.MNIST('data', train=True, download=True, transform=trans),
+            batch_size=64, shuffle=True)
+        self.test_loader = torch.utils.data.DataLoader(
+            datasets.MNIST('data', train=False, transform=trans),
+            batch_size=1000, shuffle=True)
+
+    def _train(self, model, optimizer):
+        model.train()
+        for batch_idx, (data, target) in enumerate(self.train_loader):
+            data, target = data.to(self.device), target.to(self.device)
+            optimizer.zero_grad()
+            output = model(data)
+            loss = F.nll_loss(output, target)
+            loss.backward()
+            optimizer.step()
+            if batch_idx % 100 == 0:
+                print('{:2.0f}% Loss {}'.format(100 * batch_idx / len(self.train_loader), loss.item()))
+
+    def _test(self, model):
+        model.eval()
+        test_loss = 0
+        correct = 0
+        with torch.no_grad():
+            for data, target in self.test_loader:
+                data, target = data.to(self.device), target.to(self.device)
+                output = model(data)
+                test_loss += F.nll_loss(output, target, reduction='sum').item()
+                pred = output.argmax(dim=1, keepdim=True)
+                correct += pred.eq(target.view_as(pred)).sum().item()
+        test_loss /= len(self.test_loader.dataset)
+
+        print('Loss: {} Accuracy: {}%\n'.format(
+            test_loss, 100 * correct / len(self.test_loader.dataset)))
+
+    def _test_trt(self, engine):
+        test_loss = 0
+        correct = 0
+        time_elapsed = 0
+        for data, target in self.test_loader:
+            output, infer_time = engine.inference(data)
+            test_loss += F.nll_loss(output, target, reduction='sum').item()
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()
+            time_elapsed += infer_time
+        test_loss /= len(self.test_loader.dataset)
+
+        print('Loss: {} Accuracy: {}%'.format(
+            test_loss, 100 * correct / len(self.test_loader.dataset)))
+        print("Inference elapsed_time (whole dataset): {}s".format(time_elapsed))
+
+    def test_post_training_quantization_speedup(self):
+        model = BackboneModel()
+
+        configure_list = {
+            'conv1':{'weight_bits':8, 'output_bits':8},
+            'conv2':{'weight_bits':32, 'output_bits':32},
+            'fc1':{'weight_bits':16, 'output_bits':16},
+            'fc2':{'weight_bits':8, 'output_bits':8}
+        }
+
+        optimizer = 
torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + + model.to(self.device) + for epoch in range(1): + print('# Epoch {} #'.format(epoch)) + self._train(model, optimizer) + self._test(model) + + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + calibration_path = "calibration.cache" + onnx_path = "default_model.onnx" + + engine = ModelSpeedupTensorRT(model, input_shape, config=configure_list, calib_data_loader=self.train_loader, batchsize=batch_size) + engine.compress() + self._test_trt(engine) + os.remove(calibration_path) + os.remove(onnx_path) + + def test_qat_quantization_speedup(self): + model = BackboneModel() + + configure_list = [{ + 'quant_types': ['input', 'weight'], + 'quant_bits': {'input':8, 'weight':8}, + 'op_names': ['conv1'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output':8}, + 'op_names': ['relu1'] + }, { + 'quant_types': ['input', 'weight'], + 'quant_bits': {'input':8, 'weight':8}, + 'op_names': ['conv2'] + }, { + 'quant_types': ['output'], + 'quant_bits': {'output':8}, + 'op_names': ['relu2'] + } + ] + + # finetune the model by using QAT + dummy_input = torch.randn(1, 1, 28, 28) + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + quantizer = QAT_Quantizer(model, configure_list, optimizer, dummy_input) + quantizer.compress() + + model.to(self.device) + for epoch in range(1): + print('# Epoch {} #'.format(epoch)) + self._train(model, optimizer) + self._test(model) + + model_path = "mnist_model.pth" + calibration_path = "mnist_calibration.pth" + calibration_config = quantizer.export_model(model_path, calibration_path) + + self._test(model) + + print("calibration_config: ", calibration_config) + + batch_size = 32 + input_shape = (batch_size, 1, 28, 28) + + engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=batch_size) + engine.compress() + + self._test_trt(engine) + + os.remove(model_path) + os.remove(calibration_path) + + def test_export_load_quantized_model_vgg16(self): + model = vgg16() + + configure_list = { + 'features.0':{'weight_bits':8, 'output_bits':8}, + 'features.1':{'weight_bits':32, 'output_bits':32}, + 'features.2':{'weight_bits':16, 'output_bits':16}, + 'features.4':{'weight_bits':8, 'output_bits':8}, + 'features.7':{'weight_bits':8, 'output_bits':8}, + 'features.8':{'weight_bits':8, 'output_bits':8}, + 'features.11':{'weight_bits':8, 'output_bits':8} + } + + model.to(self.device) + + batch_size = 1 + input_shape = (batch_size, 3, 224, 224) + dummy_input = torch.randn(input_shape).to(self.device) + + output_torch = model(dummy_input) + + engine = ModelSpeedupTensorRT(model, input_shape, config=configure_list, calib_data_loader=dummy_input, batchsize=batch_size) + engine.compress() + output, _ = engine.inference(dummy_input) + + # verify result shape + assert(output.shape == output_torch.shape) + + export_path = "vgg16_trt.engine" + calibration_path = "calibration.cache" + engine.export_quantized_model(export_path) + engine.load_quantized_model(export_path) + output, _ = engine.inference(dummy_input) + + assert(output.shape == output_torch.shape) + + os.remove(export_path) + os.remove(calibration_path) + +if __name__ == '__main__': + main() diff --git a/test/nni_test/nnitest/utils.py b/test/nni_test/nnitest/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..300799003ff494d2b4b4ab5b45a736f9195f1f39 --- /dev/null +++ b/test/nni_test/nnitest/utils.py @@ -0,0 +1,181 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
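+# Shared helpers for the integration tests: REST endpoints of the local NNI
+# manager, YAML load/dump helpers, and the polling/logging utilities used by
+# the test runner and the validators.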
+
+import contextlib
+import collections
+import collections.abc
+import os
+import socket
+import sys
+import subprocess
+import requests
+import time
+import yaml
+import shlex
+
+EXPERIMENT_DONE_SIGNAL = 'Experiment done'
+
+GREEN = '\33[32m'
+RED = '\33[31m'
+CLEAR = '\33[0m'
+
+REST_ENDPOINT = 'http://localhost:8080'
+API_ROOT_URL = REST_ENDPOINT + '/api/v1/nni'
+EXPERIMENT_URL = API_ROOT_URL + '/experiment'
+STATUS_URL = API_ROOT_URL + '/check-status'
+TRIAL_JOBS_URL = API_ROOT_URL + '/trial-jobs'
+METRICS_URL = API_ROOT_URL + '/metric-data'
+GET_IMPORTED_DATA_URL = API_ROOT_URL + '/experiment/imported-data'
+
+def read_last_line(file_name):
+    '''Read the last line of a file; return None if the file is missing or empty.'''
+    try:
+        with open(file_name) as file:
+            *_, last_line = file
+        return last_line.strip()
+    except (FileNotFoundError, ValueError):
+        return None
+
+def remove_files(file_list):
+    '''Remove a list of files, ignoring those that do not exist'''
+    for file_path in file_list:
+        with contextlib.suppress(FileNotFoundError):
+            os.remove(file_path)
+
+def get_yml_content(file_path):
+    '''Load yaml file content'''
+    with open(file_path, 'r') as file:
+        return yaml.safe_load(file)
+
+def dump_yml_content(file_path, content):
+    '''Dump yaml file content'''
+    with open(file_path, 'w') as file:
+        file.write(yaml.safe_dump(content, default_flow_style=False))
+
+def setup_experiment(installed=True):
+    '''Set up PATH and PYTHONPATH when nni is run from source rather than an installed package'''
+    if not installed:
+        os.environ['PATH'] = os.environ['PATH'] + ':' + os.getcwd()
+        sdk_path = os.path.abspath('../src/sdk/pynni')
+        cmd_path = os.path.abspath('../tools')
+        pypath = os.environ.get('PYTHONPATH')
+        if pypath:
+            pypath = ':'.join([pypath, sdk_path, cmd_path])
+        else:
+            pypath = ':'.join([sdk_path, cmd_path])
+        os.environ['PYTHONPATH'] = pypath
+
+def get_experiment_id(experiment_url):
+    experiment_id = requests.get(experiment_url).json()['id']
+    return experiment_id
+
+def get_experiment_dir(experiment_url=None, experiment_id=None):
+    '''get experiment root directory'''
+    assert any([experiment_url, experiment_id])
+    if experiment_id is None:
+        experiment_id = get_experiment_id(experiment_url)
+    return os.path.join(os.path.expanduser('~'), 'nni-experiments', experiment_id)
+
+def get_nni_log_dir(experiment_url=None, experiment_id=None):
+    '''get nni's log directory from nni's experiment url'''
+    return os.path.join(get_experiment_dir(experiment_url, experiment_id), 'log')
+
+def get_nni_log_path(experiment_url):
+    '''get nni's log path from nni's experiment url'''
+    return os.path.join(get_nni_log_dir(experiment_url), 'nnimanager.log')
+
+def is_experiment_done(nnimanager_log_path):
+    '''check if the experiment is done successfully'''
+    assert os.path.exists(nnimanager_log_path), 'Experiment failed to start'
+
+    with open(nnimanager_log_path, 'r') as f:
+        log_content = f.read()
+
+    return EXPERIMENT_DONE_SIGNAL in log_content
+
+def get_experiment_status(status_url):
+    nni_status = requests.get(status_url).json()
+    return nni_status['status']
+
+def get_trial_stats(trial_jobs_url):
+    trial_jobs = requests.get(trial_jobs_url).json()
+    trial_stats = collections.defaultdict(int)
+    for trial_job in trial_jobs:
+        trial_stats[trial_job['status']] += 1
+    return trial_stats
+
+def get_trial_jobs(trial_jobs_url, status=None):
+    '''Return trial jobs, optionally filtered by status'''
+    trial_jobs = requests.get(trial_jobs_url).json()
+    res = []
+    for trial_job in trial_jobs:
+        if status is None or trial_job['status'] == status:
+            res.append(trial_job)
+    return res
+
+def get_failed_trial_jobs(trial_jobs_url):
+    '''Return failed trial jobs'''
+    return get_trial_jobs(trial_jobs_url, 'FAILED')
+
+def print_file_content(filepath):
+    with open(filepath, 'r') as f:
+        content = f.read()
+    print(filepath, flush=True)
+    print(content, flush=True)
+
+def print_trial_job_log(training_service, trial_jobs_url):
+    trial_jobs = get_trial_jobs(trial_jobs_url)
+    for trial_job in trial_jobs:
+        trial_log_dir = os.path.join(get_experiment_dir(EXPERIMENT_URL), 'trials', trial_job['trialJobId'])
+        log_files = ['stderr', 'trial.log'] if training_service == 'local' else ['stdout_log_collection.log']
+        for log_file in log_files:
+            print_file_content(os.path.join(trial_log_dir, log_file))
+
+def print_experiment_log(experiment_id):
+    log_dir = get_nni_log_dir(experiment_id=experiment_id)
+    for log_file in ['dispatcher.log', 'nnimanager.log']:
+        filepath = os.path.join(log_dir, log_file)
+        print_file_content(filepath)
+
+    print('nnictl log stderr:')
+    subprocess.run(shlex.split('nnictl log stderr {}'.format(experiment_id)))
+    print('nnictl log stdout:')
+    subprocess.run(shlex.split('nnictl log stdout {}'.format(experiment_id)))
+
+def parse_max_duration_time(max_exec_duration):
+    '''Convert a duration string such as "30m" or "1h" into seconds'''
+    unit = max_exec_duration[-1]
+    amount = max_exec_duration[:-1]
+    units_dict = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
+    return int(amount) * units_dict[unit]
+
+def deep_update(source, overrides):
+    """Update a nested dictionary or similar mapping.
+
+    Modify ``source`` in place.
+    """
+    for key, value in overrides.items():
+        if isinstance(value, collections.abc.Mapping) and value:
+            returned = deep_update(source.get(key, {}), value)
+            source[key] = returned
+        else:
+            source[key] = overrides[key]
+    return source
+
+def detect_port(port):
+    '''Return True if something is listening on the given local port'''
+    socket_test = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    try:
+        socket_test.connect(('127.0.0.1', int(port)))
+        socket_test.close()
+        return True
+    except OSError:
+        return False
+
+
+def wait_for_port_available(port, timeout):
+    begin_time = time.time()
+    while True:
+        if not detect_port(port):
+            return
+        if time.time() - begin_time > timeout:
+            msg = 'port {} is not available in {} seconds.'.format(port, timeout)
+            raise RuntimeError(msg)
+        time.sleep(1)
diff --git a/test/nni_test/nnitest/validators.py b/test/nni_test/nnitest/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f9baee9ef89d16c95bea8109714085f44cacc82
--- /dev/null
+++ b/test/nni_test/nnitest/validators.py
@@ -0,0 +1,113 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
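+# Validators run after an experiment finishes; each one checks a single aspect
+# of the result (nnictl export/import, reported metrics, the Python client API,
+# shared storage output) and raises AssertionError on mismatch.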
+ +import os.path as osp +from os import remove +import subprocess +import json +import requests +from nni.experiment import Experiment +from nni.tools.nnictl.updater import load_imported_data +from utils import METRICS_URL, GET_IMPORTED_DATA_URL + + +class ITValidator: + def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs): + pass + +class ExportValidator(ITValidator): + def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs): + exp_id = osp.split(experiment_dir)[-1] + proc1 = subprocess.run(["nnictl", "experiment", "export", exp_id, "-t", "csv", "-f", "report.csv"]) + assert proc1.returncode == 0, '`nnictl experiment export -t csv` failed with code %d' % proc1.returncode + with open("report.csv", 'r') as f: + print('Exported CSV file: \n') + print(''.join(f.readlines())) + print('\n\n') + remove('report.csv') + + proc2 = subprocess.run(["nnictl", "experiment", "export", exp_id, "-t", "json", "-f", "report.json"]) + assert proc2.returncode == 0, '`nnictl experiment export -t json` failed with code %d' % proc2.returncode + with open("report.json", 'r') as f: + print('Exported JSON file: \n') + print('\n'.join(f.readlines())) + print('\n\n') + remove('report.json') + +class ImportValidator(ITValidator): + def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs): + exp_id = osp.split(experiment_dir)[-1] + import_data_file_path = kwargs.get('import_data_file_path') + proc = subprocess.run(['nnictl', 'experiment', 'import', exp_id, '-f', import_data_file_path]) + assert proc.returncode == 0, \ + '`nnictl experiment import {0} -f {1}` failed with code {2}'.format(exp_id, import_data_file_path, proc.returncode) + imported_data = requests.get(GET_IMPORTED_DATA_URL).json() + origin_data = load_imported_data(import_data_file_path).replace(' ', '') + assert origin_data in imported_data + +class MetricsValidator(ITValidator): + def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs): + self.check_metrics(nni_source_dir, **kwargs) + + def check_metrics(self, nni_source_dir, **kwargs): + expected_result_file = kwargs.get('expected_result_file', 'expected_metrics.json') + with open(osp.join(nni_source_dir, 'test', 'config', 'metrics_test', expected_result_file), 'r') as f: + expected_metrics = json.load(f) + print('expected metrics:', expected_metrics) + metrics = requests.get(METRICS_URL).json() + print('RAW METRICS:', json.dumps(metrics, indent=4)) + intermediate_result, final_result = self.get_metric_results(metrics) + + assert intermediate_result and final_result + for trialjob_id in intermediate_result: + trial_final_result = final_result[trialjob_id] + trial_intermediate_result = intermediate_result[trialjob_id] + print('intermediate result:', trial_intermediate_result) + print('final result:', trial_final_result) + assert len(trial_final_result) == 1, 'there should be 1 final result' + assert trial_final_result[0] == expected_metrics['final_result'] + # encode dict/number into json string to compare them in set + assert set([json.dumps(x, sort_keys=True) for x in trial_intermediate_result]) \ + == set([json.dumps(x, sort_keys=True) for x in expected_metrics['intermediate_result']]) + + def get_metric_results(self, metrics): + intermediate_result = {} + final_result = {} + for metric in metrics: + # metrics value are encoded by NNI SDK as json string, + # here we decode the value by json.loads twice + metric_value = json.loads(json.loads(metric['data'])) + if metric['type'] == 'PERIODICAL': + if metric['trialJobId'] 
in intermediate_result:
+                    intermediate_result[metric['trialJobId']].append(metric_value)
+                else:
+                    intermediate_result[metric['trialJobId']] = [metric_value]
+            elif metric['type'] == 'FINAL':
+                if metric['trialJobId'] in final_result:
+                    final_result[metric['trialJobId']].append(metric_value)
+                else:
+                    final_result[metric['trialJobId']] = [metric_value]
+        return intermediate_result, final_result
+
+class NnicliValidator(ITValidator):
+    def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs):
+        print(rest_endpoint)
+        exp = Experiment()
+        exp.connect(int(rest_endpoint.split(':')[-1]))
+        print(exp.get_job_statistics())
+        print(exp.get_experiment_status())
+        print(exp.list_trial_jobs())
+
+class FileExistValidator(ITValidator):
+    def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs):
+        print(rest_endpoint)
+        exp_id = osp.split(experiment_dir)[-1]
+        rootpath = kwargs.get('rootpath')
+
+        metrics = requests.get(METRICS_URL).json()
+        for metric in metrics:
+            trial_id = metric['trialJobId']
+            checkpath = osp.join(rootpath, 'nni', exp_id, 'trials', trial_id, 'nnioutput', 'checkingfile.txt')
+            print('Checking that the shared storage log exists for trial', trial_id)
+            assert osp.exists(checkpath)
+
diff --git a/test/nni_test/setup.py b/test/nni_test/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2f12606ffbef2c60f1eeff3fadb853d68c9bdc5
--- /dev/null
+++ b/test/nni_test/setup.py
@@ -0,0 +1,18 @@
+from setuptools import setup, find_packages
+
+setup(
+    name="nnitest",
+    version="0.0.1",
+    author='Microsoft NNI team',
+    author_email='nni@microsoft.com',
+    description='Integration test package for Neural Network Intelligence',
+    license='MIT',
+    url='https://github.com/Microsoft/nni',
+    packages=find_packages('nnitest'),
+    long_description="",
+    classifiers=[
+        'Programming Language :: Python :: 3',
+        'License :: OSI Approved :: MIT License',
+        'Operating System :: OS Independent'
+    ],
+)
diff --git a/test/pytest.ini b/test/pytest.ini
new file mode 100644
index 0000000000000000000000000000000000000000..5daf6aa426f78b6c696f27449baea4cd420311b3
--- /dev/null
+++ b/test/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = --cov=nni --cov-config=.coveragerc --junitxml=junit/test-results.xml --cov-report=xml --cov-report=html
diff --git a/test/retiarii_test/cgo/darts_model.py b/test/retiarii_test/cgo/darts_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..1354bdc14bcad56dc311bb6a2a7846727e891c83
--- /dev/null
+++ b/test/retiarii_test/cgo/darts_model.py
@@ -0,0 +1,165 @@
+from collections import OrderedDict
+from typing import (List, Optional)
+
+import torch
+import torch.nn as torch_nn
+#sys.path.append(str(Path(__file__).resolve().parents[2]))
+
+import ops
+import nni.retiarii.nn.pytorch as nn
+from nni.retiarii import basic_unit
+
+@basic_unit
+class AuxiliaryHead(nn.Module):
+    """ Auxiliary head at the 2/3 position of the network to improve gradient flow """
+
+    def __init__(self, input_size, C, n_classes):
+        """ assuming input size 7x7 or 8x8 """
+        assert input_size in [7, 8]
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ReLU(inplace=True),
+            nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False), # 2x2 out
+            nn.Conv2d(C, 128, kernel_size=1, bias=False),
+            nn.BatchNorm2d(128),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(128, 768, kernel_size=2, bias=False), # 1x1 out
+            nn.BatchNorm2d(768),
+            nn.ReLU(inplace=True)
+        )
+        self.linear = nn.Linear(768, n_classes)
+
+    def forward(self, x):
+        out 
= self.net(x) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + return logits + +class Node(nn.Module): + def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect): + super().__init__() + self.ops = nn.ModuleList() + choice_keys = [] + for i in range(num_prev_nodes): + stride = 2 if i < num_downsample_connect else 1 + choice_keys.append("{}_p{}".format(node_id, i)) + self.ops.append( + nn.LayerChoice([ + ops.PoolBN('max', channels, 3, stride, 1, affine=False), + ops.PoolBN('avg', channels, 3, stride, 1, affine=False), + nn.Identity() if stride == 1 else ops.FactorizedReduce(channels, channels, affine=False), + ops.SepConv(channels, channels, 3, stride, 1, affine=False), + ops.SepConv(channels, channels, 5, stride, 2, affine=False), + ops.DilConv(channels, channels, 3, stride, 2, 2, affine=False), + ops.DilConv(channels, channels, 5, stride, 4, 2, affine=False) + ])) + self.drop_path = ops.DropPath() + self.input_switch = nn.InputChoice(n_candidates=num_prev_nodes, n_chosen=2) + + def forward(self, prev_nodes: List['Tensor']) -> 'Tensor': + #assert self.ops.__len__() == len(prev_nodes) + #out = [op(node) for op, node in zip(self.ops, prev_nodes)] + out = [] + for i, op in enumerate(self.ops): + out.append(op(prev_nodes[i])) + #out = [self.drop_path(o) if o is not None else None for o in out] + return self.input_switch(out) + +class Cell(nn.Module): + + def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction): + super().__init__() + self.reduction = reduction + self.n_nodes = n_nodes + + # If previous cell is reduction cell, current input size does not match with + # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. + if reduction_p: + self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False) + else: + self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False) + self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False) + + # generate dag + self.mutable_ops = nn.ModuleList() + for depth in range(2, self.n_nodes + 2): + self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth), + depth, channels, 2 if reduction else 0)) + + def forward(self, s0, s1): + # s0, s1 are the outputs of previous previous cell and previous cell, respectively. + tensors = [self.preproc0(s0), self.preproc1(s1)] + new_tensors = [] + for node in self.mutable_ops: + tmp = tensors + new_tensors + cur_tensor = node(tmp) + new_tensors.append(cur_tensor) + + output = torch.cat(new_tensors, dim=1) + return output + +class CNN(nn.Module): + + def __init__(self, input_size, in_channels, channels, n_classes, n_layers, n_nodes=4, + stem_multiplier=3, auxiliary=False): + super().__init__() + self.in_channels = in_channels + self.channels = channels + self.n_classes = n_classes + self.n_layers = n_layers + self.aux_pos = 2 * n_layers // 3 if auxiliary else -1 + + c_cur = stem_multiplier * self.channels + self.stem = nn.Sequential( + nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(c_cur) + ) + + # for the first cell, stem is used for both s0 and s1 + # [!] channels_pp and channels_p is output channel size, but c_cur is input channel size. + channels_pp, channels_p, c_cur = c_cur, c_cur, channels + + self.cells = nn.ModuleList() + reduction_p, reduction = False, False + for i in range(n_layers): + reduction_p, reduction = reduction, False + # Reduce featuremap size and double channels in 1/3 and 2/3 layer. 
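+            # e.g. with n_layers = 8, the reduction cells sit at indices 2 and 5.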
+ if i in [n_layers // 3, 2 * n_layers // 3]: + c_cur *= 2 + reduction = True + + cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction) + self.cells.append(cell) + c_cur_out = c_cur * n_nodes + channels_pp, channels_p = channels_p, c_cur_out + + #if i == self.aux_pos: + # self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.linear = nn.Linear(channels_p, n_classes) + + def forward(self, x): + s0 = s1 = self.stem(x) + + #aux_logits = None + for i, cell in enumerate(self.cells): + s0, s1 = s1, cell(s0, s1) + #if i == self.aux_pos and self.training: + # aux_logits = self.aux_head(s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + + #if aux_logits is not None: + # return logits, aux_logits + return logits + + def drop_path_prob(self, p): + for module in self.modules(): + if isinstance(module, ops.DropPath): + module.p = p + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) diff --git a/test/retiarii_test/cgo/ops.py b/test/retiarii_test/cgo/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..45b1e79eabec34f42f8446e18e5e31613f8e25ef --- /dev/null +++ b/test/retiarii_test/cgo/ops.py @@ -0,0 +1,133 @@ +import torch +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import basic_unit + +@basic_unit +class DropPath(nn.Module): + def __init__(self, p=0.): + """ + Drop path with probability. + Parameters + ---------- + p : float + Probability of an path to be zeroed. + """ + super().__init__() + self.p = p + + def forward(self, x): + if self.training and self.p > 0.: + keep_prob = 1. - self.p + # per data point mask + mask = torch.zeros((x.size(0), 1, 1, 1), device=x.device).bernoulli_(keep_prob) + return x / keep_prob * mask + + return x + +@basic_unit +class PoolBN(nn.Module): + """ + AvgPool or MaxPool with BN. `pool_type` must be `max` or `avg`. + """ + def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True): + super().__init__() + if pool_type.lower() == 'max': + self.pool = nn.MaxPool2d(kernel_size, stride, padding) + elif pool_type.lower() == 'avg': + self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False) + else: + raise ValueError() + + self.bn = nn.BatchNorm2d(C, affine=affine) + + def forward(self, x): + out = self.pool(x) + out = self.bn(out) + return out + +@basic_unit +class StdConv(nn.Module): + """ + Standard conv: ReLU - Conv - BN + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class FacConv(nn.Module): + """ + Factorized conv: ReLU - Conv(Kx1) - Conv(1xK) - BN + """ + def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False), + nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class DilConv(nn.Module): + """ + (Dilated) depthwise separable conv. + ReLU - (Dilated) depthwise separable - Pointwise - BN. + If dilation == 2, 3x3 conv => 5x5 receptive field, 5x5 conv => 9x9 receptive field. 
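+    In general, the dilated conv sees a receptive field of dilation * (kernel_size - 1) + 1.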
+ """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in, + bias=False), + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class SepConv(nn.Module): + """ + Depthwise separable conv. + DilConv(dilation=1) * 2. + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine), + DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class FactorizedReduce(nn.Module): + """ + Reduce feature map size by factorized pointwise (stride=2). + """ + def __init__(self, C_in, C_out, affine=True): + super().__init__() + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + x = self.relu(x) + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out diff --git a/test/retiarii_test/cgo/test.py b/test/retiarii_test/cgo/test.py new file mode 100644 index 0000000000000000000000000000000000000000..3fdbfdcdf17e084f30c639e6ced9f3219647284f --- /dev/null +++ b/test/retiarii_test/cgo/test.py @@ -0,0 +1,54 @@ +import json +import os +import sys +import torch +from pathlib import Path + +import nni.retiarii.evaluator.pytorch.cgo.evaluator as cgo +import nni.retiarii.evaluator.pytorch.lightning as pl +import nni.retiarii.strategy as strategy +from nni.retiarii import serialize +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from darts_model import CNN + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = serialize(CIFAR10, root='data/cifar10', train=True, download=True, transform=train_transform) + test_dataset = serialize(CIFAR10, root='data/cifar10', train=False, download=True, transform=valid_transform) + trainer = cgo.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2) + + simple_strategy = strategy.Random() + + exp = RetiariiExperiment(base_model, trainer, [], simple_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'darts_search' + exp_config.execution_engine = 'cgo' + exp_config.trial_concurrency = 3 + # since CGO may merge multiple trials into one, RetiariiExperiment may run more trials than max_trial_number + # when max_trial_number = 3, it actually runs 9 models since each merged trial contains 3 trials from strategy + exp_config.max_trial_number = 100 + 
exp_config.devices = ['cuda:0', 'cuda:1', 'cuda:2'] + exp_config.trial_gpu_number = 1 + exp_config.batch_waiting_time = 100 + exp_config.training_service.use_active_gpu = True + exp_config.training_service.gpu_indices = [0, 1, 2] + + exp.run(exp_config, 8081) diff --git a/test/retiarii_test/cgo_mnasnet/base_mnasnet.py b/test/retiarii_test/cgo_mnasnet/base_mnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f431812e3cb07ab3ff43f8344d43dcf7faca0d96 --- /dev/null +++ b/test/retiarii_test/cgo_mnasnet/base_mnasnet.py @@ -0,0 +1,298 @@ +from nni.retiarii import basic_unit +import nni.retiarii.nn.pytorch as nn +import warnings + +import torch +import torch.nn as torch_nn +from torchvision.models.utils import load_state_dict_from_url +import torch.nn.functional as F + +import sys +from pathlib import Path +sys.path.append(str(Path(__file__).resolve().parents[2])) + +# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is +# 1.0 - tensorflow. +_BN_MOMENTUM = 1 - 0.9997 +_FIRST_DEPTH = 32 +_MOBILENET_V2_FILTERS = [16, 24, 32, 64, 96, 160, 320] +_MOBILENET_V2_NUM_LAYERS = [1, 2, 3, 4, 3, 3, 1] + + +class _ResidualBlock(nn.Module): + def __init__(self, net): + super().__init__() + self.net = net + + def forward(self, x): + return self.net(x) + x + + +class _InvertedResidual(nn.Module): + + def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, skip, bn_momentum=0.1): + super(_InvertedResidual, self).__init__() + assert stride in [1, 2] + assert kernel_size in [3, 5] + mid_ch = in_ch * expansion_factor + self.apply_residual = skip and in_ch == out_ch and stride == 1 + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. + nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum)) + + def forward(self, input): + if self.apply_residual: + ret = self.layers(input) + input + else: + ret = self.layers(input) + return ret + + +def _stack_inverted_residual(in_ch, out_ch, kernel_size, skip, stride, exp_factor, repeats, bn_momentum): + """ Creates a stack of inverted residuals. """ + assert repeats >= 1 + # First one has no skip, because feature map size changes. 
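+    # The remaining repeats keep in_ch == out_ch and stride 1, so the residual skip can apply.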
+ first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, skip, bn_momentum=bn_momentum) + remaining = [] + for _ in range(1, repeats): + remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, skip, bn_momentum=bn_momentum)) + return nn.Sequential(first, *remaining) + + +def _stack_normal_conv(in_ch, out_ch, kernel_size, skip, dconv, stride, repeats, bn_momentum): + assert repeats >= 1 + stack = [] + for i in range(repeats): + s = stride if i == 0 else 1 + if dconv: + modules = [ + nn.Conv2d(in_ch, in_ch, kernel_size, padding=kernel_size // 2, stride=s, groups=in_ch, bias=False), + nn.BatchNorm2d(in_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + nn.Conv2d(in_ch, out_ch, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum) + ] + else: + modules = [ + nn.Conv2d(in_ch, out_ch, kernel_size, padding=kernel_size // 2, stride=s, bias=False), + nn.ReLU(inplace=True), + nn.BatchNorm2d(out_ch, momentum=bn_momentum) + ] + if skip and in_ch == out_ch and s == 1: + # use different implementation for skip and noskip to align with pytorch + stack.append(_ResidualBlock(nn.Sequential(*modules))) + else: + stack += modules + in_ch = out_ch + return stack + + +def _round_to_multiple_of(val, divisor, round_up_bias=0.9): + """ Asymmetric rounding to make `val` divisible by `divisor`. With default + bias, will round up, unless the number is no more than 10% greater than the + smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """ + assert 0.0 < round_up_bias < 1.0 + new_val = max(divisor, int(val + divisor / 2) // divisor * divisor) + return new_val if new_val >= round_up_bias * val else new_val + divisor + + +def _get_depths(depths, alpha): + """ Scales tensor depths as in reference MobileNet code, prefers rouding up + rather than down. """ + return [_round_to_multiple_of(depth * alpha, 8) for depth in depths] + + +class MNASNet(nn.Module): + """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This + implements the B1 variant of the model. + >>> model = MNASNet(1000, 1.0) + >>> x = torch.rand(1, 3, 224, 224) + >>> y = model(x) + >>> y.dim() + 1 + >>> y.nelement() + 1000 + """ + # Version 2 adds depth scaling in the initial stages of the network. + _version = 2 + + def __init__(self, alpha, depths, convops, kernel_sizes, num_layers, + skips, num_classes=1000, dropout=0.2): + super().__init__() + assert alpha > 0.0 + assert len(depths) == len(convops) == len(kernel_sizes) == len(num_layers) == len(skips) == 7 + self.alpha = alpha + self.num_classes = num_classes + depths = _get_depths([_FIRST_DEPTH] + depths, alpha) + base_filter_sizes = [16, 24, 40, 80, 96, 192, 320] + exp_ratios = [3, 3, 3, 6, 6, 6, 6] + strides = [1, 2, 2, 2, 1, 2, 1] + layers = [ + # First layer: regular conv. 
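+        # The stride-2 stem halves the spatial resolution (e.g. 224x224 -> 112x112).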
+ nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + ] + count = 0 + # for conv, prev_depth, depth, ks, skip, stride, repeat, exp_ratio in \ + # zip(convops, depths[:-1], depths[1:], kernel_sizes, skips, strides, num_layers, exp_ratios): + for filter_size, exp_ratio, stride in zip(base_filter_sizes, exp_ratios, strides): + # TODO: restrict that "choose" can only be used within mutator + ph = nn.Placeholder(label=f'mutable_{count}', **{ + 'kernel_size_options': [1, 3, 5], + 'n_layer_options': [1, 2, 3, 4], + 'op_type_options': ['__mutated__.base_mnasnet.RegularConv', + '__mutated__.base_mnasnet.DepthwiseConv', + '__mutated__.base_mnasnet.MobileConv'], + # 'se_ratio_options': [0, 0.25], + 'skip_options': ['identity', 'no'], + 'n_filter_options': [int(filter_size*x) for x in [0.75, 1.0, 1.25]], + 'exp_ratio': exp_ratio, + 'stride': stride, + 'in_ch': depths[0] if count == 0 else None + }) + layers.append(ph) + '''if conv == "mconv": + # MNASNet blocks: stacks of inverted residuals. + layers.append(_stack_inverted_residual(prev_depth, depth, ks, skip, + stride, exp_ratio, repeat, _BN_MOMENTUM)) + else: + # Normal conv and depth-separated conv + layers += _stack_normal_conv(prev_depth, depth, ks, skip, conv == "dconv", + stride, repeat, _BN_MOMENTUM)''' + count += 1 + if count >= 2: + break + layers += [ + # Final mapping to classifier input. + nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + ] + self.layers = nn.Sequential(*layers) + self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), + nn.Linear(1280, num_classes)) + self._initialize_weights() + #self.for_test = 10 + + def forward(self, x): + # if self.for_test == 10: + x = self.layers(x) + # Equivalent to global avgpool and removing H and W dimensions. 
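+        # (N, C, H, W) -> mean over dims [2, 3] -> (N, C)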
+ x = x.mean([2, 3]) + x = F.relu(x) + return self.classifier(x) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + torch_nn.init.kaiming_normal_(m.weight, mode="fan_out", + nonlinearity="relu") + if m.bias is not None: + torch_nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + torch_nn.init.ones_(m.weight) + torch_nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + torch_nn.init.kaiming_uniform_(m.weight, mode="fan_out", + nonlinearity="sigmoid") + torch_nn.init.zeros_(m.bias) + + +def test_model(model): + model(torch.randn(2, 3, 224, 224)) + + +# ====================definition of candidate op classes +BN_MOMENTUM = 1 - 0.9997 + + +class RegularConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + self.conv = nn.Conv2d(in_ch, out_ch, kernel_size, padding=kernel_size // 2, stride=stride, bias=False) + self.relu = nn.ReLU(inplace=True) + self.bn = nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM) + + def forward(self, x): + out = self.bn(self.relu(self.conv(x))) + if self.skip == 'identity': + out = out + x + return out + + +class DepthwiseConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + self.conv1 = nn.Conv2d(in_ch, in_ch, kernel_size, padding=kernel_size // 2, stride=stride, groups=in_ch, bias=False) + self.bn1 = nn.BatchNorm2d(in_ch, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(in_ch, out_ch, 1, padding=0, stride=1, bias=False) + self.bn2 = nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM) + + def forward(self, x): + out = self.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + if self.skip == 'identity': + out = out + x + return out + + +class MobileConv(nn.Module): + def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride): + super().__init__() + self.kernel_size = kernel_size + self.in_ch = in_ch + self.out_ch = out_ch + self.skip = skip + self.exp_ratio = exp_ratio + self.stride = stride + + mid_ch = in_ch * exp_ratio + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=(kernel_size - 1) // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. 
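+            # (a linear bottleneck, as in MobileNetV2)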
+ nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM)) + + def forward(self, x): + out = self.layers(x) + if self.skip == 'identity': + out = out + x + return out + + +# mnasnet0_5 +ir_module = _InvertedResidual(16, 16, 3, 1, 1, True) diff --git a/test/retiarii_test/cgo_mnasnet/mutator.py b/test/retiarii_test/cgo_mnasnet/mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..1a55d673af0cbd15bc694f14e830fb85686af97c --- /dev/null +++ b/test/retiarii_test/cgo_mnasnet/mutator.py @@ -0,0 +1,64 @@ +import logging +import sys +from pathlib import Path +sys.path.append(str(Path(__file__).resolve().parents[2])) +from nni.retiarii import Mutator + +from base_mnasnet import RegularConv, DepthwiseConv, MobileConv + +_logger = logging.getLogger(__name__) + +class BlockMutator(Mutator): + def __init__(self, target: str): + super(BlockMutator, self).__init__() + self.target = target + + def mutate(self, model): + nodes = model.get_nodes_by_label(self.target) + assert len(nodes) == 1 + node = nodes[0] + graph = node.graph + + related_info = node.operation.parameters + kernel_size = self.choice(related_info['kernel_size_options']) + op_type = self.choice(related_info['op_type_options']) + #self.choice(related_info['se_ratio_options']) + skip = self.choice(related_info['skip_options']) + n_filter = self.choice(related_info['n_filter_options']) + + if related_info['in_ch'] is not None: + in_ch = related_info['in_ch'] + else: + assert len(node.predecessors) == 1 + the_node = node.predecessors[0] + _logger.debug(repr(the_node.operation.parameters)) + _logger.debug(the_node.__repr__()) + in_ch = the_node.operation.parameters['out_ch'] + + # update the placeholder to be a new operation + node.update_operation(op_type, { + 'kernel_size': kernel_size, + 'in_ch': in_ch, + 'out_ch': n_filter, + 'skip': 'no', + 'exp_ratio': related_info['exp_ratio'], + 'stride': related_info['stride'] + }) + + # insert new nodes after the placeholder + n_layer = self.choice(related_info['n_layer_options']) + for i in range(1, n_layer): + node = graph.insert_node_on_edge(node.outgoing_edges[0], + '{}_{}'.format(self.target, i), + op_type, + {'kernel_size': kernel_size, + 'in_ch': n_filter, + 'out_ch': n_filter, + 'skip': skip, + 'exp_ratio': related_info['exp_ratio'], + 'stride': 1}) + + # fix possible shape mismatch + # TODO: use formal method function to update parameters + if len(node.successors) == 1 and 'in_channels' in node.successors[0].operation.parameters: + node.successors[0].operation.parameters['in_channels'] = n_filter \ No newline at end of file diff --git a/test/retiarii_test/cgo_mnasnet/test.py b/test/retiarii_test/cgo_mnasnet/test.py new file mode 100644 index 0000000000000000000000000000000000000000..4569fa45ec8c2373a7cf0403eefc6fc763c98985 --- /dev/null +++ b/test/retiarii_test/cgo_mnasnet/test.py @@ -0,0 +1,80 @@ +import os +import sys +import torch +from pathlib import Path + +import nni.retiarii.evaluator.pytorch.lightning as pl +import nni.retiarii.evaluator.pytorch.cgo.evaluator as cgo +from nni.retiarii import serialize +from base_mnasnet import MNASNet +from nni.experiment import RemoteMachineConfig +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from nni.retiarii.strategy import TPEStrategy +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from mutator import BlockMutator + +if __name__ == '__main__': + _DEFAULT_DEPTHS = [16, 24, 40, 80, 96, 192, 320] + _DEFAULT_CONVOPS = 
["dconv", "mconv", "mconv", "mconv", "mconv", "mconv", "mconv"] + _DEFAULT_SKIPS = [False, True, True, True, True, True, True] + _DEFAULT_KERNEL_SIZES = [3, 3, 5, 5, 3, 5, 3] + _DEFAULT_NUM_LAYERS = [1, 3, 3, 3, 2, 4, 1] + + base_model = MNASNet(0.5, _DEFAULT_DEPTHS, _DEFAULT_CONVOPS, _DEFAULT_KERNEL_SIZES, + _DEFAULT_NUM_LAYERS, _DEFAULT_SKIPS) + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + train_dataset = serialize(CIFAR10, root='data/cifar10', train=True, download=True, transform=train_transform) + test_dataset = serialize(CIFAR10, root='data/cifar10', train=False, download=True, transform=valid_transform) + # trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + # val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + # max_epochs=1, limit_train_batches=0.2) + trainer = cgo.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2) + + applied_mutators = [ + BlockMutator('mutable_0'), + BlockMutator('mutable_1') + ] + + simple_strategy = TPEStrategy() + + exp = RetiariiExperiment(base_model, trainer, applied_mutators, simple_strategy) + + exp_config = RetiariiExeConfig('remote') + exp_config.experiment_name = 'darts_search' + exp_config.trial_concurrency = 3 + exp_config.max_trial_number = 10 + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = True + exp_config.training_service.reuse_mode = True + exp_config.training_service.gpu_indices = [0, 1, 2] + exp_config.max_concurrency_cgo = 1 + exp_config.batch_waiting_time = 0 + + rm_conf = RemoteMachineConfig() + rm_conf.host = '127.0.0.1' + rm_conf.user = 'xxx' + rm_conf.password = 'xxx' + rm_conf.port = 22 + rm_conf.python_path = '/home/xxx/py38/bin' + rm_conf.gpu_indices = [0, 1, 2] + rm_conf.use_active_gpu = True + rm_conf.max_trial_number_per_gpu = 3 + + exp_config.training_service.machine_list = [rm_conf] + exp_config.execution_engine = 'cgo' + + exp.run(exp_config, 8099) \ No newline at end of file diff --git a/test/retiarii_test/darts/.nniignore b/test/retiarii_test/darts/.nniignore new file mode 100644 index 0000000000000000000000000000000000000000..68017eec8cffcbabc977e0df72b07ab98b20880e --- /dev/null +++ b/test/retiarii_test/darts/.nniignore @@ -0,0 +1,2 @@ +lightning_logs +data \ No newline at end of file diff --git a/test/retiarii_test/darts/darts_model.py b/test/retiarii_test/darts/darts_model.py new file mode 100644 index 0000000000000000000000000000000000000000..9a01e78b400c011130e7149aefd5da23f71f4893 --- /dev/null +++ b/test/retiarii_test/darts/darts_model.py @@ -0,0 +1,167 @@ +from collections import OrderedDict +from nni.retiarii.serializer import model_wrapper +from typing import (List, Optional) + +import torch +import torch.nn as torch_nn +#sys.path.append(str(Path(__file__).resolve().parents[2])) + +import ops +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import basic_unit, model_wrapper + +@basic_unit +class AuxiliaryHead(nn.Module): + """ Auxiliary head in 2/3 place of network to let the gradient flow well """ + + def __init__(self, input_size, C, n_classes): + """ assuming input size 
7x7 or 8x8 """ + assert input_size in [7, 8] + super().__init__() + self.net = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False), # 2x2 out + nn.Conv2d(C, 128, kernel_size=1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, kernel_size=2, bias=False), # 1x1 out + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.linear = nn.Linear(768, n_classes) + + def forward(self, x): + out = self.net(x) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + return logits + +class Node(nn.Module): + def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect): + super().__init__() + self.ops = nn.ModuleList() + choice_keys = [] + for i in range(num_prev_nodes): + stride = 2 if i < num_downsample_connect else 1 + choice_keys.append("{}_p{}".format(node_id, i)) + self.ops.append( + nn.LayerChoice([ + ops.PoolBN('max', channels, 3, stride, 1, affine=False), + ops.PoolBN('avg', channels, 3, stride, 1, affine=False), + nn.Identity() if stride == 1 else ops.FactorizedReduce(channels, channels, affine=False), + ops.SepConv(channels, channels, 3, stride, 1, affine=False), + ops.SepConv(channels, channels, 5, stride, 2, affine=False), + ops.DilConv(channels, channels, 3, stride, 2, 2, affine=False), + ops.DilConv(channels, channels, 5, stride, 4, 2, affine=False) + ])) + self.drop_path = ops.DropPath() + self.input_switch = nn.InputChoice(n_candidates=num_prev_nodes, n_chosen=2) + + def forward(self, prev_nodes: List['Tensor']) -> 'Tensor': + #assert self.ops.__len__() == len(prev_nodes) + #out = [op(node) for op, node in zip(self.ops, prev_nodes)] + out = [] + for i, op in enumerate(self.ops): + out.append(op(prev_nodes[i])) + #out = [self.drop_path(o) if o is not None else None for o in out] + return self.input_switch(out) + +class Cell(nn.Module): + + def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction): + super().__init__() + self.reduction = reduction + self.n_nodes = n_nodes + + # If previous cell is reduction cell, current input size does not match with + # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. + if reduction_p: + self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False) + else: + self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False) + self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False) + + # generate dag + self.mutable_ops = nn.ModuleList() + for depth in range(2, self.n_nodes + 2): + self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth), + depth, channels, 2 if reduction else 0)) + + def forward(self, s0, s1): + # s0, s1 are the outputs of previous previous cell and previous cell, respectively. 
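+        # Each node consumes all earlier tensors; its InputChoice keeps 2 of them (n_chosen=2).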
+ tensors = [self.preproc0(s0), self.preproc1(s1)] + new_tensors = [] + for node in self.mutable_ops: + tmp = tensors + new_tensors + cur_tensor = node(tmp) + new_tensors.append(cur_tensor) + + output = torch.cat(new_tensors, dim=1) + return output + +@model_wrapper +class CNN(nn.Module): + + def __init__(self, input_size, in_channels, channels, n_classes, n_layers, n_nodes=4, + stem_multiplier=3, auxiliary=False): + super().__init__() + self.in_channels = in_channels + self.channels = channels + self.n_classes = n_classes + self.n_layers = n_layers + self.aux_pos = 2 * n_layers // 3 if auxiliary else -1 + + c_cur = stem_multiplier * self.channels + self.stem = nn.Sequential( + nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(c_cur) + ) + + # for the first cell, stem is used for both s0 and s1 + # [!] channels_pp and channels_p is output channel size, but c_cur is input channel size. + channels_pp, channels_p, c_cur = c_cur, c_cur, channels + + self.cells = nn.ModuleList() + reduction_p, reduction = False, False + for i in range(n_layers): + reduction_p, reduction = reduction, False + # Reduce featuremap size and double channels in 1/3 and 2/3 layer. + if i in [n_layers // 3, 2 * n_layers // 3]: + c_cur *= 2 + reduction = True + + cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction) + self.cells.append(cell) + c_cur_out = c_cur * n_nodes + channels_pp, channels_p = channels_p, c_cur_out + + #if i == self.aux_pos: + # self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.linear = nn.Linear(channels_p, n_classes) + + def forward(self, x): + s0 = s1 = self.stem(x) + + #aux_logits = None + for i, cell in enumerate(self.cells): + s0, s1 = s1, cell(s0, s1) + #if i == self.aux_pos and self.training: + # aux_logits = self.aux_head(s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.linear(out) + + #if aux_logits is not None: + # return logits, aux_logits + return logits + + def drop_path_prob(self, p): + for module in self.modules(): + if isinstance(module, ops.DropPath): + module.p = p + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) diff --git a/test/retiarii_test/darts/ops.py b/test/retiarii_test/darts/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..45b1e79eabec34f42f8446e18e5e31613f8e25ef --- /dev/null +++ b/test/retiarii_test/darts/ops.py @@ -0,0 +1,133 @@ +import torch +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import basic_unit + +@basic_unit +class DropPath(nn.Module): + def __init__(self, p=0.): + """ + Drop path with probability. + Parameters + ---------- + p : float + Probability of an path to be zeroed. + """ + super().__init__() + self.p = p + + def forward(self, x): + if self.training and self.p > 0.: + keep_prob = 1. - self.p + # per data point mask + mask = torch.zeros((x.size(0), 1, 1, 1), device=x.device).bernoulli_(keep_prob) + return x / keep_prob * mask + + return x + +@basic_unit +class PoolBN(nn.Module): + """ + AvgPool or MaxPool with BN. `pool_type` must be `max` or `avg`. 
+ """ + def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True): + super().__init__() + if pool_type.lower() == 'max': + self.pool = nn.MaxPool2d(kernel_size, stride, padding) + elif pool_type.lower() == 'avg': + self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False) + else: + raise ValueError() + + self.bn = nn.BatchNorm2d(C, affine=affine) + + def forward(self, x): + out = self.pool(x) + out = self.bn(out) + return out + +@basic_unit +class StdConv(nn.Module): + """ + Standard conv: ReLU - Conv - BN + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class FacConv(nn.Module): + """ + Factorized conv: ReLU - Conv(Kx1) - Conv(1xK) - BN + """ + def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False), + nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class DilConv(nn.Module): + """ + (Dilated) depthwise separable conv. + ReLU - (Dilated) depthwise separable - Pointwise - BN. + If dilation == 2, 3x3 conv => 5x5 receptive field, 5x5 conv => 9x9 receptive field. + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in, + bias=False), + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class SepConv(nn.Module): + """ + Depthwise separable conv. + DilConv(dilation=1) * 2. + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine), + DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine) + ) + + def forward(self, x): + return self.net(x) + +@basic_unit +class FactorizedReduce(nn.Module): + """ + Reduce feature map size by factorized pointwise (stride=2). 
+ """ + def __init__(self, C_in, C_out, affine=True): + super().__init__() + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + x = self.relu(x) + out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1) + out = self.bn(out) + return out diff --git a/test/retiarii_test/darts/test.py b/test/retiarii_test/darts/test.py new file mode 100644 index 0000000000000000000000000000000000000000..933e05d25f8386e6f5a2c68bd3cf95933f629ebb --- /dev/null +++ b/test/retiarii_test/darts/test.py @@ -0,0 +1,49 @@ +import json +import os +import sys +import torch +from pathlib import Path + +import nni.retiarii.evaluator.pytorch.lightning as pl +import nni.retiarii.strategy as strategy +from nni.retiarii import serialize +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from darts_model import CNN + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = serialize(CIFAR10, root='data/cifar10', train=True, download=True, transform=train_transform) + test_dataset = serialize(CIFAR10, root='data/cifar10', train=False, download=True, transform=valid_transform) + trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2, + enable_progress_bar=False) + + simple_strategy = strategy.Random() + + exp = RetiariiExperiment(base_model, trainer, [], simple_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'darts_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 10 + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = True + exp_config.training_service.gpu_indices = [1, 2] + + exp.run(exp_config, 8081) \ No newline at end of file diff --git a/test/retiarii_test/darts/test_oneshot.py b/test/retiarii_test/darts/test_oneshot.py new file mode 100644 index 0000000000000000000000000000000000000000..6cdcb1fba242d71776d2bc9c2763af5697eb175b --- /dev/null +++ b/test/retiarii_test/darts/test_oneshot.py @@ -0,0 +1,103 @@ +import json +import numpy as np +import os +import sys +import torch +import torch.nn as nn +from pathlib import Path +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from nni.retiarii.experiment.pytorch import RetiariiExperiment +from nni.retiarii.oneshot.pytorch import DartsTrainer + +from darts_model import CNN + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. 
+ mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + + +def get_dataset(cls, cutout_length=0): + MEAN = [0.49139968, 0.48215827, 0.44653124] + STD = [0.24703233, 0.24348505, 0.26158768] + transf = [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip() + ] + normalize = [ + transforms.ToTensor(), + transforms.Normalize(MEAN, STD) + ] + cutout = [] + if cutout_length > 0: + cutout.append(Cutout(cutout_length)) + + train_transform = transforms.Compose(transf + normalize + cutout) + valid_transform = transforms.Compose(normalize) + + if cls == "cifar10": + dataset_train = CIFAR10(root="./data/cifar10", train=True, download=True, transform=train_transform) + dataset_valid = CIFAR10(root="./data/cifar10", train=False, download=True, transform=valid_transform) + else: + raise NotImplementedError + return dataset_train, dataset_valid + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = dict() + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() + return res + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) + + dataset_train, dataset_valid = get_dataset("cifar10") + criterion = nn.CrossEntropyLoss() + optim = torch.optim.SGD(base_model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, 50, eta_min=0.001) + trainer = DartsTrainer( + model=base_model, + loss=criterion, + metrics=lambda output, target: accuracy(output, target, topk=(1,)), + optimizer=optim, + num_epochs=50, + dataset=dataset_train, + batch_size=32, + log_frequency=10, + unrolled=False + ) + + exp = RetiariiExperiment(base_model, trainer) + exp.run() diff --git a/test/retiarii_test/darts/test_training_service.py b/test/retiarii_test/darts/test_training_service.py new file mode 100644 index 0000000000000000000000000000000000000000..991a9453d1e69b58f71eeb44980c1fe53ae4ff2e --- /dev/null +++ b/test/retiarii_test/darts/test_training_service.py @@ -0,0 +1,64 @@ +import json +from nni.common.device import GPUDevice +import os +import sys +import torch +from pathlib import Path + +import nni.retiarii.evaluator.pytorch.lightning as pl +import nni.retiarii.strategy as strategy +from nni.experiment import RemoteMachineConfig +from nni.retiarii import serialize +from nni.retiarii.experiment.pytorch import RetiariiExperiment, RetiariiExeConfig +from torchvision import transforms +from torchvision.datasets import CIFAR10 + +from darts_model import CNN + +if __name__ == '__main__': + base_model = CNN(32, 3, 16, 10, 8) + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = serialize(CIFAR10, root='data/cifar10', train=True, download=True, transform=train_transform) + test_dataset = serialize(CIFAR10, root='data/cifar10', train=False, download=True, transform=valid_transform) + trainer 
= pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.2) + + simple_strategy = strategy.Random() + + exp = RetiariiExperiment(base_model, trainer, [], simple_strategy) + + exp_config = RetiariiExeConfig('remote') + exp_config.experiment_name = 'darts_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = 10 + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = True + exp_config.training_service.reuse_mode = True + exp_config.training_service.gpu_indices = [0, 1, 2] + + rm_conf = RemoteMachineConfig() + rm_conf.host = '127.0.0.1' + rm_conf.user = 'xxx' + rm_conf.password = 'xxx' + rm_conf.port = 22 + rm_conf.python_path = '/home/xxx/py38/bin' + rm_conf.gpu_indices = [0, 1, 2] + rm_conf.use_active_gpu = True + rm_conf.max_trial_number_per_gpu = 3 + + exp_config.training_service.machine_list = [rm_conf] + exp_config.execution_engine = 'py' + + exp.run(exp_config, 8081) diff --git a/test/retiarii_test/naive/search.py b/test/retiarii_test/naive/search.py new file mode 100644 index 0000000000000000000000000000000000000000..370a673ced70064745cebc3ef53f94a9e2ea0587 --- /dev/null +++ b/test/retiarii_test/naive/search.py @@ -0,0 +1,110 @@ +import argparse + +import nni.retiarii.nn.pytorch as nn +import nni.retiarii.strategy as strategy +import nni.retiarii.evaluator.pytorch.lightning as pl +import torch +import torch.nn.functional as F +from nni.retiarii import serialize, model_wrapper +from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment +from torchvision import transforms +from torchvision.datasets import MNIST + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, in_ch, out_ch): + super().__init__() + self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, groups=in_ch) + self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1) + + def forward(self, x): + return self.pointwise(self.depthwise(x)) + + +@model_wrapper +class ComplexNet(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + self.conv2 = nn.LayerChoice([ + nn.Conv2d(32, 64, 3, 1), + DepthwiseSeparableConv(32, 64) + ]) + self.dropout1 = nn.Dropout(nn.ValueChoice([0.25, 0.5, 0.75])) + self.dropout2 = nn.Dropout(0.5) + feature = nn.ValueChoice([64, 128, 256]) + self.fc1 = nn.Linear(9216, feature) + self.fc2 = nn.Linear(feature, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(self.conv2(x), 2) + x = torch.flatten(self.dropout1(x), 1) + x = self.fc2(self.dropout2(F.relu(self.fc1(x)))) + output = F.log_softmax(x, dim=1) + return output + + +@model_wrapper +class SimpleNet(nn.Module): + def __init__(self, hidden_size): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.LayerChoice([ + nn.Linear(4*4*50, hidden_size), + nn.Linear(4*4*50, hidden_size, bias=False) + ], label='fc1_choice') + self.fc2 = nn.Linear(hidden_size, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--net', choices=['simple', 'complex'], default='simple') + parser.add_argument('--exec', choices=['python', 'graph'], default='python') + 
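+    # --budget caps max_trial_number; --port is forwarded to exp.run()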
parser.add_argument('--budget', default=2, type=int) + parser.add_argument('--port', default=8899, type=int) + + args = parser.parse_args() + + if args.net == 'simple': + base_model = SimpleNet(32) + else: + base_model = ComplexNet() + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = serialize(MNIST, root='data/mnist', train=True, download=True, transform=transform) + test_dataset = serialize(MNIST, root='data/mnist', train=False, download=True, transform=transform) + trainer = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=2, gpus=1, limit_train_batches=0.1, limit_val_batches=0.1) + + simple_strategy = strategy.Random() + + exp = RetiariiExperiment(base_model, trainer, [], simple_strategy) + + exp_config = RetiariiExeConfig('local') + exp_config.experiment_name = 'mnist_search' + exp_config.trial_concurrency = 2 + exp_config.max_trial_number = args.budget + exp_config.trial_gpu_number = 1 + exp_config.training_service.use_active_gpu = True # Integration test GPU has a Xorg running + export_formatter = 'dict' + + if args.exec == 'graph': + exp_config.execution_engine = 'base' + export_formatter = 'code' + + exp.run(exp_config, args.port) + print('Final model:') + for model_code in exp.export_top_models(formatter=export_formatter): + print(model_code) diff --git a/test/scripts/it.sh b/test/scripts/it.sh new file mode 100644 index 0000000000000000000000000000000000000000..9d6682a9e2aa32d758585ec5a2bf3d41d333670b --- /dev/null +++ b/test/scripts/it.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e +CWD=${PWD} + +## Export certain environment variables for unittest code to work +export COVERAGE_PROCESS_START=${CWD}/.coveragerc +export COVERAGE_DATA_FILE=${CWD}/coverage/data +export COVERAGE_HTML_DIR=${CWD}/coverhtml + +rm ${COVERAGE_DATA_FILE}* +rm -rf ${COVERAGE_HTML_DIR}/* +mkdir ${CWD}/coverage +mkdir ${COVERAGE_HTML_DIR} + +## ------Run integration test------ +echo "===========================Testing: integration test===========================" +coverage run sdk_test.py +coverage combine +coverage html diff --git a/test/scripts/model_compression.sh b/test/scripts/model_compression.sh new file mode 100644 index 0000000000000000000000000000000000000000..1be9c15d0549589dca543684839da03a8cbda327 --- /dev/null +++ b/test/scripts/model_compression.sh @@ -0,0 +1,49 @@ +#!/bin/bash +set -e +CWD=${PWD} + +echo "" +echo "===========================Testing: pruning and speedup===========================" +cd ${CWD}/../examples/model_compress/pruning + +echo "testing fpgm pruning and speedup..." +python3 basic_pruners_torch.py --pruner fpgm --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 +python3 speedup/model_speedup.py --example_name fpgm + +echo "testing slim pruning and speedup..." +python3 basic_pruners_torch.py --pruner slim --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg19 --dataset cifar10 --sparsity 0.7 +python3 speedup/model_speedup.py --example_name slim + +echo "testing l1filter pruning and speedup..." +python3 basic_pruners_torch.py --pruner l1filter --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth +python3 speedup/model_speedup.py --example_name l1filter + +echo "testing apoz pruning and speedup..." 
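+# APoZ ranks filters by the Average Percentage of Zeros in their activations;
+# like l1filter above, it starts from the pretrained vgg16 checkpoint in experiment_data/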
+python3 basic_pruners_torch.py --pruner apoz --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth +python3 speedup/model_speedup.py --example_name apoz + +echo 'testing level pruner pruning' +python3 basic_pruners_torch.py --pruner level --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist + +echo 'testing agp pruning' +python3 basic_pruners_torch.py --pruner agp --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth + +echo 'testing mean_activation pruning' +python3 basic_pruners_torch.py --pruner mean_activation --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth + +echo "testing lottery ticket pruning..." +python3 lottery_torch_mnist_fc.py --train_epochs 1 + +echo "" +echo "===========================Testing: quantizers===========================" +# to be enabled +#echo "testing QAT quantizer..." +#python3 QAT_torch_quantizer.py + +#echo "testing DoReFa quantizer..." +#python3 DoReFaQuantizer_torch_mnist.py + +#echo "testing BNN quantizer..." +#python3 BNN_quantizer_cifar10.py + +rm -rf ./experiment_data/* diff --git a/test/scripts/nas.sh b/test/scripts/nas.sh new file mode 100644 index 0000000000000000000000000000000000000000..f7acc6970c49d4e0242606c51e4cf822cdc933ad --- /dev/null +++ b/test/scripts/nas.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -e +CWD=${PWD} + +echo "" +echo "===========================Testing: NAS===========================" +EXAMPLE_DIR=${CWD}/../examples/nas +RETIARII_TEST_DIR=${CWD}/retiarii_test + +cd $RETIARII_TEST_DIR/naive +for net in "simple" "complex"; do + for exec in "python" "graph"; do + echo "testing multi-trial example on ${net}, ${exec}..." + python3 search.py --net $net --exec $exec + done +done + +echo "testing darts..." +cd $EXAMPLE_DIR/oneshot/darts +python3 search.py --epochs 1 --channels 2 --layers 4 +python3 retrain.py --arc-checkpoint ./checkpoint.json --layers 4 --epochs 1 + +echo "testing enas..." +cd $EXAMPLE_DIR/oneshot/enas +python3 search.py --search-for macro --epochs 1 +python3 search.py --search-for micro --epochs 1 + +#disabled for now +#echo "testing naive..." +#cd $EXAMPLE_DIR/naive +#python3 train.py + +#echo "testing pdarts..." +#cd $EXAMPLE_DIR/legacy/pdarts +#python3 search.py --epochs 1 --channels 4 --nodes 2 --log-frequency 10 --add_layers 0 --add_layers 1 --dropped_ops 3 --dropped_ops 3 diff --git a/test/ut/__init__.py b/test/ut/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c47c88b27b4909f6edc0787bd14aff8feefe303b --- /dev/null +++ b/test/ut/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +""" +Unit test of NNI Python modules. + +Test cases of each module should be placed at same path of their source files. +For example if `nni/tool/annotation` has one test case, it should be placed at `test/ut/tool/test_annotation.py`; +if it has multiple test cases, they should be placed in `test/ut/tool/annotation/` directory. + +"Legacy" test cases carried from NNI v1.x might not follow above convention: + + + Directory `sdk` contains old test cases previously in `src/sdk/pynni/tests`. + + Directory `tools/nnictl` contains old test cases previously in `tools/nni_cmd/tests`. + + Directory `tools/annotation` contains old test cases previously in `tools/nni_annotation`. 
+ + Directory `tools/trial_tool` contains old test cases previously in `tools/nni_trial_tool/test`. +""" + +import os + +os.environ['NNI_PLATFORM'] = 'unittest' +os.environ['NNI_TRIAL_JOB_ID'] = 'test_trial_job_id' diff --git a/test/ut/compression/__init__.py b/test/ut/compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/ut/compression/v1/__init__.py b/test/ut/compression/v1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/ut/compression/v1/test_compression_utils.py b/test/ut/compression/v1/test_compression_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7af8c4b80b107565653612c0a42692efa5d4d0 --- /dev/null +++ b/test/ut/compression/v1/test_compression_utils.py @@ -0,0 +1,188 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import unittest +from unittest import TestCase, main +import torch +import torch.nn as nn +import torchvision.models as models +import numpy as np + +from nni.algorithms.compression.pytorch.pruning import L1FilterPruner +from nni.compression.pytorch.utils.shape_dependency import ChannelDependency +from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict +from nni.compression.pytorch.utils.counter import count_flops_params + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +prefix = 'analysis_test' +model_names = ['alexnet', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg19', + 'resnet18', 'resnet34', 'squeezenet1_1', + 'mobilenet_v2', 'wide_resnet50_2'] + +channel_dependency_ground_truth = { + 'resnet18': [{'layer1.0.conv2', 'layer1.1.conv2', 'conv1'}, + {'layer2.1.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0'}, + {'layer3.0.downsample.0', 'layer3.1.conv2', 'layer3.0.conv2'}, + {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.0.conv2'}], + 'resnet34': [{'conv1', 'layer1.2.conv2', 'layer1.1.conv2', 'layer1.0.conv2'}, + {'layer2.3.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0', + 'layer2.1.conv2', 'layer2.2.conv2'}, + {'layer3.3.conv2', 'layer3.0.conv2', 'layer3.4.conv2', 'layer3.0.downsample.0', + 'layer3.5.conv2', 'layer3.1.conv2', 'layer3.2.conv2'}, + {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.2.conv2', 'layer4.0.conv2'}], + 'mobilenet_v2': [{'features.3.conv.2', 'features.2.conv.2'}, + {'features.6.conv.2', 'features.4.conv.2', 'features.5.conv.2'}, + {'features.8.conv.2', 'features.7.conv.2', + 'features.10.conv.2', 'features.9.conv.2'}, + {'features.11.conv.2', 'features.13.conv.2', + 'features.12.conv.2'}, + {'features.14.conv.2', 'features.16.conv.2', 'features.15.conv.2'}], + 'wide_resnet50_2': [{'layer1.2.conv3', 'layer1.1.conv3', 'layer1.0.conv3', 'layer1.0.downsample.0'}, + {'layer2.1.conv3', 'layer2.0.conv3', 'layer2.0.downsample.0', + 'layer2.2.conv3', 'layer2.3.conv3'}, + {'layer3.3.conv3', 'layer3.0.conv3', 'layer3.2.conv3', 'layer3.0.downsample.0', + 'layer3.1.conv3', 'layer3.4.conv3', 'layer3.5.conv3'}, + {'layer4.1.conv3', 'layer4.2.conv3', 'layer4.0.downsample.0', 'layer4.0.conv3'}], + 'alexnet': [], + 'vgg11': [], + 'vgg11_bn': [], + 'vgg13': [], + 'vgg19': [], + 'squeezenet1_1': [], + 'googlenet': [] + # comments the shufflenet temporary + # because it has the listunpack operation which + # will lead to a graph construction error. + # support the listunpack in the next release. 
+ # 'shufflenet_v2_x1_0': [] +} + +unittest.TestLoader.sortTestMethodsUsing = None + + +class AnalysisUtilsTest(TestCase): + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") + def test_channel_dependency(self): + outdir = os.path.join(prefix, 'dependency') + os.makedirs(outdir, exist_ok=True) + for name in model_names: + print('Analyze channel dependency for %s' % name) + model = getattr(models, name) + net = model().to(device) + dummy_input = torch.ones(1, 3, 224, 224).to(device) + channel_depen = ChannelDependency(net, dummy_input) + depen_sets = channel_depen.dependency_sets + d_set_count = 0 + for d_set in depen_sets: + if len(d_set) > 1: + d_set_count += 1 + assert d_set in channel_dependency_ground_truth[name] + assert d_set_count == len(channel_dependency_ground_truth[name]) + fpath = os.path.join(outdir, name) + channel_depen.export(fpath) + + def get_pruned_index(self, mask): + pruned_indexes = [] + shape = mask.size() + for i in range(shape[0]): + if torch.sum(mask[i]).item() == 0: + pruned_indexes.append(i) + + return pruned_indexes + + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") + def test_mask_conflict(self): + outdir = os.path.join(prefix, 'masks') + os.makedirs(outdir, exist_ok=True) + for name in model_names: + print('Test mask conflict for %s' % name) + model = getattr(models, name) + net = model().to(device) + dummy_input = torch.ones(1, 3, 224, 224).to(device) + # random generate the prune sparsity for each layer + cfglist = [] + for layername, layer in net.named_modules(): + if isinstance(layer, nn.Conv2d): + # pruner cannot allow the sparsity to be 0 or 1 + sparsity = np.random.uniform(0.01, 0.99) + cfg = {'op_types': ['Conv2d'], 'op_names': [ + layername], 'sparsity': sparsity} + cfglist.append(cfg) + pruner = L1FilterPruner(net, cfglist) + pruner.compress() + ck_file = os.path.join(outdir, '%s.pth' % name) + mask_file = os.path.join(outdir, '%s_mask' % name) + pruner.export_model(ck_file, mask_file) + pruner._unwrap_model() + # Fix the mask conflict + fixed_mask = fix_mask_conflict(mask_file, net, dummy_input) + + # use the channel dependency groud truth to check if + # fix the mask conflict successfully + for dset in channel_dependency_ground_truth[name]: + lset = list(dset) + for i, _ in enumerate(lset): + assert fixed_mask[lset[0]]['weight'].size( + 0) == fixed_mask[lset[i]]['weight'].size(0) + w_index1 = self.get_pruned_index( + fixed_mask[lset[0]]['weight']) + w_index2 = self.get_pruned_index( + fixed_mask[lset[i]]['weight']) + assert w_index1 == w_index2 + if hasattr(fixed_mask[lset[0]], 'bias'): + b_index1 = self.get_pruned_index( + fixed_mask[lset[0]]['bias']) + b_index2 = self.get_pruned_index( + fixed_mask[lset[i]]['bias']) + assert b_index1 == b_index2 + + +class FlopsCounterTest(TestCase): + def test_flops_params(self): + class Model1(nn.Module): + def __init__(self): + super(Model1, self).__init__() + self.conv = nn.Conv2d(3, 5, 1, 1) + self.bn = nn.BatchNorm2d(5) + self.relu = nn.LeakyReLU() + self.linear = nn.Linear(20, 10) + self.upsample = nn.UpsamplingBilinear2d(size=2) + self.pool = nn.AdaptiveAvgPool2d((2, 2)) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + x = self.upsample(x) + x = self.pool(x) + x = x.view(x.size(0), -1) + x = self.linear(x) + return x + + class Model2(nn.Module): + def __init__(self): + super(Model2, self).__init__() + self.conv = nn.Conv2d(3, 5, 1, 1) + self.conv2 = nn.Conv2d(5, 5, 1, 1) + + def forward(self, x): + x = self.conv(x) + for _ in range(5): + x = 
self.conv2(x) + return x + + for bs in [1, 2]: + flops, params, results = count_flops_params(Model1(), (bs, 3, 2, 2), mode='full', verbose=False) + assert (flops, params) == (610, 240) + + flops, params, results = count_flops_params(Model2(), (bs, 3, 2, 2), verbose=False) + assert (flops, params) == (560, 50) + + from torchvision.models import resnet50 + flops, params, results = count_flops_params(resnet50(), (bs, 3, 224, 224), verbose=False) + assert (flops, params) == (4089184256, 25503912) + + +if __name__ == '__main__': + main() diff --git a/test/ut/compression/v1/test_compressor_tf.py b/test/ut/compression/v1/test_compressor_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..ed8fa9fc59d574b9228cce445807363987f05ac5 --- /dev/null +++ b/test/ut/compression/v1/test_compressor_tf.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from pathlib import Path +import tempfile +import unittest + +import numpy as np +import tensorflow as tf + + +#### +# +# This file tests pruners on 3 models: +# A classic CNN model built by inheriting `Model`; +# The same CNN model built with `Sequential`; +# A naive model with only one linear layer. +# +# The CNN models are used to test layer detecting and instrumenting. +# +# The naive model is used to test mask calculation. +# It has a single 10x10 linear layer without bias, and `reduce_sum` its result. +# To help predicting pruning result, the linear layer has fixed initial weights: +# [ [ 0.0, 1.0, 2.0, ..., 9.0 ], [0.1, 1.1, 2.1, ..., 9.1 ], ... , [0.9, 1.0, 2.9, ..., 9.9 ] ] +# +#### + + +# This tensor is used as input of 10x10 linear layer, the first dimension is batch size +tensor1x10 = tf.constant([[1.0] * 10]) + +# This tensor is used as input of CNN models +image_tensor = tf.zeros([1, 10, 10, 3]) + + +@unittest.skipIf(tf.__version__[0] != '2', 'Skip TF 1.x setup') +class TfCompressorTestCase(unittest.TestCase): + def test_layer_detection(self): + # Conv and dense layers should be compressed, pool and flatten should not. + # This also tests instrumenting functionality. + self._test_layer_detection_on_model(CnnModel()) + self._test_layer_detection_on_model(build_sequential_model()) + + def _test_layer_detection_on_model(self, model): + pruner = pruners['level'](model) + pruner.compress() + layer_types = sorted(type(wrapper.layer).__name__ for wrapper in pruner.wrappers) + assert layer_types == ['Conv2D', 'Dense', 'Dense'], layer_types + + def test_level_pruner_and_export_correctness(self): + # prune 90% : 9.0 + 9.1 + ... 
+ 9.9 = 94.5 + model = build_naive_model() + pruner = pruners['level'](model) + model = pruner.compress() + + x = model(tensor1x10) + assert x.numpy() == 94.5 + + temp_dir = Path(tempfile.gettempdir()) + pruner.export_model(temp_dir / 'model', temp_dir / 'mask') + + # because exporting will uninstrument and re-instrument the model, + # we must test the model again + x = model(tensor1x10) + assert x.numpy() == 94.5 + + # load and test exported model + exported_model = tf.keras.models.load_model(temp_dir / 'model') + x = exported_model(tensor1x10) + assert x.numpy() == 94.5 + + def test_export_not_crash(self): + for model in [CnnModel(), build_sequential_model()]: + pruner = pruners['level'](model) + model = pruner.compress() + # cannot use model.build(image_tensor.shape) here + # it fails even without compression + # seems TF's bug, not ours + model(image_tensor) + pruner.export_model(tempfile.TemporaryDirectory().name) + +try: + from tensorflow.keras import Model, Sequential + from tensorflow.keras.layers import (Conv2D, Dense, Flatten, MaxPool2D) + + from nni.algorithms.compression.tensorflow.pruning import LevelPruner + + pruners = { + 'level': (lambda model: LevelPruner(model, [{'sparsity': 0.9, 'op_types': ['default']}])), + } + + class CnnModel(Model): + def __init__(self): + super().__init__() + self.conv = Conv2D(filters=10, kernel_size=3, activation='relu') + self.pool = MaxPool2D(pool_size=2) + self.flatten = Flatten() + self.fc1 = Dense(units=10, activation='relu') + self.fc2 = Dense(units=5, activation='softmax') + + def call(self, x): + x = self.conv(x) + x = self.pool(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.fc2(x) + return x + + def build_sequential_model(): + return Sequential([ + Conv2D(filters=10, kernel_size=3, activation='relu'), + MaxPool2D(pool_size=2), + Flatten(), + Dense(units=10, activation='relu'), + Dense(units=5, activation='softmax'), + ]) + + class NaiveModel(Model): + def __init__(self): + super().__init__() + self.fc = Dense(units=10, use_bias=False) + + def call(self, x): + return tf.math.reduce_sum(self.fc(x)) + +except Exception: + pass + + +def build_naive_model(): + model = NaiveModel() + model.build(tensor1x10.shape) + weight = [[(i + j * 0.1) for i in range(10)] for j in range(10)] + model.set_weights([np.array(weight)]) + return model + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/compression/v1/test_compressor_torch.py b/test/ut/compression/v1/test_compressor_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4fa796176d1d850d96a99804c3298a085b5db7 --- /dev/null +++ b/test/ut/compression/v1/test_compressor_torch.py @@ -0,0 +1,692 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
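+
+# Unit tests for the v1 pruners and quantizers: mask calculation,
+# quantization schemes, calibration export, and config validation.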
+ +import copy +from unittest import TestCase, main +import numpy as np +import torch +import torch.nn.functional as F +import schema +import nni.algorithms.compression.pytorch.pruning as torch_pruner +import nni.algorithms.compression.pytorch.quantization as torch_quantizer +from nni.compression.pytorch.quantization.utils import calculate_qmin_qmax, get_quant_shape +import math + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 5, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(5) + self.conv2 = torch.nn.Conv2d(5, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 10) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +class CompressorTestCase(TestCase): + def test_torch_quantizer_modules_detection(self): + # test if modules can be detected + model = TorchModel() + config_list = [{ + 'quant_types': ['weight'], + 'quant_bits': 8, + 'op_types': ['Conv2d', 'Linear'] + }, { + 'quant_types': ['output'], + 'quant_bits': 8, + 'quant_start_step': 0, + 'op_types': ['ReLU'] + }] + + model.relu = torch.nn.ReLU() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + dummy = torch.randn(1, 1, 28, 28) + quantizer = torch_quantizer.QAT_Quantizer(model, config_list, optimizer, dummy_input=dummy) + quantizer.compress() + modules_to_compress = quantizer.get_modules_to_compress() + modules_to_compress_name = [t[0].name for t in modules_to_compress] + assert "conv1" in modules_to_compress_name + assert "conv2" in modules_to_compress_name + assert "fc1" in modules_to_compress_name + assert "fc2" in modules_to_compress_name + assert "relu" in modules_to_compress_name + assert len(modules_to_compress_name) == 5 + + def test_torch_level_pruner(self): + model = TorchModel() + configure_list = [{'sparsity': 0.8, 'op_types': ['default']}] + torch_pruner.LevelPruner(model, configure_list).compress() + + def test_torch_naive_quantizer(self): + model = TorchModel() + configure_list = [{ + 'quant_types': ['weight'], + 'quant_bits': { + 'weight': 8, + }, + 'op_types': ['Conv2d', 'Linear'] + }] + torch_quantizer.NaiveQuantizer(model, configure_list).compress() + + def test_torch_fpgm_pruner(self): + """ + With filters(kernels) weights defined as above (w), it is obvious that w[4] and w[5] is the Geometric Median + which minimize the total geometric distance by defination of Geometric Median in this paper: + Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration, + https://arxiv.org/pdf/1811.00250.pdf + + So if sparsity is 0.2, the expected masks should mask out w[4] and w[5], this can be verified through: + `all(torch.sum(masks, (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 125., 125., 125., 125.]))` + + If sparsity is 0.6, the expected masks should mask out w[2] - w[7], this can be verified through: + `all(torch.sum(masks, (1, 2, 3)).numpy() == np.array([125., 125., 0., 0., 0., 0., 0., 0., 125., 125.]))` + """ + w = np.array([np.ones((5, 5, 5)) * (i+1) for i in range(10)]).astype(np.float32) + + model = TorchModel() + config_list = [{'sparsity': 0.6, 'op_types': ['Conv2d']}, {'sparsity': 0.2, 'op_types': ['Conv2d']}] + pruner = torch_pruner.FPGMPruner(model, config_list) + + 
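+        # conv2 holds 10 filters of shape (5, 5, 5); a kept filter's mask sums
+        # to 125 and a pruned filter's mask to 0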
model.conv2.module.weight.data = torch.tensor(w).float() + masks = pruner.calc_mask(model.conv2) + assert all(torch.sum(masks['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 125., 125., 125., 125.])) + + model.conv2.module.weight.data = torch.tensor(w).float() + model.conv2.if_calculated = False + model.conv2.config = config_list[0] + masks = pruner.calc_mask(model.conv2) + assert all(torch.sum(masks['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 0., 0., 0., 0., 0., 0., 125., 125.])) + + + def test_torch_l1filter_pruner(self): + """ + Filters with the minimum sum of the weights' L1 norm are pruned in this paper: + PRUNING FILTERS FOR EFFICIENT CONVNETS, + https://arxiv.org/abs/1608.08710 + + So if sparsity is 0.2 for conv1, the expected masks should mask out filter 0, this can be verified through: + `all(torch.sum(mask1, (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.]))` + + If sparsity is 0.6 for conv2, the expected masks should mask out filter 0,1,2, this can be verified through: + `all(torch.sum(mask2, (1, 2, 3)).numpy() == np.array([0., 0., 0., 0., 0., 0., 125., 125., 125., 125.]))` + """ + w1 = np.array([np.ones((1, 5, 5))*i for i in range(5)]).astype(np.float32) + w2 = np.array([np.ones((5, 5, 5))*i for i in range(10)]).astype(np.float32) + + model = TorchModel() + config_list = [{'sparsity': 0.2, 'op_types': ['Conv2d'], 'op_names': ['conv1']}, + {'sparsity': 0.6, 'op_types': ['Conv2d'], 'op_names': ['conv2']}] + pruner = torch_pruner.L1FilterPruner(model, config_list) + + model.conv1.module.weight.data = torch.tensor(w1).float() + model.conv2.module.weight.data = torch.tensor(w2).float() + mask1 = pruner.calc_mask(model.conv1) + mask2 = pruner.calc_mask(model.conv2) + assert all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.])) + assert all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 0., 0., 0., 0., 0., 125., 125., 125., 125.])) + + def test_torch_slim_pruner(self): + """ + Scale factors with minimum l1 norm in the BN layers are pruned in this paper: + Learning Efficient Convolutional Networks through Network Slimming, + https://arxiv.org/pdf/1708.06519.pdf + + So if sparsity is 0.2, the expected masks should mask out channel 0, this can be verified through: + `all(mask1.numpy() == np.array([0., 1., 1., 1., 1.]))` + `all(mask2.numpy() == np.array([0., 1., 1., 1., 1.]))` + + If sparsity is 0.6, the expected masks should mask out channel 0,1,2, this can be verified through: + `all(mask1.numpy() == np.array([0., 0., 0., 1., 1.]))` + `all(mask2.numpy() == np.array([0., 0., 0., 1., 1.]))` + """ + w = np.array([0, 1, 2, 3, 4]) + model = TorchModel() + config_list = [{'sparsity': 0.2, 'op_types': ['BatchNorm2d']}] + model.bn1.weight.data = torch.tensor(w).float() + model.bn2.weight.data = torch.tensor(-w).float() + pruner = torch_pruner.SlimPruner(model, config_list, optimizer=None, trainer=None, criterion=None) + + mask1 = pruner.calc_mask(model.bn1) + mask2 = pruner.calc_mask(model.bn2) + assert all(mask1['weight_mask'].numpy() == np.array([0., 1., 1., 1., 1.])) + assert all(mask2['weight_mask'].numpy() == np.array([0., 1., 1., 1., 1.])) + assert all(mask1['bias_mask'].numpy() == np.array([0., 1., 1., 1., 1.])) + assert all(mask2['bias_mask'].numpy() == np.array([0., 1., 1., 1., 1.])) + + model = TorchModel() + config_list = [{'sparsity': 0.6, 'op_types': ['BatchNorm2d']}] + model.bn1.weight.data = torch.tensor(w).float() + model.bn2.weight.data = torch.tensor(w).float() + 
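+        # with sparsity 0.6, the three channels with the smallest scaling
+        # factors (0, 1, 2) should be masked in both BN layers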
pruner = torch_pruner.SlimPruner(model, config_list, optimizer=None, trainer=None, criterion=None) + + mask1 = pruner.calc_mask(model.bn1) + mask2 = pruner.calc_mask(model.bn2) + assert all(mask1['weight_mask'].numpy() == np.array([0., 0., 0., 1., 1.])) + assert all(mask2['weight_mask'].numpy() == np.array([0., 0., 0., 1., 1.])) + assert all(mask1['bias_mask'].numpy() == np.array([0., 0., 0., 1., 1.])) + assert all(mask2['bias_mask'].numpy() == np.array([0., 0., 0., 1., 1.])) + + def test_torch_taylorFOweight_pruner(self): + """ + Filters with the minimum importance approximation based on the first-order + Taylor expansion on the weights (w*grad)**2 are pruned in this paper: + Importance Estimation for Neural Network Pruning, + http://jankautz.com/publications/Importance4NNPruning_CVPR19.pdf + + So if sparsity of conv1 is 0.2, the expected masks should mask out filter 0; this can be verified through: + `all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.]))` + + If sparsity of conv2 is 0.6, the expected masks should mask out filters 4,5,6,7,8,9; this can be verified through: + `all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 0., 0., 0., 0., ]))` + """ + + w1 = np.array([np.zeros((1, 5, 5)), np.ones((1, 5, 5)), np.ones((1, 5, 5)) * 2, + np.ones((1, 5, 5)) * 3, np.ones((1, 5, 5)) * 4]) + w2 = np.array([[[[i + 1] * 5] * 5] * 5 for i in range(10)[::-1]]) + + grad1 = np.array([np.ones((1, 5, 5)) * -1, np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1, + np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1]) + + grad2 = np.array([[[[(-1)**i] * 5] * 5] * 5 for i in range(10)]) + + config_list = [{'sparsity': 0.2, 'op_types': ['Conv2d'], 'op_names': ['conv1']}, + {'sparsity': 0.6, 'op_types': ['Conv2d'], 'op_names': ['conv2']}] + + model = TorchModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + pruner = torch_pruner.TaylorFOWeightFilterPruner(model, config_list, optimizer, trainer=None, criterion=None, sparsifying_training_batches=1) + + x = torch.rand((1, 1, 28, 28), requires_grad=True) + model.conv1.module.weight.data = torch.tensor(w1).float() + model.conv2.module.weight.data = torch.tensor(w2).float() + + y = model(x) + y.backward(torch.ones_like(y)) + + model.conv1.module.weight.grad.data = torch.tensor(grad1).float() + model.conv2.module.weight.grad.data = torch.tensor(grad2).float() + optimizer.step() + + mask1 = pruner.calc_mask(model.conv1) + mask2 = pruner.calc_mask(model.conv2) + assert all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.])) + assert all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 0., 0., 0., 0., ])) + + def test_torch_taylorFOweight_pruner_global_sort(self): + """ + After enabling global_sort, taylorFOweight pruner will calculate contributions and rank topk from all + of the conv operators. Then it prunes low-contribution filters based on this global information.
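+        Here conv1 (5 filters) and conv2 (10 filters) are ranked together
+        before the sparsity budget is applied.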
+ + So if sparsity of conv operator is 0.4, the expected masks should mask out filter 0 and filter 1 together, + this can be verified through: + `all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 0., 0, 0., 25.]))` + `all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 125., 125., 125., 0., 0., 0.]))` + """ + + w1 = np.array([np.zeros((1, 5, 5)), np.ones((1, 5, 5)), np.ones((1, 5, 5)) * 2, + np.ones((1, 5, 5)) * 3, np.ones((1, 5, 5)) * 4]) + w2 = np.array([[[[i + 1] * 5] * 5] * 5 for i in range(10)[::-1]]) + + grad1 = np.array([np.ones((1, 5, 5)) * -1, np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1, + np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1]) + + grad2 = np.array([[[[(-1)**i] * 5] * 5] * 5 for i in range(10)]) + + config_list = [{'sparsity': 0.4, 'op_types': ['Conv2d']}] + + model = TorchModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + pruner = torch_pruner.TaylorFOWeightFilterPruner(model, config_list, optimizer, trainer=None, criterion=None, sparsifying_training_batches=1, global_sort=True) + + x = torch.rand((1, 1, 28, 28), requires_grad=True) + model.conv1.module.weight.data = torch.tensor(w1).float() + model.conv2.module.weight.data = torch.tensor(w2).float() + + y = model(x) + y.backward(torch.ones_like(y)) + + model.conv1.module.weight.grad.data = torch.tensor(grad1).float() + model.conv2.module.weight.grad.data = torch.tensor(grad2).float() + optimizer.step() + + mask1 = pruner.calc_mask(model.conv1) + mask2 = pruner.calc_mask(model.conv2) + print(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy()) + print(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy()) + assert all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 0., 0, 0., 25.])) + assert all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 125., 125., 125., 0., 0., 0.])) + + def test_torch_observer_quantizer(self): + model = TorchModel() + # test invalid config + # only support 8bit for now + config_list = [{ + 'quant_types': ['weight'], + 'quant_bits': 5, + 'op_types': ['Conv2d', 'Linear'] + }] + with self.assertRaises(schema.SchemaError): + torch_quantizer.ObserverQuantizer(model, config_list) + + # weight will not change for now + model = TorchModel().eval() + origin_parameters = copy.deepcopy(dict(model.named_parameters())) + + config_list = [{ + 'quant_types': ['weight'], + 'quant_bits': 8, + 'op_types': ['Conv2d', 'Linear'] + }] + quantizer = torch_quantizer.ObserverQuantizer(model, config_list) + input = torch.randn(1, 1, 28, 28) + model(input) + quantizer.compress() + buffers = dict(model.named_buffers()) + scales = {k: v for k, v in buffers.items() if 'scale' in k} + model_path = "test_model.pth" + calibration_path = "test_calibration.pth" + calibration_config = quantizer.export_model(model_path, calibration_path) + new_parameters = dict(model.named_parameters()) + for layer_name, v in calibration_config.items(): + scale_name = layer_name + '.module.weight_scale' + weight_name = layer_name + '.weight' + s = float(scales[scale_name]) + self.assertTrue(torch.allclose(origin_parameters[weight_name], new_parameters[weight_name], atol=0.5 * s)) + + self.assertTrue(calibration_config is not None) + self.assertTrue(len(calibration_config) == 4) + + def test_torch_quantizer_weight_type(self): + quantizer_list = [ + torch_quantizer.QAT_Quantizer, + torch_quantizer.LsqQuantizer, + torch_quantizer.ObserverQuantizer, + torch_quantizer.NaiveQuantizer, + 
torch_quantizer.DoReFaQuantizer] + for quantizer_type in quantizer_list: + model = TorchModel().eval() + config_list = [{ + 'quant_types': ['weight'], + 'quant_bits': 8, + 'op_types': ['Conv2d', 'Linear'] + }] + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + dummy = torch.randn(1, 1, 28, 28) + if quantizer_type == torch_quantizer.QAT_Quantizer: + quantizer_type(model, config_list, optimizer, dummy_input=dummy) + else: + quantizer_type(model, config_list, optimizer) + + self.assertFalse(isinstance(model.conv1.module.weight, torch.nn.Parameter)) + self.assertFalse(isinstance(model.conv2.module.weight, torch.nn.Parameter)) + self.assertFalse(isinstance(model.fc1.module.weight, torch.nn.Parameter)) + self.assertFalse(isinstance(model.fc2.module.weight, torch.nn.Parameter)) + + def test_quantization_dtype_scheme(self): + class TestModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 2, 3, 1) + self.bn1 = torch.nn.BatchNorm2d(2) + + def forward(self, x): + x = self.bn1(self.conv1(x)) + return x + dtypes = ['int', 'uint'] + qschemes = ['per_tensor_affine', 'per_tensor_symmetric', 'per_channel_affine', 'per_channel_symmetric'] + for dtype in dtypes: + for qscheme in qschemes: + config_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': 8, + 'op_types': ['Conv2d'], + 'quant_dtype': dtype, + 'quant_scheme': qscheme + }] + model = TestModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + # only QAT_quantizer is supported for now + dummy = torch.randn(1, 1, 4, 4) + quantizer = torch_quantizer.QAT_Quantizer(model, config_list, optimizer, dummy_input=dummy) + + # test layer setting + for layer, config in quantizer.modules_to_compress: + module = layer.module + name = layer.name + layer_setting = module.layer_quant_setting + qmin, qmax = calculate_qmin_qmax(8, dtype) + all_quant_types = ['input', 'weight'] + for quant_type in all_quant_types: + # check for settings + tensor_setting = getattr(layer_setting, quant_type) + self.assertTrue(tensor_setting is not None) + self.assertTrue(tensor_setting.quant_scheme == qscheme) + self.assertTrue(tensor_setting.quant_dtype == dtype) + self.assertTrue(tensor_setting.qmin == qmin) + self.assertTrue(tensor_setting.qmax == qmax) + + input_shape, output_shape = quantizer.all_shapes[name] + + shape = input_shape if quant_type == 'input' else module.weight.shape + quant_shape = get_quant_shape(shape, quant_type, qscheme) + scale_name = quant_type + '_scale' + zero_point_name = quant_type + '_zero_point' + scale = getattr(module, scale_name) + zero_point = getattr(module, zero_point_name) + self.assertTrue(list(scale.shape) == quant_shape) + self.assertTrue(list(zero_point.shape) == quant_shape) + + weight = torch.arange(start=1, end=19).view(2, 1, 3, 3) + if qscheme == 'per_channel_symmetric': + if dtype == 'int': + target_scale = torch.tensor([9. / 127, 18. / 127]).view([2, 1, 1, 1]) + target_zero_point = torch.ones([2, 1, 1, 1]) * 0 + else: + target_scale = torch.tensor([9. / 127.5, 18. / 127.5]).view([2, 1, 1, 1]) + target_zero_point = torch.ones([2, 1, 1, 1]) * 127 + elif qscheme == 'per_tensor_symmetric': + if dtype == 'int': + target_scale = torch.tensor([18. / 127]) + target_zero_point = torch.zeros([1]) + else: + target_scale = torch.tensor([18. / 127.5]) + target_zero_point = torch.ones([1]) * 127 + elif qscheme == 'per_channel_affine': + min_val = torch.tensor([0., 0.]).view([2, 1, 1, 1]) + if dtype == 'int': + target_scale = torch.tensor([9. 
/ 254, 18. / 254]).view([2, 1, 1, 1]) + target_zero_point = -127 - torch.round(min_val / target_scale) + else: + target_scale = torch.tensor([9. / 255, 18. / 255]).view([2, 1, 1, 1]) + target_zero_point = 0 - torch.round(min_val / target_scale) + else: + if dtype == 'int': + target_scale = torch.tensor([18. / 254]) + target_zero_point = -127 - torch.round(0 / target_scale) + else: + target_scale = torch.tensor([18. / 255]) + target_zero_point = 0 - torch.round(0 / target_scale) + wrapper = getattr(model, name) + wrapper.module.weight = weight + quantizer.quantize_weight(wrapper) + self.assertTrue(torch.equal(getattr(model, name).module.weight_scale, target_scale)) + self.assertTrue(torch.equal(getattr(model, name).module.weight_zero_point, target_zero_point)) + + inp = torch.arange(start=0, end=16).view(1, 1, 4, 4) + if qscheme == 'per_channel_symmetric': + if dtype == 'int': + target_scale = torch.tensor([15. / 127]).view([1, 1, 1, 1]) + target_zero_point = torch.ones([1, 1, 1, 1]) * 0 + else: + target_scale = torch.tensor([15. / 127.5]).view([1, 1, 1, 1]) + target_zero_point = torch.ones([1, 1, 1, 1]) * 127 + elif qscheme == 'per_tensor_symmetric': + if dtype == 'int': + target_scale = torch.tensor([15. / 127]) + target_zero_point = torch.zeros([1]) + else: + target_scale = torch.tensor([15. / 127.5]) + target_zero_point = torch.ones([1]) * 127 + elif qscheme == 'per_channel_affine': + min_val = torch.tensor([0.]).view([1, 1, 1, 1]) + if dtype == 'int': + target_scale = torch.tensor([15. / 254]).view([1, 1, 1, 1]) + target_zero_point = -127 - torch.round(min_val / target_scale) + else: + target_scale = torch.tensor([15. / 255]).view([1, 1, 1, 1]) + target_zero_point = 0 - torch.round(min_val / target_scale) + else: + if dtype == 'int': + target_scale = torch.tensor([15. / 254]) + target_zero_point = -127 - torch.round(0 / target_scale) + else: + target_scale = torch.tensor([15. / 255]) + target_zero_point = 0 - torch.round(0 / target_scale) + quantizer.quantize_input(inp, wrapper) + self.assertTrue(torch.equal(getattr(model, name).module.input_scale, target_scale)) + self.assertTrue(torch.equal(getattr(model, name).module.input_zero_point, target_zero_point)) + + def test_torch_QAT_quantizer(self): + model = TorchModel() + config_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': 8, + 'op_types': ['Conv2d', 'Linear'] + }, { + 'quant_types': ['output'], + 'quant_bits': 8, + 'quant_start_step': 0, + 'op_types': ['ReLU'] + }] + model.relu = torch.nn.ReLU() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + dummy = torch.randn(1, 1, 28, 28) + quantizer = torch_quantizer.QAT_Quantizer(model, config_list, optimizer, dummy_input=dummy) + quantizer.compress() + + # test quantize + # range not including 0 + eps = 1e-7 + input = torch.tensor([[1, 4], [2, 1]]) + weight = torch.tensor([[1, 2], [3, 5]]).float() + model.conv2.module.weight.data = weight + quantizer.quantize_weight(model.conv2, input_tensor=input) + assert math.isclose(model.conv2.module.weight_scale, 5 / 255, abs_tol=eps) + assert model.conv2.module.weight_zero_point == 0 + quantizer.quantize_input(input, model.conv2) + self.assertTrue(torch.allclose(model.conv2.module.input_scale, torch.tensor([4. 
/ 255]))) + self.assertTrue(torch.equal(model.conv2.module.input_zero_point, torch.tensor([0.]))) + # range including 0 + weight = torch.tensor([[-1, 2], [3, 5]]).float() + model.conv2.module.weight = weight + quantizer.quantize_weight(model.conv2, input_tensor=input) + assert math.isclose(model.conv2.module.weight_scale, 6 / 255, abs_tol=eps) + assert model.conv2.module.weight_zero_point in (42, 43) + quantizer.quantize_input(input, model.conv2) + self.assertTrue(torch.allclose(model.conv2.module.input_scale, torch.tensor([4. / 255]))) + self.assertTrue(torch.equal(model.conv2.module.input_zero_point, torch.tensor([0.]))) + # test value of weight and bias after quantization + weight = torch.tensor([[1.1287, 2.3456], [3.7814, 5.9723]]) + weight_valid = torch.tensor([[1.1242, 2.3421], [3.7707, 5.9723]]) + bias = torch.tensor([2.3432, 3.4342, 1.3414, 5.2341]) + bias_valid = torch.tensor([2.3432, 3.4342, 1.3414, 5.2341]) + model.conv2.module.weight = weight + model.conv2.module.bias.data = bias + quantizer.quantize_weight(model.conv2, input_tensor=input) + assert torch.all(torch.isclose(model.conv2.module.weight.data, weight_valid, rtol=1e-4)) + assert torch.all(torch.isclose(model.conv2.module.bias.data, bias_valid, rtol=1e-7)) + + # test ema + eps = 1e-7 + x = torch.tensor([[-0.2, 0], [0.1, 0.2]]) + model.relu(x) + self.assertTrue(torch.equal(model.relu.module.tracked_min_output, torch.tensor([0.]))) + self.assertTrue(torch.equal(model.relu.module.tracked_max_output, torch.tensor([0.2]))) + + quantizer.step_with_optimizer() + x = torch.tensor([[0.2, 0.4], [0.6, 0.8]]) + model.relu(x) + self.assertTrue(torch.equal(model.relu.module.tracked_min_output, torch.tensor([0.002]))) + self.assertTrue(torch.equal(model.relu.module.tracked_max_output, torch.tensor([0.2060]))) + + def test_torch_quantizer_export(self): + config_list_qat = [{ + 'quant_types': ['weight'], + 'quant_bits': 8, + 'op_types': ['Conv2d', 'Linear'] + }, { + 'quant_types': ['output'], + 'quant_bits': 8, + 'quant_start_step': 0, + 'op_types': ['ReLU'] + }] + config_list_dorefa = [{ + 'quant_types': ['weight'], + 'quant_bits': { + 'weight': 8, + }, # you can just use `int` here because all `quan_types` share same bits length, see config for `ReLu6` below. 
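+        # weight-only quantization: layer outputs are not quantized in this config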
+ 'op_types':['Conv2d', 'Linear'] + }] + config_list_bnn = [{ + 'quant_types': ['weight'], + 'quant_bits': 1, + 'op_types': ['Conv2d', 'Linear'] + }, { + 'quant_types': ['output'], + 'quant_bits': 1, + 'op_types': ['ReLU'] + }] + config_set = [config_list_qat, config_list_dorefa, config_list_bnn] + quantize_algorithm_set = [torch_quantizer.QAT_Quantizer, torch_quantizer.DoReFaQuantizer, torch_quantizer.BNNQuantizer] + dummy = torch.randn(1, 1, 28, 28) + for config, quantize_algorithm in zip(config_set, quantize_algorithm_set): + model = TorchModel() + model.relu = torch.nn.ReLU() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + if quantize_algorithm == torch_quantizer.QAT_Quantizer: + quantizer = quantize_algorithm(model, config, optimizer, dummy) + else: + quantizer = quantize_algorithm(model, config, optimizer) + quantizer.compress() + + x = torch.rand((1, 1, 28, 28), requires_grad=True) + y = model(x) + y.backward(torch.ones_like(y)) + + model_path = "test_model.pth" + calibration_path = "test_calibration.pth" + onnx_path = "test_model.onnx" + input_shape = (1, 1, 28, 28) + device = torch.device("cpu") + + calibration_config = quantizer.export_model(model_path, calibration_path, onnx_path, input_shape, device) + assert calibration_config is not None + + def test_quantizer_load_calibration_config(self): + configure_list = [{ + 'quant_types': ['weight', 'input'], + 'quant_bits': {'weight': 8, 'input': 8}, + 'op_names': ['conv1', 'conv2'] + }, { + 'quant_types': ['output', 'weight', 'input'], + 'quant_bits': {'output': 8, 'weight': 8, 'input': 8}, + 'op_names': ['fc1', 'fc2'], + }] + quantize_algorithm_set = [torch_quantizer.ObserverQuantizer, torch_quantizer.QAT_Quantizer, torch_quantizer.LsqQuantizer] + calibration_config = None + for quantize_algorithm in quantize_algorithm_set: + model = TorchModel().eval() + model.relu = torch.nn.ReLU() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5) + if quantize_algorithm == torch_quantizer.QAT_Quantizer: + dummy = torch.randn(1, 1, 28, 28) + quantizer = quantize_algorithm(model, configure_list, optimizer, dummy_input=dummy) + else: + quantizer = quantize_algorithm(model, configure_list, optimizer) + quantizer.compress() + if calibration_config is not None: + quantizer.load_calibration_config(calibration_config) + + model_path = "test_model.pth" + calibration_path = "test_calibration.pth" + onnx_path = "test_model.onnx" + input_shape = (1, 1, 28, 28) + device = torch.device("cpu") + + calibration_config = quantizer.export_model(model_path, calibration_path, onnx_path, input_shape, device) + + def test_torch_pruner_validation(self): + # test bad configuraiton + pruner_classes = [torch_pruner.__dict__[x] for x in \ + ['LevelPruner', 'SlimPruner', 'FPGMPruner', 'L1FilterPruner', 'L2FilterPruner', 'AGPPruner',\ + 'ActivationMeanRankFilterPruner', 'ActivationAPoZRankFilterPruner']] + + bad_configs = [ + [ + {'sparsity': '0.2'}, + {'sparsity': 0.6 } + ], + [ + {'sparsity': 0.2}, + {'sparsity': 1.6 } + ], + [ + {'sparsity': 0.2, 'op_types': 'default'}, + {'sparsity': 0.6 } + ], + [ + {'sparsity': 0.2 }, + {'sparsity': 0.6, 'op_names': 'abc'} + ] + ] + model = TorchModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + for pruner_class in pruner_classes: + for config_list in bad_configs: + try: + kwargs = {} + if pruner_class in (torch_pruner.SlimPruner, torch_pruner.AGPPruner, torch_pruner.ActivationMeanRankFilterPruner, torch_pruner.ActivationAPoZRankFilterPruner): + kwargs = {'optimizer': 
+
+                    print('kwargs', kwargs)
+                    pruner_class(model, config_list, **kwargs)
+
+                    print(config_list)
+                    assert False, 'Validation error should be raised for bad configuration'
+                except schema.SchemaError:
+                    pass
+                except:
+                    print('FAILED:', pruner_class, config_list)
+                    raise
+
+    def test_torch_quantizer_validation(self):
+        # test bad configuration
+        quantizer_classes = [torch_quantizer.__dict__[x] for x in \
+            ['NaiveQuantizer', 'QAT_Quantizer', 'DoReFaQuantizer', 'BNNQuantizer']]
+
+        bad_configs = [
+            [
+                {'bad_key': 'abc'}
+            ],
+            [
+                {'quant_types': 'abc'}
+            ],
+            [
+                {'quant_bits': 34}
+            ],
+            [
+                {'op_types': 'default'}
+            ],
+            [
+                {'quant_bits': {'abc': 123}}
+            ]
+        ]
+        model = TorchModel()
+        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+        for quantizer_class in quantizer_classes:
+            for config_list in bad_configs:
+                try:
+                    quantizer_class(model, config_list, optimizer)
+                    print(config_list)
+                    assert False, 'Validation error should be raised for bad configuration'
+                except schema.SchemaError:
+                    pass
+                except:
+                    print('FAILED:', quantizer_class, config_list)
+                    raise
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/compression/v1/test_dependecy_aware.py b/test/ut/compression/v1/test_dependecy_aware.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbbe7453b261ddaa0dae6e65c5c51cca079dd2a9
--- /dev/null
+++ b/test/ut/compression/v1/test_dependecy_aware.py
@@ -0,0 +1,159 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+
+import random
+import unittest
+from unittest import TestCase, main
+import torch
+import torch.nn as nn
+import torchvision.models as models
+import numpy as np
+
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, L2FilterPruner, FPGMPruner, \
+    TaylorFOWeightFilterPruner, ActivationAPoZRankFilterPruner, \
+    ActivationMeanRankFilterPruner
+from nni.compression.pytorch import ModelSpeedup
+
+unittest.TestLoader.sortTestMethodsUsing = None
+
+MODEL_FILE, MASK_FILE = './model.pth', './mask.pth'
+
+def generate_random_sparsity(model):
+    """
+    Generate a random sparsity for all conv layers in the model.
+    """
+    cfg_list = []
+    for name, module in model.named_modules():
+        if isinstance(module, nn.Conv2d):
+            sparsity = np.random.uniform(0.5, 0.99)
+            cfg_list.append({'op_types': ['Conv2d'], 'op_names': [name],
+                             'sparsity': sparsity})
+    return cfg_list
+
+def generate_random_sparsity_v2(model):
+    """
+    Only generate a random sparsity for some of the conv layers in the model.
+ """ + cfg_list = [] + for name, module in model.named_modules(): + # randomly pick 50% layers + if isinstance(module, nn.Conv2d) and random.uniform(0, 1) > 0.5: + sparsity = np.random.uniform(0.5, 0.99) + cfg_list.append({'op_types': ['Conv2d'], 'op_names': [name], + 'sparsity': sparsity}) + return cfg_list + +def train(model, criterion, optimizer, callback=None): + model.train() + device = next(model.parameters()).device + data = torch.randn(2, 3, 224, 224).to(device) + target = torch.tensor([0, 1]).long().to(device) + optimizer.zero_grad() + output = model(data) + loss = criterion(output, target) + loss.backward() + + # callback should be inserted between loss.backward() and optimizer.step() + if callback: + callback() + + optimizer.step() + +def trainer(model, optimizer, criterion, epoch, callback=None): + return train(model, criterion, optimizer, callback=callback) + +class DependencyawareTest(TestCase): + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") + def test_dependency_aware_pruning(self): + model_zoo = ['resnet18'] + pruners = [L1FilterPruner, L2FilterPruner, FPGMPruner, TaylorFOWeightFilterPruner] + sparsity = 0.7 + cfg_list = [{'op_types': ['Conv2d'], 'sparsity':sparsity}] + dummy_input = torch.ones(1, 3, 224, 224) + + for model_name in model_zoo: + for pruner in pruners: + print('Testing on ', pruner) + ori_filters = {} + Model = getattr(models, model_name) + net = Model(pretrained=False, progress=False) + # record the number of the filter of each conv layer + for name, module in net.named_modules(): + if isinstance(module, nn.Conv2d): + ori_filters[name] = module.out_channels + + # for the pruners that based on the activations, we need feed + # enough data before we call the compress function. + optimizer = torch.optim.SGD(net.parameters(), lr=0.0001, + momentum=0.9, + weight_decay=4e-5) + criterion = torch.nn.CrossEntropyLoss() + if pruner == TaylorFOWeightFilterPruner: + tmp_pruner = pruner( + net, cfg_list, optimizer, trainer=trainer, criterion=criterion, dependency_aware=True, dummy_input=dummy_input) + else: + tmp_pruner = pruner( + net, cfg_list, dependency_aware=True, dummy_input=dummy_input) + + tmp_pruner.compress() + tmp_pruner.export_model(MODEL_FILE, MASK_FILE) + # if we want to use the same model, we should unwrap the pruner before the speedup + tmp_pruner._unwrap_model() + ms = ModelSpeedup(net, dummy_input, MASK_FILE) + ms.speedup_model() + for name, module in net.named_modules(): + if isinstance(module, nn.Conv2d): + expected = int(ori_filters[name] * (1 - sparsity)) + filter_diff = abs(expected - module.out_channels) + errmsg = '%s Ori: %d, Expected: %d, Real: %d' % ( + name, ori_filters[name], expected, module.out_channels) + + # because we are using the dependency-aware mode, so the number of the + # filters after speedup should be ori_filters[name] * ( 1 - sparsity ) + print(errmsg) + assert filter_diff <= 1, errmsg + + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") + def test_dependency_aware_random_config(self): + model_zoo = ['resnet18'] + pruners = [L1FilterPruner, L2FilterPruner, FPGMPruner, TaylorFOWeightFilterPruner, + ActivationMeanRankFilterPruner, ActivationAPoZRankFilterPruner] + dummy_input = torch.ones(1, 3, 224, 224) + for model_name in model_zoo: + for pruner in pruners: + Model = getattr(models, model_name) + cfg_generator = [generate_random_sparsity, generate_random_sparsity_v2] + for _generator in cfg_generator: + net = Model(pretrained=False, progress=False) + cfg_list = _generator(net) + + 
+                    print('\n\nModel:', model_name)
+                    print('Pruner', pruner)
+                    print('Config_list:', cfg_list)
+                    # the pruners that are based on activations need to be fed
+                    # enough data before we call the compress function
+                    optimizer = torch.optim.SGD(net.parameters(), lr=0.0001,
+                                                momentum=0.9,
+                                                weight_decay=4e-5)
+                    criterion = torch.nn.CrossEntropyLoss()
+
+                    if pruner in (TaylorFOWeightFilterPruner, ActivationMeanRankFilterPruner, ActivationAPoZRankFilterPruner):
+                        tmp_pruner = pruner(
+                            net, cfg_list, optimizer, trainer=trainer, criterion=criterion, dependency_aware=True, dummy_input=dummy_input)
+                    else:
+                        tmp_pruner = pruner(
+                            net, cfg_list, dependency_aware=True, dummy_input=dummy_input)
+
+                    tmp_pruner.compress()
+                    tmp_pruner.export_model(MODEL_FILE, MASK_FILE)
+                    # if we want to reuse the same model, we should unwrap the pruner before the speedup
+                    tmp_pruner._unwrap_model()
+                    ms = ModelSpeedup(net, dummy_input, MASK_FILE)
+                    ms.speedup_model()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/compression/v1/test_model_speedup.py b/test/ut/compression/v1/test_model_speedup.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d0ff7cf860a9aa47324ab23be387de634123a22
--- /dev/null
+++ b/test/ut/compression/v1/test_model_speedup.py
@@ -0,0 +1,525 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+import os
+import gc
+import psutil
+import sys
+import numpy as np
+import torch
+import torchvision.models as models
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision.models.vgg import vgg16, vgg11
+from torchvision.models.resnet import resnet18
+from torchvision.models.mobilenet import mobilenet_v2
+import unittest
+from unittest import TestCase, main
+
+from nni.compression.pytorch import ModelSpeedup, apply_compression_results
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, LevelPruner
+from nni.algorithms.compression.pytorch.pruning.weight_masker import WeightMasker
+from nni.algorithms.compression.pytorch.pruning.dependency_aware_pruner import DependencyAwarePruner
+
+torch.manual_seed(0)
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+BATCH_SIZE = 2
+# the relative distance
+RELATIVE_THRESHOLD = 0.01
+# Because of the precision of floating-point numbers, some errors
+# between the original output tensors (without speedup) and the output
+# tensors of the speedup model are normal. When the output tensor itself
+# is small, such errors may exceed the relative threshold, so we also add
+# an absolute threshold to determine whether the final result is correct.
+# The error should meet the RELATIVE_THRESHOLD or the ABSOLUTE_THRESHOLD.
+ABSOLUTE_THRESHOLD = 0.0001 + + +class BackboneModel1(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 1, 1, 1) + + def forward(self, x): + return self.conv1(x) + + +class BackboneModel2(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.bn1 = nn.BatchNorm2d(self.conv1.out_channels) + self.bn2 = nn.BatchNorm2d(self.conv2.out_channels) + self.fc1 = nn.Linear(4 * 4 * 50, 500) + self.fc2 = nn.Linear(500, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(x.size(0), -1) + + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return x + + +class BigModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.backbone1 = BackboneModel1() + self.backbone2 = BackboneModel2() + self.fc3 = nn.Sequential( + nn.Linear(10, 10), + nn.BatchNorm1d(10), + nn.ReLU(inplace=True), + nn.Linear(10, 2) + ) + + def forward(self, x): + x = self.backbone1(x) + x = self.backbone2(x) + x = self.fc3(x) + return x + + +class TransposeModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 20, 5) + self.conv2 = nn.ConvTranspose2d(20, 50, 5, groups=2) + self.bn1 = nn.BatchNorm2d(self.conv1.out_channels) + self.bn2 = nn.BatchNorm2d(self.conv2.out_channels) + self.fc1 = nn.Linear(8 * 8 * 50, 500) + self.fc2 = nn.Linear(500, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + # x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + # x = F.max_pool2d(x, 2, 2) + x = x.view(x.size(0), -1) + + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return x + + +class TupleUnpack_backbone(nn.Module): + def __init__(self, width): + super(TupleUnpack_backbone, self).__init__() + self.model_backbone = mobilenet_v2( + pretrained=False, width_mult=width, num_classes=3) + + def forward(self, x): + x1 = self.model_backbone.features[:7](x) + x2 = self.model_backbone.features[7:14](x1) + x3 = self.model_backbone.features[14:18](x2) + return [x1, x2, x3] + + +class TupleUnpack_FPN(nn.Module): + def __init__(self): + super(TupleUnpack_FPN, self).__init__() + + self.conv1 = nn.Conv2d(32, 48, kernel_size=( + 1, 1), stride=(1, 1), bias=False) + self.conv2 = nn.Conv2d(96, 48, kernel_size=( + 1, 1), stride=(1, 1), bias=False) + self.conv3 = nn.Conv2d(320, 48, kernel_size=( + 1, 1), stride=(1, 1), bias=False) + + # self.init_weights() + + def forward(self, inputs): + """Forward function.""" + laterals = [] + + laterals.append(self.conv1(inputs[0])) # inputs[0]==x1 + laterals.append(self.conv2(inputs[1])) # inputs[1]==x2 + laterals.append(self.conv3(inputs[2])) # inputs[2]==x3 + + return laterals + + +class TupleUnpack_Model(nn.Module): + def __init__(self): + super(TupleUnpack_Model, self).__init__() + self.backbone = TupleUnpack_backbone(1.0) + self.fpn = TupleUnpack_FPN() + + def forward(self, x): + x1 = self.backbone(x) + out = self.fpn(x1) + return out + + +dummy_input = torch.randn(2, 1, 28, 28) +SPARSITY = 0.5 +MODEL_FILE, MASK_FILE = './11_model.pth', './l1_mask.pth' + + +def prune_model_l1(model): + config_list = [{ + 'sparsity': SPARSITY, + 'op_types': ['Conv2d'] + }] + pruner = L1FilterPruner(model, config_list) + pruner.compress() + pruner.export_model(model_path=MODEL_FILE, mask_path=MASK_FILE) + + +def generate_random_sparsity(model): + _start = 0.5 + _end = 0.99 + if isinstance(model, models.mobilenet.MobileNetV2): + # 
mobilenet models have great propagation characteristics,
+        # so we use a smaller sparsity ratio to avoid pruning whole layers out
+        _start = 0.01
+        _end = 0.3
+    cfg_list = []
+    for name, module in model.named_modules():
+        if isinstance(module, nn.Conv2d):
+            sparsity = np.random.uniform(_start, _end)
+            cfg_list.append({'op_types': ['Conv2d'], 'op_names': [name],
+                             'sparsity': sparsity})
+    return cfg_list
+
+
+def generate_random_sparsity_v2(model):
+    """
+    Only select 50% of the layers to prune.
+    """
+    _start = 0.5
+    _end = 0.99
+    if isinstance(model, models.mobilenet.MobileNetV2):
+        # mobilenet models have great propagation characteristics,
+        # so we use a smaller sparsity ratio to avoid pruning whole layers out
+        _start = 0.01
+        _end = 0.3
+    cfg_list = []
+    for name, module in model.named_modules():
+        if isinstance(module, nn.Conv2d):
+            if np.random.uniform(0, 1.0) > 0.5:
+                sparsity = np.random.uniform(_start, _end)
+                cfg_list.append({'op_types': ['Conv2d'], 'op_names': [name],
+                                 'sparsity': sparsity})
+    return cfg_list
+
+
+def zero_bn_bias(model):
+    with torch.no_grad():
+        for name, module in model.named_modules():
+            if isinstance(module, nn.BatchNorm2d) \
+                    or isinstance(module, nn.BatchNorm3d) \
+                    or isinstance(module, nn.BatchNorm1d):
+                shape = module.bias.data.size()
+                device = module.bias.device
+                module.bias.data = torch.zeros(shape).to(device)
+                shape = module.running_mean.data.size()
+                module.running_mean = torch.zeros(shape).to(device)
+
+
+class L1ChannelMasker(WeightMasker):
+    def __init__(self, model, pruner):
+        self.model = model
+        self.pruner = pruner
+
+    def calc_mask(self, sparsity, wrapper, wrapper_idx=None):
+        msg = 'module type {} is not supported!'.format(wrapper.type)
+        #assert wrapper.type == 'Conv2d', msg
+        weight = wrapper.module.weight.data
+        bias = None
+        if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None:
+            bias = wrapper.module.bias.data
+
+        if wrapper.weight_mask is None:
+            mask_weight = torch.ones(weight.size()).type_as(weight).detach()
+        else:
+            mask_weight = wrapper.weight_mask.clone()
+        if bias is not None:
+            if wrapper.bias_mask is None:
+                mask_bias = torch.ones(bias.size()).type_as(bias).detach()
+            else:
+                mask_bias = wrapper.bias_mask.clone()
+        else:
+            mask_bias = None
+        base_mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias}
+
+        num_total = weight.size(1)
+        num_prune = int(num_total * sparsity)
+
+        if num_total < 2 or num_prune < 1:
+            return base_mask
+        w_abs = weight.abs()
+        if wrapper.type == 'Conv2d':
+            w_abs_structured = w_abs.sum((0, 2, 3))
+            threshold = torch.topk(
+                w_abs_structured, num_prune, largest=False)[0].max()
+            mask_weight = torch.gt(w_abs_structured, threshold)[
+                None, :, None, None].expand_as(weight).type_as(weight)
+            return {'weight_mask': mask_weight.detach()}
+        else:
+            # Linear
+            assert wrapper.type == 'Linear'
+            w_abs_structured = w_abs.sum((0))
+            threshold = torch.topk(
+                w_abs_structured, num_prune, largest=False)[0].max()
+            mask_weight = torch.gt(w_abs_structured, threshold)[
+                None, :].expand_as(weight).type_as(weight)
+            return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias}
+
+
+class L1ChannelPruner(DependencyAwarePruner):
+    def __init__(self, model, config_list, optimizer=None, dependency_aware=False, dummy_input=None):
+        super().__init__(model, config_list, pruning_algorithm='l1', optimizer=optimizer,
+                         dependency_aware=dependency_aware, dummy_input=dummy_input)
+
+    def validate_config(self, model, config_list):
+        pass
+
+
+def channel_prune(model):
+    config_list = [{
+        'sparsity': SPARSITY,
+        'op_types': ['Conv2d', 'Linear']
+    }, {
+        'op_names': ['conv1'],
+        'exclude': True
+    }]
+
+    pruner = L1ChannelPruner(model, config_list)
+    masker = L1ChannelMasker(model, pruner)
+    pruner.masker = masker
+    pruner.compress()
+    pruner.export_model(model_path=MODEL_FILE, mask_path=MASK_FILE)
+
+
+class SpeedupTestCase(TestCase):
+
+    def test_speedup_bigmodel(self):
+        prune_model_l1(BigModel())
+        model = BigModel()
+        apply_compression_results(model, MASK_FILE, 'cpu')
+        model.eval()
+        mask_out = model(dummy_input)
+
+        model.train()
+        ms = ModelSpeedup(model, dummy_input, MASK_FILE, confidence=8)
+        ms.speedup_model()
+        assert model.training
+
+        model.eval()
+        speedup_out = model(dummy_input)
+        if not torch.allclose(mask_out, speedup_out, atol=1e-07):
+            print('input:', dummy_input.size(),
+                  torch.abs(dummy_input).sum((2, 3)))
+            print('mask_out:', mask_out)
+            print('speedup_out:', speedup_out)
+            raise RuntimeError('model speedup inference result is incorrect!')
+
+        orig_model = BigModel()
+
+        assert model.backbone2.conv1.out_channels == int(
+            orig_model.backbone2.conv1.out_channels * SPARSITY)
+        assert model.backbone2.conv2.in_channels == int(
+            orig_model.backbone2.conv2.in_channels * SPARSITY)
+        assert model.backbone2.conv2.out_channels == int(
+            orig_model.backbone2.conv2.out_channels * SPARSITY)
+        assert model.backbone2.fc1.in_features == int(
+            orig_model.backbone2.fc1.in_features * SPARSITY)
+
+    def test_convtranspose_model(self):
+        ori_model = TransposeModel()
+        dummy_input = torch.rand(1, 3, 8, 8)
+        config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
+        pruner = L1FilterPruner(ori_model, config_list)
+        pruner.compress()
+        ori_model(dummy_input)
+        pruner.export_model(MODEL_FILE, MASK_FILE)
+        pruner._unwrap_model()
+        new_model = TransposeModel()
+        state_dict = torch.load(MODEL_FILE)
+        new_model.load_state_dict(state_dict)
+        ms = ModelSpeedup(new_model, dummy_input, MASK_FILE, confidence=8)
+        ms.speedup_model()
+        zero_bn_bias(ori_model)
+        zero_bn_bias(new_model)
+        ori_out = ori_model(dummy_input)
+        new_out = new_model(dummy_input)
+        ori_sum = torch.sum(ori_out)
+        speeded_sum = torch.sum(new_out)
+        print('Transpose Speedup Test: ori_sum={} speedup_sum={}'.format(
+            ori_sum, speeded_sum))
+        assert (abs(ori_sum - speeded_sum) / abs(ori_sum) < RELATIVE_THRESHOLD) or \
+            (abs(ori_sum - speeded_sum) < ABSOLUTE_THRESHOLD)
+
+    def test_speedup_integration_small(self):
+        model_list = ['resnet18', 'mobilenet_v2', 'alexnet']
+        self.speedup_integration(model_list)
+
+    def test_speedup_integration_big(self):
+        # TODO: will revert vgg16, resnet50, wide_resnet50_2 after confidence refactor
+        model_list = ['vgg11', 'resnet34', 'squeezenet1_1', 'densenet121']
+        mem_info = psutil.virtual_memory()
+        ava_gb = mem_info.available / 1024.0 / 1024 / 1024
+        print('Available memory size: %.2f GB' % ava_gb)
+        if ava_gb < 8.0:
+            # available memory is too small and we may run into an OOM exception,
+            # so skip this test in the pipeline due to the memory limitation
+            return
+        self.speedup_integration(model_list)
+
+    def speedup_integration(self, model_list, speedup_cfg=None):
+        # Note: hack trick, may be updated in the future
+        if 'win' in sys.platform or 'Win' in sys.platform:
+            print('Skip test_speedup_integration on windows due to memory limit!')
+            return
+        Gen_cfg_funcs = [generate_random_sparsity, generate_random_sparsity_v2]
+
+        # for model_name in ['vgg16', 'resnet18', 'mobilenet_v2', 'squeezenet1_1', 'densenet121',
+        #                    # 'inception_v3' inception is too large and may fail the pipeline
+        # 
'resnet50']: + for model_name in model_list: + for gen_cfg_func in Gen_cfg_funcs: + kwargs = { + 'pretrained': True + } + if model_name == 'resnet50': + # testing multiple groups + kwargs = { + 'pretrained': False, + 'groups': 4 + } + Model = getattr(models, model_name) + net = Model(**kwargs).to(device) + speedup_model = Model(**kwargs).to(device) + net.eval() # this line is necessary + speedup_model.eval() + # random generate the prune config for the pruner + cfgs = gen_cfg_func(net) + print("Testing {} with compression config \n {}".format( + model_name, cfgs)) + if len(cfgs) == 0: + continue + pruner = L1FilterPruner(net, cfgs) + pruner.compress() + pruner.export_model(MODEL_FILE, MASK_FILE) + pruner._unwrap_model() + state_dict = torch.load(MODEL_FILE) + speedup_model.load_state_dict(state_dict) + zero_bn_bias(net) + zero_bn_bias(speedup_model) + + data = torch.ones(BATCH_SIZE, 3, 128, 128).to(device) + if speedup_cfg is None: + speedup_cfg = {} + ms = ModelSpeedup(speedup_model, data, + MASK_FILE, confidence=4, **speedup_cfg) + + ms.speedup_model() + + speedup_model.eval() + + ori_out = net(data) + speeded_out = speedup_model(data) + ori_sum = torch.sum(ori_out).item() + speeded_sum = torch.sum(speeded_out).item() + print('Sum of the output of %s (before speedup):' % + model_name, ori_sum) + print('Sum of the output of %s (after speedup):' % + model_name, speeded_sum) + assert (abs(ori_sum - speeded_sum) / abs(ori_sum) < RELATIVE_THRESHOLD) or \ + (abs(ori_sum - speeded_sum) < ABSOLUTE_THRESHOLD) + print("Collecting Garbage") + gc.collect(2) + + def test_channel_prune(self): + orig_net = resnet18(num_classes=10).to(device) + channel_prune(orig_net) + state_dict = torch.load(MODEL_FILE) + + orig_net = resnet18(num_classes=10).to(device) + orig_net.load_state_dict(state_dict) + apply_compression_results(orig_net, MASK_FILE) + orig_net.eval() + + net = resnet18(num_classes=10).to(device) + + net.load_state_dict(state_dict) + net.eval() + + data = torch.randn(BATCH_SIZE, 3, 128, 128).to(device) + ms = ModelSpeedup(net, data, MASK_FILE, confidence=8) + ms.speedup_model() + ms.bound_model(data) + + net.eval() + + ori_sum = orig_net(data).abs().sum().item() + speeded_sum = net(data).abs().sum().item() + + print(ori_sum, speeded_sum) + assert (abs(ori_sum - speeded_sum) / abs(ori_sum) < RELATIVE_THRESHOLD) or \ + (abs(ori_sum - speeded_sum) < ABSOLUTE_THRESHOLD) + + def test_speedup_tupleunpack(self): + """This test is reported in issue3645""" + model = TupleUnpack_Model() + cfg_list = [{'op_types': ['Conv2d'], 'sparsity':0.5}] + dummy_input = torch.rand(2, 3, 224, 224) + pruner = L1FilterPruner(model, cfg_list) + pruner.compress() + model(dummy_input) + pruner.export_model(MODEL_FILE, MASK_FILE) + ms = ModelSpeedup(model, dummy_input, MASK_FILE, confidence=8) + ms.speedup_model() + + def test_finegrained_speedup(self): + """ Test the speedup on the fine-grained sparsity""" + class MLP(nn.Module): + def __init__(self): + super(MLP, self).__init__() + self.fc1 = nn.Linear(1024, 1024) + self.fc2 = nn.Linear(1024, 1024) + self.fc3 = nn.Linear(1024, 512) + self.fc4 = nn.Linear(512, 10) + + def forward(self, x): + x = x.view(-1, 1024) + x = self.fc1(x) + x = self.fc2(x) + x = self.fc3(x) + x = self.fc4(x) + return x + model = MLP().to(device) + dummy_input = torch.rand(16, 1, 32, 32).to(device) + cfg_list = [{'op_types': ['Linear'], 'sparsity':0.99}] + pruner = LevelPruner(model, cfg_list) + pruner.compress() + print('Original Arch') + print(model) + pruner.export_model(MODEL_FILE, MASK_FILE) + 
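+        # unwrap the pruner first so that ModelSpeedup traces the plain modules
+        # rather than the pruner wrappers (the masks were already exported above)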
pruner._unwrap_model()
+        ms = ModelSpeedup(model, dummy_input, MASK_FILE, confidence=8)
+        ms.speedup_model()
+        print("Fine-grained sped-up model")
+        print(model)
+
+    def tearDown(self):
+        if os.path.exists(MODEL_FILE):
+            os.remove(MODEL_FILE)
+        if os.path.exists(MASK_FILE):
+            os.remove(MASK_FILE)
+        # GC to release memory
+        gc.collect(2)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/compression/v1/test_pruners.py b/test/ut/compression/v1/test_pruners.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4005b2dc6878c704746603309bce4332127f96a
--- /dev/null
+++ b/test/ut/compression/v1/test_pruners.py
@@ -0,0 +1,324 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.data
+import math
+import sys
+import unittest
+from unittest import TestCase, main
+from nni.algorithms.compression.pytorch.pruning import LevelPruner, SlimPruner, FPGMPruner, L1FilterPruner, \
+    L2FilterPruner, AGPPruner, ActivationMeanRankFilterPruner, ActivationAPoZRankFilterPruner, \
+    TaylorFOWeightFilterPruner, NetAdaptPruner, SimulatedAnnealingPruner, ADMMPruner, \
+    AutoCompressPruner, AMCPruner

+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+from sdk.models.pytorch_models.mobilenet import MobileNet
+
+def validate_sparsity(wrapper, sparsity, bias=False):
+    masks = [wrapper.weight_mask]
+    if bias and wrapper.bias_mask is not None:
+        masks.append(wrapper.bias_mask)
+    for m in masks:
+        actual_sparsity = (m == 0).sum().item() / m.numel()
+        msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(actual_sparsity, sparsity)
+        assert math.isclose(actual_sparsity, sparsity, abs_tol=0.1), msg
+
+prune_config = {
+    'level': {
+        'pruner_class': LevelPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['default'],
+        }],
+        'validators': [
+            lambda model: validate_sparsity(model.conv1, 0.5, False),
+            lambda model: validate_sparsity(model.fc, 0.5, False)
+        ]
+    },
+    'agp': {
+        'pruner_class': AGPPruner,
+        'config_list': [{
+            'sparsity': 0.8,
+            'op_types': ['Conv2d']
+        }],
+        'trainer': lambda model, optimizer, criterion, epoch: model,
+        'validators': []
+    },
+    'slim': {
+        'pruner_class': SlimPruner,
+        'config_list': [{
+            'sparsity': 0.7,
+            'op_types': ['BatchNorm2d']
+        }],
+        'trainer': lambda model, optimizer, criterion, epoch: model,
+        'validators': [
+            lambda model: validate_sparsity(model.bn1, 0.7, model.bias)
+        ]
+    },
+    'fpgm': {
+        'pruner_class': FPGMPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d']
+        }],
+        'validators': [
+            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
+        ]
+    },
+    'l1': {
+        'pruner_class': L1FilterPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d'],
+        }],
+        'validators': [
+            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
+        ]
+    },
+    'l2': {
+        'pruner_class': L2FilterPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d'],
+        }],
+        'validators': [
+            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
+        ]
+    },
+    'taylorfo': {
+        'pruner_class': TaylorFOWeightFilterPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d'],
+        }],
+        'trainer': lambda model, optimizer, criterion, epoch: model,
+        'validators': [
+            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
+        ]
+    },
+    'mean_activation': {
+        'pruner_class': ActivationMeanRankFilterPruner,
+        'config_list': [{
+            'sparsity': 0.5,
'op_types': ['Conv2d'], + }], + 'trainer': lambda model, optimizer, criterion, epoch: model, + 'validators': [ + lambda model: validate_sparsity(model.conv1, 0.5, model.bias) + ] + }, + 'apoz': { + 'pruner_class': ActivationAPoZRankFilterPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'], + }], + 'trainer': lambda model, optimizer, criterion, epoch: model, + 'validators': [ + lambda model: validate_sparsity(model.conv1, 0.5, model.bias) + ] + }, + 'netadapt': { + 'pruner_class': NetAdaptPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'] + }], + 'short_term_fine_tuner': lambda model: model, + 'evaluator':lambda model: 0.9, + 'validators': [] + }, + 'simulatedannealing': { + 'pruner_class': SimulatedAnnealingPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'] + }], + 'evaluator':lambda model: 0.9, + 'validators': [] + }, + 'admm': { + 'pruner_class': ADMMPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'], + }], + 'trainer': lambda model, optimizer, criterion, epoch : model, + 'validators': [ + lambda model: validate_sparsity(model.conv1, 0.5, model.bias) + ] + }, + 'autocompress_l1': { + 'pruner_class': AutoCompressPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'], + }], + 'base_algo': 'l1', + 'trainer': lambda model, optimizer, criterion, epoch : model, + 'evaluator': lambda model: 0.9, + 'dummy_input': torch.randn([64, 1, 28, 28]), + 'validators': [] + }, + 'autocompress_l2': { + 'pruner_class': AutoCompressPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'], + }], + 'base_algo': 'l2', + 'trainer': lambda model, optimizer, criterion, epoch : model, + 'evaluator': lambda model: 0.9, + 'dummy_input': torch.randn([64, 1, 28, 28]), + 'validators': [] + }, + 'autocompress_fpgm': { + 'pruner_class': AutoCompressPruner, + 'config_list': [{ + 'sparsity': 0.5, + 'op_types': ['Conv2d'], + }], + 'base_algo': 'fpgm', + 'trainer': lambda model, optimizer, criterion, epoch : model, + 'evaluator': lambda model: 0.9, + 'dummy_input': torch.randn([64, 1, 28, 28]), + 'validators': [] + }, + 'amc': { + 'pruner_class': AMCPruner, + 'config_list':[{ + 'op_types': ['Conv2d', 'Linear'] + }] + } +} + +class Model(nn.Module): + def __init__(self, bias=True): + super(Model, self).__init__() + self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1, bias=bias) + self.bn1 = nn.BatchNorm2d(8) + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(8, 2, bias=bias) + self.bias = bias + def forward(self, x): + return self.fc(self.pool(self.bn1(self.conv1(x))).view(x.size(0), -1)) + +class SimpleDataset: + def __getitem__(self, index): + return torch.randn(3, 32, 32), 1. 
+ + def __len__(self): + return 1000 + +def train(model, train_loader, criterion, optimizer): + model.train() + device = next(model.parameters()).device + x = torch.randn(2, 1, 28, 28).to(device) + y = torch.tensor([0, 1]).long().to(device) + # print('hello...') + + for _ in range(2): + out = model(x) + loss = criterion(out, y) + optimizer.zero_grad() + loss.backward() + + optimizer.step() + +def pruners_test(pruner_names=['level', 'agp', 'slim', 'fpgm', 'l1', 'l2', 'taylorfo', 'mean_activation', 'apoz', 'netadapt', 'simulatedannealing', 'admm', 'autocompress_l1', 'autocompress_l2', 'autocompress_fpgm',], bias=True): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dummy_input = torch.randn(2, 1, 28, 28).to(device) + + criterion = torch.nn.CrossEntropyLoss() + train_loader = torch.utils.data.DataLoader(SimpleDataset(), batch_size=16, shuffle=False, drop_last=True) + + def trainer(model, optimizer, criterion, epoch): + return train(model, train_loader, criterion, optimizer) + + for pruner_name in pruner_names: + print('testing {}...'.format(pruner_name)) + + model = Model(bias=bias).to(device) + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + config_list = prune_config[pruner_name]['config_list'] + + if pruner_name == 'netadapt': + pruner = prune_config[pruner_name]['pruner_class'](model, config_list, short_term_fine_tuner=prune_config[pruner_name]['short_term_fine_tuner'], evaluator=prune_config[pruner_name]['evaluator']) + elif pruner_name == 'simulatedannealing': + pruner = prune_config[pruner_name]['pruner_class'](model, config_list, evaluator=prune_config[pruner_name]['evaluator']) + elif pruner_name in ('agp', 'slim', 'taylorfo', 'apoz', 'mean_activation'): + pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=trainer, optimizer=optimizer, criterion=criterion) + elif pruner_name == 'admm': + pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=trainer) + elif pruner_name.startswith('autocompress'): + pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=prune_config[pruner_name]['trainer'], evaluator=prune_config[pruner_name]['evaluator'], criterion=torch.nn.CrossEntropyLoss(), dummy_input=dummy_input, base_algo=prune_config[pruner_name]['base_algo']) + else: + pruner = prune_config[pruner_name]['pruner_class'](model, config_list) + + pruner.compress() + pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=(2,1,28,28), device=device) + + for v in prune_config[pruner_name]['validators']: + v(model) + + filePaths = ['./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', './search_history.csv', './search_result.json'] + for f in filePaths: + if os.path.exists(f): + os.remove(f) + + +def _test_agp(pruning_algorithm): + train_loader = torch.utils.data.DataLoader(SimpleDataset(), batch_size=16, shuffle=False, drop_last=True) + model = Model() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + def trainer(model, optimizer, criterion, epoch): + return train(model, train_loader, criterion, optimizer) + + config_list = prune_config['agp']['config_list'] + pruner = AGPPruner(model, config_list, optimizer=optimizer, trainer=trainer, criterion=torch.nn.CrossEntropyLoss(), pruning_algorithm=pruning_algorithm) + pruner.compress() + + target_sparsity = pruner.compute_target_sparsity(config_list[0]) + actual_sparsity = (model.conv1.weight_mask == 0).sum().item() / model.conv1.weight_mask.numel() + # set abs_tol = 0.2, considering the sparsity 
error for channel pruning when the number of channels is small.
+    assert math.isclose(actual_sparsity, target_sparsity, abs_tol=0.2)
+
+
+class PrunerTestCase(TestCase):
+    def test_pruners(self):
+        pruners_test(bias=True)
+
+    def test_pruners_no_bias(self):
+        pruners_test(bias=False)
+
+    def test_agp_pruner(self):
+        for pruning_algorithm in ['l1', 'l2', 'fpgm', 'taylorfo', 'apoz']:
+            _test_agp(pruning_algorithm)
+
+        for pruning_algorithm in ['level']:
+            prune_config['agp']['config_list'][0]['op_types'] = ['default']
+            _test_agp(pruning_algorithm)
+
+    def testAMC(self):
+        model = MobileNet(n_class=10)
+
+        def validate(val_loader, model):
+            return 80.
+        val_loader = torch.utils.data.DataLoader(SimpleDataset(), batch_size=16, shuffle=False, drop_last=True)
+        config_list = prune_config['amc']['config_list']
+        pruner = AMCPruner(model, config_list, validate, val_loader, train_episode=1)
+        pruner.compress()
+
+        pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=(2, 3, 32, 32))
+        filePaths = ['./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth']
+        for f in filePaths:
+            if os.path.exists(f):
+                os.remove(f)
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/compression/v1/test_transformer_pruners.py b/test/ut/compression/v1/test_transformer_pruners.py
new file mode 100644
index 0000000000000000000000000000000000000000..030df0f4feef4c9dae976008fdca68eeb3b4e96c
--- /dev/null
+++ b/test/ut/compression/v1/test_transformer_pruners.py
@@ -0,0 +1,140 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.data
+import math
+import sys
+import unittest
+from unittest import TestCase, main
+
+from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+from sdk.models.pytorch_models.transformer import TransformerEncoder
+
+
+def validate_sparsity(wrapper, sparsity, bias=False):
+    masks = [wrapper.weight_mask]
+    if bias and wrapper.bias_mask is not None:
+        masks.append(wrapper.bias_mask)
+    for m in masks:
+        actual_sparsity = (m == 0).sum().item() / m.numel()
+        msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(actual_sparsity, sparsity)
+        assert math.isclose(actual_sparsity, sparsity, abs_tol=0.1), msg
+
+
+class Model(nn.Module):
+    """
+    A binary classifier using a transformer encoder for contextual embedding.
+ """ + def __init__(self, n_layer, hidden_dim, n_head): + super(Model, self).__init__() + self.embedding = TransformerEncoder(vocab_size=100, hidden_dim=hidden_dim, n_layers=n_layer, n_heads=n_head) + self.classifier = nn.Linear(hidden_dim, 1) + + def forward(self, x, mask): + raw_output = self.embedding(x, mask) + pooled_output = raw_output[0] + prediction = F.sigmoid(self.classifier(pooled_output)).squeeze() + return prediction + + +def train(model, dataloader, criterion, optimizer): + model.train() + device = next(model.parameters()).device + for _ in range(2): + y = torch.ones(10).to(device) + out = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device)) + loss = criterion(out, y) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + +def dry_run(model): + device = next(model.parameters()).device + for _ in range(2): + y = torch.ones(10).to(device) + _ = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device)) + + +def head_pruner_tests(criterion, global_sort, use_graph, iterative): + print("Testing criterion {} with global_sort={} and use_graph={}".format(criterion, global_sort, use_graph)) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Build config list and arguments + config_list = [{'sparsity': 0.5, 'op_types': ['Linear']}] + + kwargs = {'ranking_criterion': criterion, 'head_hidden_dim': 64} + if global_sort: + kwargs['global_sort'] = True + else: + kwargs['global_sort'] = False + + if use_graph: + attention_name_groups = list(zip(['embedding.layers.{}.self_attn.q_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.k_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.v_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.output_proj'.format(i) for i in range(6)])) + kwargs['attention_name_groups'] = attention_name_groups + else: + dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) + kwargs['dummy_input'] = dummy_input + + if iterative: + kwargs['num_iterations'] = 2 + kwargs['epochs_per_iteration'] = 1 + + n_layers = 6 + n_heads = 8 + hidden_dim = 512 + model = Model(n_layers, hidden_dim, n_heads) + model.to(device) + kwargs['optimizer'] = torch.optim.SGD(model.parameters(), lr=0.001) + + def trainer(model, optimizer, criterion, epoch): + return train(model, None, criterion, optimizer) + kwargs['trainer'] = trainer + kwargs['criterion'] = nn.BCELoss() + + def forward_runner(model): + return dry_run(model) + kwargs['forward_runner'] = forward_runner + + # create pruner and call compress() + pruner = TransformerHeadPruner(model, config_list, **kwargs) + pruner.compress() + + # test model and mask export + pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device) + dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) + pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', + dummy_input=dummy_input, opset_version=10) + + # validate sparsity + if not global_sort: + for wrapper in pruner.modules_wrapper: + validate_sparsity(wrapper, wrapper.config['sparsity']) + + +class PrunerTestCase(TestCase): + def test_head_pruner(self): + for criterion in ["l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo"]: + for global_sort in [False, True]: + for use_graph in [False, True]: + for iterative in [False, True]: + head_pruner_tests(criterion, global_sort, use_graph, iterative) + + file_paths = ['./model_tmp.pth', './mask_tmp.pth', 
'./onnx_tmp.pth', './search_history.csv', + './search_result.json'] + for f in file_paths: + if os.path.exists(f): + os.remove(f) + + +if __name__ == '__main__': + main() diff --git a/test/ut/compression/v2/__init__.py b/test/ut/compression/v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/ut/compression/v2/test_iterative_pruner_torch.py b/test/ut/compression/v2/test_iterative_pruner_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..7e3209073b402fb766a221d651c4aa80c9cab1d2 --- /dev/null +++ b/test/ut/compression/v2/test_iterative_pruner_torch.py @@ -0,0 +1,140 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import random +import unittest + +import torch +import torch.nn.functional as F + +import nni +from nni.algorithms.compression.v2.pytorch.pruning import ( + LinearPruner, + AGPPruner, + LotteryTicketPruner, + SimulatedAnnealingPruner, + AutoCompressPruner, + AMCPruner +) +from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 10, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(10) + self.conv2 = torch.nn.Conv2d(10, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(x.size(0), -1) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def trainer(model, optimizer, criterion): + model.train() + for _ in range(10): + input = torch.rand(10, 1, 28, 28) + label = torch.Tensor(list(range(10))).type(torch.LongTensor) + optimizer.zero_grad() + output = model(input) + loss = criterion(output, label) + loss.backward() + optimizer.step() + + +def get_optimizer(model): + return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + + +criterion = torch.nn.CrossEntropyLoss() + + +def evaluator(model): + return random.random() + + +def finetuner(model): + optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + trainer(model, optimizer, criterion) + + +class IterativePrunerTestCase(unittest.TestCase): + def test_linear_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = LinearPruner(model, config_list, 'level', 3, log_dir='../../../logs') + pruner.compress() + _, pruned_model, masks, _, _ = pruner.get_best_result() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_agp_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = AGPPruner(model, config_list, 'level', 3, log_dir='../../../logs') + pruner.compress() + _, pruned_model, masks, _, _ = pruner.get_best_result() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_lottery_ticket_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = LotteryTicketPruner(model, config_list, 'level', 3, log_dir='../../../logs') + pruner.compress() + _, pruned_model, masks, _, _ = 
pruner.get_best_result() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_simulated_annealing_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}] + pruner = SimulatedAnnealingPruner(model, config_list, evaluator, start_temperature=40, log_dir='../../../logs') + pruner.compress() + _, pruned_model, masks, _, _ = pruner.get_best_result() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_auto_compress_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}] + admm_params = { + 'trainer': trainer, + 'traced_optimizer': get_optimizer(model), + 'criterion': criterion, + 'iterations': 10, + 'training_epochs': 1 + } + sa_params = { + 'evaluator': evaluator, + 'start_temperature': 40 + } + pruner = AutoCompressPruner(model, config_list, 10, admm_params, sa_params=sa_params, log_dir='../../../logs') + pruner.compress() + _, pruned_model, masks, _, _ = pruner.get_best_result() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + print(sparsity_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_amc_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5, 'max_sparsity_per_layer': 0.8}] + dummy_input = torch.rand(10, 1, 28, 28) + ddpg_params = {'hidden1': 300, 'hidden2': 300, 'lr_c': 1e-3, 'lr_a': 1e-4, 'warmup': 5, 'discount': 1., + 'bsize': 64, 'rmsize': 100, 'window_length': 1, 'tau': 0.01, 'init_delta': 0.5, 'delta_decay': 0.99, + 'max_episode_length': 1e9, 'epsilon': 50000} + pruner = AMCPruner(10, model, config_list, dummy_input, evaluator, finetuner=finetuner, ddpg_params=ddpg_params, target='flops', log_dir='../../../logs') + pruner.compress() + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/compression/v2/test_pruner_torch.py b/test/ut/compression/v2/test_pruner_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f3828f9daf651ac5c59fe77db90c278355b5c5 --- /dev/null +++ b/test/ut/compression/v2/test_pruner_torch.py @@ -0,0 +1,172 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
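+#
+# The tests below all follow the same pattern: build a small CNN, run a
+# one-shot pruner, unwrap it, then check the sparsity reconstructed from the
+# produced masks. A minimal sketch of the pattern used in every test here:
+#
+#     pruner = LevelPruner(model=model, config_list=config_list)
+#     pruned_model, masks = pruner.compress()
+#     pruner._unwrap_model()
+#     sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
+#     assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82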
+ +import unittest + +import torch +import torch.nn.functional as F + +import nni +from nni.algorithms.compression.v2.pytorch.pruning import ( + LevelPruner, + L1NormPruner, + L2NormPruner, + SlimPruner, + FPGMPruner, + ActivationAPoZRankPruner, + ActivationMeanRankPruner, + TaylorFOWeightPruner, + ADMMPruner, + MovementPruner +) +from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 5, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(5) + self.conv2 = torch.nn.Conv2d(5, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 10) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def trainer(model, optimizer, criterion): + model.train() + for _ in range(10): + input = torch.rand(10, 1, 28, 28) + label = torch.Tensor(list(range(10))).type(torch.LongTensor) + optimizer.zero_grad() + output = model(input) + loss = criterion(output, label) + loss.backward() + optimizer.step() + + +def get_optimizer(model): + return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + + +criterion = torch.nn.CrossEntropyLoss() + + +class PrunerTestCase(unittest.TestCase): + def test_level_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = LevelPruner(model=model, config_list=config_list) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_l1_norm_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = L1NormPruner(model=model, config_list=config_list, mode='dependency_aware', + dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_l2_norm_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = L2NormPruner(model=model, config_list=config_list, mode='dependency_aware', + dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_fpgm_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = FPGMPruner(model=model, config_list=config_list, mode='dependency_aware', + dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_slim_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['BatchNorm2d'], 'total_sparsity': 0.8}] + pruner = SlimPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model), + criterion=criterion, training_epochs=1, 
scale=0.001, mode='global') + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_activation_apoz_rank_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = ActivationAPoZRankPruner(model=model, config_list=config_list, trainer=trainer, + traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5, + activation='relu', mode='dependency_aware', + dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_activation_mean_rank_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = ActivationMeanRankPruner(model=model, config_list=config_list, trainer=trainer, + traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5, + activation='relu', mode='dependency_aware', + dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_taylor_fo_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = TaylorFOWeightPruner(model=model, config_list=config_list, trainer=trainer, + traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5, + mode='dependency_aware', dummy_input=torch.rand(10, 1, 28, 28)) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_admm_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8, 'rho': 1e-3}] + pruner = ADMMPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model), + criterion=criterion, iterations=2, training_epochs=1) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + def test_movement_pruner(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + pruner = MovementPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model), + criterion=criterion, training_epochs=5, warm_up_step=0, cool_down_beginning_step=4) + pruned_model, masks = pruner.compress() + pruner._unwrap_model() + sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list) + assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82 + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/compression/v2/test_pruning_tools_torch.py b/test/ut/compression/v2/test_pruning_tools_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..be7010dc2a2548a04f0d089f6657a308b7c0c785 --- /dev/null +++ b/test/ut/compression/v2/test_pruning_tools_torch.py @@ -0,0 +1,206 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
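+#
+# These tests exercise the v2 pruning-tools pipeline: data collectors gather
+# weights or tensors captured by hooks during training, metrics calculators
+# reduce them to per-channel scores, and sparsity allocators turn the scores
+# into masks. Worked example from test_metrics_calculator below:
+# NormMetricsCalculator(dim=0, p=2) maps a (3, 3, 3) all-ones tensor to three
+# scores of sqrt(3 * 3) = 3, i.e. the L2 norm of each 3x3 slice along dim 0.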
+ +import unittest + +import torch +import torch.nn.functional as F + +import nni +from nni.algorithms.compression.v2.pytorch.base import Pruner +from nni.algorithms.compression.v2.pytorch.pruning.tools import ( + WeightDataCollector, + WeightTrainerBasedDataCollector, + SingleHookTrainerBasedDataCollector +) +from nni.algorithms.compression.v2.pytorch.pruning.tools import ( + NormMetricsCalculator, + MultiDataNormMetricsCalculator, + DistMetricsCalculator, + APoZRankMetricsCalculator, + MeanRankMetricsCalculator +) +from nni.algorithms.compression.v2.pytorch.pruning.tools import ( + NormalSparsityAllocator, + GlobalSparsityAllocator +) +from nni.algorithms.compression.v2.pytorch.pruning.tools.base import HookCollectorInfo +from nni.algorithms.compression.v2.pytorch.utils import get_module_by_name +from nni.algorithms.compression.v2.pytorch.utils.constructor_helper import OptimizerConstructHelper + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 5, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(5) + self.conv2 = torch.nn.Conv2d(5, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 10) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def trainer(model, optimizer, criterion): + model.train() + for _ in range(10): + input = torch.rand(10, 1, 28, 28) + label = torch.Tensor(list(range(10))).type(torch.LongTensor) + optimizer.zero_grad() + output = model(input) + loss = criterion(output, label) + loss.backward() + optimizer.step() + + +def get_optimizer(model): + return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) + + +criterion = torch.nn.CrossEntropyLoss() + + +class PruningToolsTestCase(unittest.TestCase): + def test_data_collector(self): + model = TorchModel() + w1 = torch.rand(5, 1, 5, 5) + w2 = torch.rand(10, 5, 5, 5) + model.conv1.weight.data = w1 + model.conv2.weight.data = w2 + + config_list = [{'op_types': ['Conv2d']}] + pruner = Pruner(model, config_list) + + # Test WeightDataCollector + data_collector = WeightDataCollector(pruner) + data = data_collector.collect() + assert all(torch.equal(get_module_by_name(model, module_name)[1].module.weight.data, data[module_name]) for module_name in ['conv1', 'conv2']) + + # Test WeightTrainerBasedDataCollector + def opt_after(): + model.conv1.module.weight.data = torch.ones(5, 1, 5, 5) + model.conv2.module.weight.data = torch.ones(10, 5, 5, 5) + + optimizer_helper = OptimizerConstructHelper.from_trace(model, get_optimizer(model)) + data_collector = WeightTrainerBasedDataCollector(pruner, trainer, optimizer_helper, criterion, 1, opt_after_tasks=[opt_after]) + data = data_collector.collect() + assert all(torch.equal(get_module_by_name(model, module_name)[1].module.weight.data, data[module_name]) for module_name in ['conv1', 'conv2']) + assert all(t.numel() == (t == 1).type_as(t).sum().item() for t in data.values()) + + # Test SingleHookTrainerBasedDataCollector + def _collector(buffer, weight_tensor): + def collect_taylor(grad): + if len(buffer) < 2: + buffer.append(grad.clone().detach()) + return collect_taylor + hook_targets = {'conv1': model.conv1.module.weight, 'conv2': model.conv2.module.weight} + collector_info = HookCollectorInfo(hook_targets, 
'tensor', _collector) + + optimizer_helper = OptimizerConstructHelper.from_trace(model, get_optimizer(model)) + data_collector = SingleHookTrainerBasedDataCollector(pruner, trainer, optimizer_helper, criterion, 2, collector_infos=[collector_info]) + data = data_collector.collect() + assert all(len(t) == 2 for t in data.values()) + + def test_metrics_calculator(self): + # Test NormMetricsCalculator + metrics_calculator = NormMetricsCalculator(dim=0, p=2) + data = { + '1': torch.ones(3, 3, 3), + '2': torch.ones(4, 4) * 2 + } + result = { + '1': torch.ones(3) * 3, + '2': torch.ones(4) * 4 + } + metrics = metrics_calculator.calculate_metrics(data) + assert all(torch.equal(result[k], v) for k, v in metrics.items()) + + # Test DistMetricsCalculator + metrics_calculator = DistMetricsCalculator(dim=0, p=2) + data = { + '1': torch.tensor([[1, 2], [4, 6]], dtype=torch.float32), + '2': torch.tensor([[0, 0], [1, 1]], dtype=torch.float32) + } + result = { + '1': torch.tensor([5, 5], dtype=torch.float32), + '2': torch.sqrt(torch.tensor([2, 2], dtype=torch.float32)) + } + metrics = metrics_calculator.calculate_metrics(data) + assert all(torch.equal(result[k], v) for k, v in metrics.items()) + + # Test MultiDataNormMetricsCalculator + metrics_calculator = MultiDataNormMetricsCalculator(dim=0, p=1) + data = { + '1': [2, torch.ones(3, 3, 3) * 2], + '2': [2, torch.ones(4, 4) * 2] + } + result = { + '1': torch.ones(3) * 18, + '2': torch.ones(4) * 8 + } + metrics = metrics_calculator.calculate_metrics(data) + assert all(torch.equal(result[k], v) for k, v in metrics.items()) + + # Test APoZRankMetricsCalculator + metrics_calculator = APoZRankMetricsCalculator(dim=1) + data = { + '1': [2, torch.tensor([[1, 1], [1, 1]], dtype=torch.float32)], + '2': [2, torch.tensor([[0, 0, 1], [0, 0, 0]], dtype=torch.float32)] + } + result = { + '1': torch.tensor([0.5, 0.5], dtype=torch.float32), + '2': torch.tensor([1, 1, 0.75], dtype=torch.float32) + } + metrics = metrics_calculator.calculate_metrics(data) + assert all(torch.equal(result[k], v) for k, v in metrics.items()) + + # Test MeanRankMetricsCalculator + metrics_calculator = MeanRankMetricsCalculator(dim=1) + data = { + '1': [2, torch.tensor([[0, 1], [1, 0]], dtype=torch.float32)], + '2': [2, torch.tensor([[0, 0, 1], [0, 0, 0]], dtype=torch.float32)] + } + result = { + '1': torch.tensor([0.25, 0.25], dtype=torch.float32), + '2': torch.tensor([0, 0, 0.25], dtype=torch.float32) + } + metrics = metrics_calculator.calculate_metrics(data) + assert all(torch.equal(result[k], v) for k, v in metrics.items()) + + def test_sparsity_allocator(self): + # Test NormalSparsityAllocator + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}] + pruner = Pruner(model, config_list) + metrics = { + 'conv1': torch.rand(5, 1, 5, 5), + 'conv2': torch.rand(10, 5, 5, 5) + } + sparsity_allocator = NormalSparsityAllocator(pruner) + masks = sparsity_allocator.generate_sparsity(metrics) + assert all(v['weight'].sum() / v['weight'].numel() == 0.2 for k, v in masks.items()) + + # Test GlobalSparsityAllocator + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}] + pruner = Pruner(model, config_list) + sparsity_allocator = GlobalSparsityAllocator(pruner) + masks = sparsity_allocator.generate_sparsity(metrics) + total_elements, total_masked_elements = 0, 0 + for t in masks.values(): + total_elements += t['weight'].numel() + total_masked_elements += t['weight'].sum().item() + assert total_masked_elements / total_elements == 0.2 + + +if 
__name__ == '__main__': + unittest.main() diff --git a/test/ut/compression/v2/test_scheduler.py b/test/ut/compression/v2/test_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..916b7f763c03cb238b7ca83c37da6cae44f9e7c4 --- /dev/null +++ b/test/ut/compression/v2/test_scheduler.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import unittest + +import torch +import torch.nn.functional as F + +from nni.algorithms.compression.v2.pytorch.pruning import PruningScheduler, L1NormPruner +from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 5, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(5) + self.conv2 = torch.nn.Conv2d(5, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 10) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +class PruningSchedulerTestCase(unittest.TestCase): + def test_pruning_scheduler(self): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + + task_generator = AGPTaskGenerator(1, model, config_list) + pruner = L1NormPruner(model, config_list) + scheduler = PruningScheduler(pruner, task_generator) + + scheduler.compress() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/compression/v2/test_task_generator.py b/test/ut/compression/v2/test_task_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..ffeabe4c719565d3a23163c0291923cc2a03dd9f --- /dev/null +++ b/test/ut/compression/v2/test_task_generator.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
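+
+# Each generator below is exercised by run_task_generator_(), which pulls tasks
+# one at a time and feeds back a synthetic, monotonically increasing score
+# (1 - 0.9 ** (2 ** n) after n results); the expected task count differs per type.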
+ +from typing import List +import unittest + +import torch +import torch.nn.functional as F + +from nni.algorithms.compression.v2.pytorch.base import TaskResult +from nni.algorithms.compression.v2.pytorch.pruning.tools import ( + AGPTaskGenerator, + LinearTaskGenerator, + LotteryTicketTaskGenerator, + SimulatedAnnealingTaskGenerator +) + + +class TorchModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(1, 5, 5, 1) + self.bn1 = torch.nn.BatchNorm2d(5) + self.conv2 = torch.nn.Conv2d(5, 10, 5, 1) + self.bn2 = torch.nn.BatchNorm2d(10) + self.fc1 = torch.nn.Linear(4 * 4 * 10, 100) + self.fc2 = torch.nn.Linear(100, 10) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.bn2(self.conv2(x))) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 10) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def run_task_generator(task_generator_type): + model = TorchModel() + config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}] + + if task_generator_type == 'agp': + task_generator = AGPTaskGenerator(5, model, config_list) + elif task_generator_type == 'linear': + task_generator = LinearTaskGenerator(5, model, config_list) + elif task_generator_type == 'lottery_ticket': + task_generator = LotteryTicketTaskGenerator(5, model, config_list) + elif task_generator_type == 'simulated_annealing': + task_generator = SimulatedAnnealingTaskGenerator(model, config_list) + + count = run_task_generator_(task_generator) + + if task_generator_type == 'agp': + assert count == 6 + elif task_generator_type == 'linear': + assert count == 6 + elif task_generator_type == 'lottery_ticket': + assert count == 5 + elif task_generator_type == 'simulated_annealing': + assert count == 17 + + +def run_task_generator_(task_generator): + task = task_generator.next() + factor = 0.9 + count = 0 + while task is not None: + factor = factor ** 2 + count += 1 + task_result = TaskResult(task.task_id, TorchModel(), {}, {}, 1 - factor) + task_generator.receive_task_result(task_result) + task = task_generator.next() + return count + + +class TaskGenerator(unittest.TestCase): + def test_agp_task_generator(self): + run_task_generator('agp') + + def test_linear_task_generator(self): + run_task_generator('linear') + + def test_lottery_ticket_task_generator(self): + run_task_generator('lottery_ticket') + + def test_simulated_annealing_task_generator(self): + run_task_generator('simulated_annealing') + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/experiment/assets/config.yaml b/test/ut/experiment/assets/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92a1b47d731fdbedd498a3d29719ea323bb50a22 --- /dev/null +++ b/test/ut/experiment/assets/config.yaml @@ -0,0 +1,32 @@ +experimentName: test case +searchSpaceFile: search_space.json +trialCommand: python main.py +trialCodeDirectory: ../assets +trialConcurrency: 2 +trialGpuNumber: 1 +maxExperimentDuration: 1.5h +maxTrialNumber: 10 +maxTrialDuration: 60 +nniManagerIp: 1.2.3.4 +debug: true +logLevel: warning +tunerGpuIndices: 0 +assessor: + name: assess +advisor: + className: Advisor + codeDirectory: . + classArgs: {random_seed: 0} +trainingService: + platform: local + useActiveGpu: false + maxTrialNumberPerGpu: 2 + gpuIndices: 1,2 + reuseMode: true +sharedStorage: + storageType: NFS + localMountPoint: . 
# git cannot commit empty dir, so just use this + remoteMountPoint: /tmp + localMounted: usermount + nfsServer: nfs.test.case + exportedDirectory: root diff --git a/test/ut/experiment/assets/search_space.json b/test/ut/experiment/assets/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..6ebebfe47fb4b15f47158de5cd38024d90f90c46 --- /dev/null +++ b/test/ut/experiment/assets/search_space.json @@ -0,0 +1,3 @@ +{ + "a": 1 +} diff --git a/test/ut/experiment/assets/ss.yaml b/test/ut/experiment/assets/ss.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90912d52ce9422166e6d6596c9a01c5604b2eda7 --- /dev/null +++ b/test/ut/experiment/assets/ss.yaml @@ -0,0 +1,9 @@ +pool_type: + _type: choice + _value: + - max + - min + - avg +学习率: # test unicode + _type: loguniform + _value: [ 0.0000001, 0.1 ] diff --git a/test/ut/experiment/assets/ss_comma.json b/test/ut/experiment/assets/ss_comma.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc5c0f4fbbd676c8c4cee561fa7f972ff969569 --- /dev/null +++ b/test/ut/experiment/assets/ss_comma.json @@ -0,0 +1,10 @@ +{ + "pool_type": { + "_type": "choice", + "_value": [ "max", "min", "avg" ], + }, + "学习率": { + "_type": "loguniform", + "_value": [ 0.0000001, 0.1 ], + }, +} diff --git a/test/ut/experiment/assets/ss_tab.json b/test/ut/experiment/assets/ss_tab.json new file mode 100644 index 0000000000000000000000000000000000000000..b5d0f659fa4398cc5a9f130506a1e585822649e3 --- /dev/null +++ b/test/ut/experiment/assets/ss_tab.json @@ -0,0 +1,10 @@ +{ + "pool_type": { + "_type": "choice", + "_value": [ "max", "min", "avg" ] + }, + "学习率": { + "_type": "loguniform", + "_value": [ 1e-7, 0.1 ] + } +} diff --git a/test/ut/experiment/assets/ss_tab_comma.json b/test/ut/experiment/assets/ss_tab_comma.json new file mode 100644 index 0000000000000000000000000000000000000000..4d7650c55e4260e8aafe6eafd3facf62b36b6c81 --- /dev/null +++ b/test/ut/experiment/assets/ss_tab_comma.json @@ -0,0 +1,10 @@ +{ + "pool_type": { + "_type": "choice", + "_value": [ "max", "min", "avg" ], + }, + "学习率": { + "_type": "loguniform", + "_value": [ 1e-7, 0.1 ], + }, +} diff --git a/test/ut/experiment/assets/ss_yaml12.yaml b/test/ut/experiment/assets/ss_yaml12.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec53c52218cec8fb1212b8545e0ca807fccff96f --- /dev/null +++ b/test/ut/experiment/assets/ss_yaml12.yaml @@ -0,0 +1,9 @@ +pool_type: + _type: choice + _value: + - max + - min + - avg +学习率: # test unicode + _type: loguniform + _value: [ 1e-7, 0.1 ] # test scientific notation diff --git a/test/ut/experiment/test_config_base.py b/test/ut/experiment/test_config_base.py new file mode 100644 index 0000000000000000000000000000000000000000..fe4a37ffe001fb9ad45ff760add5d360b939a836 --- /dev/null +++ b/test/ut/experiment/test_config_base.py @@ -0,0 +1,152 @@ +from copy import deepcopy +from dataclasses import dataclass +from typing import Dict, List, Optional, Union + +from nni.experiment.config.base import ConfigBase + +# config classes + +@dataclass(init=False) +class NestedChild(ConfigBase): + msg: str + int_field: int = 1 + + def _canonicalize(self, parents): + if '/' not in self.msg: + self.msg = parents[0].msg + '/' + self.msg + super()._canonicalize(parents) + + def _validate_canonical(self): + super()._validate_canonical() + if not self.msg.endswith('[2]'): + raise ValueError('not end with [2]') + +@dataclass(init=False) +class Child(ConfigBase): + msg: str + children: List[NestedChild] + + def 
_canonicalize(self, parents): + if '/' not in self.msg: + self.msg = parents[0].msg + '/' + self.msg + super()._canonicalize(parents) + + def _validate_canonical(self): + super()._validate_canonical() + if not self.msg.endswith('[1]'): + raise ValueError('not end with "[1]"') + +@dataclass(init=False) +class TestConfig(ConfigBase): + msg: str + required_field: Optional[int] + optional_field: Optional[int] = None + multi_type_field: Union[int, List[int]] + child: Optional[Child] = None + + def _canonicalize(self, parents): + if isinstance(self.multi_type_field, int): + self.multi_type_field = [self.multi_type_field] + super()._canonicalize(parents) + +# sample inputs + +good = { + 'msg': 'a', + 'required_field': 10, + 'multi_type_field': 20, + 'child': { + 'msg': 'b[1]', + 'children': [{ + 'msg': 'c[2]', + 'int_field': 30, + }, { + 'msg': 'd[2]', + }], + }, +} + +missing = deepcopy(good) +missing.pop('required_field') + +wrong_type = deepcopy(good) +wrong_type['optional_field'] = 0.5 + +nested_wrong_type = deepcopy(good) +nested_wrong_type['child']['children'][1]['int_field'] = 'str' + +bad_value = deepcopy(good) +bad_value['child']['msg'] = 'b' + +extra_field = deepcopy(good) +extra_field['hello'] = 'world' + +bads = { + 'missing': missing, + 'wrong_type': wrong_type, + 'nested_wrong_type': nested_wrong_type, + 'bad_value': bad_value, + 'extra_field': extra_field, +} + +# ground truth + +_nested_child_1 = NestedChild() +_nested_child_1.msg = 'c[2]' +_nested_child_1.int_field = 30 + +_nested_child_2 = NestedChild() +_nested_child_2.msg = 'd[2]' +_nested_child_2.int_field = 1 + +_child = Child() +_child.msg = 'b[1]' +_child.children = [_nested_child_1, _nested_child_2] + +good_config = TestConfig() +good_config.msg = 'a' +good_config.required_field = 10 +good_config.optional_field = None +good_config.multi_type_field = 20 +good_config.child = _child + +_nested_child_1 = NestedChild() +_nested_child_1.msg = 'a/b[1]/c[2]' +_nested_child_1.int_field = 30 + +_nested_child_2 = NestedChild() +_nested_child_2.msg = 'a/b[1]/d[2]' +_nested_child_2.int_field = 1 + +_child = Child() +_child.msg = 'a/b[1]' +_child.children = [_nested_child_1, _nested_child_2] + +good_canon_config = TestConfig() +good_canon_config.msg = 'a' +good_canon_config.required_field = 10 +good_canon_config.optional_field = None +good_canon_config.multi_type_field = [20] +good_canon_config.child = _child + +# test function + +def test_good(): + config = TestConfig(**good) + assert config == good_config + config.validate() + assert config.json() == good_canon_config.json() + +def test_bad(): + for tag, bad in bads.items(): + exc = None + try: + config = TestConfig(**bad) + config.validate() + except Exception as e: + exc = e + assert exc is not None + +if __name__ == '__main__': + test_good() + test_bad() diff --git a/test/ut/experiment/test_exp_config.py b/test/ut/experiment/test_exp_config.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7020e8147537043adb8208ce0da1b1c63c831a --- /dev/null +++ b/test/ut/experiment/test_exp_config.py @@ -0,0 +1,113 @@ +import copy +import os.path +from pathlib import Path + +from nni.experiment.config import ExperimentConfig + +def expand_path(path): + return os.path.realpath(os.path.join(os.path.dirname(__file__), path)) + +## minimal config ## + +minimal_json = { + 'searchSpace': {'a': 1}, + 'trialCommand': 'python main.py', + 'trialConcurrency': 2, + 'tuner': { + 'name': 'random', + }, + 'trainingService': { + 'platform': 'local', + }, +} + +minimal_class = 
ExperimentConfig('local')
+minimal_class.search_space = {'a': 1}
+minimal_class.trial_command = 'python main.py'
+minimal_class.trial_concurrency = 2
+minimal_class.tuner.name = 'random'
+
+minimal_canon = {
+    'searchSpace': {'a': 1},
+    'trialCommand': 'python main.py',
+    'trialCodeDirectory': os.path.realpath('.'),
+    'trialConcurrency': 2,
+    'useAnnotation': False,
+    'debug': False,
+    'logLevel': 'info',
+    'experimentWorkingDirectory': str(Path.home() / 'nni-experiments'),
+    'tuner': {'name': 'random'},
+    'trainingService': {
+        'platform': 'local',
+        'trialCommand': 'python main.py',
+        'trialCodeDirectory': os.path.realpath('.'),
+        'debug': False,
+        'maxTrialNumberPerGpu': 1,
+        'reuseMode': False,
+    },
+}
+
+minimal_canon_2 = copy.deepcopy(minimal_canon)
+minimal_canon_2['tuner']['classArgs'] = {}
+
+## detailed config ##
+
+detailed_canon = {
+    'experimentName': 'test case',
+    'searchSpaceFile': expand_path('assets/search_space.json'),
+    'searchSpace': {'a': 1},
+    'trialCommand': 'python main.py',
+    'trialCodeDirectory': expand_path('assets'),
+    'trialConcurrency': 2,
+    'trialGpuNumber': 1,
+    'maxExperimentDuration': '1.5h',
+    'maxTrialNumber': 10,
+    'maxTrialDuration': 60,
+    'nniManagerIp': '1.2.3.4',
+    'useAnnotation': False,
+    'debug': True,
+    'logLevel': 'warning',
+    'experimentWorkingDirectory': str(Path.home() / 'nni-experiments'),
+    'tunerGpuIndices': [0],
+    'assessor': {
+        'name': 'assess',
+    },
+    'advisor': {
+        'className': 'Advisor',
+        'codeDirectory': expand_path('assets'),
+        'classArgs': {'random_seed': 0},
+    },
+    'trainingService': {
+        'platform': 'local',
+        'trialCommand': 'python main.py',
+        'trialCodeDirectory': expand_path('assets'),
+        'trialGpuNumber': 1,
+        'debug': True,
+        'useActiveGpu': False,
+        'maxTrialNumberPerGpu': 2,
+        'gpuIndices': [1, 2],
+        'reuseMode': True,
+    },
+    'sharedStorage': {
+        'storageType': 'NFS',
+        'localMountPoint': expand_path('assets'),
+        'remoteMountPoint': '/tmp',
+        'localMounted': 'usermount',
+        'nfsServer': 'nfs.test.case',
+        'exportedDirectory': 'root',
+    },
+}
+
+## test function ##
+
+def test_all():
+    minimal = ExperimentConfig(**minimal_json)
+    assert minimal.json() == minimal_canon
+
+    assert minimal_class.json() == minimal_canon_2
+
+    detailed = ExperimentConfig.load(expand_path('assets/config.yaml'))
+    assert detailed.json() == detailed_canon
+
+if __name__ == '__main__':
+    test_all() diff --git a/test/ut/experiment/test_search_space.py b/test/ut/experiment/test_search_space.py new file mode 100644 index 0000000000000000000000000000000000000000..6df630f61f34dd30d894b074e7b25463fd20a661 --- /dev/null +++ b/test/ut/experiment/test_search_space.py @@ -0,0 +1,52 @@
+import json
+from pathlib import Path
+
+import yaml
+
+from nni.experiment.config import ExperimentConfig, AlgorithmConfig, LocalConfig
+
+## template ##
+
+config = ExperimentConfig(
+    search_space_file = '',
+    trial_command = 'echo hello',
+    trial_concurrency = 1,
+    tuner = AlgorithmConfig(name='random'),
+    training_service = LocalConfig()
+)
+
+space_correct = {
+    'pool_type': {
+        '_type': 'choice',
+        '_value': ['max', 'min', 'avg']
+    },
+    '学习率': {
+        '_type': 'loguniform',
+        '_value': [1e-7, 0.1]
+    }
+}
+
+# FIXME
+# PyYAML 6.0 implements YAML 1.1, which rejects tabs and this scientific notation.
+# JSON does not allow comments or trailing commas.
+# So some of the combinations below fail to load and stay commented out.
+formats = [
+    ('ss_tab.json', 'JSON (tabs + scientific notation)'),
+    ('ss_comma.json', 'JSON with extra comma'),
+    #('ss_tab_comma.json', 'JSON (tabs + scientific notation) with extra comma'),
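+    # the commented-out entries (here and below) are the combinations that
+    # currently fail to parse; re-enable them once the loaders are fixed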
('ss.yaml', 'YAML'), + #('ss_yaml12.yaml', 'YAML 1.2 with scientific notation'), +] + +def test_search_space(): + for space_file, description in formats: + try: + config.search_space_file = Path(__file__).parent / 'assets' / space_file + space = config.json()['searchSpace'] + assert space == space_correct + except Exception as e: + print('Failed to load search space format: ' + description) + raise e + +if __name__ == '__main__': + test_search_space() diff --git a/test/ut/experiment/test_ts_remote.py b/test/ut/experiment/test_ts_remote.py new file mode 100644 index 0000000000000000000000000000000000000000..770e6faac9da94c12c26099f77e3ab93a02d9299 --- /dev/null +++ b/test/ut/experiment/test_ts_remote.py @@ -0,0 +1,163 @@ +import os.path +from pathlib import Path + +from nni.experiment.config import ExperimentConfig, AlgorithmConfig, RemoteConfig, RemoteMachineConfig + +## minimal config ## + +minimal_json = { + 'searchSpace': {'a': 1}, + 'trialCommand': 'python main.py', + 'trialConcurrency': 2, + 'tuner': { + 'name': 'random', + }, + 'trainingService': { + 'platform': 'remote', + 'machine_list': [ + { + 'host': '1.2.3.4', + 'user': 'test_user', + 'password': '123456', + }, + ], + }, +} + +minimal_class = ExperimentConfig( + search_space = {'a': 1}, + trial_command = 'python main.py', + trial_concurrency = 2, + tuner = AlgorithmConfig( + name = 'random', + ), + training_service = RemoteConfig( + machine_list = [ + RemoteMachineConfig( + host = '1.2.3.4', + user = 'test_user', + password = '123456', + ), + ], + ), +) + +minimal_canon = { + 'searchSpace': {'a': 1}, + 'trialCommand': 'python main.py', + 'trialCodeDirectory': os.path.realpath('.'), + 'trialConcurrency': 2, + 'useAnnotation': False, + 'debug': False, + 'logLevel': 'info', + 'experimentWorkingDirectory': str(Path.home() / 'nni-experiments'), + 'tuner': { + 'name': 'random', + }, + 'trainingService': { + 'platform': 'remote', + 'trialCommand': 'python main.py', + 'trialCodeDirectory': os.path.realpath('.'), + 'debug': False, + 'machineList': [ + { + 'host': '1.2.3.4', + 'port': 22, + 'user': 'test_user', + 'password': '123456', + 'useActiveGpu': False, + 'maxTrialNumberPerGpu': 1, + } + ], + 'reuseMode': True, + } +} + +## detailed config ## + +detailed_json = { + 'searchSpace': {'a': 1}, + 'trialCommand': 'python main.py', + 'trialConcurrency': 2, + 'trialGpuNumber': 1, + 'nni_manager_ip': '1.2.3.0', + 'tuner': { + 'name': 'random', + }, + 'trainingService': { + 'platform': 'remote', + 'machine_list': [ + { + 'host': '1.2.3.4', + 'user': 'test_user', + 'password': '123456', + }, + { + 'host': '1.2.3.5', + 'user': 'test_user_2', + 'password': 'abcdef', + 'use_active_gpu': True, + 'max_trial_number_per_gpu': 2, + 'gpu_indices': '0,1', + 'python_path': '~/path', # don't do this in actual experiment + }, + ], + }, +} + +detailed_canon = { + 'searchSpace': {'a': 1}, + 'trialCommand': 'python main.py', + 'trialCodeDirectory': os.path.realpath('.'), + 'trialConcurrency': 2, + 'trialGpuNumber': 1, + 'nniManagerIp': '1.2.3.0', + 'useAnnotation': False, + 'debug': False, + 'logLevel': 'info', + 'experimentWorkingDirectory': str(Path.home() / 'nni-experiments'), + 'tuner': {'name': 'random'}, + 'trainingService': { + 'platform': 'remote', + 'trialCommand': 'python main.py', + 'trialCodeDirectory': os.path.realpath('.'), + 'trialGpuNumber': 1, + 'nniManagerIp': '1.2.3.0', + 'debug': False, + 'machineList': [ + { + 'host': '1.2.3.4', + 'port': 22, + 'user': 'test_user', + 'password': '123456', + 'useActiveGpu': False, + 
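+            # port, useActiveGpu and maxTrialNumberPerGpu were not given in
+            # minimal_json; they are defaults filled in during canonicalization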
'maxTrialNumberPerGpu': 1 + }, + { + 'host': '1.2.3.5', + 'port': 22, + 'user': 'test_user_2', + 'password': 'abcdef', + 'useActiveGpu': True, + 'maxTrialNumberPerGpu': 2, + 'gpuIndices': [0, 1], + 'pythonPath': '~/path' + } + ], + 'reuseMode': True, + } +} + +## test function ## + +def test_remote(): + config = ExperimentConfig(**minimal_json) + assert config.json() == minimal_canon + + assert minimal_class.json() == minimal_canon + + config = ExperimentConfig(**detailed_json) + assert config.json() == detailed_canon + +if __name__ == '__main__': + test_remote() diff --git a/test/ut/retiarii/__init__.py b/test/ut/retiarii/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/ut/retiarii/convert_mixin.py b/test/ut/retiarii/convert_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..f538c9d277d94a3803b7467a4fdc23041c4b6880 --- /dev/null +++ b/test/ut/retiarii/convert_mixin.py @@ -0,0 +1,19 @@ +import torch + +from nni.retiarii.converter.graph_gen import convert_to_graph, GraphConverterWithShape + + +class ConvertMixin: + @staticmethod + def _convert_model(model, input): + script_module = torch.jit.script(model) + model_ir = convert_to_graph(script_module, model) + return model_ir + + +class ConvertWithShapeMixin: + @staticmethod + def _convert_model(model, input): + script_module = torch.jit.script(model) + model_ir = convert_to_graph(script_module, model, converter=GraphConverterWithShape(), dummy_input=input) + return model_ir diff --git a/test/ut/retiarii/debug_mnist_pytorch.py b/test/ut/retiarii/debug_mnist_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..c7511a5dbdcdf7ce43847cb654ec0e63ab6d87f2 --- /dev/null +++ b/test/ut/retiarii/debug_mnist_pytorch.py @@ -0,0 +1,43 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + +import nni.retiarii.nn.pytorch + +import torch + + +class _model(nn.Module): + def __init__(self): + super().__init__() + self.stem = stem() + self.flatten = torch.nn.Flatten() + self.fc1 = torch.nn.Linear(out_features=256, in_features=1024) + self.fc2 = torch.nn.Linear(out_features=10, in_features=256) + self.softmax = torch.nn.Softmax() + + def forward(self, image): + stem = self.stem(image) + flatten = self.flatten(stem) + fc1 = self.fc1(flatten) + fc2 = self.fc2(fc1) + softmax = self.softmax(fc2) + return softmax + + + +class stem(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(out_channels=32, in_channels=1, kernel_size=5) + self.pool1 = torch.nn.MaxPool2d(kernel_size=2) + self.conv2 = torch.nn.Conv2d(out_channels=64, in_channels=32, kernel_size=5) + self.pool2 = torch.nn.MaxPool2d(kernel_size=2) + + def forward(self, *_inputs): + conv1 = self.conv1(_inputs[0]) + pool1 = self.pool1(conv1) + conv2 = self.conv2(pool1) + pool2 = self.pool2(conv2) + return pool2 diff --git a/test/ut/retiarii/dedup_logical_graph.json b/test/ut/retiarii/dedup_logical_graph.json new file mode 100644 index 0000000000000000000000000000000000000000..5466d7be0e7ba210b30a531c65d68f7508ca71de --- /dev/null +++ b/test/ut/retiarii/dedup_logical_graph.json @@ -0,0 +1 @@ +{"inputs": null, "outputs": null, "nodes": {"2__outputs": {"operation": {"type": "_outputs", "parameters": {}}}, "2__model__Constant2": {"operation": {"type": "prim::Constant", "parameters": {}}}, "2__model__Constant3": {"operation": {"type": "prim::Constant", "parameters": {"value": 3}}}, 
"2__model__Constant4": {"operation": {"type": "prim::Constant", "parameters": {"value": -1}}}, "2__model__Constant5": {"operation": {"type": "prim::Constant", "parameters": {"value": 0}}}, "2__model__stem": {"operation": {"type": "_cell", "parameters": {}, "cell_name": "_model__stem"}}, "2__model__Size6": {"operation": {"type": "aten::size", "parameters": {}}}, "2__model__ListConstruct7": {"operation": {"type": "prim::ListConstruct", "parameters": {}}}, "2__model__View8": {"operation": {"type": "aten::view", "parameters": {}}}, "2__model__fc1": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 1024, "out_features": 256}}}, "2__model__fc2": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 256, "out_features": 10}}}, "2__model__softmax9": {"operation": {"type": "Function.softmax", "parameters": {}}}, "3__outputs": {"operation": {"type": "_outputs", "parameters": {}}}, "3__model__Constant2": {"operation": {"type": "prim::Constant", "parameters": {}}}, "3__model__Constant3": {"operation": {"type": "prim::Constant", "parameters": {"value": 3}}}, "3__model__Constant4": {"operation": {"type": "prim::Constant", "parameters": {"value": -1}}}, "3__model__Constant5": {"operation": {"type": "prim::Constant", "parameters": {"value": 0}}}, "3__model__stem": {"operation": {"type": "_cell", "parameters": {}, "cell_name": "_model__stem"}}, "3__model__Size6": {"operation": {"type": "aten::size", "parameters": {}}}, "3__model__ListConstruct7": {"operation": {"type": "prim::ListConstruct", "parameters": {}}}, "3__model__View8": {"operation": {"type": "aten::view", "parameters": {}}}, "3__model__fc1": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 1024, "out_features": 256}}}, "3__model__fc2": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 256, "out_features": 10}}}, "3__model__softmax9": {"operation": {"type": "Function.softmax", "parameters": {}}}, "4__outputs": {"operation": {"type": "_outputs", "parameters": {}}}, "4__model__Constant2": {"operation": {"type": "prim::Constant", "parameters": {}}}, "4__model__Constant3": {"operation": {"type": "prim::Constant", "parameters": {"value": 3}}}, "4__model__Constant4": {"operation": {"type": "prim::Constant", "parameters": {"value": -1}}}, "4__model__Constant5": {"operation": {"type": "prim::Constant", "parameters": {"value": 0}}}, "4__model__stem": {"operation": {"type": "_cell", "parameters": {}, "cell_name": "_model__stem"}}, "4__model__Size6": {"operation": {"type": "aten::size", "parameters": {}}}, "4__model__ListConstruct7": {"operation": {"type": "prim::ListConstruct", "parameters": {}}}, "4__model__View8": {"operation": {"type": "aten::view", "parameters": {}}}, "4__model__fc1": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 1024, "out_features": 256}}}, "4__model__fc2": {"operation": {"type": "__torch__.torch.nn.modules.linear.Linear", "parameters": {"in_features": 256, "out_features": 10}}}, "4__model__softmax9": {"operation": {"type": "Function.softmax", "parameters": {}}}, "1_Dedup__inputs": {"operation": {"type": "_inputs", "parameters": {}}}}, "edges": [["Dedup__inputs", "2__model__stem"], ["2__model__stem", "2__model__Size6"], ["2__model__Constant5", "2__model__Size6"], ["2__model__Size6", "2__model__ListConstruct7"], ["2__model__Constant4", "2__model__ListConstruct7"], ["2__model__stem", "2__model__View8"], 
["2__model__ListConstruct7", "2__model__View8"], ["2__model__View8", "2__model__fc1"], ["2__model__fc1", "2__model__fc2"], ["2__model__fc2", "2__model__softmax9"], ["2__model__Constant4", "2__model__softmax9"], ["2__model__Constant3", "2__model__softmax9"], ["2__model__Constant2", "2__model__softmax9"], ["2__model__softmax9", "2__outputs"], ["Dedup__inputs", "3__model__stem"], ["3__model__stem", "3__model__Size6"], ["3__model__Constant5", "3__model__Size6"], ["3__model__Size6", "3__model__ListConstruct7"], ["3__model__Constant4", "3__model__ListConstruct7"], ["3__model__stem", "3__model__View8"], ["3__model__ListConstruct7", "3__model__View8"], ["3__model__View8", "3__model__fc1"], ["3__model__fc1", "3__model__fc2"], ["3__model__fc2", "3__model__softmax9"], ["3__model__Constant4", "3__model__softmax9"], ["3__model__Constant3", "3__model__softmax9"], ["3__model__Constant2", "3__model__softmax9"], ["3__model__softmax9", "3__outputs"], ["Dedup__inputs", "4__model__stem"], ["4__model__stem", "4__model__Size6"], ["4__model__Constant5", "4__model__Size6"], ["4__model__Size6", "4__model__ListConstruct7"], ["4__model__Constant4", "4__model__ListConstruct7"], ["4__model__stem", "4__model__View8"], ["4__model__ListConstruct7", "4__model__View8"], ["4__model__View8", "4__model__fc1"], ["4__model__fc1", "4__model__fc2"], ["4__model__fc2", "4__model__softmax9"], ["4__model__Constant4", "4__model__softmax9"], ["4__model__Constant3", "4__model__softmax9"], ["4__model__Constant2", "4__model__softmax9"], ["4__model__softmax9", "4__outputs"]]} \ No newline at end of file diff --git a/test/ut/retiarii/inject_nn.py b/test/ut/retiarii/inject_nn.py new file mode 100644 index 0000000000000000000000000000000000000000..585c9a50be9b3e86af8a365828136f3d70a4f086 --- /dev/null +++ b/test/ut/retiarii/inject_nn.py @@ -0,0 +1,22 @@ +import inspect + +import torch.nn as nn + +from nni.retiarii import basic_unit + +_trace_module_names = [ + module_name for module_name in dir(nn) + if module_name not in ['Module', 'ModuleList', 'ModuleDict', 'Sequential'] and + inspect.isclass(getattr(nn, module_name)) and issubclass(getattr(nn, module_name), nn.Module) +] + + +def remove_inject_pytorch_nn(): + for name in _trace_module_names: + if hasattr(getattr(nn, name), '__wrapped__'): + setattr(nn, name, getattr(nn, name).__wrapped__) + + +def inject_pytorch_nn(): + for name in _trace_module_names: + setattr(nn, name, basic_unit(getattr(nn, name))) diff --git a/test/ut/retiarii/mnist-tensorflow.json b/test/ut/retiarii/mnist-tensorflow.json new file mode 100644 index 0000000000000000000000000000000000000000..6fd4fd27c05375138debab4a436f7879418e4d06 --- /dev/null +++ b/test/ut/retiarii/mnist-tensorflow.json @@ -0,0 +1,44 @@ +{ + "_model": { + "inputs": ["image"], + "outputs": ["metric"], + + "nodes": { + "stem": {"operation": {"type": "_cell", "parameters": {}, "attributes": {}, "cell_name": "stem"}}, + "flatten": {"operation": {"type": "Flatten", "parameters": {}, "attributes": {}}}, + "fc1": {"operation": {"type": "Dense", "parameters": {"units": 1024, "activation": "relu"}, "attributes": {}}}, + "fc2": {"operation": {"type": "Dense", "parameters": {"units": 10}, "attributes": {}}}, + "softmax": {"operation": {"type": "Softmax", "parameters": {}, "attributes": {}}} + }, + + "edges": [ + {"head": ["_inputs", 0], "tail": ["stem", 0]}, + {"head": ["stem", 0], "tail": ["flatten", null]}, + {"head": ["flatten", null], "tail": ["fc1", null]}, + {"head": ["fc1", null], "tail": ["fc2", null]}, + {"head": ["fc2", null], "tail": ["softmax", 
null]}, + {"head": ["softmax", null], "tail": ["_outputs", 0]} + ] + }, + + "stem": { + "nodes": { + "conv1": {"operation": {"type": "Conv2D", "parameters": {"filters": 32, "kernel_size": 5, "activation": "relu"}, "attributes": {}}}, + "pool1": {"operation": {"type": "MaxPool2D", "parameters": {"pool_size": 2}, "attributes": {}}}, + "conv2": {"operation": {"type": "Conv2D", "parameters": {"filters": 64, "kernel_size": 5, "activation": "relu"}, "attributes": {}}}, + "pool2": {"operation": {"type": "MaxPool2D", "parameters": {"pool_size": 2}, "attributes": {}}} + }, + + "edges": [ + {"head": ["_inputs", 0], "tail": ["conv1", null]}, + {"head": ["conv1", null], "tail": ["pool1", null]}, + {"head": ["pool1", null], "tail": ["conv2", null]}, + {"head": ["conv2", null], "tail": ["pool2", null]}, + {"head": ["pool2", null], "tail": ["_outputs", 0]} + ] + }, + + "_evaluator": { + "type": "DebugEvaluator" + } +} diff --git a/test/ut/retiarii/mnist_pytorch.json b/test/ut/retiarii/mnist_pytorch.json new file mode 100644 index 0000000000000000000000000000000000000000..b5ddc878873d051bb3b07c4dfb48bb805f516ceb --- /dev/null +++ b/test/ut/retiarii/mnist_pytorch.json @@ -0,0 +1,40 @@ +{ + "_model": { + "inputs": ["image"], + "outputs": ["metric"], + + "nodes": { + "stem": {"operation": {"type": "_cell", "cell_name": "stem"}}, + "flatten": {"operation": {"type": "__torch__.torch.nn.Flatten"}}, + "fc1": {"operation": {"type": "__torch__.torch.nn.Linear", "parameters": {"out_features": 256, "in_features": 1024}}}, + "fc2": {"operation": {"type": "__torch__.torch.nn.Linear", "parameters": {"out_features": 10, "in_features": 256}}}, + "softmax": {"operation": {"type": "__torch__.torch.nn.Softmax"}} + }, + + "edges": [ + {"head": ["_inputs", 0], "tail": ["stem", null]}, + {"head": ["stem", null], "tail": ["flatten", null]}, + {"head": ["flatten", null], "tail": ["fc1", null]}, + {"head": ["fc1", null], "tail": ["fc2", null]}, + {"head": ["fc2", null], "tail": ["softmax", null]}, + {"head": ["softmax", null], "tail": ["_outputs", 0]} + ] + }, + + "stem": { + "nodes": { + "conv1": {"operation": {"type": "__torch__.torch.nn.Conv2d", "parameters": {"out_channels": 32, "in_channels": 1, "kernel_size": 5}}}, + "pool1": {"operation": {"type": "__torch__.torch.nn.MaxPool2d", "parameters": {"kernel_size": 2}}}, + "conv2": {"operation": {"type": "__torch__.torch.nn.Conv2d", "parameters": {"out_channels": 64, "in_channels": 32, "kernel_size": 5}}}, + "pool2": {"operation": {"type": "__torch__.torch.nn.MaxPool2d", "parameters": {"kernel_size": 2}}} + }, + + "edges": [ + {"head": ["_inputs", 0], "tail": ["conv1", null]}, + {"head": ["conv1", null], "tail": ["pool1", null]}, + {"head": ["pool1", null], "tail": ["conv2", null]}, + {"head": ["conv2", null], "tail": ["pool2", null]}, + {"head": ["pool2", null], "tail": ["_outputs", 0]} + ] + } +} diff --git a/test/ut/retiarii/test_cgo_engine.py b/test/ut/retiarii/test_cgo_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8fa626b5e85bf3090824903510f12a596b5fa2 --- /dev/null +++ b/test/ut/retiarii/test_cgo_engine.py @@ -0,0 +1,343 @@ +import os +import threading +import unittest +import time +import torch +import torch.nn as nn +from pytorch_lightning.utilities.seed import seed_everything + +from pathlib import Path + +import nni + +try: + from nni.common.device import GPUDevice + from nni.retiarii.execution.cgo_engine import CGOExecutionEngine + from nni.retiarii import Model + from nni.retiarii.graph import Node + + from nni.retiarii import Model, 
submit_models + from nni.retiarii.integration import RetiariiAdvisor + from nni.retiarii.execution import set_execution_engine + from nni.retiarii.execution.logical_optimizer.opt_dedup_input import DedupInputOptimizer + from nni.retiarii.execution.logical_optimizer.logical_plan import LogicalPlan + from nni.retiarii.utils import import_ + + from nni.retiarii import serialize + import nni.retiarii.evaluator.pytorch.lightning as pl + from nni.retiarii.evaluator.pytorch.cgo.evaluator import MultiModelSupervisedLearningModule, _MultiModelSupervisedLearningModule + import nni.retiarii.evaluator.pytorch.cgo.trainer as cgo_trainer + + module_import_failed = False +except ImportError: + module_import_failed = True + +import pytest +from torchvision.datasets import MNIST +from torchvision import transforms +from torch.utils.data import Dataset +from sklearn.datasets import load_diabetes + + +class _model_cpu(nn.Module): + def __init__(self): + super().__init__() + self.M_1_stem = M_1_stem() + self.M_2_stem = M_2_stem() + self.M_1_flatten = torch.nn.Flatten() + self.M_2_flatten = torch.nn.Flatten() + self.M_1_fc1 = torch.nn.Linear(out_features=256, in_features=1024) + self.M_2_fc1 = torch.nn.Linear(out_features=256, in_features=1024) + self.M_1_fc2 = torch.nn.Linear(out_features=10, in_features=256) + self.M_2_fc2 = torch.nn.Linear(out_features=10, in_features=256) + self.M_1_softmax = torch.nn.Softmax() + self.M_2_softmax = torch.nn.Softmax() + + def forward(self, *_inputs): + M_1__inputs_to_M_2_stem = _inputs[0] + M_1_stem = self.M_1_stem(_inputs[0]) + M_2_stem = self.M_2_stem(M_1__inputs_to_M_2_stem) + M_1_flatten = self.M_1_flatten(M_1_stem) + M_2_flatten = self.M_2_flatten(M_2_stem) + M_1_fc1 = self.M_1_fc1(M_1_flatten) + M_2_fc1 = self.M_2_fc1(M_2_flatten) + M_1_fc2 = self.M_1_fc2(M_1_fc1) + M_2_fc2 = self.M_2_fc2(M_2_fc1) + M_1_softmax = self.M_1_softmax(M_1_fc2) + M_2_softmax = self.M_2_softmax(M_2_fc2) + return M_1_softmax, M_2_softmax + + +class _model_gpu(nn.Module): + def __init__(self): + super().__init__() + self.M_1_stem = M_1_stem().to('cuda:0') + self.M_2_stem = M_2_stem().to('cuda:1') + self.M_1_flatten = torch.nn.Flatten().to('cuda:0') + self.M_2_flatten = torch.nn.Flatten().to('cuda:1') + self.M_1_fc1 = torch.nn.Linear(out_features=256, in_features=1024).to('cuda:0') + self.M_2_fc1 = torch.nn.Linear(out_features=256, in_features=1024).to('cuda:1') + self.M_1_fc2 = torch.nn.Linear(out_features=10, in_features=256).to('cuda:0') + self.M_2_fc2 = torch.nn.Linear(out_features=10, in_features=256).to('cuda:1') + self.M_1_softmax = torch.nn.Softmax().to('cuda:0') + self.M_2_softmax = torch.nn.Softmax().to('cuda:1') + + def forward(self, *_inputs): + M_1__inputs_to_M_1_stem = _inputs[0].to("cuda:0") + M_1__inputs_to_M_2_stem = _inputs[0].to("cuda:1") + M_1_stem = self.M_1_stem(M_1__inputs_to_M_1_stem) + M_2_stem = self.M_2_stem(M_1__inputs_to_M_2_stem) + M_1_flatten = self.M_1_flatten(M_1_stem) + M_2_flatten = self.M_2_flatten(M_2_stem) + M_1_fc1 = self.M_1_fc1(M_1_flatten) + M_2_fc1 = self.M_2_fc1(M_2_flatten) + M_1_fc2 = self.M_1_fc2(M_1_fc1) + M_2_fc2 = self.M_2_fc2(M_2_fc1) + M_1_softmax = self.M_1_softmax(M_1_fc2) + M_2_softmax = self.M_2_softmax(M_2_fc2) + return M_1_softmax, M_2_softmax + + +class M_1_stem(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(out_channels=32, in_channels=1, kernel_size=5) + self.pool1 = torch.nn.MaxPool2d(kernel_size=2) + self.conv2 = torch.nn.Conv2d(out_channels=64, in_channels=32, kernel_size=5) + self.pool2 = 
torch.nn.MaxPool2d(kernel_size=2) + + def forward(self, *_inputs): + conv1 = self.conv1(_inputs[0]) + pool1 = self.pool1(conv1) + conv2 = self.conv2(pool1) + pool2 = self.pool2(conv2) + return pool2 + + +class M_2_stem(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(out_channels=32, in_channels=1, kernel_size=5) + self.pool1 = torch.nn.MaxPool2d(kernel_size=2) + self.conv2 = torch.nn.Conv2d(out_channels=64, in_channels=32, kernel_size=5) + self.pool2 = torch.nn.MaxPool2d(kernel_size=2) + + def forward(self, *_inputs): + conv1 = self.conv1(_inputs[0]) + pool1 = self.pool1(conv1) + conv2 = self.conv2(pool1) + pool2 = self.pool2(conv2) + return pool2 + + +def _reset(): + # this is to not affect other tests in sdk + nni.trial._intermediate_seq = 0 + nni.trial._params = {'foo': 'bar', 'parameter_id': 0} + nni.runtime.platform.test._last_metric = None + nni.retiarii.integration_api._advisor = None + nni.retiarii.execution.api._execution_engine = None + + seed_everything(42) + + +def _new_trainer(): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = serialize(MNIST, root='data/mnist', train=True, download=True, transform=transform) + test_dataset = serialize(MNIST, root='data/mnist', train=False, download=True, transform=transform) + + multi_module = MultiModelSupervisedLearningModule(nn.CrossEntropyLoss, {'acc': pl._AccuracyWithLogits}) + + lightning = pl.Lightning(multi_module, cgo_trainer.Trainer(use_cgo=True, + max_epochs=1, + limit_train_batches=0.25, + enable_progress_bar=False), + train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100)) + return lightning + + +def _load_mnist(n_models: int = 1): + path = Path(__file__).parent / 'mnist_pytorch.json' + with open(path) as f: + mnist_model = Model._load(nni.load(fp=f)) + mnist_model.evaluator = _new_trainer() + + if n_models == 1: + return mnist_model + else: + models = [mnist_model] + for i in range(n_models - 1): + forked_model = mnist_model.fork() + forked_model.evaluator = _new_trainer() + models.append(forked_model) + return models + + +def _get_final_result(): + result = nni.load(nni.runtime.platform.test._last_metric)['value'] + if isinstance(result, list): + return [float(_) for _ in result] + else: + if isinstance(result, str) and '[' in result: + return nni.load(result) + return [float(result)] + + +class CGOEngineTest(unittest.TestCase): + def setUp(self): + if module_import_failed: + self.skipTest('test skip due to failed import of nni.retiarii.evaluator.pytorch.lightning') + + def test_multi_model_trainer_cpu(self): + _reset() + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = serialize(MNIST, root='data/mnist', train=True, download=True, transform=transform) + test_dataset = serialize(MNIST, root='data/mnist', train=False, download=True, transform=transform) + + multi_module = _MultiModelSupervisedLearningModule(nn.CrossEntropyLoss, {'acc': pl._AccuracyWithLogits}, n_models=2) + + lightning = pl.Lightning(multi_module, cgo_trainer.Trainer(use_cgo=True, + max_epochs=1, + limit_train_batches=0.25), + train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100)) + + lightning._execute(_model_cpu) + + result = _get_final_result() + assert len(result) == 2 + + for _ in result: + assert _ > 0.8 + + def 
test_multi_model_trainer_gpu(self): + _reset() + if not (torch.cuda.is_available() and torch.cuda.device_count() >= 2): + pytest.skip('test requires GPU and torch+cuda') + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = serialize(MNIST, root='data/mnist', train=True, download=True, transform=transform) + test_dataset = serialize(MNIST, root='data/mnist', train=False, download=True, transform=transform) + + multi_module = _MultiModelSupervisedLearningModule(nn.CrossEntropyLoss, {'acc': pl._AccuracyWithLogits}, n_models=2) + + lightning = pl.Lightning(multi_module, cgo_trainer.Trainer(use_cgo=True, + max_epochs=1, + limit_train_batches=0.25), + train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100)) + + lightning._execute(_model_gpu) + + result = _get_final_result() + assert len(result) == 2 + + for _ in result: + assert _ > 0.8 + + def _build_logical_with_mnist(self, n_models: int): + lp = LogicalPlan() + models = _load_mnist(n_models=n_models) + for m in models: + lp.add_model(m) + return lp, models + + def test_add_model(self): + _reset() + + lp, models = self._build_logical_with_mnist(3) + + for node in lp.logical_graph.hidden_nodes: + old_nodes = [m.root_graph.get_node_by_id(node.id) for m in models] + + self.assertTrue(any([old_nodes[0].__repr__() == Node.__repr__(x) for x in old_nodes])) + + def test_dedup_input_four_devices(self): + _reset() + + lp, models = self._build_logical_with_mnist(3) + + opt = DedupInputOptimizer() + opt.convert(lp) + + advisor = RetiariiAdvisor() + available_devices = [GPUDevice("test", 0), GPUDevice("test", 1), GPUDevice("test", 2), GPUDevice("test", 3)] + cgo = CGOExecutionEngine(devices=available_devices, batch_waiting_time=0) + + phy_models = cgo._assemble(lp) + self.assertTrue(len(phy_models) == 1) + advisor.stopping = True + advisor.default_worker.join() + advisor.assessor_worker.join() + cgo.join() + + def test_dedup_input_two_devices(self): + _reset() + + lp, models = self._build_logical_with_mnist(3) + + opt = DedupInputOptimizer() + opt.convert(lp) + + advisor = RetiariiAdvisor() + available_devices = [GPUDevice("test", 0), GPUDevice("test", 1)] + cgo = CGOExecutionEngine(devices=available_devices, batch_waiting_time=0) + + phy_models = cgo._assemble(lp) + self.assertTrue(len(phy_models) == 2) + advisor.stopping = True + advisor.default_worker.join() + advisor.assessor_worker.join() + cgo.join() + + def test_submit_models(self): + _reset() + nni.retiarii.debug_configs.framework = 'pytorch' + os.makedirs('generated', exist_ok=True) + from nni.runtime import protocol + import nni.runtime.platform.test as tt + protocol._out_file = open('generated/debug_protocol_out_file.py', 'wb') + protocol._in_file = open('generated/debug_protocol_out_file.py', 'rb') + + models = _load_mnist(2) + + advisor = RetiariiAdvisor() + cgo_engine = CGOExecutionEngine(devices=[GPUDevice("test", 0), GPUDevice("test", 1), + GPUDevice("test", 2), GPUDevice("test", 3)], batch_waiting_time=0) + set_execution_engine(cgo_engine) + submit_models(*models) + time.sleep(3) + + if torch.cuda.is_available() and torch.cuda.device_count() >= 2: + cmd, data = protocol.receive() + params = nni.load(data) + + tt.init_params(params) + + trial_thread = threading.Thread(target=CGOExecutionEngine.trial_execute_graph) + trial_thread.start() + last_metric = None + while True: + time.sleep(1) + if tt._last_metric: + metric = tt.get_last_metric() + if metric == 
last_metric:
+                        continue
+                    if 'value' in metric:
+                        # json is not imported at module level in this file; import it
+                        # locally so this GPU-only branch does not hit a NameError
+                        import json
+                        metric['value'] = json.dumps(metric['value'])
+                    advisor.handle_report_metric_data(metric)
+                    last_metric = metric
+                if not trial_thread.is_alive():
+                    trial_thread.join()
+                    break
+
+            trial_thread.join()
+
+        advisor.stopping = True
+        advisor.default_worker.join()
+        advisor.assessor_worker.join()
+        cgo_engine.join()
+
+
+if __name__ == '__main__':
+    unittest.main() diff --git a/test/ut/retiarii/test_convert.py b/test/ut/retiarii/test_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..c79c7696afcdf36bf7769d5ad8e3d681175b20b2 --- /dev/null +++ b/test/ut/retiarii/test_convert.py @@ -0,0 +1,584 @@
+"""
+Reference: We use tested models from https://github.com/pytorch/pytorch/blob/master/test/jit/test_models.py.
+"""
+
+import os
+import sys
+import unittest
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torchvision
+
+import nni.retiarii.nn.pytorch as nn
+from nni.retiarii import basic_unit
+from nni.retiarii.codegen import model_to_pytorch_script
+
+from .convert_mixin import ConvertMixin, ConvertWithShapeMixin
+
+class MnistNet(nn.Module):
+    def __init__(self):
+        super(MnistNet, self).__init__()
+        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
+        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+        self.conv2_drop = nn.Dropout2d()
+        self.fc1 = nn.Linear(320, 50)
+        self.fc2 = nn.Linear(50, 10)
+
+    def forward(self, x):
+        x = F.relu(F.max_pool2d(self.conv1(x), 2))
+        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
+        x = x.view(-1, 320)
+        x = F.relu(self.fc1(x))
+        x = F.dropout(x, training=self.training)
+        x = self.fc2(x)
+        return F.log_softmax(x, dim=1)
+
+# NOTE: serialize module cannot be placed within class or function
+@basic_unit
+class Linear(nn.Module):
+    def __init__(self, d_embed, d_proj):
+        super().__init__()
+        self.linear = nn.Linear(d_embed, d_proj)
+
+    def forward(self, input):
+        if len(input.size()) <= 2:
+            return self.linear(input)
+        size = input.size()[:2]
+        out = self.linear(input.view(size[0] * size[1], -1))
+        return out.view(size[0], size[1], -1)
+
+class TestConvert(unittest.TestCase, ConvertMixin):
+    @staticmethod
+    def _match_state_dict(current_values, expected_format):
+        result = {}
+        for k, v in expected_format.items():
+            for idx, cv in enumerate(current_values):
+                if cv.shape == v.shape:
+                    result[k] = cv
+                    current_values.pop(idx)
+                    break
+        return result
+
+    def checkExportImport(self, model, input):
+        model_ir = self._convert_model(model, input)
+        model_code = model_to_pytorch_script(model_ir)
+
+        exec_vars = {}
+        exec(model_code + '\n\nconverted_model = _model()', exec_vars)
+        converted_model = exec_vars['converted_model']
+        converted_state_dict = self._match_state_dict(list(model.state_dict().values()),
+                                                      dict(converted_model.state_dict()))
+        converted_model.load_state_dict(converted_state_dict)
+        with torch.no_grad():
+            expected_output = model.eval()(*input)
+            converted_output = converted_model.eval()(*input)
+        self.assertEqual(len(converted_output), len(expected_output))
+        for a, b in zip(converted_output, expected_output):
+            self.assertLess((a - b).abs().max().item(), 1E-4)
+        return converted_model
+
+    def setUp(self):
+        # FIXME
+        import nni.retiarii.debug_configs
+        nni.retiarii.debug_configs.framework = 'pytorch'
+
+    def test_dcgan_models(self):
+        class DCGANGenerator(nn.Module):
+            def __init__(self, nz, ngf, nc):
+                super(DCGANGenerator, self).__init__()
+                self.main = nn.Sequential(
+                    # input is Z, going into a convolution
+                    nn.ConvTranspose2d(nz, ngf * 8,
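+                        # positional arguments below: kernel_size=4, stride=1, padding=0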
4, 1, 0, bias=False), + nn.BatchNorm2d(ngf * 8), + nn.ReLU(True), + # state size. (ngf*8) x 4 x 4 + nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), + nn.BatchNorm2d(ngf * 4), + nn.ReLU(True), + # state size. (ngf*4) x 8 x 8 + nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), + nn.BatchNorm2d(ngf * 2), + nn.ReLU(True), + # state size. (ngf*2) x 16 x 16 + nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), + nn.BatchNorm2d(ngf), + nn.ReLU(True), + # state size. (ngf) x 32 x 32 + nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), + nn.Tanh() + # state size. (nc) x 64 x 64 + ) + + def forward(self, input): + return self.main(input) + + class DCGANDiscriminator(nn.Module): + def __init__(self, nc, ndf): + super(DCGANDiscriminator, self).__init__() + self.main = nn.Sequential( + # input is (nc) x 64 x 64 + nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), + nn.LeakyReLU(0.2, inplace=True), + # state size. (ndf) x 32 x 32 + nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), + nn.BatchNorm2d(ndf * 2), + nn.LeakyReLU(0.2, inplace=True), + # state size. (ndf*2) x 16 x 16 + nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), + nn.BatchNorm2d(ndf * 4), + nn.LeakyReLU(0.2, inplace=True), + # state size. (ndf*4) x 8 x 8 + nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), + nn.BatchNorm2d(ndf * 8), + nn.LeakyReLU(0.2, inplace=True), + # state size. (ndf*8) x 4 x 4 + nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False), + nn.Sigmoid() + ) + + def forward(self, input): + return self.main(input).view(-1, 1).squeeze(1) + + bs, nz, ngf, nc, ndf = 5, 6, 9, 3, 10 + input = (torch.rand(bs, nz, 1, 1),) + model = DCGANGenerator(nz, ngf, nc) + self.checkExportImport(model, input) + + def test_neural_style(self): + class TransformerNet(nn.Module): + def __init__(self): + super(TransformerNet, self).__init__() + # Initial convolution layers + self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) + self.in1 = nn.InstanceNorm2d(32, affine=True) + self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) + self.in2 = nn.InstanceNorm2d(64, affine=True) + self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) + self.in3 = nn.InstanceNorm2d(128, affine=True) + # Residual layers + self.res1 = ResidualBlock(128) + self.res2 = ResidualBlock(128) + self.res3 = ResidualBlock(128) + self.res4 = ResidualBlock(128) + self.res5 = ResidualBlock(128) + # Upsampling Layers + self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) + self.in4 = nn.InstanceNorm2d(64, affine=True) + self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) + self.in5 = nn.InstanceNorm2d(32, affine=True) + self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) + # Non-linearities + self.relu = nn.ReLU() + + def forward(self, X): + y = self.relu(self.in1(self.conv1(X))) + y = self.relu(self.in2(self.conv2(y))) + y = self.relu(self.in3(self.conv3(y))) + y = self.res1(y) + y = self.res2(y) + y = self.res3(y) + y = self.res4(y) + y = self.res5(y) + y = self.relu(self.in4(self.deconv1(y))) + y = self.relu(self.in5(self.deconv2(y))) + y = self.deconv3(y) + return y + + class ConvLayer(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride): + super(ConvLayer, self).__init__() + reflection_padding = kernel_size // 2 + self.reflection_pad = nn.ReflectionPad2d(reflection_padding) + self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) + + def forward(self, x): + out = self.reflection_pad(x) + out = self.conv2d(out) + return out + + class ResidualBlock(nn.Module): + """ResidualBlock 
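+            (two 3x3 ConvLayers with InstanceNorm2d and an identity shortcut)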
+ introduced in: https://arxiv.org/abs/1512.03385 + recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html + """ + + def __init__(self, channels): + super(ResidualBlock, self).__init__() + self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) + self.in1 = nn.InstanceNorm2d(channels, affine=True) + self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) + self.in2 = nn.InstanceNorm2d(channels, affine=True) + self.relu = nn.ReLU() + + def forward(self, x): + residual = x + out = self.relu(self.in1(self.conv1(x))) + out = self.in2(self.conv2(out)) + out = out + residual + return out + + class UpsampleConvLayer(nn.Module): + """UpsampleConvLayer + Upsamples the input and then does a convolution. This method gives better results + compared to ConvTranspose2d. + ref: http://distill.pub/2016/deconv-checkerboard/ + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): + super(UpsampleConvLayer, self).__init__() + self.upsample = upsample + if upsample: + self.upsample_layer = nn.Upsample(mode='nearest', scale_factor=upsample) + reflection_padding = kernel_size // 2 + self.reflection_pad = nn.ReflectionPad2d(reflection_padding) + self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) + + def forward(self, x): + x_in = x + if self.upsample: + x_in = self.upsample_layer(x_in) + out = self.reflection_pad(x_in) + out = self.conv2d(out) + return out + + model = TransformerNet() + input = (torch.rand(5, 3, 16, 16),) + self.checkExportImport(model, input) + + def test_mnist(self): + # eval() is present because dropout makes this nondeterministic + self.checkExportImport(MnistNet().eval(), (torch.rand(5, 1, 28, 28),)) + + def test_reinforcement_learning(self): + class Policy(nn.Module): + def __init__(self): + super(Policy, self).__init__() + self.affine1 = nn.Linear(4, 128) + self.affine2 = nn.Linear(128, 2) + + def forward(self, x): + x = F.relu(self.affine1(x)) + action_scores = self.affine2(x) + return F.softmax(action_scores, dim=1) + + self.checkExportImport(Policy(), (torch.rand(1, 4),)) + + def test_snli(self): + + class Encoder(nn.Module): + + def __init__(self, config): + super(Encoder, self).__init__() + #self.config = config + input_size = config["d_proj"] if config["projection"] else config["d_embed"] + dropout = 0 if config["n_layers"] == 1 else config["dp_ratio"] + self.rnn = nn.LSTM(input_size=input_size, hidden_size=config["d_hidden"], + num_layers=config["n_layers"], dropout=dropout, + bidirectional=config["birnn"]) + self.n_cells = config["n_cells"] + self.d_hidden = config["d_hidden"] + self.birnn = config["birnn"] + + def forward(self, inputs): + batch_size = inputs.size()[1] + state_shape = self.n_cells, batch_size, self.d_hidden + h0 = c0 = inputs.new_zeros(state_shape) + outputs, (ht, ct) = self.rnn(inputs, (h0, c0)) + return ht[-1] if not self.birnn else ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1) + + class SNLIClassifier(nn.Module): + + def __init__(self, config): + super(SNLIClassifier, self).__init__() + self.embed = nn.Embedding(config["n_embed"], config["d_embed"]) + self.projection = Linear(config["d_embed"], config["d_proj"]) + self.encoder = Encoder(config) + self.dropout = nn.Dropout(p=config["dp_ratio"]) + self.relu = nn.ReLU() + seq_in_size = 2 * config["d_hidden"] + if config["birnn"]: + seq_in_size *= 2 + lin_config = [seq_in_size] * 2 + self.out = nn.Sequential( + Linear(*lin_config), + self.relu, + self.dropout, + Linear(*lin_config), + self.relu, + 
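+                # note: the same ReLU and Dropout module instances are reused at each
+                # stage of this Sequential; both are stateless, so sharing is safe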
self.dropout, + Linear(*lin_config), + self.relu, + self.dropout, + Linear(seq_in_size, config["d_out"])) + self.fix_emb = config["fix_emb"] + self.project = config["projection"] + + def forward(self, premise, hypothesis): + prem_embed = self.embed(premise) + hypo_embed = self.embed(hypothesis) + if self.fix_emb: + prem_embed = prem_embed.detach() + hypo_embed = hypo_embed.detach() + if self.project: + prem_embed = self.relu(self.projection(prem_embed)) + hypo_embed = self.relu(self.projection(hypo_embed)) + premise = self.encoder(prem_embed) + hypothesis = self.encoder(hypo_embed) + scores = self.out(torch.cat([premise, hypothesis], 1)) + return scores + + Config = { + "n_embed": 100, + "d_embed": 100, + "d_proj": 300, + "dp_ratio": 0.0, # For deterministic testing TOD": change by fixing seed in checkTrace?, + "d_hidden": 30, + "birnn": True, + "d_out": 300, + "fix_emb": True, + "projection": True, + "n_layers": 2, + "n_cells": 4 # 2 * n_layers because birnn = True, + } + + premise = torch.LongTensor(48, 64).random_(0, 100) + hypothesis = torch.LongTensor(24, 64).random_(0, 100) + + self.checkExportImport(SNLIClassifier(Config), (premise, hypothesis)) + + def test_super_resolution(self): + class Net(nn.Module): + + def __init__(self, upscale_factor): + super(Net, self).__init__() + + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) + self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) + self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)) + self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1)) + self.pixel_shuffle = nn.PixelShuffle(upscale_factor) + + def forward(self, x): + x = self.relu(self.conv1(x)) + x = self.relu(self.conv2(x)) + x = self.relu(self.conv3(x)) + x = self.pixel_shuffle(self.conv4(x)) + return x + + net = Net(upscale_factor=4) + self.checkExportImport(net, (torch.rand(5, 1, 32, 32),)) + + @unittest.skip('Need to support Loop') # FIXME + def test_time_sequence_prediction(self): + class Sequence(nn.Module): #torch.jit.ScriptModule + def __init__(self): + super(Sequence, self).__init__() + self.lstm1 = nn.LSTMCell(1, 51) + self.lstm2 = nn.LSTMCell(51, 51) + self.linear = nn.Linear(51, 1) + + #@torch.jit.script_method + def forward(self, input): + # TODO: add future as input with default val + # see https://github.com/pytorch/pytorch/issues/8724 + outputs = torch.empty((3, 0)) + h_t = torch.zeros((3, 51)) + c_t = torch.zeros((3, 51)) + h_t2 = torch.zeros((3, 51)) + c_t2 = torch.zeros((3, 51)) + + output = torch.zeros([3, 51]) + future = 2 + + # TODO: chunk call should appear as the for loop iterable + # We hard-code it to 4 for now. 
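+                # the test feeds torch.rand(3, 4), so input.size(1) == 4 and chunk()
+                # yields exactly the four (3, 1) tensors unpacked below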
+ a, b, c, d = input.chunk(input.size(1), dim=1) + for input_t in (a, b, c, d): + h_t, c_t = self.lstm1(input_t, (h_t, c_t)) + h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2)) + output = self.linear(h_t2) + outputs = torch.cat((outputs, output), 1) + for _ in range(future): # if we should predict the future + h_t, c_t = self.lstm1(output, (h_t, c_t)) + h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2)) + output = self.linear(h_t2) + outputs = torch.cat((outputs, output), 1) + return outputs + + class Traced(nn.Module): + def __init__(self): + super(Traced, self).__init__() + self.seq = Sequence() + + def forward(self, input): + return self.seq.forward(input) + + self.checkExportImport(Traced(), (torch.rand(3, 4),)) + + @unittest.skip('incorrectly assigned weights') # FIXME + def test_vae(self): + class VAE(nn.Module): + def __init__(self): + super(VAE, self).__init__() + + self.fc1 = nn.Linear(784, 400) + self.fc21 = nn.Linear(400, 20) + self.fc22 = nn.Linear(400, 20) + self.fc3 = nn.Linear(20, 400) + self.fc4 = nn.Linear(400, 784) + + def encode(self, x): + h1 = F.relu(self.fc1(x)) + return self.fc21(h1), self.fc22(h1) + + def reparameterize(self, mu, logvar): + if self.training: + std = torch.exp(0.5 * logvar) + eps = torch.randn_like(std) + return eps.mul(std).add_(mu) + else: + return mu + + def decode(self, z): + h3 = F.relu(self.fc3(z)) + return torch.sigmoid(self.fc4(h3)) + + def forward(self, x): + mu, logvar = self.encode(x.view(-1, 784)) + z = self.reparameterize(mu, logvar) + return self.decode(z), mu, logvar + + self.checkExportImport(VAE().eval(), (torch.rand(128, 1, 28, 28),)) + + def test_torchvision_resnet18(self): + from .inject_nn import inject_pytorch_nn, remove_inject_pytorch_nn + try: + inject_pytorch_nn() + self.checkExportImport(torchvision.models.resnet18().eval(), (torch.ones(1, 3, 224, 224),)) + finally: + remove_inject_pytorch_nn() + + def test_resnet(self): + def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + class BasicBlock(nn.Module): #torch.jit.ScriptModule + expansion = 1 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + # NOTE: jit cannot be annotated, otherwise, module id is not matched for recorded arguments + #@torch.jit.script_method + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + # NOTE: cannot inherit torch.jit.ScriptModule, otherwise, there would be error: 'RecursiveScriptModule' object has no attribute 'graph' + class ResNet(nn.Module): #torch.jit.ScriptModule + __constants__ = ['layer1', 'layer2', 'layer3', 'layer4'] + + def __init__(self, block, layers, num_classes=1000): + super(ResNet, self).__init__() + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) 
+ self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + torch.nn.init.constant_(m.weight, 1) + torch.nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + # NOTE: jit cannot be annotated, otherwise, module id is not matched for recorded arguments + #@torch.jit.script_method + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + resnet18 = ResNet(BasicBlock, [2, 2, 2, 2]) + + self.checkExportImport(resnet18, (torch.randn(1, 3, 224, 224),)) + + def test_alexnet(self): + from .inject_nn import inject_pytorch_nn, remove_inject_pytorch_nn + try: + inject_pytorch_nn() + x = torch.ones(1, 3, 224, 224) + model = torchvision.models.AlexNet() + self.checkExportImport(model, (x,)) + finally: + remove_inject_pytorch_nn() + +class TestConvertWithShape(TestConvert, ConvertWithShapeMixin): + pass diff --git a/test/ut/retiarii/test_convert_basic.py b/test/ut/retiarii/test_convert_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..145f62f636e09a1f27070c738a32ce3e25dfc7e7 --- /dev/null +++ b/test/ut/retiarii/test_convert_basic.py @@ -0,0 +1,286 @@ +import os +import sys +import unittest + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import basic_unit + +from .convert_mixin import ConvertMixin, ConvertWithShapeMixin +from nni.retiarii.codegen import model_to_pytorch_script + +# following pytorch v1.7.1 + +class TestConvert(unittest.TestCase, ConvertMixin): + @staticmethod + def _match_state_dict(current_values, expected_format): + result = {} + for k, v in expected_format.items(): + for idx, cv in enumerate(current_values): + if cv.shape == v.shape: + result[k] = cv + current_values.pop(idx) + break + return result + + def checkExportImport(self, model, input, check_value=True): + model_ir = self._convert_model(model, input) + model_code = model_to_pytorch_script(model_ir) + print(model_code) + + exec_vars = {} + exec(model_code + '\n\nconverted_model = _model()', exec_vars) + converted_model = exec_vars['converted_model'] + converted_state_dict = self._match_state_dict(list(model.state_dict().values()), + dict(converted_model.state_dict())) + 
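# the regenerated code uses fresh parameter names, so _match_state_dict re-attaches weights purely by shape (greedily, in state_dict order) before the outputs are compared +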
converted_model.load_state_dict(converted_state_dict) + with torch.no_grad(): + expected_output = model.eval()(*input) + converted_output = converted_model.eval()(*input) + if check_value: + self.assertEqual(len(converted_output), len(expected_output)) + for a, b in zip(converted_output, expected_output): + if hasattr(a, 'dtype') and a.dtype == torch.bool: + self.assertEqual((a ^ b), False) + elif isinstance((a - b), int): + self.assertEqual((a - b), 0) + else: + self.assertLess((a - b).abs().max().item(), 1E-4) + return converted_model + + # skip torch.Tensor.new_tensor as it is not supported by jit + + def test_basic_new_full(self): + class SimpleOp(nn.Module): + def forward(self, x): + # requires_grad is not supported by jit + # aten::new_full(Tensor self, int[] size, Scalar fill_value, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor): + # Keyword argument requires_grad unknown. + out = x.new_full((3, 4), 3.141592, dtype=torch.float32, device=torch.device('cpu')) + return out + self.checkExportImport(SimpleOp(), (torch.ones((2,), dtype=torch.float64), )) + + def test_basic_new_empty(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.new_empty((2, 3), dtype=torch.int8, device=torch.device('cpu')) + return out + self.checkExportImport(SimpleOp(), (torch.ones(()), ), check_value=False) + + # skip torch.Tensor.new_ones as it is not supported by jit + + # requires_grad=False is not supported by jit + def test_basic_new_zeros(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.new_zeros((2, 3)) + return out + self.checkExportImport(SimpleOp(), (torch.tensor((), dtype=torch.int32), )) + + def test_basic_is_cuda(self): + class SimpleOp(nn.Module): + def forward(self, x): + return torch.tensor([x.is_cuda], dtype=torch.bool, device=torch.device('cpu')) + self.checkExportImport(SimpleOp(), (torch.tensor((), dtype=torch.int32), )) + + # is_quantized + # is_meta + # device + # grad + # ndim + # T + # real + # imag + + def test_basic_abs(self): + class SimpleOp(nn.Module): + def forward(self, x): + out1 = x.abs() + out11 = x.absolute() + out2 = torch.abs(x) + #out3 = x.abs_() + #out33 = x.absolute_() + return out1, out11, out2#, out3, out33 + self.checkExportImport(SimpleOp(), (torch.tensor([-1, -2, 3]), )) + + # TODO: topological sort should be improved + #def forward(self, x__1): + # __Acos2 = x__1.acos() + # __Acos_3 = x__1.acos_() + # __Acos1 = x__1.acos() + # __TupleConstruct4 = (__Acos1,__Acos2,__Acos_3) + # return __TupleConstruct4 + def test_basic_acos_asin_atan(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out1 = x.acos() + out2 = torch.acos(x) + # TODO: add back this line + #out = x.acos_() + out3 = x.asin() + out4 = torch.asin(x) + out5 = x.atan() + out6 = torch.atan(x) + out7 = x.atan2(y) + out8 = torch.atan2(x, y) + return out1, out2, out3, out4, out5, out6, out7, out8#, out + self.checkExportImport(SimpleOp(), (torch.tensor([-1.0, -0.5, 0.2]), torch.tensor([1.0, 0.6, -0.3]), )) + + # arccos is not supported by jit + + def test_basic_add(self): + class SimpleOp(nn.Module): + def forward(self, x): + t = torch.tensor([-1.0, -0.5, 0.2]) + out1 = x.add(t) + out2 = x.add(t, alpha=2) + #out3 = x.add_(t) + return out1, out2#, out3 + self.checkExportImport(SimpleOp(), (torch.tensor([-1.0, -0.5, 0.2]), )) + + def test_basic_addbmm(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z, m): + out1 = x.addbmm(y, z, beta=2, alpha=3) + out2 = torch.addbmm(x, y, z, beta=2, alpha=3) + #out3 = 
x.addbmm_(y, z, beta=2, alpha=3) + out3 = m.baddbmm(y, z, beta=2, alpha=3) + out4 = torch.baddbmm(m, y, z, beta=2, alpha=3) + out5 = torch.bmm(y, z) # deterministic is not supported by jit + return out1, out2, out3, out4, out5 + self.checkExportImport(SimpleOp(), (torch.randn(3, 5), torch.randn(10, 3, 4), torch.randn(10, 4, 5), torch.randn(10, 3, 5), )) + + def test_basic_addcdiv(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out1 = x.addcdiv(y, z, value=2) + out2 = torch.addcdiv(x, y, z, value=2) + # addcdiv_ + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), )) + + def test_basic_addcmul(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out1 = x.addcmul(y, z, value=0.1) + out2 = torch.addcmul(x, y, z, value=0.1) + # addcmul_ + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), )) + + def test_basic_addmm(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out1 = x.addmm(y, z, beta=0.1, alpha=0.2) + out2 = torch.addmm(x, y, z, beta=0.1, alpha=0.2) + # addmm_ + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3), )) + + def test_basic_addmv(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out1 = x.addmv(y, z, beta=0.1, alpha=0.2) + out2 = torch.addmv(x, y, z, beta=0.1, alpha=0.2) + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.randn(2), torch.randn(2, 3), torch.randn(3), )) + + def test_basic_addr(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out1 = x.addr(y, z, beta=2, alpha=3) + out2 = torch.addr(x, y, z, beta=2, alpha=3) + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.zeros(3, 2), torch.arange(1., 4.), torch.arange(1., 3.), )) + + def test_basic_allclose(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out1 = x.allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False) + out2 = torch.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False) + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]), )) + + def test_basic_angle(self): + class SimpleOp(nn.Module): + def forward(self, x): + out1 = x.angle() + out2 = torch.angle(x) + return out1, out2 + self.checkExportImport(SimpleOp(), (torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]), )) + + # skip apply_(callable) for now + + def test_basic_argmax_argmin(self): + class SimpleOp(nn.Module): + def forward(self, x): + out1 = x.argmax() + out2 = torch.argmax(x) + out3 = x.argmax(dim=1) + out4 = torch.argmax(x, dim=1) + out5 = x.argmax(dim=1, keepdim=True) + o1 = x.argmin() + o2 = torch.argmin(x) + o3 = x.argmin(dim=1) + o4 = x.argmin(dim=1, keepdim=True) + return out1, out2, out3, out4, out5, o1, o2, o3, o4 + self.checkExportImport(SimpleOp(), (torch.randn(4, 4), )) + + def test_basic_argsort(self): + class SimpleOp(nn.Module): + def forward(self, x): + out1 = x.argsort() + out2 = x.argsort(dim=1) + out3 = x.argsort(dim=1, descending=True) + out4 = torch.argsort(x, dim=1, descending=True) + return out1, out2, out3, out4 + self.checkExportImport(SimpleOp(), (torch.randn(4, 4), )) + + # skip backward(gradient=None, retain_graph=None, create_graph=False) + + def test_basic_bernoulli(self): + class SimpleOp(nn.Module): + def forward(self, x): + # generator=torch.Generator() is not supported by jit + out = x.bernoulli() + return out + self.checkExportImport(SimpleOp(), 
(torch.ones(3, 3), )) + + # bfloat16/bool/byte/char are not supported by jit + + def test_basic_bincount(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out1 = x.bincount() + out2 = torch.bincount(x) + out3 = x.bincount(weights=y) + out4 = x.bincount(weights=y, minlength=2) + return out1, out2, out3, out4 + self.checkExportImport(SimpleOp(), (torch.randint(0, 8, (5,), dtype=torch.int64), torch.linspace(0, 1, steps=5), )) + + def test_basic_bitwise(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out1 = x.bitwise_not() + out2 = x.bitwise_and(y) + out3 = x.bitwise_or(y) + out4 = x.bitwise_xor(y) + return out1, out2, out3, out4 + self.checkExportImport(SimpleOp(), (torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8), )) + + # cauchy_ is not supported yet + + def test_ceil(self): + class SimpleOp(nn.Module): + def forward(self, x): + out1 = x.ceil() + return out1 + self.checkExportImport(SimpleOp(), (torch.randn(4), )) + + +class TestConvertWithShape(TestConvert, ConvertWithShapeMixin): + pass diff --git a/test/ut/retiarii/test_convert_models.py b/test/ut/retiarii/test_convert_models.py new file mode 100644 index 0000000000000000000000000000000000000000..26851d1e0b8b366dff9ca3e4ddfde4d6f2c1f119 --- /dev/null +++ b/test/ut/retiarii/test_convert_models.py @@ -0,0 +1,139 @@ +import os +import sys +import unittest +from typing import (Dict) + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import serialize +from nni.retiarii.codegen import model_to_pytorch_script + +from .convert_mixin import ConvertMixin, ConvertWithShapeMixin + + +class TestModels(unittest.TestCase, ConvertMixin): + @staticmethod + def _match_state_dict(current_values, expected_format): + result = {} + for k, v in expected_format.items(): + for idx, cv in enumerate(current_values): + if cv.shape == v.shape: + result[k] = cv + current_values.pop(idx) + break + return result + + def run_test(self, model, input, check_value=True): + model_ir = self._convert_model(model, input) + model_code = model_to_pytorch_script(model_ir) + print(model_code) + + exec_vars = {} + exec(model_code + '\n\nconverted_model = _model()', exec_vars) + converted_model = exec_vars['converted_model'] + converted_state_dict = self._match_state_dict(list(model.state_dict().values()), + dict(converted_model.state_dict())) + converted_model.load_state_dict(converted_state_dict) + with torch.no_grad(): + expected_output = model.eval()(*input) + converted_output = converted_model.eval()(*input) + if check_value: + try: + self.assertEqual(len(converted_output), len(expected_output)) + for a, b in zip(converted_output, expected_output): + torch.eq(a, b) # smoke check only: equality is computed but not asserted + except Exception: + self.assertEqual(converted_output, expected_output) + return converted_model + + def test_nested_modulelist(self): + class Net(nn.Module): + def __init__(self, num_nodes, num_ops_per_node): + super().__init__() + self.ops = nn.ModuleList() + self.num_nodes = num_nodes + self.num_ops_per_node = num_ops_per_node + for _ in range(num_nodes): + self.ops.append(nn.ModuleList([nn.Linear(16, 16) for __ in range(num_ops_per_node)])) + + def forward(self, x): + state = x + for ops in self.ops: + for op in ops: + state = op(state) + return state + + model = Net(4, 2) + x = torch.rand((16, 16), dtype=torch.float) + self.run_test(model, (x, )) + + def test_append_input_tensor(self): + from typing import List + + class Net(nn.Module): + def __init__(self, 
num_nodes): + super().__init__() + self.ops = nn.ModuleList() + self.num_nodes = num_nodes + for _ in range(num_nodes): + self.ops.append(nn.Linear(16, 16)) + + def forward(self, x: List[torch.Tensor]): + state = x + for ops in self.ops: + state.append(ops(state[-1])) + return state[-1] + + model = Net(4) + x = torch.rand((1, 16), dtype=torch.float) + self.run_test(model, ([x], )) + + def test_channels_shuffle(self): + class Net(nn.Module): + def forward(self, x): + bs, num_channels, height, width = x.size() + x = x.reshape(bs * num_channels // 2, 2, height * width) + x = x.permute(1, 0, 2) + x = x.reshape(2, -1, num_channels // 2, height, width) + return x[0], x[1] + + model = Net() + x = torch.rand((1, 64, 224, 224), dtype=torch.float) + self.run_test(model, (x, )) + + def test_identity_node(self): + class Net(nn.Module): + def forward(self, x): + return x + + model = Net() + x = torch.rand((1, 64, 224, 224), dtype=torch.float) + self.run_test(model, (x, )) + + def test_nn_sequential_inherit(self): + class ConvBNReLU(nn.Sequential): + def __init__(self): + super().__init__( + nn.Conv2d(3, 3, 1, 1, bias=False), + nn.BatchNorm2d(3), + nn.ReLU(inplace=False) + ) + + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv_bn_relu = ConvBNReLU() + + def forward(self, x): + return self.conv_bn_relu(x) + + model = Net() + x = torch.rand((1, 3, 224, 224), dtype=torch.float) + self.run_test(model, (x, )) + +class TestModelsWithShape(TestModels, ConvertWithShapeMixin): + pass diff --git a/test/ut/retiarii/test_convert_operators.py b/test/ut/retiarii/test_convert_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc24fff64748b6aea2f859a023b2577c4ff5359 --- /dev/null +++ b/test/ut/retiarii/test_convert_operators.py @@ -0,0 +1,1399 @@ + +''' +The tests in this file are copied and transformed from +`https://github.com/pytorch/pytorch/blob/master/test/onnx/test_operators.py` +''' + +import os +import sys +import unittest +from typing import (Dict) + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +import nni.retiarii.nn.pytorch as nn +from nni.retiarii.codegen import model_to_pytorch_script + +from .convert_mixin import ConvertMixin, ConvertWithShapeMixin + +# following pytorch v1.7.1 + + +class TestOperators(unittest.TestCase, ConvertMixin): + @staticmethod + def _match_state_dict(current_values, expected_format): + result = {} + for k, v in expected_format.items(): + for idx, cv in enumerate(current_values): + if cv.shape == v.shape: + result[k] = cv + current_values.pop(idx) + break + return result + + def checkExportImport(self, model, input, check_value=True): + model_ir = self._convert_model(model, input) + model_code = model_to_pytorch_script(model_ir) + #print(model_code) + + exec_vars = {} + exec(model_code + '\n\nconverted_model = _model()', exec_vars) + converted_model = exec_vars['converted_model'] + converted_state_dict = self._match_state_dict(list(model.state_dict().values()), + dict(converted_model.state_dict())) + converted_model.load_state_dict(converted_state_dict) + with torch.no_grad(): + expected_output = model.eval()(*input) + converted_output = converted_model.eval()(*input) + if check_value: + try: + self.assertEqual(len(converted_output), len(expected_output)) + for a, b in zip(converted_output, expected_output): + torch.eq(a, b) # smoke check only: equality is computed but not asserted + except Exception: + self.assertEqual(converted_output, expected_output) + return converted_model + + def test_basic_basic(self): + class SimpleOp(nn.Module): + def 
forward(self, x, y): + out = -torch.sigmoid(torch.tanh(x * (x + y))) + return out + x = torch.tensor([0.4], requires_grad=True) + y = torch.tensor([0.7], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, y, )) + + def test_basic_view(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.view(1, 1) + return out + x = torch.tensor([0.0], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_index(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x[0] + return out + x = torch.tensor([[0.0]], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_type_as(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.type_as(x) + return out + x = torch.tensor([0.0], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_addconstant(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x + 1 + return out + x = torch.randn(2, 3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_add_broadcast(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(2, 3, requires_grad=True).double() + y = torch.randn(3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, y, )) + + def test_basic_add_left_broadcast(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(3, requires_grad=True).double() + y = torch.randn(2, 3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_add_size1_broadcast(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(2, 3, requires_grad=True).double() + y = torch.randn(2, 1, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_add_size1_right_broadcast(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(2, 3, requires_grad=True).double() + y = torch.randn(3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_add_size1_singleton_broadcast(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(2, 3, requires_grad=True).double() + y = torch.randn(1, 3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_rsub(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = 1 - x + return out + x = torch.randn(2, 3, requires_grad=True).double() + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_transpose(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.transpose(0, 1).transpose(1, 0) + return out + x = torch.tensor([[0.0, 1.0], [2.0, 3.0]], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_chunk(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.chunk(2) + return out + x = torch.tensor([0.0, 1.0, 2.0], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_split(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.split(x, 2, 1) + return out + x = torch.tensor([[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_split_with_sizes(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.split(x, [2, 1, 3], 1) 
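+ # splits the six columns of x into chunks of width 2, 1 and 3 along dim 1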
+ return out + x = torch.tensor([[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]) + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip('cannot be parsed by jit') + def test_basic_concat2(self): + class SimpleOp(nn.Module): + def forward(self, inputs): + out = torch.cat(inputs, 1) + return out + x = torch.randn(2, 3) + y = torch.randn(2, 3) + self.checkExportImport(SimpleOp(), ((x, y), )) + + + def test_basic_addmm(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out = torch.addmm(torch.addmm(z, x, y), x, y) + return out + m1 = torch.randn(2, 3, requires_grad=True) + m2 = torch.randn(3, 4, requires_grad=True) + m3 = torch.randn(4, requires_grad=True) + self.checkExportImport(SimpleOp(), (m1, m2, m3, )) + + + def test_basic_permute2(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.permute(0, 1, 4, 2, 5, 3) + return out + x = torch.tensor([[[[[[0.0]]]]]], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_params(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = -torch.sigmoid(torch.tanh(x * (x + y))) + return out + x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True) + y = torch.nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_params_onnx_irv4(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = -torch.sigmoid(torch.tanh(x * (x + y))) + return out + x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True) + y = torch.nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_clip(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.clamp(x, min=-0.5, max=0.5) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_clip_min(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.clamp(min=-0.1) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_clip_max(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.clamp(max=0.1) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip('cannot be parsed by jit') + def test_basic_hardtanh(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nn.Hardtanh(-0.5, 0.5)(x) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_full(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.full(x.shape, 2., dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_full_like(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.full_like(x, 2, memory_format=torch.preserve_format) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_max(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = torch.max(x, y) + return out + x = torch.randn(3, 4, requires_grad=True) + y = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_min(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = 
torch.min(x, y) + return out + x = torch.randn(3, 4, requires_grad=True) + y = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_mean(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.mean(x) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_mean(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.mean(x, dim=2) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_mean_keepdim(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.mean(x, dim=(2, 3), keepdim=True) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_sum(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.sum(x) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_sum(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.sum(x, dim=(1, 2)) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_sum_keepdim(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.sum(x, dim=2, keepdim=True) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_prod(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.prod(x) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_prod(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.prod(x, dim=2) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduced_prod_keepdim(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.prod(x, dim=2, keepdim=True) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_sqrt(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.sqrt(x) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_rsqrt(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.rsqrt(x) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_equal(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x == y + return out + x = torch.randn(1, 2, 3, 1, requires_grad=False).int() + y = torch.randn(1, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_lt(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x < y + return out + x = torch.randn(1, 2, 3, 1, requires_grad=False).int() + y = torch.randn(1, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_gt(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x > y + return out + x = torch.randn(1, 2, 3, 1, requires_grad=False).int() + y = torch.randn(1, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_le(self): + class SimpleOp(nn.Module): + def 
forward(self, x, y): + out = x <= y + return out + x = torch.randn(3, 4, requires_grad=False).int() + y = torch.randn(3, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_ge(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x >= y + return out + x = torch.randn(3, 4, requires_grad=False).int() + y = torch.randn(3, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + def test_basic_exp(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.exp() + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_sin(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.sin() + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_cos(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.cos() + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_tan(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.tan() + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_asin(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.asin() + return out + x = torch.rand(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_acos(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.acos() + return out + x = torch.rand(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_slice(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x[:, 1:2] + return out + x = torch.rand(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_slice_dynamic(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x[x.size(0):, x.size(1) - 3] + return out + x = torch.rand(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_sign(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.sign() + return out + x = torch.rand(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_narrow(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.narrow(x, 0, 0, 2) + return out + x = torch.randn(3, 3, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_atan(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.atan() + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_view_flatten(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.view(x.size()[0], x.numel() // x.size()[0]) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_flatten(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.flatten(x) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_flatten2D(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.flatten(x, 1) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_isnan(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.isnan(x) + 
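# the float('nan') element of the test input below is the only one mapped to True +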
return out + x = torch.tensor([1, float('nan'), 2]) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_argmax(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.argmax(x, dim=1) + return out + x = torch.randn(4, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_pow(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x.pow(y) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + y = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_repeat(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.repeat(1, 2, 3, 4) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_repeat_dim_overflow(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.repeat(1, 2, 3, 4) + return out + x = torch.randn(1, 2, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_norm_p1(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.norm(p=1, dim=2) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_norm_p2(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.norm(p=2, dim=2) + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_upsample_nearest_size(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nn.functional.interpolate(x, size=16, mode='nearest') + return out + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_unsqueeze(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.unsqueeze(len(x.shape)) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_implicit_expand(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x + 1 + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reduce_sum_negative_indices(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.sum(-1) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_randn(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.randn(1, 2, 3, 4) + x + return out + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_rand(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.rand(1, 2, 3, 4) + x + return out + x = torch.rand(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_empty_like(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.empty_like(x) + return out + x = torch.randn(5, 8, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_empty_like_opset7(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.empty_like(x) + return out + x = torch.randn(5, 8, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_zeros_like(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.zeros_like(x) + return out + x = torch.randn(5, 8, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_ones_like(self): + class 
SimpleOp(nn.Module): + def forward(self, x): + out = torch.ones_like(x) + return out + x = torch.randn(6, 10, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_expand(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.expand(4, 6, 2) + return out + x = torch.randn(6, 1, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_ne(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = torch.ne(x, y) + return out + x = torch.randn(1, 2, 3, 1, requires_grad=False).int() + y = torch.randn(1, 4, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_reducemax(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.max(x) + return out + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_reducemin(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.min(x) + return out + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_erf(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.erf() + return out + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_dropout(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.max(torch.nn.functional.dropout(x, training=False)) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_dropout_default(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.max(torch.nn.functional.dropout(x,)) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, ), check_value=False) + + def test_basic_dropout_training(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.max(torch.nn.functional.dropout(x)) + return out + x = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, ), check_value=False) + + def test_basic_nonzero(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nonzero(x) + return out + x = torch.tensor([[[2., 2.], [1., 0.]], [[0., 0.], [1., 1.]]], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_gather(self): + class SimpleOp(nn.Module): + def forward(self, data, index): + out = data.gather(1, index) + return out + data = torch.randn(3, 4, 3, requires_grad=True) + index = torch.tensor([2, 0]).view(1, 2, 1).expand(3, 2, 3) + self.checkExportImport(SimpleOp(), (data, index, )) + + + def test_basic_gather_opset11(self): + class SimpleOp(nn.Module): + def forward(self, data, index): + out = data.gather(1, index) + return out + data = torch.randn(3, 4, 3, requires_grad=True) + index = torch.tensor([2, 0]).view(1, 2, 1).expand(3, 2, 3) + self.checkExportImport(SimpleOp(), (data, index, )) + + + def test_basic_scatter_add(self): + class SimpleOp(nn.Module): + def forward(self, data, indices, values): + out = data.scatter_add(1, indices, values) + return out + data = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) + indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64) + values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]) + self.checkExportImport(SimpleOp(), (data, indices, values, )) + + + def test_basic_scatter_add_opset11(self): + class SimpleOp(nn.Module): + def forward(self, data, indices, values): + out = data.scatter_add(1, indices, values) + return out + data = torch.tensor([[0., 
0., 0.], [0., 0., 0.], [0., 0., 0.]]) + indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64) + values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]) + self.checkExportImport(SimpleOp(), (data, indices, values, )) + + + def test_basic_master_opset(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = x + y + return out + x = torch.randn(2, 3).float() + y = torch.randn(2, 3).float() + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_std(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.std(x, dim=(0, 1), unbiased=True, keepdim=True) + return out + x = torch.randn(2, 3, 4).float() + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_cumsum(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.cumsum(x, dim=1) + return out + x = torch.randn(2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_pixel_shuffle(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.pixel_shuffle(x, upscale_factor=2) + return out + x = torch.randn(2, 8, 3, 4).float() + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip('skip as torch.norm is called with prim::CallFunction, also torch.norm is deprecated') + def test_basic_frobenius_norm(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.norm(x, p="fro", dim=(0, 1), keepdim=True) + return out + x = torch.randn(2, 3, 4).float() + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_unfold(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = x.unfold(dimension=2, size=2, step=2) + return out + x = torch.randn(2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_remainder(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = torch.remainder(x, y) + return out + x = torch.randn(2, 3, 4) + y = torch.randn(2, 1, 4) + self.checkExportImport(SimpleOp(), (x, y, )) + + def test_basic_fmod(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = torch.fmod(x, y) + return out + x = torch.randn(2, 3, 4) + y = torch.randn(2, 1, 4) + self.checkExportImport(SimpleOp(), (x, y, )) + + + def test_basic_gelu(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nn.functional.gelu(x) + return out + x = torch.randn(2, 3, 4, 5, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip('skip as it is called with prim::CallFunction, and unknown func definition') + def test_basic_unique(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.unique(x, dim=0, sorted=True, return_inverse=False, return_counts=True) + return out + x = torch.randint(3, (2, 3, 4, 5)).float() + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_meshgrid(self): + class SimpleOp(nn.Module): + def forward(self, x, y, z): + out = torch.meshgrid(x, y, z) + return out + x = torch.ones(3, requires_grad=True) + y = torch.zeros(4, requires_grad=True) + z = torch.ones(5, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, y, z, )) + + + def test_basic_topk(self): + class SimpleOp(nn.Module): + def forward(self, x, k): + out = torch.topk(x, k) + return out + x = torch.arange(1., 6., requires_grad=True) + k = torch.tensor(3) + self.checkExportImport(SimpleOp(), (x, k, )) + + + def test_basic_topk_smallest_unsorted(self): + class SimpleOp(nn.Module): + def forward(self, x, k): + out = torch.topk(x, k, largest=False, sorted=False) + return out + x = 
torch.arange(1., 6., requires_grad=True) + k = torch.tensor(3) + self.checkExportImport(SimpleOp(), (x, k, )) + + + def test_basic_baddbmm(self): + class SimpleOp(nn.Module): + def forward(self, x, b1, b2): + out = torch.baddbmm(x, b1, b2) + return out + x = torch.randn(10, 3, 5) + b1 = torch.randn(10, 3, 4) + b2 = torch.randn(10, 4, 5) + self.checkExportImport(SimpleOp(), (x, b1, b2, )) + + + def test_basic_round(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.round(x) + return out + x = torch.tensor([0.9920, -1.0362, -1.5000, 2.5000], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_dim(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.scalar_tensor(x.dim()) + return out + x = torch.ones((2, 2), requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_det(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.det(x) + return out + x = torch.randn(2, 3, 5, 5, device=torch.device('cpu')) + self.checkExportImport(SimpleOp(), (x, )) + + # the followings are more complex tests + + def test_mm(self): + class SimpleOp(nn.Module): + def forward(self, x, y): + out = torch.mm(x, y) + return out + m1 = torch.randn(2, 3, requires_grad=True) + m2 = torch.randn(3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (m1, m2)) + + def test_basic_pad(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.ReflectionPad2d((2, 3, 0, 1)) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.tensor([[[[0.0, 1.0, 1.0, 1.0], [2.0, 3.0, 7.0, 7.0]]]], requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_batchnorm(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.BatchNorm2d(2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.ones(2, 2, 2, 2, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_batchnorm_1d(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.BatchNorm1d(2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.ones(2, 2, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_conv(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.Conv2d(16, 13, 3, bias=False) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.ones(20, 16, 50, 40, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_conv_onnx_irv4_opset8(self): + # This test point checks that for opset 8 (or lower), even if + # keep_initializers_as_inputs is set to False, it is ignored, + # and initializers are listed as ONNX graph input, in accordance + # with ONNX IR v3 semantics (which apply to opset version <= 8). 
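+ # (the note above is inherited from the ONNX operator suite this test was copied from; here the point is simply that a conv with constant-filled weights survives the convert/regenerate round trip)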
+ class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.Conv2d(2, 4, 3, bias=False) + self.m.weight.data.fill_(1.0) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.ones(1, 2, 5, 7, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_convtranspose(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.ConvTranspose2d(3, 3, 3, stride=3, bias=False, + padding=1, output_padding=2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.ones(2, 3, 4, 5, requires_grad=True) + self.checkExportImport(SimpleOp(), (x,)) + + def test_basic_maxpool(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(3, stride=2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(20, 16, 50) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_maxpool_dilations(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(2, stride=1, dilation=2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(20, 16, 50) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_avg_pool2d(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AvgPool2d(3, stride=2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(20, 16, 50, 32) + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip('jit error: "Return value was annotated as having type Tensor but is actually of type Tuple[Tensor, Tensor]"') + def test_basic_maxpool_indices(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(3, stride=2, return_indices=True) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(20, 16, 50) + self.checkExportImport(SimpleOp(), (x, )) + + @unittest.skip("jit error: Tried to access nonexistent attribute or method 'at' of type '__torch__.test_convert_operators.MyFun'") + def test_at_op(self): + from torch.autograd import Function + x = torch.randn(3, 4) + class MyFun(Function): + @staticmethod + def symbolic(g, x): + return g.at("add", x, x) + @staticmethod + def forward(ctx, x): + return x + x + class MyModule(nn.Module): + def forward(self, x): + return MyFun.apply(x) + self.checkExportImport(MyModule(), x) + + def test_basic_logsoftmax(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.LogSoftmax(dim=3) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_elu(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.ELU() + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_selu(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.SELU() + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_upsample_nearest_scale(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nn.functional.interpolate(x, scale_factor=2., + mode='nearest', recompute_scale_factor=False) + return out + + x = torch.randn(1, 2, 3, 4, requires_grad=True) + 
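# with recompute_scale_factor=False the given scale 2.0 is used as-is, so the 3x4 spatial map is upsampled to 6x8 +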
self.checkExportImport(SimpleOp(), (x, )) + + def test_upsample_nearest_scale_default_scale_factor(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.nn.functional.interpolate(x, scale_factor=2., + mode='nearest') + return out + + x = torch.randn(1, 2, 3, 4, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_batchnorm_noaffine(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.BatchNorm2d(128, affine=False, momentum=0.3) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(128, 128, 1, 1, requires_grad=True) + self.checkExportImport(SimpleOp(), (x, )) + + def test_embedding_bags(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.EmbeddingBag(10, 8) + + def forward(self, x, y): + out = self.m(x, y) + return out + + input = torch.tensor([1, 2, 3, 4]).long() + offset = torch.tensor([0]).long() + self.checkExportImport(SimpleOp(), (input, offset, )) + + def test_basic_rrelu(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.RReLU() + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_prelu(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.PReLU(2) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_log_sigmoid(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.LogSigmoid() + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(1, 2, 3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + + def test_basic_linear(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.Linear(4, 5, bias=True) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(3, 4) + self.checkExportImport(SimpleOp(), (x, )) + + def test_retain_param_name_disabled(self): + class MyModule(nn.Module): + def __init__(self): + super(MyModule, self).__init__() + self.fc1 = nn.Linear(4, 5, bias=False) + self.fc1.weight.data.fill_(2.) + self.fc2 = nn.Linear(5, 6, bias=False) + self.fc2.weight.data.fill_(3.) 
+ def forward(self, x): + return self.fc2(self.fc1(x)) + + x = torch.randn(3, 4).float() + self.checkExportImport(MyModule(), (x, )) + + @unittest.skip('Segmentation fault') + def test_dict(self): + class MyModel(nn.Module): + def forward(self, x_in: Dict): + x_out = {} + x_out["test_key_out"] = torch.add(x_in[list(x_in.keys())[0]], list(x_in.keys())[0]) + return x_out + + x = {torch.tensor(1.): torch.randn(1, 2, 3)} + self.checkExportImport(MyModel(), (x, )) + + def test_arange_dynamic(self): + class TestModel(nn.Module): + def forward(self, input): + out = torch.arange(input.shape[0], input.shape[0] + 5, 0.5) + return out + + input = torch.randn(5, 3, 2) + self.checkExportImport(TestModel(), (input, )) + + def test_bitshift(self): + class BitshiftModel(nn.Module): + def forward(self, input, input2): + return input >> 1, input2 >> 2 + + input = torch.arange(24, dtype=torch.float32).reshape(3, 4, 2) + input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2) + self.checkExportImport(BitshiftModel(), (input, input2, )) + + def test_layer_norm_aten(self): + class SimpleOp(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.LayerNorm([10, 10]) + + def forward(self, x): + out = self.m(x) + return out + + x = torch.randn(20, 5, 10, 10) + self.checkExportImport(SimpleOp(), (x, )) + + def test_basic_abs(self): + class SimpleOp(nn.Module): + def forward(self, x): + out = torch.abs(x) + return out + x = torch.randn(1, 2, 3, 1, requires_grad=False).int() + self.checkExportImport(SimpleOp(), (x, )) + +class TestOperatorsWithShape(TestOperators, ConvertWithShapeMixin): + pass diff --git a/test/ut/retiarii/test_convert_pytorch.py b/test/ut/retiarii/test_convert_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..692910642b3a890f26df6ecde6898ec4acfaaadf --- /dev/null +++ b/test/ut/retiarii/test_convert_pytorch.py @@ -0,0 +1,1264 @@ +''' +The tests in this file are copied and transformed from +https://github.com/pytorch/pytorch/blob/master/test/onnx/test_pytorch_onnx_onnxruntime.py +''' + +import os +import sys +import unittest +from typing import (Dict) + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import serialize +from nni.retiarii.codegen import model_to_pytorch_script + +from .convert_mixin import ConvertMixin, ConvertWithShapeMixin + + +class TestPytorch(unittest.TestCase, ConvertMixin): + @staticmethod + def _match_state_dict(current_values, expected_format): + result = {} + for k, v in expected_format.items(): + for idx, cv in enumerate(current_values): + if cv.shape == v.shape: + result[k] = cv + current_values.pop(idx) + break + return result + + def run_test(self, model, input, check_value=True): + model_ir = self._convert_model(model, input) + model_code = model_to_pytorch_script(model_ir) + print(model_code) + + from .inject_nn import remove_inject_pytorch_nn + remove_inject_pytorch_nn() + + exec_vars = {} + exec(model_code + '\n\nconverted_model = _model()', exec_vars) + converted_model = exec_vars['converted_model'] + converted_state_dict = self._match_state_dict(list(model.state_dict().values()), + dict(converted_model.state_dict())) + converted_model.load_state_dict(converted_state_dict) + with torch.no_grad(): + expected_output = model.eval()(*input) + converted_output = converted_model.eval()(*input) + if check_value: + try: + self.assertEqual(len(converted_output), len(expected_output)) + for a, b in zip(converted_output, 
expected_output): + torch.eq(a, b) + except: + self.assertEqual(converted_output, expected_output) + return converted_model + + def test_embedding_model_with_external_data(self): + class LargeModel(nn.Module): + def __init__(self): + super(LargeModel, self).__init__() + dim = 15 + n = 4 * 100 + self.emb = nn.Embedding(n, dim) + self.lin1 = nn.Linear(dim, 1) + self.seq = nn.Sequential( + self.emb, + self.lin1, + ) + + def forward(self, input): + return self.seq(input) + + model = LargeModel() + x = torch.tensor([2], dtype=torch.long) + self.run_test(model, (x, )) + + @unittest.skip('skip for now, as it needs inject_nn') + def test_mobilenet_v2_with_external_data(self): + model = torchvision.models.mobilenet_v2(pretrained=True) + x = torch.randn(2, 3, 224, 224, requires_grad=True) + # We are turning off Onnx Runtime optimization off in this test, + # because external data format is not supported to in ORT optimizer. + # Once that support is added, we can set ort_optim_on=True (default). + self.run_test(model, (x, )) + + def test_attribute_with_external_data(self): + class LargeModel(nn.Module): + def forward(self, x): + return x + torch.ones(2, 1024) + + x = torch.randn(2, 1) + self.run_test(LargeModel(), (x, )) + + @unittest.skip('skip as it has loop') + def test_subgraph_with_external_data(self): + class LargeModel(nn.Module): + def forward(self, x): + for i in range(x.size(0)): + x = x + torch.ones(2, 1024) + return x + + x = torch.randn(2, 1) + self.run_test((LargeModel()), (x, )) + + def test_fuse_conv_bn1d(self): + class Fuse(nn.Module): + def __init__(self): + super(Fuse, self).__init__() + self.conv = nn.Conv1d(16, 33, 3, stride=2) + self.bn = nn.BatchNorm1d(33) + + def forward(self, x): + out = self.conv(x) + return self.bn(out) + + model = Fuse() + x = torch.randn(20, 16, 50, requires_grad=True) + self.run_test(model, (x,)) + + def test_fuse_conv_bn2d(self): + class Fuse(nn.Module): + def __init__(self): + super(Fuse, self).__init__() + self.conv = nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False) + self.bn = nn.BatchNorm2d(2) + + def forward(self, x): + out = self.conv(x) + return self.bn(out) + + model = Fuse() + x = torch.randn(2, 3, 2, 2, requires_grad=True) + self.run_test(model, (x,)) + + def test_fuse_conv_bn3d(self): + class Fuse(nn.Module): + def __init__(self): + super(Fuse, self).__init__() + self.conv = nn.Conv3d(3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False) + self.bn = nn.BatchNorm3d(2) + + def forward(self, x): + out = self.conv(x) + return self.bn(out) + + model = Fuse() + x = torch.randn(2, 3, 10, 50, 100, requires_grad=True) + self.run_test(model, (x,)) + + @unittest.skip('have not supported register_buffer yet') + def test_reshape_constant_fold(self): + class Reshape(nn.Module): + def __init__(self, ): + super(Reshape, self).__init__() + self.register_buffer("weight", torch.ones(5)) + + def forward(self, x): + scale_1 = self.weight.reshape(1, -1, 1, 1) + return x * scale_1 + + x = torch.randn(4, 5) + self.run_test(Reshape(), (x,)) + + def run_word_language_model(self, model_name): + ntokens = 50 + emsize = 5 + nhid = 5 + nlayers = 5 + dropout = 0.2 + tied = False + batchsize = 5 + model = word_language_model.RNNModel(model_name, ntokens, emsize, + nhid, nlayers, dropout, tied, + batchsize) + x = torch.arange(0, ntokens).long().view(-1, batchsize) + # Only support CPU version, since tracer is not working in GPU RNN. 
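+ # NOTE: `word_language_model` is not imported in this file (it appears to come from PyTorch's onnx test utilities); all of its callers below are skipped, so the missing dependency is never hit at runtime.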
+ self.run_test(model, (x, model.hidden)) + + def get_image_from_url(self, url, size=(300, 200)): + import os + from urllib.parse import urlsplit + from urllib import request + from PIL import Image + from torchvision import transforms + from torch._utils_internal import get_writable_path + + filename = os.path.basename(urlsplit(url)[2]) + data_dir = get_writable_path(os.path.join(os.path.dirname(__file__))) + path = os.path.join(data_dir, filename) + data = request.urlopen(url, timeout=15).read() + with open(path, 'wb') as f: + f.write(data) + image = Image.open(path).convert("RGB") + + image = image.resize(size, Image.BILINEAR) + + to_tensor = transforms.ToTensor() + return to_tensor(image) + + def get_test_images(self): + image_url = "http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg" + image = self.get_image_from_url(url=image_url, size=(100, 320)) + + image_url2 = "https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image05.png" + image2 = self.get_image_from_url(url=image_url2, size=(250, 380)) + + return [image], [image2] + + @unittest.skip('does not support `if A and/or B`') + def test_faster_rcnn(self): + from .inject_nn import inject_pytorch_nn + inject_pytorch_nn() + + model = torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200, + max_size=300) + model.eval() + x = torch.randn(2, 3, 200, 300, requires_grad=True) + self.run_test(model, (x,)) + dummy_image = [torch.ones(3, 100, 100) * 0.3] + images, test_images = self.get_test_images() + self.run_test(model, (images,)) + self.run_test(model, (dummy_image,)) + + @unittest.skip('does not support `if A and/or B`') + def test_mask_rcnn(self): + from .inject_nn import inject_pytorch_nn + inject_pytorch_nn() + + model = torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200, + max_size=300) + images, test_images = self.get_test_images() + self.run_test(model, (images,)) + dummy_image = [torch.ones(3, 100, 100) * 0.3] + self.run_test(model, (dummy_image,)) + + @unittest.skip('does not support `if A and/or B`') + def test_keypoint_rcnn(self): + from .inject_nn import inject_pytorch_nn + inject_pytorch_nn() + + model = torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200, + max_size=300) + images, test_images = self.get_test_images() + self.run_test(model, (images,)) + dummy_images = [torch.ones(3, 100, 100) * 0.3] + self.run_test(model, (dummy_images,)) + + def test_shufflenet_v2_dynamic_axes(self): + from .inject_nn import inject_pytorch_nn + inject_pytorch_nn() + + model = torchvision.models.shufflenet_v2_x0_5(pretrained=True) + dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True) + test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True) + self.run_test(model, (dummy_input,)) + + @unittest.skip('') + def test_word_language_model_RNN_TANH(self): + self.run_word_language_model("RNN_TANH") + + @unittest.skip('') + def test_word_language_model_RNN_RELU(self): + self.run_word_language_model("RNN_RELU") + + @unittest.skip('') + def test_word_language_model_LSTM(self): + self.run_word_language_model("LSTM") + + @unittest.skip('') + def test_word_language_model_GRU(self): + self.run_word_language_model("GRU") + + def test_index_1d(self): + class MyModel(nn.Module): + def forward(self, input): + return input[0] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + def test_index_2d_1dimslice(self): + class MyModel(nn.Module): + def forward(self, input): + return 
input[0:1, :] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + def test_index_2d_sliceint(self): + class MyModel(nn.Module): + def forward(self, input): + return input[1, :] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + def test_index_2d_neg_slice(self): + class MyModel(nn.Module): + def forward(self, input): + return input[0:-1, :] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + def test_index_mask(self): + class MyModel(nn.Module): + def forward(self, input): + return input[torch.tensor([0, 1, 0], dtype=torch.uint8)] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + class MyModel(nn.Module): + def forward(self, input): + return input[torch.tensor([0, 1, 0], dtype=torch.bool)] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + def test_data(self): + class Data(nn.Module): + def forward(self, x): + return x.new_zeros(x.data.size()) + + x = torch.randn(3, 4) + self.run_test(Data(), (x, )) + + def test_index_mask_nd(self): + class MyModel(nn.Module): + def forward(self, input): + return input[input > 0] + + m1 = torch.randn(3, 4, 5, 6, 7) + self.run_test(MyModel(), (m1, )) + + @unittest.skip("Tried to access nonexistent attribute or method 'keys' of type 'Tensor (inferred)'.") + def test_dict(self): + class MyModel(nn.Module): + def forward(self, x_in): + x_out = {} + x_out["test_key_out"] = torch.add(x_in[list(x_in.keys())[0]], list(x_in.keys())[0]) + return x_out + + x = {torch.tensor(1.): torch.randn(1, 2, 3)} + self.run_test(MyModel(), (x, {})) + + @unittest.skip("Unsupported operation: indexing tensor with unsupported index type 'str'.") + def test_dict_str(self): + class MyModel(nn.Module): + def forward(self, x_in): + x_out = {} + x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.) + return x_out + + x = {"test_key_in": torch.randn(1, 2, 3)} + self.run_test(MyModel(), (x, {})) + + @unittest.skip('Convert graph error') + def test_optional_inputs_with_no_optionals(self): + class NoOptionalModel(nn.Module): + def forward(self, input): + return input + + # Without empty optional arguments dictionary + x = torch.randn(2, 3) + self.run_test(NoOptionalModel(), (x,)) + # With empty optional arguments dictionary + y = torch.randn(2, 3) + self.run_test(NoOptionalModel(), (y, {})) + + # NOTE: torch script gets an incorrect graph... 
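+ # Only the first run_test call below is exercised; the remaining input combinations stay commented out until the scripted graph is fixed.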
+ def test_optional_inputs_with_mixed_optionals(self): + class MixedModel(nn.Module): + def forward(self, x, y, z): + if y is not None: + return x + y + if z is not None: + return x + z + return x + + x = torch.randn(2, 3) + y = torch.randn(2, 3) + z = torch.randn(2, 3) + # Without optional arguments dictionary + self.run_test(MixedModel(), (x, y, None)) + #self.run_test(MixedModel(), (x, None, z, )) + # With optional arguments dictionary + #self.run_test(MixedModel(), (x, {'y': y, 'z': None})) + #self.run_test(MixedModel(), (x, {'y': None, 'z': z})) + #self.run_test(MixedModel(), (x, {'z': z})) + #self.run_test(MixedModel(), (x, {'y': y})) + + @unittest.skip('torch script gets an incorrect graph...') + def test_optional_inputs_with_all_optionals(self): + class AllOptionalModel(nn.Module): + def forward(self, y, z): + if y is not None: + return y + if z is not None: + return z + + y = torch.randn(2, 3) + # Without optional arguments dictionary + self.run_test(AllOptionalModel(), (y, None)) + # With optional arguments dictionary + #self.run_test(AllOptionalModel(), {'y': y, 'z': None}) + + @unittest.skip('torch script gets an incorrect graph...') + def test_none_as_input(self): + class Model(nn.Module): + def forward(self, x, y): + if y is not None: + return x + y + return x + + x = torch.randn(2, 3) + self.run_test(Model(), (x, None)) + + @unittest.skip('jit cannot correctly deal with tuple as input argument') + def test_none_as_tuple_input(self): + class Model(nn.Module): + def forward(self, x, y): + if y[0] is not None: + return x + y[0] + if y[1] is not None: + return x + y[1] + return x + + x = torch.randn(2, 3) + y = torch.randn(2, 3) + self.run_test(Model(), (x, (None, y))) + + def test_cste_script(self): + class MyModel(nn.Module): + def forward(self, x): + return torch.zeros(x.size(0)), torch.ones((x.size(1), x.size(0)), dtype=torch.int64) + + x = torch.randn(3, 4) + self.run_test(MyModel(), (x, )) + + def test_scalar_tensor(self): + class test(nn.Module): + def forward(self, input): + return torch.scalar_tensor(input.size(0)), \ + torch.scalar_tensor(input.size(1), dtype=torch.int64) + + x = torch.randn(2, 3, 4) + y = torch.randn(7, 8, 9) + model = test() + self.run_test(model, (x, )) + + def test_tensor(self): + class ScalarInputModel(nn.Module): + def forward(self, input): + return torch.tensor(input.shape[1]) + + x = torch.randn(3, 4) + self.run_test(ScalarInputModel(), (x, )) + + class TensorInputModel(nn.Module): + def forward(self, input): + return torch.tensor([input.shape[0], input.shape[1]]) + + x = torch.randn(3, 4) + self.run_test(TensorInputModel(), (x, )) + + class FloatInputModel(nn.Module): + def forward(self, input): + return torch.tensor([float(input)]) + + x = torch.randn(1) + self.run_test(FloatInputModel(), (x, )) + + class InputWithDtypeModel(nn.Module): + def forward(self, input): + return torch.tensor(input.shape[1], dtype=torch.long) + + x = torch.randn(3, 4) + self.run_test(InputWithDtypeModel(), (x, )) + + class MixedInputModel(nn.Module): + def forward(self, input): + return torch.tensor([input.shape[0], int(input)]) + + x = torch.randn(1) + self.run_test(MixedInputModel(), (x, )) + + def test_hardtanh(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.Hardtanh(-1.5, 2.5) + def forward(self, x): + return self.m(x) + + x = torch.arange(-5, 5).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + + def test_hardtanh_script_with_default_values(self): + class MyModel(nn.Module): + def forward(self, x): + return 
F.hardtanh(x) + + x = torch.arange(-5, 5).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + + def test_hardswish(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.Hardswish() + def forward(self, x): + return self.m(x) + + x = torch.rand(3, 3).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + + # Testing edge cases + x = torch.tensor(3).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + x = torch.tensor(-3).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + + def test_hardswish_script(self): + class MyModel(nn.Module): + def forward(self, x): + return F.hardswish(x) + + x = torch.rand(3, 3).to(dtype=torch.float32) + self.run_test(MyModel(), (x, )) + + def test_clamp(self): + class ClampModel(nn.Module): + def forward(self, x): + return x.clamp(-0.5, 0.5) + + x = torch.randn(3, 4) + self.run_test(ClampModel(), (x, )) + + class ClampMinModel(nn.Module): + def forward(self, x): + return x.clamp(min=-0.5) + + x = torch.randn(3, 4) + self.run_test(ClampMinModel(), (x, )) + + class ClampMaxModel(nn.Module): + def forward(self, x): + return x.clamp(max=0.5) + + x = torch.randn(3, 4) + self.run_test(ClampMaxModel(), (x, )) + + def test_clamp_dyn(self): + class ClampMaxModel(nn.Module): + def forward(self, x): + return x.clamp(None, x.size(0)) + + x = torch.arange(16).view(4, 4).float() + self.run_test(ClampMaxModel(), (x, )) + + + class ClampMinModel(nn.Module): + def forward(self, x): + return x.clamp(x.size(0), None) + + x = torch.arange(16).view(4, 4).float() + self.run_test(ClampMinModel(), (x, )) + + class ClampMinMaxModel(nn.Module): + def forward(self, x): + return x.clamp(x.size(0), x.size(1)) + + x = torch.arange(16).view(2, 8).float() + self.run_test(ClampMinMaxModel(), (x, )) + + def test_full_trace(self): + class FullModel(nn.Module): + def forward(self, x): + return torch.full((3, 4), x, dtype=torch.long) + + x = torch.tensor(12) + self.run_test(FullModel(), (x, )) + + def test_full_script(self): + class FullModelScripting(nn.Module): + def forward(self, x): + return torch.full((3, 4), x, dtype=torch.long) + + x = torch.tensor(12) + self.run_test(FullModelScripting(), (x, )) + + def test_fuse_addmm(self): + class AddmmModel(nn.Module): + def forward(self, x): + return torch.mm(x, x) + x + + x = torch.ones(3, 3) + self.run_test(AddmmModel(), (x, )) + + def test_maxpool(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(2, stride=1) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50) + self.run_test(MyModel(), (x, )) + + def test_conv(self): + class TraceModel(nn.Module): + def __init__(self): + super(TraceModel, self).__init__() + self.conv1 = nn.Conv1d(16, 33, 3, stride=2) + self.conv2 = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) + self.conv3 = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)) + + def forward(self, input1, input2, input3): + return self.conv1(input1), self.conv2(input2), self.conv3(input3) + + x1 = torch.randn(20, 16, 50) + x2 = torch.randn(20, 16, 50, 100) + x3 = torch.randn(20, 16, 10, 50, 100) + + self.run_test(TraceModel(), (x1, x2, x3, )) + + def test_conv_shape_inference(self): + class Model(nn.Module): + def __init__(self): + super(Model, self).__init__() + self.conv2 = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) + + def forward(self, input): + return self.conv2(input) + 2 + + x = torch.randn(20, 16, 50, 100) + self.run_test(Model(), 
(x, )) + + def test_conv_transpose(self): + class TraceModel(nn.Module): + def __init__(self): + super(TraceModel, self).__init__() + self.conv1 = nn.ConvTranspose1d(16, 33, 3, stride=2) + self.conv2 = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) + self.conv3 = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)) + + def forward(self, input1, input2, input3): + return self.conv1(input1), self.conv2(input2), self.conv3(input3) + + x1 = torch.randn(20, 16, 50) + x2 = torch.randn(20, 16, 50, 100) + x3 = torch.randn(20, 16, 10, 50, 100) + + self.run_test(TraceModel(), (x1, x2, x3, )) + + # Conversion of Transpose depends on the input shape being known. + # The following test only works when onnx shape inference is enabled. + def test_transpose_infer_shape(self): + class TransposeModule(nn.Module): + def __init__(self): + super(TransposeModule, self).__init__() + self.conv = nn.Conv2d(3, 1, 3, stride=2) + + def forward(self, x): + x = self.conv(x) + return x.transpose(0, 1) + + x = torch.randn(32, 3, 64, 64) + y = torch.randn(16, 3, 8, 64) + self.run_test(TransposeModule(), (x, )) + + def squeeze_model_tests(self, d, x1): + class Squeeze(nn.Module): + def __init__(self, d): + super(Squeeze, self).__init__() + self.d = d + + def forward(self, x): + if self.d is not None: + return torch.squeeze(x, dim=self.d) + else: + return torch.squeeze(x) + + self.run_test(Squeeze(d), (x1, )) + + def test_squeeze_without_no_op(self): + x = torch.randn(2, 1, 4) + self.squeeze_model_tests(1, x) + + def test_squeeze_neg_without_no_op(self): + x = torch.randn(2, 1, 4) + self.squeeze_model_tests(-2, x) + + def test_squeeze_all_dims(self): + x_squeeze = torch.randn(2, 1, 4) + self.squeeze_model_tests(None, x_squeeze) + + def test_squeeze_no_op(self): + x_noop = torch.randn(2, 1, 4) + self.squeeze_model_tests(2, x_noop) + + def test_squeeze_runtime_dim(self): + class Squeeze(nn.Module): + def forward(self, d1, d2): + t = torch.zeros(d1[0], d2[0]) + return t.squeeze(0) + + d1 = torch.tensor([1]) + d3 = torch.tensor([3]) + d4 = torch.tensor([4]) + self.run_test(Squeeze(), (d1, d4)) + self.run_test(Squeeze(), (d3, d4)) + + def test_squeeze(self): + class Squeeze(nn.Module): + def forward(self, x): + return torch.squeeze(x, dim=-2) + + x = torch.randn(2, 1, 4) + self.run_test(Squeeze(), (x, )) + + def test_unsqueeze(self): + class Unsqueeze(nn.Module): + def forward(self, x): + return torch.unsqueeze(x, dim=-2) + + x = torch.randn(2, 3, 4) + self.run_test(Unsqueeze(), (x, )) + + def test_maxpool_default_stride(self): + class MaxPoolModel(nn.Module): + def forward(self, x): + return F.max_pool2d(x, 2) + + model = MaxPoolModel() + x = torch.randn(10, 20, 16, 50) + self.run_test(model, (x, )) + + def test_maxpool_adaptive(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AdaptiveMaxPool1d(5, return_indices=False) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50, requires_grad=True) + self.run_test(MyModel(), (x, )) + + def test_maxpool_2d(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool2d(5, padding=(1, 2)) + def forward(self, x): + return self.m(x) + x = torch.randn(1, 20, 16, 50, requires_grad=True) + self.run_test(MyModel(), (x, )) + + def test_maxpool_1d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20,
16, 50) + self.run_test(MyModel(), (x, )) + + def test_maxpool_2d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool2d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50, 32) + self.run_test(MyModel(), (x, )) + + def test_maxpool_3d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool3d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50, 44, 31) + self.run_test(MyModel(), (x, )) + + @unittest.skip('jit error: Return value was annotated as having type Tensor but is actually of type Tuple[Tensor, Tensor]') + def test_maxpool_with_indices(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(2, stride=1, return_indices=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50) + self.run_test(MyModel(), (x, )) + + def test_maxpool_dilation(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.MaxPool1d(2, stride=1, dilation=2) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50) + self.run_test(MyModel(), (x, )) + + def test_avgpool_default_stride(self): + class AvgPoolModel(nn.Module): + def forward(self, x): + return F.avg_pool2d(x, 2) + + model = AvgPoolModel() + x = torch.randn(10, 20, 16, 50) + self.run_test(model, (x, )) + + def test_avgpool(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AvgPool1d(2, stride=1) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50) + self.run_test(MyModel(), (x, )) + + def test_avgpool_1d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AvgPool1d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(1, 1, 7) + self.run_test(MyModel(), (x, )) + + def test_avgpool_2d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AvgPool2d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50, 32) + self.run_test(MyModel(), (x, )) + + def test_avgpool_3d_ceil(self): + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.m = nn.AvgPool3d(3, 2, ceil_mode=True) + def forward(self, x): + return self.m(x) + + x = torch.randn(20, 16, 50, 44, 31) + self.run_test(MyModel(), (x, )) + + @unittest.skip('Unsupported op type aten::is_floating_point in if condition') + def test_floating_point(self): + class FloatingPoint(nn.Module): + def forward(self, x): + if x.is_floating_point(): + return x.new_zeros(x.shape) + return x.new_zeros(x.shape) + + x = torch.randn(2, 3, 4) + self.run_test(FloatingPoint(), (x, )) + + class FloatingPoint(nn.Module): + def forward(self, x): + if x.size(0) > 1: + a = x + 2 + if a.is_floating_point(): + return x + 1 + return x + 1 + return x + + x = torch.randn(2, 3, 4) + self.run_test(FloatingPoint(), (x, )) + + # Operator rank mismatch between outputs of two branches for opsets below 11. 
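+ # (In the first model below, the two branches return zeros of shape x.shape[1:] and x.shape respectively, i.e. outputs of different rank.)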
+ @unittest.skip('Unsupported op type aten::size in if condition') + def test_floating_point_infer_dtype(self): + class FloatingPoint(nn.Module): + def forward(self, x): + if x.size(0) > 1: + a = x + 2 + if a.is_floating_point(): + return x.new_zeros(x.shape[1:]) + return x.new_zeros(x.shape) + return x + + x = torch.randn(2, 3, 4) + self.run_test(FloatingPoint(), (x, )) + + class FloatingPoint(nn.Module): + def forward(self, x): + if x.size(0) > 1: + a = x + 2 + if a.is_floating_point(): + return x + 1 + return x + return x + + x = torch.randn(2, 3, 4).to(torch.int32) + self.run_test(FloatingPoint(), (x, )) + + def test_arithmetic(self): + class ArithmeticModule(nn.Module): + def forward(self, x): + x = x + 2 + x = x - 4 + x = x * 6 + x = x / 8 + return x + + x = torch.randn(2, 3, 4) + self.run_test(ArithmeticModule(), (x, )) + + # In scripting, the first transpose node does not carry shape and dtype info. + # The following test only works when onnx shape inference is enabled. + def test_arithmetic_infer_dtype(self): + class ArithmeticModule(nn.Module): + def forward(self, x): + x = x.t() + x = x + 2 + x = x - 4 + x = x * 6 + x = x / 8 + return x + + x = torch.randn(2, 3) + self.run_test(ArithmeticModule(), (x, )) + + @unittest.skip('tensor op type aten::to has more than one matched') + def test_floor_div(self): + class FloorDivModule(nn.Module): + def forward(self, x, y): + return x // 3, x // 2., \ + x.to(dtype=torch.float64) // 3, x.to(dtype=torch.float64) // 2., \ + x.to(dtype=torch.int64) // 3, x.to(dtype=torch.int64) // 2., \ + x // (y + 1.).to(dtype=torch.int64), x // y, \ + x.to(dtype=torch.float64) // y.to(dtype=torch.int64), x.to(dtype=torch.float64) // y.to(dtype=torch.float64), \ + x.to(dtype=torch.int64) // y.to(dtype=torch.int64), x.to(dtype=torch.int64) // y + + x = torch.randn(2, 3, 4) + y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4) + self.run_test(FloorDivModule(), (x, y)) + + def test_floor_div_script(self): + class FloorDivModule(nn.Module): + def forward(self, x, y): + return x // 3, x // 2., x // y + + x = torch.randn(2, 3, 4) + y = torch.randn(2, 3, 4) + self.run_test(FloorDivModule(), (x, y)) + + def test_floordiv(self): + class FloordivModule(nn.Module): + def forward(self, x): + return x.new_zeros(x.size(2) // x.size(1)) + + x = torch.randn(2, 3, 4) + self.run_test(FloordivModule(), (x,)) + + def test_div(self): + class DivModule(nn.Module): + def forward(self, x, y): + return torch.true_divide(x, y) + + x = torch.randn(2, 3, 4).to(torch.int) + y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int) + self.run_test(DivModule(), (x, y)) + self.run_test(DivModule(), (x.float(), y.float())) + + # Note: div cannot (generally) be exported via scripting + # since its type promotion logic is dependent on knowing the scalar types + # of the input tensors. That is, the ONNX graph is dependent on the + # data type of the inputs. This makes it appropriate for tracing only. + def test_div_promotion_trace(self): + class DivModule(nn.Module): + def forward(self, x, y): + return torch.true_divide(x, y) + + x = torch.randn(2, 3, 4).to(torch.int) + y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int) + + prev_default = torch.get_default_dtype() + + torch.set_default_dtype(torch.float) + self.run_test(DivModule(), (x, y)) + + torch.set_default_dtype(torch.double) + self.run_test(DivModule(), (x, y)) + + torch.set_default_dtype(prev_default) + + # In scripting, x and y do not carry shape and dtype info.
+ # The following test only works when onnx shape inference is enabled. + def test_div_promotion_script(self): + class DivModule(nn.Module): + def forward(self, x, y): + # Add transposes to hide shape/type information. + # Otherwise shape and type are still available from the input. + x = x.transpose(1, 2) + y = y.transpose(1, 2) + return torch.true_divide(x, y) + + x = torch.randn(2, 3, 4).to(torch.int) + y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int) + + prev_default = torch.get_default_dtype() + + # 1. x, y are int, and output is float. + # This can be handled by the default case, where both are cast to float. + # It works even if the types of x and y are unknown. + torch.set_default_dtype(torch.float) + self.run_test(DivModule(), (x, y)) + + # 2. x, y are int, and output is double. + # This can be handled by the default case, where both are cast to double. + # It works even if the types of x and y are unknown. + torch.set_default_dtype(torch.double) + self.run_test(DivModule(), (x, y)) + + # 3. x is int, y is double, and output is double. + # This can only be handled when the types of both x and y are known. + torch.set_default_dtype(prev_default) + x = torch.randn(2, 3, 4).to(torch.int) + y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double) + self.run_test(DivModule(), (x, y)) + + def test_slice_trace(self): + class MyModule(nn.Module): + def forward(self, x): + return x[0:1] + + x = torch.randn(3) + self.run_test(MyModule(), (x, )) + + def test_slice_neg(self): + class NegSlice(nn.Module): + def forward(self, x): + return x[-1:] + + x = torch.randn(3, 4, 5) + self.run_test(NegSlice(), (x, )) + + def test_slice_neg_large(self): + class NegSlice(nn.Module): + def forward(self, x): + return x[:, :, -3:-1, :, -1] + + x = torch.randn(3, 4, 5, 6, 7) + self.run_test(NegSlice(), (x, )) + + def test_slice_neg_large_negone(self): + class NegSlice(nn.Module): + def forward(self, x): + return x[:, :, :, :, -1] + + x = torch.randn(3, 4, 5, 6, 7) + self.run_test(NegSlice(), (x, )) + + @unittest.skip('strange torch script graph') + def test_slice_with_input_index(self): + class InputIndexSlice(nn.Module): + def forward(self, x, y): + x[:y.size(0), 0, :] = y + return x + + x = torch.zeros((56, 6, 256)) + y = torch.rand((22, 256)) + self.run_test(InputIndexSlice(), (x, y)) + + @unittest.skip('Loop has not been supported yet!') + def test_slice_dynamic(self): + class DynamicSliceExportMod(nn.Module): + def forward(self, x): + results = [] + for i in range(4): + results.append(x[:x.size(0) - i, i:x.size(2), i:3]) + return results + + x = torch.rand(5, 5, 5) + y = torch.randn(6, 7, 8) + self.run_test(DynamicSliceExportMod(), (x, )) + + def test_slice_dynamic_script(self): + class DynamicSliceModel(nn.Module): + def forward(self, x): + return x[1:x.size(1)] + + x = torch.rand(1, 2) + self.run_test(DynamicSliceModel(), (x, )) + + def test_slice_dynamic_shape_script(self): + class DynamicSliceModel(nn.Module): + def forward(self, x): + return x.new_zeros(x.shape[1:x.size(2)]) + + x = torch.rand(1, 2, 3, 4) + self.run_test(DynamicSliceModel(), (x, )) + + @unittest.skip('Loop has not been supported yet!') + def test_slice_dynamic_to_end(self): + class DynamicSliceExportMod(nn.Module): + def forward(self, x): + results = [] + for i in range(4): + results.append(x[:, i:, x.size(2) - 5]) + return results + + x = torch.rand(5, 5, 5) + self.run_test(DynamicSliceExportMod(), (x, )) + + def test_square(self): + class Square(nn.Module): + def forward(self, x): + return torch.square(x) + + x = 
torch.randn(2, 3, 4) + self.run_test(Square(), (x, )) + + def test_arange_dynamic(self): + class ArangeModel(nn.Module): + def forward(self, input): + return torch.arange(input.shape[0]), \ + torch.arange(12), \ + torch.arange(start=input.shape[0], end=input.shape[0] + 5) + + x = torch.randn(5, 3, 2) + y = torch.randn(8, 3, 2) + self.run_test(ArangeModel(), (x, )) + + @unittest.skip('mismatched aten::arange definition, does not support `out`') + def test_dynamic_arange_out(self): + class ArangeOutModel(nn.Module): + def forward(self, end): + out_t = torch.tensor([1], dtype=torch.int64) + return torch.arange(end, out=out_t) + + x = torch.tensor(8) + self.run_test(ArangeOutModel(), (x, )) + + @unittest.skip('mismatched aten::arange definition, does not support `out`') + def test_dynamic_arange_start_out(self): + class ArangeStartOutModel(nn.Module): + def forward(self, start, end): + out_t = torch.tensor([1], dtype=torch.int64) + return torch.arange(start.size(0), end, out=out_t) + + x = torch.randn(2, 3, 4) + y = torch.tensor(8) + self.run_test(ArangeStartOutModel(), (x, y)) + + def test_arange(self): + class ArangeModel(nn.Module): + def forward(self, start, end): + return torch.arange(start.size(0), end, 1.5, dtype=torch.int64) + + x = torch.randn(2, 3, 4) + y = torch.tensor(8.5, dtype=torch.float) + self.run_test(ArangeModel(), (x, y)) + + @unittest.skip('mismatched aten::arange definition, does not support `out`') + def test_arange_out(self): + class ArangeOutModel(nn.Module): + def forward(self, end): + out_t = torch.tensor([1], dtype=torch.float) + return torch.arange(end, out=out_t) + + x = torch.tensor(8.5, dtype=torch.float) + self.run_test(ArangeOutModel(), (x, )) + + @unittest.skip('mismatched aten::arange definition, does not support `out`') + def test_arange_start_out(self): + class ArangeStartOutModel(nn.Module): + def forward(self, start, end): + out_t = torch.tensor([1], dtype=torch.float) + return torch.arange(start.size(0), end, out=out_t) + + x = torch.randn(2, 3, 4) + y = torch.tensor(8.5, dtype=torch.float) + self.run_test(ArangeStartOutModel(), (x, y)) + + def test_arange_no_type(self): + class ArangeModel(nn.Module): + def forward(self, end): + return torch.arange(end), \ + torch.arange(0, end) + + x = torch.tensor(6.2, dtype=torch.float) + self.run_test(ArangeModel(), (x, )) + + def test_size(self): + class SizeModel(nn.Module): + def forward(self, input): + return torch.arange(input.size(0)), torch.arange(input.size(-1)), torch.ones(input.shape) + + x = torch.randn(5, 3, 2) + self.run_test(SizeModel(), (x, )) + + def test_size2(self): + class SizeModel(nn.Module): + def __init__(self, a, b): + super().__init__() + self.a = a + self.b = b + def forward(self, input): + if self.a < self.b: + return torch.arange(input.size(0)), torch.arange(input.size(-1)), torch.ones(input.shape) + + x = torch.randn(5, 3, 2) + self.run_test(SizeModel(10, 5), (x, )) + + def test_python_name(self): + from .inject_nn import inject_pytorch_nn, remove_inject_pytorch_nn + try: + inject_pytorch_nn() + torchvision_model_zoo = { + 'resnet18': torchvision.models.resnet18(), + 'alexnet': torchvision.models.alexnet(), + 'vgg16': torchvision.models.vgg16(), + 'squeezenet': torchvision.models.squeezenet1_0(), + 'shufflenet_v2': torchvision.models.shufflenet_v2_x1_0(), + 'mobilenet_v2': torchvision.models.mobilenet_v2(), + 'resnext50_32x4d': torchvision.models.resnext50_32x4d(), + 'wide_resnet50_2': torchvision.models.wide_resnet50_2(), + 'mnasnet': torchvision.models.mnasnet1_0(), + } + 
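# Sanity check for python_name bookkeeping: every module path in state_dict() (the key minus the trailing parameter name) must appear among the converted nodes' python_names. +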
dummy_input = torch.randn(1, 3, 224, 224) + for model in torchvision_model_zoo.values(): + model_ir = self._convert_model(model, dummy_input) + current_name = [node.python_name for node in model_ir.get_nodes() if node.python_name] + mentioned = set() + for k in model.state_dict(): + k = ".".join(k.split(".")[:-1]) + if k not in mentioned: + assert k in current_name, f'{k} not found in python_names' + mentioned.add(k) + finally: + remove_inject_pytorch_nn() + +class TestPytorchWithShape(TestPytorch, ConvertWithShapeMixin): + pass diff --git a/test/ut/retiarii/test_convert_shape.py b/test/ut/retiarii/test_convert_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..4013f6af6fc8478efe61e99573382c9f97675eb1 --- /dev/null +++ b/test/ut/retiarii/test_convert_shape.py @@ -0,0 +1,79 @@ +import unittest +import torch + +import nni.retiarii.nn.pytorch as nn + +from .convert_mixin import ConvertWithShapeMixin + + +class TestShape(unittest.TestCase, ConvertWithShapeMixin): + def test_simple_convnet(self): + class ConvNet(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 1, 3) + self.relu = nn.ReLU() + self.pool = nn.MaxPool2d(kernel_size=2) + def forward(self, x): + return self.pool(self.relu(self.conv(x))) + + net = ConvNet() + input = torch.randn((1, 3, 224, 224)) + model_ir = self._convert_model(net, input) + + conv_node = model_ir.get_nodes_by_type('__torch__.torch.nn.modules.conv.Conv2d')[0] + relu_node = model_ir.get_nodes_by_type('__torch__.torch.nn.modules.activation.ReLU')[0] + pool_node = model_ir.get_nodes_by_type('__torch__.torch.nn.modules.pooling.MaxPool2d')[0] + self.assertEqual(conv_node.operation.attributes.get('input_shape'), [[1, 3, 224, 224]]) + self.assertEqual(conv_node.operation.attributes.get('output_shape'), [[1, 1, 222, 222]]) + self.assertEqual(relu_node.operation.attributes.get('input_shape'), [[1, 1, 222, 222]]) + self.assertEqual(relu_node.operation.attributes.get('output_shape'), [[1, 1, 222, 222]]) + self.assertEqual(pool_node.operation.attributes.get('input_shape'), [[1, 1, 222, 222]]) + self.assertEqual(pool_node.operation.attributes.get('output_shape'), [[1, 1, 111, 111]]) + + def test_nested_module(self): + class ConvRelu(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 1, 3) + self.relu = nn.ReLU() + def forward(self, x): + return self.relu(self.conv(x)) + + class ConvNet(nn.Module): + def __init__(self): + super().__init__() + self.conv = ConvRelu() + self.pool = nn.MaxPool2d(kernel_size=2) + def forward(self, x): + return self.pool(self.conv(x)) + + net = ConvNet() + input = torch.randn((1, 3, 224, 224)) + model_ir = self._convert_model(net, input) + + # check if shape propagation works + cell_node = model_ir.get_nodes_by_type('_cell')[0] + self.assertEqual(cell_node.operation.attributes.get('input_shape'), [[1, 3, 224, 224]]) + self.assertEqual(cell_node.operation.attributes.get('output_shape'), [[1, 1, 222, 222]]) + + def test_layerchoice(self): + class ConvNet(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.LayerChoice([ + nn.Conv2d(3, 1, 3), + nn.Conv2d(3, 1, 5, padding=1), + ]) + self.pool = nn.MaxPool2d(kernel_size=2) + def forward(self, x): + return self.pool(self.conv(x)) + + net = ConvNet() + input = torch.randn((1, 3, 224, 224)) + model_ir = self._convert_model(net, input) + + # check shape info of each candidate + conv_nodes = model_ir.get_nodes_by_type('__torch__.torch.nn.modules.conv.Conv2d') +
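# Conv2d(3, 1, 3) on a 224x224 input and Conv2d(3, 1, 5, padding=1) both produce 222x222 feature maps, so the two candidates are expected to report the same output shape. +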
self.assertEqual(conv_nodes[0].operation.attributes.get('output_shape'), [[1, 1, 222, 222]]) + self.assertEqual(conv_nodes[1].operation.attributes.get('output_shape'), [[1, 1, 222, 222]]) diff --git a/test/ut/retiarii/test_engine.py b/test/ut/retiarii/test_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..39d45a088456c75d161b42ed74b2197e92b87d42 --- /dev/null +++ b/test/ut/retiarii/test_engine.py @@ -0,0 +1,71 @@ +import json +import os +import unittest +from pathlib import Path + +import nni.retiarii +from nni.retiarii import Model, submit_models +from nni.retiarii.codegen import model_to_pytorch_script +from nni.retiarii.execution import set_execution_engine +from nni.retiarii.execution.base import BaseExecutionEngine +from nni.retiarii.execution.python import PurePythonExecutionEngine +from nni.retiarii.integration import RetiariiAdvisor + + +class EngineTest(unittest.TestCase): + def test_codegen(self): + with open(self.enclosing_dir / 'mnist_pytorch.json') as f: + model = Model._load(json.load(f)) + script = model_to_pytorch_script(model) + with open(self.enclosing_dir / 'debug_mnist_pytorch.py') as f: + reference_script = f.read() + self.assertEqual(script.strip(), reference_script.strip()) + + def test_base_execution_engine(self): + nni.retiarii.integration_api._advisor = None + nni.retiarii.execution.api._execution_engine = None + advisor = RetiariiAdvisor() + set_execution_engine(BaseExecutionEngine()) + with open(self.enclosing_dir / 'mnist_pytorch.json') as f: + model = Model._load(json.load(f)) + submit_models(model, model) + + advisor.stopping = True + advisor.default_worker.join() + advisor.assessor_worker.join() + + def test_py_execution_engine(self): + nni.retiarii.integration_api._advisor = None + nni.retiarii.execution.api._execution_engine = None + advisor = RetiariiAdvisor() + set_execution_engine(PurePythonExecutionEngine()) + model = Model._load({ + '_model': { + 'inputs': None, + 'outputs': None, + 'nodes': { + 'layerchoice_1': { + 'operation': {'type': 'LayerChoice', 'parameters': {'candidates': ['0', '1']}} + } + }, + 'edges': [] + } + }) + model.python_class = object + submit_models(model, model) + + advisor.stopping = True + advisor.default_worker.join() + advisor.assessor_worker.join() + + def setUp(self) -> None: + self.enclosing_dir = Path(__file__).parent + os.makedirs(self.enclosing_dir / 'generated', exist_ok=True) + from nni.runtime import protocol + protocol._out_file = open(self.enclosing_dir / 'generated/debug_protocol_out_file.py', 'wb') + + def tearDown(self) -> None: + from nni.runtime import protocol + protocol._out_file.close() + nni.retiarii.execution.api._execution_engine = None + nni.retiarii.integration_api._advisor = None diff --git a/test/ut/retiarii/test_graph.py b/test/ut/retiarii/test_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..69dd8c52a9cf007fc8ab8c8b4fb6d3bcd343f6cf --- /dev/null +++ b/test/ut/retiarii/test_graph.py @@ -0,0 +1,45 @@ +import json +from pathlib import Path +import sys + +from nni.retiarii import * + + +json_files = [ + 'mnist-tensorflow.json' +] + + +def test_model_load_dump(): + for json_file in json_files: + path = Path(__file__).parent / json_file + _test_file(path) + + +def _test_file(json_path): + orig_ir = json.load(json_path.open()) + model = Model._load(orig_ir) + dump_ir = model._dump() + + # add default values to JSON, so we can compare with `==` + for graph_name, graph in orig_ir.items(): + if graph_name == '_evaluator': + continue + if 'inputs' not 
in graph: + graph['inputs'] = None + if 'outputs' not in graph: + graph['outputs'] = None + + # debug output + #json.dump(orig_ir, open('_orig.json', 'w'), indent=4) + #json.dump(dump_ir, open('_dump.json', 'w'), indent=4) + + # skip comparison of _evaluator + orig_ir.pop('_evaluator') + dump_ir.pop('_evaluator') + + assert orig_ir == dump_ir + + +if __name__ == '__main__': + test_model_load_dump() diff --git a/test/ut/retiarii/test_highlevel_apis.py b/test/ut/retiarii/test_highlevel_apis.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d1a62c29dae69f54b2036d1273e50bbb18ee66 --- /dev/null +++ b/test/ut/retiarii/test_highlevel_apis.py @@ -0,0 +1,660 @@ +import random +import unittest +from collections import Counter + +import nni.retiarii.nn.pytorch as nn +import torch +import torch.nn.functional as F +from nni.retiarii import InvalidMutation, Sampler, basic_unit +from nni.retiarii.converter import convert_to_graph +from nni.retiarii.codegen import model_to_pytorch_script +from nni.retiarii.execution.utils import _unpack_if_only_one +from nni.retiarii.nn.pytorch.mutator import process_inline_mutation, extract_mutation_from_pt_module +from nni.retiarii.serializer import model_wrapper +from nni.retiarii.utils import ContextStack + + +class EnumerateSampler(Sampler): + def __init__(self): + self.index = 0 + + def choice(self, candidates, *args, **kwargs): + choice = candidates[self.index % len(candidates)] + self.index += 1 + return choice + + +class RandomSampler(Sampler): + def __init__(self): + self.counter = 0 + + def choice(self, candidates, *args, **kwargs): + self.counter += 1 + return random.choice(candidates) + + +@basic_unit +class MutableConv(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, kernel_size=1) + self.conv2 = nn.Conv2d(3, 5, kernel_size=1) + + def forward(self, x: torch.Tensor, index: int): + if index == 0: + return self.conv1(x) + else: + return self.conv2(x) + + +class GraphIR(unittest.TestCase): + + def _convert_to_ir(self, model): + script_module = torch.jit.script(model) + return convert_to_graph(script_module, model) + + def _get_converted_pytorch_model(self, model_ir): + model_code = model_to_pytorch_script(model_ir) + exec_vars = {} + exec(model_code + '\n\nconverted_model = _model()', exec_vars) + return exec_vars['converted_model'] + + def _get_model_with_mutators(self, pytorch_model): + model = self._convert_to_ir(pytorch_model) + mutators = process_inline_mutation(model) + return model, mutators + + def get_serializer(self): + def dummy(cls): + return cls + + return dummy + + def test_layer_choice(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.module = nn.LayerChoice([ + nn.Conv2d(3, 3, kernel_size=1), + nn.Conv2d(3, 5, kernel_size=1) + ]) + + def forward(self, x): + return self.module(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 3, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 5, 3, 3])) + + def test_layer_choice_multiple(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.module = nn.LayerChoice([nn.Conv2d(3, i, kernel_size=1) for i 
in range(1, 11)]) + + def forward(self, x): + return self.module(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + for i in range(1, 11): + model_new = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model_new)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, i, 3, 3])) + + def test_nested_layer_choice(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.module = nn.LayerChoice([ + nn.LayerChoice([nn.Conv2d(3, 3, kernel_size=1), + nn.Conv2d(3, 4, kernel_size=1), + nn.Conv2d(3, 5, kernel_size=1)]), + nn.Conv2d(3, 1, kernel_size=1) + ]) + + def forward(self, x): + return self.module(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 2) + mutators[0].bind_sampler(EnumerateSampler()) + mutators[1].bind_sampler(EnumerateSampler()) + input = torch.randn(1, 3, 5, 5) + self.assertEqual(self._get_converted_pytorch_model(mutators[1].apply(mutators[0].apply(model)))(input).size(), + torch.Size([1, 3, 5, 5])) + self.assertEqual(self._get_converted_pytorch_model(mutators[1].apply(mutators[0].apply(model)))(input).size(), + torch.Size([1, 1, 5, 5])) + self.assertEqual(self._get_converted_pytorch_model(mutators[1].apply(mutators[0].apply(model)))(input).size(), + torch.Size([1, 5, 5, 5])) + + def test_input_choice(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, kernel_size=1) + self.conv2 = nn.Conv2d(3, 5, kernel_size=1) + self.input = nn.InputChoice(2) + + def forward(self, x): + x1 = self.conv1(x) + x2 = self.conv2(x) + return self.input([x1, x2]) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 3, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 5, 3, 3])) + + def test_chosen_inputs(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self, reduction): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, kernel_size=1) + self.conv2 = nn.Conv2d(3, 3, kernel_size=1) + self.input = nn.InputChoice(2, n_chosen=2, reduction=reduction) + + def forward(self, x): + x1 = self.conv1(x) + x2 = self.conv2(x) + return self.input([x1, x2]) + + for reduction in ['none', 'sum', 'mean', 'concat']: + model, mutators = self._get_model_with_mutators(Net(reduction)) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model = mutator.apply(model) + result = self._get_converted_pytorch_model(model)(torch.randn(1, 3, 3, 3)) + if reduction == 'none': + self.assertEqual(len(result), 2) + self.assertEqual(result[0].size(), torch.Size([1, 3, 3, 3])) + self.assertEqual(result[1].size(), torch.Size([1, 3, 3, 3])) + elif reduction == 'concat': + self.assertEqual(result.size(), torch.Size([1, 6, 3, 3])) + else: + self.assertEqual(result.size(), torch.Size([1, 3, 3, 3])) + + def test_value_choice(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.index = nn.ValueChoice([0, 1]) + self.conv = MutableConv() + + def forward(self, x): + return self.conv(x, self.index()) + + 
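# MutableConv dispatches on the sampled index: 0 selects conv1 (3->3 channels), anything else selects conv2 (3->5 channels). +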
model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 3, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).size(), + torch.Size([1, 5, 3, 3])) + + def test_value_choice_as_parameter(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 5, kernel_size=nn.ValueChoice([3, 5])) + + def forward(self, x): + return self.conv(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 5, 5)).size(), + torch.Size([1, 5, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 5, 5)).size(), + torch.Size([1, 5, 1, 1])) + + def test_value_choice_as_two_parameters(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, nn.ValueChoice([6, 8]), kernel_size=nn.ValueChoice([3, 5])) + + def forward(self, x): + return self.conv(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 2) + mutators[0].bind_sampler(EnumerateSampler()) + mutators[1].bind_sampler(EnumerateSampler()) + input = torch.randn(1, 3, 5, 5) + self.assertEqual(self._get_converted_pytorch_model(mutators[1].apply(mutators[0].apply(model)))(input).size(), + torch.Size([1, 6, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(mutators[1].apply(mutators[0].apply(model)))(input).size(), + torch.Size([1, 8, 1, 1])) + + def test_value_choice_as_parameter_shared(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, nn.ValueChoice([6, 8], label='shared'), 1) + self.conv2 = nn.Conv2d(3, nn.ValueChoice([6, 8], label='shared'), 1) + + def forward(self, x): + return self.conv1(x) + self.conv2(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 5, 5)).size(), + torch.Size([1, 6, 5, 5])) + self.assertEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 5, 5)).size(), + torch.Size([1, 8, 5, 5])) + + def test_value_choice_in_functional(self): + @self.get_serializer() + class 
Net(nn.Module): + def __init__(self): + super().__init__() + self.dropout_rate = nn.ValueChoice([0., 1.]) + + def forward(self, x): + return F.dropout(x, self.dropout_rate()) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), torch.Size([1, 3, 3, 3])) + self.assertAlmostEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).abs().sum().item(), 0) + + def test_value_choice_in_layer_choice(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.linear = nn.LayerChoice([ + nn.Linear(3, nn.ValueChoice([10, 20])), + nn.Linear(3, nn.ValueChoice([30, 40])) + ]) + + def forward(self, x): + return self.linear(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 3) + sz_counter = Counter() + sampler = RandomSampler() + for i in range(100): + model_new = model + for mutator in mutators: + model_new = mutator.bind_sampler(sampler).apply(model_new) + sz_counter[self._get_converted_pytorch_model(model_new)(torch.randn(1, 3)).size(1)] += 1 + self.assertEqual(len(sz_counter), 4) + + def test_shared(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self, shared=True): + super().__init__() + labels = ['x', 'x'] if shared else [None, None] + self.module1 = nn.LayerChoice([ + nn.Conv2d(3, 3, kernel_size=1), + nn.Conv2d(3, 5, kernel_size=1) + ], label=labels[0]) + self.module2 = nn.LayerChoice([ + nn.Conv2d(3, 3, kernel_size=1), + nn.Conv2d(3, 5, kernel_size=1) + ], label=labels[1]) + + def forward(self, x): + return self.module1(x) + self.module2(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + sampler = RandomSampler() + mutator = mutators[0].bind_sampler(sampler) + self.assertEqual(self._get_converted_pytorch_model(mutator.apply(model))(torch.randn(1, 3, 3, 3)).size(0), 1) + self.assertEqual(sampler.counter, 1) + + model, mutators = self._get_model_with_mutators(Net(shared=False)) + self.assertEqual(len(mutators), 2) + sampler = RandomSampler() + # repeat test. Expectation: sometimes succeeds, sometimes fails. 
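+ # With independent labels the two LayerChoices can pick different out_channels (3 vs 5), making the elementwise addition fail; a shared label keeps the picks in sync.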
+ failed_count = 0 + for i in range(30): + model_new = model + for mutator in mutators: + model_new = mutator.bind_sampler(sampler).apply(model_new) + self.assertEqual(sampler.counter, 2 * (i + 1)) + try: + self._get_converted_pytorch_model(model_new)(torch.randn(1, 3, 3, 3)) + except RuntimeError: + failed_count += 1 + self.assertGreater(failed_count, 0) + self.assertLess(failed_count, 30) + + def test_valuechoice_access(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + vc = nn.ValueChoice([(6, 3), (8, 5)]) + self.conv = nn.Conv2d(3, vc[0], kernel_size=vc[1]) + + def forward(self, x): + return self.conv(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutators[0].bind_sampler(EnumerateSampler()) + input = torch.randn(1, 3, 5, 5) + self.assertEqual(self._get_converted_pytorch_model(mutators[0].apply(model))(input).size(), + torch.Size([1, 6, 3, 3])) + self.assertEqual(self._get_converted_pytorch_model(mutators[0].apply(model))(input).size(), + torch.Size([1, 8, 1, 1])) + + @self.get_serializer() + class Net2(nn.Module): + def __init__(self): + super().__init__() + choices = [ + {'b': [3], 'bp': [6]}, + {'b': [6], 'bp': [12]} + ] + self.conv = nn.Conv2d(3, nn.ValueChoice(choices, label='a')['b'][0], 1) + self.conv1 = nn.Conv2d(nn.ValueChoice(choices, label='a')['bp'][0], 3, 1) + + def forward(self, x): + x = self.conv(x) + return self.conv1(torch.cat((x, x), 1)) + + model, mutators = self._get_model_with_mutators(Net2()) + self.assertEqual(len(mutators), 1) + mutators[0].bind_sampler(EnumerateSampler()) + input = torch.randn(1, 3, 5, 5) + self._get_converted_pytorch_model(mutators[0].apply(model))(input) + + def test_valuechoice_access_functional(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.dropout_rate = nn.ValueChoice([[0., ], [1., ]]) + + def forward(self, x): + return F.dropout(x, self.dropout_rate()[0]) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), torch.Size([1, 3, 3, 3])) + self.assertAlmostEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).abs().sum().item(), 0) + + def test_valuechoice_access_functional_expression(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.dropout_rate = nn.ValueChoice([[1.05, ], [1.1, ]]) + + def forward(self, x): + # if expression failed, the exception would be: + # ValueError: dropout probability has to be between 0 and 1, but got 1.05 + return F.dropout(x, self.dropout_rate()[0] - .1) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)) + self.assertEqual(self._get_converted_pytorch_model(model1)(torch.randn(1, 3, 3, 3)).size(), torch.Size([1, 3, 3, 3])) + self.assertAlmostEqual(self._get_converted_pytorch_model(model2)(torch.randn(1, 3, 3, 3)).abs().sum().item(), 0) + + def test_repeat(self): + class AddOne(nn.Module): + def forward(self, x): + return x + 1 + 
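+ # nn.Repeat(AddOne(), (3, 5)) stacks between 3 and 5 copies of AddOne, so a zero input should come out as 3, 4 or 5.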
+ @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.block = nn.Repeat(AddOne(), (3, 5)) + + def forward(self, x): + return self.block(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 1) + mutator = mutators[0].bind_sampler(EnumerateSampler()) + model1 = mutator.apply(model) + model2 = mutator.apply(model) + model3 = mutator.apply(model) + self.assertTrue((self._get_converted_pytorch_model(model1)(torch.zeros(1, 16)) == 3).all()) + self.assertTrue((self._get_converted_pytorch_model(model2)(torch.zeros(1, 16)) == 4).all()) + self.assertTrue((self._get_converted_pytorch_model(model3)(torch.zeros(1, 16)) == 5).all()) + + def test_repeat_complex(self): + class AddOne(nn.Module): + def forward(self, x): + return x + 1 + + @model_wrapper + class Net(nn.Module): + def __init__(self): + super().__init__() + self.block = nn.Repeat(nn.LayerChoice([AddOne(), nn.Identity()], label='lc'), (3, 5), label='rep') + + def forward(self, x): + return self.block(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 2) + self.assertEqual(set([mutator.label for mutator in mutators]), {'lc', 'rep'}) + + sampler = RandomSampler() + for _ in range(10): + new_model = model + for mutator in mutators: + new_model = mutator.bind_sampler(sampler).apply(new_model) + result = self._get_converted_pytorch_model(new_model)(torch.zeros(1, 1)).item() + self.assertIn(result, [0., 3., 4., 5.]) + + # independent layer choice + @model_wrapper + class Net(nn.Module): + def __init__(self): + super().__init__() + self.block = nn.Repeat(lambda index: nn.LayerChoice([AddOne(), nn.Identity()]), (2, 3), label='rep') + + def forward(self, x): + return self.block(x) + + model, mutators = self._get_model_with_mutators(Net()) + self.assertEqual(len(mutators), 4) + + result = [] + for _ in range(20): + new_model = model + for mutator in mutators: + new_model = mutator.bind_sampler(sampler).apply(new_model) + result.append(self._get_converted_pytorch_model(new_model)(torch.zeros(1, 1)).item()) + + self.assertIn(1., result) + + def test_cell(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.cell = nn.Cell([nn.Linear(16, 16), nn.Linear(16, 16, bias=False)], + num_nodes=4, num_ops_per_node=2, num_predecessors=2, merge_op='all') + + def forward(self, x, y): + return self.cell([x, y]) + + raw_model, mutators = self._get_model_with_mutators(Net()) + for _ in range(10): + sampler = EnumerateSampler() + model = raw_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + self.assertTrue(self._get_converted_pytorch_model(model)( + torch.randn(1, 16), torch.randn(1, 16)).size() == torch.Size([1, 64])) + + @self.get_serializer() + class Net2(nn.Module): + def __init__(self): + super().__init__() + self.cell = nn.Cell([nn.Linear(16, 16), nn.Linear(16, 16, bias=False)], num_nodes=4) + + def forward(self, x): + return self.cell([x]) + + raw_model, mutators = self._get_model_with_mutators(Net2()) + for _ in range(10): + sampler = EnumerateSampler() + model = raw_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + self.assertTrue(self._get_converted_pytorch_model(model)(torch.randn(1, 16)).size() == torch.Size([1, 64])) + + def test_nasbench201_cell(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.cell = nn.NasBench201Cell([ + lambda x, y: 
nn.Linear(x, y), + lambda x, y: nn.Linear(x, y, bias=False) + ], 10, 16) + + def forward(self, x): + return self.cell(x) + + raw_model, mutators = self._get_model_with_mutators(Net()) + for _ in range(10): + sampler = EnumerateSampler() + model = raw_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + self.assertTrue(self._get_converted_pytorch_model(model)(torch.randn(2, 10)).size() == torch.Size([2, 16])) + + def test_autoactivation(self): + @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.act = nn.AutoActivation() + + def forward(self, x): + return self.act(x) + + raw_model, mutators = self._get_model_with_mutators(Net()) + for _ in range(10): + sampler = EnumerateSampler() + model = raw_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + self.assertTrue(self._get_converted_pytorch_model(model)(torch.randn(2, 10)).size() == torch.Size([2, 10])) + + +class Python(GraphIR): + def _get_converted_pytorch_model(self, model_ir): + mutation = {mut.mutator.label: _unpack_if_only_one(mut.samples) for mut in model_ir.history} + with ContextStack('fixed', mutation): + model = model_ir.python_class(**model_ir.python_init_params) + return model + + def _get_model_with_mutators(self, pytorch_model): + return extract_mutation_from_pt_module(pytorch_model) + + def get_serializer(self): + return model_wrapper + + @unittest.skip + def test_value_choice(self): ... + + @unittest.skip + def test_value_choice_in_functional(self): ... + + @unittest.skip + def test_valuechoice_access_functional(self): ... + + @unittest.skip + def test_valuechoice_access_functional_expression(self): ... + + def test_nasbench101_cell(self): + # this is only supported in python engine for now. 
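+        # NasBench101Cell samples a DAG subject to max_num_nodes/max_num_edges; a
+        # sample violating the constraints raises InvalidMutation, so the loop below
+        # keeps drawing until enough valid architectures have been built.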
+ @self.get_serializer() + class Net(nn.Module): + def __init__(self): + super().__init__() + self.cell = nn.NasBench101Cell([lambda x: nn.Linear(x, x), lambda x: nn.Linear(x, x, bias=False)], + 10, 16, lambda x, y: nn.Linear(x, y), max_num_nodes=5, max_num_edges=7) + + def forward(self, x): + return self.cell(x) + + raw_model, mutators = self._get_model_with_mutators(Net()) + + succeeded = 0 + sampler = RandomSampler() + while succeeded <= 10: + try: + model = raw_model + for mutator in mutators: + model = mutator.bind_sampler(sampler).apply(model) + succeeded += 1 + except InvalidMutation: + continue + self.assertTrue(self._get_converted_pytorch_model(model)(torch.randn(2, 10)).size() == torch.Size([2, 16])) diff --git a/test/ut/retiarii/test_lightning_trainer.py b/test/ut/retiarii/test_lightning_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..5eab33fb0f34948ce87e66d473d45856d362168b --- /dev/null +++ b/test/ut/retiarii/test_lightning_trainer.py @@ -0,0 +1,147 @@ +import json +import pytest + +import nni +import nni.retiarii.evaluator.pytorch.lightning as pl +import nni.runtime.platform.test +import pytorch_lightning +import torch +import torch.nn as nn +import torch.nn.functional as F +from nni.retiarii.evaluator import FunctionalEvaluator +from sklearn.datasets import load_diabetes +from torch.utils.data import Dataset +from torchvision import transforms +from torchvision.datasets import MNIST + +debug = False + +progress_bar_refresh_rate = 0 +if debug: + progress_bar_refresh_rate = 1 + + +class MNISTModel(nn.Module): + def __init__(self): + super().__init__() + self.layer_1 = nn.Linear(28 * 28, 128) + self.layer_2 = nn.Linear(128, 10) + + def forward(self, x): + x = x.view(x.size(0), -1) + x = self.layer_1(x) + x = F.relu(x) + x = self.layer_2(x) + return x + + +class FCNet(nn.Module): + def __init__(self, input_size, output_size): + super().__init__() + self.l1 = nn.Linear(input_size, 5) + self.relu = nn.ReLU() + self.l2 = nn.Linear(5, output_size) + + def forward(self, x): + output = self.l1(x) + output = self.relu(output) + output = self.l2(output) + return output.view(-1) + + +@nni.trace +class DiabetesDataset(Dataset): + def __init__(self, train=True): + data = load_diabetes() + self.x = torch.tensor(data['data'], dtype=torch.float32) + self.y = torch.tensor(data['target'], dtype=torch.float32) + self.length = self.x.shape[0] + split = int(self.length * 0.8) + if train: + self.x = self.x[:split] + self.y = self.y[:split] + else: + self.x = self.x[split:] + self.y = self.y[split:] + self.length = len(self.y) + + def __getitem__(self, idx): + return self.x[idx], self.y[idx] + + def __len__(self): + return self.length + + +def _get_final_result(): + return float(json.loads(nni.runtime.platform.test._last_metric)['value']) + + +def _foo(model_cls): + assert model_cls == MNISTModel + + +def _reset(): + # this is to not affect other tests in sdk + nni.trial._intermediate_seq = 0 + nni.trial._params = {'foo': 'bar', 'parameter_id': 0} + nni.runtime.platform.test._last_metric = None + + +@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.') +def test_mnist(): + _reset() + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = nni.trace(MNIST)(root='data/mnist', train=True, download=True, transform=transform) + test_dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform) + lightning = 
pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=2, limit_train_batches=0.25, # for faster training + progress_bar_refresh_rate=progress_bar_refresh_rate) + lightning._execute(MNISTModel) + assert _get_final_result() > 0.7 + _reset() + + +@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.') +def test_diabetes(): + _reset() + nni.trial._params = {'foo': 'bar', 'parameter_id': 0} + nni.runtime.platform.test._last_metric = None + train_dataset = DiabetesDataset(train=True) + test_dataset = DiabetesDataset(train=False) + lightning = pl.Regression(optimizer=torch.optim.SGD, + train_dataloader=pl.DataLoader(train_dataset, batch_size=20), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=20), + max_epochs=100, + progress_bar_refresh_rate=progress_bar_refresh_rate) + lightning._execute(FCNet(train_dataset.x.shape[1], 1)) + assert _get_final_result() < 2e4 + _reset() + + +@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.') +def test_functional(): + FunctionalEvaluator(_foo)._execute(MNISTModel) + + +@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.') +def test_fit_api(): + _reset() + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + train_dataset = nni.trace(MNIST)(root='data/mnist', train=True, download=True, transform=transform) + test_dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform) + lightning = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100), + val_dataloaders=pl.DataLoader(test_dataset, batch_size=100), + max_epochs=1, limit_train_batches=0.1, # for faster training + progress_bar_refresh_rate=progress_bar_refresh_rate) + lightning.fit(lambda: MNISTModel()) + lightning.fit(MNISTModel) + lightning.fit(MNISTModel()) + _reset() + + +if __name__ == '__main__': + test_mnist() + test_diabetes() + test_functional() + test_fit_api() diff --git a/test/ut/retiarii/test_mutator.py b/test/ut/retiarii/test_mutator.py new file mode 100644 index 0000000000000000000000000000000000000000..a0cd05296d21fd4d949c4e6b3d2cd2a18e455bcd --- /dev/null +++ b/test/ut/retiarii/test_mutator.py @@ -0,0 +1,83 @@ +import json +from pathlib import Path +import sys + +from nni.retiarii import * + +# FIXME +import nni.retiarii.debug_configs +nni.retiarii.debug_configs.framework = 'tensorflow' + +max_pool = Operation.new('MaxPool2D', {'pool_size': 2}) +avg_pool = Operation.new('AveragePooling2D', {'pool_size': 2}) +global_pool = Operation.new('GlobalAveragePooling2D') + + +class DebugSampler(Sampler): + def __init__(self): + self.iteration = 0 + + def choice(self, candidates, mutator, model, index): + idx = (self.iteration + index) % len(candidates) + return candidates[idx] + + def mutation_start(self, mutator, model): + self.iteration += 1 + + +class DebugMutator(Mutator): + def mutate(self, model): + ops = [max_pool, avg_pool, global_pool] + + pool1 = model.graphs['stem'].get_node_by_name('pool1') + pool1.update_operation(self.choice(ops)) + + pool2 = model.graphs['stem'].get_node_by_name('pool2') + pool2.update_operation(self.choice(ops)) + + +sampler = DebugSampler() +mutator = DebugMutator() +mutator.bind_sampler(sampler) + + +json_path = Path(__file__).parent / 'mnist-tensorflow.json' +ir = json.load(json_path.open()) +model0 = Model._load(ir) + + +def test_dry_run(): + 
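+    # dry_run applies the mutator without committing to a sample: for each choice()
+    # the mutator makes, it records the full candidate list. DebugMutator chooses an
+    # op twice (pool1 and pool2), so we expect two lists, each holding the three
+    # pooling operations.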
candidates, _ = mutator.dry_run(model0) + assert len(candidates) == 2 + assert candidates[0] == [max_pool, avg_pool, global_pool] + assert candidates[1] == [max_pool, avg_pool, global_pool] + + +def test_mutation(): + model1 = mutator.apply(model0) + assert _get_pools(model1) == (avg_pool, global_pool) + + model2 = mutator.apply(model1) + assert _get_pools(model2) == (global_pool, max_pool) + + assert len(model2.history) == 2 + assert model2.history[0].from_ == model0 + assert model2.history[0].to == model1 + assert model2.history[1].from_ == model1 + assert model2.history[1].to == model2 + assert model2.history[0].mutator == mutator + assert model2.history[1].mutator == mutator + + assert _get_pools(model0) == (max_pool, max_pool) + assert _get_pools(model1) == (avg_pool, global_pool) + + +def _get_pools(model): + pool1 = model.graphs['stem'].get_node_by_name('pool1').operation + pool2 = model.graphs['stem'].get_node_by_name('pool2').operation + return pool1, pool2 + + +if __name__ == '__main__': + test_dry_run() + test_mutation() diff --git a/test/ut/retiarii/test_namespace.py b/test/ut/retiarii/test_namespace.py new file mode 100644 index 0000000000000000000000000000000000000000..23766e873c95bc6cda01c1198fcd4679b7058c5a --- /dev/null +++ b/test/ut/retiarii/test_namespace.py @@ -0,0 +1,88 @@ +import torch +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import model_wrapper + + +@model_wrapper +class Model(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.conv1 = nn.Conv2d(in_channels, 10, 3) + self.conv2 = nn.LayerChoice([ + nn.Conv2d(10, 10, 3), + nn.MaxPool2d(3) + ]) + self.conv3 = nn.LayerChoice([ + nn.Identity(), + nn.Conv2d(10, 10, 1) + ]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(10, 1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.avgpool(x).view(x.size(0), -1) + x = self.fc(x) + return x + + +@model_wrapper +class ModelInner(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.LayerChoice([ + nn.Linear(10, 10), + nn.Linear(10, 10, bias=False) + ]) + self.net2 = nn.LayerChoice([ + nn.Linear(10, 10), + nn.Linear(10, 10, bias=False) + ]) + + def forward(self, x): + x = self.net1(x) + x = self.net2(x) + return x + + +@model_wrapper +class ModelNested(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = ModelInner() + self.fc2 = nn.LayerChoice([ + nn.Linear(10, 10), + nn.Linear(10, 10, bias=False) + ]) + self.fc3 = ModelInner() + + def forward(self, x): + return self.fc3(self.fc2(self.fc1(x))) + + +def test_model_wrapper(): + model = Model(3) + assert model.trace_symbol == Model.__wrapped__ + assert model.trace_kwargs == {'in_channels': 3} + assert model.conv2.label == 'model_1' + assert model.conv3.label == 'model_2' + assert model(torch.randn(1, 3, 5, 5)).size() == torch.Size([1, 1]) + + model = Model(4) + assert model.trace_symbol == Model.__wrapped__ + assert model.conv2.label == 'model_1' # not changed + + +def test_model_wrapper_nested(): + model = ModelNested() + assert model.fc1.net1.label == 'model_1_1' + assert model.fc1.net2.label == 'model_1_2' + assert model.fc2.label == 'model_2' + assert model.fc3.net1.label == 'model_3_1' + assert model.fc3.net2.label == 'model_3_2' + + +if __name__ == '__main__': + test_model_wrapper_nested() diff --git a/test/ut/retiarii/test_strategy.py b/test/ut/retiarii/test_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd688b4fb408675254860e293caf4c85419efc8 --- 
/dev/null +++ b/test/ut/retiarii/test_strategy.py @@ -0,0 +1,164 @@ +import random +import sys +import time +import threading +from typing import * + +import nni.retiarii.execution.api +import nni.retiarii.nn.pytorch as nn +import nni.retiarii.strategy as strategy +import pytest +import torch +import torch.nn.functional as F +from nni.retiarii import Model +from nni.retiarii.converter import convert_to_graph +from nni.retiarii.execution import wait_models +from nni.retiarii.execution.interface import AbstractExecutionEngine, WorkerInfo, MetricData, AbstractGraphListener +from nni.retiarii.graph import DebugEvaluator, ModelStatus +from nni.retiarii.nn.pytorch.mutator import process_inline_mutation + + +class MockExecutionEngine(AbstractExecutionEngine): + def __init__(self, failure_prob=0.): + self.models = [] + self.failure_prob = failure_prob + self._resource_left = 4 + + def _model_complete(self, model: Model): + time.sleep(random.uniform(0, 1)) + if random.uniform(0, 1) < self.failure_prob: + model.status = ModelStatus.Failed + else: + model.metric = random.uniform(0, 1) + model.status = ModelStatus.Trained + self._resource_left += 1 + + def submit_models(self, *models: Model) -> None: + for model in models: + self.models.append(model) + self._resource_left -= 1 + threading.Thread(target=self._model_complete, args=(model, )).start() + + def list_models(self) -> List[Model]: + return self.models + + def query_available_resource(self) -> Union[List[WorkerInfo], int]: + return self._resource_left + + def budget_exhausted(self) -> bool: + pass + + def register_graph_listener(self, listener: AbstractGraphListener) -> None: + pass + + def trial_execute_graph(cls) -> MetricData: + pass + + +def _reset_execution_engine(engine=None): + nni.retiarii.execution.api._execution_engine = engine + + +class Net(nn.Module): + def __init__(self, hidden_size=32, diff_size=False): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.LayerChoice([ + nn.Linear(4*4*50, hidden_size, bias=True), + nn.Linear(4*4*50, hidden_size, bias=False) + ], label='fc1') + self.fc2 = nn.LayerChoice([ + nn.Linear(hidden_size, 10, bias=False), + nn.Linear(hidden_size, 10, bias=True) + ] + ([] if not diff_size else [nn.Linear(hidden_size, 10, bias=False)]), label='fc2') + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4*4*50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def _get_model_and_mutators(**kwargs): + base_model = Net(**kwargs) + script_module = torch.jit.script(base_model) + base_model_ir = convert_to_graph(script_module, base_model) + base_model_ir.evaluator = DebugEvaluator() + mutators = process_inline_mutation(base_model_ir) + return base_model_ir, mutators + + +def test_grid_search(): + gridsearch = strategy.GridSearch() + engine = MockExecutionEngine() + _reset_execution_engine(engine) + gridsearch.run(*_get_model_and_mutators()) + wait_models(*engine.models) + selection = set() + for model in engine.models: + selection.add(( + model.graphs['_model__fc1'].hidden_nodes[0].operation.parameters['bias'], + model.graphs['_model__fc2'].hidden_nodes[0].operation.parameters['bias'] + )) + assert len(selection) == 4 + _reset_execution_engine() + + +def test_random_search(): + random = strategy.Random() + engine = MockExecutionEngine() + _reset_execution_engine(engine) + random.run(*_get_model_and_mutators()) + 
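+    # fc1 and fc2 are independent binary choices (bias on or off), so the space has
+    # exactly 4 points; the test expects random sampling to visit all of them before
+    # the strategy stops, hence the size-4 assertion on the selection set below.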
wait_models(*engine.models) + selection = set() + for model in engine.models: + selection.add(( + model.graphs['_model__fc1'].hidden_nodes[0].operation.parameters['bias'], + model.graphs['_model__fc2'].hidden_nodes[0].operation.parameters['bias'] + )) + assert len(selection) == 4 + _reset_execution_engine() + + +def test_evolution(): + evolution = strategy.RegularizedEvolution(population_size=5, sample_size=3, cycles=10, mutation_prob=0.5, on_failure='ignore') + engine = MockExecutionEngine(failure_prob=0.2) + _reset_execution_engine(engine) + evolution.run(*_get_model_and_mutators()) + wait_models(*engine.models) + _reset_execution_engine() + + evolution = strategy.RegularizedEvolution(population_size=5, sample_size=3, cycles=10, mutation_prob=0.5, on_failure='worst') + engine = MockExecutionEngine(failure_prob=0.4) + _reset_execution_engine(engine) + evolution.run(*_get_model_and_mutators()) + wait_models(*engine.models) + _reset_execution_engine() + + +def test_rl(): + rl = strategy.PolicyBasedRL(max_collect=2, trial_per_collect=10) + engine = MockExecutionEngine(failure_prob=0.2) + _reset_execution_engine(engine) + rl.run(*_get_model_and_mutators(diff_size=True)) + wait_models(*engine.models) + _reset_execution_engine() + + rl = strategy.PolicyBasedRL(max_collect=2, trial_per_collect=10) + engine = MockExecutionEngine(failure_prob=0.2) + _reset_execution_engine(engine) + rl.run(*_get_model_and_mutators()) + wait_models(*engine.models) + _reset_execution_engine() + + +if __name__ == '__main__': + test_grid_search() + test_random_search() + test_evolution() + test_rl() diff --git a/test/ut/sdk/__init__.py b/test/ut/sdk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..406650d5b1b30d2f7684e4ab5fd45854d7dd3241 --- /dev/null +++ b/test/ut/sdk/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
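+
+# NNI reads these variables when its runtime is imported, so the SDK tests in this
+# package run against the stub 'unittest' platform (with a dummy trial job id)
+# rather than a live NNI manager.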
+ +import os + +os.environ['NNI_PLATFORM'] = 'unittest' +os.environ['NNI_TRIAL_JOB_ID'] = 'test_trial_job_id' diff --git a/test/ut/sdk/assets/classic_nas_search_space.json b/test/ut/sdk/assets/classic_nas_search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..bac470b72f33dffbe5c5532840e0a506c10d9dd3 --- /dev/null +++ b/test/ut/sdk/assets/classic_nas_search_space.json @@ -0,0 +1,26 @@ +{ + "first_conv": { + "_type": "layer_choice", + "_value": [ + "conv5x5", + "conv3x3" + ] + }, + "mid_conv": { + "_type": "layer_choice", + "_value": [ + "0", + "1" + ] + }, + "skip": { + "_type": "input_choice", + "_value": { + "candidates": [ + "", + "" + ], + "n_chosen": 1 + } + } +} diff --git a/test/ut/sdk/assets/search_space.json b/test/ut/sdk/assets/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..21b6f90996548c2411d14c32b7f6adbaf9671245 --- /dev/null +++ b/test/ut/sdk/assets/search_space.json @@ -0,0 +1,86 @@ +{ + "choice_str": { + "_type": "choice", + "_value": ["cat", "dog", "elephant", "cow", "sheep", "panda"] + }, + "choice_int": { + "_type": "choice", + "_value": [42, 43, -1] + }, + "choice_mixed": { + "_type": "choice", + "_value": [0.3, "cat", 1, null] + }, + "choice_float": { + "_type": "choice", + "_value": [0.3, 1, 2.0] + }, + "choice_single": { + "_type": "choice", + "_value": [1] + }, + "randint_ok": { + "_type": "randint", + "_value": [-2, 3] + }, + "randint_single": { + "_type": "randint", + "_value": [10, 11] + }, + "randint_fail_equal": { + "_type": "randint", + "_value": [0, 0] + }, + "uniform_ok": { + "_type": "uniform", + "_value": [-1.0, 1.5] + }, + "uniform_equal": { + "_type": "uniform", + "_value": [99.9, 99.9] + }, + "quniform_ok": { + "_type": "quniform", + "_value": [0.0, 10.0, 2.5] + }, + "quniform_clip": { + "_type": "quniform", + "_value": [2.0, 10.0, 5.0] + }, + "quniform_clip_2": { + "_type": "quniform", + "_value": [-5.5, -0.5, 6] + }, + "loguniform_ok": { + "_type": "loguniform", + "_value": [0.001, 100] + }, + "loguniform_equal": { + "_type": "loguniform", + "_value": [1, 1] + }, + "qloguniform_ok": { + "_type": "qloguniform", + "_value": [0.001, 100, 1] + }, + "qloguniform_equal": { + "_type": "qloguniform", + "_value": [2, 2, 1] + }, + "normal_ok": { + "_type": "normal", + "_value": [-1.0, 5.0] + }, + "qnormal_ok": { + "_type": "qnormal", + "_value": [-1.5, 5.0, 0.1] + }, + "lognormal_ok": { + "_type": "lognormal", + "_value": [-1.0, 5.0] + }, + "qlognormal_ok": { + "_type": "qlognormal", + "_value": [-1.5, 5.0, 0.1] + } +} \ No newline at end of file diff --git a/test/ut/sdk/expect/test_graph_module1.expect b/test/ut/sdk/expect/test_graph_module1.expect new file mode 100644 index 0000000000000000000000000000000000000000..b13e4a80c381a6fa0dd5926365601c663bb03ffc --- /dev/null +++ b/test/ut/sdk/expect/test_graph_module1.expect @@ -0,0 +1,152 @@ +node { + name: "input/input" + op: "IO Node" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 1 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "output/output.1" + op: "IO Node" + input: "myLinear/Linear[l]/22" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 1 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "myLinear/Linear[l]/bias/17" + op: "prim::GetAttr" + input: "myLinear/Linear[l]/weight/14" + attr { + key: "attr" + value { + s: "{ name : bias }" + } + } +} +node { + name: 
"myLinear/Linear[l]/weight/18" + op: "prim::GetAttr" + input: "myLinear/Linear[l]/weight/14" + attr { + key: "attr" + value { + s: "{ name : weight }" + } + } +} +node { + name: "myLinear/Linear[l]/19" + op: "aten::t" + input: "myLinear/Linear[l]/weight/18" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "myLinear/Linear[l]/20" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "myLinear/Linear[l]/21" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "myLinear/Linear[l]/22" + op: "aten::addmm" + input: "myLinear/Linear[l]/bias/17" + input: "input/input" + input: "myLinear/Linear[l]/19" + input: "myLinear/Linear[l]/20" + input: "myLinear/Linear[l]/21" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 1 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +versions { + producer: 22 +} diff --git a/test/ut/sdk/expect/test_graph_module2.expect b/test/ut/sdk/expect/test_graph_module2.expect new file mode 100644 index 0000000000000000000000000000000000000000..52715ea48065648c400dde0069990f38339fc006 --- /dev/null +++ b/test/ut/sdk/expect/test_graph_module2.expect @@ -0,0 +1,309 @@ +node { + name: "input/input.1" + op: "IO Node" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "output/output.1" + op: "IO Node" + input: "input/input.1" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "MyModule/Linear[weight]/bias/49" + op: "prim::GetAttr" + input: "MyModule/Linear[weight]/weight/35" + attr { + key: "attr" + value { + s: "{ name : bias }" + } + } +} +node { + name: "MyModule/Linear[weight]/weight/50" + op: "prim::GetAttr" + input: "MyModule/Linear[weight]/weight/35" + attr { + key: "attr" + value { + s: "{ name : weight }" + } + } +} +node { + name: "MyModule/Linear[weight]/51" + op: "aten::t" + input: "MyModule/Linear[weight]/weight/50" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/Linear[weight]/52" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/Linear[weight]/53" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/Linear[weight]/54" + op: "aten::addmm" + input: "MyModule/Linear[weight]/bias/49" + input: "input/input.1" + input: "MyModule/Linear[weight]/51" + input: "MyModule/Linear[weight]/52" + input: "MyModule/Linear[weight]/53" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/Linear[bias]/bias/55" + op: "prim::GetAttr" + input: "MyModule/Linear[bias]/weight/38" + attr { + key: "attr" + value { + s: "{ name : bias }" + } + } +} +node { + name: "MyModule/Linear[bias]/weight/56" + op: "prim::GetAttr" + input: "MyModule/Linear[bias]/weight/38" + attr { + key: "attr" + value { + 
s: "{ name : weight }" + } + } +} +node { + name: "MyModule/Linear[bias]/57" + op: "aten::t" + input: "MyModule/Linear[bias]/weight/56" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/Linear[bias]/58" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/Linear[bias]/59" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/Linear[bias]/60" + op: "aten::addmm" + input: "MyModule/Linear[bias]/bias/55" + input: "input/input.1" + input: "MyModule/Linear[bias]/57" + input: "MyModule/Linear[bias]/58" + input: "MyModule/Linear[bias]/59" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/23" + op: "prim::ListConstruct" + input: "MyModule/Linear[weight]/54" + input: "MyModule/Linear[bias]/60" + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/24" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/input" + op: "aten::cat" + input: "MyModule/23" + input: "MyModule/24" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 6 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/61" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{}" + } + } +} +versions { + producer: 22 +} diff --git a/test/ut/sdk/expect/test_graph_module3.expect b/test/ut/sdk/expect/test_graph_module3.expect new file mode 100644 index 0000000000000000000000000000000000000000..abdd714788c5ab80acb64942783cb3320d2b69e5 --- /dev/null +++ b/test/ut/sdk/expect/test_graph_module3.expect @@ -0,0 +1,250 @@ +node { + name: "input/input.1" + op: "IO Node" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "output/output.1" + op: "IO Node" + input: "MyModule/ModuleList[module]/Linear[1]/46" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 1 + } + } + } + } + } + attr { + key: "attr" + value { + s: "" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[0]/bias/35" + op: "prim::GetAttr" + input: "MyModule/ModuleList[module]/Linear[0]/weight/26" + attr { + key: "attr" + value { + s: "{ name : bias }" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[0]/weight/36" + op: "prim::GetAttr" + input: "MyModule/ModuleList[module]/Linear[0]/weight/26" + attr { + key: "attr" + value { + s: "{ name : weight }" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[0]/37" + op: "aten::t" + input: "MyModule/ModuleList[module]/Linear[0]/weight/36" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[0]/38" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[0]/39" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: 
"MyModule/ModuleList[module]/Linear[0]/input" + op: "aten::addmm" + input: "MyModule/ModuleList[module]/Linear[0]/bias/35" + input: "input/input.1" + input: "MyModule/ModuleList[module]/Linear[0]/37" + input: "MyModule/ModuleList[module]/Linear[0]/38" + input: "MyModule/ModuleList[module]/Linear[0]/39" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/bias/41" + op: "prim::GetAttr" + input: "MyModule/ModuleList[module]/Linear[1]/weight/30" + attr { + key: "attr" + value { + s: "{ name : bias }" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/weight/42" + op: "prim::GetAttr" + input: "MyModule/ModuleList[module]/Linear[1]/weight/30" + attr { + key: "attr" + value { + s: "{ name : weight }" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/43" + op: "aten::t" + input: "MyModule/ModuleList[module]/Linear[1]/weight/42" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + dim { + size: 1 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/44" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/45" + op: "prim::Constant" + attr { + key: "attr" + value { + s: "{ value : 1}" + } + } +} +node { + name: "MyModule/ModuleList[module]/Linear[1]/46" + op: "aten::addmm" + input: "MyModule/ModuleList[module]/Linear[1]/bias/41" + input: "MyModule/ModuleList[module]/Linear[0]/input" + input: "MyModule/ModuleList[module]/Linear[1]/43" + input: "MyModule/ModuleList[module]/Linear[1]/44" + input: "MyModule/ModuleList[module]/Linear[1]/45" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 4 + } + dim { + size: 1 + } + } + } + } + } + attr { + key: "attr" + value { + s: "{}" + } + } +} +versions { + producer: 22 +} diff --git a/test/ut/sdk/imported/model.py b/test/ut/sdk/imported/model.py new file mode 100644 index 0000000000000000000000000000000000000000..40efb00e4fa473ada622c43ce2c8a7d806df1d44 --- /dev/null +++ b/test/ut/sdk/imported/model.py @@ -0,0 +1,13 @@ +import nni.retiarii.nn.pytorch as nn +from nni.retiarii import basic_unit + + +@basic_unit +class ImportTest(nn.Module): + def __init__(self, foo, bar): + super().__init__() + self.foo = nn.Linear(foo, 3) + self.bar = nn.Dropout(bar) + + def __eq__(self, other): + return self.foo.in_features == other.foo.in_features and self.bar.p == other.bar.p diff --git a/test/ut/sdk/models/pytorch_models/__init__.py b/test/ut/sdk/models/pytorch_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..363c7d3c9c939aa6b62d27361f3c5acd6816a2c9 --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from .layer_choice_only import LayerChoiceOnlySearchSpace +from .mutable_scope import SpaceWithMutableScope +from .naive import NaiveSearchSpace +from .nested import NestedSpace diff --git a/test/ut/sdk/models/pytorch_models/layer_choice_only.py b/test/ut/sdk/models/pytorch_models/layer_choice_only.py new file mode 100644 index 0000000000000000000000000000000000000000..c500bc9cdc9b408360ad454dc2f8103701aabe8f --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/layer_choice_only.py @@ -0,0 +1,38 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nni.nas.pytorch.mutables import LayerChoice + + +class LayerChoiceOnlySearchSpace(nn.Module): + def __init__(self, test_case): + super().__init__() + self.test_case = test_case + self.conv1 = LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)]) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)], + return_mask=True) + self.conv3 = nn.Conv2d(16, 16, 1) + self.bn = nn.BatchNorm2d(16) + + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(16, 10) + + def forward(self, x): + bs = x.size(0) + + x = self.pool(F.relu(self.conv1(x))) + x0, mask = self.conv2(x) + self.test_case.assertEqual(mask.size(), torch.Size([2])) + x1 = F.relu(self.conv3(x0)) + + x = self.pool(self.bn(x1)) + self.test_case.assertEqual(mask.size(), torch.Size([2])) + + x = self.gap(x).view(bs, -1) + x = self.fc(x) + return x diff --git a/test/ut/sdk/models/pytorch_models/mobilenet.py b/test/ut/sdk/models/pytorch_models/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..8d60c90a4ce0175444f049fa0cc48af042c98063 --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/mobilenet.py @@ -0,0 +1,83 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
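+
+# Plain (non-mutable) MobileNet v1: conv_dw below is the depthwise-separable block,
+# a 3x3 depthwise convolution followed by a 1x1 pointwise convolution, each with
+# BN + ReLU; the '0.5flops' profile carries the channel counts labelled '0.5 AMC'.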
+ +import torch.nn as nn +import math + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + +def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + +class MobileNet(nn.Module): + def __init__(self, n_class, profile='normal'): + super(MobileNet, self).__init__() + + # original + if profile == 'normal': + in_planes = 32 + cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024] + # 0.5 AMC + elif profile == '0.5flops': + in_planes = 24 + cfg = [48, (96, 2), 80, (192, 2), 200, (328, 2), 352, 368, 360, 328, 400, (736, 2), 752] + else: + raise NotImplementedError + + self.conv1 = conv_bn(3, in_planes, stride=2) + + self.features = self._make_layers(in_planes, cfg, conv_dw) + + self.classifier = nn.Sequential( + nn.Linear(cfg[-1], n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.conv1(x) + x = self.features(x) + x = x.mean(3).mean(2) # global average pooling + + x = self.classifier(x) + return x + + def _make_layers(self, in_planes, cfg, layer): + layers = [] + for x in cfg: + out_planes = x if isinstance(x, int) else x[0] + stride = 1 if isinstance(x, int) else x[1] + layers.append(layer(in_planes, out_planes, stride)) + in_planes = out_planes + return nn.Sequential(*layers) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() diff --git a/test/ut/sdk/models/pytorch_models/mutable_scope.py b/test/ut/sdk/models/pytorch_models/mutable_scope.py new file mode 100644 index 0000000000000000000000000000000000000000..505a14880f710e7587c1222295237029adf05e1a --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/mutable_scope.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
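+
+# ENAS-style search space: each Cell pairs an InputChoice (which earlier output to
+# read) with a LayerChoice (which op to apply), and each Node wraps two Cells in a
+# MutableScope so that mutators can address the pair as one named unit.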
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nni.nas.pytorch.mutables import LayerChoice, InputChoice, MutableScope + + +class Cell(MutableScope): + def __init__(self, cell_name, prev_labels, channels): + super().__init__(cell_name) + self.input_choice = InputChoice(choose_from=prev_labels, n_chosen=1, return_mask=True, + key=cell_name + "_input") + self.op_choice = LayerChoice([ + nn.Conv2d(channels, channels, 3, padding=1), + nn.Conv2d(channels, channels, 5, padding=2), + nn.MaxPool2d(3, stride=1, padding=1), + nn.AvgPool2d(3, stride=1, padding=1), + nn.Identity() + ], key=cell_name + "_op") + + def forward(self, prev_layers): + chosen_input, chosen_mask = self.input_choice(prev_layers) + cell_out = self.op_choice(chosen_input) + return cell_out, chosen_mask + + +class Node(MutableScope): + def __init__(self, node_name, prev_node_names, channels): + super().__init__(node_name) + self.cell_x = Cell(node_name + "_x", prev_node_names, channels) + self.cell_y = Cell(node_name + "_y", prev_node_names, channels) + + def forward(self, prev_layers): + out_x, mask_x = self.cell_x(prev_layers) + out_y, mask_y = self.cell_y(prev_layers) + return out_x + out_y, mask_x | mask_y + + +class Layer(nn.Module): + def __init__(self, num_nodes, channels): + super().__init__() + self.num_nodes = num_nodes + self.nodes = nn.ModuleList() + node_labels = [InputChoice.NO_KEY, InputChoice.NO_KEY] + for i in range(num_nodes): + node_labels.append("node_{}".format(i)) + self.nodes.append(Node(node_labels[-1], node_labels[:-1], channels)) + self.final_conv_w = nn.Parameter(torch.zeros(channels, self.num_nodes + 2, channels, 1, 1), + requires_grad=True) + self.bn = nn.BatchNorm2d(channels, affine=False) + + def forward(self, pprev, prev): + prev_nodes_out = [pprev, prev] + nodes_used_mask = torch.zeros(self.num_nodes + 2, dtype=torch.bool, device=prev.device) + for i in range(self.num_nodes): + node_out, mask = self.nodes[i](prev_nodes_out) + nodes_used_mask[:mask.size(0)] |= mask.to(prev.device) + # NOTE: which device should we put mask on? + prev_nodes_out.append(node_out) + + unused_nodes = torch.cat([out for used, out in zip(nodes_used_mask, prev_nodes_out) if not used], 1) + unused_nodes = F.relu(unused_nodes) + conv_weight = self.final_conv_w[:, ~nodes_used_mask, :, :, :] + conv_weight = conv_weight.view(conv_weight.size(0), -1, 1, 1) + out = F.conv2d(unused_nodes, conv_weight) + return prev, self.bn(out) + + +class SpaceWithMutableScope(nn.Module): + def __init__(self, test_case, num_layers=4, num_nodes=5, channels=16, in_channels=3, num_classes=10): + super().__init__() + self.test_case = test_case + self.num_layers = num_layers + + self.stem = nn.Sequential( + nn.Conv2d(in_channels, channels, 3, 1, 1, bias=False), + nn.BatchNorm2d(channels) + ) + + self.layers = nn.ModuleList() + for _ in range(self.num_layers + 2): + self.layers.append(Layer(num_nodes, channels)) + self.gap = nn.AdaptiveAvgPool2d(1) + self.dense = nn.Linear(channels, num_classes) + + def forward(self, x): + prev = cur = self.stem(x) + for layer in self.layers: + prev, cur = layer(prev, cur) + + cur = self.gap(F.relu(cur)).view(x.size(0), -1) + return self.dense(cur) diff --git a/test/ut/sdk/models/pytorch_models/naive.py b/test/ut/sdk/models/pytorch_models/naive.py new file mode 100644 index 0000000000000000000000000000000000000000..0555ec17e44da660276e968bbeab0f5cd166bda4 --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/naive.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT license.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from nni.nas.pytorch.mutables import LayerChoice, InputChoice
+
+
+class NaiveSearchSpace(nn.Module):
+    def __init__(self, test_case):
+        super().__init__()
+        self.test_case = test_case
+        self.conv1 = LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)])
+        self.pool = nn.MaxPool2d(2, 2)
+        self.conv2 = LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)],
+                                 return_mask=True)
+        self.conv3 = nn.Conv2d(16, 16, 1)
+
+        self.skipconnect = InputChoice(n_candidates=1)
+        self.skipconnect2 = InputChoice(n_candidates=2, return_mask=True)
+        self.bn = nn.BatchNorm2d(16)
+
+        self.gap = nn.AdaptiveAvgPool2d(1)
+        self.fc = nn.Linear(16, 10)
+
+    def forward(self, x):
+        bs = x.size(0)
+
+        x = self.pool(F.relu(self.conv1(x)))
+        x0, mask = self.conv2(x)
+        self.test_case.assertEqual(mask.size(), torch.Size([2]))
+        x1 = F.relu(self.conv3(x0))
+
+        _, mask = self.skipconnect2([x0, x1])
+        x0 = self.skipconnect([x0])
+        if x0 is not None:
+            x1 += x0
+        x = self.pool(self.bn(x1))
+        self.test_case.assertEqual(mask.size(), torch.Size([2]))
+
+        x = self.gap(x).view(bs, -1)
+        x = self.fc(x)
+        return x
diff --git a/test/ut/sdk/models/pytorch_models/nested.py b/test/ut/sdk/models/pytorch_models/nested.py
new file mode 100644
index 0000000000000000000000000000000000000000..71e1ccf2c3b9f5b8ecb9ee7181a7241add443218
--- /dev/null
+++ b/test/ut/sdk/models/pytorch_models/nested.py
@@ -0,0 +1,34 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from nni.nas.pytorch.mutables import LayerChoice, InputChoice
+
+
+class MutableOp(nn.Module):
+    def __init__(self, kernel_size):
+        super().__init__()
+        self.conv = nn.Conv2d(3, 120, kernel_size, padding=kernel_size // 2)
+        self.nested_mutable = InputChoice(n_candidates=10)
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class NestedSpace(nn.Module):
+    # does not pass tests: MutableOp nests an InputChoice inside a LayerChoice candidate
+    def __init__(self, test_case):
+        super().__init__()
+        self.test_case = test_case
+        self.conv1 = LayerChoice([MutableOp(3), MutableOp(5)])
+        self.gap = nn.AdaptiveAvgPool2d(1)
+        self.fc1 = nn.Linear(120, 10)
+
+    def forward(self, x):
+        bs = x.size(0)
+        x = F.relu(self.conv1(x))
+        x = self.gap(x).view(bs, -1)
+        x = self.fc1(x)
+        return x
diff --git a/test/ut/sdk/models/pytorch_models/transformer.py b/test/ut/sdk/models/pytorch_models/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..608d4ed93b2b60698952fab60d52840af7a0ee2a
--- /dev/null
+++ b/test/ut/sdk/models/pytorch_models/transformer.py
@@ -0,0 +1,190 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
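+
+# Vanilla pre-norm Transformer used as a plain test model. PosEncoding precomputes
+# the usual sinusoidal table, PE[pos, 2i] = sin(pos / 10000^(2i/d)) and
+# PE[pos, 2i+1] = cos(pos / 10000^(2(i+1)/d)) as written below, registered as a buffer.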
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +import math +import copy + + +class PosEncoding(nn.Module): + def __init__(self, hidden_dim, max_seq_len=80): + super().__init__() + self.hidden_dim = hidden_dim + + pe = torch.zeros(max_seq_len, hidden_dim) + for pos in range(max_seq_len): + for i in range(0, hidden_dim, 2): + pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / hidden_dim))) + pe[pos, i + 1] = math.cos(pos / (10000 ** ((2 * (i + 1)) / hidden_dim))) + + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, x): + x = x * math.sqrt(self.hidden_dim) + x = x + torch.autograd.Variable(self.pe[:, :x.size(1)], requires_grad=False) + return x + + +def attention(query, key, value, mask=None, dropout=None): + d_k = query.size(-1) + logits = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) + if mask is not None: + logits = logits.masked_fill(mask == 0, -1e9) + attention_map = F.softmax(logits, dim=-1) + if dropout is not None: + attention_map = dropout(attention_map) + return torch.matmul(attention_map, value) + + +class MultiHeadAttention(nn.Module): + def __init__(self, hidden_dim, n_heads, dropout=0.1): + super().__init__() + + self.hidden_dim = hidden_dim + self.head_dim = hidden_dim // n_heads + self.n_heads = n_heads + + self.q_proj = nn.Linear(hidden_dim, hidden_dim) + self.v_proj = nn.Linear(hidden_dim, hidden_dim) + self.k_proj = nn.Linear(hidden_dim, hidden_dim) + self.dropout = nn.Dropout(dropout) + self.output_proj = nn.Linear(hidden_dim, hidden_dim) + + def forward(self, query, key, value, mask=None): + batch_size = query.size(0) + + # project and reshaping + k_project = self.k_proj(key) + q_project = self.q_proj(query) + v_project = self.v_proj(value) + k_reshape = k_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + q_reshape = q_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + v_reshape = v_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + + # merge heads and output + scores = attention(q_reshape, k_reshape, v_reshape, mask, self.dropout) + scores = scores.transpose(1, 2).contiguous() + scores = scores.view(batch_size, -1, self.hidden_dim) + + return self.output_proj(scores) + + +class FeedForwardLayer(nn.Module): + def __init__(self, hidden_dim, intermediate_dim=2048, dropout=0.1): + super().__init__() + self.dense1 = nn.Linear(hidden_dim, intermediate_dim) + self.dense2 = nn.Linear(intermediate_dim, hidden_dim) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + return self.dense2(self.dropout(F.relu(self.dense1(x)))) + + +class LayerNorm(nn.Module): + def __init__(self, hidden_dim, eps=1e-6): + super(LayerNorm, self).__init__() + + self.alpha = nn.Parameter(torch.ones(hidden_dim)) + self.beta = nn.Parameter(torch.zeros(hidden_dim)) + self.eps = eps + + def forward(self, x): + mean = x.mean(-1, keepdim=True) + std = x.std(-1, keepdim=True) + return self.alpha * (x - mean) / (std + self.eps) + self.beta + + +class TransformerEncoderLayer(nn.Module): + def __init__(self, n_heads, hidden_dim, dropout=0.1): + super().__init__() + + self.self_attn = MultiHeadAttention(hidden_dim, n_heads) + self.ff_layer = FeedForwardLayer(hidden_dim) + + self.norm1 = LayerNorm(hidden_dim) + self.dropout1 = nn.Dropout(dropout) + self.norm2 = LayerNorm(hidden_dim) + self.dropout2 = nn.Dropout(dropout) + + def forward(self, inp, mask): + x = self.norm1(inp) + x = inp + self.dropout1(self.self_attn(x, x, x, mask)) + x = x + 
self.dropout2(self.ff_layer(self.norm2(x))) + return x + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, n_heads, hidden_dim, dropout=0.1): + super().__init__() + + self.self_attn = MultiHeadAttention(hidden_dim, n_heads) + self.cross_attn = MultiHeadAttention(hidden_dim, n_heads) + self.ff = FeedForwardLayer(hidden_dim) + + self.norm1 = LayerNorm(hidden_dim) + self.norm2 = LayerNorm(hidden_dim) + self.norm3 = LayerNorm(hidden_dim) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + def forward(self, inp, mask, encoder_output, encoder_output_mask): + x = self.norm1(inp) + x = inp + self.dropout1(self.self_attn(x, x, x, mask)) + x = x + self.dropout2(self.cross_attn(self.norm2(x), encoder_output, encoder_output, encoder_output_mask)) + x = x + self.dropout3(self.ff(self.norm3(x))) + return x + + +class TransformerEncoder(nn.Module): + def __init__(self, vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.n_layers = n_layers + self.embedding = nn.Embedding(vocab_size, hidden_dim) + self.posencoding = PosEncoding(hidden_dim) + self.layers = nn.ModuleList([copy.deepcopy(TransformerEncoderLayer(n_heads, hidden_dim)) for _ in range(n_layers)]) + self.layernorm = LayerNorm(hidden_dim) + + def forward(self, src, mask): + x = self.embedding(src) + x = self.posencoding(x) + for i in range(self.n_layers): + x = self.layers[i](x, mask) + return self.layernorm(x) + + +class TransformerDecoder(nn.Module): + def __init__(self, vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.n_layers = n_layers + self.embedding = nn.Embedding(vocab_size, hidden_dim) + self.posencoding = PosEncoding(hidden_dim) + self.layers = nn.ModuleList([copy.deepcopy(TransformerDecoderLayer(n_heads, hidden_dim)) for _ in range(n_layers)]) + self.layernorm = LayerNorm(hidden_dim) + + def forward(self, inp, mask, encoder_output, encoder_output_mask): + x = self.embedding(inp) + x = self.posencoding(x) + for i in range(self.n_layers): + x = self.layers[i](x, mask, encoder_output, encoder_output_mask) + return self.layernorm(x) + + +class TransformerForSeq2Seq(nn.Module): + def __init__(self, src_vocab_size, tgt_vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.encoder = TransformerEncoder(src_vocab_size, n_layers, hidden_dim, n_heads) + self.decoder = TransformerDecoder(tgt_vocab_size, n_layers, hidden_dim, n_heads) + self.output_dense = nn.Linear(hidden_dim, tgt_vocab_size) + + def forward(self, src, tgt, src_mask, tgt_mask): + encoder_outputs = self.encoder(src, src_mask) + decoder_outputs = self.decoder(tgt, tgt_mask, encoder_outputs, src_mask) + + return self.output_dense(decoder_outputs) diff --git a/test/ut/sdk/test_assessor.py b/test/ut/sdk/test_assessor.py new file mode 100644 index 0000000000000000000000000000000000000000..5e9e3ff6914ace66cf10d46b41370e152eafc4fb --- /dev/null +++ b/test/ut/sdk/test_assessor.py @@ -0,0 +1,80 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
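+
+# The dispatcher normally exchanges commands with the NNI manager over a pipe; these
+# tests fake that channel with two BytesIO buffers, and _reverse_io/_restore_io flip
+# them between "manager writes" and "dispatcher reads" directions.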
+
+from io import BytesIO
+from unittest import TestCase, main
+
+from nni.assessor import Assessor, AssessResult
+from nni.runtime import msg_dispatcher_base
+from nni.runtime.msg_dispatcher import MsgDispatcher
+from nni.runtime import protocol
+from nni.runtime.protocol import CommandType, send, receive
+
+_trials = []
+_end_trials = []
+
+
+class NaiveAssessor(Assessor):
+    def assess_trial(self, trial_job_id, trial_history):
+        _trials.append(trial_job_id)
+        if sum(trial_history) % 2 == 0:
+            return AssessResult.Good
+        else:
+            return AssessResult.Bad
+
+    def trial_end(self, trial_job_id, success):
+        _end_trials.append((trial_job_id, success))
+
+
+_in_buf = BytesIO()
+_out_buf = BytesIO()
+
+
+def _reverse_io():
+    _in_buf.seek(0)
+    _out_buf.seek(0)
+    protocol._out_file = _in_buf
+    protocol._in_file = _out_buf
+
+
+def _restore_io():
+    _in_buf.seek(0)
+    _out_buf.seek(0)
+    protocol._in_file = _in_buf
+    protocol._out_file = _out_buf
+
+
+class AssessorTestCase(TestCase):
+    def test_assessor(self):
+        _reverse_io()
+        send(CommandType.ReportMetricData, '{"trial_job_id":"A","type":"PERIODICAL","sequence":0,"value":"2"}')
+        send(CommandType.ReportMetricData, '{"trial_job_id":"B","type":"PERIODICAL","sequence":0,"value":"2"}')
+        send(CommandType.ReportMetricData, '{"trial_job_id":"A","type":"PERIODICAL","sequence":1,"value":"3"}')
+        send(CommandType.TrialEnd, '{"trial_job_id":"A","event":"SYS_CANCELED"}')
+        send(CommandType.TrialEnd, '{"trial_job_id":"B","event":"SUCCEEDED"}')
+        send(CommandType.NewTrialJob, 'null')
+        _restore_io()
+
+        assessor = NaiveAssessor()
+        dispatcher = MsgDispatcher(None, assessor)
+        msg_dispatcher_base._worker_fast_exit_on_terminate = False
+
+        dispatcher.run()
+        e = dispatcher.worker_exceptions[0]
+        self.assertIs(type(e), AssertionError)
+        self.assertEqual(e.args[0], 'Unsupported command: CommandType.NewTrialJob')
+
+        self.assertEqual(_trials, ['A', 'B', 'A'])
+        self.assertEqual(_end_trials, [('A', False), ('B', True)])
+
+        _reverse_io()
+        command, data = receive()
+        self.assertIs(command, CommandType.KillTrialJob)
+        self.assertEqual(data, '"A"')
+        self.assertEqual(len(_out_buf.read()), 0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/sdk/test_builtin_tuners.py b/test/ut/sdk/test_builtin_tuners.py
new file mode 100644
index 0000000000000000000000000000000000000000..c201ae8a9dd67594480225a94600ebe3eca71d5d
--- /dev/null
+++ b/test/ut/sdk/test_builtin_tuners.py
@@ -0,0 +1,418 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
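+
+# SMAC builds only on POSIX platforms with Python < 3.9, so its tuner (and every test
+# that needs it) is guarded by the smac_imported flag computed at import time below.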
+ +import glob +import json +import logging +import os +import random +import shutil +import sys +from collections import deque +from unittest import TestCase, main + +from nni.algorithms.hpo.batch_tuner import BatchTuner +from nni.algorithms.hpo.dngo_tuner import DNGOTuner +from nni.algorithms.hpo.evolution_tuner import EvolutionTuner +from nni.algorithms.hpo.gp_tuner import GPTuner +from nni.algorithms.hpo.gridsearch_tuner import GridSearchTuner +from nni.algorithms.hpo.hyperopt_tuner import HyperoptTuner +from nni.algorithms.hpo.metis_tuner import MetisTuner +from nni.algorithms.hpo.pbt_tuner import PBTTuner +from nni.algorithms.hpo.random_tuner import RandomTuner +from nni.algorithms.hpo.regularized_evolution_tuner import RegularizedEvolutionTuner +from nni.algorithms.hpo.tpe_tuner import TpeTuner +from nni.runtime.msg_dispatcher import _pack_parameter, MsgDispatcher + +smac_imported = False +if sys.platform != 'win32' and sys.version_info < (3, 9): + from nni.algorithms.hpo.smac_tuner import SMACTuner + smac_imported = True + +from nni.tuner import Tuner + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger('test_tuner') + + +class BuiltinTunersTestCase(TestCase): + """ + Targeted at testing functions of built-in tuners, including + - [ ] load_checkpoint + - [ ] save_checkpoint + - [X] update_search_space + - [X] generate_multiple_parameters + - [X] import_data + - [ ] trial_end + - [x] receive_trial_result + """ + + def setUp(self): + self.test_round = 3 + self.params_each_round = 50 + self.exhaustive = False + + def send_trial_callback(self, param_queue): + def receive(*args): + param_queue.append(tuple(args)) + return receive + + def send_trial_result(self, tuner, parameter_id, parameters, metrics): + if parameter_id % 2 == 1: + metrics = {'default': metrics, 'extra': 'hello'} + tuner.receive_trial_result(parameter_id, parameters, metrics) + tuner.trial_end(parameter_id, True) + + def search_space_test_one(self, tuner_factory, search_space, nas=False): + # nas: whether the test checks classic nas tuner + tuner = tuner_factory() + self.assertIsInstance(tuner, Tuner) + tuner.update_search_space(search_space) + + for i in range(self.test_round): + queue = deque() + parameters = tuner.generate_multiple_parameters(list(range(i * self.params_each_round, + (i + 1) * self.params_each_round)), + st_callback=self.send_trial_callback(queue)) + logger.debug(parameters) + check_range = lambda parameters, search_space: self.nas_check_range(parameters, search_space) \ + if nas else self.check_range(parameters, search_space) + check_range(parameters, search_space) + for k in range(min(len(parameters), self.params_each_round)): + self.send_trial_result(tuner, self.params_each_round * i + k, parameters[k], random.uniform(-100, 100)) + while queue: + id_, params = queue.popleft() + check_range([params], search_space) + self.send_trial_result(tuner, id_, params, random.uniform(-100, 100)) + if not parameters and not self.exhaustive: + raise ValueError("No parameters generated") + + def check_range(self, generated_params, search_space): + EPS = 1E-6 + for param in generated_params: + if self._testMethodName == "test_batch": + param = {list(search_space.keys())[0]: param} + for k, v in param.items(): + if k == "load_checkpoint_dir" or k == "save_checkpoint_dir": + self.assertIsInstance(v, str) + continue + if k.startswith("_mutable_layer"): + _, block, layer, choice = k.split("/") + cand = search_space[block]["_value"][layer].get(choice) + # cand could be None, e.g., 
optional_inputs_chosen_state + if choice == "layer_choice": + self.assertIn(v, cand) + if choice == "optional_input_size": + if isinstance(cand, int): + self.assertEqual(v, cand) + else: + self.assertGreaterEqual(v, cand[0]) + self.assertLessEqual(v, cand[1]) + if choice == "optional_inputs": + pass # ignore for now + continue + item = search_space[k] + if item["_type"] == "choice": + self.assertIn(v, item["_value"]) + if item["_type"] == "randint": + self.assertIsInstance(v, int) + if item["_type"] == "uniform": + self.assertIsInstance(v, float) + if item["_type"] in ("randint", "uniform", "quniform", "loguniform", "qloguniform"): + self.assertGreaterEqual(v, item["_value"][0]) + self.assertLessEqual(v, item["_value"][1]) + if item["_type"].startswith("q"): + multiple = v / item["_value"][2] + print(k, v, multiple, item) + if item["_value"][0] + EPS < v < item["_value"][1] - EPS: + self.assertAlmostEqual(int(round(multiple)), multiple) + if item["_type"] in ("qlognormal", "lognormal"): + self.assertGreaterEqual(v, 0) + if item["_type"] == "mutable_layer": + for layer_name in item["_value"].keys(): + self.assertIn(v[layer_name]["chosen_layer"], item["layer_choice"]) + + def nas_check_range(self, generated_params, search_space): + for params in generated_params: + for k in params: + v = params[k] + items = search_space[k] + if items['_type'] == 'layer_choice': + self.assertIn(v['_value'], items['_value']) + elif items['_type'] == 'input_choice': + for choice in v['_value']: + self.assertIn(choice, items['_value']['candidates']) + else: + raise KeyError + + def search_space_test_all(self, tuner_factory, supported_types=None, ignore_types=None, fail_types=None): + # Three types: 1. supported; 2. ignore; 3. fail. + # NOTE(yuge): ignore types + # Supported types are listed in the table. They are meant to be supported and should be correct. + # Other than those, all the rest are "unsupported", which are expected to produce ridiculous results + # or throw some exceptions. However, there are certain types I can't check. For example, generate + # "normal" using GP Tuner returns successfully and results are fine if we check the range (-inf to +inf), + # but they make no sense: it's not a normal distribution. So they are ignored in tests for now. 
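+        # For example (mirroring test_gp below): GP Tuner is exercised with
+        # supported_types=["choice", "randint", "uniform", "quniform",
+        # "loguniform", "qloguniform"], ignores the normal-family types, and
+        # is expected to fail on "choice_str" and "choice_mixed".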
+ with open(os.path.join(os.path.dirname(__file__), "assets/search_space.json"), "r") as fp: + search_space_all = json.load(fp) + if supported_types is None: + supported_types = ["choice", "randint", "uniform", "quniform", "loguniform", "qloguniform", + "normal", "qnormal", "lognormal", "qlognormal"] + if fail_types is None: + fail_types = [] + if ignore_types is None: + ignore_types = [] + full_supported_search_space = dict() + for single in search_space_all: + space = search_space_all[single] + if any(single.startswith(t) for t in ignore_types): + continue + expected_fail = not any(single.startswith(t) for t in supported_types) or \ + any(single.startswith(t) for t in fail_types) or \ + "fail" in single # name contains fail (fail on all) + single_search_space = {single: space} + if not expected_fail: + # supports this key + self.search_space_test_one(tuner_factory, single_search_space) + full_supported_search_space.update(single_search_space) + else: + # unsupported key + with self.assertRaises(Exception, msg="Testing {}".format(single)) as cm: + self.search_space_test_one(tuner_factory, single_search_space) + logger.info("%s %s %s", tuner_factory, single, cm.exception) + if not any(t in self._testMethodName for t in ["batch", "grid_search"]): + # grid search fails for too many combinations + logger.info("Full supported search space: %s", full_supported_search_space) + self.search_space_test_one(tuner_factory, full_supported_search_space) + + def nas_search_space_test_all(self, tuner_factory): + # Since classic tuner should support only LayerChoice and InputChoice, + # ignore type and fail type are dismissed here. + with open(os.path.join(os.path.dirname(__file__), "assets/classic_nas_search_space.json"), "r") as fp: + search_space_all = json.load(fp) + full_supported_search_space = dict() + for single in search_space_all: + space = search_space_all[single] + single_search_space = {single: space} + self.search_space_test_one(tuner_factory, single_search_space, nas=True) + full_supported_search_space.update(single_search_space) + logger.info("Full supported search space: %s", full_supported_search_space) + self.search_space_test_one(tuner_factory, full_supported_search_space, nas=True) + + def import_data_test_for_pbt(self): + """ + test1: import data with complete epoch + test2: import data with incomplete epoch + """ + search_space = { + "choice_str": { + "_type": "choice", + "_value": ["cat", "dog", "elephant", "cow", "sheep", "panda"] + } + } + all_checkpoint_dir = os.path.expanduser("~/nni/checkpoint/test/") + population_size = 4 + # ===import data at the beginning=== + tuner = PBTTuner( + all_checkpoint_dir=all_checkpoint_dir, + population_size=population_size + ) + self.assertIsInstance(tuner, Tuner) + tuner.update_search_space(search_space) + save_dirs = [os.path.join(all_checkpoint_dir, str(i), str(0)) for i in range(population_size)] + # create save checkpoint directory + for save_dir in save_dirs: + os.makedirs(save_dir, exist_ok=True) + # for simplicity, omit "load_checkpoint_dir" + data = [{"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[0]}, "value": 1.1}, + {"parameter": {"choice_str": "dog", "save_checkpoint_dir": save_dirs[1]}, "value": {"default": 1.2, "tmp": 2}}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[2]}, "value": 11}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[3]}, "value": 7}] + epoch = tuner.import_data(data) + self.assertEqual(epoch, 1) + logger.info("Imported data successfully at the 
beginning") + shutil.rmtree(all_checkpoint_dir) + # ===import another data at the beginning, test the case when there is an incompleted epoch=== + tuner = PBTTuner( + all_checkpoint_dir=all_checkpoint_dir, + population_size=population_size + ) + self.assertIsInstance(tuner, Tuner) + tuner.update_search_space(search_space) + for i in range(population_size - 1): + save_dirs.append(os.path.join(all_checkpoint_dir, str(i), str(1))) + for save_dir in save_dirs: + os.makedirs(save_dir, exist_ok=True) + data = [{"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[0]}, "value": 1.1}, + {"parameter": {"choice_str": "dog", "save_checkpoint_dir": save_dirs[1]}, "value": {"default": 1.2, "tmp": 2}}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[2]}, "value": 11}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[3]}, "value": 7}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[4]}, "value": 1.1}, + {"parameter": {"choice_str": "dog", "save_checkpoint_dir": save_dirs[5]}, "value": {"default": 1.2, "tmp": 2}}, + {"parameter": {"choice_str": "cat", "save_checkpoint_dir": save_dirs[6]}, "value": 11}] + epoch = tuner.import_data(data) + self.assertEqual(epoch, 1) + logger.info("Imported data successfully at the beginning with incomplete epoch") + shutil.rmtree(all_checkpoint_dir) + + def import_data_test(self, tuner_factory, stype="choice_str", support_middle=True): + """ + import data at the beginning with number value and dict value + import data in the middle also with number value and dict value, and duplicate data record + generate parameters after data import + + Parameters + ---------- + tuner_factory : lambda + a lambda for instantiate a tuner + stype : str + the value type of hp choice, support "choice_str" and "choice_num" + """ + if stype == "choice_str": + search_space = { + "choice_str": { + "_type": "choice", + "_value": ["cat", "dog", "elephant", "cow", "sheep", "panda"] + } + } + elif stype == "choice_num": + search_space = { + "choice_num": { + "_type": "choice", + "_value": [10, 20, 30, 40, 50, 60] + } + } + else: + raise RuntimeError("Unexpected stype") + tuner = tuner_factory() + self.assertIsInstance(tuner, Tuner) + tuner.update_search_space(search_space) + # import data at the beginning + if stype == "choice_str": + data = [{"parameter": {"choice_str": "cat"}, "value": 1.1}, + {"parameter": {"choice_str": "dog"}, "value": {"default": 1.2, "tmp": 2}}] + else: + data = [{"parameter": {"choice_num": 20}, "value": 1.1}, + {"parameter": {"choice_num": 60}, "value": {"default": 1.2, "tmp": 2}}] + tuner.import_data(data) + logger.info("Imported data successfully at the beginning") + # generate parameters + parameters = tuner.generate_multiple_parameters(list(range(3))) + for i in range(3): + tuner.receive_trial_result(i, parameters[i], random.uniform(-100, 100)) + if not support_middle: + return + # import data in the middle + if stype == "choice_str": + data = [{"parameter": {"choice_str": "cat"}, "value": 1.1}, + {"parameter": {"choice_str": "dog"}, "value": {"default": 1.2, "tmp": 2}}, + {"parameter": {"choice_str": "cow"}, "value": 1.3}] + else: + data = [{"parameter": {"choice_num": 20}, "value": 1.1}, + {"parameter": {"choice_num": 60}, "value": {"default": 1.2, "tmp": 2}}, + {"parameter": {"choice_num": 50}, "value": 1.3}] + tuner.import_data(data) + logger.info("Imported data successfully in the middle") + # generate parameters again + parameters = tuner.generate_multiple_parameters([3]) + 
tuner.receive_trial_result(3, parameters[0], random.uniform(-100, 100)) + + def test_grid_search(self): + self.exhaustive = True + tuner_fn = lambda: GridSearchTuner() + self.search_space_test_all(tuner_fn) + self.import_data_test(tuner_fn, support_middle=False) + + def test_tpe(self): + tuner_fn = TpeTuner + self.search_space_test_all(TpeTuner) + self.import_data_test(tuner_fn) + + def test_random_search(self): + tuner_fn = RandomTuner + self.search_space_test_all(tuner_fn) + self.import_data_test(tuner_fn) + + def test_anneal(self): + tuner_fn = lambda: HyperoptTuner("anneal") + self.search_space_test_all(tuner_fn) + self.import_data_test(tuner_fn) + + def test_smac(self): + if not smac_imported: + return # smac doesn't work on windows + tuner_fn = lambda: SMACTuner() + self.search_space_test_all(tuner_fn, + supported_types=["choice", "randint", "uniform", "quniform", "loguniform"]) + self.import_data_test(tuner_fn) + + def test_batch(self): + self.exhaustive = True + tuner_fn = lambda: BatchTuner() + self.search_space_test_all(tuner_fn, + supported_types=["choice"]) + self.import_data_test(tuner_fn) + + def test_evolution(self): + # Needs enough population size, otherwise it will throw a runtime error + tuner_fn = lambda: EvolutionTuner(population_size=100) + self.search_space_test_all(tuner_fn) + self.import_data_test(tuner_fn) + + def test_gp(self): + self.test_round = 1 # NOTE: GP tuner got hanged for multiple testing round + tuner_fn = lambda: GPTuner() + self.search_space_test_all(tuner_fn, + supported_types=["choice", "randint", "uniform", "quniform", "loguniform", + "qloguniform"], + ignore_types=["normal", "lognormal", "qnormal", "qlognormal"], + fail_types=["choice_str", "choice_mixed"]) + self.import_data_test(tuner_fn, "choice_num") + + def test_metis(self): + self.test_round = 1 # NOTE: Metis tuner got hanged for multiple testing round + tuner_fn = lambda: MetisTuner() + self.search_space_test_all(tuner_fn, + supported_types=["choice", "randint", "uniform", "quniform"], + fail_types=["choice_str", "choice_mixed"]) + self.import_data_test(tuner_fn, "choice_num") + + def test_networkmorphism(self): + pass + + def test_ppo(self): + pass + + def test_pbt(self): + self.search_space_test_all(lambda: PBTTuner( + all_checkpoint_dir=os.path.expanduser("~/nni/checkpoint/test/"), + population_size=12 + )) + self.search_space_test_all(lambda: PBTTuner( + all_checkpoint_dir=os.path.expanduser("~/nni/checkpoint/test/"), + population_size=100 + )) + self.import_data_test_for_pbt() + + def test_dngo(self): + tuner_fn = lambda: DNGOTuner(trials_per_update=100, num_epochs_per_training=1) + self.search_space_test_all(tuner_fn, fail_types=["choice_str", "choice_mixed", + "normal", "lognormal", "qnormal", "qlognormal"]) + self.import_data_test(tuner_fn, stype='choice_num') + + def test_regularized_evolution_tuner(self): + tuner_fn = lambda: RegularizedEvolutionTuner() + self.nas_search_space_test_all(tuner_fn) + + def tearDown(self): + file_list = glob.glob("smac3*") + ["param_config_space.pcs", "scenario.txt", "model_path"] + for file in file_list: + if os.path.exists(file): + if os.path.isdir(file): + shutil.rmtree(file) + else: + os.remove(file) + + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_curvefitting_assessor.py b/test/ut/sdk/test_curvefitting_assessor.py new file mode 100644 index 0000000000000000000000000000000000000000..80a3a39c279d188a8160f9c0da1ee8ef02e57052 --- /dev/null +++ b/test/ut/sdk/test_curvefitting_assessor.py @@ -0,0 +1,45 @@ +# Copyright (c) 
Microsoft Corporation. +# Licensed under the MIT license. + +import numpy as np +import unittest + +from nni.algorithms.hpo.curvefitting_assessor import CurvefittingAssessor +from nni.algorithms.hpo.curvefitting_assessor.model_factory import CurveModel +from nni.assessor import AssessResult + +class TestCurveFittingAssessor(unittest.TestCase): + def test_init(self): + new_assessor = CurvefittingAssessor(20) + self.assertEqual(new_assessor.start_step, 6) + self.assertEqual(new_assessor.target_pos, 20) + + def test_insufficient_point(self): + new_assessor = CurvefittingAssessor(20) + ret = new_assessor.assess_trial(1, [1]) + self.assertEqual(ret, AssessResult.Good) + + def test_not_converged(self): + new_assessor = CurvefittingAssessor(20) + with self.assertRaises(TypeError): + ret = new_assessor.assess_trial([1, 199, 0, 199, 1, 209, 2]) + ret = new_assessor.assess_trial(1, [1, 199, 0, 199, 1, 209, 2]) + self.assertEqual(ret, AssessResult.Good) + models = CurveModel(21) + self.assertEqual(models.predict([1, 199, 0, 199, 1, 209, 2]), None) + + def test_curve_model(self): + test_model = CurveModel(21) + test_model.effective_model = ['vap', 'pow3', 'linear', 'logx_linear', 'dr_hill_zero_background', 'log_power', 'pow4', 'mmf', 'exp4', 'ilog2', 'weibull', 'janoschek'] + test_model.effective_model_num = 12 + test_model.point_num = 9 + test_model.target_pos = 20 + test_model.trial_history = ([1, 1, 1, 1, 1, 1, 1, 1, 1]) + test_model.weight_samples = np.ones((test_model.effective_model_num), dtype=np.float) / test_model.effective_model_num + self.assertAlmostEqual(test_model.predict_y('vap', 9), 0.5591906328335763) + self.assertAlmostEqual(test_model.predict_y('logx_linear', 15), 1.0704360293379522) + self.assertAlmostEqual(test_model.f_comb(9, test_model.weight_samples), 1.1543379521172443) + self.assertAlmostEqual(test_model.f_comb(15, test_model.weight_samples), 1.6949395581692737) + +if __name__ == '__main__': + unittest.main() diff --git a/test/ut/sdk/test_evolution_tuner.py b/test/ut/sdk/test_evolution_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..bb99841cc110a8ed9e397f5d109b3e010b8caa42 --- /dev/null +++ b/test/ut/sdk/test_evolution_tuner.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
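+
+# Note on the helpers under test, based on the assertions below: json2space
+# flattens a search space into string keys of the form 'root[<name>]-<type>'
+# (e.g. 'root[optimizer]-choice'), and json2parameter samples one
+# {'_index': ..., '_value': ...} pair for each such key.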
+ +""" +test_evolution_tuner.py +""" + +import numpy as np + +from unittest import TestCase, main + +from nni.utils import json2space, json2parameter + + +class EvolutionTunerTestCase(TestCase): + def test_json2space(self): + """test for json2space + """ + json_search_space = { + "optimizer": { + "_type": "choice", + "_value": ["Adam", "SGD"] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.0001, 0.001, 0.002, 0.005, 0.01] + } + } + search_space_instance = json2space(json_search_space) + self.assertIn('root[optimizer]-choice', search_space_instance) + self.assertIn('root[learning_rate]-choice', search_space_instance) + + def test_json2parameter(self): + """test for json2parameter + """ + json_search_space = { + "optimizer":{ + "_type":"choice","_value":["Adam", "SGD"] + }, + "learning_rate":{ + "_type":"choice", + "_value":[0.0001, 0.001, 0.002, 0.005, 0.01] + } + } + space = json2space(json_search_space) + random_state = np.random.RandomState() + is_rand = dict() + for item in space: + is_rand[item] = True + search_space_instance = json2parameter(json_search_space, is_rand, random_state) + self.assertIn(search_space_instance["optimizer"]["_index"], range(2)) + self.assertIn(search_space_instance["optimizer"]["_value"], ["Adam", "SGD"]) + self.assertIn(search_space_instance["learning_rate"]["_index"], range(5)) + self.assertIn(search_space_instance["learning_rate"]["_value"], [0.0001, 0.001, 0.002, 0.005, 0.01]) + + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_graph_utils.py b/test/ut/sdk/test_graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b00100b141559695255bd548dfb14638f25bcb73 --- /dev/null +++ b/test/ut/sdk/test_graph_utils.py @@ -0,0 +1,295 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from tensorboard.compat.proto.graph_pb2 import GraphDef
+from google.protobuf import text_format
+import unittest
+from unittest import TestCase, main
+
+from nni.common.graph_utils import build_module_graph, build_graph, TorchModuleGraph, TUPLE_UNPACK_KIND
+
+class BackboneModel1(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 1, 1, 1)
+
+    def forward(self, x):
+        return self.conv1(x)
+
+class BackboneModel2(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5, 1)
+        self.conv2 = nn.Conv2d(20, 50, 5, 1)
+        self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
+        self.bn2 = nn.BatchNorm2d(self.conv2.out_channels)
+        self.fc1 = nn.Linear(4 * 4 * 50, 500)
+        self.fc2 = nn.Linear(500, 10)
+
+    def forward(self, x):
+        x = F.relu(self.bn1(self.conv1(x)))
+        x = F.max_pool2d(x, 2, 2)
+        x = F.relu(self.bn2(self.conv2(x)))
+        x = F.max_pool2d(x, 2, 2)
+        x = x.view(x.size(0), -1)
+
+        x = F.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
+
+class BigModel(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.backbone1 = BackboneModel1()
+        self.backbone2 = BackboneModel2()
+        self.fc3 = nn.Linear(10, 2)
+
+    def forward(self, x):
+        x = self.backbone1(x)
+        x = self.backbone2(x)
+        x = self.fc3(x)
+        return x
+
+@unittest.skipIf(torch.__version__ >= '1.6.0', 'not supported')
+class GraphUtilsTestCase(TestCase):
+    def test_build_module_graph(self):
+        big_model = BigModel()
+        g = build_module_graph(big_model, torch.randn(2, 1, 28, 28))
+        leaf_modules = set([
+            'backbone1.conv1', 'backbone2.bn1', 'backbone2.bn2', 'backbone2.conv1',
+            'backbone2.conv2', 'backbone2.fc1', 'backbone2.fc2', 'fc3'
+        ])
+
+        assert set(g.leaf_modules) == leaf_modules
+        assert not leaf_modules - set(g.name_to_node.keys())
+        assert g.find_successors('backbone2.conv1') == ['backbone2.bn1']
+        assert g.find_successors('backbone2.conv2') == ['backbone2.bn2']
+        assert g.find_predecessors('backbone2.bn1') == ['backbone2.conv1']
+        assert g.find_predecessors('backbone2.bn2') == ['backbone2.conv2']
+
+    def _test_graph(self, model, dummy_input, expected_file):
+        actual_proto, _ = build_graph(model, dummy_input)
+
+        assert os.path.exists(expected_file), expected_file
+        with open(expected_file, "r") as f:
+            expected_str = f.read()
+
+        expected_proto = GraphDef()
+        text_format.Parse(expected_str, expected_proto)
+
+        self.assertEqual(len(expected_proto.node), len(actual_proto.node))
+        for i in range(len(expected_proto.node)):
+            expected_node = expected_proto.node[i]
+            actual_node = actual_proto.node[i]
+            self.assertEqual(expected_node.name, actual_node.name)
+            self.assertEqual(expected_node.op, actual_node.op)
+            self.assertEqual(expected_node.input, actual_node.input)
+            self.assertEqual(expected_node.device, actual_node.device)
+            self.assertEqual(
+                sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys()))
+
+    @unittest.skipIf(torch.__version__ < "1.4.0", "not supported")
+    def test_graph_module1(self):
+        dummy_input = (torch.zeros(1, 3),)
+
+        class myLinear(torch.nn.Module):
+            def __init__(self):
+                super(myLinear, self).__init__()
+                self.l = torch.nn.Linear(3, 5)
+
+            def forward(self, x):
+                return self.l(x)
+
+        self._test_graph(
+            myLinear(),
+            dummy_input,
+            os.path.join(os.path.dirname(__file__), "expect", "test_graph_module1.expect")
+        )
+
+    @unittest.skipIf(torch.__version__ < "1.4.0", "not supported")
+    def test_graph_module2(self):
+        class MyModule(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.weight = nn.Linear(5, 3)
+                self.bias = nn.Linear(5, 3)
+                self.module = nn.Linear(6, 1)
+
+            def forward(self, x):
+                tensors = [self.weight(x), self.bias(x)]
+                self.module(torch.cat(tensors, dim=1))
+                return x
+
+        self._test_graph(
+            MyModule(),
+            torch.randn(4, 5),
+            os.path.join(os.path.dirname(__file__), "expect", "test_graph_module2.expect")
+        )
+
+    @unittest.skipIf(torch.__version__ < "1.4.0", "not supported")
+    def test_graph_module3(self):
+        class MyModule(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.module = nn.ModuleList([
+                    nn.Linear(5, 3),
+                    nn.Linear(3, 1)
+                ])
+
+            def forward(self, x):
+                x = self.module[0](x)
+                x = self.module[1](x)
+                return x
+
+        self._test_graph(
+            MyModule(),
+            torch.randn(4, 5),
+            os.path.join(os.path.dirname(__file__), "expect", "test_graph_module3.expect")
+        )
+
+    @unittest.skipIf(torch.__version__ < "1.4.0", "not supported")
+    def test_module_reuse(self):
+        class MyModule(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.liner1 = nn.Linear(10, 10)
+                self.relu = nn.ReLU(inplace=True)
+                self.liner2 = nn.Linear(10, 20)
+                self.liner3 = nn.Linear(20, 10)
+
+            def forward(self, x):
+                x = self.liner1(x)
+                x = self.relu(x)
+                x = self.liner2(x)
+                x = self.relu(x)
+                x = self.liner3(x)
+                x = self.relu(x)
+                return x
+
+        data = torch.rand(10, 10)
+        net = MyModule()
+        traced = torch.jit.trace(net, data)
+        modulegraph = TorchModuleGraph(traced_model=traced)
+        # Because the relu module is reused, three cpp_nodes correspond to the
+        # same module. While traversing the graph, each cpp node (including
+        # the cpp_nodes that correspond to the same relu module) should have
+        # exactly one successor.
+        for name, nodeio in modulegraph.nodes_py.nodes_io.items():
+            if nodeio.input_or_output == 'input':
+                # Find the first node of the whole graph
+                start_nodes = modulegraph.input_to_node[name]
+                # We have only one single path top-down
+                assert len(start_nodes) == 1
+                node = start_nodes[0].unique_name
+                while modulegraph.find_successors(node):
+                    nodes = modulegraph.find_successors(node)
+                    assert len(nodes) == 1
+                    node = nodes[0]
+
+    @unittest.skipIf(torch.__version__ < "1.4.0", "not supported")
+    def test_module_unpack(self):
+        """
+        Test the tuple/list unpack function of TorchModuleGraph.
+        The following models come from issue 2756:
+        https://github.com/microsoft/nni/issues/2756.
+        MyModule will have two successive tuple unpack operations
+        between B and C.
+        """
+        class CBR(nn.Module):
+            def __init__(self, i, o):
+                super(CBR, self).__init__()
+                self.conv1 = nn.Conv2d(i, o, kernel_size=1)
+                self.bn1 = nn.BatchNorm2d(o)
+                self.act1 = nn.ReLU()
+
+            def forward(self, x):
+                return self.act1(self.bn1(self.conv1(x)))
+
+        class A(nn.Module):
+            def __init__(self):
+                super(A, self).__init__()
+                self.conv1 = CBR(3, 6)
+                self.conv2 = CBR(6, 8)
+                self.conv3 = CBR(6, 12)
+
+            def forward(self, x):
+                x1 = self.conv1(x)
+                x2 = self.conv2(x1)
+                x3 = self.conv3(x1)
+                return (x2, x3)
+
+        class B1(nn.Module):
+            def __init__(self):
+                super(B1, self).__init__()
+                self.conv1 = CBR(12, 32)
+                self.conv2 = CBR(32, 32)
+                self.conv3 = CBR(32, 32)
+
+            def forward(self, x):
+                x1 = self.conv1(x)
+                x2 = self.conv2(x1)
+                x3 = self.conv3(x2)
+                return (x1, x2, x3)
+
+        class B(nn.Module):
+            def __init__(self):
+                super(B, self).__init__()
+                self.b = B1()
+
+            def forward(self, x):
+                return self.b(x[-1])
+
+        class C(nn.Module):
+            def __init__(self):
+                super(C, self).__init__()
+                self.conv1 = CBR(8, 32)
+                self.conv2 = CBR(12, 32)
+                self.conv3 = CBR(32, 32)
+                self.conv4 = CBR(32, 32)
+                self.conv5 = CBR(32, 32)
+
+            def forward(self, x):
+                return (self.conv1(x[0]), self.conv2(x[1]), self.conv3(x[2]), self.conv4(x[3]), self.conv5(x[4]))
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.a = A()
+                self.b = B()
+                self.c = C()
+
+            def forward(self, x):
+                x_a = self.a(x)
+                x_b = self.b(x_a)
+                xc = self.c(x_a + x_b)
+                return xc
+
+        dummy_input = torch.rand(1, 3, 28, 28)
+        model = MyModule()
+        graph = TorchModuleGraph(model, dummy_input)
+        graph.unpack_manually()
+        for node in graph.nodes_py.nodes_op:
+            # The inputs of the function nodes should not come from a
+            # TupleUnpack node, because all the TupleUnpack nodes have been
+            # removed (unpacked) manually
+            for _input in node.inputs:
+                if _input in graph.output_to_node:
+                    predecessor = graph.output_to_node[_input]
+                    assert predecessor.op_type != TUPLE_UNPACK_KIND
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/sdk/test_hpo_formatting.py b/test/ut/sdk/test_hpo_formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b13908074e09ef7179582da086a7dad8af28342
--- /dev/null
+++ b/test/ut/sdk/test_hpo_formatting.py
@@ -0,0 +1,186 @@
+from math import exp, log
+from nni.common.hpo_utils import deformat_parameters, format_parameters, format_search_space
+
+user_space = {
+    'pool': { '_type': 'choice', '_value': ['max', 'min', 'avg'] },
+    'kernel': { '_type': 'randint', '_value': [2, 8] },
+    'D': {  # distribution
+        '_type': 'choice',
+        '_value': [
+            {
+                '_name': 'UNIFORM',
+                'dropout': { '_type': 'uniform', '_value': [0.5, 0.9] },
+                'hidden': { '_type': 'quniform', '_value': [100, 1000, 3] },
+                'U_lr': { '_type': 'loguniform', '_value': [0.0001, 0.1] },
+                'U_batch': { '_type': 'qloguniform', '_value': [16.0, 128.0, 0.725] },
+            },
+            {
+                '_name': 'NORMAL',
+                'dropout': { '_type': 'normal', '_value': [0.7, 0.2] },
+                'hidden': { '_type': 'qnormal', '_value': [500, 200, 3] },
+                'N_lr': { '_type': 'lognormal', '_value': [-6, 3] },
+                'N_batch': { '_type': 'qlognormal', '_value': [3.5, 1.2, 0.725] },
+            },
+            {
+                '_name': 'EMPTY',
+            },
+        ]
+    },
+    'not_nested': {
+        '_type': 'choice',
+        '_value': [
+            {'x': 0, 'y': 0},
+            {'x': 1, 'y': 2},
+        ],
+    },
+}
+
+spec_names = ['pool', 'kernel', 'D', 'dropout', 'hidden', 'U_lr', 'U_batch', 'dropout', 'hidden', 'N_lr', 'N_batch', 'not_nested']
+spec_types = ['choice', 'randint', 'choice', 'uniform',
'quniform', 'loguniform', 'qloguniform', 'normal', 'qnormal', 'lognormal', 'qlognormal', 'choice'] +spec_values = [['max','min','avg'], [2,8], user_space['D']['_value'], [0.5,0.9], [100.0,1000.0,3.0], [0.0001,0.1], [16.0,128.0,0.725], [0.7,0.2], [500.0,200.0,3.0], [-6.0,3.0], [3.5,1.2,0.725], [{'x':0,'y':0},{'x':1,'y':2}]] +spec_keys = [('pool',), ('kernel',), ('D',), ('D',0,'dropout'), ('D',0,'hidden'), ('D',0,'U_lr'), ('D',0,'U_batch'), ('D',1,'dropout'), ('D',1,'hidden'), ('D',1,'N_lr'), ('D',1,'N_batch'), ('not_nested',)] +spec_categoricals = [True, True, True, False, False, False, False, False, False, False, False, True] +spec_sizes = [3, 6, 3, None, None, None, None, None, None, None, None, 2] +spec_lows = [None, None, None, 0.5, 100.0, log(0.0001), log(16.0), None, None, None, None, None] +spec_highs = [None, None, None, 0.9, 1000.0, log(0.1), log(128.0), None, None, None, None, None] +spec_normals = [None, None, None, False, False, False, False, True, True, True, True, None] +spec_mus = [None, None, None, None, None, None, None, 0.7, 500.0, -6.0, 3.5, None] +spec_sigmas = [None, None, None, None, None, None, None, 0.2, 200.0, 3.0, 1.2, None] +spec_qs = [None, None, None, None, 3.0, None, 0.725, None, 3.0, None, 0.725, None] +spec_clips = [None, None, None, None, (100.0,1000.0), None, (16.0,128.0), None, None, None, None, None] +spec_logs = [None, None, None, False, False, True, True, False, False, True, True, None] + +def test_formatting(): + internal_space = format_search_space(user_space) + assert all(key == value.key for key, value in internal_space.items()) + specs = list(internal_space.values()) + assert spec_names == [spec.name for spec in specs] + assert spec_types == [spec.type for spec in specs] + assert spec_values == [spec.values for spec in specs] + assert spec_keys == [spec.key for spec in specs] + assert spec_categoricals == [spec.categorical for spec in specs] + assert spec_sizes == [spec.size for spec in specs] + assert spec_lows == [spec.low for spec in specs] + assert spec_highs == [spec.high for spec in specs] + assert spec_normals == [spec.normal_distributed for spec in specs] + assert spec_mus == [spec.mu for spec in specs] + assert spec_sigmas == [spec.sigma for spec in specs] + assert spec_qs == [spec.q for spec in specs] + assert spec_clips == [spec.clip for spec in specs] + assert spec_logs == [spec.log_distributed for spec in specs] + + +internal_params_1 = { + ('pool',): 0, + ('kernel',): 5, + ('D',): 0, + ('D',0,'dropout'): 0.7, + ('D',0,'hidden'): 100.1, # round to 99.0, then clip to 100.0 + ('D',0,'U_lr'): -4.6, + ('D',0,'U_batch'): 4.0, + ('not_nested',): 0, +} + +user_params_1 = { + 'pool': 'max', + 'kernel': 7, + 'D': { + '_name': 'UNIFORM', + 'dropout': 0.7, + 'hidden': 100.0, + 'U_lr': exp(-4.6), + 'U_batch': 54.375, + }, + 'not_nested': {'x': 0, 'y': 0}, +} + +resume_params_1 = dict(internal_params_1) +resume_params_1[('D', 0, 'hidden')] = 100.0 +resume_params_1[('D', 0, 'U_lr')] = log(exp(-4.6)) +resume_params_1[('D', 0, 'U_batch')] = log(54.375) + +internal_params_2 = { + ('pool',): 2, + ('kernel',): 0, + ('D',): 1, + ('D',1,'dropout'): 0.7, + ('D',1,'hidden'): 100.1, + ('D',1,'N_lr'): -4.6, + ('D',1,'N_batch'): 4.0, + ('not_nested',): 1, +} + +user_params_2 = { + 'pool': 'avg', + 'kernel': 2, + 'D': { + '_name': 'NORMAL', + 'dropout': 0.7, + 'hidden': 99.0, + 'N_lr': exp(-4.6), + 'N_batch': 54.375, + }, + 'not_nested': {'x': 1, 'y': 2}, +} + +resume_params_2 = dict(internal_params_2) +resume_params_2[('D', 1, 'hidden')] = 99.0 
+resume_params_2[('D', 1, 'N_lr')] = log(exp(-4.6)) +resume_params_2[('D', 1, 'N_batch')] = log(54.375) + +internal_params_3 = { + ('pool',): 1, + ('kernel',): 1, + ('D',): 2, + ('not_nested',): 1, +} + +user_params_3 = { + 'pool': 'min', + 'kernel': 3, + 'D': { + '_name': 'EMPTY', + }, + 'not_nested': {'x': 1, 'y': 2}, +} + +resume_params_3 = dict(internal_params_3) + +def test_deformatting(): + internal_space = format_search_space(user_space) + assert deformat_parameters(internal_params_1, internal_space) == user_params_1 + assert deformat_parameters(internal_params_2, internal_space) == user_params_2 + assert deformat_parameters(internal_params_3, internal_space) == user_params_3 + +def test_resuming(): + internal_space = format_search_space(user_space) + assert format_parameters(user_params_1, internal_space) == resume_params_1 + assert format_parameters(user_params_2, internal_space) == resume_params_2 + assert format_parameters(user_params_3, internal_space) == resume_params_3 + + +def test_activate(): + internal_space = format_search_space(user_space) + + assert internal_space[('pool',)].is_activated_in({}) + + partial = { ('pool',): 1, ('kernel',): 1, ('D',): 0 } + assert internal_space[('D', 0, 'dropout')].is_activated_in(partial) + assert internal_space[('D', 0, 'U_lr')].is_activated_in(partial) + assert not internal_space[('D', 1, 'dropout')].is_activated_in(partial) + assert not internal_space[('D', 1, 'N_lr')].is_activated_in(partial) + + partial = { ('pool',): 1, ('kernel',): 1, ('D',): 2 } + assert not internal_space[('D', 0, 'dropout')].is_activated_in(partial) + assert not internal_space[('D', 0, 'U_lr')].is_activated_in(partial) + assert not internal_space[('D', 1, 'dropout')].is_activated_in(partial) + assert not internal_space[('D', 1, 'N_lr')].is_activated_in(partial) + + assert internal_space[('not_nested',)].is_activated_in(partial) + + +if __name__ == '__main__': + test_formatting() + test_deformatting() + test_resuming() + test_activate() diff --git a/test/ut/sdk/test_hpo_validation.py b/test/ut/sdk/test_hpo_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..9856ce36dd1a2fb0f2e619fa7cf085a94368873d --- /dev/null +++ b/test/ut/sdk/test_hpo_validation.py @@ -0,0 +1,64 @@ +from nni.common.hpo_utils import validate_search_space + +good = { + 'choice': { '_type': 'choice', '_value': ['a', 'b'] }, + 'randint': { '_type': 'randint', '_value': [1, 10] }, + 'uniform': { '_type': 'uniform', '_value': [0, 1.0] }, + 'quniform': { '_type': 'quniform', '_value': [1, 10, 0.1] }, + 'loguniform': { '_type': 'loguniform', '_value': [0.001, 0.1] }, + 'qloguniform': { '_type': 'qloguniform', '_value': [0.001, 0.1, 0.001] }, + 'normal': { '_type': 'normal', '_value': [0, 0.1] }, + 'qnormal': { '_type': 'qnormal', '_value': [0.5, 0.1, 0.1] }, + 'lognormal': { '_type': 'lognormal', '_value': [0.0, 1] }, + 'qlognormal': { '_type': 'qlognormal', '_value': [-1, 1, 0.1] }, +} +good_partial = { + 'choice': good['choice'], + 'randint': good['randint'], +} +good_nested = { + 'outer': { + '_type': 'choice', + '_value': [ + { '_name': 'empty' }, + { '_name': 'a', 'a_1': { '_type': 'choice', '_value': ['a', 'b'] } } + ] + } +} + +bad_type = 'x' +bad_spec_type = { 'x': [1, 2, 3] } +bad_fields = { 'x': { 'type': 'choice', 'value': ['a', 'b'] } } +bad_type_name = { 'x': { '_type': 'choic', '_value': ['a'] } } +bad_value = { 'x': { '_type': 'choice', '_value': 'ab' } } +bad_choice_args = { 'x': { '_type': 'choice', '_value': [ 'a', object() ] } } +bad_2_args = { 'x': { 
'_type': 'randint', '_value': [1, 2, 3] } } +bad_3_args = { 'x': { '_type': 'quniform', '_value': [0] } } +bad_int_args = { 'x': { '_type': 'randint', '_value': [1.0, 2.0] } } +bad_float_args = { 'x': { '_type': 'uniform', '_value': ['0.1', '0.2'] } } +bad_low_high = { 'x': { '_type': 'quniform', '_value': [2, 1, 0.1] } } +bad_log = { 'x': { '_type': 'loguniform', '_value': [0, 1] } } +bad_sigma = { 'x': { '_type': 'normal', '_value': [0, 0] } } + +def test_hpo_utils(): + assert validate_search_space(good, raise_exception=False) + assert validate_search_space(good_nested, raise_exception=False) + assert not validate_search_space(bad_type, raise_exception=False) + assert not validate_search_space(bad_spec_type, raise_exception=False) + assert not validate_search_space(bad_fields, raise_exception=False) + assert not validate_search_space(bad_type_name, raise_exception=False) + assert not validate_search_space(bad_value, raise_exception=False) + assert not validate_search_space(bad_choice_args, raise_exception=False) + assert not validate_search_space(bad_2_args, raise_exception=False) + assert not validate_search_space(bad_3_args, raise_exception=False) + assert not validate_search_space(bad_int_args, raise_exception=False) + assert not validate_search_space(bad_float_args, raise_exception=False) + assert not validate_search_space(bad_low_high, raise_exception=False) + assert not validate_search_space(bad_log, raise_exception=False) + assert not validate_search_space(bad_sigma, raise_exception=False) + + assert validate_search_space(good_partial, ['choice', 'randint'], False) + assert not validate_search_space(good, ['choice', 'randint'], False) + +if __name__ == '__main__': + test_hpo_utils() diff --git a/test/ut/sdk/test_hyperopt_tuner.py b/test/ut/sdk/test_hyperopt_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..555ddea0d65bb45e607d8df99b9f54b1116a0849 --- /dev/null +++ b/test/ut/sdk/test_hyperopt_tuner.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
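+
+# Note, based on the assertions below: this adapter's json2space builds
+# hyperopt expression trees (hp.pyll.base.Apply nodes), json2parameter maps a
+# sampled index per key back to {'_index', '_value'} pairs, and json2vals
+# recovers the flat 'root[<name>]-choice' index mapping from those pairs.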
+ +""" +test_hyperopt_tuner.py +""" + +from unittest import TestCase, main + +import hyperopt as hp + +from nni.algorithms.hpo.hyperopt_tuner import json2space, json2parameter, json2vals, HyperoptTuner + + +class HyperoptTunerTestCase(TestCase): + def test_json2space(self): + """test for json2space + """ + json_search_space = { + "optimizer": { + "_type": "choice", + "_value": ["Adam", "SGD"] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.0001, 0.001, 0.002, 0.005, 0.01] + } + } + search_space_instance = json2space(json_search_space) + self.assertIsInstance(search_space_instance["optimizer"], + hp.pyll.base.Apply) + self.assertIsInstance(search_space_instance["learning_rate"], + hp.pyll.base.Apply) + + def test_json2parameter(self): + """test for json2parameter + """ + json_search_space = { + "optimizer": { + "_type": "choice", + "_value": ["Adam", "SGD"] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.0001, 0.001, 0.002, 0.005, 0.01] + } + } + parameter = { + 'root[learning_rate]-choice': 2, + 'root[optimizer]-choice': 0 + } + search_space_instance = json2parameter(json_search_space, parameter) + self.assertEqual(search_space_instance["optimizer"]["_index"], 0) + self.assertEqual(search_space_instance["optimizer"]["_value"], "Adam") + self.assertEqual(search_space_instance["learning_rate"]["_index"], 2) + self.assertEqual(search_space_instance["learning_rate"]["_value"], 0.002) + + def test_json2vals(self): + """test for json2vals + """ + json_search_space = { + "optimizer": { + "_type": "choice", + "_value": ["Adam", "SGD"] + }, + "learning_rate": { + "_type": "choice", + "_value": [0.0001, 0.001, 0.002, 0.005, 0.01] + } + } + out_y = dict() + vals = { + 'optimizer': { + '_index': 0, + '_value': 'Adam' + }, + 'learning_rate': { + '_index': 1, + '_value': 0.001 + } + } + json2vals(json_search_space, vals, out_y) + self.assertEqual(out_y["root[optimizer]-choice"], 0) + self.assertEqual(out_y["root[learning_rate]-choice"], 1) + + def test_tuner_generate(self): + for algorithm in ["tpe", "random_search", "anneal"]: + tuner = HyperoptTuner(algorithm) + choice_list = ["a", "b", 1, 2] + tuner.update_search_space({ + "a": { + "_type": "randint", + "_value": [1, 3] + }, + "b": { + "_type": "choice", + "_value": choice_list + } + }) + for k in range(30): + # sample multiple times + param = tuner.generate_parameters(k) + print(param) + self.assertIsInstance(param["a"], int) + self.assertGreaterEqual(param["a"], 1) + self.assertLessEqual(param["a"], 2) + self.assertIn(param["b"], choice_list) + + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_msg_dispatcher.py b/test/ut/sdk/test_msg_dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff4c80909dfba1ae9d17d59d02afa5a00bc42a9 --- /dev/null +++ b/test/ut/sdk/test_msg_dispatcher.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +import json +from io import BytesIO +from unittest import TestCase, main + +from nni.runtime import protocol +from nni.runtime import msg_dispatcher_base +from nni.runtime.msg_dispatcher import MsgDispatcher +from nni.runtime.protocol import CommandType, send, receive +from nni.tuner import Tuner +from nni.utils import extract_scalar_reward + + +class NaiveTuner(Tuner): + def __init__(self): + self.param = 0 + self.trial_results = [] + self.search_space = None + self._accept_customized_trials() + + def generate_parameters(self, parameter_id, **kwargs): + # report Tuner's internal states to generated parameters, + # so we don't need to pause the main loop + self.param += 2 + return { + 'param': self.param, + 'trial_results': self.trial_results, + 'search_space': self.search_space + } + + def receive_trial_result(self, parameter_id, parameters, value, **kwargs): + reward = extract_scalar_reward(value) + self.trial_results.append((parameter_id, parameters['param'], reward, kwargs.get("customized"))) + + def update_search_space(self, search_space): + self.search_space = search_space + + +_in_buf = BytesIO() +_out_buf = BytesIO() + + +def _reverse_io(): + _in_buf.seek(0) + _out_buf.seek(0) + protocol._out_file = _in_buf + protocol._in_file = _out_buf + + +def _restore_io(): + _in_buf.seek(0) + _out_buf.seek(0) + protocol._in_file = _in_buf + protocol._out_file = _out_buf + + +class MsgDispatcherTestCase(TestCase): + def test_msg_dispatcher(self): + _reverse_io() # now we are sending to Tuner's incoming stream + send(CommandType.RequestTrialJobs, '2') + send(CommandType.ReportMetricData, '{"parameter_id":0,"type":"PERIODICAL","value":"10"}') + send(CommandType.ReportMetricData, '{"parameter_id":1,"type":"FINAL","value":"11"}') + send(CommandType.UpdateSearchSpace, '{"name":"SS0"}') + send(CommandType.RequestTrialJobs, '1') + send(CommandType.KillTrialJob, 'null') + _restore_io() + + tuner = NaiveTuner() + dispatcher = MsgDispatcher(tuner) + msg_dispatcher_base._worker_fast_exit_on_terminate = False + + dispatcher.run() + e = dispatcher.worker_exceptions[0] + self.assertIs(type(e), AssertionError) + self.assertEqual(e.args[0], 'Unsupported command: CommandType.KillTrialJob') + + _reverse_io() # now we are receiving from Tuner's outgoing stream + self._assert_params(0, 2, [], None) + self._assert_params(1, 4, [], None) + + self._assert_params(2, 6, [[1, 4, 11, False]], {'name': 'SS0'}) + + self.assertEqual(len(_out_buf.read()), 0) # no more commands + + def _assert_params(self, parameter_id, param, trial_results, search_space): + command, data = receive() + self.assertIs(command, CommandType.NewTrialJob) + data = json.loads(data) + self.assertEqual(data['parameter_id'], parameter_id) + self.assertEqual(data['parameter_source'], 'algorithm') + self.assertEqual(data['parameters']['param'], param) + self.assertEqual(data['parameters']['trial_results'], trial_results) + self.assertEqual(data['parameters']['search_space'], search_space) + + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_networkmorphism_tuner.py b/test/ut/sdk/test_networkmorphism_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..79bba340081c4e6f2ffb7b3339c79c324a12410d --- /dev/null +++ b/test/ut/sdk/test_networkmorphism_tuner.py @@ -0,0 +1,188 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
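+
+# A minimal sketch of the morphism round trip these tests check, assuming
+# only the helpers imported below; illustration only, never run by the tests.
+def _morphism_round_trip():
+    from copy import deepcopy
+    from nni.algorithms.hpo.networkmorphism_tuner.graph import graph_to_json, json_to_graph
+    from nni.algorithms.hpo.networkmorphism_tuner.graph_transformer import to_wider_graph
+    from nni.algorithms.hpo.networkmorphism_tuner.nn import CnnGenerator
+    # generate a CNN graph, widen it, then round-trip it through JSON
+    graph = to_wider_graph(deepcopy(CnnGenerator(10, (32, 32, 3)).generate()))
+    return json_to_graph(graph_to_json(graph, "temp.json"))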
+ +import json +from unittest import TestCase, main +from copy import deepcopy +import torch + +from nni.algorithms.hpo.networkmorphism_tuner import NetworkMorphismTuner +from nni.algorithms.hpo.networkmorphism_tuner.graph import graph_to_json, json_to_graph +from nni.algorithms.hpo.networkmorphism_tuner.graph_transformer import ( + to_deeper_graph, + to_skip_connection_graph, + to_wider_graph, +) +from nni.algorithms.hpo.networkmorphism_tuner.layers import layer_description_extractor +from nni.algorithms.hpo.networkmorphism_tuner.nn import CnnGenerator + + +class NetworkMorphismTestCase(TestCase): + """ unittest for NetworkMorphismTuner + """ + + def test_graph_json_transform(self): + """ unittest for graph_json_transform function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + graph_init = to_wider_graph(deepcopy(graph_init)) + graph_init = to_deeper_graph(deepcopy(graph_init)) + graph_init = to_skip_connection_graph(deepcopy(graph_init)) + json_out = graph_to_json(graph_init, "temp.json") + + graph_recover = json_to_graph(json_out) + + # compare all data in graph + self.assertEqual(graph_init.input_shape, graph_recover.input_shape) + self.assertEqual(graph_init.weighted, graph_recover.weighted) + self.assertEqual( + graph_init.layer_id_to_input_node_ids, + graph_recover.layer_id_to_input_node_ids, + ) + self.assertEqual(graph_init.adj_list, graph_recover.adj_list) + self.assertEqual( + graph_init.reverse_adj_list, + graph_recover.reverse_adj_list) + self.assertEqual( + len(graph_init.operation_history), len( + graph_recover.operation_history) + ) + self.assertEqual(graph_init.n_dim, graph_recover.n_dim) + self.assertEqual(graph_init.conv, graph_recover.conv) + self.assertEqual(graph_init.batch_norm, graph_recover.batch_norm) + self.assertEqual(graph_init.vis, graph_recover.vis) + + node_list_init = [node.shape for node in graph_init.node_list] + node_list_recover = [node.shape for node in graph_recover.node_list] + self.assertEqual(node_list_init, node_list_recover) + self.assertEqual(len(graph_init.node_to_id), + len(graph_recover.node_to_id)) + layer_list_init = [ + layer_description_extractor(item, graph_init.node_to_id) + for item in graph_init.layer_list + ] + layer_list_recover = [ + layer_description_extractor(item, graph_recover.node_to_id) + for item in graph_recover.layer_list + ] + self.assertEqual(layer_list_init, layer_list_recover) + + node_to_id_init = [graph_init.node_to_id[node] + for node in graph_init.node_list] + node_to_id_recover = [ + graph_recover.node_to_id[node] for node in graph_recover.node_list + ] + self.assertEqual(node_to_id_init, node_to_id_recover) + + layer_to_id_init = [ + graph_init.layer_to_id[layer] for layer in graph_init.layer_list + ] + layer_to_id_recover = [ + graph_recover.layer_to_id[layer] for layer in graph_recover.layer_list + ] + self.assertEqual(layer_to_id_init, layer_to_id_recover) + + def test_to_wider_graph(self): + """ unittest for to_wider_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = graph_to_json(graph_init, "temp.json") + graph_recover = json_to_graph(json_out) + wider_graph = to_wider_graph(deepcopy(graph_recover)) + model = wider_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_to_deeper_graph(self): + """ unittest for to_deeper_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = graph_to_json(graph_init, "temp.json") + graph_recover = 
json_to_graph(json_out) + deeper_graph = to_deeper_graph(deepcopy(graph_recover)) + model = deeper_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_to_skip_connection_graph(self): + """ unittest for to_skip_connection_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = graph_to_json(graph_init, "temp.json") + graph_recover = json_to_graph(json_out) + skip_connection_graph = to_skip_connection_graph(deepcopy(graph_recover)) + model = skip_connection_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_generate_parameters(self): + """ unittest for generate_parameters function + """ + + tuner = NetworkMorphismTuner() + model_json = tuner.generate_parameters(0) + model_json = json.loads(model_json) + self.assertEqual(model_json["input_shape"], [32, 32, 3]) + self.assertEqual(tuner.total_data[0][1:], (-1, 0)) + + def test_receive_trial_result(self): + """ unittest for receive_trial_result function + """ + + tuner = NetworkMorphismTuner() + model_json = tuner.generate_parameters(0) + tuner.receive_trial_result(0, {}, 0.7) + (json_out, father_id, model_id) = tuner.total_data[0] + + self.assertEqual(father_id, -1) + self.assertEqual(model_json, json_out) + + ret = {"model_id": 0, "metric_value": 0.7} + self.assertEqual(tuner.bo.search_tree.adj_list[model_id], []) + self.assertEqual(tuner.history[-1], ret) + + def test_update_search_space(self): + """ unittest for update_search_space function + """ + + tuner = NetworkMorphismTuner() + self.assertEqual(tuner.search_space, dict()) + tuner.update_search_space("Test") + self.assertEqual(tuner.search_space, "Test") + + def test_init_search(self): + """ unittest for init_search function + """ + + tuner = NetworkMorphismTuner() + self.assertEqual(tuner.history, []) + tuner.init_search() + self.assertEqual(tuner.model_count, 1) + self.assertEqual(len(tuner.training_queue), 1) + self.assertEqual(len(tuner.descriptors), 1) + + def test_add_model(self): + """ unittest for add_model function + """ + + tuner = NetworkMorphismTuner() + tuner.add_model(0.8, 0) + ret = {"model_id": 0, "metric_value": 0.8} + self.assertEqual(tuner.history[-1], ret) + + def test_get_best_model_id(self): + """ unittest for get_best_model_id function + """ + + tuner = NetworkMorphismTuner() + tuner.add_model(0.8, 0) + tuner.add_model(0.9, 1) + self.assertEqual(tuner.get_best_model_id(), 1) + + +if __name__ == "__main__": + main() diff --git a/test/ut/sdk/test_protocol.py b/test/ut/sdk/test_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..c51bf2deba799a8171f7a3f2338ab83c1dbbfc1f --- /dev/null +++ b/test/ut/sdk/test_protocol.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
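+
+# The framing implied by the expected byte strings below: two ASCII bytes of
+# command type, a 14-digit zero-padded payload length in bytes, then the
+# UTF-8 payload ('你好' encodes to 6 bytes, hence the 00000000000006 length
+# field). A sketch for illustration only; the tests use protocol.send/receive.
+def _frame(command_type: str, payload: str) -> bytes:
+    data = payload.encode('utf8')
+    return command_type.encode('ascii') + b'%014d' % len(data) + data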
+ +from nni.runtime import protocol +from nni.runtime.protocol import CommandType, send, receive + +from io import BytesIO +from unittest import TestCase, main + + +def _prepare_send(): + protocol._out_file = BytesIO() + return protocol._out_file + +def _prepare_receive(data): + protocol._in_file = BytesIO(data) + + +class ProtocolTestCase(TestCase): + def test_send_en(self): + out_file = _prepare_send() + send(CommandType.NewTrialJob, 'CONTENT') + self.assertEqual(out_file.getvalue(), b'TR00000000000007CONTENT') + + def test_send_zh(self): + out_file = _prepare_send() + send(CommandType.NewTrialJob, '你好') + self.assertEqual(out_file.getvalue(), 'TR00000000000006你好'.encode('utf8')) + + def test_receive_en(self): + _prepare_receive(b'IN00000000000005hello') + command, data = receive() + self.assertIs(command, CommandType.Initialize) + self.assertEqual(data, 'hello') + + def test_receive_zh(self): + _prepare_receive('IN00000000000006世界'.encode('utf8')) + command, data = receive() + self.assertIs(command, CommandType.Initialize) + self.assertEqual(data, '世界') + + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_serializer.py b/test/ut/sdk/test_serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..88924222dad0b7dfa3c264ce7e07584da01fa90e --- /dev/null +++ b/test/ut/sdk/test_serializer.py @@ -0,0 +1,248 @@ +import math +import re +import sys +from pathlib import Path + +import pytest +import nni +import torch +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.datasets import MNIST + +from nni.common.serializer import is_traceable + +if True: # prevent auto formatting + sys.path.insert(0, Path(__file__).parent.as_posix()) + from imported.model import ImportTest + + +@nni.trace +class SimpleClass: + def __init__(self, a, b=1): + self._a = a + self._b = b + + +class UnserializableSimpleClass: + def __init__(self): + self._a = 1 + + +def test_simple_class(): + instance = SimpleClass(1, 2) + assert instance._a == 1 + assert instance._b == 2 + + dump_str = nni.dump(instance) + assert '"__kwargs__": {"a": 1, "b": 2}' in dump_str + assert '"__symbol__"' in dump_str + instance = nni.load(dump_str) + assert instance._a == 1 + assert instance._b == 2 + + +def test_external_class(): + from collections import OrderedDict + d = nni.trace(kw_only=False)(OrderedDict)([('a', 1), ('b', 2)]) + assert d['a'] == 1 + assert d['b'] == 2 + dump_str = nni.dump(d) + assert dump_str == '{"a": 1, "b": 2}' + + conv = nni.trace(torch.nn.Conv2d)(3, 16, 3) + assert conv.in_channels == 3 + assert conv.out_channels == 16 + assert conv.kernel_size == (3, 3) + assert nni.dump(conv) == \ + r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", ' \ + r'"__kwargs__": {"in_channels": 3, "out_channels": 16, "kernel_size": 3}}' + + conv = nni.load(nni.dump(conv)) + assert conv.kernel_size == (3, 3) + + +def test_nested_class(): + a = SimpleClass(1, 2) + b = SimpleClass(a) + assert b._a._a == 1 + dump_str = nni.dump(b) + b = nni.load(dump_str) + assert 'SimpleClass object at' in repr(b) + assert b._a._a == 1 + + +def test_unserializable(): + a = UnserializableSimpleClass() + dump_str = nni.dump(a) + a = nni.load(dump_str) + assert a._a == 1 + + +def test_function(): + t = nni.trace(math.sqrt, kw_only=False)(3) + assert 1 < t < 2 + assert t.trace_symbol == math.sqrt + assert t.trace_args == [3] + t = nni.load(nni.dump(t)) + assert 1 < t < 2 + assert not is_traceable(t) # trace not recovered, expected, limitation + + def simple_class_factory(bb=3.): 
+        return SimpleClass(1, bb)
+
+    t = nni.trace(simple_class_factory)(4)
+    ts = nni.dump(t)
+    assert '__kwargs__' in ts
+    t = nni.load(ts)
+    assert t._a == 1
+    assert is_traceable(t)
+    t = t.trace_copy()
+    assert is_traceable(t)
+    assert t.trace_symbol(10)._b == 10
+    assert t.trace_kwargs['bb'] == 4
+    assert is_traceable(t.trace_copy())
+
+
+class Foo:
+    def __init__(self, a, b=1):
+        self.aa = a
+        self.bb = [b + 1 for _ in range(1000)]
+
+    def __eq__(self, other):
+        return self.aa == other.aa and self.bb == other.bb
+
+
+def test_custom_class():
+    module = nni.trace(Foo)(3)
+    assert nni.load(nni.dump(module)) == module
+    module = nni.trace(Foo)(b=2, a=1)
+    assert nni.load(nni.dump(module)) == module
+
+    module = nni.trace(Foo)(Foo(1), 5)
+    dumped_module = nni.dump(module)
+    assert len(dumped_module) > 200  # the inner Foo(1) is not traced, so it is serialized by value and the dump is long
+
+    module = nni.trace(Foo)(nni.trace(Foo)(1), 5)
+    dumped_module = nni.dump(module)
+    assert nni.load(dumped_module) == module
+
+
+def test_basic_unit_and_custom_import():
+    module = ImportTest(3, 0.5)
+    ss = nni.dump(module)
+    assert ss == r'{"__symbol__": "path:imported.model.ImportTest", "__kwargs__": {"foo": 3, "bar": 0.5}}'
+    assert nni.load(nni.dump(module)) == module
+
+    import nni.retiarii.nn.pytorch as nn
+    module = nn.Conv2d(3, 10, 3, bias=False)
+    ss = nni.dump(module)
+    assert ss == r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", "__kwargs__": {"in_channels": 3, "out_channels": 10, "kernel_size": 3, "bias": false}}'
+    assert nni.load(ss).bias is None
+
+
+def test_dataset():
+    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True)
+    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
+
+    dumped_ans = {
+        "__symbol__": "path:torch.utils.data.dataloader.DataLoader",
+        "__kwargs__": {
+            "dataset": {
+                "__symbol__": "path:torchvision.datasets.mnist.MNIST",
+                "__kwargs__": {"root": "data/mnist", "train": False, "download": True}
+            },
+            "batch_size": 10
+        }
+    }
+    print(nni.dump(dataloader))
+    print(nni.dump(dumped_ans))
+    assert nni.dump(dataloader) == nni.dump(dumped_ans)
+    dataloader = nni.load(nni.dump(dumped_ans))
+    assert isinstance(dataloader, DataLoader)
+
+    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
+                               transform=nni.trace(transforms.Compose)([
+                                   nni.trace(transforms.ToTensor)(),
+                                   nni.trace(transforms.Normalize)((0.1307,), (0.3081,))
+                               ]))
+    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
+    x, y = next(iter(nni.load(nni.dump(dataloader))))
+    assert x.size() == torch.Size([10, 1, 28, 28])
+    assert y.size() == torch.Size([10])
+
+    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
+                               transform=nni.trace(transforms.Compose)(
+                                   [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+                               ))
+    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
+    x, y = next(iter(nni.load(nni.dump(dataloader))))
+    assert x.size() == torch.Size([10, 1, 28, 28])
+    assert y.size() == torch.Size([10])
+
+
+@pytest.mark.skipif(sys.platform != 'linux', reason='https://github.com/microsoft/nni/issues/4434')
+def test_multiprocessing_dataloader():
+    # check whether multi-processing works
+    # it's possible to have pickle errors
+    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
+                               transform=nni.trace(transforms.Compose)(
+                                   [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+                               ))
+    import nni.retiarii.evaluator.pytorch.lightning as pl
+    dataloader = pl.DataLoader(dataset, batch_size=10, num_workers=2)
+    x, y = next(iter(dataloader))
+    assert x.size() == torch.Size([10, 1, 28, 28])
+    assert y.size() == torch.Size([10])
+
+
+def test_type():
+    assert nni.dump(torch.optim.Adam) == '{"__nni_type__": "path:torch.optim.adam.Adam"}'
+    assert nni.load('{"__nni_type__": "path:torch.optim.adam.Adam"}') == torch.optim.Adam
+    assert Foo == nni.load(nni.dump(Foo))
+    assert nni.dump(math.floor) == '{"__nni_type__": "path:math.floor"}'
+    assert nni.load('{"__nni_type__": "path:math.floor"}') == math.floor
+
+
+def test_lightning_earlystop():
+    import nni.retiarii.evaluator.pytorch.lightning as pl
+    from pytorch_lightning.callbacks.early_stopping import EarlyStopping
+    trainer = pl.Trainer(callbacks=[nni.trace(EarlyStopping)(monitor="val_loss")])
+    trainer = nni.load(nni.dump(trainer))
+    assert any(isinstance(callback, EarlyStopping) for callback in trainer.callbacks)
+
+
+def test_generator():
+    import torch.nn as nn
+    import torch.optim as optim
+
+    class Net(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.conv = nn.Conv2d(3, 10, 1)
+
+        def forward(self, x):
+            return self.conv(x)
+
+    model = Net()
+    optimizer = nni.trace(optim.Adam)(model.parameters())
+    print(optimizer.trace_kwargs)
+
+
+if __name__ == '__main__':
+    test_generator()
diff --git a/test/ut/sdk/test_smartparam.py b/test/ut/sdk/test_smartparam.py
new file mode 100644
index 0000000000000000000000000000000000000000..86af4b18a1e4ecf9e923420bd0b6ee52540b6bb6
--- /dev/null
+++ b/test/ut/sdk/test_smartparam.py
@@ -0,0 +1,71 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
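+
+# These tests inject parameters directly into nni.trial._params, so each
+# smart-parameter call (nni.choice, nni.function_choice, nni.mutable_layer)
+# resolves its value by looking up its `key` in the injected dict rather
+# than asking a live tuner; keys follow a '<module>/<name>/<function>' scheme.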
+ +import os + +os.environ['NNI_PLATFORM'] = 'unittest' + +import nni +import nni.runtime.platform.test as test_platform +import nni.trial + +from unittest import TestCase, main + + + +class SmartParamTestCase(TestCase): + def setUp(self): + params = { + 'test_smartparam/choice1/choice': 'a', + 'test_smartparam/choice2/choice': '3*2+1', + 'test_smartparam/choice3/choice': '[1, 2]', + 'test_smartparam/choice4/choice': '{"a", 2}', + 'test_smartparam/func/function_choice': 'bar', + 'test_smartparam/lambda_func/function_choice': "lambda: 2*3", + 'mutable_block_66':{ + 'mutable_layer_0':{ + 'chosen_layer': 'conv2D(size=5)', + 'chosen_inputs': ['y'] + } + } + } + nni.trial._params = { 'parameter_id': 'test_trial', 'parameters': params } + + + def test_specified_name(self): + val = nni.choice({'a': 'a', '3*2+1': 3*2+1, '[1, 2]': [1, 2], '{"a", 2}': {"a", 2}}, name = 'choice1', key='test_smartparam/choice1/choice') + self.assertEqual(val, 'a') + val = nni.choice({'a': 'a', '3*2+1': 3*2+1, '[1, 2]': [1, 2], '{"a", 2}': {"a", 2}}, name = 'choice2', key='test_smartparam/choice2/choice') + self.assertEqual(val, 7) + val = nni.choice({'a': 'a', '3*2+1': 3*2+1, '[1, 2]': [1, 2], '{"a", 2}': {"a", 2}}, name = 'choice3', key='test_smartparam/choice3/choice') + self.assertEqual(val, [1, 2]) + val = nni.choice({'a': 'a', '3*2+1': 3*2+1, '[1, 2]': [1, 2], '{"a", 2}': {"a", 2}}, name = 'choice4', key='test_smartparam/choice4/choice') + self.assertEqual(val, {"a", 2}) + + def test_func(self): + val = nni.function_choice({'foo': foo, 'bar': bar}, name='func', key='test_smartparam/func/function_choice') + self.assertEqual(val, 'bar') + + def test_lambda_func(self): + val = nni.function_choice({"lambda: 2*3": lambda: 2*3, "lambda: 3*4": lambda: 3*4}, name = 'lambda_func', key='test_smartparam/lambda_func/function_choice') + self.assertEqual(val, 6) + + def test_mutable_layer(self): + layer_out = nni.mutable_layer('mutable_block_66', + 'mutable_layer_0', {'conv2D(size=3)': conv2D, 'conv2D(size=5)': conv2D}, {'conv2D(size=3)': + {'size':3}, 'conv2D(size=5)': {'size':5}}, [100], {'x':1,'y':2}, 1, 'classic_mode') + self.assertEqual(layer_out, [100, 2, 5]) + + + +def foo(): + return 'foo' + +def bar(): + return 'bar' + +def conv2D(inputs, size=3): + return inputs[0] + inputs[1] + [size] + +if __name__ == '__main__': + main() diff --git a/test/ut/sdk/test_trial.py b/test/ut/sdk/test_trial.py new file mode 100644 index 0000000000000000000000000000000000000000..778b85b439c45feb624efee4a3575e242a29d680 --- /dev/null +++ b/test/ut/sdk/test_trial.py @@ -0,0 +1,79 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
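+#
+# These tests run against nni.runtime.platform.test, an in-memory stand-in
+# for a real training service: parameters are injected via
+# test_platform._params, and every reported metric can be read back with
+# test_platform.get_last_metric(). A minimal round trip, mirroring the
+# cases below:
+#
+#     test_platform._params = {'parameter_id': 'p', 'parameters': {'x': 1}}
+#     assert nni.get_next_parameter() == {'x': 1}
+#     nni.report_final_result(0.9)
+#     assert test_platform.get_last_metric()['value'] == 0.9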
+
+import nni
+import nni.runtime.platform.test as test_platform
+import nni.trial
+
+import numpy as np
+from unittest import TestCase, main
+
+
+class TrialTestCase(TestCase):
+    def setUp(self):
+        self._trial_params = { 'msg': 'hi', 'x': 123, 'dict': { 'key': 'value', 'y': None } }
+        test_platform._params = { 'parameter_id': 'test_param', 'parameters': self._trial_params }
+
+    def test_get_next_parameter(self):
+        self.assertEqual(nni.get_next_parameter(), self._trial_params)
+
+    def test_get_current_parameter(self):
+        nni.get_next_parameter()
+        self.assertEqual(nni.get_current_parameter('x'), 123)
+
+    def test_get_experiment_id(self):
+        self.assertEqual(nni.get_experiment_id(), 'fakeidex')
+
+    def test_get_trial_id(self):
+        self.assertEqual(nni.get_trial_id(), 'fakeidtr')
+
+    def test_get_sequence_id(self):
+        self.assertEqual(nni.get_sequence_id(), 0)
+
+    def test_report_intermediate_result(self):
+        nni.report_intermediate_result(123)
+        self.assertEqual(test_platform.get_last_metric(), {
+            'parameter_id': 'test_param',
+            'trial_job_id': 'test_trial_job_id',
+            'type': 'PERIODICAL',
+            'sequence': 0,
+            'value': 123
+        })
+
+    def test_report_final_result_simple(self):
+        self._test_report_final_result(123, 123)
+
+    def test_report_final_result_object(self):
+        obj = ['obj1', {'key1': 'v1', 'k2': None}, 233, 0.456]
+        self._test_report_final_result(obj, obj)
+
+    def test_report_final_result_numpy(self):
+        self._test_report_final_result(np.float32(0.25), 0.25)
+
+    def test_report_final_result_nparray(self):
+        arr = np.array([[1, 2, 3], [4, 5, 6]])
+        nni.report_final_result(arr)
+        # check the reported metric value rather than the input array itself
+        val = test_platform.get_last_metric()['value']
+        self.assertEqual(len(val), 2)
+        self.assertEqual(len(val[0]), 3)
+        self.assertEqual(len(val[1]), 3)
+        self.assertEqual(val[0][0], 1)
+        self.assertEqual(val[0][1], 2)
+        self.assertEqual(val[0][2], 3)
+        self.assertEqual(val[1][0], 4)
+        self.assertEqual(val[1][1], 5)
+        self.assertEqual(val[1][2], 6)
+
+    def _test_report_final_result(self, in_, out):
+        nni.report_final_result(in_)
+        self.assertEqual(test_platform.get_last_metric(), {
+            'parameter_id': 'test_param',
+            'trial_job_id': 'test_trial_job_id',
+            'type': 'FINAL',
+            'sequence': 0,
+            'value': out
+        })
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/sdk/test_utils.py b/test/ut/sdk/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5e7d01fa89a8cbd299a7758925cb39460a318b
--- /dev/null
+++ b/test/ut/sdk/test_utils.py
@@ -0,0 +1,88 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
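+#
+# split_index() unwraps the {'_index': ..., '_value': ...} pairs that
+# index-based tuners attach to generated parameters, keeping only the
+# chosen value and recursing into '_name'-tagged nested sub-spaces:
+#
+#     split_index({'lr': {'_index': 1, '_value': 0.1}})   # -> {'lr': 0.1}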
+ +from unittest import TestCase, main + +import nni +from nni.utils import split_index + + +class UtilsTestCase(TestCase): + def test_split_index_normal(self): + """test for normal search space + """ + normal__params_with_index = { + "dropout_rate": { + "_index" : 1, + "_value" : 0.9 + }, + "hidden_size": { + "_index" : 1, + "_value" : 512 + } + } + normal__params= { + "dropout_rate": 0.9, + "hidden_size": 512 + } + + params = split_index(normal__params_with_index) + self.assertEqual(params, normal__params) + + def test_split_index_nested(self): + """test for nested search space + """ + nested_params_with_index = { + "layer0": { + "_name": "Avg_pool", + "pooling_size":{ + "_index" : 1, + "_value" : 2 + } + }, + "layer1": { + "_name": "Empty" + }, + "layer2": { + "_name": "Max_pool", + "pooling_size": { + "_index" : 2, + "_value" : 3 + } + }, + "layer3": { + "_name": "Conv", + "kernel_size": { + "_index" : 3, + "_value" : 5 + }, + "output_filters": { + "_index" : 3, + "_value" : 64 + } + } + } + nested_params = { + "layer0": { + "_name": "Avg_pool", + "pooling_size": 2 + }, + "layer1": { + "_name": "Empty" + }, + "layer2": { + "_name": "Max_pool", + "pooling_size": 3 + }, + "layer3": { + "_name": "Conv", + "kernel_size": 5, + "output_filters": 64 + } + } + params = split_index(nested_params_with_index) + self.assertEqual(params, nested_params) + + +if __name__ == '__main__': + main() diff --git a/test/ut/tools/annotation/examples/mnist_generated.py b/test/ut/tools/annotation/examples/mnist_generated.py new file mode 100644 index 0000000000000000000000000000000000000000..273e1de4ae2336443260b855da874d08a9e1a888 --- /dev/null +++ b/test/ut/tools/annotation/examples/mnist_generated.py @@ -0,0 +1,203 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +"""A deep MNIST classifier using convolutional layers.""" +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +import nni + +FLAGS = None +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + """ + MnistNetwork is for initlizing and building basic network for mnist. + """ + + def __init__(self, channel_1_num, channel_2_num, conv_size, hidden_size, + pool_size, learning_rate, x_dim=784, y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + self.conv_size = nni.choice(2, 3, 5, 7, name='self.conv_size') + self.hidden_size = nni.choice(124, 512, 1024, name='self.hidden_size') + self.pool_size = pool_size + self.learning_rate = nni.uniform(0.0001, 0.1, name='self.learning_rate' + ) + self.x_dim = x_dim + self.y_dim = y_dim + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name= + 'input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name= + 'input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + self.train_step = None + self.accuracy = None + + def build_network(self): + """ + Building network for mnist + """ + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print('input dim cannot be sqrt and reshape. input dim: ' + + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. 
input dim: %s', + str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + with tf.name_scope('conv1'): + w_conv1 = weight_variable([self.conv_size, self.conv_size, 1, + self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + h_conv1 = nni.function_choice(lambda : tf.nn.relu(conv2d( + x_image, w_conv1) + b_conv1), lambda : tf.nn.sigmoid(conv2d + (x_image, w_conv1) + b_conv1), lambda : tf.nn.tanh(conv2d( + x_image, w_conv1) + b_conv1), name='tf.nn.relu') + with tf.name_scope('pool1'): + h_pool1 = nni.function_choice(lambda : max_pool(h_conv1, self. + pool_size), lambda : avg_pool(h_conv1, self.pool_size), + name='max_pool') + with tf.name_scope('conv2'): + w_conv2 = weight_variable([self.conv_size, self.conv_size, self + .channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) + with tf.name_scope('pool2'): + h_pool2 = max_pool(h_conv2, self.pool_size) + last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + w_fc1 = weight_variable([last_dim * last_dim * self. + channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self. + channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) + with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + with tf.name_scope('fc2'): + w_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean(tf.nn. + softmax_cross_entropy_with_logits(labels=self.labels, + logits=y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer(self.learning_rate + ).minimize(cross_entropy) + with tf.name_scope('accuracy'): + correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax( + self.labels, 1)) + self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf. + float32)) + + +def conv2d(x_input, w_matrix): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME' + ) + + +def max_pool(x_input, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def avg_pool(x_input, pool_size): + return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + """ + Main function, build mnist network, run and send result to NNI. 
+    """
+    # Import data
+    mnist = download_mnist_retry(params['data_dir'])
+    print('Mnist download data done.')
+    logger.debug('Mnist download data done.')
+    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
+        channel_2_num=params['channel_2_num'], conv_size=params['conv_size'
+        ], hidden_size=params['hidden_size'], pool_size=params['pool_size'],
+        learning_rate=params['learning_rate'])
+    mnist_network.build_network()
+    logger.debug('Mnist build network done.')
+    graph_location = tempfile.mkdtemp()
+    logger.debug('Saving graph to: %s', graph_location)
+    train_writer = tf.summary.FileWriter(graph_location)
+    train_writer.add_graph(tf.get_default_graph())
+    test_acc = 0.0
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        batch_num = nni.choice(50, 250, 500, name='batch_num')
+        for i in range(batch_num):
+            batch = mnist.train.next_batch(batch_num)
+            dropout_rate = nni.choice(1, 5, name='dropout_rate')
+            mnist_network.train_step.run(feed_dict={mnist_network.images:
+                batch[0], mnist_network.labels: batch[1], mnist_network.
+                keep_prob: dropout_rate})
+            if i % 100 == 0:
+                test_acc = mnist_network.accuracy.eval(feed_dict={
+                    mnist_network.images: mnist.test.images, mnist_network.
+                    labels: mnist.test.labels, mnist_network.keep_prob: 1.0})
+                nni.report_intermediate_result(test_acc)
+                logger.debug('test accuracy %g', test_acc)
+                logger.debug('Pipe send intermediate result done.')
+        test_acc = mnist_network.accuracy.eval(feed_dict={mnist_network.
+            images: mnist.test.images, mnist_network.labels: mnist.test.
+            labels, mnist_network.keep_prob: 1.0})
+        nni.report_final_result(test_acc)
+        logger.debug('Final result is %g', test_acc)
+        logger.debug('Send final result done.')
+
+
+def generate_defualt_params():
+    """
+    Generate default parameters for mnist network.
+ """ + params = {'data_dir': '/tmp/tensorflow/mnist/input_data', + 'dropout_rate': 0.5, 'channel_1_num': 32, 'channel_2_num': 64, + 'conv_size': 5, 'pool_size': 2, 'hidden_size': 1024, + 'learning_rate': 0.0001, 'batch_num': 200} + return params + + +if __name__ == '__main__': + try: + main(generate_defualt_params()) + except Exception as exception: + logger.exception(exception) + raise diff --git a/test/ut/tools/annotation/examples/mnist_with_annotation.json b/test/ut/tools/annotation/examples/mnist_with_annotation.json new file mode 100644 index 0000000000000000000000000000000000000000..cb5cedf191a2894c70907e5322330899300335bb --- /dev/null +++ b/test/ut/tools/annotation/examples/mnist_with_annotation.json @@ -0,0 +1,56 @@ +{ + "mnist_with_annotation/batch_num/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + }, + "mnist_with_annotation/dropout_rate/choice": { + "_type": "choice", + "_value": [ + 0, + 1 + ] + }, + "mnist_with_annotation/max_pool/function_choice": { + "_type": "choice", + "_value": [ + 0, + 1 + ] + }, + "mnist_with_annotation/self.conv_size/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2, + 3 + ] + }, + "mnist_with_annotation/self.hidden_size/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + }, + "mnist_with_annotation/self.learning_rate/uniform": { + "_type": "uniform", + "_value": [ + 0.0001, + 0.1 + ] + }, + "mnist_with_annotation/tf.nn.relu/function_choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + } +} \ No newline at end of file diff --git a/test/ut/tools/annotation/examples/mnist_with_annotation.py b/test/ut/tools/annotation/examples/mnist_with_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a9c8541735b7241992e47ee002c56c14b172d6 --- /dev/null +++ b/test/ut/tools/annotation/examples/mnist_with_annotation.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +"""A deep MNIST classifier using convolutional layers.""" + +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +FLAGS = None + +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + ''' + MnistNetwork is for initlizing and building basic network for mnist. + ''' + def __init__(self, + channel_1_num, + channel_2_num, + conv_size, + hidden_size, + pool_size, + learning_rate, + x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + """@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)""" + self.conv_size = conv_size + """@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)""" + self.hidden_size = hidden_size + self.pool_size = pool_size + """@nni.variable(nni.uniform(0.0001, 0.1), name=self.learning_rate)""" + self.learning_rate = learning_rate + self.x_dim = x_dim + self.y_dim = y_dim + + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + + self.train_step = None + self.accuracy = None + + def build_network(self): + ''' + Building network for mnist + ''' + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. 
+ with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print( + 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. + with tf.name_scope('conv1'): + w_conv1 = weight_variable( + [self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + """@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)""" + h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) + + # Pooling layer - downsamples by 2X. + with tf.name_scope('pool1'): + """@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)""" + h_pool1 = max_pool(h_conv1, self.pool_size) + + # Second convolutional layer -- maps 32 feature maps to 64. + with tf.name_scope('conv2'): + w_conv2 = weight_variable([self.conv_size, self.conv_size, + self.channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) + + # Second pooling layer. + with tf.name_scope('pool2'): + h_pool2 = max_pool(h_conv2, self.pool_size) + + # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image + # is down to 7x7x64 feature maps -- maps this to 1024 features. + last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + w_fc1 = weight_variable( + [last_dim * last_dim * self.channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + + h_pool2_flat = tf.reshape( + h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) + + # Dropout - controls the complexity of the model, prevents co-adaptation of features. 
+ with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + + # Map the 1024 features to 10 classes, one for each digit + with tf.name_scope('fc2'): + w_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 + + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer( + self.learning_rate).minimize(cross_entropy) + + with tf.name_scope('accuracy'): + correct_prediction = tf.equal( + tf.argmax(y_conv, 1), tf.argmax(self.labels, 1)) + self.accuracy = tf.reduce_mean( + tf.cast(correct_prediction, tf.float32)) + + +def conv2d(x_input, w_matrix): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') + + +def max_pool(x_input, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def avg_pool(x_input, pool_size): + return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + ''' + Main function, build mnist network, run and send result to NNI. 
+ ''' + # Import data + mnist = download_mnist_retry(params['data_dir']) + print('Mnist download data done.') + logger.debug('Mnist download data done.') + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'], + channel_2_num=params['channel_2_num'], + conv_size=params['conv_size'], + hidden_size=params['hidden_size'], + pool_size=params['pool_size'], + learning_rate=params['learning_rate']) + mnist_network.build_network() + logger.debug('Mnist build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + """@nni.variable(nni.choice(50, 250, 500), name=batch_num)""" + batch_num = params['batch_num'] + for i in range(batch_num): + batch = mnist.train.next_batch(batch_num) + """@nni.variable(nni.choice(1, 5), name=dropout_rate)""" + dropout_rate = params['dropout_rate'] + mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], + mnist_network.labels: batch[1], + mnist_network.keep_prob: dropout_rate} + ) + + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_intermediate_result(test_acc)""" + logger.debug('test accuracy %g', test_acc) + logger.debug('Pipe send intermediate result done.') + + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + """@nni.report_final_result(test_acc)""" + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + + +def generate_defualt_params(): + ''' + Generate default parameters for mnist network. 
+ ''' + params = { + 'data_dir': '/tmp/tensorflow/mnist/input_data', + 'dropout_rate': 0.5, + 'channel_1_num': 32, + 'channel_2_num': 64, + 'conv_size': 5, + 'pool_size': 2, + 'hidden_size': 1024, + 'learning_rate': 1e-4, + 'batch_num': 200} + return params + + +if __name__ == '__main__': + """@nni.get_next_parameter()""" + try: + main(generate_defualt_params()) + except Exception as exception: + logger.exception(exception) + raise diff --git a/test/ut/tools/annotation/examples/mnist_without_annotation.json b/test/ut/tools/annotation/examples/mnist_without_annotation.json new file mode 100644 index 0000000000000000000000000000000000000000..ac5928046842ddba3e81ac7d2cb70a17ae32be4a --- /dev/null +++ b/test/ut/tools/annotation/examples/mnist_without_annotation.json @@ -0,0 +1,56 @@ +{ + "mnist_without_annotation/#31/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + }, + "mnist_without_annotation/#68/function_choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + }, + "mnist_without_annotation/batch_num/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2 + ] + }, + "mnist_without_annotation/conv-size/choice": { + "_type": "choice", + "_value": [ + 0, + 1, + 2, + 3 + ] + }, + "mnist_without_annotation/dropout_rate/choice": { + "_type": "choice", + "_value": [ + 0, + 1 + ] + }, + "mnist_without_annotation/h_pool1/function_choice": { + "_type": "choice", + "_value": [ + 0, + 1 + ] + }, + "mnist_without_annotation/learning_rate/uniform": { + "_type": "uniform", + "_value": [ + 0.0001, + 0.1 + ] + } +} \ No newline at end of file diff --git a/test/ut/tools/annotation/examples/mnist_without_annotation.py b/test/ut/tools/annotation/examples/mnist_without_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..04668dab682ac1fad797e65e9ce323af9620891a --- /dev/null +++ b/test/ut/tools/annotation/examples/mnist_without_annotation.py @@ -0,0 +1,244 @@ +#!/usr/bin/python + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +"""A deep MNIST classifier using convolutional layers.""" + +import logging +import math +import tempfile +import time + +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +import nni + +FLAGS = None + +logger = logging.getLogger('mnist_AutoML') + + +class MnistNetwork(object): + ''' + MnistNetwork is for initlizing and building basic network for mnist. + ''' + def __init__(self, + channel_1_num, + channel_2_num, + pool_size, + learning_rate, + x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + self.conv_size = nni.choice(2, 3, 5, 7, name='conv-size') + self.hidden_size = nni.choice(124, 512, 1024) # example: without name + self.pool_size = pool_size + self.learning_rate = nni.uniform(0.0001, 0.1, name='learning_rate') + self.x_dim = x_dim + self.y_dim = y_dim + + self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + + self.train_step = None + self.accuracy = None + + def build_network(self): + ''' + Building network for mnist + ''' + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. 
+ with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + print( + 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug( + 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) + raise + x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. + with tf.name_scope('conv1'): + w_conv1 = weight_variable( + [self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + h_conv1 = nni.function_choice( + lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), + lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), + lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1) + ) # example: without name + + # Pooling layer - downsamples by 2X. + with tf.name_scope('pool1'): + h_pool1 = max_pool(h_conv1, self.pool_size) + h_pool1 = nni.function_choice( + lambda: max_pool(h_conv1, self.pool_size), + lambda: avg_pool(h_conv1, self.pool_size), + name='h_pool1') + + + # Second convolutional layer -- maps 32 feature maps to 64. + with tf.name_scope('conv2'): + w_conv2 = weight_variable([self.conv_size, self.conv_size, + self.channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) + + # Second pooling layer. + with tf.name_scope('pool2'): # example: another style + h_pool2 = max_pool(h_conv2, self.pool_size) + + # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image + # is down to 7x7x64 feature maps -- maps this to 1024 features. + last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + w_fc1 = weight_variable( + [last_dim * last_dim * self.channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + + h_pool2_flat = tf.reshape( + h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) + + # Dropout - controls the complexity of the model, prevents co-adaptation of features. 
+ with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + + # Map the 1024 features to 10 classes, one for each digit + with tf.name_scope('fc2'): + w_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 + + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer( + self.learning_rate).minimize(cross_entropy) + + with tf.name_scope('accuracy'): + correct_prediction = tf.equal( + tf.argmax(y_conv, 1), tf.argmax(self.labels, 1)) + self.accuracy = tf.reduce_mean( + tf.cast(correct_prediction, tf.float32)) + + +def conv2d(x_input, w_matrix): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') + + +def max_pool(x_input, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def avg_pool(x_input, pool_size): + return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + +def download_mnist_retry(data_dir, max_num_retries=20): + """Try to download mnist dataset and avoid errors""" + for _ in range(max_num_retries): + try: + return input_data.read_data_sets(data_dir, one_hot=True) + except tf.errors.AlreadyExistsError: + time.sleep(1) + raise Exception("Failed to download MNIST.") + +def main(params): + ''' + Main function, build mnist network, run and send result to NNI. 
+ ''' + # Import data + mnist = download_mnist_retry(params['data_dir']) + print('Mnist download data done.') + logger.debug('Mnist download data done.') + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'], + channel_2_num=params['channel_2_num'], + pool_size=params['pool_size']) + mnist_network.build_network() + logger.debug('Mnist build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + batch_num = nni.choice(50, 250, 500, name='batch_num') + for i in range(batch_num): + batch = mnist.train.next_batch(batch_num) + dropout_rate = nni.choice(1, 5, name='dropout_rate') + mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], + mnist_network.labels: batch[1], + mnist_network.keep_prob: dropout_rate} + ) + + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + nni.report_intermediate_result(test_acc) + logger.debug('test accuracy %g', test_acc) + logger.debug('Pipe send intermediate result done.') + + test_acc = mnist_network.accuracy.eval( + feed_dict={mnist_network.images: mnist.test.images, + mnist_network.labels: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + + nni.report_final_result(test_acc) + logger.debug('Final result is %g', test_acc) + logger.debug('Send final result done.') + + +def generate_defualt_params(): + ''' + Generate default parameters for mnist network. + ''' + params = { + 'data_dir': '/tmp/tensorflow/mnist/input_data', + 'channel_1_num': 32, + 'channel_2_num': 64, + 'pool_size': 2} + return params + + +if __name__ == '__main__': + try: + main(generate_defualt_params()) + except Exception as exception: + logger.exception(exception) + raise diff --git a/test/ut/tools/annotation/test_annotation.py b/test/ut/tools/annotation/test_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb1ab6986f263c01f211b6e4872c0a8cfc85ecc --- /dev/null +++ b/test/ut/tools/annotation/test_annotation.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
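+#
+# End-to-end check of the annotation tool: expand_annotations() rewrites
+# '@nni.*' annotation docstrings under testcase/usercode into plain nni
+# API calls, and generate_search_space() recovers the search space from
+# the expanded code. A condensed version of the flow exercised below:
+#
+#     code_dir = annotation.expand_annotations(src_dir, dst_dir,
+#                                              nas_mode='classic_mode')
+#     space = annotation.generate_search_space(code_dir)
+#
+# Generated code is compared by AST rather than by text, so formatting
+# differences between expected and actual output do not matter.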
+ +from nni.tools import annotation + +import ast +import json +from pathlib import Path +import shutil +import tempfile + +import pytest + +cwd = Path(__file__).parent +shutil.rmtree(cwd / '_generated', ignore_errors=True) +shutil.copytree(cwd / 'testcase/annotated', cwd / '_generated/annotated') + +def test_search_space_generator(): + search_space = annotation.generate_search_space(cwd / '_generated/annotated') + expected = json.load((cwd / 'testcase/searchspace.json').open()) + assert search_space == expected + +def test_code_generator(): + src_dir = cwd / 'testcase/usercode' + dst_dir = cwd / '_generated/usercode' + code_dir = annotation.expand_annotations(src_dir, dst_dir, nas_mode='classic_mode') + assert Path(code_dir) == dst_dir + expect_dir = cwd / 'testcase/annotated' + _assert_source_equal(dst_dir, expect_dir, 'dir/simple.py') + _assert_source_equal(dst_dir, expect_dir, 'mnist.py') + _assert_source_equal(dst_dir, expect_dir, 'nas.py') + assert (src_dir / 'nonpy.txt').read_text() == (dst_dir / 'nonpy.txt').read_text() + +def test_annotation_detecting(): + src_dir = cwd / 'testcase/usercode/non_annotation' + code_dir = annotation.expand_annotations(src_dir, tempfile.mkdtemp()) + assert Path(code_dir) == src_dir + + +def _assert_source_equal(dir1, dir2, file_name): + ast1 = ast.parse((dir1 / file_name).read_text()) + ast2 = ast.parse((dir2 / file_name).read_text()) + _assert_ast_equal(ast1, ast2) + +def _assert_ast_equal(ast1, ast2): + assert type(ast1) is type(ast2) + if isinstance(ast1, ast.AST): + assert sorted(ast1._fields) == sorted(ast2._fields) + for field_name in ast1._fields: + field1 = getattr(ast1, field_name) + field2 = getattr(ast2, field_name) + _assert_ast_equal(field1, field2) + elif isinstance(ast1, list): + assert len(ast1) == len(ast2) + for item1, item2 in zip(ast1, ast2): + _assert_ast_equal(item1, item2) + else: + assert ast1 == ast2 + + +if __name__ == '__main__': + pytest.main() diff --git a/test/ut/tools/annotation/testcase/annotated/dir/simple.py b/test/ut/tools/annotation/testcase/annotated/dir/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..3cdaab7d0a7eb68a93f19e9e48bf3b9fcac3070b --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/dir/simple.py @@ -0,0 +1,21 @@ +import nni + + +def max_pool(k): + pass + + +h_conv1 = 1 +conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size') +abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, "{(1): 2, '3': 4}": {(1 + ): 2, '3': 4}, '[1, 2, 3]': [1, 2, 3]}, name='abc') +h_pool1 = nni.function_choice({'max_pool(h_conv1)': lambda : max_pool( + h_conv1), 'avg_pool(h_conv2, h_conv3)': lambda : avg_pool(h_conv2, + h_conv3)}, name='max_pool') +h_pool2 = nni.function_choice({'max_poo(h_conv1)': lambda : max_poo(h_conv1 + ), '(2 * 3 + 4)': lambda : 2 * 3 + 4, '(lambda x: 1 + x)': lambda : lambda + x: 1 + x}, name='max_poo') +test_acc = 1 +nni.report_intermediate_result(test_acc) +test_acc = 2 +nni.report_final_result(test_acc) diff --git a/test/ut/tools/annotation/testcase/annotated/handwrite.py b/test/ut/tools/annotation/testcase/annotated/handwrite.py new file mode 100644 index 0000000000000000000000000000000000000000..742d81b9ef0eae8d2a138809ba186426bf9511d7 --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/handwrite.py @@ -0,0 +1,23 @@ +import nni +def max_pool(k): + pass + +h_conv1 = 1 +nni.choice({'foo': foo, 'bar': bar})(1) +conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size') +abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, 7: 7}, 
name='abc') +h_pool1 = nni.function_choice({'max_pool': lambda : max_pool(h_conv1), + 'h_conv1': lambda : h_conv1, + 'avg_pool': lambda : avg_pool(h_conv2, h_conv3)} +) +h_pool1 = nni.function_choice({'max_pool(h_conv1)': lambda : max_pool( + h_conv1), 'avg_pool(h_conv2, h_conv3)': lambda : avg_pool(h_conv2, + h_conv3)}, name='max_pool') +h_pool2 = nni.function_choice({'max_poo(h_conv1)': lambda : max_poo(h_conv1 + ), '(2 * 3 + 4)': lambda : 2 * 3 + 4, '(lambda x: 1 + x)': lambda : lambda + x: 1 + x}, name='max_poo') +tmp = nni.qlognormal(1.2, 3, 4.5) +test_acc = 1 +nni.report_intermediate_result(test_acc) +test_acc = 2 +nni.report_final_result(test_acc) diff --git a/test/ut/tools/annotation/testcase/annotated/mnist.py b/test/ut/tools/annotation/testcase/annotated/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..1648f8db9394fd2189ed22f7d83e9d5ad3ce92eb --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/mnist.py @@ -0,0 +1,177 @@ +"""A deep MNIST classifier using convolutional layers. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import nni +import logging +import math +import tempfile +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data +logger = logging.getLogger('mnist') +FLAGS = None + + +class MnistNetwork(object): + + def __init__(self, channel_1_num=32, channel_2_num=64, conv_size=5, + hidden_size=1024, pool_size=2, learning_rate=0.0001, x_dim=784, + y_dim=10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + self.conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name= + 'self.conv_size') + self.hidden_size = nni.choice({124: 124, 512: 512, 1024: 1024}, + name='self.hidden_size') + self.pool_size = pool_size + self.learning_rate = nni.randint(2, 3, 5, name='self.learning_rate') + self.x_dim = x_dim + self.y_dim = y_dim + + def build_network(self): + self.x = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x') + self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y') + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + logger.debug( + 'input dim cannot be sqrt and reshape. 
input dim: ', str(self.x_dim)) + raise + x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1]) + with tf.name_scope('conv1'): + W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, + self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + h_conv1 = nni.function_choice({ + 'tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)': lambda : + tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1), + 'tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1)': lambda : + tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1), + 'tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)': lambda : + tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)}, name= + 'tf.nn.relu') + with tf.name_scope('pool1'): + h_pool1 = nni.function_choice({ + 'max_pool(h_conv1, self.pool_size)': lambda : max_pool( + h_conv1, self.pool_size), + 'avg_pool(h_conv1, self.pool_size)': lambda : avg_pool( + h_conv1, self.pool_size)}, name='max_pool') + with tf.name_scope('conv2'): + W_conv2 = weight_variable([self.conv_size, self.conv_size, self + .channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) + with tf.name_scope('pool2'): + h_pool2 = max_pool(h_conv2, self.pool_size) + last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + W_fc1 = weight_variable([last_dim * last_dim * self. + channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self. + channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) + with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + with tf.name_scope('fc2'): + W_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean(tf.nn. + softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv) + ) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer(self.learning_rate + ).minimize(cross_entropy) + with tf.name_scope('accuracy'): + correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax( + self.y, 1)) + self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf. 
+ float32)) + return + + +def conv2d(x, W): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + + +def max_pool(x, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1], strides=[1, + pool_size, pool_size, 1], padding='SAME') + + +def avg_pool(x, pool_size): + return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1], strides=[1, + pool_size, pool_size, 1], padding='SAME') + + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + + +def main(): + data_dir = '/tmp/tensorflow/mnist/input_data' + mnist = input_data.read_data_sets(data_dir, one_hot=True) + logger.debug('Mnist download data done.') + mnist_network = MnistNetwork() + mnist_network.build_network() + logger.debug('Mnist build network done.') + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + batch_num = 200 + for i in range(batch_num): + batch_size = nni.choice({50: 50, 250: 250, 500: 500}, name= + 'batch_size') + batch = mnist.train.next_batch(batch_size) + dropout_rate = nni.choice({1: 1, 5: 5}, name='dropout_rate') + mnist_network.train_step.run(feed_dict={mnist_network.x: batch[ + 0], mnist_network.y: batch[1], mnist_network.keep_prob: + dropout_rate}) + if i % 100 == 0: + test_acc = mnist_network.accuracy.eval(feed_dict={ + mnist_network.x: mnist.test.images, mnist_network.y: + mnist.test.labels, mnist_network.keep_prob: 1.0}) + nni.report_intermediate_result(test_acc) + test_acc = mnist_network.accuracy.eval(feed_dict={mnist_network.x: + mnist.test.images, mnist_network.y: mnist.test.labels, + mnist_network.keep_prob: 1.0}) + nni.report_final_result(test_acc) + + +def generate_default_params(): + params = {'data_dir': '/tmp/tensorflow/mnist/input_data', + 'dropout_rate': 0.5, 'channel_1_num': 32, 'channel_2_num': 64, + 'conv_size': 5, 'pool_size': 2, 'hidden_size': 1024, 'batch_size': + 50, 'batch_num': 200, 'learning_rate': 0.0001} + return params + + +if __name__ == '__main__': + nni.get_next_parameter() + try: + params = generate_default_params() + logger.debug('params') + logger.debug('params update') + main() + except: + logger.exception('Got some exception in while loop in mnist.py') + raise diff --git a/test/ut/tools/annotation/testcase/annotated/nas.py b/test/ut/tools/annotation/testcase/annotated/nas.py new file mode 100644 index 0000000000000000000000000000000000000000..227f7960b25b3c19717dfb3b1504b4b38eb370e7 --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/nas.py @@ -0,0 +1,49 @@ +import nni +import time + + +def add_one(inputs): + return inputs + 1 + + +def add_two(inputs): + return inputs + 2 + + +def add_three(inputs): + return inputs + 3 + + +def add_four(inputs): + return inputs + 4 + + +def main(): + images = 5 + layer_1_out = nni.mutable_layer('mutable_block_39', 'mutable_layer_0', + {'add_one()': add_one, 'add_two()': add_two, 'add_three()': + add_three, 'add_four()': add_four}, {'add_one()': {}, 'add_two()': + {}, 
'add_three()': {}, 'add_four()': {}}, [], {'images': images}, 1, + 'classic_mode') + layer_2_out = nni.mutable_layer('mutable_block_39', 'mutable_layer_1', + {'add_one()': add_one, 'add_two()': add_two, 'add_three()': + add_three, 'add_four()': add_four}, {'add_one()': {}, 'add_two()': + {}, 'add_three()': {}, 'add_four()': {}}, [], {'layer_1_out': + layer_1_out}, 1, 'classic_mode') + layer_3_out = nni.mutable_layer('mutable_block_39', 'mutable_layer_2', + {'add_one()': add_one, 'add_two()': add_two, 'add_three()': + add_three, 'add_four()': add_four}, {'add_one()': {}, 'add_two()': + {}, 'add_three()': {}, 'add_four()': {}}, [], {'layer_1_out': + layer_1_out, 'layer_2_out': layer_2_out}, 1, 'classic_mode') + nni.report_intermediate_result(layer_1_out) + time.sleep(2) + nni.report_intermediate_result(layer_2_out) + time.sleep(2) + nni.report_intermediate_result(layer_3_out) + time.sleep(2) + layer_3_out = layer_3_out + 10 + nni.report_final_result(layer_3_out) + + +if __name__ == '__main__': + main() diff --git a/test/ut/tools/annotation/testcase/annotated/non_annotation/bar.py b/test/ut/tools/annotation/testcase/annotated/non_annotation/bar.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5e05a6b7f34c625c665d9b324619958d2f5a3b --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/non_annotation/bar.py @@ -0,0 +1,5 @@ +import nni + +def bar(): + """I'm doc string""" + return nni.report_final_result(0) diff --git a/test/ut/tools/annotation/testcase/annotated/non_annotation/foo.py b/test/ut/tools/annotation/testcase/annotated/non_annotation/foo.py new file mode 100644 index 0000000000000000000000000000000000000000..b376c9941fda362c8d2c5c8ddb35db3e0b003402 --- /dev/null +++ b/test/ut/tools/annotation/testcase/annotated/non_annotation/foo.py @@ -0,0 +1 @@ +print('hello') diff --git a/test/ut/tools/annotation/testcase/searchspace.json b/test/ut/tools/annotation/testcase/searchspace.json new file mode 100644 index 0000000000000000000000000000000000000000..0ba6df6cfc3dfe26436543e3a1250efb71ede180 --- /dev/null +++ b/test/ut/tools/annotation/testcase/searchspace.json @@ -0,0 +1,189 @@ +{ + "handwrite/__line6/choice": { + "_type": "choice", + "_value": [ + "foo", + "bar" + ] + }, + "handwrite/conv_size/choice": { + "_type": "choice", + "_value": [ + 2, + 3, + 5, + 7 + ] + }, + "handwrite/abc/choice": { + "_type": "choice", + "_value": [ + "2", + 3, + "(5 * 6)", + 7 + ] + }, + "handwrite/__line9/function_choice": { + "_type": "choice", + "_value": [ + "max_pool", + "h_conv1", + "avg_pool" + ] + }, + "handwrite/max_pool/function_choice": { + "_type": "choice", + "_value": [ + "max_pool(h_conv1)", + "avg_pool(h_conv2, h_conv3)" + ] + }, + "handwrite/max_poo/function_choice": { + "_type": "choice", + "_value": [ + "max_poo(h_conv1)", + "(2 * 3 + 4)", + "(lambda x: 1 + x)" + ] + }, + "handwrite/__line19/qlognormal": { + "_type": "qlognormal", + "_value": [ + 1.2, + 3, + 4.5 + ] + }, + "mnist/self.conv_size/choice": { + "_type": "choice", + "_value": [ + 2, + 3, + 5, + 7 + ] + }, + "mnist/self.hidden_size/choice": { + "_type": "choice", + "_value": [ + 124, + 512, + 1024 + ] + }, + "mnist/self.learning_rate/randint": { + "_type": "randint", + "_value": [ + 2, + 3, + 5 + ] + }, + "mnist/tf.nn.relu/function_choice": { + "_type": "choice", + "_value": [ + "tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)", + "tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1)", + "tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)" + ] + }, + "mnist/max_pool/function_choice": { + "_type": 
"choice", + "_value": [ + "max_pool(h_conv1, self.pool_size)", + "avg_pool(h_conv1, self.pool_size)" + ] + }, + "mnist/batch_size/choice": { + "_type": "choice", + "_value": [ + 50, + 250, + 500 + ] + }, + "mnist/dropout_rate/choice": { + "_type": "choice", + "_value": [ + 1, + 5 + ] + }, + "dir.simple/conv_size/choice": { + "_type": "choice", + "_value": [ + 2, + 3, + 5, + 7 + ] + }, + "dir.simple/abc/choice": { + "_type": "choice", + "_value": [ + "2", + 3, + "(5 * 6)", + "{(1): 2, '3': 4}", + "[1, 2, 3]" + ] + }, + "dir.simple/max_pool/function_choice": { + "_type": "choice", + "_value": [ + "max_pool(h_conv1)", + "avg_pool(h_conv2, h_conv3)" + ] + }, + "dir.simple/max_poo/function_choice": { + "_type": "choice", + "_value": [ + "max_poo(h_conv1)", + "(2 * 3 + 4)", + "(lambda x: 1 + x)" + ] + }, + "nas/mutable_block_39": { + "_type": "mutable_layer", + "_value": { + "mutable_layer_0": { + "layer_choice": [ + "add_one()", + "add_two()", + "add_three()", + "add_four()" + ], + "optional_inputs": [ + "images" + ], + "optional_input_size": 1 + }, + "mutable_layer_1": { + "layer_choice": [ + "add_one()", + "add_two()", + "add_three()", + "add_four()" + ], + "optional_inputs": [ + "layer_1_out" + ], + "optional_input_size": 1 + }, + "mutable_layer_2": { + "layer_choice": [ + "add_one()", + "add_two()", + "add_three()", + "add_four()" + ], + "optional_inputs": [ + "layer_1_out", + "layer_2_out" + ], + "optional_input_size": 1 + } + } + } +} \ No newline at end of file diff --git a/test/ut/tools/annotation/testcase/usercode/dir/simple.py b/test/ut/tools/annotation/testcase/usercode/dir/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..193eedb633b3701b473e82e98969b18c380e78c2 --- /dev/null +++ b/test/ut/tools/annotation/testcase/usercode/dir/simple.py @@ -0,0 +1,15 @@ +def max_pool(k): + pass +h_conv1=1 +"""@nni.variable(nni.choice(2,3,5,7),name=conv_size)""" +conv_size = 5 +"""@nni.variable(nni.choice('2',3,5*6,{1:2, '3':4},[1,2,3]),name=abc)""" +abc = 5 +"""@nni.function_choice(max_pool(h_conv1), avg_pool(h_conv2,h_conv3), name=max_pool)""" +h_pool1 = max_pool(h_conv1) +"""@nni.function_choice(max_poo(h_conv1), 2 * 3 + 4, lambda x: 1+x, name=max_poo)""" +h_pool2 = max_poo(h_conv1) +test_acc=1 +'''@nni.report_intermediate_result(test_acc)''' +test_acc=2 +'''@nni.report_final_result(test_acc)''' diff --git a/test/ut/tools/annotation/testcase/usercode/mnist.py b/test/ut/tools/annotation/testcase/usercode/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..f734e6fd78fd01730a7c42ee39a5882d91564b45 --- /dev/null +++ b/test/ut/tools/annotation/testcase/usercode/mnist.py @@ -0,0 +1,209 @@ +# -*- encoding:utf8 -*- + +"""A deep MNIST classifier using convolutional layers. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import math +import tempfile +import tensorflow as tf + +from tensorflow.examples.tutorials.mnist import input_data + +logger = logging.getLogger('mnist') + +FLAGS = None + +class MnistNetwork(object): + def __init__(self, + channel_1_num = 32, + channel_2_num = 64, + conv_size = 5, + hidden_size = 1024, + pool_size = 2, + learning_rate = 0.0001, + x_dim = 784, + y_dim = 10): + self.channel_1_num = channel_1_num + self.channel_2_num = channel_2_num + '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)''' + self.conv_size = conv_size + '''@nni.variable(nni.choice(124,512,1024),name=self.hidden_size)''' + self.hidden_size = hidden_size + self.pool_size = pool_size + '''@nni.variable(nni.randint(2,3,5),name=self.learning_rate)''' + self.learning_rate = learning_rate + self.x_dim = x_dim + self.y_dim = y_dim + + def build_network(self): + self.x = tf.placeholder(tf.float32, [None, self.x_dim], name = 'input_x') + self.y = tf.placeholder(tf.float32, [None, self.y_dim], name = 'input_y') + self.keep_prob = tf.placeholder(tf.float32, name = 'keep_prob') + + # Reshape to use within a convolutional neural net. + # Last dimension is for "features" - there is only one here, since images are + # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. + with tf.name_scope('reshape'): + try: + input_dim = int(math.sqrt(self.x_dim)) + except: + #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) + logger.debug('input dim cannot be sqrt and reshape. input dim: ', str(self.x_dim)) + raise + x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1]) + + # First convolutional layer - maps one grayscale image to 32 feature maps. + with tf.name_scope('conv1'): + W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num]) + b_conv1 = bias_variable([self.channel_1_num]) + """@nni.function_choice(tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1),name=tf.nn.relu)""" + h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) + + # Pooling layer - downsamples by 2X. + with tf.name_scope('pool1'): + """@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)""" + h_pool1 = max_pool(h_conv1, self.pool_size) + + # Second convolutional layer -- maps 32 feature maps to 64. + with tf.name_scope('conv2'): + W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num]) + b_conv2 = bias_variable([self.channel_2_num]) + h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) + + # Second pooling layer. + with tf.name_scope('pool2'): + #"""@nni.dynamic(input={cnn_block:1, concat:2},function_choice={"cnn_block":(x,nni.choice([3,4])),"cnn_block":(x),"concat":(x,y)},limit={"cnn_block.input":[concat,input],"concat.input":[this.depth-1,this.depth-3,this.depth-5],"graph.width":[1]})""" + h_pool2 = max_pool(h_conv2, self.pool_size) + + # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image + # is down to 7x7x64 feature maps -- maps this to 1024 features. 
+ last_dim = int(input_dim / (self.pool_size * self.pool_size)) + with tf.name_scope('fc1'): + W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size]) + b_fc1 = bias_variable([self.hidden_size]) + + h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) + h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) + + # Dropout - controls the complexity of the model, prevents co-adaptation of features. + with tf.name_scope('dropout'): + h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) + + # Map the 1024 features to 10 classes, one for each digit + with tf.name_scope('fc2'): + W_fc2 = weight_variable([self.hidden_size, self.y_dim]) + b_fc2 = bias_variable([self.y_dim]) + y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 + + with tf.name_scope('loss'): + cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = y_conv)) + with tf.name_scope('adam_optimizer'): + self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy) + + with tf.name_scope('accuracy'): + correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1)) + self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + + return + +def conv2d(x, W): + """conv2d returns a 2d convolution layer with full stride.""" + return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + +def max_pool(x, pool_size): + """max_pool downsamples a feature map by 2X.""" + return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') +def avg_pool(x,pool_size): + return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1], + strides=[1, pool_size, pool_size, 1], padding='SAME') + +def weight_variable(shape): + """weight_variable generates a weight variable of a given shape.""" + initial = tf.truncated_normal(shape, stddev=0.1) + return tf.Variable(initial) + +def bias_variable(shape): + """bias_variable generates a bias variable of a given shape.""" + initial = tf.constant(0.1, shape=shape) + return tf.Variable(initial) + +def main(): + # Import data + data_dir= '/tmp/tensorflow/mnist/input_data' + mnist = input_data.read_data_sets(data_dir, one_hot=True) + logger.debug('Mnist download data done.') + + # Create the model + # Build the graph for the deep net + mnist_network = MnistNetwork() + mnist_network.build_network() + logger.debug('Mnist build network done.') + + # Write log + graph_location = tempfile.mkdtemp() + logger.debug('Saving graph to: %s', graph_location) + # print('Saving graph to: %s' % graph_location) + train_writer = tf.summary.FileWriter(graph_location) + train_writer.add_graph(tf.get_default_graph()) + + test_acc = 0.0 + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + batch_num=200 + for i in range(batch_num): + '''@nni.variable(nni.choice(50,250,500),name=batch_size)''' + batch_size=50 + batch = mnist.train.next_batch(batch_size) + '''@nni.variable(nni.choice(1,5),name=dropout_rate)''' + dropout_rate=0.5 + mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate}) + + if i % 100 == 0: + #train_accuracy = mnist_network.accuracy.eval(feed_dict={ + # mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']}) + #print('step %d, training accuracy %g' % (i, train_accuracy)) + + test_acc = mnist_network.accuracy.eval(feed_dict={ + mnist_network.x: mnist.test.images, mnist_network.y: 
mnist.test.labels, mnist_network.keep_prob: 1.0}) + '''@nni.report_intermediate_result(test_acc)''' + + test_acc = mnist_network.accuracy.eval(feed_dict={ + mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0}) + '''@nni.report_final_result(test_acc)''' + + +def generate_default_params(): + params = {'data_dir': '/tmp/tensorflow/mnist/input_data', + 'dropout_rate': 0.5, + 'channel_1_num': 32, + 'channel_2_num': 64, + 'conv_size': 5, + 'pool_size': 2, + 'hidden_size': 1024, + 'batch_size': 50, + 'batch_num': 200, + 'learning_rate': 1e-4} + return params + +if __name__ == '__main__': + # run command: python mnist.py --init_file_path ./init.json + + #FLAGS, unparsed = parse_command() + #original_params = parse_init_json(FLAGS.init_file_path, {}) + + #pipe_interface.set_params_to_env() + '''@nni.get_next_parameter()''' + try: + params = generate_default_params() + logger.debug('params') + logger.debug('params update') + main() + except: + logger.exception('Got some exception in while loop in mnist.py') + raise diff --git a/test/ut/tools/annotation/testcase/usercode/nas.py b/test/ut/tools/annotation/testcase/usercode/nas.py new file mode 100644 index 0000000000000000000000000000000000000000..5838e84f039834156896de9b6c540e5524cf4c33 --- /dev/null +++ b/test/ut/tools/annotation/testcase/usercode/nas.py @@ -0,0 +1,53 @@ +import time + +def add_one(inputs): + return inputs + 1 + +def add_two(inputs): + return inputs + 2 + +def add_three(inputs): + return inputs + 3 + +def add_four(inputs): + return inputs + 4 + + +def main(): + + images = 5 + + """@nni.mutable_layers( + { + layer_choice: [add_one(), add_two(), add_three(), add_four()], + optional_inputs: [images], + optional_input_size: 1, + layer_output: layer_1_out + }, + { + layer_choice: [add_one(), add_two(), add_three(), add_four()], + optional_inputs: [layer_1_out], + optional_input_size: 1, + layer_output: layer_2_out + }, + { + layer_choice: [add_one(), add_two(), add_three(), add_four()], + optional_inputs: [layer_1_out, layer_2_out], + optional_input_size: 1, + layer_output: layer_3_out + } + )""" + + """@nni.report_intermediate_result(layer_1_out)""" + time.sleep(2) + """@nni.report_intermediate_result(layer_2_out)""" + time.sleep(2) + """@nni.report_intermediate_result(layer_3_out)""" + time.sleep(2) + + layer_3_out = layer_3_out + 10 + + """@nni.report_final_result(layer_3_out)""" + +if __name__ == '__main__': + main() diff --git a/test/ut/tools/annotation/testcase/usercode/non_annotation/bar.py b/test/ut/tools/annotation/testcase/usercode/non_annotation/bar.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5e05a6b7f34c625c665d9b324619958d2f5a3b --- /dev/null +++ b/test/ut/tools/annotation/testcase/usercode/non_annotation/bar.py @@ -0,0 +1,5 @@ +import nni + +def bar(): + """I'm doc string""" + return nni.report_final_result(0) diff --git a/test/ut/tools/annotation/testcase/usercode/non_annotation/foo.py b/test/ut/tools/annotation/testcase/usercode/non_annotation/foo.py new file mode 100644 index 0000000000000000000000000000000000000000..b376c9941fda362c8d2c5c8ddb35db3e0b003402 --- /dev/null +++ b/test/ut/tools/annotation/testcase/usercode/non_annotation/foo.py @@ -0,0 +1 @@ +print('hello') diff --git a/test/ut/tools/annotation/testcase/usercode/nonpy.txt b/test/ut/tools/annotation/testcase/usercode/nonpy.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce013625030ba8dba906f756967f9e9ca394464a --- /dev/null +++ 
b/test/ut/tools/annotation/testcase/usercode/nonpy.txt @@ -0,0 +1 @@ +hello diff --git a/test/ut/tools/nnictl/config_files/invalid/custom-tuner-1.yml b/test/ut/tools/nnictl/config_files/invalid/custom-tuner-1.yml new file mode 100644 index 0000000000000000000000000000000000000000..5fda582d43831cc2da3e9ef893d6bd889ac94103 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/custom-tuner-1.yml @@ -0,0 +1,25 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +# error: no className +tuner: + codeDir: ./ + classFileName: mytuner.py + +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/custom-tuner-2.yml b/test/ut/tools/nnictl/config_files/invalid/custom-tuner-2.yml new file mode 100644 index 0000000000000000000000000000000000000000..dca12da1015207803cf5f2525e8c9715e0541072 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/custom-tuner-2.yml @@ -0,0 +1,27 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +# error: builtinTunerName conflicts with custom tuner settings +tuner: + codeDir: ./ + classFileName: mytuner.py + className: MyTuner + builtinTunerName: Random + +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/mytuner.py b/test/ut/tools/nnictl/config_files/invalid/mytuner.py new file mode 100644 index 0000000000000000000000000000000000000000..4979cf58132344ae1b2f12520bbcb5fce767d760 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/mytuner.py @@ -0,0 +1,5 @@ +from nni import Tuner + +class MyTuner(Tuner): + def __init__(self): + pass diff --git a/test/ut/tools/nnictl/config_files/invalid/no-tuner.yml b/test/ut/tools/nnictl/config_files/invalid/no-tuner.yml new file mode 100644 index 0000000000000000000000000000000000000000..3cc59ddaa3bbe3fdac09444339faae01b5ccc0f0 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/no-tuner.yml @@ -0,0 +1,21 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +# error: no tuner or advisor +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/search_space.json b/test/ut/tools/nnictl/config_files/invalid/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c26cdce369fa13e3fdf7c34f10b9cd89a6fc931e --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/search_space.json @@ -0,0 +1,6 @@ +{ + "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]}, + "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]}, + "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}, + "momentum":{"_type":"uniform","_value":[0, 1]} +} diff --git a/test/ut/tools/nnictl/config_files/invalid/searchspace-path.yml 
b/test/ut/tools/nnictl/config_files/invalid/searchspace-path.yml new file mode 100644 index 0000000000000000000000000000000000000000..0ed1321194a819e4375c145acbb0632465c42232 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/searchspace-path.yml @@ -0,0 +1,24 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 + +# error: searchSpacePath can not be found +searchSpacePath: ./wrong_search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 mnist.py --epochs 1 --batch_num 10 + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/tuner-wrong-key.yml b/test/ut/tools/nnictl/config_files/invalid/tuner-wrong-key.yml new file mode 100644 index 0000000000000000000000000000000000000000..35cae1ac329851d7942f5191363c66cbc2b6e97c --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/tuner-wrong-key.yml @@ -0,0 +1,24 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + # error: wrong key + wrongTunerKey: abc + +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/wrong-class-args.yml b/test/ut/tools/nnictl/config_files/invalid/wrong-class-args.yml new file mode 100644 index 0000000000000000000000000000000000000000..fbbed974c88bee173419d7fe03f7b36ed3c8b729 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/wrong-class-args.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + # wrong class args, should be detected by assessor validator + optimize_mode: aaaaaa +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/config_files/invalid/wrong-training-service.yml b/test/ut/tools/nnictl/config_files/invalid/wrong-training-service.yml new file mode 100644 index 0000000000000000000000000000000000000000..0ce77c6160f9206039cdc06004415ffdefbd3739 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/invalid/wrong-training-service.yml @@ -0,0 +1,23 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +# error: wrong training service name +trainingServicePlatform: local222 diff --git a/test/ut/tools/nnictl/config_files/test_files/test_json.json b/test/ut/tools/nnictl/config_files/test_files/test_json.json new file mode 100644 index 0000000000000000000000000000000000000000..82770c6491fe15a1b5ecc9420e38b201e7424252 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/test_files/test_json.json @@ -0,0 +1 @@ +{"field":"test"} diff --git 
a/test/ut/tools/nnictl/config_files/test_files/test_yaml.yml b/test/ut/tools/nnictl/config_files/test_files/test_yaml.yml new file mode 100644 index 0000000000000000000000000000000000000000..b8bfc243a0cf819cd5e44825a6bb75e97f4c9d78 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/test_files/test_yaml.yml @@ -0,0 +1 @@ +field: test diff --git a/test/ut/tools/nnictl/config_files/valid/main.py b/test/ut/tools/nnictl/config_files/valid/main.py new file mode 100644 index 0000000000000000000000000000000000000000..a40d7a46c17c51cee7b82edb6761124cee68b006 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/valid/main.py @@ -0,0 +1 @@ +print('my trial') diff --git a/test/ut/tools/nnictl/config_files/valid/search_space.json b/test/ut/tools/nnictl/config_files/valid/search_space.json new file mode 100644 index 0000000000000000000000000000000000000000..c26cdce369fa13e3fdf7c34f10b9cd89a6fc931e --- /dev/null +++ b/test/ut/tools/nnictl/config_files/valid/search_space.json @@ -0,0 +1,6 @@ +{ + "batch_size": {"_type":"choice", "_value": [16, 32, 64, 128]}, + "hidden_size":{"_type":"choice","_value":[128, 256, 512, 1024]}, + "lr":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}, + "momentum":{"_type":"uniform","_value":[0, 1]} +} diff --git a/test/ut/tools/nnictl/config_files/valid/test.yml b/test/ut/tools/nnictl/config_files/valid/test.yml new file mode 100644 index 0000000000000000000000000000000000000000..8565cd5f59348fa0f61ee90e9793fdaec5c08ea6 --- /dev/null +++ b/test/ut/tools/nnictl/config_files/valid/test.yml @@ -0,0 +1,22 @@ +authorName: nni +experimentName: default_test +maxExecDuration: 15m +maxTrialNum: 2 +trialConcurrency: 2 +searchSpacePath: ./search_space.json + +tuner: + builtinTunerName: Random +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize +trial: + codeDir: ./ + command: python3 main.py + +useAnnotation: false +multiPhase: false +multiThread: false + +trainingServicePlatform: local diff --git a/test/ut/tools/nnictl/mock/__init__.py b/test/ut/tools/nnictl/mock/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/ut/tools/nnictl/mock/experiment.py b/test/ut/tools/nnictl/mock/experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a42ffeb59ec23ec2bad013a7d0caad4a6bcf42 --- /dev/null +++ b/test/ut/tools/nnictl/mock/experiment.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
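+#
+# Helpers for the nnictl unit tests: create_mock_experiment() registers a fake
+# experiment (id 'xOpEwA5w') in nnictl's metadata store and backs it with a
+# long-lived dummy process; stop_mock_experiment() kills that process and
+# removes the entry again.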
+
+import argparse
+from pathlib import Path
+from subprocess import Popen, PIPE, STDOUT
+from nni.tools.nnictl.config_utils import Experiments
+from nni.tools.nnictl.common_utils import print_green
+from nni.tools.nnictl.command_utils import kill_command
+from nni.tools.nnictl.nnictl_utils import get_yml_content
+
+def create_mock_experiment():
+    nnictl_experiment_config = Experiments()
+    nnictl_experiment_config.add_experiment('xOpEwA5w', 8080, 123456,
+                                            'local', 'example_sklearn-classification')
+    # mock process
+    cmds = ['sleep', '3600000']
+    process = Popen(cmds, stdout=PIPE, stderr=STDOUT)
+    nnictl_experiment_config.update_experiment('xOpEwA5w', 'pid', process.pid)
+    nnictl_experiment_config.update_experiment('xOpEwA5w', 'port', 8080)
+    nnictl_experiment_config.update_experiment('xOpEwA5w', 'webuiUrl', ['http://localhost:8080'])
+    print_green("experiment started successfully, experiment id: xOpEwA5w")
+
+def stop_mock_experiment():
+    nnictl_experiment_config = Experiments()
+    experiments_dict = nnictl_experiment_config.get_all_experiments()
+    kill_command(experiments_dict['xOpEwA5w'].get('pid'))
+    nnictl_experiment_config = Experiments()
+    nnictl_experiment_config.remove_experiment('xOpEwA5w')
+
+def generate_args_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('id', nargs='?')
+    parser.add_argument('--port', '-p', type=int, dest='port')
+    parser.add_argument('--all', '-a', action='store_true')
+    parser.add_argument('--head', type=int)
+    parser.add_argument('--tail', type=int)
+    return parser
+
+def generate_args():
+    parser = generate_args_parser()
+    args = parser.parse_args(['xOpEwA5w'])
+    return args
diff --git a/test/ut/tools/nnictl/mock/nnictl_metadata/.experiment b/test/ut/tools/nnictl/mock/nnictl_metadata/.experiment
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b
--- /dev/null
+++ b/test/ut/tools/nnictl/mock/nnictl_metadata/.experiment
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/test/ut/tools/nnictl/mock/nnictl_metadata/xOpEwA5w/db/nni.sqlite b/test/ut/tools/nnictl/mock/nnictl_metadata/xOpEwA5w/db/nni.sqlite
new file mode 100644
index 0000000000000000000000000000000000000000..df031e664e429cd7a25886d4cbd66845e86b52fc
Binary files /dev/null and b/test/ut/tools/nnictl/mock/nnictl_metadata/xOpEwA5w/db/nni.sqlite differ
diff --git a/test/ut/tools/nnictl/mock/restful_server.py b/test/ut/tools/nnictl/mock/restful_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..c104d51b0ce34d506c76f0a5c84db8db2ed28b36
--- /dev/null
+++ b/test/ut/tools/nnictl/mock/restful_server.py
@@ -0,0 +1,186 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
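+#
+# Mocked NNI REST API endpoints, built on the `responses` library. Each helper
+# registers one canned response; init_response() registers them all. A rough
+# usage sketch, mirroring the test cases in this directory:
+#
+#     @responses.activate
+#     def test_status(self):
+#         init_response()  # registered responses intercept matching requests
+#         self.assertEqual('RUNNING', get_experiment_status(8080))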
+ +import responses + +def mock_check_status(): + responses.add( + responses.GET, + "http://localhost:8080/api/v1/nni/check-status", + json={"status":"RUNNING","errors":[]}, + status=200 + ) + +def mock_version(): + responses.add( + responses.GET, + "http://localhost:8080/api/v1/nni/version", + json={'value':1.8}, + status=200 + ) + +def mock_get_experiment_profile(): + responses.add( + responses.GET, + "http://localhost:8080/api/v1/nni/experiment", + json={"id":"bkfhOdUl","revision":5,"execDuration":10,"logDir":"/home/shinyang/nni-experiments/bkfhOdUl", + "nextSequenceId":2,"params":{"authorName":"default","experimentName":"example_sklearn-classification", + "trialConcurrency":1,"maxExecDuration":3600,"maxTrialNum":1, + "searchSpace":"{\"C\": {\"_type\": \"uniform\", \"_value\": [0.1, 1]}, \ + \"kernel\": {\"_type\": \"choice\", \"_value\": [\"linear\", \"rbf\", \"poly\", \"sigmoid\"]}, \ + \"degree\": {\"_type\": \"choice\", \"_value\": [1, 2, 3, 4]}, \"gamma\": {\"_type\": \"uniform\", \ + \"_value\": [0.01, 0.1]}}", \ + "trainingServicePlatform":"local","tuner":{"builtinTunerName":"TPE","classArgs":{"optimize_mode":"maximize"}, \ + "checkpointDir":"/home/shinyang/nni-experiments/bkfhOdUl/checkpoint"},"versionCheck":"true", \ + "clusterMetaData":[{"key":"codeDir","value":"/home/shinyang/folder/examples/trials/sklearn/classification/."}, \ + {"key":"command","value":"python3 main.py"}]},"startTime":1600326895536,"endTime":1600326910605}, + status=200 + ) + +def mock_update_experiment_profile(): + responses.add( + responses.PUT, 'http://localhost:8080/api/v1/nni/experiment', + json={"status":"RUNNING","errors":[]}, + status=200, + content_type='application/json', + ) + +def mock_import_data(): + responses.add( + responses.POST, 'http://localhost:8080/api/v1/nni/experiment/import-data', + json={"result":"data"}, + status=201, + content_type='application/json', + ) + +def mock_start_experiment(): + responses.add( + responses.POST, 'http://localhost:8080/api/v1/nni/experiment', + json={"status":"RUNNING","errors":[]}, + status=201, + content_type='application/json', + ) + +def mock_get_trial_job_statistics(): + responses.add( + responses.GET, 'http://localhost:8080/api/v1/nni/job-statistics', + json=[{"trialJobStatus":"SUCCEEDED","trialJobNumber":1}], + status=200, + content_type='application/json', + ) + +def mock_set_cluster_metadata(): + responses.add( + responses.PUT, 'http://localhost:8080/api/v1/nni/experiment/cluster-metadata', + json=[{"trialJobStatus":"SUCCEEDED","trialJobNumber":1}], + status=201, + content_type='application/json', + ) + +def mock_list_trial_jobs(): + responses.add( + responses.GET, 'http://localhost:8080/api/v1/nni/trial-jobs', + json=[{"id":"GPInz","status":"SUCCEEDED","hyperParameters":["{\"parameter_id\":0, \ + \"parameter_source\":\"algorithm\",\"parameters\":{\"C\":0.8748364659110364, \ + \"kernel\":\"linear\",\"degree\":1,\"gamma\":0.040451413392113666}, \ + \"parameter_index\":0}"],"logPath":"file://localhost:/home/shinyang/nni-experiments/bkfhOdUl/trials/GPInz", + "startTime":1600326905581,"sequenceId":0,"endTime":1600326906629, + "finalMetricData":[{"timestamp":1600326906493,"trialJobId":"GPInz","parameterId":"0", + "type":"FINAL","sequence":0,"data":"\"0.9866666666666667\""}]}], + status=200, + content_type='application/json', + ) + +def mock_get_trial_job(): + responses.add( + responses.GET, 'http://localhost:8080/api/v1/nni/trial-jobs/:id', + json={"id":"GPInz","status":"SUCCEEDED","hyperParameters":["{\"parameter_id\":0, \ + 
\"parameter_source\":\"algorithm\",\"parameters\":{\"C\":0.8748364659110364, \ + \"kernel\":\"linear\",\"degree\":1,\"gamma\":0.040451413392113666}, \ + \"parameter_index\":0}"],"logPath":"file://localhost:/home/shinyang/nni-experiments/bkfhOdUl/trials/GPInz", + "startTime":1600326905581,"sequenceId":0,"endTime":1600326906629, + "finalMetricData":[{"timestamp":1600326906493,"trialJobId":"GPInz","parameterId":"0","type":"FINAL", + "sequence":0,"data":"\"0.9866666666666667\""}]}, + status=200, + content_type='application/json', + ) + +def mock_add_trial_job(): + responses.add( + responses.POST, 'http://localhost:8080/api/v1/nni/trial-jobs', + json=[{"trialJobStatus":"SUCCEEDED","trialJobNumber":1}], + status=201, + content_type='application/json', + ) + +def mock_cancel_trial_job(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/trial-jobs/:id', + json=[{"trialJobStatus":"SUCCEEDED","trialJobNumber":1}], + status=200, + content_type='application/json', + ) + +def mock_get_metric_data(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/metric-data/:job_id*?', + json=[{"timestamp":1600326906486,"trialJobId":"GPInz","parameterId":"0", + "type":"PERIODICAL","sequence":0,"data":"\"0.9866666666666667\""}, + {"timestamp":1600326906493,"trialJobId":"GPInz","parameterId":"0", + "type":"FINAL","sequence":0,"data":"\"0.9866666666666667\""}], + status=200, + content_type='application/json', + ) + +def mock_get_metric_data_by_range(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/metric-data-range/:min_seq_id/:max_seq_id', + json=[{"timestamp":1600326906486,"trialJobId":"GPInz","parameterId":"0", + "type":"PERIODICAL","sequence":0,"data":"\"0.9866666666666667\""}, + {"timestamp":1600326906493,"trialJobId":"GPInz","parameterId":"0", + "type":"FINAL","sequence":0,"data":"\"0.9866666666666667\""}], + status=200, + content_type='application/json', + ) + +def mock_get_latest_metric_data(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/metric-data-latest/', + json=[{"timestamp":1600326906493,"trialJobId":"GPInz","parameterId":"0", + "type":"FINAL","sequence":0,"data":"\"0.9866666666666667\""},{"timestamp":1600326906486, + "trialJobId":"GPInz","parameterId":"0","type":"PERIODICAL", + "sequence":0,"data":"\"0.9866666666666667\""}], + status=200, + content_type='application/json', + ) + +def mock_get_trial_log(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/trial-file/:id/:filename', + json={"status":"RUNNING","errors":[]}, + status=200, + content_type='application/json', + ) + +def mock_export_data(): + responses.add( + responses.DELETE, 'http://localhost:8080/api/v1/nni/export-data', + json={"status":"RUNNING","errors":[]}, + status=200, + content_type='application/json', + ) + +def init_response(): + mock_check_status() + mock_version() + mock_get_experiment_profile() + mock_set_cluster_metadata() + mock_list_trial_jobs() + mock_get_trial_job() + mock_add_trial_job() + mock_cancel_trial_job() + mock_get_metric_data() + mock_get_metric_data_by_range() + mock_get_latest_metric_data() + mock_get_trial_log() + mock_export_data() diff --git a/test/ut/tools/nnictl/test_common_utils.py b/test/ut/tools/nnictl/test_common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f9211015528e05a5dcc4c1d19af7198a6a6c28f7 --- /dev/null +++ b/test/ut/tools/nnictl/test_common_utils.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+ +from pathlib import Path +from subprocess import Popen, PIPE, STDOUT +import sys +from unittest import TestCase, main, skipIf + +sys.path.append(str(Path(__file__).parent)) +from mock.restful_server import init_response + +from nni.tools.nnictl.command_utils import kill_command +from nni.tools.nnictl.common_utils import get_yml_content, get_json_content, detect_process + +cwd = Path(__file__).parent + +class CommonUtilsTestCase(TestCase): + + @classmethod + def setUpClass(cls): + init_response() + + def test_get_yml(self): + yml_path = cwd / 'config_files/test_files/test_yaml.yml' + content = get_yml_content(str(yml_path)) + self.assertEqual(content, {'field':'test'}) + + def test_get_json(self): + json_path = cwd / 'config_files/test_files/test_json.json' + content = get_json_content(str(json_path)) + self.assertEqual(content, {'field':'test'}) + + @skipIf(sys.platform == 'win32', 'FIXME: Fails randomly on Windows, cannot reproduce locally') + def test_detect_process(self): + if sys.platform == 'win32': + cmds = ['timeout', '360000'] + else: + cmds = ['sleep', '360000'] + process = Popen(cmds, stdout=PIPE, stderr=STDOUT) + self.assertTrue(detect_process(process.pid)) + kill_command(process.pid) + +if __name__ == '__main__': + main() diff --git a/test/ut/tools/nnictl/test_config_utils.py b/test/ut/tools/nnictl/test_config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a19e864daf2848fcf4cdf87e9bacf4b62c9ab08e --- /dev/null +++ b/test/ut/tools/nnictl/test_config_utils.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from pathlib import Path +from unittest import TestCase, main +from nni.tools.nnictl.config_utils import Config, Experiments + +HOME_PATH = str(Path(__file__).parent / "mock/nnictl_metadata") + +class CommonUtilsTestCase(TestCase): + + def test_update_experiment(self): + experiment = Experiments(HOME_PATH) + experiment.add_experiment('xOpEwA5w', 8081, 'N/A', 'local', 'test', endTime='N/A', status='INITIALIZED') + self.assertTrue('xOpEwA5w' in experiment.get_all_experiments()) + experiment.remove_experiment('xOpEwA5w') + self.assertFalse('xOpEwA5w' in experiment.get_all_experiments()) + + def test_get_config(self): + config = Config('xOpEwA5w', HOME_PATH) + self.assertEqual(config.get_config()['experimentName'], 'test_config') + +if __name__ == '__main__': + main() diff --git a/test/ut/tools/nnictl/test_config_validation.py b/test/ut/tools/nnictl/test_config_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb0543d9fe711cee5def552e73e8a09a6e8ed20 --- /dev/null +++ b/test/ut/tools/nnictl/test_config_validation.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
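+#
+# Schema validation round-trip: every config under config_files/valid must pass
+# validate_all_content(), and every config under config_files/invalid must make
+# it raise a SchemaError.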
+
+import glob
+from unittest import TestCase, main
+from schema import SchemaError
+from nni.tools.nnictl.launcher_utils import validate_all_content
+from nni.tools.nnictl.nnictl_utils import get_yml_content
+from nni.tools.nnictl.common_utils import print_error, print_green
+
+class ConfigValidationTestCase(TestCase):
+    def test_valid_config(self):
+        file_names = glob.glob('./config_files/valid/*.yml')
+        for fn in file_names:
+            experiment_config = get_yml_content(fn)
+            validate_all_content(experiment_config, fn)
+            print_green('config file:', fn, 'validation success!')
+
+    def test_invalid_config(self):
+        file_names = glob.glob('./config_files/invalid/*.yml')
+        for fn in file_names:
+            experiment_config = get_yml_content(fn)
+            try:
+                validate_all_content(experiment_config, fn)
+                print_error('config file:', fn, 'Schema error should be raised for invalid config file!')
+                assert False
+            except SchemaError as e:
+                print_green('config file:', fn, 'Expected error caught:', e)
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/tools/nnictl/test_nnictl_utils.py b/test/ut/tools/nnictl/test_nnictl_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..696534a7bdb2f66f1346ec38b79137cdfdbc9db9
--- /dev/null
+++ b/test/ut/tools/nnictl/test_nnictl_utils.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(__file__))
+from mock.restful_server import init_response
+from mock.experiment import create_mock_experiment, stop_mock_experiment, generate_args_parser, generate_args
+
+from nni.tools.nnictl.nnictl_utils import get_experiment_time, get_experiment_status, \
+check_experiment_id, parse_ids, get_config_filename, get_experiment_port, check_rest, \
+trial_ls, list_experiment
+import unittest
+from unittest import TestCase, main
+import responses
+
+# FIXME: debug it later
+# This test case failed on Windows and the output was messed up on VSO web.
+# https://msrasrg.visualstudio.com/NNIOpenSource/_build/results?buildId=15665
+
+@unittest.skipIf(sys.platform == 'win32', 'Failed, debug later')
+class CommonUtilsTestCase(TestCase):
+    def setUp(self):
+        init_response()
+        create_mock_experiment()
+
+    def tearDown(self):
+        stop_mock_experiment()
+
+    @responses.activate
+    def test_get_experiment_status(self):
+        self.assertEqual('RUNNING', get_experiment_status(8080))
+
+    @responses.activate
+    def test_check_experiment_id(self):
+        parser = generate_args_parser()
+        args = parser.parse_args(['xOpEwA5w'])
+        self.assertEqual('xOpEwA5w', check_experiment_id(args))
+
+    @responses.activate
+    def test_parse_ids(self):
+        parser = generate_args_parser()
+        args = parser.parse_args(['xOpEwA5w'])
+        self.assertEqual(['xOpEwA5w'], parse_ids(args))
+
+    @responses.activate
+    def test_get_config_file_name(self):
+        args = generate_args()
+        self.assertEqual('xOpEwA5w', get_config_filename(args))
+
+    @responses.activate
+    def test_get_experiment_port(self):
+        args = generate_args()
+        self.assertEqual(8080, get_experiment_port(args))
+
+    @responses.activate
+    def test_check_rest(self):
+        args = generate_args()
+        self.assertEqual(True, check_rest(args))
+
+    @responses.activate
+    def test_trial_ls(self):
+        args = generate_args()
+        trials = trial_ls(args)
+        self.assertEqual(trials[0]['id'], 'GPInz')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/ut/tools/trial_tool/__init__.py b/test/ut/tools/trial_tool/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb551d508894cb6a98c3ca658601a90babcc5d69
--- /dev/null
+++ b/test/ut/tools/trial_tool/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import os
+
+os.environ['NNI_PLATFORM'] = 'unittest'
+os.environ['NNI_TRIAL_JOB_ID'] = 'test_trial_job_id'
+os.environ["NNI_OUTPUT_DIR"] = "./unittest"
+os.environ["NNI_SYS_DIR"] = "./unittest"
+os.environ["NNI_EXP_ID"] = "test_exp_id"
+os.environ["MULTI_PHASE"] = "true"
diff --git a/test/ut/tools/trial_tool/test_file_channel.py b/test/ut/tools/trial_tool/test_file_channel.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f1117e46fb439823a82ed4fd4eb003170d3966e
--- /dev/null
+++ b/test/ut/tools/trial_tool/test_file_channel.py
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+import unittest
+from argparse import Namespace
+from datetime import datetime
+
+from nni.tools.trial_tool.base_channel import CommandType
+from nni.tools.trial_tool.file_channel import (FileChannel, command_path,
+                                               manager_commands_file_name)
+
+sys.path.append("..")
+
+runner_file_name = "commands/runner_commands.txt"
+manager_file_name = "commands/manager_commands.txt"
+
+
+class FileChannelTest(unittest.TestCase):
+
+    def setUp(self):
+        self.args = Namespace()
+        self.args.node_count = 1
+        self.args.node_id = None
+        if os.path.exists(command_path):
+            shutil.rmtree(command_path)
+
+    # FIXME:
+    # In the docstring of `BaseChannel.send(self, command, data)`,
+    # `data` is "string playload".
+    # But in its body it treats `data` as a dict.
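+    #
+    # Sketch of the mismatch (names from the FIXME above): the docstring suggests
+    # calls like
+    #     fc.send(CommandType.ReportGpuInfo, "command1")   # data as a string
+    # while the body expects something dict-shaped, e.g.
+    #     fc.send(CommandType.ReportGpuInfo, {"gpu": 0})   # hypothetical payload
+    # The tests below stay disabled until that contract is settled.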
+ + #def test_send(self): + # fc = None + # try: + # fc = FileChannel(self.args) + # fc.send(CommandType.ReportGpuInfo, "command1") + # fc.send(CommandType.ReportGpuInfo, "command2") + + # self.check_timeout(2, lambda: os.path.exists(runner_file_name)) + + # self.assertTrue(os.path.exists(runner_file_name)) + # with open(runner_file_name, "rb") as runner: + # lines = runner.readlines() + # self.assertListEqual(lines, [b'GI00000000000010"command1"\n', b'GI00000000000010"command2"\n']) + # finally: + # if fc is not None: + # fc.close() + + #def test_send_multi_node(self): + # fc1 = None + # fc2 = None + # try: + # runner1_file_name = "commands/runner_commands_1.txt" + # self.args.node_id = 1 + # fc1 = FileChannel(self.args) + # fc1.send(CommandType.ReportGpuInfo, "command1") + # # wait command have enough time to write before closed. + + # runner2_file_name = "commands/runner_commands_2.txt" + # self.args.node_id = 2 + # fc2 = FileChannel(self.args) + # fc2.send(CommandType.ReportGpuInfo, "command1") + + # self.check_timeout(2, lambda: os.path.exists(runner1_file_name) and os.path.exists(runner2_file_name)) + + # self.assertTrue(os.path.exists(runner1_file_name)) + # with open(runner1_file_name, "rb") as runner: + # lines1 = runner.readlines() + # self.assertTrue(os.path.exists(runner2_file_name)) + # with open(runner2_file_name, "rb") as runner: + # lines2 = runner.readlines() + # self.assertListEqual(lines1, [b'GI00000000000010"command1"\n']) + # self.assertListEqual(lines2, [b'GI00000000000010"command1"\n']) + # finally: + # if fc1 is not None: + # fc1.close() + # if fc2 is not None: + # fc2.close() + + # FIXME: + # `fc.received()` tries to read `BaseChannel.receive_queue` + # `BaseChannel.receive_queue` is defined in `BaseChannel.open()` + # `fc.open()` is never invoked. 
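+    #
+    # Sketch of the problem: since open() is never called here,
+    #     fc = FileChannel(self.args)
+    #     fc.received()   # fails: receive_queue is only created in open()
+    # so this test stays disabled as well.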
+ + #def test_receive(self): + # fc = None + # manager_file = None + # try: + # fc = FileChannel(self.args) + # message = fc.receive() + # self.assertEqual(message, (None, None)) + + # os.mkdir(command_path) + # manager_file = open(manager_file_name, "wb") + # manager_file.write(b'TR00000000000009"manager"\n') + # manager_file.flush() + + # self.check_timeout(2, lambda: fc.received()) + # message = fc.receive() + # self.assertEqual(message, (CommandType.NewTrialJob, "manager")) + + # manager_file.write(b'TR00000000000010"manager2"\n') + # manager_file.flush() + + # self.check_timeout(2, lambda: fc.received()) + # message = fc.receive() + # self.assertEqual(message, (CommandType.NewTrialJob, "manager2")) + # finally: + # if fc is not None: + # fc.close() + # if manager_file is not None: + # manager_file.close() + + def check_timeout(self, timeout, callback): + interval = 0.01 + start = datetime.now().timestamp() + count = int(timeout / interval) + for x in range(count): + if callback(): + break + time.sleep(interval) + print("checked {} times, {:3F} seconds".format(x, datetime.now().timestamp()-start)) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/vso_tools/_common.py b/test/vso_tools/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e5fb550f55e9e50ed1c18585457db427e0f29887 --- /dev/null +++ b/test/vso_tools/_common.py @@ -0,0 +1,37 @@ +from pathlib import Path +import os +import subprocess +import sys + +nni_root = str(Path(__file__).parents[2]) + +def build_wheel(): + python = sys.executable + version = sys.argv[1] + os_spec = { + 'linux': 'manylinux1_x86_64', + 'darwin': 'macosx_10_9_x86_64', + 'win32': 'win_amd64' + }[sys.platform] + + run_command(f'{python} setup.py clean --all') + run_command(f'{python} setup.py build_ts', NNI_RELEASE=version) + run_command(f'{python} setup.py bdist_wheel -p {os_spec}', NNI_RELEASE=version) + + return f'dist/nni-{version}-py3-none-{os_spec}.whl' + +def run_command(command, **extra_environ): + print('# run command:', command) + if isinstance(command, str): + command = command.split() + subprocess.run( + command, + stderr = subprocess.STDOUT, # azure will highlight stderr, which is annoying + cwd = nni_root, + check = True, + env = {**os.environ, **extra_environ} + ) + +def set_variable(key, value): + print('# set variable:', key, value) + print(f'##vso[task.setvariable variable={key}]{value}') diff --git a/test/vso_tools/build_wheel.py b/test/vso_tools/build_wheel.py new file mode 100644 index 0000000000000000000000000000000000000000..4355f14fa748e69d03505baafe861e2c86da2623 --- /dev/null +++ b/test/vso_tools/build_wheel.py @@ -0,0 +1,3 @@ +from _common import build_wheel + +build_wheel() diff --git a/test/vso_tools/generate_nni_version.py b/test/vso_tools/generate_nni_version.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa82067b236e94350f5f78a57a845b123f10ae7 --- /dev/null +++ b/test/vso_tools/generate_nni_version.py @@ -0,0 +1,6 @@ +from datetime import datetime + +from _common import set_variable + +time = datetime.now().strftime('%Y%m%d%H%M%S') +set_variable('NNI_RELEASE', '999.' 
+ time) diff --git a/test/vso_tools/install_nni.py b/test/vso_tools/install_nni.py new file mode 100644 index 0000000000000000000000000000000000000000..af277dd95356e75812a2eeec1d4367a101495600 --- /dev/null +++ b/test/vso_tools/install_nni.py @@ -0,0 +1,11 @@ +import sys + +from _common import build_wheel, run_command + +if len(sys.argv) <= 2: + extra_dep = '' +else: + extra_dep = f'[{sys.argv[2]}]' + +wheel = build_wheel() +run_command(f'{sys.executable} -m pip install {wheel}{extra_dep}') diff --git a/test/vso_tools/interim_patch.py b/test/vso_tools/interim_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..83f3183e97d25b3c6e90fa87e72327ace49b15c5 --- /dev/null +++ b/test/vso_tools/interim_patch.py @@ -0,0 +1,17 @@ +# fix setuptools.distutils import in torch +import os +from torch import utils + +file_name = os.path.join(os.path.dirname(utils.__file__), 'tensorboard/__init__.py') +dummy_file_name = os.path.join(os.path.dirname(file_name), '__dummy_init__.py') +if os.path.exists(file_name): + with open(file_name, 'r') as fr, open(dummy_file_name, 'w') as fw: + origin_text = fr.read() + patched_text = origin_text.replace('from setuptools import distutils', '', 1) + patched_text = patched_text.replace('LooseVersion = distutils.version.LooseVersion', 'from distutils.version import LooseVersion', 1) + patched_text = patched_text.replace('del distutils', '', 1) + fw.write(patched_text) + +if os.path.exists(dummy_file_name): + os.remove(file_name) + os.rename(dummy_file_name, file_name) diff --git a/test/vso_tools/start_docker.py b/test/vso_tools/start_docker.py new file mode 100644 index 0000000000000000000000000000000000000000..817e2292859d978912bbe2059b1b1d83088aca8e --- /dev/null +++ b/test/vso_tools/start_docker.py @@ -0,0 +1,33 @@ +""" +Build docker image, start container, then set its SSH service port to VSO variable "docker_port". 
+
+Usage:
+    python start_docker.py <version> <container> <password>
+"""
+
+import random
+import socket
+import sys
+
+from _common import build_wheel, run_command, set_variable
+
+# find idle port
+port = random.randint(10000, 20000)
+while True:
+    sock = socket.socket()
+    if sock.connect_ex(('localhost', port)) != 0:
+        sock.close()
+        break  # failed to connect, so this port is idle
+    sock.close()
+    port = random.randint(10000, 20000)
+
+version = sys.argv[1]
+container = sys.argv[2]
+password = sys.argv[3]
+
+run_command(f'docker build --build-arg NNI_RELEASE={version} -t nnidev/nni-nightly .')
+run_command(f'docker run --privileged -d -t -p {port}:22 --name {container} nnidev/nni-nightly')
+run_command(f'docker exec {container} useradd --create-home --password {password} nni')
+run_command(['docker', 'exec', container, 'bash', '-c', f'echo "nni:{password}" | chpasswd'])
+run_command(['docker', 'exec', container, 'bash', '-c', 'echo "nni ALL=(ALL:ALL) NOPASSWD:ALL" >> /etc/sudoers'])
+run_command(f'docker exec {container} service ssh start')
+set_variable('docker_port', port)
diff --git a/test/vso_tools/stop_docker.py b/test/vso_tools/stop_docker.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b1f563e60d606dd7f25d377f91febd97a8d3c6b
--- /dev/null
+++ b/test/vso_tools/stop_docker.py
@@ -0,0 +1,7 @@
+import sys
+
+from _common import run_command
+
+name = sys.argv[1]
+run_command(f'docker container stop {name}')
+run_command(f'docker container rm {name}')
diff --git a/ts/jupyter_extension/.gitignore b/ts/jupyter_extension/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..29aaa7e92682373dfe737ed54c7acd427d870ecd
--- /dev/null
+++ b/ts/jupyter_extension/.gitignore
@@ -0,0 +1,2 @@
+/build
+/nni
diff --git a/ts/jupyter_extension/README.md b/ts/jupyter_extension/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..019ba08cc517aa001efe1dd9e74eb661dd3871c7
--- /dev/null
+++ b/ts/jupyter_extension/README.md
@@ -0,0 +1,53 @@
+NNI is under development to support JupyterLab.
+You can install this extension to preview the feature.
+
+Currently you can view the NNI web UI inside JupyterLab.
+
+## Install ##
+
+To preview the extension, you need to have `nni` and `jupyterlab` installed first:
+
+```
+$ pip install nni jupyterlab
+```
+
+Then run the following command to register the extension:
+
+```
+$ nnictl jupyter-extension install
+```
+
+The command prints no message; exiting without error means success (see the Verify section at the end of this document).
+
+## Run ##
+
+For now, the extension does not support creating experiments, so you have to create one with nnictl:
+
+```
+$ nnictl create --config <path>/config.yml
+```
+
+Then launch JupyterLab:
+
+```
+$ jupyter lab --ip=0.0.0.0
+```
+
+Follow JupyterLab's guide to open its web page, and you should find an NNI icon.
+Click the icon to open the NNI web UI for your running experiment.
+
+## Uninstall ##
+
+To uninstall (or more accurately, unregister) the extension, run the following command:
+
+```
+$ nnictl jupyter-extension uninstall
+```
+
+## Known Issues ##
+
+The JupyterLab extension is under development, and several issues need to be fixed before the public announcement:
+
+ * Clicking a link in the experiment management page opens it outside JupyterLab. Fixing this requires changes in the web UI.
+ * Downloading log files might not work.
+ * POST requests (e.g. updating the experiment config) might not work.
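+
+## Verify ##
+
+To double-check that the registration succeeded, you can list the extensions
+known to JupyterLab; the output should include `nni-jupyter-extension`, the
+package name declared in this extension's `package.json`:
+
+```
+$ jupyter labextension list
+```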
diff --git a/ts/jupyter_extension/package.json b/ts/jupyter_extension/package.json new file mode 100644 index 0000000000000000000000000000000000000000..895daaa70a84d7830b1d5a2ffa2783c89775216b --- /dev/null +++ b/ts/jupyter_extension/package.json @@ -0,0 +1,20 @@ +{ + "name": "nni-jupyter-extension", + "version": "999.0.0-developing", + "license": "MIT", + "scripts": { + "build": "tsc && jupyter labextension build ." + }, + "dependencies": { + "@jupyterlab/application": "^3.0.11", + "@jupyterlab/launcher": "^3.0.9" + }, + "devDependencies": { + "@jupyterlab/builder": "^3.0.9" + }, + "jupyterlab": { + "extension": true, + "outputDir": "dist" + }, + "main": "build/index.js" +} diff --git a/ts/jupyter_extension/src/index.ts b/ts/jupyter_extension/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..097a9347c283ff3d87aff0c5c28fbe6014de45c4 --- /dev/null +++ b/ts/jupyter_extension/src/index.ts @@ -0,0 +1,77 @@ +import { JupyterFrontEnd, JupyterFrontEndPlugin } from '@jupyterlab/application'; +import { ICommandPalette, IFrame, Dialog, showDialog } from '@jupyterlab/apputils'; +import { PageConfig } from '@jupyterlab/coreutils'; +import { ILauncher } from '@jupyterlab/launcher'; +import { LabIcon } from '@jupyterlab/ui-components'; +import React from 'react'; + +const nniIconSvg = ` + + + + + + +`; +const nniIcon = new LabIcon({ name: 'nni', svgstr: nniIconSvg }); + +const NNI_URL = PageConfig.getBaseUrl() + 'nni/index'; +class NniWidget extends IFrame { + constructor(url) { + super({ + sandbox: [ + 'allow-same-origin', + 'allow-scripts', + ] + }); + this.url = url; + this.id = 'nni'; + this.title.label = 'NNI'; + this.title.icon = nniIcon; + this.title.closable = true; + } +} + +async function activate(app: JupyterFrontEnd, palette: ICommandPalette, launcher: ILauncher | null) { + console.log('nni extension is activated'); + const { commands, shell } = app; + const command = 'nni'; + const category = 'Other'; + + commands.addCommand(command, { + label: 'NNI', + caption: 'NNI', + icon: (args) => (args.isPalette ? 
null : nniIcon), + execute: () => { + fetch(NNI_URL).then(async (resp) => { + if (resp.status !== 200) { + showDialog({ + title: 'NNI-HPO Launcher Error', + body: React.createElement("div", null, + "please run command:", + React.createElement("div", { style: { color: 'blue', fontSize: "14px", lineHeight: "28px" } }, "nnictl create --config experiment.yml")), + buttons: [Dialog.warnButton({ label: 'OK' })] + }); + return; + } + shell.add(new NniWidget(NNI_URL), 'main'); + }); + } + }); + + palette.addItem({ command, category }); + + if (launcher) { + launcher.add({ command, category }); + } +} + +const extension: JupyterFrontEndPlugin = { + id: 'nni', + autoStart: true, + optional: [ILauncher], + requires: [ICommandPalette], + activate, +}; + +export default extension; diff --git a/ts/jupyter_extension/tsconfig.json b/ts/jupyter_extension/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..e767c1e3472f276d646acba485db667b840b8a34 --- /dev/null +++ b/ts/jupyter_extension/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "allowSyntheticDefaultImports": true, + "jsx": "react", + "module": "esnext", + "moduleResolution": "node", + "outDir": "build", + "rootDir": "src", + "target": "es2017" + }, + "include": [ + "src/*" + ] +} diff --git a/ts/jupyter_extension/yarn.lock b/ts/jupyter_extension/yarn.lock new file mode 100644 index 0000000000000000000000000000000000000000..d807d775d49cac28b516cf5e45767e5db670b931 --- /dev/null +++ b/ts/jupyter_extension/yarn.lock @@ -0,0 +1,3053 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@babel/runtime@^7.1.2": + version "7.14.8" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.14.8.tgz#7119a56f421018852694290b9f9148097391b446" + integrity sha512-twj3L8Og5SaCRCErB4x4ajbvBIVV77CGeFglHpeg5WC5FF8TZzBWXtTJ4MqaD9QszLYTtr+IsaAL2rEUevb+eg== + dependencies: + regenerator-runtime "^0.13.4" + +"@blueprintjs/core@^3.36.0", "@blueprintjs/core@^3.47.0": + version "3.47.0" + resolved "https://registry.yarnpkg.com/@blueprintjs/core/-/core-3.47.0.tgz#bf33155d224b742ba51c6e1cf5be4523290337a7" + integrity sha512-u+bfmCyPXwKZMnwY4+e/iWjO2vDUvr8hA8ydmV0afyvcEe7Sh85UPEorIgQ/CBuRIbVMNm8FpLsFzDxgkfrCNA== + dependencies: + "@blueprintjs/icons" "^3.27.0" + "@types/dom4" "^2.0.1" + classnames "^2.2" + dom4 "^2.1.5" + normalize.css "^8.0.1" + popper.js "^1.16.1" + react-lifecycles-compat "^3.0.4" + react-popper "^1.3.7" + react-transition-group "^2.9.0" + resize-observer-polyfill "^1.5.1" + tslib "~1.13.0" + +"@blueprintjs/icons@^3.27.0": + version "3.27.0" + resolved "https://registry.yarnpkg.com/@blueprintjs/icons/-/icons-3.27.0.tgz#f4c03e8bc2f9310f7eaefaab26dd91f65935da43" + integrity sha512-ItRioyrr2s70chclj5q38HS9omKOa15b3JZXv9JcMIFz+6w6rAcoAH7DA+5xIs27bFjax/SdAZp/eYXSw0+QpA== + dependencies: + classnames "^2.2" + tslib "~1.13.0" + +"@blueprintjs/select@^3.15.0": + version "3.16.6" + resolved "https://registry.yarnpkg.com/@blueprintjs/select/-/select-3.16.6.tgz#ae41a73bc7c23b07a20b0c50c71273c9d5d0d83d" + integrity sha512-lg2duuzlRw18+pbET6vlRY/TVSuuSI6wI4DObUiBAfU7G3fMa6d10Sp+0Yn00XaMPQ5y3MGn1gz0EbIJ3/A5OA== + dependencies: + "@blueprintjs/core" "^3.47.0" + classnames "^2.2" + tslib "~1.13.0" + +"@discoveryjs/json-ext@^0.5.0": + version "0.5.3" + resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.3.tgz#90420f9f9c6d3987f176a19a7d8e764271a2f55d" + integrity sha512-Fxt+AfXgjMoin2maPIYzFZnQjAXjAL0PHscM5pRTtatFqB+vZxAM9tLp2Optnuw3QOQC40jTNeGYFOMvyf7v9g== + 
+"@fortawesome/fontawesome-free@^5.12.0": + version "5.15.3" + resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-free/-/fontawesome-free-5.15.3.tgz#c36ffa64a2a239bf948541a97b6ae8d729e09a9a" + integrity sha512-rFnSUN/QOtnOAgqFRooTA3H57JLDm0QEG/jPdk+tLQNL/eWd+Aok8g3qCI+Q1xuDPWpGW/i9JySpJVsq8Q0s9w== + +"@hypnosphi/create-react-context@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@hypnosphi/create-react-context/-/create-react-context-0.3.1.tgz#f8bfebdc7665f5d426cba3753e0e9c7d3154d7c6" + integrity sha512-V1klUed202XahrWJLLOT3EXNeCpFHCcJntdFGI15ntCwau+jfT386w7OFTMaCqOgXUH1fa0w/I1oZs+i/Rfr0A== + dependencies: + gud "^1.0.0" + warning "^4.0.3" + +"@jupyterlab/application@^3.0.11": + version "3.0.11" + resolved "https://registry.yarnpkg.com/@jupyterlab/application/-/application-3.0.11.tgz#01d502656db1aa07afc439a58171897af2a2fdd1" + integrity sha512-UBqnRcXSy/Iz5vq1dCYkQvSkCFGPqjQdDFvOhvXacGxHklVjiku5Epltdbe2kQl+uhhn7VC4HEh1kzxiYamwcg== + dependencies: + "@fortawesome/fontawesome-free" "^5.12.0" + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/docregistry" "^3.0.11" + "@jupyterlab/rendermime" "^3.0.10" + "@jupyterlab/rendermime-interfaces" "^3.0.9" + "@jupyterlab/services" "^6.0.9" + "@jupyterlab/statedb" "^3.0.6" + "@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/application" "^1.13.1" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/messaging" "^1.4.3" + "@lumino/polling" "^1.3.3" + "@lumino/properties" "^1.2.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + +"@jupyterlab/apputils@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/apputils/-/apputils-3.0.9.tgz#504273fc0b69f74d8a8b87b7a89ee3d4decd679d" + integrity sha512-fsJjl+NX2+e+1FM7SMfpI1VsaPQsaIPnPGsdpQoboJJqdQJHuj1oPXNwc/aI1daEElirB15fYGCUGc2oUrv6RQ== + dependencies: + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/services" "^6.0.9" + "@jupyterlab/settingregistry" "^3.0.6" + "@jupyterlab/statedb" "^3.0.6" + "@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/domutils" "^1.2.3" + "@lumino/messaging" "^1.4.3" + "@lumino/properties" "^1.2.3" + "@lumino/signaling" "^1.4.3" + "@lumino/virtualdom" "^1.8.0" + "@lumino/widgets" "^1.16.1" + "@types/react" "^17.0.0" + buffer "^5.6.0" + react "^17.0.1" + react-dom "^17.0.1" + sanitize-html "~2.3.3" + url "^0.11.0" + +"@jupyterlab/builder@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/builder/-/builder-3.0.9.tgz#60b184accd63afced196d2369c886f6fe238acc4" + integrity sha512-IjWHk/xTgufTyggbT/0tGFeRdsHe3rNMQkOMqDN5+8YqFV4uCTUNokgvyysedgwB7JP+tcPclN/a3QoIPjAq/w== + dependencies: + "@jupyterlab/buildutils" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/application" "^1.13.1" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/domutils" "^1.2.3" + "@lumino/dragdrop" "^1.7.1" + "@lumino/messaging" "^1.4.3" + "@lumino/properties" "^1.2.3" + "@lumino/signaling" "^1.4.3" + "@lumino/virtualdom" "^1.8.0" + "@lumino/widgets" "^1.16.1" + ajv "^6.12.3" + commander "~6.0.0" + css-loader "^5.0.1" + duplicate-package-checker-webpack-plugin "^3.0.0" + file-loader "~6.0.0" + fs-extra "^9.0.1" + glob "~7.1.6" + mini-css-extract-plugin "~1.3.2" 
+ path-browserify "^1.0.0" + process "^0.11.10" + raw-loader "~4.0.0" + style-loader "~2.0.0" + supports-color "^7.2.0" + svg-url-loader "~6.0.0" + terser-webpack-plugin "^4.1.0" + to-string-loader "^1.1.6" + url-loader "~4.1.0" + webpack "^5.3.1" + webpack-cli "^4.1.0" + webpack-merge "^5.1.2" + worker-loader "^3.0.2" + +"@jupyterlab/buildutils@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@jupyterlab/buildutils/-/buildutils-3.0.7.tgz#e83d3303b5f2bddd0b6b39fdab81864b67574dec" + integrity sha512-V3A9foBIP9CJcblyZTEKHtkY6o/3pRodxNAZqveiDDpBdQhM8xv4uOLBKkkMxroa5Eh5Goub4fw+JHxI2nKGXw== + dependencies: + "@lumino/coreutils" "^1.5.3" + "@yarnpkg/lockfile" "^1.1.0" + child_process "~1.0.2" + commander "~6.0.0" + crypto "~1.0.1" + dependency-graph "^0.9.0" + fs-extra "^9.0.1" + glob "~7.1.6" + inquirer "^7.0.0" + package-json "^6.5.0" + prettier "^2.1.1" + semver "^7.3.2" + sort-package-json "~1.44.0" + typescript "~4.1.3" + +"@jupyterlab/codeeditor@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/codeeditor/-/codeeditor-3.0.9.tgz#e06f82ad3c5199be8e9eb97b598d212af0c4ca08" + integrity sha512-OUymghTH6CsAXc4z8EA7BqwdT99mdjJ/X488EOgXCBgeKz3QKB1gQ3GpH26soUv4S0prAs8RKU7rHjfb+DLYBQ== + dependencies: + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/nbformat" "^3.0.6" + "@jupyterlab/observables" "^4.0.6" + "@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/dragdrop" "^1.7.1" + "@lumino/messaging" "^1.4.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + +"@jupyterlab/codemirror@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/codemirror/-/codemirror-3.0.9.tgz#2b66c998547ce30a6162141bfb168fb7d2db2ea0" + integrity sha512-RgB4ZS1Rhzvk20VDvnP7oQ8Bh9fC0dWDO/hZZwLJamlJLgtQNsCnU3Qw/K2dxhCMWBexI3n+E+0mcv1IXbEtLQ== + dependencies: + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/codeeditor" "^3.0.9" + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/nbformat" "^3.0.6" + "@jupyterlab/observables" "^4.0.6" + "@jupyterlab/statusbar" "^3.0.9" + "@jupyterlab/translation" "^3.0.9" + "@lumino/algorithm" "^1.3.3" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/polling" "^1.3.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + codemirror "~5.58.0" + react "^17.0.1" + +"@jupyterlab/coreutils@^5.0.6": + version "5.0.6" + resolved "https://registry.yarnpkg.com/@jupyterlab/coreutils/-/coreutils-5.0.6.tgz#dd36591d01191762ff35e3b096f324e990e0e617" + integrity sha512-nXGpI1IJw+4pNq6Afy+oI3LrTsaQ14xG7Kxbhg9UPfoDgsNt2rdG4pwYe4NZyj2GJHAkUj00lcUD9eBTrxMWvw== + dependencies: + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/signaling" "^1.4.3" + minimist "~1.2.0" + moment "^2.24.0" + path-browserify "^1.0.0" + url-parse "~1.5.1" + +"@jupyterlab/docregistry@^3.0.11": + version "3.0.11" + resolved "https://registry.yarnpkg.com/@jupyterlab/docregistry/-/docregistry-3.0.11.tgz#21ffbabbbac56b6c8a7db5547068790a4b077bd6" + integrity sha512-kx+ZXgM2UcBXvy+LDwGOVa/zP3+CjKMj0jM5qaUW+sHFZzkFIV/ke/MuiX2p6J+78s2VY5Hyy2Tq07jZhMEACg== + dependencies: + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/codeeditor" "^3.0.9" + "@jupyterlab/codemirror" "^3.0.9" + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/observables" "^4.0.6" + "@jupyterlab/rendermime" "^3.0.10" + "@jupyterlab/rendermime-interfaces" "^3.0.9" + "@jupyterlab/services" "^6.0.9" + 
"@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/messaging" "^1.4.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + +"@jupyterlab/launcher@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/launcher/-/launcher-3.0.9.tgz#ed1cf1a93e793d336ea0a3c0a27aa113f4bbb114" + integrity sha512-UoPTtRukvViRlmASr7XeecmUu8VGbhV4Ui4lQ3YQh2k6LG6t1Whgu/uS6XzjIzeIB5RtAFQ1Psk4TnjASJXs/A== + dependencies: + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/properties" "^1.2.3" + "@lumino/widgets" "^1.16.1" + react "^17.0.1" + +"@jupyterlab/nbformat@^3.0.6": + version "3.0.6" + resolved "https://registry.yarnpkg.com/@jupyterlab/nbformat/-/nbformat-3.0.6.tgz#858a6567cdd60879bc7f9dad6c9dcb5587417b5d" + integrity sha512-4+u770JYPmRpLyEPpnG0crj8ePUkg/vCF1W4hnDDxnLTVjzKw5kv6KVb5yJGEHAihUOf51bjceNUOp/+nLVBTg== + dependencies: + "@lumino/coreutils" "^1.5.3" + +"@jupyterlab/observables@^4.0.6": + version "4.0.6" + resolved "https://registry.yarnpkg.com/@jupyterlab/observables/-/observables-4.0.6.tgz#be3bb0f08d2e79f86f4553857ed0aa90d7b293f2" + integrity sha512-PYJosNXGSkLExaEXqpUuDjEXTEcxTpvM6kG8I6NFJyDQVD6E50LggC6NofY5EIcEsJsO771BLvI4kwNk7LRQSA== + dependencies: + "@lumino/algorithm" "^1.3.3" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/messaging" "^1.4.3" + "@lumino/signaling" "^1.4.3" + +"@jupyterlab/rendermime-interfaces@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/rendermime-interfaces/-/rendermime-interfaces-3.0.9.tgz#13badf733d79b34bed0392e8a34d30291090e536" + integrity sha512-KvoDcIzgvDhvCGDYqFhRM753iOryWFujAEzXjpzvYz/1yNUh5weYsdwdmdCjUTkToM9rFiIDMwjferJPU54thw== + dependencies: + "@jupyterlab/translation" "^3.0.9" + "@lumino/coreutils" "^1.5.3" + "@lumino/widgets" "^1.16.1" + +"@jupyterlab/rendermime@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@jupyterlab/rendermime/-/rendermime-3.0.10.tgz#7592155ea00c3a81f0d9a5662d3ccdeb37f722b1" + integrity sha512-9Q32zYpBkbrlAkuHJ7760ZETWQYZkKT9UcJWOMVF7iNgoBfRohAYvPHsoc6JFZyFEFhKzkLwa+CTcL48aGjg7A== + dependencies: + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/codemirror" "^3.0.9" + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/nbformat" "^3.0.6" + "@jupyterlab/observables" "^4.0.6" + "@jupyterlab/rendermime-interfaces" "^3.0.9" + "@jupyterlab/services" "^6.0.9" + "@jupyterlab/translation" "^3.0.9" + "@lumino/algorithm" "^1.3.3" + "@lumino/coreutils" "^1.5.3" + "@lumino/messaging" "^1.4.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + lodash.escape "^4.0.1" + marked "^2.0.0" + +"@jupyterlab/services@^6.0.9": + version "6.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/services/-/services-6.0.9.tgz#70a10d7f6883b8fafff81216663d96858b0cf46b" + integrity sha512-zeN9roqwbYo6b2I5BXWx+Mr4KzTpe2UcVwrcAGw9NXqIieb0ZnvtHqtNj/vcHCM2xQKuPup9W1X1bE5b3wF5Yw== + dependencies: + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/nbformat" "^3.0.6" + "@jupyterlab/observables" "^4.0.6" + "@jupyterlab/settingregistry" "^3.0.6" + "@jupyterlab/statedb" "^3.0.6" + "@lumino/algorithm" "^1.3.3" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/polling" "^1.3.3" + "@lumino/signaling" "^1.4.3" + 
node-fetch "^2.6.0" + ws "^7.2.0" + +"@jupyterlab/settingregistry@^3.0.6": + version "3.0.6" + resolved "https://registry.yarnpkg.com/@jupyterlab/settingregistry/-/settingregistry-3.0.6.tgz#000cd9dc4984a1ccac01d73c7967893befe14b8d" + integrity sha512-fIeVJjkaf8FYSJ4jwJobwNeco8J2CEuWzmEJKiDjhmzmRZApS9Jjx+CJXDkTxoSMDQ41ELxQKJq5bcbih/90zQ== + dependencies: + "@jupyterlab/statedb" "^3.0.6" + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/signaling" "^1.4.3" + ajv "^6.12.3" + json5 "^2.1.1" + +"@jupyterlab/statedb@^3.0.6": + version "3.0.6" + resolved "https://registry.yarnpkg.com/@jupyterlab/statedb/-/statedb-3.0.6.tgz#d331c815496f80083d53277e1972095da954f31f" + integrity sha512-hXewp5TAKneWJcYXenTZuzSUagGjyWv5vRHDFarw1O4pkEg7zz8IyN2yAvbYH6+GDqIhF/91rgGu9alkx/yjjA== + dependencies: + "@lumino/commands" "^1.12.0" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/properties" "^1.2.3" + "@lumino/signaling" "^1.4.3" + +"@jupyterlab/statusbar@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/statusbar/-/statusbar-3.0.9.tgz#b00d8b74e813bb9534e7a57d0419579e9367da7a" + integrity sha512-MaA6GVi59mH3YRkV5iJPcpdS9opMTgFvcfMQLzKeMJvEQvM2fFGMVixp+q2U6Pa8iJsCp59CUoTyuQQdkw1UFw== + dependencies: + "@jupyterlab/apputils" "^3.0.9" + "@jupyterlab/codeeditor" "^3.0.9" + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/services" "^6.0.9" + "@jupyterlab/translation" "^3.0.9" + "@jupyterlab/ui-components" "^3.0.7" + "@lumino/algorithm" "^1.3.3" + "@lumino/coreutils" "^1.5.3" + "@lumino/disposable" "^1.4.3" + "@lumino/messaging" "^1.4.3" + "@lumino/signaling" "^1.4.3" + "@lumino/widgets" "^1.16.1" + csstype "~3.0.3" + react "^17.0.1" + typestyle "^2.0.4" + +"@jupyterlab/translation@^3.0.9": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@jupyterlab/translation/-/translation-3.0.9.tgz#54472d3d2fef0d56dfa61c2711a9155f3308ad5b" + integrity sha512-XsIUt08HDpA2zqhJFmNV9iuxMriV4sAdx4rM1rA0tEUuvWSXerLvpzNUw4LAz+iaJgyUgqqV1gKrOgoMTjtvWA== + dependencies: + "@jupyterlab/coreutils" "^5.0.6" + "@jupyterlab/services" "^6.0.9" + "@jupyterlab/statedb" "^3.0.6" + "@lumino/coreutils" "^1.5.3" + +"@jupyterlab/ui-components@^3.0.7": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@jupyterlab/ui-components/-/ui-components-3.0.7.tgz#83525d98051e9c74bd415da9e4a0fb20ec6bd609" + integrity sha512-kuq2aZ3DcCQNqf5ucsXWREHxbYq23+S12zMertOs+74KQr8jm8chX9HmqpmefNKnSIqqi/RKVSS2PWuSTpkEEw== + dependencies: + "@blueprintjs/core" "^3.36.0" + "@blueprintjs/select" "^3.15.0" + "@jupyterlab/coreutils" "^5.0.6" + "@lumino/coreutils" "^1.5.3" + "@lumino/signaling" "^1.4.3" + "@lumino/virtualdom" "^1.8.0" + "@lumino/widgets" "^1.16.1" + react "^17.0.1" + react-dom "^17.0.1" + typestyle "^2.0.4" + +"@lumino/algorithm@^1.3.3", "@lumino/algorithm@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@lumino/algorithm/-/algorithm-1.6.0.tgz#771e7896cd94e660f9b58a52f80e1bb255de1d41" + integrity sha512-NMOcm5Yr9nXz5gokS/K4jHBbUMQYBkvDXl1n51XWdcz0LY+oGuIKPhjazhUgmbNRehzdZBj5hMMd1+htYWeVKQ== + +"@lumino/application@^1.13.1": + version "1.20.0" + resolved "https://registry.yarnpkg.com/@lumino/application/-/application-1.20.0.tgz#b50ca4180bc400589fdfcfcaab08c4af937fccd0" + integrity sha512-FAoQcq4L3ZswTK0lWfLKnG1ecG26cwqjzg2fyoBeuWGBi1TG9BYjFBdV7ErTFMxW8jE1CLOLuxsZaKFLNErcKA== + dependencies: + "@lumino/commands" "^1.15.0" + "@lumino/coreutils" "^1.8.0" + "@lumino/widgets" "^1.23.0" + +"@lumino/collections@^1.6.0": + 
version "1.6.0" + resolved "https://registry.yarnpkg.com/@lumino/collections/-/collections-1.6.0.tgz#7d3e94cee26409b0cd719c1934bdda471e6a5662" + integrity sha512-ZETm0/xF0oUHV03sOXNOfFI1EEpS317HvN5n+fZBJvCNZIrJkWmKD8QuxcfwHb7AChKUhXlVHhDbWlb1LKnd7g== + dependencies: + "@lumino/algorithm" "^1.6.0" + +"@lumino/commands@^1.12.0", "@lumino/commands@^1.15.0": + version "1.15.0" + resolved "https://registry.yarnpkg.com/@lumino/commands/-/commands-1.15.0.tgz#06eb94fb4b34cad59f35b1fcaf473e8d2047f779" + integrity sha512-JOE68KfbR9xw5YTfcwo+9E0PSWidifEMAcOC/aXd7iSzfsCRknMTcMQIUGL277IU7J7CJvoe10DUE5QKwTmX+g== + dependencies: + "@lumino/algorithm" "^1.6.0" + "@lumino/coreutils" "^1.8.0" + "@lumino/disposable" "^1.7.0" + "@lumino/domutils" "^1.5.0" + "@lumino/keyboard" "^1.5.0" + "@lumino/signaling" "^1.7.0" + "@lumino/virtualdom" "^1.11.0" + +"@lumino/coreutils@^1.5.3", "@lumino/coreutils@^1.8.0": + version "1.8.0" + resolved "https://registry.yarnpkg.com/@lumino/coreutils/-/coreutils-1.8.0.tgz#4feb3ccbfbc3efc8e395a90f22b5a938fbad959a" + integrity sha512-OvCsaASUqOE7R6Dxngyk4/b5QMOjyRUNxuZuuL+fx+JvGKZFZ/B2c9LYtAJ9mDmQ1BQiGNV/qSpL4o7x8PCfjw== + +"@lumino/disposable@^1.4.3", "@lumino/disposable@^1.7.0": + version "1.7.0" + resolved "https://registry.yarnpkg.com/@lumino/disposable/-/disposable-1.7.0.tgz#539463490cb42e8d2dc46b5ff7cc291f4f1a8d07" + integrity sha512-3mWi11ko3XVY63BPwvys7MXrbFddA2i+gp72d0wAKM2NDDUopVPikMHhJpjGJcw+otjahzXYiTewxPDEau9dYg== + dependencies: + "@lumino/algorithm" "^1.6.0" + "@lumino/signaling" "^1.7.0" + +"@lumino/domutils@^1.2.3", "@lumino/domutils@^1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@lumino/domutils/-/domutils-1.5.0.tgz#fdba0cfe404b4817e63aa064f63b3c965655e76e" + integrity sha512-dZ0Aa+/qhvfPc1aa5kX4LLGE3B6BW1XmJa0R1XVCEpAFY3cZiujuQWmhYHJtZPrOiqn0UtioT2OpqnWdtCWc0A== + +"@lumino/dragdrop@^1.10.0", "@lumino/dragdrop@^1.7.1": + version "1.10.0" + resolved "https://registry.yarnpkg.com/@lumino/dragdrop/-/dragdrop-1.10.0.tgz#2fddacfee055e660dd33dd9a3cfbd8fbba811673" + integrity sha512-A3cNLcp09zygOprWmLTkLZCQYNq3dJfN+mhni4IZizqCTkKbTCEzo2/IwoCWvy+ABKft8d/A9Y40wFW6yJ9OyA== + dependencies: + "@lumino/coreutils" "^1.8.0" + "@lumino/disposable" "^1.7.0" + +"@lumino/keyboard@^1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@lumino/keyboard/-/keyboard-1.5.0.tgz#c12213822dd2645c412e8689aecd4a2726113ac6" + integrity sha512-/uF9xqHYVbIkser2Q6UIv7VWrzThr1fxAmSOShjSoKGocL0XHeaBaCOMezSaVxnJ1wm1ciNdhMsjscVM8Inp7g== + +"@lumino/messaging@^1.4.3", "@lumino/messaging@^1.7.0": + version "1.7.0" + resolved "https://registry.yarnpkg.com/@lumino/messaging/-/messaging-1.7.0.tgz#32542f9e9a266fd5b3f71842f70cfe141e016d93" + integrity sha512-QYWf9QGIGD0Oes104zw7mVln4S8yRije2mZhNNRBjkYcDuQlPW+eRSuC5LwAMsFnGymBlUPwPbKOUEO2RbhAtg== + dependencies: + "@lumino/algorithm" "^1.6.0" + "@lumino/collections" "^1.6.0" + +"@lumino/polling@^1.3.3": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@lumino/polling/-/polling-1.6.0.tgz#64f40bba4602fe9eceb9f3fae8f3647831e5b7e9" + integrity sha512-jG1nqw6UO5XEN7QamOr6iDW8WvYeZQcBVRjM38fszz62dwJ/VGPvO2hlNl6QWWIfCynbJudms0LQm+z0BT1EdA== + dependencies: + "@lumino/coreutils" "^1.8.0" + "@lumino/disposable" "^1.7.0" + "@lumino/signaling" "^1.7.0" + +"@lumino/properties@^1.2.3", "@lumino/properties@^1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@lumino/properties/-/properties-1.5.0.tgz#7e8638e84c51bb110c5a69f91ca8b0e40b2c3fca" + integrity 
sha512-YqpJE6/1Wkjrie0E+ypu+yzd55B5RlvKYMnQs3Ox+SrJsnNBhA6Oj44EhVf8SUTuHgn1t/mm+LvbswKN5RM4+g== + +"@lumino/signaling@^1.4.3", "@lumino/signaling@^1.7.0": + version "1.7.0" + resolved "https://registry.yarnpkg.com/@lumino/signaling/-/signaling-1.7.0.tgz#76da4738bf8f19e7da6de1d457a54220e2140670" + integrity sha512-a5kd11Sf04jTfpzxCr7TOBD2o5YvItA4IGwiOoG+QR6sPR0Rwmcf47fPItqXo5st58iNIblC3F+c264N+Me+gg== + dependencies: + "@lumino/algorithm" "^1.6.0" + +"@lumino/virtualdom@^1.11.0", "@lumino/virtualdom@^1.8.0": + version "1.11.0" + resolved "https://registry.yarnpkg.com/@lumino/virtualdom/-/virtualdom-1.11.0.tgz#468b4d28a07e2b8988dc583b4aab40e37dc6955e" + integrity sha512-G0sIx4pLYbgJ4w+SIgsCYQgKP/GBrWgjh8wcumD6XpaYZNivJv4c01xITYYlh7FU61jZmMWMrxtZztArNRDSqg== + dependencies: + "@lumino/algorithm" "^1.6.0" + +"@lumino/widgets@^1.16.1", "@lumino/widgets@^1.23.0": + version "1.23.0" + resolved "https://registry.yarnpkg.com/@lumino/widgets/-/widgets-1.23.0.tgz#096c7574de75fa67b32bcb914c5dae290fbee6f3" + integrity sha512-0Akt9ESgc06SJ3EJG3VK1Liw+AAjRWkKMfm8VUTwT/1QJYYGZ8kfHNO97mkBLv+0EkLEkZIeaQb8fIoU6vh7bw== + dependencies: + "@lumino/algorithm" "^1.6.0" + "@lumino/commands" "^1.15.0" + "@lumino/coreutils" "^1.8.0" + "@lumino/disposable" "^1.7.0" + "@lumino/domutils" "^1.5.0" + "@lumino/dragdrop" "^1.10.0" + "@lumino/keyboard" "^1.5.0" + "@lumino/messaging" "^1.7.0" + "@lumino/properties" "^1.5.0" + "@lumino/signaling" "^1.7.0" + "@lumino/virtualdom" "^1.11.0" + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + +"@nodelib/fs.walk@^1.2.3": + version "1.2.8" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== + dependencies: + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" + +"@npmcli/move-file@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-1.1.2.tgz#1a82c3e372f7cae9253eb66d72543d6b8685c674" + integrity sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg== + dependencies: + mkdirp "^1.0.4" + rimraf "^3.0.2" + +"@sindresorhus/is@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" + integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== + +"@szmarczak/http-timer@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" + integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== + dependencies: + defer-to-connect "^1.0.1" + +"@types/dom4@^2.0.1": + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/@types/dom4/-/dom4-2.0.2.tgz#6495303f049689ce936ed328a3e5ede9c51408ee" + integrity sha512-Rt4IC1T7xkCWa0OG1oSsPa0iqnxlDeQqKXZAHrQGLb7wFGncWm85MaxKUjAGejOrUynOgWlFi4c6S6IyJwoK4g== + +"@types/eslint-scope@^3.7.0": + version "3.7.1" + resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.1.tgz#8dc390a7b4f9dd9f1284629efce982e41612116e" + integrity sha512-SCFeogqiptms4Fg29WpOTk5nHIzfpKCemSN63ksBQYKTcXoJEmJagV+DhVmbapZzY4/5YaOV1nZwrsU79fFm1g== + dependencies: + "@types/eslint" "*" + "@types/estree" "*" + +"@types/eslint@*": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-7.28.0.tgz#7e41f2481d301c68e14f483fe10b017753ce8d5a" + integrity sha512-07XlgzX0YJUn4iG1ocY4IX9DzKSmMGUs6ESKlxWhZRaa0fatIWaHWUVapcuGa8r5HFnTqzj+4OCjd5f7EZ/i/A== + dependencies: + "@types/estree" "*" + "@types/json-schema" "*" + +"@types/estree@*", "@types/estree@^0.0.50": + version "0.0.50" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.50.tgz#1e0caa9364d3fccd2931c3ed96fdbeaa5d4cca83" + integrity sha512-C6N5s2ZFtuZRj54k2/zyRhNDjJwwcViAM3Nbm8zjBpbqAdZ00mr0CFxvSKeO8Y/e03WVFLpQMdHYVfUd6SB+Hw== + +"@types/glob@^7.1.1": + version "7.1.4" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.4.tgz#ea59e21d2ee5c517914cb4bc8e4153b99e566672" + integrity sha512-w+LsMxKyYQm347Otw+IfBXOv9UWVjpHpCDdbBMt8Kz/xbvCYNjP+0qPh91Km3iKfSRLBB0P7fAMf0KHrPu+MyA== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + +"@types/json-schema@*", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8": + version "7.0.8" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.8.tgz#edf1bf1dbf4e04413ca8e5b17b3b7d7d54b59818" + integrity sha512-YSBPTLTVm2e2OoQIDYx8HaeWJ5tTToLH67kXR7zYNGupXMEHa2++G8k+DczX2cFVgalypqtyZIcU19AFcmOpmg== + +"@types/minimatch@*": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.5.tgz#1001cc5e6a3704b83c236027e77f2f58ea010f40" + integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ== + +"@types/node@*": + version "16.4.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-16.4.0.tgz#2c219eaa3b8d1e4d04f4dd6e40bc68c7467d5272" + integrity sha512-HrJuE7Mlqcjj+00JqMWpZ3tY8w7EUd+S0U3L1+PQSWiXZbOgyQDvi+ogoUxaHApPJq5diKxYBQwA3iIlNcPqOg== + +"@types/prop-types@*": + version "15.7.4" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.4.tgz#fcf7205c25dff795ee79af1e30da2c9790808f11" + integrity sha512-rZ5drC/jWjrArrS8BR6SIr4cWpW09RNTYt9AMZo3Jwwif+iacXAqgVjm0B0Bv/S1jhDXKHqRVNCbACkJ89RAnQ== + +"@types/react@^17.0.0": + version "17.0.14" + resolved "https://registry.yarnpkg.com/@types/react/-/react-17.0.14.tgz#f0629761ca02945c4e8fea99b8177f4c5c61fb0f" + integrity sha512-0WwKHUbWuQWOce61UexYuWTGuGY/8JvtUe/dtQ6lR4sZ3UiylHotJeWpf3ArP9+DSGUoLY3wbU59VyMrJps5VQ== + dependencies: + "@types/prop-types" "*" + "@types/scheduler" "*" + csstype "^3.0.2" + +"@types/scheduler@*": + version "0.16.2" + resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39" + integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew== + +"@webassemblyjs/ast@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.11.1.tgz#2bfd767eae1a6996f432ff7e8d7fc75679c0b6a7" + integrity 
sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw== + dependencies: + "@webassemblyjs/helper-numbers" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + +"@webassemblyjs/floating-point-hex-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz#f6c61a705f0fd7a6aecaa4e8198f23d9dc179e4f" + integrity sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ== + +"@webassemblyjs/helper-api-error@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz#1a63192d8788e5c012800ba6a7a46c705288fd16" + integrity sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg== + +"@webassemblyjs/helper-buffer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz#832a900eb444884cde9a7cad467f81500f5e5ab5" + integrity sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA== + +"@webassemblyjs/helper-numbers@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz#64d81da219fbbba1e3bd1bfc74f6e8c4e10a62ae" + integrity sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ== + dependencies: + "@webassemblyjs/floating-point-hex-parser" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@xtuc/long" "4.2.2" + +"@webassemblyjs/helper-wasm-bytecode@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz#f328241e41e7b199d0b20c18e88429c4433295e1" + integrity sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q== + +"@webassemblyjs/helper-wasm-section@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz#21ee065a7b635f319e738f0dd73bfbda281c097a" + integrity sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + +"@webassemblyjs/ieee754@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz#963929e9bbd05709e7e12243a099180812992614" + integrity sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ== + dependencies: + "@xtuc/ieee754" "^1.2.0" + +"@webassemblyjs/leb128@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.11.1.tgz#ce814b45574e93d76bae1fb2644ab9cdd9527aa5" + integrity sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw== + dependencies: + "@xtuc/long" "4.2.2" + +"@webassemblyjs/utf8@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.1.tgz#d1f8b764369e7c6e6bae350e854dec9a59f0a3ff" + integrity sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ== + +"@webassemblyjs/wasm-edit@1.11.1": + version "1.11.1" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz#ad206ebf4bf95a058ce9880a8c092c5dec8193d6" + integrity sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/helper-wasm-section" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-opt" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + "@webassemblyjs/wast-printer" "1.11.1" + +"@webassemblyjs/wasm-gen@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz#86c5ea304849759b7d88c47a32f4f039ae3c8f76" + integrity sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + "@webassemblyjs/utf8" "1.11.1" + +"@webassemblyjs/wasm-opt@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz#657b4c2202f4cf3b345f8a4c6461c8c2418985f2" + integrity sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + +"@webassemblyjs/wasm-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz#86ca734534f417e9bd3c67c7a1c75d8be41fb199" + integrity sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + "@webassemblyjs/utf8" "1.11.1" + +"@webassemblyjs/wast-printer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz#d0c73beda8eec5426f10ae8ef55cee5e7084c2f0" + integrity sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@xtuc/long" "4.2.2" + +"@webpack-cli/configtest@^1.0.4": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@webpack-cli/configtest/-/configtest-1.0.4.tgz#f03ce6311c0883a83d04569e2c03c6238316d2aa" + integrity sha512-cs3XLy+UcxiP6bj0A6u7MLLuwdXJ1c3Dtc0RkKg+wiI1g/Ti1om8+/2hc2A2B60NbBNAbMgyBMHvyymWm/j4wQ== + +"@webpack-cli/info@^1.3.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@webpack-cli/info/-/info-1.3.0.tgz#9d78a31101a960997a4acd41ffd9b9300627fe2b" + integrity sha512-ASiVB3t9LOKHs5DyVUcxpraBXDOKubYu/ihHhU+t1UPpxsivg6Od2E2qU4gJCekfEddzRBzHhzA/Acyw/mlK/w== + dependencies: + envinfo "^7.7.3" + +"@webpack-cli/serve@^1.5.1": + version "1.5.1" + resolved "https://registry.yarnpkg.com/@webpack-cli/serve/-/serve-1.5.1.tgz#b5fde2f0f79c1e120307c415a4c1d5eb15a6f278" + integrity sha512-4vSVUiOPJLmr45S8rMGy7WDvpWxfFxfP/Qx/cxZFCfvoypTYpPPL1X8VIZMe0WTA+Jr7blUxwUSEZNkjoMTgSw== + +"@xtuc/ieee754@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" + integrity 
sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA== + +"@xtuc/long@4.2.2": + version "4.2.2" + resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" + integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== + +"@yarnpkg/lockfile@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz#e77a97fbd345b76d83245edcd17d393b1b41fb31" + integrity sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ== + +acorn@^8.4.1: + version "8.4.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.4.1.tgz#56c36251fc7cabc7096adc18f05afe814321a28c" + integrity sha512-asabaBSkEKosYKMITunzX177CXxQ4Q8BSSzMTKD+FefUhipQC70gfW5SiUDhYQ3vk8G+81HqQk7Fv9OXwwn9KA== + +aggregate-error@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" + integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA== + dependencies: + clean-stack "^2.0.0" + indent-string "^4.0.0" + +ajv-keywords@^3.5.2: + version "3.5.2" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d" + integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ== + +ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +ansi-escapes@^4.2.1: + version "4.3.2" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== + dependencies: + type-fest "^0.21.3" + +ansi-regex@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +at-least-node@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" + integrity 
sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +browserslist@^4.14.5: + version "4.16.6" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.6.tgz#d7901277a5a88e554ed305b183ec9b0c08f66fa2" + integrity sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ== + dependencies: + caniuse-lite "^1.0.30001219" + colorette "^1.2.2" + electron-to-chromium "^1.3.723" + escalade "^3.1.1" + node-releases "^1.1.71" + +buffer-from@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== + +buffer@^5.6.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" + integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.1.13" + +cacache@^15.0.5: + version "15.2.0" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-15.2.0.tgz#73af75f77c58e72d8c630a7a2858cb18ef523389" + integrity sha512-uKoJSHmnrqXgthDFx/IU6ED/5xd+NNGe+Bb+kLZy7Ku4P+BaiWEUflAKPZ7eAzsYGcsAGASJZsybXp+quEcHTw== + dependencies: + "@npmcli/move-file" "^1.0.1" + chownr "^2.0.0" + fs-minipass "^2.0.0" + glob "^7.1.4" + infer-owner "^1.0.4" + lru-cache "^6.0.0" + minipass "^3.1.1" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.2" + mkdirp "^1.0.3" + p-map "^4.0.0" + promise-inflight "^1.0.1" + rimraf "^3.0.2" + ssri "^8.0.1" + tar "^6.0.2" + unique-filename "^1.1.1" + +cacheable-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" + integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== + dependencies: + clone-response "^1.0.2" + get-stream "^5.1.0" + http-cache-semantics "^4.0.0" 
+ keyv "^3.0.0" + lowercase-keys "^2.0.0" + normalize-url "^4.1.0" + responselike "^1.0.2" + +call-bind@^1.0.0, call-bind@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" + integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== + dependencies: + function-bind "^1.1.1" + get-intrinsic "^1.0.2" + +caniuse-lite@^1.0.30001219: + version "1.0.30001245" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001245.tgz#45b941bbd833cb0fa53861ff2bae746b3c6ca5d4" + integrity sha512-768fM9j1PKXpOCKws6eTo3RHmvTUsG9UrpT4WoREFeZgJBTi4/X9g565azS/rVUGtqb8nt7FjLeF5u4kukERnA== + +chalk@^2.3.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" + integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chardet@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" + integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== + +child_process@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/child_process/-/child_process-1.0.2.tgz#b1f7e7fc73d25e7fd1d455adc94e143830182b5a" + integrity sha1-sffn/HPSXn/R1FWtyU4UODAYK1o= + +chownr@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" + integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== + +chrome-trace-event@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz#1015eced4741e15d06664a957dbbf50d041e26ac" + integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg== + +classnames@^2.2: + version "2.3.1" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.1.tgz#dfcfa3891e306ec1dad105d0e88f4417b8535e8e" + integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA== + +clean-stack@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + dependencies: + restore-cursor "^3.1.0" + +cli-width@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" + integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== + +clone-deep@^4.0.1: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" + integrity sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ== + dependencies: + is-plain-object "^2.0.4" + kind-of "^6.0.2" + shallow-clone "^3.0.0" + +clone-response@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= + dependencies: + mimic-response "^1.0.0" + +codemirror@~5.58.0: + version "5.58.3" + resolved "https://registry.yarnpkg.com/codemirror/-/codemirror-5.58.3.tgz#3f0689854ecfbed5d4479a98b96148b2c3b79796" + integrity sha512-KBhB+juiyOOgn0AqtRmWyAT3yoElkuvWTI6hsHa9E6GQrl6bk/fdAYcvuqW1/upO9T9rtEtapWdw4XYcNiVDEA== + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +colorette@^1.2.1, colorette@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.2.tgz#cbcc79d5e99caea2dbf10eb3a26fd8b3e6acfa94" + integrity sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w== + +commander@^2.20.0: + version "2.20.3" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@^7.0.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== + +commander@~6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-6.0.0.tgz#2b270da94f8fb9014455312f829a1129dbf8887e" + integrity sha512-s7EA+hDtTYNhuXkTlhqew4txMZVdszBmKWSPEMxGr8ru8JXR7bLUFIAtPhcSuFdJQ0ILMxnJi8GkQL0yvDy/YA== + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +cross-spawn@^7.0.3: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + 
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +crypto@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/crypto/-/crypto-1.0.1.tgz#2af1b7cad8175d24c8a1b0778255794a21803037" + integrity sha512-VxBKmeNcqQdiUQUW2Tzq0t377b54N2bMtXO/qiLa+6eRRmmC4qT3D4OnTGoT/U6O9aklQ/jTwbOtRMTTY8G0Ig== + +css-loader@^5.0.1: + version "5.2.7" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-5.2.7.tgz#9b9f111edf6fb2be5dc62525644cbc9c232064ae" + integrity sha512-Q7mOvpBNBG7YrVGMxRxcBJZFL75o+cH2abNASdibkj/fffYD8qWbInZrD0S9ccI6vZclF3DsHE7njGlLtaHbhg== + dependencies: + icss-utils "^5.1.0" + loader-utils "^2.0.0" + postcss "^8.2.15" + postcss-modules-extract-imports "^3.0.0" + postcss-modules-local-by-default "^4.0.0" + postcss-modules-scope "^3.0.0" + postcss-modules-values "^4.0.0" + postcss-value-parser "^4.1.0" + schema-utils "^3.0.0" + semver "^7.3.5" + +cssesc@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== + +csstype@2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.9.tgz#05141d0cd557a56b8891394c1911c40c8a98d098" + integrity sha512-xz39Sb4+OaTsULgUERcCk+TJj8ylkL4aSVDQiX/ksxbELSqwkgt4d4RD7fovIdgJGSuNYqwZEiVjYY5l0ask+Q== + +csstype@^3.0.2, csstype@~3.0.3: + version "3.0.8" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.8.tgz#d2266a792729fb227cd216fb572f43728e1ad340" + integrity sha512-jXKhWqXPmlUeoQnF/EhTtTl4C9SnrxSH/jZUih3jmO6lBKr99rP3/+FmrMj4EFpOXzMtXHAZkd3x0E6h6Fgflw== + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= + dependencies: + mimic-response "^1.0.0" + +deep-equal@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.1.1.tgz#b5c98c942ceffaf7cb051e24e1434a25a2e6076a" + integrity sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g== + dependencies: + is-arguments "^1.0.4" + is-date-object "^1.0.1" + is-regex "^1.0.4" + object-is "^1.0.1" + object-keys "^1.1.1" + regexp.prototype.flags "^1.2.0" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deepmerge@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" + integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== + +defer-to-connect@^1.0.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" + integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== + +define-properties@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + integrity 
sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== + dependencies: + object-keys "^1.0.12" + +dependency-graph@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/dependency-graph/-/dependency-graph-0.9.0.tgz#11aed7e203bc8b00f48356d92db27b265c445318" + integrity sha512-9YLIBURXj4DJMFALxXw9K3Y3rwb5Fk0X5/8ipCzaN84+gKxoHK43tVKRNakCQbiEx07E8Uwhuq21BpUagFhZ8w== + +detect-indent@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-6.1.0.tgz#592485ebbbf6b3b1ab2be175c8393d04ca0d57e6" + integrity sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA== + +detect-newline@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" + integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +dom-helpers@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-3.4.0.tgz#e9b369700f959f62ecde5a6babde4bccd9169af8" + integrity sha512-LnuPJ+dwqKDIyotW1VzmOZ5TONUN7CwkCR5hrgawTUbkBGYdeoNLZo6nNfGkCrjtE1nXXaj7iMMpDa8/d9WoIA== + dependencies: + "@babel/runtime" "^7.1.2" + +dom-serializer@^1.0.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.3.2.tgz#6206437d32ceefaec7161803230c7a20bc1b4d91" + integrity sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.2.0" + entities "^2.0.0" + +dom4@^2.1.5: + version "2.1.6" + resolved "https://registry.yarnpkg.com/dom4/-/dom4-2.1.6.tgz#c90df07134aa0dbd81ed4d6ba1237b36fc164770" + integrity sha512-JkCVGnN4ofKGbjf5Uvc8mmxaATIErKQKSgACdBXpsQ3fY6DlIpAyWfiBSrGkttATssbDCp3psiAKWXk5gmjycA== + +domelementtype@^2.0.1, domelementtype@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.2.0.tgz#9a0b6c2782ed6a1c7323d42267183df9bd8b1d57" + integrity sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A== + +domhandler@^4.0.0, domhandler@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.2.0.tgz#f9768a5f034be60a89a27c2e4d0f74eba0d8b059" + integrity sha512-zk7sgt970kzPks2Bf+dwT/PLzghLnsivb9CcxkvR8Mzr66Olr0Ofd8neSbglHJHaHa2MadfoSdNlKYAaafmWfA== + dependencies: + domelementtype "^2.2.0" + +domutils@^2.5.2: + version "2.7.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.7.0.tgz#8ebaf0c41ebafcf55b0b72ec31c56323712c5442" + integrity sha512-8eaHa17IwJUPAiB+SoTYBo5mCdeMgdcAoXJ59m6DT1vw+5iLS3gNoqYaRowaBKtGVrOF1Jz4yDTgYKLK2kvfJg== + dependencies: + dom-serializer "^1.0.1" + domelementtype "^2.2.0" + domhandler "^4.2.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= + +duplicate-package-checker-webpack-plugin@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/duplicate-package-checker-webpack-plugin/-/duplicate-package-checker-webpack-plugin-3.0.0.tgz#78bb89e625fa7cf8c2a59c53f62b495fda9ba287" + integrity sha512-aO50/qPC7X2ChjRFniRiscxBLT/K01bALqfcDaf8Ih5OqQ1N4iT/Abx9Ofu3/ms446vHTm46FACIuJUmgUQcDQ== + dependencies: + chalk "^2.3.0" + find-root "^1.0.0" + lodash "^4.17.4" + semver "^5.4.1" + +electron-to-chromium@^1.3.723: + version "1.3.781" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.781.tgz#aeb54595edc0ae56721ff6428ffeb4c885c09e57" + integrity sha512-l2adP72hnTZANALhjUDLm0zt1fY8Cm+kEU0Ikfy3sP/99BNsOGnnPf2nfCO/3fIL4LwmTcbQD+Ap2Deeuwe9yQ== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +emojis-list@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" + integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== + +end-of-stream@^1.1.0: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +enhanced-resolve@^5.8.0: + version "5.8.2" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.8.2.tgz#15ddc779345cbb73e97c611cd00c01c1e7bf4d8b" + integrity sha512-F27oB3WuHDzvR2DOGNTaYy0D5o0cnrv8TeI482VM4kYgQd/FT9lUQwuNsJ0oOHtBUq7eiW5ytqzp7nBFknL+GA== + dependencies: + graceful-fs "^4.2.4" + tapable "^2.2.0" + +entities@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" + integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== + +envinfo@^7.7.3: + version "7.8.1" + resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.8.1.tgz#06377e3e5f4d379fea7ac592d5ad8927e0c4d475" + integrity sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw== + +es-module-lexer@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-0.7.1.tgz#c2c8e0f46f2df06274cdaf0dd3f3b33e0a0b267d" + integrity sha512-MgtWFl5No+4S3TmhDmCz2ObFGm6lEpTnzbQi+Dd+pw4mlTIZTmM2iAs5gRlmx5zS9luzobCSBSI90JM/1/JgOw== + +escalade@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" + integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +eslint-scope@5.1.1: + version "5.1.1" + resolved 
"https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + +esrecurse@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== + dependencies: + estraverse "^5.2.0" + +estraverse@^4.1.1: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +estraverse@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.2.0.tgz#307df42547e6cc7324d3cf03c155d5cdb8c53880" + integrity sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ== + +events@^3.2.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + +execa@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + +external-editor@^3.0.3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" + integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== + dependencies: + chardet "^0.7.0" + iconv-lite "^0.4.24" + tmp "^0.0.33" + +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-glob@^3.0.3: + version "3.2.7" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.7.tgz#fd6cb7a2d7e9aa7a7846111e85a196d6b2f766a1" + integrity sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fastest-levenshtein@^1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz#9990f7d3a88cc5a9ffd1f1745745251700d497e2" + integrity sha512-On2N+BpYJ15xIC974QNVuYGMOlEVt4s0EOI3wwMqOmK1fdDY+FN/zltPV8vosq4ad4c/gJ1KHScUn/6AWIgiow== + +fastq@^1.6.0: + version "1.11.1" + resolved 
"https://registry.yarnpkg.com/fastq/-/fastq-1.11.1.tgz#5d8175aae17db61947f8b162cfc7f63264d22807" + integrity sha512-HOnr8Mc60eNYl1gzwp6r5RoUyAn5/glBolUzP/Ez6IFVPMPirxn/9phgL6zhOtaTy7ISwPvQ+wT+hfcRZh/bzw== + dependencies: + reusify "^1.0.4" + +figures@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" + integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== + dependencies: + escape-string-regexp "^1.0.5" + +file-loader@~6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-6.0.0.tgz#97bbfaab7a2460c07bcbd72d3a6922407f67649f" + integrity sha512-/aMOAYEFXDdjG0wytpTL5YQLfZnnTmLNjn+AIrJ/6HVnTfDqLsVKUUwkDf4I4kgex36BvjuXEn/TX9B/1ESyqQ== + dependencies: + loader-utils "^2.0.0" + schema-utils "^2.6.5" + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +find-cache-dir@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-3.3.1.tgz#89b33fad4a4670daa94f855f7fbe31d6d84fe880" + integrity sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ== + dependencies: + commondir "^1.0.1" + make-dir "^3.0.2" + pkg-dir "^4.1.0" + +find-root@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" + integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng== + +find-up@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +free-style@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/free-style/-/free-style-3.1.0.tgz#4e2996029534e6b1731611d843437b9e2f473f08" + integrity sha512-vJujYSIyT30iDoaoeigNAxX4yB1RUrh+N2ZMhIElMr3BvCuGXOw7XNJMEEJkDUeamK2Rnb/IKFGKRKlTWIGRWA== + +fs-extra@^9.0.1: + version "9.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== + dependencies: + at-least-node "^1.0.0" + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs-minipass@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" + integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== + dependencies: + minipass "^3.0.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + 
+get-intrinsic@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.1.tgz#15f59f376f855c446963948f0d24cd3637b4abc6" + integrity sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.1" + +get-stream@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== + dependencies: + pump "^3.0.0" + +get-stream@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" + integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== + dependencies: + pump "^3.0.0" + +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + +git-hooks-list@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/git-hooks-list/-/git-hooks-list-1.0.3.tgz#be5baaf78203ce342f2f844a9d2b03dba1b45156" + integrity sha512-Y7wLWcrLUXwk2noSka166byGCvhMtDRpgHdzCno1UQv/n/Hegp++a2xBWJL1lJarnKD3SWaljD+0z1ztqxuKyQ== + +glob-parent@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-to-regexp@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz#c75297087c851b9a578bd217dd59a92f59fe546e" + integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== + +glob@^7.1.3, glob@^7.1.4, glob@~7.1.6: + version "7.1.7" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.7.tgz#3b193e9233f01d42d0b3f78294bbeeb418f94a90" + integrity sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globby@10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-10.0.0.tgz#abfcd0630037ae174a88590132c2f6804e291072" + integrity sha512-3LifW9M4joGZasyYPz2A1U74zbC/45fvpXUvO/9KbSa+VV0aGZarWkfdgKyR9sExNP0t0x0ss/UMJpNpcaTspw== + dependencies: + "@types/glob" "^7.1.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.0.3" + glob "^7.1.3" + ignore "^5.1.1" + merge2 "^1.2.3" + slash "^3.0.0" + +got@^9.6.0: + version "9.6.0" + resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" + integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== + dependencies: + "@sindresorhus/is" "^0.14.0" + "@szmarczak/http-timer" "^1.1.2" + cacheable-request "^6.0.0" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^4.1.0" + lowercase-keys "^1.0.1" + mimic-response "^1.0.1" + p-cancelable "^1.0.0" + to-readable-stream "^1.0.0" + url-parse-lax "^3.0.0" + +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4: + 
version "4.2.6" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" + integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== + +gud@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0" + integrity sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-symbols@^1.0.1, has-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423" + integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +htmlparser2@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.1.0.tgz#c4d762b6c3371a05dbe65e94ae43a9f845fb8fb7" + integrity sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.0.0" + domutils "^2.5.2" + entities "^2.0.0" + +http-cache-semantics@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" + integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +iconv-lite@^0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +icss-utils@^5.0.0, icss-utils@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae" + integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA== + +ieee754@^1.1.13: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +ignore@^5.1.1: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity 
sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + +import-local@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.0.2.tgz#a8cfd0431d1de4a2199703d003e3e62364fa6db6" + integrity sha512-vjL3+w0oulAVZ0hBHnxa/Nm5TAurf9YLQJDhqRZyqb+VKGOB6LU8t9H1Nr5CIo16vh9XfJTOoHwU0B71S557gA== + dependencies: + pkg-dir "^4.2.0" + resolve-cwd "^3.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +infer-owner@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" + integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +ini@~1.3.0: + version "1.3.8" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + +inquirer@^7.0.0: + version "7.3.3" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-7.3.3.tgz#04d176b2af04afc157a83fd7c100e98ee0aad003" + integrity sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA== + dependencies: + ansi-escapes "^4.2.1" + chalk "^4.1.0" + cli-cursor "^3.1.0" + cli-width "^3.0.0" + external-editor "^3.0.3" + figures "^3.0.0" + lodash "^4.17.19" + mute-stream "0.0.8" + run-async "^2.4.0" + rxjs "^6.6.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + through "^2.3.6" + +interpret@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-2.2.0.tgz#1a78a0b5965c40a5416d007ad6f50ad27c417df9" + integrity sha512-Ju0Bz/cEia55xDwUWEa8+olFpCiQoypjnQySseKtmjNrnps3P+xfpUmGr90T7yjlVJmOtybRvPXhKMbHr+fWnw== + +is-arguments@^1.0.4: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.0.tgz#62353031dfbee07ceb34656a6bde59efecae8dd9" + integrity sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg== + dependencies: + call-bind "^1.0.0" + +is-core-module@^2.2.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.5.0.tgz#f754843617c70bfd29b7bd87327400cda5c18491" + integrity sha512-TXCMSDsEHMEEZ6eCA8rwRDbLu55MRGmrctljsBX/2v1d9/GzqHOxW5c5oPSgrUt2vBFXebu9rGqckXGPWOlYpg== + dependencies: + has "^1.0.3" + +is-date-object@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.4.tgz#550cfcc03afada05eea3dd30981c7b09551f73e5" + integrity 
sha512-/b4ZVsG7Z5XVtIxs/h9W8nvfLgSAyKYdtGWQLbqy6jA1icmgjf8WCoTKgeS4wy5tYaPePouzFMANbnj94c2Z+A== + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-glob@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== + dependencies: + is-extglob "^2.1.1" + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-plain-obj@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + +is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== + dependencies: + isobject "^3.0.1" + +is-plain-object@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-5.0.0.tgz#4427f50ab3429e9025ea7d52e9043a9ef4159344" + integrity sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q== + +is-regex@^1.0.4: + version "1.1.3" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.3.tgz#d029f9aff6448b93ebbe3f33dac71511fdcbef9f" + integrity sha512-qSVXFz28HM7y+IWX6vLCsexdlvzT1PJNFSBuaQLQ5o0IEw8UDYW6/2+eCMVyIsbM8CNLX2a/QWmSpyxYEHY7CQ== + dependencies: + call-bind "^1.0.2" + has-symbols "^1.0.2" + +is-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" + integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= + +jest-worker@^26.5.0: + version "26.6.2" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-26.6.2.tgz#7f72cbc4d643c365e27b9fd775f9d0eaa9c7a8ed" + integrity sha512-KWYVV1c4i+jbMpaBC+U++4Va0cp8OisU185o73T1vo99hqi7w8tSJfUXYswwqqrjzwxa6KpRK54WhPvwf5w6PQ== + dependencies: + "@types/node" "*" + merge-stream "^2.0.0" + supports-color "^7.0.0" + +jest-worker@^27.0.2: + version "27.0.6" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-27.0.6.tgz#a5fdb1e14ad34eb228cfe162d9f729cdbfa28aed" + integrity 
sha512-qupxcj/dRuA3xHPMUd40gr2EaAurFbkwzOh7wfPaeE9id7hyjURRQoqNfHifHK3XjJU6YJJUQKILGUnwGPEOCA== + dependencies: + "@types/node" "*" + merge-stream "^2.0.0" + supports-color "^8.0.0" + +"js-tokens@^3.0.0 || ^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= + +json-parse-better-errors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== + dependencies: + minimist "^1.2.0" + +json5@^2.1.1, json5@^2.1.2: + version "2.2.0" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.0.tgz#2dfefe720c6ba525d9ebd909950f0515316c89a3" + integrity sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA== + dependencies: + minimist "^1.2.5" + +jsonfile@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" + integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + dependencies: + universalify "^2.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== + dependencies: + json-buffer "3.0.0" + +kind-of@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" + integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== + +klona@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/klona/-/klona-2.0.4.tgz#7bb1e3affb0cb8624547ef7e8f6708ea2e39dfc0" + integrity sha512-ZRbnvdg/NxqzC7L9Uyqzf4psi1OM4Cuc+sJAkQPjO6XkQIJTNbfK2Rsmbw8fx1p2mkZdp2FZYo2+LwXYY/uwIA== + +loader-runner@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-4.2.0.tgz#d7022380d66d14c5fb1d496b89864ebcfd478384" + integrity sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw== + +loader-utils@^1.0.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.0.tgz#c579b5e34cb34b1a74edc6c1fb36bfa371d5a613" + integrity sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA== + dependencies: + 
big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^1.0.1" + +loader-utils@^2.0.0, loader-utils@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.0.tgz#e4cace5b816d425a166b5f097e10cd12b36064b0" + integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ== + dependencies: + big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^2.1.2" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +lodash.escape@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-4.0.1.tgz#c9044690c21e04294beaa517712fded1fa88de98" + integrity sha1-yQRGkMIeBClL6qUXcS/e0fqI3pg= + +lodash@^4.17.19, lodash@^4.17.4: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== + +lowercase-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" + integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +make-dir@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" + integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== + dependencies: + semver "^6.0.0" + +marked@^2.0.0: + version "2.1.3" + resolved "https://registry.yarnpkg.com/marked/-/marked-2.1.3.tgz#bd017cef6431724fd4b27e0657f5ceb14bff3753" + integrity sha512-/Q+7MGzaETqifOMWYEA7HVMaZb4XbcRfaOzcSsHZEith83KGlvaSG33u0SKu89Mj5h+T8V2hM+8O45Qc5XTgwA== + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.2.3, merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +micromatch@^4.0.4: 
+ version "4.0.4" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.4.tgz#896d519dfe9db25fce94ceb7a500919bf881ebf9" + integrity sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg== + dependencies: + braces "^3.0.1" + picomatch "^2.2.3" + +mime-db@1.48.0: + version "1.48.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.48.0.tgz#e35b31045dd7eada3aaad537ed88a33afbef2d1d" + integrity sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ== + +mime-types@^2.1.27: + version "2.1.31" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.31.tgz#a00d76b74317c61f9c2db2218b8e9f8e9c5c9e6b" + integrity sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg== + dependencies: + mime-db "1.48.0" + +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +mimic-response@^1.0.0, mimic-response@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" + integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== + +mini-css-extract-plugin@~1.3.2: + version "1.3.9" + resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-1.3.9.tgz#47a32132b0fd97a119acd530e8421e8f6ab16d5e" + integrity sha512-Ac4s+xhVbqlyhXS5J/Vh/QXUz3ycXlCqoCPpg0vdfhsIBH9eg/It/9L1r1XhSCH737M1lqcWnMuWL13zcygn5A== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + webpack-sources "^1.1.0" + +minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimist@^1.2.0, minimist@^1.2.5, minimist@~1.2.0: + version "1.2.5" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" + integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + +minipass-collect@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-1.0.2.tgz#22b813bf745dc6edba2576b940022ad6edc8c617" + integrity sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA== + dependencies: + minipass "^3.0.0" + +minipass-flush@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/minipass-flush/-/minipass-flush-1.0.5.tgz#82e7135d7e89a50ffe64610a787953c4c4cbb373" + integrity sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw== + dependencies: + minipass "^3.0.0" + +minipass-pipeline@^1.2.2: + version "1.2.4" + resolved "https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz#68472f79711c084657c067c5c6ad93cddea8214c" + integrity sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A== + dependencies: + minipass "^3.0.0" + +minipass@^3.0.0, minipass@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.3.tgz#7d42ff1f39635482e15f9cdb53184deebd5815fd" + integrity 
sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg== + dependencies: + yallist "^4.0.0" + +minizlib@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-2.1.2.tgz#e90d3466ba209b932451508a11ce3d3632145931" + integrity sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg== + dependencies: + minipass "^3.0.0" + yallist "^4.0.0" + +mkdirp@^1.0.3, mkdirp@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + +moment@^2.24.0: + version "2.29.1" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.1.tgz#b2be769fa31940be9eeea6469c075e35006fa3d3" + integrity sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ== + +mute-stream@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" + integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== + +nanoid@^3.1.23: + version "3.1.23" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.23.tgz#f744086ce7c2bc47ee0a8472574d5c78e4183a81" + integrity sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw== + +neo-async@^2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" + integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== + +node-fetch@^2.6.0: + version "2.6.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" + integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== + +node-releases@^1.1.71: + version "1.1.73" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.73.tgz#dd4e81ddd5277ff846b80b52bb40c49edf7a7b20" + integrity sha512-uW7fodD6pyW2FZNZnp/Z3hvWKeEW1Y8R1+1CnErE8cXFXzl5blBOoVB41CvMer6P6Q0S5FXDwcHgFd1Wj0U9zg== + +normalize-url@^4.1.0: + version "4.5.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.1.tgz#0dd90cf1288ee1d1313b87081c9a5932ee48518a" + integrity sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA== + +normalize.css@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/normalize.css/-/normalize.css-8.0.1.tgz#9b98a208738b9cc2634caacbc42d131c97487bf3" + integrity sha512-qizSNPO93t1YUuUhP22btGOo3chcvDFqFaj2TRybP0DMxkHOCTYwp3n34fel4a31ORXy4m1Xq0Gyqpb5m33qIg== + +npm-run-path@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +object-is@^1.0.1: + version "1.1.5" + resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.1.5.tgz#b9deeaa5fc7f1846a0faecdceec138e5778f53ac" + integrity 
sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + +object-keys@^1.0.12, object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +onetime@^5.1.0, onetime@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" + integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== + dependencies: + mimic-fn "^2.1.0" + +os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +p-cancelable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" + integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== + +p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-limit@^3.0.2, p-limit@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-map@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" + integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== + dependencies: + aggregate-error "^3.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +package-json@^6.5.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0" + integrity sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ== + dependencies: + got "^9.6.0" + registry-auth-token "^4.0.0" + registry-url "^5.0.0" + semver "^6.2.0" + +parse-srcset@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/parse-srcset/-/parse-srcset-1.0.2.tgz#f2bd221f6cc970a938d88556abc589caaaa2bde1" + integrity sha1-8r0iH2zJcKk42IVWq8WJyqqiveE= + +path-browserify@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" + integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +picomatch@^2.2.3: + version "2.3.0" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.0.tgz#f1f061de8f6a4bf022892e2d128234fb98302972" + integrity sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw== + +pkg-dir@^4.1.0, pkg-dir@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== + dependencies: + find-up "^4.0.0" + +popper.js@^1.14.4, popper.js@^1.16.1: + version "1.16.1" + resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.16.1.tgz#2a223cb3dc7b6213d740e40372be40de43e65b1b" + integrity sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ== + +postcss-modules-extract-imports@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz#cda1f047c0ae80c97dbe28c3e76a43b88025741d" + integrity sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw== + +postcss-modules-local-by-default@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz#ebbb54fae1598eecfdf691a02b3ff3b390a5a51c" + integrity sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ== + dependencies: + icss-utils "^5.0.0" + postcss-selector-parser "^6.0.2" + postcss-value-parser "^4.1.0" + +postcss-modules-scope@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz#9ef3151456d3bbfa120ca44898dfca6f2fa01f06" + integrity sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg== + dependencies: + postcss-selector-parser "^6.0.4" + +postcss-modules-values@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz#d7c5e7e68c3bb3c9b27cbf48ca0bb3ffb4602c9c" + integrity sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ== + dependencies: + icss-utils "^5.0.0" + +postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4: + version "6.0.6" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.6.tgz#2c5bba8174ac2f6981ab631a42ab0ee54af332ea" + integrity sha512-9LXrvaaX3+mcv5xkg5kFwqSzSH1JIObIx51PrndZwlmznwXRfxMddDvo9gve3gVR8ZTKgoFDdWkbRFmEhT4PMg== + dependencies: + cssesc "^3.0.0" + util-deprecate "^1.0.2" + +postcss-value-parser@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz#443f6a20ced6481a2bda4fa8532a6e55d789a2cb" + integrity sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ== + +postcss@^8.0.2, postcss@^8.2.15: + version "8.3.5" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.3.5.tgz#982216b113412bc20a86289e91eb994952a5b709" + integrity sha512-NxTuJocUhYGsMiMFHDUkmjSKT3EdH4/WbGF6GCi1NDGk+vbcUTun4fpbOqaPtD8IIsztA2ilZm2DhYCuyN58gA== + dependencies: + colorette "^1.2.2" + nanoid "^3.1.23" + source-map-js "^0.6.2" + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= + +prettier@^2.1.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.3.2.tgz#ef280a05ec253712e486233db5c6f23441e7342d" + integrity sha512-lnJzDfJ66zkMy58OL5/NY5zp70S7Nz6KqcKkXYzn2tMVrNxvbqaBpg7H3qHaLxCJ5lNMsGuM8+ohS7cZrthdLQ== + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= + +promise-inflight@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" + integrity sha1-mEcocL8igTL8vdhoEputEsPAKeM= + +prop-types@^15.6.1, prop-types@^15.6.2: + version "15.7.2" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" + integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.8.1" + +pump@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" + integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + integrity sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0= + +punycode@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== + +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + integrity sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA= + 
+querystringify@^2.1.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" + integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== + +queue-microtask@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +randombytes@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== + dependencies: + safe-buffer "^5.1.0" + +raw-loader@~4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/raw-loader/-/raw-loader-4.0.2.tgz#1aac6b7d1ad1501e66efdac1522c73e59a584eb6" + integrity sha512-ZnScIV3ag9A4wPX/ZayxL/jZH+euYb6FcUinPcgiQW0+UBtEv0O6Q3lGd3cqJ+GHH+rksEv3Pj99oxJ3u3VIKA== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + +rc@^1.2.8: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-dom@^17.0.1: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-17.0.2.tgz#ecffb6845e3ad8dbfcdc498f0d0a939736502c23" + integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler "^0.20.2" + +react-is@^16.8.1: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-lifecycles-compat@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" + integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== + +react-popper@^1.3.7: + version "1.3.11" + resolved "https://registry.yarnpkg.com/react-popper/-/react-popper-1.3.11.tgz#a2cc3f0a67b75b66cfa62d2c409f9dd1fcc71ffd" + integrity sha512-VSA/bS+pSndSF2fiasHK/PTEEAyOpX60+H5EPAjoArr8JGm+oihu4UbrqcEBpQibJxBVCpYyjAX7abJ+7DoYVg== + dependencies: + "@babel/runtime" "^7.1.2" + "@hypnosphi/create-react-context" "^0.3.1" + deep-equal "^1.1.1" + popper.js "^1.14.4" + prop-types "^15.6.1" + typed-styles "^0.0.7" + warning "^4.0.2" + +react-transition-group@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-2.9.0.tgz#df9cdb025796211151a436c69a8f3b97b5b07c8d" + integrity sha512-+HzNTCHpeQyl4MJ/bdE0u6XRMe9+XG/+aL4mCxVN4DnPBQ0/5bfHWPDuOZUzYdMj94daZaZdCCc1Dzt9R/xSSg== + dependencies: + dom-helpers "^3.4.0" + loose-envify "^1.4.0" + prop-types "^15.6.2" + react-lifecycles-compat "^3.0.4" + +react@^17.0.1: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react/-/react-17.0.2.tgz#d0b5cc516d29eb3eee383f75b62864cfb6800037" + integrity 
sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + +rechoir@^0.7.0: + version "0.7.1" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.7.1.tgz#9478a96a1ca135b5e88fc027f03ee92d6c645686" + integrity sha512-/njmZ8s1wVeR6pjTZ+0nCnv8SpZNRMT2D1RLOJQESlYFDBvwpTA4KWJpZ+sBJ4+vhjILRcK7JIFdGCdxEAAitg== + dependencies: + resolve "^1.9.0" + +regenerator-runtime@^0.13.4: + version "0.13.7" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz#cac2dacc8a1ea675feaabaeb8ae833898ae46f55" + integrity sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew== + +regexp.prototype.flags@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz#7ef352ae8d159e758c0eadca6f8fcb4eef07be26" + integrity sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + +registry-auth-token@^4.0.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-4.2.1.tgz#6d7b4006441918972ccd5fedcd41dc322c79b250" + integrity sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw== + dependencies: + rc "^1.2.8" + +registry-url@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-5.1.0.tgz#e98334b50d5434b81136b44ec638d9c2009c5009" + integrity sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw== + dependencies: + rc "^1.2.8" + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= + +resize-observer-polyfill@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" + integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== + +resolve-cwd@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d" + integrity sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg== + dependencies: + resolve-from "^5.0.0" + +resolve-from@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" + integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== + +resolve@^1.9.0: + version "1.20.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" + integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== + dependencies: + is-core-module "^2.2.0" + path-parse "^1.0.6" + +responselike@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= + dependencies: + lowercase-keys "^1.0.0" + +restore-cursor@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +run-async@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455" + integrity sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ== + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +rxjs@^6.6.0: + version "6.6.7" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.7.tgz#90ac018acabf491bf65044235d5863c4dab804c9" + integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ== + dependencies: + tslib "^1.9.0" + +safe-buffer@^5.1.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +"safer-buffer@>= 2.1.2 < 3": + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sanitize-html@~2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/sanitize-html/-/sanitize-html-2.3.3.tgz#3db382c9a621cce4c46d90f10c64f1e9da9e8353" + integrity sha512-DCFXPt7Di0c6JUnlT90eIgrjs6TsJl/8HYU3KLdmrVclFN4O0heTcVbJiMa23OKVr6aR051XYtsgd8EWwEBwUA== + dependencies: + deepmerge "^4.2.2" + escape-string-regexp "^4.0.0" + htmlparser2 "^6.0.0" + is-plain-object "^5.0.0" + klona "^2.0.3" + parse-srcset "^1.0.2" + postcss "^8.0.2" + +scheduler@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" + integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + +schema-utils@^2.6.5: + version "2.7.1" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.7.1.tgz#1ca4f32d1b24c590c203b8e7a50bf0ea4cd394d7" + integrity sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg== + dependencies: + "@types/json-schema" "^7.0.5" + ajv "^6.12.4" + ajv-keywords "^3.5.2" + +schema-utils@^3.0.0, schema-utils@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.1.1.tgz#bc74c4b6b6995c1d88f76a8b77bea7219e0c8281" + 
integrity sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw== + dependencies: + "@types/json-schema" "^7.0.8" + ajv "^6.12.5" + ajv-keywords "^3.5.2" + +semver@^5.4.1: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +semver@^6.0.0, semver@^6.2.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + +semver@^7.3.2, semver@^7.3.5: + version "7.3.5" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" + integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== + dependencies: + lru-cache "^6.0.0" + +serialize-javascript@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-5.0.1.tgz#7886ec848049a462467a97d3d918ebb2aaf934f4" + integrity sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA== + dependencies: + randombytes "^2.1.0" + +serialize-javascript@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" + integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== + dependencies: + randombytes "^2.1.0" + +shallow-clone@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-3.0.1.tgz#8f2981ad92531f55035b01fb230769a40e02efa3" + integrity sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA== + dependencies: + kind-of "^6.0.2" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +signal-exit@^3.0.2, signal-exit@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +sort-object-keys@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/sort-object-keys/-/sort-object-keys-1.1.3.tgz#bff833fe85cab147b34742e45863453c1e190b45" + integrity sha512-855pvK+VkU7PaKYPc+Jjnmt4EzejQHyhhF33q31qG8x7maDzkeFhAAThdCYay11CISO+qAMwjOBP+fPZe0IPyg== + +sort-package-json@~1.44.0: + version "1.44.0" + resolved 
"https://registry.yarnpkg.com/sort-package-json/-/sort-package-json-1.44.0.tgz#470330be868f8a524a4607b26f2a0233e93d8b6d" + integrity sha512-u9GUZvpavUCXV5SbEqXu9FRbsJrYU6WM10r3zA0gymGPufK5X82MblCLh9GW9l46pXKEZvK+FA3eVTqC4oMp4A== + dependencies: + detect-indent "^6.0.0" + detect-newline "3.1.0" + git-hooks-list "1.0.3" + globby "10.0.0" + is-plain-obj "2.1.0" + sort-object-keys "^1.1.3" + +source-list-map@^2.0.0, source-list-map@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" + integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== + +source-map-js@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-0.6.2.tgz#0bb5de631b41cfbda6cfba8bd05a80efdfd2385e" + integrity sha512-/3GptzWzu0+0MBQFrDKzw/DvvMTUORvgY6k6jd/VS6iCR4RDTKWH6v6WPwQoUO8667uQEf9Oe38DxAYWY5F/Ug== + +source-map-support@~0.5.19: + version "0.5.19" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" + integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +source-map@~0.7.2: + version "0.7.3" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" + integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== + +ssri@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-8.0.1.tgz#638e4e439e2ffbd2cd289776d5ca457c4f51a2af" + integrity sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ== + dependencies: + minipass "^3.1.1" + +string-width@^4.1.0: + version "4.2.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.2.tgz#dafd4f9559a7585cfba529c6a0a4f73488ebd4c5" + integrity sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.0" + +strip-ansi@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" + integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== + dependencies: + ansi-regex "^5.0.0" + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +style-loader@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-2.0.0.tgz#9669602fd4690740eaaec137799a03addbbc393c" + integrity 
sha512-Z0gYUJmzZ6ZdRUqpg1r8GsaFKypE+3xAzuFeMuoHgjc9KZv3wMyCRjQIWEbhoFSq7+7yoHXySDJyyWQaPajeiQ== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.0.0, supports-color@^7.1.0, supports-color@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +supports-color@^8.0.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +svg-url-loader@~6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/svg-url-loader/-/svg-url-loader-6.0.0.tgz#b94861d9f6badfb8ca3e7d3ec4655c1bf732ac5d" + integrity sha512-Qr5SCKxyxKcRnvnVrO3iQj9EX/v40UiGEMshgegzV7vpo3yc+HexELOdtWcA3MKjL8IyZZ1zOdcILmDEa/8JJQ== + dependencies: + file-loader "~6.0.0" + loader-utils "~2.0.0" + +tapable@^2.1.1, tapable@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.0.tgz#5c373d281d9c672848213d0e037d1c4165ab426b" + integrity sha512-FBk4IesMV1rBxX2tfiK8RAmogtWn53puLOQlvO8XuwlgxcYbP4mVPS9Ph4aeamSyyVjOl24aYWAuc8U5kCVwMw== + +tar@^6.0.2: + version "6.1.11" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621" + integrity sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA== + dependencies: + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^3.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" + +terser-webpack-plugin@^4.1.0: + version "4.2.3" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-4.2.3.tgz#28daef4a83bd17c1db0297070adc07fc8cfc6a9a" + integrity sha512-jTgXh40RnvOrLQNgIkwEKnQ8rmHjHK4u+6UBEi+W+FPmvb+uo+chJXntKe7/3lW5mNysgSWD60KyesnhW8D6MQ== + dependencies: + cacache "^15.0.5" + find-cache-dir "^3.3.1" + jest-worker "^26.5.0" + p-limit "^3.0.2" + schema-utils "^3.0.0" + serialize-javascript "^5.0.1" + source-map "^0.6.1" + terser "^5.3.4" + webpack-sources "^1.4.3" + +terser-webpack-plugin@^5.1.3: + version "5.1.4" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.1.4.tgz#c369cf8a47aa9922bd0d8a94fe3d3da11a7678a1" + integrity sha512-C2WkFwstHDhVEmsmlCxrXUtVklS+Ir1A7twrYzrDrQQOIMOaVAYykaoo/Aq1K0QRkMoY2hhvDQY1cm4jnIMFwA== + dependencies: + jest-worker "^27.0.2" + p-limit "^3.1.0" + schema-utils "^3.0.0" + serialize-javascript "^6.0.0" + source-map "^0.6.1" + terser "^5.7.0" + +terser@^5.3.4, terser@^5.7.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.7.1.tgz#2dc7a61009b66bb638305cb2a824763b116bf784" + integrity sha512-b3e+d5JbHAe/JSjwsC3Zn55wsBIM7AsHLjKxT31kGCldgbpFePaFo+PiddtO6uwRZWRw7sPXmAN8dTW61xmnSg== + dependencies: + commander "^2.20.0" + source-map "~0.7.2" + source-map-support "~0.5.19" + +through@^2.3.6: + version "2.3.8" + resolved 
"https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +to-readable-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" + integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +to-string-loader@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/to-string-loader/-/to-string-loader-1.1.6.tgz#230529ccc63dd0ecca052a85e1fb82afe946b0ab" + integrity sha512-VNg62//PS1WfNwrK3n7t6wtK5Vdtx/qeYLLEioW46VMlYUwAYT6wnfB+OwS2FMTCalIHu0tk79D3RXX8ttmZTQ== + dependencies: + loader-utils "^1.0.0" + +tslib@^1.9.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tslib@~1.13.0: + version "1.13.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.13.0.tgz#c881e13cc7015894ed914862d276436fa9a47043" + integrity sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q== + +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== + +typed-styles@^0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/typed-styles/-/typed-styles-0.0.7.tgz#93392a008794c4595119ff62dde6809dbc40a3d9" + integrity sha512-pzP0PWoZUhsECYjABgCGQlRGL1n7tOHsgwYv3oIiEpJwGhFTuty/YNeduxQYzXXa3Ge5BdT6sHYIQYpl4uJ+5Q== + +typescript@~4.1.3: + version "4.1.6" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.1.6.tgz#1becd85d77567c3c741172339e93ce2e69932138" + integrity sha512-pxnwLxeb/Z5SP80JDRzVjh58KsM6jZHRAOtTpS7sXLS4ogXNKC9ANxHHZqLLeVHZN35jCtI4JdmLLbLiC1kBow== + +typestyle@^2.0.4: + version "2.1.0" + resolved "https://registry.yarnpkg.com/typestyle/-/typestyle-2.1.0.tgz#7c5cc567de72cd8bfb686813150b92791aaa7636" + integrity sha512-6uCYPdG4xWLeEcl9O0GtNFnNGhami+irKiLsXSuvWHC/aTS7wdj49WeikWAKN+xHN3b1hm+9v0svwwgSBhCsNA== + dependencies: + csstype "2.6.9" + free-style "3.1.0" + +unique-filename@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" + integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ== + dependencies: + unique-slug "^2.0.0" + +unique-slug@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c" + integrity 
sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w== + dependencies: + imurmurhash "^0.1.4" + +universalify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" + integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +url-loader@~4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-4.1.1.tgz#28505e905cae158cf07c92ca622d7f237e70a4e2" + integrity sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA== + dependencies: + loader-utils "^2.0.0" + mime-types "^2.1.27" + schema-utils "^3.0.0" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= + dependencies: + prepend-http "^2.0.0" + +url-parse@~1.5.1: + version "1.5.3" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.3.tgz#71c1303d38fb6639ade183c2992c8cc0686df862" + integrity sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +url@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + integrity sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE= + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +util-deprecate@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= + +v8-compile-cache@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz#2de19618c66dc247dcfb6f99338035d8245a2cee" + integrity sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA== + +warning@^4.0.2, warning@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" + integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w== + dependencies: + loose-envify "^1.0.0" + +watchpack@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.2.0.tgz#47d78f5415fe550ecd740f99fe2882323a58b1ce" + integrity sha512-up4YAn/XHgZHIxFBVCdlMiWDj6WaLKpwVeGQk2I5thdYxF/KmF0aaz6TfJZ/hfl1h/XlcDr7k1KH7ThDagpFaA== + dependencies: + glob-to-regexp "^0.4.1" + graceful-fs "^4.1.2" + +webpack-cli@^4.1.0: + version "4.7.2" + resolved "https://registry.yarnpkg.com/webpack-cli/-/webpack-cli-4.7.2.tgz#a718db600de6d3906a4357e059ae584a89f4c1a5" + integrity sha512-mEoLmnmOIZQNiRl0ebnjzQ74Hk0iKS5SiEEnpq3dRezoyR3yPaeQZCMCe+db4524pj1Pd5ghZXjT41KLzIhSLw== + dependencies: + "@discoveryjs/json-ext" "^0.5.0" + "@webpack-cli/configtest" "^1.0.4" + "@webpack-cli/info" "^1.3.0" + "@webpack-cli/serve" "^1.5.1" + colorette "^1.2.1" + commander "^7.0.0" + execa "^5.0.0" + fastest-levenshtein "^1.0.12" + import-local "^3.0.2" + interpret 
"^2.2.0" + rechoir "^0.7.0" + v8-compile-cache "^2.2.0" + webpack-merge "^5.7.3" + +webpack-merge@^5.1.2, webpack-merge@^5.7.3: + version "5.8.0" + resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-5.8.0.tgz#2b39dbf22af87776ad744c390223731d30a68f61" + integrity sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q== + dependencies: + clone-deep "^4.0.1" + wildcard "^2.0.0" + +webpack-sources@^1.1.0, webpack-sources@^1.4.3: + version "1.4.3" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933" + integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ== + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + +webpack-sources@^2.3.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-2.3.1.tgz#570de0af163949fe272233c2cefe1b56f74511fd" + integrity sha512-y9EI9AO42JjEcrTJFOYmVywVZdKVUfOvDUPsJea5GIr1JOEGFVqwlY2K098fFoIjOkDzHn2AjRvM8dsBZu+gCA== + dependencies: + source-list-map "^2.0.1" + source-map "^0.6.1" + +webpack@^5.3.1: + version "5.45.1" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.45.1.tgz#d78dcbeda18a872dc62b0455d3ed3dcfd1c886bb" + integrity sha512-68VT2ZgG9EHs6h6UxfV2SEYewA9BA3SOLSnC2NEbJJiEwbAiueDL033R1xX0jzjmXvMh0oSeKnKgbO2bDXIEyQ== + dependencies: + "@types/eslint-scope" "^3.7.0" + "@types/estree" "^0.0.50" + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/wasm-edit" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + acorn "^8.4.1" + browserslist "^4.14.5" + chrome-trace-event "^1.0.2" + enhanced-resolve "^5.8.0" + es-module-lexer "^0.7.1" + eslint-scope "5.1.1" + events "^3.2.0" + glob-to-regexp "^0.4.1" + graceful-fs "^4.2.4" + json-parse-better-errors "^1.0.2" + loader-runner "^4.2.0" + mime-types "^2.1.27" + neo-async "^2.6.2" + schema-utils "^3.1.0" + tapable "^2.1.1" + terser-webpack-plugin "^5.1.3" + watchpack "^2.2.0" + webpack-sources "^2.3.0" + +which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +wildcard@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/wildcard/-/wildcard-2.0.0.tgz#a77d20e5200c6faaac979e4b3aadc7b3dd7f8fec" + integrity sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw== + +worker-loader@^3.0.2: + version "3.0.8" + resolved "https://registry.yarnpkg.com/worker-loader/-/worker-loader-3.0.8.tgz#5fc5cda4a3d3163d9c274a4e3a811ce8b60dbb37" + integrity sha512-XQyQkIFeRVC7f7uRhFdNMe/iJOdO6zxAaR3EWbDp45v3mDhrTi+++oswKNxShUNjPC/1xUp5DB29YKLhFo129g== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +ws@^7.2.0: + version "7.5.3" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.3.tgz#160835b63c7d97bfab418fc1b8a9fced2ac01a74" + integrity sha512-kQ/dHIzuLrS6Je9+uv81ueZomEwH0qVYstcAQ4/Z93K8zeko9gtAbttJWzoC5ukqXY1PpoouV3+VSOqEAFt5wg== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity 
sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== diff --git a/ts/nni_manager/.eslintrc b/ts/nni_manager/.eslintrc new file mode 100644 index 0000000000000000000000000000000000000000..5cdba21a4f4b5786f64b5dc0dfec7037fd4e2948 --- /dev/null +++ b/ts/nni_manager/.eslintrc @@ -0,0 +1,45 @@ +{ + "env": { + "browser": true, + "node": true, + "es6": true + }, + "parser": "@typescript-eslint/parser", + "parserOptions": { + "ecmaVersion": 2018, + "sourceType": "module" + }, + "plugins": [ + "@typescript-eslint" + ], + "extends": [ + "eslint:recommended", + "plugin:@typescript-eslint/eslint-recommended", + "plugin:@typescript-eslint/recommended" + ], + "rules": { + "@typescript-eslint/no-explicit-any": 0, + "@typescript-eslint/no-namespace": 0, + "@typescript-eslint/consistent-type-assertions": 0, + "@typescript-eslint/no-inferrable-types": 0, + "no-inner-declarations": 0, + "@typescript-eslint/explicit-function-return-type": "error", + "@typescript-eslint/no-var-requires": 0, + "@typescript-eslint/no-non-null-assertion": 0, + + "@typescript-eslint/no-unused-vars": [ + "off", + { + "argsIgnorePattern": "^_" + } + ], + "@typescript-eslint/no-use-before-define": 0 + }, + "ignorePatterns": [ + "node_modules/", + "test/", + "dist/", + "types/", + "**/*.js" + ] +} diff --git a/ts/nni_manager/.gitignore b/ts/nni_manager/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d7f26254f898d7b95b75ff38f1365b4013f5a40e --- /dev/null +++ b/ts/nni_manager/.gitignore @@ -0,0 +1,8 @@ +# Build result +/dist + +# node modules +/node_modules + +# test files +.experiment.test diff --git a/ts/nni_manager/.mocharc.json b/ts/nni_manager/.mocharc.json new file mode 100644 index 0000000000000000000000000000000000000000..8c049e2515d53545bcf284f2d30d7ec7691ac644 --- /dev/null +++ b/ts/nni_manager/.mocharc.json @@ -0,0 +1,5 @@ +{ + "color": true, + "require": "test/register.js", + "timeout": "15s" +} diff --git a/ts/nni_manager/common/component.ts b/ts/nni_manager/common/component.ts new file mode 100644 index 0000000000000000000000000000000000000000..0d20d4eb923853224825d55b848e28dadc35e350 --- /dev/null +++ b/ts/nni_manager/common/component.ts @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import * as ioc from 'typescript-ioc'; + +const Inject: (...args: any[]) => any = ioc.Inject; +const Singleton: (target: Function) => void = ioc.Singleton; +const Container = ioc.Container; +const Provides = ioc.Provides; + +function get<T>(source: Function): T { + return ioc.Container.get(source) as T; +} + +export { Provides, Container, Inject, Singleton, get }; diff --git a/ts/nni_manager/common/datastore.ts b/ts/nni_manager/common/datastore.ts new file mode 100644 index 0000000000000000000000000000000000000000..eecd277699bd3804b18b454e252fcc87b51c977c --- /dev/null +++ b/ts/nni_manager/common/datastore.ts @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
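+
+// Usage sketch for the IoC helpers in common/component.ts above; `FooService` is a
+// hypothetical class used only for illustration:
+//
+//     import * as component from './component';
+//
+//     @component.Singleton
+//     class FooService { /* ... */ }
+//
+//     const foo: FooService = component.get<FooService>(FooService);
+//
+// component.get() simply forwards to typescript-ioc's Container.get(), so the same
+// instance is returned on every call for a @Singleton-decorated class.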
+ +import { ExperimentProfile, TrialJobStatistics } from './manager'; +import { TrialJobDetail, TrialJobStatus } from './trainingService'; + +type TrialJobEvent = TrialJobStatus | 'USER_TO_CANCEL' | 'ADD_CUSTOMIZED' | 'ADD_HYPERPARAMETER' | 'IMPORT_DATA'; +type MetricType = 'PERIODICAL' | 'FINAL' | 'CUSTOM' | 'REQUEST_PARAMETER'; + +interface ExperimentProfileRecord { + readonly timestamp: number; + readonly experimentId: number; + readonly revision: number; + readonly data: ExperimentProfile; +} + +interface TrialJobEventRecord { + readonly timestamp: number; + readonly trialJobId: string; + readonly event: TrialJobEvent; + readonly data?: string; + readonly logPath?: string; + readonly sequenceId?: number; + readonly message?: string; +} + +interface MetricData { + readonly parameter_id: string; + readonly trial_job_id: string; + readonly type: MetricType; + readonly sequence: number; + readonly value: any; +} + +interface MetricDataRecord { + readonly timestamp: number; + readonly trialJobId: string; + readonly parameterId: string; + readonly type: MetricType; + readonly sequence: number; + readonly data: any; +} + +interface TrialJobInfo { + trialJobId: string; + sequenceId?: number; + status: TrialJobStatus; + message?: string; + startTime?: number; + endTime?: number; + hyperParameters?: string[]; + logPath?: string; + finalMetricData?: MetricDataRecord[]; + stderrPath?: string; +} + +interface HyperParameterFormat { + parameter_source: string; + parameters: Record<string, any>; + parameter_id: number; +} + +interface ExportedDataFormat { + parameter: Record<string, any>; + value: Record<string, any>; + trialJobId: string; +} + +abstract class DataStore { + public abstract init(): Promise<void>; + public abstract close(): Promise<void>; + public abstract storeExperimentProfile(experimentProfile: ExperimentProfile): Promise<void>; + public abstract getExperimentProfile(experimentId: string): Promise<ExperimentProfile>; + public abstract storeTrialJobEvent( + event: TrialJobEvent, trialJobId: string, hyperParameter?: string, jobDetail?: TrialJobDetail): Promise<void>; + public abstract getTrialJobStatistics(): Promise<TrialJobStatistics[]>; + public abstract listTrialJobs(status?: TrialJobStatus): Promise<TrialJobInfo[]>; + public abstract getTrialJob(trialJobId: string): Promise<TrialJobInfo>; + public abstract storeMetricData(trialJobId: string, data: string): Promise<void>; + public abstract getMetricData(trialJobId?: string, metricType?: MetricType): Promise<MetricDataRecord[]>; + public abstract exportTrialHpConfigs(): Promise<string>; + public abstract getImportedData(): Promise<string[]>; +} + +abstract class Database { + public abstract init(createNew: boolean, dbDir: string): Promise<void>; + public abstract close(): Promise<void>; + public abstract storeExperimentProfile(experimentProfile: ExperimentProfile): Promise<void>; + public abstract queryExperimentProfile(experimentId: string, revision?: number): Promise<ExperimentProfileRecord[]>; + public abstract queryLatestExperimentProfile(experimentId: string): Promise<ExperimentProfileRecord>; + public abstract storeTrialJobEvent( + event: TrialJobEvent, trialJobId: string, timestamp: number, hyperParameter?: string, jobDetail?: TrialJobDetail): Promise<void>; + public abstract queryTrialJobEvent(trialJobId?: string, event?: TrialJobEvent): Promise<TrialJobEventRecord[]>; + public abstract storeMetricData(trialJobId: string, data: string): Promise<void>; + public abstract queryMetricData(trialJobId?: string, type?: MetricType): Promise<MetricDataRecord[]>; +} + +export { + DataStore, Database, TrialJobEvent, MetricType, MetricData, TrialJobInfo, + ExperimentProfileRecord, TrialJobEventRecord, MetricDataRecord, HyperParameterFormat, ExportedDataFormat +}; diff --git a/ts/nni_manager/common/errors.ts
b/ts/nni_manager/common/errors.ts new file mode 100644 index 0000000000000000000000000000000000000000..40140dbd566fcd2f3bd847d030483f142a983310 --- /dev/null +++ b/ts/nni_manager/common/errors.ts @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export namespace NNIErrorNames { + export const NOT_FOUND: string = 'NOT_FOUND'; + export const INVALID_JOB_DETAIL: string = 'NO_VALID_JOB_DETAIL_FOUND'; + export const RESOURCE_NOT_AVAILABLE: string = 'RESOURCE_NOT_AVAILABLE'; +} + +export class NNIError extends Error { + public cause!: Error | undefined; + constructor (name: string, message: string, err?: Error) { + super(message); + this.name = name; + if (err !== undefined) { + this.stack = err.stack; + } + this.cause = err; + } + + public static FromError(err: NNIError | Error | string, messagePrefix?: string): NNIError { + const msgPrefix: string = messagePrefix === undefined ? '' : messagePrefix; + if (err instanceof NNIError) { + if (err.message !== undefined) { + err.message = msgPrefix + err.message; + } + + return err; + } else if (typeof(err) === 'string') { + return new NNIError('', msgPrefix + err); + } else if (err instanceof Error) { + return new NNIError('', msgPrefix + err.message, err); + } else { + throw new Error(`Wrong instance type: ${typeof(err)}`); + } + } +} + +export class MethodNotImplementedError extends Error { + constructor() { + super('Method not implemented.'); + } +} diff --git a/ts/nni_manager/common/experimentConfig.ts b/ts/nni_manager/common/experimentConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..c0b42ed359c80301996169748a7c284bd803e3ff --- /dev/null +++ b/ts/nni_manager/common/experimentConfig.ts @@ -0,0 +1,247 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
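+
+// A minimal sketch of how NNIError.FromError in common/errors.ts above is meant to be
+// used (the surrounding call site is hypothetical):
+//
+//     try {
+//         await storeSomething();
+//     } catch (err) {
+//         throw NNIError.FromError(err as Error, 'Datastore error: ');
+//     }
+//
+// The wrapper prefixes the message, keeps the original error in `cause`, and reuses
+// its stack trace, so the root failure stays visible in the log.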
+ +import assert from 'assert'; + +import { KubeflowOperator, OperatorApiVersion } from '../training_service/kubernetes/kubeflow/kubeflowConfig'; +import { KubernetesStorageKind } from '../training_service/kubernetes/kubernetesConfig'; + +export interface TrainingServiceConfig { + platform: string; + trialCommand: string; + trialCodeDirectory: string; + trialGpuNumber?: number; + nniManagerIp?: string; + + // FIXME + // "debug" is only used by openpai to decide whether to check the remote nni version + // it would be better to check whenever the local nni version is not "dev" + // better yet, check the version before launching the experiment and let the user confirm + // log level is currently handled by the global logging module and has nothing to do with this + debug?: boolean; +} + +/* Local */ + +export interface LocalConfig extends TrainingServiceConfig { + platform: 'local'; + useActiveGpu?: boolean; + maxTrialNumberPerGpu: number; + gpuIndices?: number[]; + reuseMode: boolean; +} + +/* Remote */ + +export interface RemoteMachineConfig { + host: string; + port: number; + user: string; + password?: string; + sshKeyFile: string; + sshPassphrase?: string; + useActiveGpu: boolean; + maxTrialNumberPerGpu: number; + gpuIndices?: number[]; + pythonPath?: string; +} + +export interface RemoteConfig extends TrainingServiceConfig { + platform: 'remote'; + machineList: RemoteMachineConfig[]; + reuseMode: boolean; +} + +/* OpenPAI */ + +export interface OpenpaiConfig extends TrainingServiceConfig { + platform: 'openpai'; + host: string; + username: string; + token: string; + trialCpuNumber: number; + trialMemorySize: string; + storageConfigName: string; + dockerImage: string; + virtualCluster?: string; + localStorageMountPoint: string; + containerStorageMountPoint: string; + reuseMode: boolean; + openpaiConfig?: object; +} + +/* AML */ + +export interface AmlConfig extends TrainingServiceConfig { + platform: 'aml'; + subscriptionId: string; + resourceGroup: string; + workspaceName: string; + computeTarget: string; + dockerImage: string; + maxTrialNumberPerGpu: number; +} + + +/* Alibaba PAI DLC */ +export interface DlcConfig extends TrainingServiceConfig { + platform: 'dlc'; + type: string; + image: string; + jobType: string; + podCount: number; + ecsSpec: string; + region: string; + nasDataSourceId: string; + accessKeyId: string; + accessKeySecret: string; + localStorageMountPoint: string; + containerStorageMountPoint: string; +} +/* Kubeflow */ + +export interface KubernetesStorageConfig { + storageType: string; + server?: string; + path?: string; + azureAccount?: string; + azureShare?: string; + keyVaultName?: string; + keyVaultKey?: string; +} + +export interface KubeflowRoleConfig { + replicas: number; + command: string; + gpuNumber: number; + cpuNumber: number; + memorySize: string | number; + dockerImage: string; + codeDirectory: string; + privateRegistryAuthPath?: string; +} + +export interface KubeflowConfig extends TrainingServiceConfig { + platform: 'kubeflow'; + operator: KubeflowOperator; + apiVersion: OperatorApiVersion; + storage: KubernetesStorageConfig; + worker?: KubeflowRoleConfig; + ps?: KubeflowRoleConfig; + master?: KubeflowRoleConfig; + reuseMode: boolean; + maxTrialNumberPerGpu?: number; +} + +export interface FrameworkControllerTaskRoleConfig { + name: string; + dockerImage: string; + taskNumber: number; + command: string; + gpuNumber: number; + cpuNumber: number; + memorySize: string | number; + frameworkAttemptCompletionPolicy: { + minFailedTaskCount: number;
minSucceedTaskCount: number; + }; + privateRegistryAuthPath?: string; +} + +export interface FrameworkControllerConfig extends TrainingServiceConfig { + platform: 'frameworkcontroller'; + storage: KubernetesStorageConfig; + serviceAccountName: string; + taskRoles: FrameworkControllerTaskRoleConfig[]; + reuseMode: boolean; + maxTrialNumberPerGpu?: number; + namespace?: 'default'; + apiVersion?: string; +} + +/* shared storage */ + +export interface SharedStorageConfig { + storageType: string; + localMountPoint: string; + remoteMountPoint: string; + localMounted: string; +} + +export interface NfsConfig extends SharedStorageConfig { + storageType: 'NFS'; + nfsServer: string; + exportedDirectory: string; +} + +export interface AzureBlobConfig extends SharedStorageConfig { + storageAccountName: string; + storageAccountKey?: string; + containerName: string; +} + +/* common */ + +export interface AlgorithmConfig { + name?: string; + className?: string; + codeDirectory?: string; + classArgs?: object; +} + +export interface ExperimentConfig { + experimentName?: string; + // searchSpaceFile (handled in python part) + searchSpace: any; + trialCommand: string; + trialCodeDirectory: string; + trialConcurrency: number; + trialGpuNumber?: number; + maxExperimentDuration?: string | number; + maxTrialNumber?: number; + maxTrialDuration?: string | number; + nniManagerIp?: string; + // useAnnotation (handled in python part) + debug: boolean; + logLevel?: string; + experimentWorkingDirectory?: string; + tunerGpuIndices?: number[]; + tuner?: AlgorithmConfig; + assessor?: AlgorithmConfig; + advisor?: AlgorithmConfig; + trainingService: TrainingServiceConfig | TrainingServiceConfig[]; + sharedStorage?: SharedStorageConfig; + deprecated?: any; // configs that are not yet natively supported by v2 (workaround) +} + +/* util functions */ + +const timeUnits = { d: 24 * 3600, h: 3600, m: 60, s: 1 }; +const sizeUnits = { tb: 1024 ** 4, gb: 1024 ** 3, mb: 1024 ** 2, kb: 1024, b: 1 }; + +function toUnit(value: string | number, targetUnit: string, allUnits: any): number { + if (typeof value === 'number') { + return value; + } + value = value.toLowerCase(); + for (const [unit, factor] of Object.entries(allUnits)) { + if (value.endsWith(unit)) { + const digits = value.slice(0, -unit.length); + const num = Number(digits) * (factor as number); + return Math.ceil(num / allUnits[targetUnit]); + } + } + throw new Error(`Bad unit in "${value}"`); +} + +export function toSeconds(time: string | number): number { + return toUnit(time, 's', timeUnits); +} + +export function toMegaBytes(size: string | number): number { + return toUnit(size, 'mb', sizeUnits); +} + +export function toCudaVisibleDevices(gpuIndices?: number[]): string { + return gpuIndices === undefined ? '' : gpuIndices.join(','); +} diff --git a/ts/nni_manager/common/experimentManager.ts b/ts/nni_manager/common/experimentManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..313c75a92f550adf3a94057766471ce652cc2cc2 --- /dev/null +++ b/ts/nni_manager/common/experimentManager.ts @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
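+
+// Expected behavior of the unit helpers at the end of experimentConfig.ts above
+// (values worked out from timeUnits/sizeUnits by hand, not taken from a test suite):
+//
+//     toSeconds('2h')     // => 7200
+//     toSeconds('90m')    // => 5400
+//     toMegaBytes('1gb')  // => 1024
+//     toMegaBytes(512)    // => 512 (plain numbers pass through unchanged)
+//
+// Results are rounded up to whole target units with Math.ceil.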
+ +abstract class ExperimentManager { + public abstract getExperimentsInfo(): Promise<JSON>; + public abstract setExperimentPath(newPath: string): void; + public abstract setExperimentInfo(experimentId: string, key: string, value: any): void; + public abstract stop(): Promise<void>; +} + +export {ExperimentManager}; diff --git a/ts/nni_manager/common/experimentStartupInfo.ts b/ts/nni_manager/common/experimentStartupInfo.ts new file mode 100644 index 0000000000000000000000000000000000000000..eb17b531ae3b60bfccfc1ae46a2aff424e805558 --- /dev/null +++ b/ts/nni_manager/common/experimentStartupInfo.ts @@ -0,0 +1,131 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import os from 'os'; +import path from 'path'; + +const API_ROOT_URL: string = '/api/v1/nni'; + +let singleton: ExperimentStartupInfo | null = null; + +export class ExperimentStartupInfo { + + public experimentId: string = ''; + public newExperiment: boolean = true; + public basePort: number = -1; + public initialized: boolean = false; + public logDir: string = ''; + public logLevel: string = ''; + public readonly: boolean = false; + public dispatcherPipe: string | null = null; + public platform: string = ''; + public urlprefix: string = ''; + + constructor( + newExperiment: boolean, + experimentId: string, + basePort: number, + platform: string, + logDir?: string, + logLevel?: string, + readonly?: boolean, + dispatcherPipe?: string, + urlprefix?: string) { + this.newExperiment = newExperiment; + this.experimentId = experimentId; + this.basePort = basePort; + this.platform = platform; + + if (logDir !== undefined && logDir.length > 0) { + this.logDir = path.join(path.normalize(logDir), experimentId); + } else { + this.logDir = path.join(os.homedir(), 'nni-experiments', experimentId); + } + + if (logLevel !== undefined && logLevel.length > 1) { + this.logLevel = logLevel; + } + + if (readonly !== undefined) { + this.readonly = readonly; + } + + if (dispatcherPipe != undefined && dispatcherPipe.length > 0) { + this.dispatcherPipe = dispatcherPipe; + } + + if(urlprefix != undefined && urlprefix.length > 0){ + this.urlprefix = urlprefix; + } + } + + public get apiRootUrl(): string { + return this.urlprefix === '' ?
API_ROOT_URL : `/${this.urlprefix}${API_ROOT_URL}`; + } + + public static getInstance(): ExperimentStartupInfo { + assert(singleton !== null); + return singleton!; + } +} + +export function getExperimentStartupInfo(): ExperimentStartupInfo { + return ExperimentStartupInfo.getInstance(); +} + +export function setExperimentStartupInfo( + newExperiment: boolean, + experimentId: string, + basePort: number, + platform: string, + logDir?: string, + logLevel?: string, + readonly?: boolean, + dispatcherPipe?: string, + urlprefix?: string): void { + singleton = new ExperimentStartupInfo( + newExperiment, + experimentId, + basePort, + platform, + logDir, + logLevel, + readonly, + dispatcherPipe, + urlprefix + ); +} + +export function getExperimentId(): string { + return getExperimentStartupInfo().experimentId; +} + +export function getBasePort(): number { + return getExperimentStartupInfo().basePort; +} + +export function isNewExperiment(): boolean { + return getExperimentStartupInfo().newExperiment; +} + +export function getPlatform(): string { + return getExperimentStartupInfo().platform; +} + +export function isReadonly(): boolean { + return getExperimentStartupInfo().readonly; +} + +export function getDispatcherPipe(): string | null { + return getExperimentStartupInfo().dispatcherPipe; +} + +export function getAPIRootUrl(): string { + return getExperimentStartupInfo().apiRootUrl; +} + +export function getPrefixUrl(): string { + const prefix = getExperimentStartupInfo().urlprefix === '' ? '' : `/${getExperimentStartupInfo().urlprefix}`; + return prefix; +} diff --git a/ts/nni_manager/common/log.ts b/ts/nni_manager/common/log.ts new file mode 100644 index 0000000000000000000000000000000000000000..f31940b75a12d86b51c6a1eb2d115375b079ed67 --- /dev/null +++ b/ts/nni_manager/common/log.ts @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
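+
+// Sketch of the startup-info flow in common/experimentStartupInfo.ts above
+// (experiment id and port are illustrative):
+//
+//     setExperimentStartupInfo(true, 'GCpTnM4k', 8080, 'local', undefined, 'info');
+//     getExperimentId();   // => 'GCpTnM4k'
+//     getBasePort();       // => 8080
+//     getAPIRootUrl();     // => '/api/v1/nni', or '/<prefix>/api/v1/nni' when a urlprefix is set
+//
+// getInstance() asserts that setExperimentStartupInfo() was called first, so the
+// setter must run during process bootstrap before any getter is used.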
+ +import fs from 'fs'; +import { Writable } from 'stream'; +import util from 'util'; + +/* log level constants */ + +export const DEBUG = 10; +export const INFO = 20; +export const WARNING = 30; +export const ERROR = 40; +export const CRITICAL = 50; + +export const TRACE = 1; +export const FATAL = 50; + +const levelNames = new Map<number, string>([ + [CRITICAL, 'CRITICAL'], + [ERROR, 'ERROR'], + [WARNING, 'WARNING'], + [INFO, 'INFO'], + [DEBUG, 'DEBUG'], + [TRACE, 'TRACE'], +]); + +/* global states */ + +let logLevel: number = 0; +const loggers = new Map<string, Logger>(); + +/* major api */ + +export class Logger { + private name: string; + + constructor(name: string = 'root') { + this.name = name; + } + + public trace(...args: any[]): void { + this.log(TRACE, args); + } + + public debug(...args: any[]): void { + this.log(DEBUG, args); + } + + public info(...args: any[]): void { + this.log(INFO, args); + } + + public warning(...args: any[]): void { + this.log(WARNING, args); + } + + public error(...args: any[]): void { + this.log(ERROR, args); + } + + public critical(...args: any[]): void { + this.log(CRITICAL, args); + } + + public fatal(...args: any[]): void { + this.log(FATAL, args); + } + + private log(level: number, args: any[]): void { + const logFile: Writable | undefined = (global as any).logFile; + if (level < logLevel) { + return; + } + + const zeroPad = (num: number): string => num.toString().padStart(2, '0'); + const now = new Date(); + const date = now.getFullYear() + '-' + zeroPad(now.getMonth() + 1) + '-' + zeroPad(now.getDate()); + const time = zeroPad(now.getHours()) + ':' + zeroPad(now.getMinutes()) + ':' + zeroPad(now.getSeconds()); + const datetime = date + ' ' + time; + + const levelName = levelNames.has(level) ? levelNames.get(level) : level.toString(); + + const message = args.map(arg => (typeof arg === 'string' ? arg : util.inspect(arg))).join(' '); + + const record = `[${datetime}] ${levelName} (${this.name}) ${message}\n`; + + if (logFile === undefined) { + console.log(record); + } else { + logFile.write(record); + } + } +} + +export function getLogger(name: string = 'root'): Logger { + let logger = loggers.get(name); + if (logger === undefined) { + logger = new Logger(name); + loggers.set(name, logger); + } + return logger; +} + +/* management functions */ + +export function setLogLevel(levelName: string): void { + if (levelName) { + const level = module.exports[levelName.toUpperCase()]; + if (typeof level === 'number') { + logLevel = level; + } else { + console.log('[ERROR] Bad log level:', levelName); + getLogger('logging').error('Bad log level:', levelName); + } + } +} + +export function startLogging(logPath: string): void { + (global as any).logFile = fs.createWriteStream(logPath, { + flags: 'a+', + encoding: 'utf8', + autoClose: true + }); +} + +export function stopLogging(): void { + if ((global as any).logFile !== undefined) { + (global as any).logFile.end(); + (global as any).logFile = undefined; + } +} diff --git a/ts/nni_manager/common/manager.ts b/ts/nni_manager/common/manager.ts new file mode 100644 index 0000000000000000000000000000000000000000..6f0908301e9a80146f47046b25242a565b3f21fc --- /dev/null +++ b/ts/nni_manager/common/manager.ts @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
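+
+// Logging sketch for common/log.ts above (path and timestamp are illustrative):
+//
+//     setLogLevel('debug');
+//     startLogging('/tmp/nnimanager.log');
+//     const log = getLogger('NNIManager');
+//     log.info('experiment started, port:', 8080);
+//     // appends: [2021-07-21 12:00:00] INFO (NNIManager) experiment started, port: 8080
+//
+// Loggers are cached by name, so every getLogger('NNIManager') call returns the same
+// instance; records go to the console until startLogging() opens the log file.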
+ +import { MetricDataRecord, MetricType, TrialJobInfo } from './datastore'; +import { TrialJobStatus } from './trainingService'; +import { ExperimentConfig } from './experimentConfig'; + +type ProfileUpdateType = 'TRIAL_CONCURRENCY' | 'MAX_EXEC_DURATION' | 'SEARCH_SPACE' | 'MAX_TRIAL_NUM'; +type ExperimentStatus = 'INITIALIZED' | 'RUNNING' | 'ERROR' | 'STOPPING' | 'STOPPED' | 'DONE' | 'NO_MORE_TRIAL' | 'TUNER_NO_MORE_TRIAL' | 'VIEWED'; +namespace ExperimentStartUpMode { + export const NEW = 'new'; + export const RESUME = 'resume'; +} + +interface ExperimentProfile { + params: ExperimentConfig; + id: string; + execDuration: number; + logDir: string; + startTime: number; + endTime?: number; + nextSequenceId: number; + revision: number; +} + +interface TrialJobStatistics { + trialJobStatus: TrialJobStatus; + trialJobNumber: number; +} + +interface NNIManagerStatus { + status: ExperimentStatus; + errors: string[]; +} + +abstract class Manager { + public abstract startExperiment(experimentConfig: ExperimentConfig): Promise<string>; + public abstract resumeExperiment(readonly: boolean): Promise<void>; + public abstract stopExperiment(): Promise<void>; + public abstract stopExperimentTopHalf(): Promise<void>; + public abstract stopExperimentBottomHalf(): Promise<void>; + public abstract getExperimentProfile(): Promise<ExperimentProfile>; + public abstract updateExperimentProfile(experimentProfile: ExperimentProfile, updateType: ProfileUpdateType): Promise<void>; + public abstract importData(data: string): Promise<void>; + public abstract getImportedData(): Promise<string[]>; + public abstract exportData(): Promise<string>; + + public abstract addCustomizedTrialJob(hyperParams: string): Promise<number>; + public abstract cancelTrialJobByUser(trialJobId: string): Promise<void>; + + public abstract listTrialJobs(status?: TrialJobStatus): Promise<TrialJobInfo[]>; + public abstract getTrialJob(trialJobId: string): Promise<TrialJobInfo>; + public abstract setClusterMetadata(key: string, value: string): Promise<void>; + public abstract getClusterMetadata(key: string): Promise<string>; + + public abstract getMetricData(trialJobId?: string, metricType?: MetricType): Promise<MetricDataRecord[]>; + public abstract getMetricDataByRange(minSeqId: number, maxSeqId: number): Promise<MetricDataRecord[]>; + public abstract getLatestMetricData(): Promise<MetricDataRecord[]>; + + public abstract getTrialFile(trialJobId: string, fileName: string): Promise<Buffer | string>; + + public abstract getTrialJobStatistics(): Promise<TrialJobStatistics[]>; + public abstract getStatus(): NNIManagerStatus; + + public abstract getTrialOutputLocalPath(trialJobId: string): Promise<string>; + public abstract fetchTrialOutput(trialJobId: string, subpath: string): Promise<void>; +} + +export { Manager, ExperimentConfig, ExperimentProfile, TrialJobStatistics, ProfileUpdateType, NNIManagerStatus, ExperimentStatus, ExperimentStartUpMode }; diff --git a/ts/nni_manager/common/nniConfig.ts b/ts/nni_manager/common/nniConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..583690e71317e7a565dfcc5e1458a7069f35feb4 --- /dev/null +++ b/ts/nni_manager/common/nniConfig.ts @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
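+
+// Shape sketch for the status object returned by Manager.getStatus() above
+// (field values are illustrative):
+//
+//     const status: NNIManagerStatus = {
+//         status: 'RUNNING',
+//         errors: [],
+//     };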
+ +import fs from 'fs'; +import path from 'path'; +import { promisify } from 'util'; +import { runPythonScript } from './pythonScript'; + +export interface CustomEnvironmentServiceConfig { + name: string; + nodeModulePath: string; + nodeClassName: string; +} + +const readFile = promisify(fs.readFile); + +async function readConfigFile(fileName: string): Promise<string> { + const script = 'import nni.runtime.config ; print(nni.runtime.config.get_config_directory())'; + const configDir = (await runPythonScript(script)).trim(); + const stream = await readFile(path.join(configDir, fileName)); + return stream.toString(); +} + +export async function getCustomEnvironmentServiceConfig(name: string): Promise<CustomEnvironmentServiceConfig | null> { + const configJson = await readConfigFile('training_services.json'); + const config = JSON.parse(configJson); + if (config[name] === undefined) { + return null; + } + return { + name, + nodeModulePath: config[name].nodeModulePath as string, + nodeClassName: config[name].nodeClassName as string, + } +} diff --git a/ts/nni_manager/common/observableTimer.ts b/ts/nni_manager/common/observableTimer.ts new file mode 100644 index 0000000000000000000000000000000000000000..59ec00c13716a1eb42deca21537cf132faabee9c --- /dev/null +++ b/ts/nni_manager/common/observableTimer.ts @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import rx from 'rx'; +import * as component from '../common/component'; + +@component.Singleton +class ObservableTimer { + private observableSource: rx.Observable<number>; + constructor() { + // TODO: move 100 and 1000 into constants class + this.observableSource = rx.Observable.timer(100, 1000).takeWhile(() => true); + } + + public subscribe(onNext?: (value: any) => void, onError?: (exception: any) => void, onCompleted?: () => void): Rx.IDisposable { + return this.observableSource.subscribe(onNext, onError, onCompleted); + } + + public unsubscribe( subscription: Rx.IDisposable): void { + if(typeof subscription !== 'undefined') { + subscription.dispose(); + } + } +} + +export { ObservableTimer }; diff --git a/ts/nni_manager/common/pythonScript.ts b/ts/nni_manager/common/pythonScript.ts new file mode 100644 index 0000000000000000000000000000000000000000..2387c018282363e5b5a8cfde3ef995cced15df97 --- /dev/null +++ b/ts/nni_manager/common/pythonScript.ts @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { spawn } from 'child_process'; +import { Logger, getLogger } from './log'; + +const logger: Logger = getLogger('pythonScript'); + +const python: string = process.platform === 'win32' ?
'python.exe' : 'python3'; + +export async function runPythonScript(script: string, logTag?: string): Promise<string> { + const proc = spawn(python, [ '-c', script ]); + + let stdout: string = ''; + let stderr: string = ''; + proc.stdout.on('data', (data: string) => { stdout += data; }); + proc.stderr.on('data', (data: string) => { stderr += data; }); + + const procPromise = new Promise<void>((resolve, reject) => { + proc.on('error', (err: Error) => { reject(err); }); + proc.on('exit', () => { resolve(); }); + }); + await procPromise; + + if (stderr) { + if (logTag) { + logger.warning(`Python script [${logTag}] has stderr:`, stderr); + } else { + logger.warning('Python script has stderr.'); + logger.warning(' script:', script); + logger.warning(' stderr:', stderr); + } + } + + return stdout; +} diff --git a/ts/nni_manager/common/restServer.ts b/ts/nni_manager/common/restServer.ts new file mode 100644 index 0000000000000000000000000000000000000000..3dff743e3bf9b61fe1a7b0437595e5202de83688 --- /dev/null +++ b/ts/nni_manager/common/restServer.ts @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import express from 'express'; +import http from 'http'; +import { Deferred } from 'ts-deferred'; +import { getLogger, Logger } from './log'; +import { getBasePort } from './experimentStartupInfo'; + + +/** + * Abstraction class to create a RestServer. + * A module that wants to use a RestServer can extend this abstract class + * and implement its own registerRestHandler() function to register routers. + */ +export abstract class RestServer { + private startTask!: Deferred<void>; + private stopTask!: Deferred<void>; + private server!: http.Server; + + /** The fields can be inherited by subclass */ + protected hostName: string = '0.0.0.0'; + protected port?: number; + protected app: express.Application = express(); + protected log: Logger = getLogger('RestServer'); + protected basePort?: number; + + constructor() { + this.port = getBasePort(); + assert(this.port && this.port > 1024); + } + + get endPoint(): string { + return `http://${this.hostName}:${this.port}`; + } + + public start(hostName?: string): Promise<void> { + this.log.info(`RestServer start`); + if (this.startTask !== undefined) { + return this.startTask.promise; + } + this.startTask = new Deferred<void>(); + + this.registerRestHandler(); + + if (hostName) { + this.hostName = hostName; + } + + this.log.info(`RestServer base port is ${this.port}`); + + this.server = this.app.listen(this.port as number, this.hostName).on('listening', () => { + this.startTask.resolve(); + }).on('error', (e: Error) => { + this.startTask.reject(e); + }); + + return this.startTask.promise; + } + + public stop(): Promise<void> { + if (this.stopTask !== undefined) { + return this.stopTask.promise; + } + this.stopTask = new Deferred<void>(); + + if (this.startTask === undefined) { + this.stopTask.resolve(); + + return this.stopTask.promise; + } else { + this.startTask.promise.then( + () => { // Started + //Stops the server from accepting new connections and keeps existing connections. + //This function is asynchronous, the server is finally closed when all connections + //are ended and the server emits a 'close' event.
+ +//Refer https://nodejs.org/docs/latest/api/net.html#net_server_close_callback + this.server.close().on('close', () => { + this.log.info('Rest server stopped.'); + this.stopTask.resolve(); + }).on('error', (e: Error) => { + this.log.error(`Error occurred stopping Rest server: ${e.message}`); + this.stopTask.reject(); + }); + }, + () => { // Start task rejected + this.stopTask.resolve(); + } + ); + } + this.stopTask.resolve(); + return this.stopTask.promise; + } + + /** + * Register REST handler, which is left for subclass to implement + */ + protected abstract registerRestHandler(): void; +} diff --git a/ts/nni_manager/common/shellUtils.ts b/ts/nni_manager/common/shellUtils.ts new file mode 100644 index 0000000000000000000000000000000000000000..f5b8c3a252192984be754411f255bf15632be562 --- /dev/null +++ b/ts/nni_manager/common/shellUtils.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// for readability +const singleQuote = "'"; +const doubleQuote = '"'; +const backtick = '`'; +const backslash = '\\'; +const doubleBacktick = '``'; +const doubleBackslash = '\\\\'; +const newline = '\n'; + +/** + * Convert a string into quoted and escaped string for shell script. + * This function supports multi-line strings as well. + * + * Examples: + * hello --> 'hello' + * C:\Program Files\$app --> 'C:\Program Files\$app' + * a'b"c$d\ne\f`g --> $'a\'b"c$d\ne\\f`g' (Linux & macOS) + * a'b"c$d\ne\f`g --> "a'b`"c`$d`ne\f``g" (Windows) + **/ +export function shellString(str: string): string { + return process.platform === 'win32' ? powershellString(str) : bashString(str); +} + +/** + * Convert a string into quoted and escaped string for bash script. It supports multi-line strings. + **/ +export function bashString(str: string): string { + // for readability of generated script, + // use ansi-c quoting when `str` contains single quote or newline, + // use single quotes otherwise + if (str.includes(singleQuote) || str.includes(newline)) { + str = str.replaceAll(backslash, doubleBackslash); + str = str.replaceAll(singleQuote, backslash + singleQuote); + str = str.replaceAll(newline, backslash + 'n'); + return '$' + singleQuote + str + singleQuote; + } else { + return singleQuote + str + singleQuote; + } +} + +/** + * Convert a string into quoted and escaped string for PowerShell script. It supports multi-line strings. + **/ +export function powershellString(str: string): string { + // for readability and robustness of generated script, + // use double quotes for multi-line string, + // use single quotes otherwise + if (str.includes(newline)) { + str = str.replaceAll(backtick, doubleBacktick); + str = str.replaceAll(doubleQuote, backtick + doubleQuote); + str = str.replaceAll(newline, backtick + 'n'); + str = str.replaceAll('$', backtick + '$'); + return doubleQuote + str + doubleQuote; + } else { + str = str.replaceAll(singleQuote, singleQuote + singleQuote); + return singleQuote + str + singleQuote; + } +} diff --git a/ts/nni_manager/common/tensorboardManager.ts b/ts/nni_manager/common/tensorboardManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..28319483d464fbb9de5d14bb482336f860b27902 --- /dev/null +++ b/ts/nni_manager/common/tensorboardManager.ts @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
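+
+// A few more expected outputs for common/shellUtils.ts above, derived by hand from
+// the replacement rules (not taken from a test suite):
+//
+//     bashString("plain")            // => 'plain'
+//     bashString("it's done")        // => $'it\'s done'
+//     powershellString("it's done")  // => 'it''s done'
+//     powershellString("a\nb")       // => "a`nb"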
+ +interface TensorboardParams { + trials: string; +} + +type TensorboardTaskStatus = 'RUNNING' | 'DOWNLOADING_DATA' | 'STOPPING' | 'STOPPED' | 'ERROR' | 'FAIL_DOWNLOAD_DATA'; + +interface TensorboardTaskInfo { + readonly id: string; + readonly status: TensorboardTaskStatus; + readonly trialJobIdList: string[]; + readonly trialLogDirectoryList: string[]; + readonly pid?: number; + readonly port?: string; +} + +abstract class TensorboardManager { + public abstract startTensorboardTask(tensorboardParams: TensorboardParams): Promise<TensorboardTaskInfo>; + public abstract getTensorboardTask(tensorboardTaskId: string): Promise<TensorboardTaskInfo>; + public abstract updateTensorboardTask(tensorboardTaskId: string): Promise<TensorboardTaskInfo>; + public abstract listTensorboardTasks(): Promise<TensorboardTaskInfo[]>; + public abstract stopTensorboardTask(tensorboardTaskId: string): Promise<TensorboardTaskInfo>; + public abstract stopAllTensorboardTask(): Promise<void>; + public abstract stop(): Promise<void>; +} + +export { + TensorboardParams, TensorboardTaskStatus, TensorboardTaskInfo, TensorboardManager +} diff --git a/ts/nni_manager/common/trainingService.ts b/ts/nni_manager/common/trainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..057a797932d41b2345ff9a9e47c7be9c0e410726 --- /dev/null +++ b/ts/nni_manager/common/trainingService.ts @@ -0,0 +1,122 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** + * define TrialJobStatus + */ +type TrialJobStatus = 'UNKNOWN' | 'WAITING' | 'RUNNING' | 'SUCCEEDED' | 'FAILED' | 'USER_CANCELED' | 'SYS_CANCELED' | 'EARLY_STOPPED'; + +interface TrainingServiceMetadata { + readonly key: string; + readonly value: string; +} + +interface HyperParameters { + readonly value: string; + readonly index: number; +} + +type PlacementConstraintType = 'None' | 'GPUNumber' | 'Device'; +interface PlacementConstraint{ + readonly type: PlacementConstraintType; + readonly gpus: Array<number> | Array<[string,number]>; + /** + * GPUNumber constraint is in form of Array<number>, e.g., [3] means it must be placed on a node of 3 GPUs + * + * Device constraint is in form of Array<[string,number]>, e.g., [('Node-0',1),('Node-1',0)] means it must be placed on + * Node-0's GPU-1 and Node-1's GPU-0 + */ +} +/** + * define TrialJobApplicationForm + */ +interface TrialJobApplicationForm { + readonly sequenceId: number; + readonly hyperParameters: HyperParameters; + readonly placementConstraint?: PlacementConstraint; +} + +interface TrialCommandContent { + readonly parameter_id: string; + readonly parameters: string; + readonly parameter_source: string; + readonly placement_constraint?: PlacementConstraint; +} + +/** + * define TrialJobDetail + */ +interface TrialJobDetail { + readonly id: string; + readonly status: TrialJobStatus; + readonly submitTime: number; + readonly startTime?: number; + readonly endTime?: number; + readonly tags?: string[]; + readonly url?: string; + readonly workingDirectory: string; + readonly form: TrialJobApplicationForm; + isEarlyStopped?: boolean; + message?: string; +} + +/** + * define TrialJobMetric + */ +interface TrialJobMetric { + readonly id: string; + readonly data: string; +} + +/** + * define TrainingServiceError + */ +class TrainingServiceError extends Error { + + private errCode: number; + + constructor(errorCode: number, errorMessage: string) { + super(errorMessage); + this.errCode = errorCode; + } + + get errorCode(): number { + return this.errCode; + } +} + +/** + * define TrainingService + */ +abstract class TrainingService { + public abstract listTrialJobs(): Promise<TrialJobDetail[]>; + public abstract
getTrialJob(trialJobId: string): Promise<TrialJobDetail>; + public abstract addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void; + public abstract removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void; + public abstract submitTrialJob(form: TrialJobApplicationForm): Promise<TrialJobDetail>; + public abstract updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise<TrialJobDetail>; + public abstract cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean): Promise<void>; + public abstract getTrialFile(trialJobId: string, fileName: string): Promise<Buffer | string>; + public abstract setClusterMetadata(key: string, value: string): Promise<void>; + public abstract getClusterMetadata(key: string): Promise<string>; + public abstract getTrialOutputLocalPath(trialJobId: string): Promise<string>; + public abstract fetchTrialOutput(trialJobId: string, subpath: string): Promise<void>; + public abstract cleanUp(): Promise<void>; + public abstract run(): Promise<void>; +} + +/** + * the ip of nni manager + */ +class NNIManagerIpConfig { + public readonly nniManagerIp: string; + constructor(nniManagerIp: string){ + this.nniManagerIp = nniManagerIp; + } +} + +export { + TrainingService, TrainingServiceError, TrialJobStatus, TrialJobApplicationForm, + TrainingServiceMetadata, TrialJobDetail, TrialJobMetric, HyperParameters, + NNIManagerIpConfig, PlacementConstraint, TrialCommandContent +}; diff --git a/ts/nni_manager/common/utils.ts b/ts/nni_manager/common/utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..74ae81d133c048e173043eb93dd2acd59b08094d --- /dev/null +++ b/ts/nni_manager/common/utils.ts @@ -0,0 +1,438 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import { randomBytes } from 'crypto'; +import cpp from 'child-process-promise'; +import cp from 'child_process'; +import { ChildProcess, spawn, StdioOptions } from 'child_process'; +import dgram from 'dgram'; +import fs from 'fs'; +import net from 'net'; +import os from 'os'; +import path from 'path'; +import * as timersPromises from 'timers/promises'; +import lockfile from 'lockfile'; +import { Deferred } from 'ts-deferred'; +import { Container } from 'typescript-ioc'; +import glob from 'glob'; + +import { Database, DataStore } from './datastore'; +import { getExperimentStartupInfo, setExperimentStartupInfo } from './experimentStartupInfo'; +import { ExperimentConfig, Manager } from './manager'; +import { ExperimentManager } from './experimentManager'; +import { HyperParameters, TrainingService, TrialJobStatus } from './trainingService'; + +function getExperimentRootDir(): string { + return getExperimentStartupInfo().logDir; +} + +function getLogDir(): string { + return path.join(getExperimentRootDir(), 'log'); +} + +function getLogLevel(): string { + return getExperimentStartupInfo().logLevel; +} + +function getDefaultDatabaseDir(): string { + return path.join(getExperimentRootDir(), 'db'); +} + +function getCheckpointDir(): string { + return path.join(getExperimentRootDir(), 'checkpoint'); +} + +function getExperimentsInfoPath(): string { + return path.join(os.homedir(), 'nni-experiments', '.experiment'); +} + +async function mkDirP(dirPath: string): Promise<void> { + await fs.promises.mkdir(dirPath, { recursive: true }); +} + +function mkDirPSync(dirPath: string): void { + fs.mkdirSync(dirPath, { recursive: true }); +} + +const delay = timersPromises.setTimeout; + +/** + * Convert index to character + * @param index index + * @returns a mapping character + */ +function charMap(index: number): number {
+ if (index < 26) { + return index + 97; + } else if (index < 52) { + return index - 26 + 65; + } else { + return index - 52 + 48; + } +} + +/** + * Generate a unique string by length + * @param len length of string + * @returns a unique string + */ +function uniqueString(len: number): string { + if (len === 0) { + return ''; + } + const byteLength: number = Math.ceil((Math.log2(52) + Math.log2(62) * (len - 1)) / 8); + let num: number = randomBytes(byteLength).reduce((a: number, b: number) => a * 256 + b, 0); + const codes: number[] = []; + codes.push(charMap(num % 52)); + num = Math.floor(num / 52); + for (let i: number = 1; i < len; i++) { + codes.push(charMap(num % 62)); + num = Math.floor(num / 62); + } + + return String.fromCharCode(...codes); +} + +function randomInt(max: number): number { + return Math.floor(Math.random() * max); +} + +function randomSelect<T>(a: T[]): T { + assert(a !== undefined); + + return a[Math.floor(Math.random() * a.length)]; +} + +function parseArg(names: string[]): string { + if (process.argv.length >= 4) { + for (let i: number = 2; i < process.argv.length - 1; i++) { + if (names.includes(process.argv[i])) { + return process.argv[i + 1]; + } + } + } + + return ''; +} + +function getCmdPy(): string { + let cmd = 'python3'; + if (process.platform === 'win32') { + cmd = 'python'; + } + return cmd; +} + +/** + * Generate the command line to start the automl algorithm(s): + * either the advisor, or a process that runs tuner and assessor + * + * @param expParams: experiment startup parameters + * + */ +function getMsgDispatcherCommand(expParams: ExperimentConfig): string { + const clonedParams = Object.assign({}, expParams); + delete clonedParams.searchSpace; + return `${getCmdPy()} -m nni --exp_params ${Buffer.from(JSON.stringify(clonedParams)).toString('base64')}`; +} + +/** + * Generate parameter file name based on HyperParameters object + * @param hyperParameters HyperParameters instance + */ +function generateParamFileName(hyperParameters: HyperParameters): string { + assert(hyperParameters !== undefined); + assert(hyperParameters.index >= 0); + + let paramFileName: string; + if (hyperParameters.index == 0) { + paramFileName = 'parameter.cfg'; + } else { + paramFileName = `parameter_${hyperParameters.index}.cfg` + } + return paramFileName; +} + +/** + * Initialize a pseudo experiment environment for unit test. + * Must be paired with `cleanupUnitTest()`. + */ +function prepareUnitTest(): void { + Container.snapshot(Database); + Container.snapshot(DataStore); + Container.snapshot(TrainingService); + Container.snapshot(Manager); + Container.snapshot(ExperimentManager); + + const logLevel: string = parseArg(['--log_level', '-ll']); + + setExperimentStartupInfo(true, 'unittest', 8080, 'unittest', undefined, logLevel); + mkDirPSync(getLogDir()); + + const sqliteFile: string = path.join(getDefaultDatabaseDir(), 'nni.sqlite'); + try { + fs.unlinkSync(sqliteFile); + } catch (err) { + // file does not exist, which is fine + } +} + +/** + * Clean up unit test pseudo experiment. + * Must be paired with `prepareUnitTest()`. + */ +function cleanupUnitTest(): void { + Container.restore(Manager); + Container.restore(TrainingService); + Container.restore(DataStore); + Container.restore(Database); + Container.restore(ExperimentManager); + const logLevel: string = parseArg(['--log_level', '-ll']); + setExperimentStartupInfo(true, 'unittest', 8080, 'unittest', undefined, logLevel); +} + +let cachedIpv4Address: string | null = null; + +/** + * Get IPv4 address of current machine.
+ */ +async function getIPV4Address(): Promise<string> { + if (cachedIpv4Address !== null) { + return cachedIpv4Address; + } + + // creates a "udp connection" to a non-existent target, and gets the local address of the connection. + // since udp is connectionless, this does not send actual packets. + const socket = dgram.createSocket('udp4'); + socket.connect(1, '192.0.2.0'); + for (let i = 0; i < 10; i++) { // wait for the system to initialize the "connection" + await timersPromises.setTimeout(1); + try { + cachedIpv4Address = socket.address().address; + socket.close(); + return cachedIpv4Address; + } catch (error) { + /* retry */ + } + } + + cachedIpv4Address = socket.address().address; // if it still fails, throw the error + socket.close(); + return cachedIpv4Address; +} + +/** + * Get the status of canceled jobs according to the hint isEarlyStopped + */ +function getJobCancelStatus(isEarlyStopped: boolean): TrialJobStatus { + return isEarlyStopped ? 'EARLY_STOPPED' : 'USER_CANCELED'; +} + +/** + * Utility method to count the files under a directory, recursively + * @param directory directory name + */ +function countFilesRecursively(directory: string): Promise<number> { + if (!fs.existsSync(directory)) { + throw Error(`Directory ${directory} doesn't exist`); + } + + const deferred: Deferred<number> = new Deferred<number>(); + + let timeoutId: NodeJS.Timer; + const delayTimeout: Promise<number> = new Promise((_resolve: Function, reject: Function): void => { + // Set timeout and reject the promise once reach timeout (5 seconds) + timeoutId = setTimeout(() => { + reject(new Error(`Timeout: path ${directory} has too many files`)); + }, 5000); + }); + + let fileCount: number = -1; + let cmd: string; + if (process.platform === "win32") { + cmd = `powershell "Get-ChildItem -Path ${directory} -Recurse -File | Measure-Object | %{$_.Count}"`; + } else { + cmd = `find ${directory} -type f | wc -l`; + } + cpp.exec(cmd).then((result) => { + if (result.stdout && parseInt(result.stdout)) { + fileCount = parseInt(result.stdout); + } + deferred.resolve(fileCount); + }); + return Promise.race([deferred.promise, delayTimeout]).finally(() => { + clearTimeout(timeoutId); + }); +} + +/** + * get the version of the current package + */ +async function getVersion(): Promise<string> { + const deferred: Deferred<string> = new Deferred<string>(); + import(path.join(__dirname, '..', 'package.json')).then((pkg) => { + deferred.resolve(pkg.version); + }).catch(() => { + deferred.resolve('999.0.0-developing'); + }); + return deferred.promise; +} + +/** + * run a command as a ChildProcess + */ +function getTunerProc(command: string, stdio: StdioOptions, newCwd: string, newEnv: any, newShell: boolean = true, isDetached: boolean = false): ChildProcess { + let cmd: string = command; + let arg: string[] = []; + if (process.platform === "win32") { + cmd = command.split(" ", 1)[0]; + arg = command.substr(cmd.length + 1).split(" "); + newShell = false; + isDetached = true; + } + const tunerProc: ChildProcess = spawn(cmd, arg, { + stdio, + cwd: newCwd, + env: newEnv, + shell: newShell, + detached: isDetached + }); + return tunerProc; +} + +/** + * check whether a process is alive + */ +async function isAlive(pid: any): Promise<boolean> { + const deferred: Deferred<boolean> = new Deferred<boolean>(); + let alive: boolean = false; + if (process.platform === 'win32') { + try { + const str = cp.execSync(`powershell.exe Get-Process -Id ${pid} -ErrorAction SilentlyContinue`).toString(); + if (str) { + alive = true; + } + } + catch (error) { + //ignore + } + } + else { + try { + await cpp.exec(`kill -0 ${pid}`); + alive = true; + } catch
(error) { + //ignore + } + } + deferred.resolve(alive); + return deferred.promise; +} + +/** + * kill a process + */ +async function killPid(pid: any): Promise<void> { + const deferred: Deferred<void> = new Deferred<void>(); + try { + if (process.platform === "win32") { + await cpp.exec(`cmd.exe /c taskkill /PID ${pid} /F`); + } + else { + await cpp.exec(`kill -9 ${pid}`); + } + } catch (error) { + // pid does not exist, do nothing here + } + deferred.resolve(); + return deferred.promise; +} + +function getNewLine(): string { + if (process.platform === "win32") { + return "\r\n"; + } + else { + return "\n"; + } +} + +/** + * Use '/' to join paths instead of '\' on all platforms + * @param paths + */ +function unixPathJoin(...paths: any[]): string { + const dir: string = paths.filter((path: any) => path !== '').join('/'); + if (dir === '') return '.'; + return dir; +} + +/** + * lock a file synchronously + */ +function withLockSync(func: Function, filePath: string, lockOpts: {[key: string]: any}, ...args: any): any { + const lockName = path.join(path.dirname(filePath), path.basename(filePath) + `.lock.${process.pid}`); + if (typeof lockOpts['stale'] === 'number'){ + const lockPath = path.join(path.dirname(filePath), path.basename(filePath) + '.lock.*'); + const lockFileNames: string[] = glob.sync(lockPath); + const canLock: boolean = lockFileNames.map((fileName) => { + return fs.existsSync(fileName) && Date.now() - fs.statSync(fileName).mtimeMs < lockOpts['stale']; + }).filter(unexpired=>unexpired === true).length === 0; + if (!canLock) { + throw new Error('File has been locked.'); + } + } + lockfile.lockSync(lockName, lockOpts); + const result = func(...args); + lockfile.unlockSync(lockName); + return result; +} + +async function isPortOpen(host: string, port: number): Promise<boolean> { + return new Promise((resolve, reject) => { + try{ + const stream = net.createConnection(port, host); + const id = setTimeout(() => { + stream.destroy(); + resolve(false); + }, 1000); + + stream.on('connect', () => { + clearTimeout(id); + stream.destroy(); + resolve(true); + }); + + stream.on('error', () => { + clearTimeout(id); + stream.destroy(); + resolve(false); + }); + } catch (error) { + reject(error); + } + }); +} + +async function getFreePort(host: string, start: number, end: number): Promise<number> { + if (start > end) { + throw new Error(`no more free port`); + } + if (await isPortOpen(host, start)) { + return await getFreePort(host, start + 1, end); + } else { + return start; + } +} + +export function importModule(modulePath: string): any { + module.paths.unshift(path.dirname(modulePath)); + return require(path.basename(modulePath)); +} + +export { + countFilesRecursively, generateParamFileName, getMsgDispatcherCommand, getCheckpointDir, getExperimentsInfoPath, + getLogDir, getExperimentRootDir, getJobCancelStatus, getDefaultDatabaseDir, getIPV4Address, unixPathJoin, withLockSync, getFreePort, isPortOpen, + mkDirP, mkDirPSync, delay, prepareUnitTest, parseArg, cleanupUnitTest, uniqueString, randomInt, randomSelect, getLogLevel, getVersion, getCmdPy, getTunerProc, isAlive, killPid, getNewLine +}; diff --git a/ts/nni_manager/config/adl/adaptdl-crd-v1.json b/ts/nni_manager/config/adl/adaptdl-crd-v1.json new file mode 100644 index 0000000000000000000000000000000000000000..368a1841680ed5f7b5457b7b53fa9e398fcc8684 --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdl-crd-v1.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "apiextensions.k8s.io/v1beta1", + "kind": "CustomResourceDefinition", + "metadata": { + "name":
"adaptdljobs.adaptdl.petuum.com" + }, + "spec": { + "group": "adaptdl.petuum.com", + "version": "v1", + "scope": "Namespaced", + "names": { + "plural": "adaptdljobs", + "singular": "adaptdljob", + "kind": "AdaptDLJob" + } + } +} diff --git a/ts/nni_manager/config/adl/adaptdl-nni-configmap-template.json b/ts/nni_manager/config/adl/adaptdl-nni-configmap-template.json new file mode 100644 index 0000000000000000000000000000000000000000..42a0e6ce7d8bfc02df79944bc913a91a32eb05a7 --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdl-nni-configmap-template.json @@ -0,0 +1,19 @@ +{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "", + "ownerReferences": [ + { + "apiVersion": "adaptdl.petuum.com/v1", + "kind": "AdaptDLJob", + "name": "", + "uid": "" + } + ] + }, + "data": { + "run.sh": "", + "cleanup.sh": "" + } +} diff --git a/ts/nni_manager/config/adl/adaptdl-pvc-template.json b/ts/nni_manager/config/adl/adaptdl-pvc-template.json new file mode 100644 index 0000000000000000000000000000000000000000..b98c1ef902a98ea240de0913276a4141443aeefd --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdl-pvc-template.json @@ -0,0 +1,27 @@ +{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "", + "ownerReferences": [ + { + "apiVersion": "adaptdl.petuum.com/v1", + "kind": "AdaptDLJob", + "name": "", + "uid": "" + } + ] + }, + "spec": { + "accessModes": [ + "ReadWriteMany" + ], + "resources": { + "requests": { + "storage": "" + } + }, + "storageClassName": "", + "volumeMode": "Filesystem" + } +} diff --git a/ts/nni_manager/config/adl/adaptdl-tensorboard-deployment-template.json b/ts/nni_manager/config/adl/adaptdl-tensorboard-deployment-template.json new file mode 100644 index 0000000000000000000000000000000000000000..30acc1408c0376c8bbcb0c2dc162942ea8a4750b --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdl-tensorboard-deployment-template.json @@ -0,0 +1,55 @@ +{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "", + "labels": { + "expId": "" + } + }, + "spec": { + "selector": { + "matchLabels": { + "app": "" + } + }, + "replicas": 1, + "template": { + "metadata": { + "labels": { + "app": "" + } + }, + "spec": { + "containers": [ + { + "command": ["tensorboard"], + "args": ["--host=0.0.0.0", "--logdir=/adaptdl/tensorboard", "--port=6006"], + "image": "tensorflow/tensorflow", + "name": "tensorboard", + "ports": [ + { + "containerPort": 6006 + } + ], + "volumeMounts": [ + { + "mountPath": "/adaptdl/tensorboard", + "name": "adaptdl-tensorboard-pvc", + "subPath": "adaptdl/tensorboard" + } + ] + } + ], + "volumes": [ + { + "name": "adaptdl-tensorboard-pvc", + "persistentVolumeClaim": { + "claimName": "" + } + } + ] + } + } + } +} \ No newline at end of file diff --git a/ts/nni_manager/config/adl/adaptdl-tensorboard-pvc-template.json b/ts/nni_manager/config/adl/adaptdl-tensorboard-pvc-template.json new file mode 100644 index 0000000000000000000000000000000000000000..a2230de16d0d43be0472c92ee6de10aceed50911 --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdl-tensorboard-pvc-template.json @@ -0,0 +1,27 @@ +{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "", + "uid": "" + } + ] + }, + "spec": { + "accessModes": [ + "ReadWriteMany" + ], + "resources": { + "requests": { + "storage": "" + } + }, + "storageClassName": "", + "volumeMode": "Filesystem" + } +} diff --git 
a/ts/nni_manager/config/adl/adaptdljob-template.json b/ts/nni_manager/config/adl/adaptdljob-template.json new file mode 100644 index 0000000000000000000000000000000000000000..462f561ca58540160b2f4a3db45ef06fd60347db --- /dev/null +++ b/ts/nni_manager/config/adl/adaptdljob-template.json @@ -0,0 +1,109 @@ +{ + "apiVersion": "adaptdl.petuum.com/v1", + "kind": "AdaptDLJob", + "metadata": { + "name": "", + "labels": { + "app": "", + "expId": "", + "trialId": "" + } + }, + "spec": { + "preemptible": false, + "template": { + "spec": { + "containers": [ + { + "lifecycle": + { + "preStop": + { + "exec": + { + "command": ["/cleanup.sh"] + } + } + }, + "command": ["/run.sh"], + "env": [ + { + "name": "ADAPTDL_CHECKPOINT_PATH", + "value": "/adaptdl/checkpoint" + }, + { + "name": "ADAPTDL_TENSORBOARD_LOGDIR", + "value": "/adaptdl/tensorboard" + }, + { + "name": "ADAPTDL_SHARE_PATH", + "value": "/adaptdl/share" + } + ], + "image": "", + "imagePullPolicy": "Always", + "name": "main", + "resources": { + "requests": { + "memory": "", + "cpu": "" + }, + "limits": { + "nvidia.com/gpu": 1 + } + }, + "volumeMounts": [ + { + "mountPath": "/adaptdl/checkpoint", + "name": "adaptdl-pvc", + "subPath": "adaptdl/checkpoint" + }, + { + "mountPath": "/adaptdl/share", + "name": "adaptdl-pvc", + "subPath": "adaptdl/share" + }, + { + "mountPath": "/adaptdl/tensorboard", + "name": "adaptdl-tensorboard-pvc", + "subPath": "adaptdl/tensorboard" + }, + { + "mountPath": "/cleanup.sh", + "name": "adaptdl-nni-configmap", + "subPath": "cleanup.sh" + }, + { + "mountPath": "/run.sh", + "name": "adaptdl-nni-configmap", + "subPath": "run.sh" + } + ] + } + ], + "imagePullSecrets": [], + "volumes": [ + { + "name": "adaptdl-pvc", + "persistentVolumeClaim": { + "claimName": "" + } + }, + { + "name": "adaptdl-tensorboard-pvc", + "persistentVolumeClaim": { + "claimName": "" + } + }, + { + "name": "adaptdl-nni-configmap", + "configMap": { + "name": "", + "defaultMode": 511 + } + } + ] + } + } + } +} diff --git a/ts/nni_manager/config/aml/amlUtil.py b/ts/nni_manager/config/aml/amlUtil.py new file mode 100644 index 0000000000000000000000000000000000000000..a5b2a6bc2dc19f3b4720051b0cd25ff7b43567b2 --- /dev/null +++ b/ts/nni_manager/config/aml/amlUtil.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
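+
+# NOTE: this helper is a long-running process spawned by the AML training
+# service; it submits one run and is then driven over stdin/stdout with the
+# line protocol handled in the loop below ('update_status', 'tracking_url',
+# 'stop', 'receive', 'command:...'). A sketch of an invocation, with every
+# argument value illustrative:
+#
+#   python3 amlUtil.py --subscription_id <sub> --resource_group <rg> \
+#       --workspace_name <ws> --compute_target <cluster> \
+#       --docker_image msranni/nni:latest --experiment_name nni_exp \
+#       --script_dir ./nni-trial --script_name install_nni.sh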
+ +import os +import sys +import time +import json +from argparse import ArgumentParser +from azureml.core import Experiment, RunConfiguration, ScriptRunConfig +from azureml.core.compute import ComputeTarget +from azureml.core.run import RUNNING_STATES, RunStatus, Run +from azureml.core import Workspace +from azureml.core.conda_dependencies import CondaDependencies + +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument('--subscription_id', help='the subscription id of aml') + parser.add_argument('--resource_group', help='the resource group of aml') + parser.add_argument('--workspace_name', help='the workspace name of aml') + parser.add_argument('--compute_target', help='the compute cluster name of aml') + parser.add_argument('--docker_image', help='the docker image of job') + parser.add_argument('--experiment_name', help='the experiment name') + parser.add_argument('--script_dir', help='script directory') + parser.add_argument('--script_name', help='script name') + args = parser.parse_args() + + ws = Workspace(args.subscription_id, args.resource_group, args.workspace_name) + compute_target = ComputeTarget(workspace=ws, name=args.compute_target) + experiment = Experiment(ws, args.experiment_name) + run_config = RunConfiguration() + run_config.environment.python.user_managed_dependencies = True + run_config.environment.docker.enabled = True + run_config.environment.docker.base_image = args.docker_image + run_config.target = compute_target + run_config.node_count = 1 + config = ScriptRunConfig(source_directory=args.script_dir, script=args.script_name, run_config=run_config) + run = experiment.submit(config) + print(run.get_details()["runId"]) + while True: + line = sys.stdin.readline().rstrip() + if line == 'update_status': + print('status:' + run.get_status()) + elif line == 'tracking_url': + print('tracking_url:' + run.get_portal_url()) + elif line == 'stop': + run.cancel() + loop_count = 0 + status = run.get_status() + # wait until the run is canceled + while status != 'Canceled': + if loop_count > 5: + print('stop_result:failed') + exit(0) + loop_count += 1 + time.sleep(500) + print('stop_result:success') + exit(0) + elif line == 'receive': + print('receive:' + json.dumps(run.get_metrics())) + elif line: + items = line.split(':') + if items[0] == 'command': + run.log('nni_manager', line[8:]) diff --git a/ts/nni_manager/config/dlc/dlcUtil.py b/ts/nni_manager/config/dlc/dlcUtil.py new file mode 100644 index 0000000000000000000000000000000000000000..333fc5e0788dcfe11e7596b42210847d5f955a1d --- /dev/null +++ b/ts/nni_manager/config/dlc/dlcUtil.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
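+
+# NOTE: like amlUtil.py, this helper is driven over stdin/stdout by the
+# training service: it submits a single PAI-DLC job, prints its id, then
+# answers 'update_status', 'tracking_url' and 'stop' requests in the loop
+# below. A sketch of an invocation, with every argument value illustrative:
+#
+#   python3 dlcUtil.py --type worker --image <registry>/nni:latest \
+#       --job_type TFJob --pod_count 1 --ecs_spec ecs.c6.large \
+#       --region cn-hangzhou --nas_data_source_id <dataset-id> \
+#       --access_key_id <ak> --access_key_secret <sk> \
+#       --experiment_name nni_exp --user_command 'python3 trial.py'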
+ +import os +import sys +import time +import json +from argparse import ArgumentParser +# ref: https://help.aliyun.com/document_detail/203290.html?spm=a2c4g.11186623.6.727.6f9b5db6bzJh4x +from alibabacloud_pai_dlc20201203.client import Client +from alibabacloud_tea_openapi.models import Config +from alibabacloud_pai_dlc20201203.models import * #CreateJobRequest, JobSpec + +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument('--type', help='the type of job spec') + parser.add_argument('--image', help='the docker image of job') + parser.add_argument('--job_type', choices=['TFJob', 'PyTorchJob'], help='the job type') + parser.add_argument('--pod_count', type=int, default=1, help='pod count') + parser.add_argument('--ecs_spec', help='ecs spec') + parser.add_argument('--region', help='region') + parser.add_argument('--nas_data_source_id', help='nas data_source_id of DLC dataset configuration') + parser.add_argument('--access_key_id', help='access_key_id') + parser.add_argument('--access_key_secret', help='access_key_secret') + parser.add_argument('--experiment_name', help='the experiment name') + parser.add_argument('--user_command', help='user command') + args = parser.parse_args() + + # init client + client = Client( + Config( + access_key_id=args.access_key_id, + access_key_secret=args.access_key_secret, + region_id=args.region, + endpoint=f'pai-dlc.{args.region}.aliyuncs.com' + ) + ) + + nas_1 = DataSourceItem( + data_source_type = 'nas', + data_source_id=args.nas_data_source_id, + ) + + # job spec + spec = JobSpec( + type=args.type, + image=args.image, + pod_count=args.pod_count, + ecs_spec=args.ecs_spec, + ) + + req = CreateJobRequest( + display_name=args.experiment_name, + job_type=args.job_type, + job_specs=[spec], + data_sources=[nas_1], + user_command=args.user_command + ) + + # DLC submit + response = client.create_job(req) + job_id = response.body.job_id + print('job id: ' + job_id) + + while True: + line = sys.stdin.readline().rstrip() + if line == 'update_status': + print('status:' + client.get_job(job_id).body.status) + elif line == 'tracking_url': + #TODO: 1. get this url by api? 2. change this url in private dlc mode. 
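+            # NOTE: for a job 'dlc1a2b3c' in region 'cn-hangzhou' (ids
+            # illustrative), the printed URL has the shape
+            # https://pai-dlc.console.aliyun.com/#/jobs/detail?jobId=dlc1a2b3c&regionId=cn-hangzhou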
+            print('tracking_url:' + f'https://pai-dlc.console.aliyun.com/#/jobs/detail?jobId={job_id}&regionId={args.region}')
+        elif line == 'stop':
+            client.stop_job(job_id)
+            exit(0)
diff --git a/ts/nni_manager/config/frameworkcontroller/frameworkcontrollerjob-crd-v1.json b/ts/nni_manager/config/frameworkcontroller/frameworkcontrollerjob-crd-v1.json
new file mode 100644
index 0000000000000000000000000000000000000000..88777ee376d278dd7fad26d19bd99c1177ec0b3b
--- /dev/null
+++ b/ts/nni_manager/config/frameworkcontroller/frameworkcontrollerjob-crd-v1.json
@@ -0,0 +1,17 @@
+{
+    "kind": "CustomResourceDefinition",
+    "spec": {
+        "scope": "Namespaced",
+        "version": "v1",
+        "group": "frameworkcontroller.microsoft.com",
+        "names": {
+            "kind": "Framework",
+            "plural": "frameworks",
+            "singular": "framework"
+        }
+    },
+    "apiVersion": "apiextensions.k8s.io/v1beta1",
+    "metadata": {
+        "name": "frameworks.frameworkcontroller.microsoft.com"
+    }
+}
diff --git a/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1.json b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1.json
new file mode 100644
index 0000000000000000000000000000000000000000..70fed1692235a53085c62bb8a51009dbff1265f0
--- /dev/null
+++ b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1.json
@@ -0,0 +1,17 @@
+{
+    "kind": "CustomResourceDefinition",
+    "spec": {
+        "scope": "Namespaced",
+        "version": "v1",
+        "group": "kubeflow.org",
+        "names": {
+            "kind": "PyTorchJob",
+            "plural": "pytorchjobs",
+            "singular": "pytorchjob"
+        }
+    },
+    "apiVersion": "apiextensions.k8s.io/v1beta1",
+    "metadata": {
+        "name": "pytorchjobs.kubeflow.org"
+    }
+}
\ No newline at end of file
diff --git a/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1alpha2.json b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1alpha2.json
new file mode 100644
index 0000000000000000000000000000000000000000..b6694538f5a72c0b83ae3a829d9ab4d6f59b4c47
--- /dev/null
+++ b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1alpha2.json
@@ -0,0 +1,17 @@
+{
+    "kind": "CustomResourceDefinition",
+    "spec": {
+        "scope": "Namespaced",
+        "version": "v1alpha2",
+        "group": "kubeflow.org",
+        "names": {
+            "kind": "PyTorchJob",
+            "plural": "pytorchjobs",
+            "singular": "pytorchjob"
+        }
+    },
+    "apiVersion": "apiextensions.k8s.io/v1beta1",
+    "metadata": {
+        "name": "pytorchjobs.kubeflow.org"
+    }
+}
diff --git a/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta1.json b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta1.json
new file mode 100644
index 0000000000000000000000000000000000000000..8759b2d101f44fbef164ffa8b15c4fd1f6852b5f
--- /dev/null
+++ b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta1.json
@@ -0,0 +1,17 @@
+{
+    "kind": "CustomResourceDefinition",
+    "spec": {
+        "scope": "Namespaced",
+        "version": "v1beta1",
+        "group": "kubeflow.org",
+        "names": {
+            "kind": "PyTorchJob",
+            "plural": "pytorchjobs",
+            "singular": "pytorchjob"
+        }
+    },
+    "apiVersion": "apiextensions.k8s.io/v1beta1",
+    "metadata": {
+        "name": "pytorchjobs.kubeflow.org"
+    }
+}
diff --git a/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta2.json b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta2.json
new file mode 100644
index 0000000000000000000000000000000000000000..8c454ba378f4b78786fa890ebd249a708344d299
--- /dev/null
+++ b/ts/nni_manager/config/kubeflow/pytorchjob-crd-v1beta2.json
@@ -0,0 +1,17 @@
+{
+    "kind": "CustomResourceDefinition",
+    "spec": {
+        "scope": "Namespaced",
+        "version": "v1beta2",
+        "group": "kubeflow.org",
+        "names": {
+            "kind": "PyTorchJob",
+            "plural": "pytorchjobs",
+            "singular": "pytorchjob"
+        }
+    },
+    "apiVersion":
"apiextensions.k8s.io/v1beta2", + "metadata": { + "name": "pytorchjobs.kubeflow.org" + } +} diff --git a/ts/nni_manager/config/kubeflow/tfjob-crd-v1.json b/ts/nni_manager/config/kubeflow/tfjob-crd-v1.json new file mode 100644 index 0000000000000000000000000000000000000000..bcc9d26002d72354a3417f1bb69560e0d490a48f --- /dev/null +++ b/ts/nni_manager/config/kubeflow/tfjob-crd-v1.json @@ -0,0 +1,17 @@ +{ + "kind": "CustomResourceDefinition", + "spec": { + "scope": "Namespaced", + "version": "v1", + "group": "kubeflow.org", + "names": { + "kind": "TFJob", + "plural": "tfjobs", + "singular": "tfjob" + } + }, + "apiVersion": "apiextensions.k8s.io/v1beta1", + "metadata": { + "name": "tfjobs.kubeflow.org" + } +} diff --git a/ts/nni_manager/config/kubeflow/tfjob-crd-v1alpha2.json b/ts/nni_manager/config/kubeflow/tfjob-crd-v1alpha2.json new file mode 100644 index 0000000000000000000000000000000000000000..7b0e53f3f2ea7bd32a49239223a3871b491468b4 --- /dev/null +++ b/ts/nni_manager/config/kubeflow/tfjob-crd-v1alpha2.json @@ -0,0 +1,17 @@ +{ + "kind": "CustomResourceDefinition", + "spec": { + "scope": "Namespaced", + "version": "v1alpha2", + "group": "kubeflow.org", + "names": { + "kind": "TFJob", + "plural": "tfjobs", + "singular": "tfjob" + } + }, + "apiVersion": "apiextensions.k8s.io/v1beta1", + "metadata": { + "name": "tfjobs.kubeflow.org" + } +} diff --git a/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta1.json b/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta1.json new file mode 100644 index 0000000000000000000000000000000000000000..4dd846287d6ebae73adf247b18301f4df36a1fd7 --- /dev/null +++ b/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta1.json @@ -0,0 +1,17 @@ +{ + "kind": "CustomResourceDefinition", + "spec": { + "scope": "Namespaced", + "version": "v1beta1", + "group": "kubeflow.org", + "names": { + "kind": "TFJob", + "plural": "tfjobs", + "singular": "tfjob" + } + }, + "apiVersion": "apiextensions.k8s.io/v1beta1", + "metadata": { + "name": "tfjobs.kubeflow.org" + } +} diff --git a/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta2.json b/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta2.json new file mode 100644 index 0000000000000000000000000000000000000000..cc6fd104bb7f76229e4aff9c74df4783dd227b55 --- /dev/null +++ b/ts/nni_manager/config/kubeflow/tfjob-crd-v1beta2.json @@ -0,0 +1,17 @@ +{ + "kind": "CustomResourceDefinition", + "spec": { + "scope": "Namespaced", + "version": "v1beta2", + "group": "kubeflow.org", + "names": { + "kind": "TFJob", + "plural": "tfjobs", + "singular": "tfjob" + } + }, + "apiVersion": "apiextensions.k8s.io/v1beta2", + "metadata": { + "name": "tfjobs.kubeflow.org" + } +} diff --git a/ts/nni_manager/core/commands.ts b/ts/nni_manager/core/commands.ts new file mode 100644 index 0000000000000000000000000000000000000000..ba1e9b3925ecbb23cc3135cf4005e94737cea452 --- /dev/null +++ b/ts/nni_manager/core/commands.ts @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+
+
+const INITIALIZE = 'IN';
+const REQUEST_TRIAL_JOBS = 'GE';
+const REPORT_METRIC_DATA = 'ME';
+const UPDATE_SEARCH_SPACE = 'SS';
+const IMPORT_DATA = 'FD';
+const ADD_CUSTOMIZED_TRIAL_JOB = 'AD';
+const TRIAL_END = 'EN';
+const TERMINATE = 'TE';
+const PING = 'PI';
+
+const GPU_INFO = 'GI';
+const STDOUT = 'SO';
+const VERSION_CHECK = 'VC';
+
+const INITIALIZED = 'ID';
+const NEW_TRIAL_JOB = 'TR';
+const SEND_TRIAL_JOB_PARAMETER = 'SP';
+const NO_MORE_TRIAL_JOBS = 'NO';
+const KILL_TRIAL_JOB = 'KI';
+
+const TRIAL_COMMANDS: Set<string> = new Set([
+    // from ctl to node
+    NEW_TRIAL_JOB,
+    SEND_TRIAL_JOB_PARAMETER,
+    KILL_TRIAL_JOB,
+
+    // from node to ctl
+    INITIALIZED,
+    TRIAL_END,
+    GPU_INFO,
+    STDOUT,
+    VERSION_CHECK,
+]);
+
+const TUNER_COMMANDS: Set<string> = new Set([
+    INITIALIZE,
+    REQUEST_TRIAL_JOBS,
+    REPORT_METRIC_DATA,
+    UPDATE_SEARCH_SPACE,
+    IMPORT_DATA,
+    ADD_CUSTOMIZED_TRIAL_JOB,
+    TERMINATE,
+    PING,
+
+    INITIALIZED,
+    NEW_TRIAL_JOB,
+    SEND_TRIAL_JOB_PARAMETER,
+    NO_MORE_TRIAL_JOBS
+]);
+
+const ASSESSOR_COMMANDS: Set<string> = new Set([
+    INITIALIZE,
+    REPORT_METRIC_DATA,
+    TRIAL_END,
+    TERMINATE,
+
+    KILL_TRIAL_JOB
+]);
+
+export {
+    INITIALIZE,
+    REQUEST_TRIAL_JOBS,
+    REPORT_METRIC_DATA,
+    UPDATE_SEARCH_SPACE,
+    IMPORT_DATA,
+    ADD_CUSTOMIZED_TRIAL_JOB,
+    TRIAL_END,
+    TERMINATE,
+    PING,
+    GPU_INFO,
+    STDOUT,
+    VERSION_CHECK,
+    INITIALIZED,
+    NEW_TRIAL_JOB,
+    NO_MORE_TRIAL_JOBS,
+    KILL_TRIAL_JOB,
+    TUNER_COMMANDS,
+    ASSESSOR_COMMANDS,
+    TRIAL_COMMANDS,
+    SEND_TRIAL_JOB_PARAMETER
+};
diff --git a/ts/nni_manager/core/ipcInterface.ts b/ts/nni_manager/core/ipcInterface.ts
new file mode 100644
index 0000000000000000000000000000000000000000..aa8b5a0b8eeaa6a88dc52a626da0c66d2cc3362e
--- /dev/null
+++ b/ts/nni_manager/core/ipcInterface.ts
@@ -0,0 +1,144 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
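+
+// NOTE: a worked example of the framing implemented below: a command is the
+// 2-byte type, a 14-digit zero-padded content length, then the content, so
+//   encodeCommand('TR', '{"parameter_id": 0}')
+// yields the 35-byte buffer
+//   TR00000000000019{"parameter_id": 0}
+// and decodeCommand() on that buffer returns
+//   [true, 'TR', '{"parameter_id": 0}', <empty Buffer>]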
+ +import assert from 'assert'; +import { ChildProcess } from 'child_process'; +import { EventEmitter } from 'events'; +import net from 'net'; +import { Readable, Writable } from 'stream'; +import { NNIError } from '../common/errors'; +import { getLogger, Logger } from '../common/log'; +import { getLogDir } from '../common/utils'; +import * as CommandType from './commands'; + +const ipcOutgoingFd: number = 3; +const ipcIncomingFd: number = 4; + +/** + * Encode a command + * @param commandType a command type defined in 'core/commands' + * @param content payload of the command + * @returns binary command data + */ +function encodeCommand(commandType: string, content: string): Buffer { + const contentBuffer: Buffer = Buffer.from(content); + const contentLengthBuffer: Buffer = Buffer.from(contentBuffer.length.toString().padStart(14, '0')); + return Buffer.concat([Buffer.from(commandType), contentLengthBuffer, contentBuffer]); +} + +/** + * Decode a command + * @param Buffer binary incoming data + * @returns a tuple of (success, commandType, content, remain) + * success: true if the buffer contains at least one complete command; otherwise false + * remain: remaining data after the first command + */ +function decodeCommand(data: Buffer): [boolean, string, string, Buffer] { + if (data.length < 8) { + return [false, '', '', data]; + } + const commandType: string = data.slice(0, 2).toString(); + const contentLength: number = parseInt(data.slice(2, 16).toString(), 10); + if (data.length < contentLength + 16) { + return [false, '', '', data]; + } + const content: string = data.slice(16, contentLength + 16).toString(); + const remain: Buffer = data.slice(contentLength + 16); + + return [true, commandType, content, remain]; +} + +class IpcInterface { + private acceptCommandTypes: Set; + private outgoingStream: Writable; + private incomingStream: Readable; + private eventEmitter: EventEmitter; + private readBuffer: Buffer; + private logger: Logger = getLogger('IpcInterface'); + + /** + * Construct a IPC proxy + * @param proc the process to wrap + * @param acceptCommandTypes set of accepted commands for this process + */ + constructor(outStream: Writable, inStream: Readable, acceptCommandTypes: Set) { + this.acceptCommandTypes = acceptCommandTypes; + this.outgoingStream = outStream; + this.incomingStream = inStream; + this.eventEmitter = new EventEmitter(); + this.readBuffer = Buffer.alloc(0); + + this.incomingStream.on('data', (data: Buffer) => { this.receive(data); }); + this.incomingStream.on('error', (error: Error) => { this.eventEmitter.emit('error', error); }); + this.outgoingStream.on('error', (error: Error) => { this.eventEmitter.emit('error', error); }); + } + + /** + * Send a command to process + * @param commandType: a command type defined in 'core/commands' + * @param content: payload of command + */ + public sendCommand(commandType: string, content: string = ''): void { + this.logger.debug(`ipcInterface command type: [${commandType}], content:[${content}]`); + assert.ok(this.acceptCommandTypes.has(commandType)); + + try { + const data: Buffer = encodeCommand(commandType, content); + if (!this.outgoingStream.write(data)) { + this.logger.warning('Commands jammed in buffer!'); + } + } catch (err) { + throw NNIError.FromError( + err, + `Dispatcher Error, please check this dispatcher log file for more detailed information: ${getLogDir()}/dispatcher.log . 
` + ); + } + } + + /** + * Add a command listener + * @param listener the listener callback + */ + public onCommand(listener: (commandType: string, content: string) => void): void { + this.eventEmitter.on('command', listener); + } + + public onError(listener: (error: Error) => void): void { + this.eventEmitter.on('error', listener); + } + + /** + * Deal with incoming data from process + * Invoke listeners for each complete command received, save incomplete command to buffer + * @param data binary incoming data + */ + private receive(data: Buffer): void { + this.readBuffer = Buffer.concat([this.readBuffer, data]); + while (this.readBuffer.length > 0) { + const [success, commandType, content, remain] = decodeCommand(this.readBuffer); + if (!success) { + break; + } + assert.ok(this.acceptCommandTypes.has(commandType)); + this.eventEmitter.emit('command', commandType, content); + this.readBuffer = remain; + } + } +} + +/** + * Create IPC proxy for tuner process + * @param process_ the tuner process + */ +function createDispatcherInterface(process: ChildProcess): IpcInterface { + const outStream = process.stdio[ipcOutgoingFd]; + const inStream = process.stdio[ipcIncomingFd]; + return new IpcInterface(outStream, inStream, new Set([...CommandType.TUNER_COMMANDS, ...CommandType.ASSESSOR_COMMANDS])); +} + +function createDispatcherPipeInterface(pipePath: string): IpcInterface { + const client = net.createConnection(pipePath); + return new IpcInterface(client, client, new Set([...CommandType.TUNER_COMMANDS, ...CommandType.ASSESSOR_COMMANDS])); +} + +export { IpcInterface, createDispatcherInterface, createDispatcherPipeInterface, encodeCommand, decodeCommand }; diff --git a/ts/nni_manager/core/nniDataStore.ts b/ts/nni_manager/core/nniDataStore.ts new file mode 100644 index 0000000000000000000000000000000000000000..160d46c91ff5a8471d755ce630954a886ce1e89d --- /dev/null +++ b/ts/nni_manager/core/nniDataStore.ts @@ -0,0 +1,366 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
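+
+// NOTE: trial state is not stored as one mutable row; it is reconstructed by
+// replaying TrialJobEvent records in time order (see
+// getTrialJobsByReplayEvents below). For example, the event stream
+//   WAITING@t1 -> RUNNING@t2 -> SUCCEEDED@t3
+// yields a TrialJobInfo with startTime=t2, endTime=t3 and status 'SUCCEEDED'
+// (timestamps illustrative).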
+ +import assert from 'assert'; +import { Deferred } from 'ts-deferred'; + +import * as component from '../common/component'; +import { Database, DataStore, MetricData, MetricDataRecord, MetricType, + TrialJobEvent, TrialJobEventRecord, TrialJobInfo, HyperParameterFormat, + ExportedDataFormat } from '../common/datastore'; +import { NNIError } from '../common/errors'; +import { isNewExperiment } from '../common/experimentStartupInfo'; +import { getLogger, Logger } from '../common/log'; +import { ExperimentProfile, TrialJobStatistics } from '../common/manager'; +import { TrialJobDetail, TrialJobStatus } from '../common/trainingService'; +import { getDefaultDatabaseDir, mkDirP } from '../common/utils'; + +class NNIDataStore implements DataStore { + private db: Database = component.get(Database); + private log: Logger = getLogger('NNIDataStore'); + private initTask!: Deferred; + + public init(): Promise { + if (this.initTask !== undefined) { + return this.initTask.promise; + } + this.initTask = new Deferred(); + + // TODO support specify database dir + const databaseDir: string = getDefaultDatabaseDir(); + if(isNewExperiment()) { + mkDirP(databaseDir).then(() => { + this.db.init(true, databaseDir).then(() => { + this.log.info('Datastore initialization done'); + this.initTask.resolve(); + }).catch((err: Error) => { + this.initTask.reject(err); + }); + }).catch((err: Error) => { + this.initTask.reject(err); + }); + } else { + this.db.init(false, databaseDir).then(() => { + this.log.info('Datastore initialization done'); + this.initTask.resolve(); + }).catch((err: Error) => { + this.initTask.reject(err); + }); + } + + return this.initTask.promise; + } + + public async close(): Promise { + await this.db.close(); + } + + public async storeExperimentProfile(experimentProfile: ExperimentProfile): Promise { + try { + await this.db.storeExperimentProfile(experimentProfile); + } catch (err) { + throw NNIError.FromError(err, 'Datastore error: '); + } + } + + public getExperimentProfile(experimentId: string): Promise { + return this.db.queryLatestExperimentProfile(experimentId); + } + + public storeTrialJobEvent( + event: TrialJobEvent, trialJobId: string, hyperParameter?: string, jobDetail?: TrialJobDetail): Promise { + + // Use the timestamp in jobDetail as TrialJobEvent timestamp for different events + let timestamp: number | undefined; + if (event === 'WAITING' && jobDetail) { + timestamp = jobDetail.submitTime; + } else if (event === 'RUNNING' && jobDetail) { + timestamp = jobDetail.startTime; + } else if (['EARLY_STOPPED', 'SUCCEEDED', 'FAILED', 'USER_CANCELED', 'SYS_CANCELED'].includes(event) && jobDetail) { + timestamp = jobDetail.endTime; + } + // Use current time as timestamp if timestamp is not assigned from jobDetail + if (timestamp === undefined) { + timestamp = Date.now(); + } + return this.db.storeTrialJobEvent(event, trialJobId, timestamp, hyperParameter, jobDetail).catch( + (err: Error) => { + throw NNIError.FromError(err, 'Datastore error: '); + } + ); + } + + public async getTrialJobStatistics(): Promise { + const result: TrialJobStatistics[] = []; + const jobs: TrialJobInfo[] = await this.listTrialJobs(); + const map: Map = new Map(); + + jobs.forEach((value: TrialJobInfo) => { + let n: number|undefined = map.get(value.status); + if (!n) { + n = 0; + } + map.set(value.status, n + 1); + }); + + map.forEach((value: number, key: TrialJobStatus) => { + const statistics: TrialJobStatistics = { + trialJobStatus: key, + trialJobNumber: value + }; + result.push(statistics); + }); + + return 
result; + } + + public listTrialJobs(status?: TrialJobStatus): Promise { + return this.queryTrialJobs(status); + } + + public async getTrialJob(trialJobId: string): Promise { + const trialJobs: TrialJobInfo[] = await this.queryTrialJobs(undefined, trialJobId); + assert(trialJobs.length <= 1); + + return trialJobs[0]; + } + + public async storeMetricData(trialJobId: string, data: string): Promise { + const metrics: MetricData = JSON.parse(data); + // REQUEST_PARAMETER is used to request new parameters for multiphase trial job, + // it is not metrics, so it is skipped here. + if (metrics.type === 'REQUEST_PARAMETER') { + + return; + } + assert(trialJobId === metrics.trial_job_id); + try { + await this.db.storeMetricData(trialJobId, JSON.stringify({ + trialJobId: metrics.trial_job_id, + parameterId: metrics.parameter_id, + type: metrics.type, + sequence: metrics.sequence, + data: metrics.value, + timestamp: Date.now() + })); + } catch (err) { + throw NNIError.FromError(err, 'Datastore error'); + } + } + + public getMetricData(trialJobId?: string, metricType?: MetricType): Promise { + return this.db.queryMetricData(trialJobId, metricType); + } + + public async exportTrialHpConfigs(): Promise { + const jobs: TrialJobInfo[] = await this.listTrialJobs(); + const exportedData: ExportedDataFormat[] = []; + for (const job of jobs) { + if (job.hyperParameters && job.finalMetricData) { + if (job.hyperParameters.length === 1 && job.finalMetricData.length === 1) { + // optimization for non-multi-phase case + const parameters: HyperParameterFormat = JSON.parse(job.hyperParameters[0]); + const oneEntry: ExportedDataFormat = { + parameter: parameters.parameters, + value: JSON.parse(job.finalMetricData[0].data), + trialJobId: job.trialJobId + }; + exportedData.push(oneEntry); + } else { + const paraMap: Map> = new Map(); + const metricMap: Map> = new Map(); + for (const eachPara of job.hyperParameters) { + const parameters: HyperParameterFormat = JSON.parse(eachPara); + paraMap.set(parameters.parameter_id, parameters.parameters); + } + for (const eachMetric of job.finalMetricData) { + const value: Record = JSON.parse(eachMetric.data); + metricMap.set(Number(eachMetric.parameterId), value); + } + paraMap.forEach((value: Record, key: number) => { + const metricValue: Record | undefined = metricMap.get(key); + if (metricValue) { + const oneEntry: ExportedDataFormat = { + parameter: value, + value: metricValue, + trialJobId: job.trialJobId + }; + exportedData.push(oneEntry); + } + }); + } + } + } + + return JSON.stringify(exportedData); + } + + public async getImportedData(): Promise { + const importedData: string[] = []; + const importDataEvents: TrialJobEventRecord[] = await this.db.queryTrialJobEvent(undefined, 'IMPORT_DATA'); + for (const event of importDataEvents) { + if (event.data) { + importedData.push(event.data); + } + } + + return importedData; + } + + private async queryTrialJobs(status?: TrialJobStatus, trialJobId?: string): Promise { + const result: TrialJobInfo[] = []; + const trialJobEvents: TrialJobEventRecord[] = await this.db.queryTrialJobEvent(trialJobId); + if (trialJobEvents === undefined) { + return result; + } + const map: Map = this.getTrialJobsByReplayEvents(trialJobEvents); + + const finalMetricsMap: Map = await this.getFinalMetricData(trialJobId); + + for (const key of map.keys()) { + const jobInfo: TrialJobInfo | undefined = map.get(key); + if (jobInfo === undefined) { + continue; + } + if (!(status !== undefined && jobInfo.status !== status)) { + if (jobInfo.status === 'SUCCEEDED') 
{ + jobInfo.finalMetricData = finalMetricsMap.get(jobInfo.trialJobId); + } + result.push(jobInfo); + } + } + + return result; + } + + private async getFinalMetricData(trialJobId?: string): Promise> { + const map: Map = new Map(); + const metrics: MetricDataRecord[] = await this.getMetricData(trialJobId, 'FINAL'); + + for (const metric of metrics) { + const existMetrics: MetricDataRecord[] | undefined = map.get(metric.trialJobId); + if (existMetrics !== undefined) { + this.log.error(`Found multiple FINAL results for trial job ${trialJobId}, metrics:`, metrics); + } else { + map.set(metric.trialJobId, [metric]); + } + } + + return map; + } + + private getJobStatusByLatestEvent(oldStatus: TrialJobStatus, event: TrialJobEvent): TrialJobStatus { + switch (event) { + case 'USER_TO_CANCEL': + return 'USER_CANCELED'; + case 'ADD_CUSTOMIZED': + return 'WAITING'; + case 'ADD_HYPERPARAMETER': + return oldStatus; + default: + } + + return event; + } + + private parseHyperParameter(hParamStr: string): any { + let hParam: any; + try { + hParam = JSON.parse(hParamStr); + + return hParam; + } catch (err) { + this.log.error(`Hyper parameter needs to be in json format: ${hParamStr}`); + + return undefined; + } + } + + private getTrialJobsByReplayEvents(trialJobEvents: TrialJobEventRecord[]): Map { + this.log.debug('getTrialJobsByReplayEvents begin'); + + const map: Map = new Map(); + const hParamIdMap: Map> = new Map(); + + // assume data is stored by time ASC order + for (const record of trialJobEvents) { + let jobInfo: TrialJobInfo | undefined; + if (record.trialJobId === undefined || record.trialJobId.length < 1) { + continue; + } + if (map.has(record.trialJobId)) { + jobInfo = map.get(record.trialJobId); + } else { + jobInfo = { + trialJobId: record.trialJobId, + status: this.getJobStatusByLatestEvent('UNKNOWN', record.event), + hyperParameters: [] + }; + } + if (!jobInfo) { + throw new Error('Empty JobInfo'); + } + /* eslint-disable no-fallthrough */ + switch (record.event) { + case 'RUNNING': + if (record.timestamp !== undefined) { + jobInfo.startTime = record.timestamp; + } + case 'WAITING': + if (record.logPath !== undefined) { + jobInfo.logPath = record.logPath; + } + // Initially assign WAITING timestamp as job's start time, + // If there is RUNNING state event, it will be updated as RUNNING state timestamp + if (jobInfo.startTime === undefined && record.timestamp !== undefined) { + jobInfo.startTime = record.timestamp; + } + break; + case 'SUCCEEDED': + case 'FAILED': + case 'USER_CANCELED': + case 'SYS_CANCELED': + case 'EARLY_STOPPED': + if (record.logPath !== undefined) { + jobInfo.logPath = record.logPath; + } + jobInfo.endTime = record.timestamp; + if (jobInfo.startTime === undefined && record.timestamp !== undefined) { + jobInfo.startTime = record.timestamp; + } + default: + } + /* eslint-enable no-fallthrough */ + jobInfo.status = this.getJobStatusByLatestEvent(jobInfo.status, record.event); + if (record.data !== undefined && record.data.trim().length > 0) { + const newHParam: any = this.parseHyperParameter(record.data); + if (newHParam !== undefined) { + if (jobInfo.hyperParameters !== undefined) { + let hParamIds: Set | undefined = hParamIdMap.get(jobInfo.trialJobId); + if (hParamIds === undefined) { + hParamIds = new Set(); + } + if (!hParamIds.has(newHParam.parameter_index)) { + jobInfo.hyperParameters.push(JSON.stringify(newHParam)); + hParamIds.add(newHParam.parameter_index); + hParamIdMap.set(jobInfo.trialJobId, hParamIds); + } + } else { + assert(false, 'jobInfo.hyperParameters is 
undefined'); + } + } + } + if (record.sequenceId !== undefined && jobInfo.sequenceId === undefined) { + jobInfo.sequenceId = record.sequenceId; + } + jobInfo.message = record.message; + map.set(record.trialJobId, jobInfo); + } + + this.log.debug('getTrialJobsByReplayEvents done'); + + return map; + } +} + +export { NNIDataStore }; diff --git a/ts/nni_manager/core/nniExperimentsManager.ts b/ts/nni_manager/core/nniExperimentsManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..af1f3d2576a5c3c9a225487cd696e6c82b150b94 --- /dev/null +++ b/ts/nni_manager/core/nniExperimentsManager.ts @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import assert from 'assert'; + +import { getLogger, Logger } from '../common/log'; +import { isAlive, withLockSync, getExperimentsInfoPath, delay } from '../common/utils'; +import { ExperimentManager } from '../common/experimentManager'; +import { Deferred } from 'ts-deferred'; + +interface CrashedInfo { + experimentId: string; + isCrashed: boolean; +} + +interface FileInfo { + buffer: Buffer; + mtime: number; +} + +class NNIExperimentsManager implements ExperimentManager { + private experimentsPath: string; + private log: Logger; + private profileUpdateTimer: {[key: string]: any}; + + constructor() { + this.experimentsPath = getExperimentsInfoPath(); + this.log = getLogger('NNIExperimentsManager'); + this.profileUpdateTimer = {}; + } + + public async getExperimentsInfo(): Promise { + const fileInfo: FileInfo = await this.withLockIterated(this.readExperimentsInfo, 100); + const experimentsInformation = JSON.parse(fileInfo.buffer.toString()); + const expIdList: Array = Object.keys(experimentsInformation).filter((expId) => { + return experimentsInformation[expId]['status'] !== 'STOPPED'; + }); + const updateList: Array = (await Promise.all(expIdList.map((expId) => { + return this.checkCrashed(expId, experimentsInformation[expId]['pid']); + }))).filter(crashedInfo => crashedInfo.isCrashed); + if (updateList.length > 0){ + const result = await this.withLockIterated(this.updateAllStatus, 100, updateList.map(crashedInfo => crashedInfo.experimentId), fileInfo.mtime); + if (result !== undefined) { + return JSON.parse(JSON.stringify(Object.keys(result).map(key=>result[key]))); + } else { + await delay(500); + return await this.getExperimentsInfo(); + } + } else { + return JSON.parse(JSON.stringify(Object.keys(experimentsInformation).map(key=>experimentsInformation[key]))); + } + } + + public setExperimentPath(newPath: string): void { + if (newPath[0] === '~') { + newPath = path.join(os.homedir(), newPath.slice(1)); + } + if (!path.isAbsolute(newPath)) { + newPath = path.resolve(newPath); + } + this.log.info(`Set new experiment information path: ${newPath}`); + this.experimentsPath = newPath; + } + + public setExperimentInfo(experimentId: string, key: string, value: any): void { + try { + if (this.profileUpdateTimer[key] !== undefined) { + // if a new call with the same timerId occurs, destroy the unfinished old one + clearTimeout(this.profileUpdateTimer[key]); + this.profileUpdateTimer[key] = undefined; + } + this.withLockSync(() => { + const experimentsInformation = JSON.parse(fs.readFileSync(this.experimentsPath).toString()); + assert(experimentId in experimentsInformation, `Experiment Manager: Experiment Id ${experimentId} not found, this should not happen`); + if (value !== undefined) { + 
experimentsInformation[experimentId][key] = value; + } else { + delete experimentsInformation[experimentId][key]; + } + fs.writeFileSync(this.experimentsPath, JSON.stringify(experimentsInformation, null, 4)); + }); + } catch (err) { + this.log.error(err); + this.log.debug(`Experiment Manager: Retry set key value: ${experimentId} {${key}: ${value}}`); + if (err.code === 'EEXIST' || err.message === 'File has been locked.') { + this.profileUpdateTimer[key] = setTimeout(this.setExperimentInfo.bind(this), 100, experimentId, key, value); + } + } + } + + private async withLockIterated (func: Function, retry: number, ...args: any): Promise { + if (retry < 0) { + throw new Error('Lock file out of retries.'); + } + try { + return this.withLockSync(func, ...args); + } catch(err) { + if (err.code === 'EEXIST' || err.message === 'File has been locked.') { + // retry wait is 50ms + await delay(50); + return await this.withLockIterated(func, retry - 1, ...args); + } + throw err; + } + } + + private withLockSync (func: Function, ...args: any): any { + return withLockSync(func.bind(this), this.experimentsPath, {stale: 2 * 1000}, ...args); + } + + private readExperimentsInfo(): FileInfo { + const buffer: Buffer = fs.readFileSync(this.experimentsPath); + const mtime: number = fs.statSync(this.experimentsPath).mtimeMs; + return {buffer: buffer, mtime: mtime}; + } + + private async checkCrashed(expId: string, pid: number): Promise { + const alive: boolean = await isAlive(pid); + return {experimentId: expId, isCrashed: !alive} + } + + private updateAllStatus(updateList: Array, timestamp: number): {[key: string]: any} | undefined { + if (timestamp !== fs.statSync(this.experimentsPath).mtimeMs) { + return; + } else { + const experimentsInformation = JSON.parse(fs.readFileSync(this.experimentsPath).toString()); + updateList.forEach((expId: string) => { + if (experimentsInformation[expId]) { + experimentsInformation[expId]['status'] = 'STOPPED'; + delete experimentsInformation[expId]['port']; + } else { + this.log.error(`Experiment Manager: Experiment Id ${expId} not found, this should not happen`); + } + }); + fs.writeFileSync(this.experimentsPath, JSON.stringify(experimentsInformation, null, 4)); + return experimentsInformation; + } + } + + public async stop(): Promise { + this.log.debug('Stopping experiment manager.'); + await this.cleanUp().catch(err=>this.log.error(err.message)); + this.log.debug('Experiment manager stopped.'); + } + + private async cleanUp(): Promise { + const deferred = new Deferred(); + if (this.isUndone()) { + this.log.debug('Experiment manager: something undone'); + setTimeout(((deferred: Deferred): void => { + if (this.isUndone()) { + deferred.reject(new Error('Still has undone after 5s, forced stop.')); + } else { + deferred.resolve(); + } + }).bind(this), 5 * 1000, deferred); + } else { + this.log.debug('Experiment manager: all clean up'); + deferred.resolve(); + } + return deferred.promise; + } + + private isUndone(): boolean { + return Object.keys(this.profileUpdateTimer).filter((key: string) => { + return this.profileUpdateTimer[key] !== undefined; + }).length > 0; + } +} + +export { NNIExperimentsManager }; diff --git a/ts/nni_manager/core/nniTensorboardManager.ts b/ts/nni_manager/core/nniTensorboardManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..e1280d85e4c84e6a267e5bf8d0ca744a1e95f5ea --- /dev/null +++ b/ts/nni_manager/core/nniTensorboardManager.ts @@ -0,0 +1,227 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
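+
+// NOTE: each task shells out to the `tensorboard` CLI on a free port picked
+// from 6006 upward. With tensorboard >= 2.0 the generated command looks
+// roughly like (trial id and path illustrative):
+//   tensorboard --bind_all --logdir_spec=0-Abc12:/path/to/trial/tensorboard --port=6006
+// where '0-Abc12' is '<sequenceId>-<trialJobId>'.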
+ +import fs from 'fs'; +import cp from 'child_process'; +import path from 'path'; +import { ChildProcess } from 'child_process'; + +import * as component from '../common/component'; +import { getLogger, Logger } from '../common/log'; +import { getTunerProc, isAlive, uniqueString, mkDirPSync, getFreePort } from '../common/utils'; +import { Manager } from '../common/manager'; +import { TensorboardParams, TensorboardTaskStatus, TensorboardTaskInfo, TensorboardManager } from '../common/tensorboardManager'; + +class TensorboardTaskDetail implements TensorboardTaskInfo { + public id: string; + public status: TensorboardTaskStatus; + public trialJobIdList: string[]; + public trialLogDirectoryList: string[]; + public pid?: number; + public port?: string; + + constructor(id: string, status: TensorboardTaskStatus, trialJobIdList: string[], trialLogDirectoryList: string[]) { + this.id = id; + this.status = status; + this.trialJobIdList = trialJobIdList; + this.trialLogDirectoryList = trialLogDirectoryList; + } +} + +class NNITensorboardManager implements TensorboardManager { + private log: Logger; + private tensorboardTaskMap: Map; + private tensorboardVersion: string | undefined; + private nniManager: Manager; + + constructor() { + this.log = getLogger('NNITensorboardManager'); + this.tensorboardTaskMap = new Map(); + this.setTensorboardVersion(); + this.nniManager = component.get(Manager); + } + + public async startTensorboardTask(tensorboardParams: TensorboardParams): Promise { + const trialJobIds = tensorboardParams.trials; + const trialJobIdList: string[] = []; + const trialLogDirectoryList: string[] = []; + await Promise.all(trialJobIds.split(',').map(async (trialJobId) => { + const trialTensorboardDataPath = path.join(await this.nniManager.getTrialOutputLocalPath(trialJobId), 'tensorboard'); + mkDirPSync(trialTensorboardDataPath); + trialJobIdList.push(trialJobId); + trialLogDirectoryList.push(trialTensorboardDataPath); + })); + this.log.info(`tensorboard: ${trialJobIdList} ${trialLogDirectoryList}`); + return await this.startTensorboardTaskProcess(trialJobIdList, trialLogDirectoryList); + } + + private async startTensorboardTaskProcess(trialJobIdList: string[], trialLogDirectoryList: string[]): Promise { + const host = 'localhost'; + const port = await getFreePort(host, 6006, 65535); + const command = await this.getTensorboardStartCommand(trialJobIdList, trialLogDirectoryList, port); + this.log.info(`tensorboard start command: ${command}`); + const tensorboardTask = new TensorboardTaskDetail(uniqueString(5), 'RUNNING', trialJobIdList, trialLogDirectoryList); + this.tensorboardTaskMap.set(tensorboardTask.id, tensorboardTask); + + const tensorboardProc: ChildProcess = getTunerProc(command, 'ignore', process.cwd(), process.env, true, true); + tensorboardProc.on('error', async (error) => { + this.log.error(error); + const alive: boolean = await isAlive(tensorboardProc.pid); + if (alive) { + process.kill(-tensorboardProc.pid!); + } + this.setTensorboardTaskStatus(tensorboardTask, 'ERROR'); + }); + tensorboardTask.pid = tensorboardProc.pid; + + tensorboardTask.port = `${port}`; + this.log.info(`tensorboard task id: ${tensorboardTask.id}`); + this.updateTensorboardTask(tensorboardTask.id); + return tensorboardTask; + } + + private async getTensorboardStartCommand(trialJobIdList: string[], trialLogDirectoryList: string[], port: number): Promise { + if (this.tensorboardVersion === undefined) { + this.setTensorboardVersion(); + if (this.tensorboardVersion === undefined) { + throw new 
Error(`Tensorboard may not installed, if you want to use tensorboard, please check if tensorboard installed.`); + } + } + if (trialJobIdList.length !== trialLogDirectoryList.length) { + throw new Error('trial list length does not match'); + } + if (trialJobIdList.length === 0) { + throw new Error('trial list length is 0'); + } + let logdirCmd = '--logdir'; + if (this.tensorboardVersion >= '2.0') { + logdirCmd = '--bind_all --logdir_spec' + } + try { + const logRealPaths: string[] = []; + for (const idx in trialJobIdList) { + const realPath = fs.realpathSync(trialLogDirectoryList[idx]); + const trialJob = await this.nniManager.getTrialJob(trialJobIdList[idx]); + logRealPaths.push(`${trialJob.sequenceId}-${trialJobIdList[idx]}:${realPath}`); + } + const command = `tensorboard ${logdirCmd}=${logRealPaths.join(',')} --port=${port}`; + return command; + } catch (error){ + throw new Error(`${error.message}`); + } + } + + private setTensorboardVersion(): void { + let command = `python3 -c 'import tensorboard ; print(tensorboard.__version__)' 2>&1`; + if (process.platform === 'win32') { + command = `python -c "import tensorboard ; print(tensorboard.__version__)" 2>&1`; + } + try { + const tensorboardVersion = cp.execSync(command).toString(); + if (/\d+(.\d+)*/.test(tensorboardVersion)) { + this.tensorboardVersion = tensorboardVersion; + } + } catch (error) { + this.log.warning(`Tensorboard may not installed, if you want to use tensorboard, please check if tensorboard installed.`); + } + } + + public async getTensorboardTask(tensorboardTaskId: string): Promise { + const tensorboardTask: TensorboardTaskDetail | undefined = this.tensorboardTaskMap.get(tensorboardTaskId); + if (tensorboardTask === undefined) { + throw new Error('Tensorboard task not found'); + } + else{ + if (tensorboardTask.status !== 'STOPPED'){ + const alive: boolean = await isAlive(tensorboardTask.pid); + if (!alive) { + this.setTensorboardTaskStatus(tensorboardTask, 'ERROR'); + } + } + return tensorboardTask; + } + } + + public async listTensorboardTasks(): Promise { + const result: TensorboardTaskDetail[] = []; + this.tensorboardTaskMap.forEach((value) => { + result.push(value); + }); + return result; + } + + private setTensorboardTaskStatus(tensorboardTask: TensorboardTaskDetail, newStatus: TensorboardTaskStatus): void { + if (tensorboardTask.status !== newStatus) { + const oldStatus = tensorboardTask.status; + tensorboardTask.status = newStatus; + this.log.info(`tensorboardTask ${tensorboardTask.id} status update: ${oldStatus} to ${tensorboardTask.status}`); + } + } + + private downloadDataFinished(tensorboardTask: TensorboardTaskDetail): void { + this.setTensorboardTaskStatus(tensorboardTask, 'RUNNING'); + } + + public async updateTensorboardTask(tensorboardTaskId: string): Promise { + const tensorboardTask: TensorboardTaskDetail = await this.getTensorboardTask(tensorboardTaskId); + if (['RUNNING', 'FAIL_DOWNLOAD_DATA'].includes(tensorboardTask.status)){ + this.setTensorboardTaskStatus(tensorboardTask, 'DOWNLOADING_DATA'); + Promise.all(tensorboardTask.trialJobIdList.map((trialJobId) => { + this.nniManager.fetchTrialOutput(trialJobId, 'tensorboard'); + })).then(() => { + this.downloadDataFinished(tensorboardTask); + }).catch((error: Error) => { + this.setTensorboardTaskStatus(tensorboardTask, 'FAIL_DOWNLOAD_DATA'); + this.log.error(`${error.message}`); + }); + return tensorboardTask; + } else { + throw new Error('only tensorboard task with RUNNING or FAIL_DOWNLOAD_DATA can update data'); + } + } + + public async 
stopTensorboardTask(tensorboardTaskId: string): Promise { + const tensorboardTask = await this.getTensorboardTask(tensorboardTaskId); + if (['RUNNING', 'FAIL_DOWNLOAD_DATA'].includes(tensorboardTask.status)){ + this.killTensorboardTaskProc(tensorboardTask); + return tensorboardTask; + } else { + throw new Error('Only RUNNING FAIL_DOWNLOAD_DATA task can be stopped'); + } + } + + private async killTensorboardTaskProc(tensorboardTask: TensorboardTaskDetail): Promise { + if (['ERROR', 'STOPPED'].includes(tensorboardTask.status)) { + return + } + const alive: boolean = await isAlive(tensorboardTask.pid); + if (!alive) { + this.setTensorboardTaskStatus(tensorboardTask, 'ERROR'); + } else { + this.setTensorboardTaskStatus(tensorboardTask, 'STOPPING'); + if (tensorboardTask.pid) { + process.kill(-tensorboardTask.pid); + } + this.log.debug(`Tensorboard task ${tensorboardTask.id} stopped.`); + this.setTensorboardTaskStatus(tensorboardTask, 'STOPPED'); + this.tensorboardTaskMap.delete(tensorboardTask.id); + } + } + + public async stopAllTensorboardTask(): Promise { + this.log.info('Forced stopping all tensorboard task.') + for (const task of this.tensorboardTaskMap) { + await this.killTensorboardTaskProc(task[1]); + } + this.log.info('All tensorboard task stopped.') + } + + public async stop(): Promise { + await this.stopAllTensorboardTask(); + this.log.info('Tensorboard manager stopped.'); + } +} + +export { + NNITensorboardManager, TensorboardTaskDetail +}; diff --git a/ts/nni_manager/core/nnimanager.ts b/ts/nni_manager/core/nnimanager.ts new file mode 100644 index 0000000000000000000000000000000000000000..f3d4b3f88b67467f1cabea83afe87065883b18c4 --- /dev/null +++ b/ts/nni_manager/core/nnimanager.ts @@ -0,0 +1,906 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
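+
+// NOTE: NNIManager glues the tuner (via IpcInterface), the training service
+// and the datastore together. updateExperimentProfile() below applies exactly
+// one field per call, keyed by ProfileUpdateType; duration limits are strings
+// parsed by toSeconds(), so a maxExperimentDuration of '2h' presumably becomes
+// 7200 seconds (example value illustrative).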
+ +import assert from 'assert'; +import { ChildProcess, StdioOptions } from 'child_process'; +import { Deferred } from 'ts-deferred'; +import * as component from '../common/component'; +import { DataStore, MetricDataRecord, MetricType, TrialJobInfo } from '../common/datastore'; +import { NNIError } from '../common/errors'; +import { getExperimentId, getDispatcherPipe } from '../common/experimentStartupInfo'; +import { Logger, getLogger, stopLogging } from '../common/log'; +import { + ExperimentProfile, Manager, ExperimentStatus, + NNIManagerStatus, ProfileUpdateType, TrialJobStatistics +} from '../common/manager'; +import { ExperimentConfig, LocalConfig, toSeconds, toCudaVisibleDevices } from '../common/experimentConfig'; +import { ExperimentManager } from '../common/experimentManager'; +import { TensorboardManager } from '../common/tensorboardManager'; +import { + TrainingService, TrialJobApplicationForm, TrialJobDetail, TrialJobMetric, TrialJobStatus, TrialCommandContent, PlacementConstraint +} from '../common/trainingService'; +import { delay, getCheckpointDir, getExperimentRootDir, getLogDir, getMsgDispatcherCommand, mkDirP, getTunerProc, getLogLevel, isAlive, killPid } from '../common/utils'; +import { + INITIALIZE, INITIALIZED, KILL_TRIAL_JOB, NEW_TRIAL_JOB, NO_MORE_TRIAL_JOBS, PING, + REPORT_METRIC_DATA, REQUEST_TRIAL_JOBS, SEND_TRIAL_JOB_PARAMETER, TERMINATE, TRIAL_END, UPDATE_SEARCH_SPACE, IMPORT_DATA +} from './commands'; +import { createDispatcherInterface, createDispatcherPipeInterface, IpcInterface } from './ipcInterface'; +import { NNIRestServer } from '../rest_server/nniRestServer'; + +/** + * NNIManager which implements Manager interface + */ +class NNIManager implements Manager { + private trainingService!: TrainingService; + private dispatcher: IpcInterface | undefined; + private experimentManager: ExperimentManager; + private currSubmittedTrialNum: number; // need to be recovered + private trialConcurrencyChange: number; // >0: increase, <0: decrease + private log: Logger; + private dataStore: DataStore; + private experimentProfile!: ExperimentProfile; + private dispatcherPid: number; + private status: NNIManagerStatus; + private waitingTrials: TrialJobApplicationForm[]; + private trialJobs: Map; + private trialDataForTuner: string; + private readonly: boolean; + private config!: ExperimentConfig; + + private trialJobMetricListener: (metric: TrialJobMetric) => void; + + constructor() { + this.currSubmittedTrialNum = 0; + this.trialConcurrencyChange = 0; + this.experimentManager = component.get(ExperimentManager); + this.dispatcherPid = 0; + this.waitingTrials = []; + this.trialJobs = new Map(); + this.trialDataForTuner = ''; + this.readonly = false; + + this.log = getLogger('NNIManager'); + this.dataStore = component.get(DataStore); + this.status = { + status: 'INITIALIZED', + errors: [] + }; + this.trialJobMetricListener = (metric: TrialJobMetric): void => { + this.onTrialJobMetrics(metric).catch((err: Error) => { + this.criticalError(NNIError.FromError(err, 'Job metrics error: ')); + }); + }; + + const pipe = getDispatcherPipe(); + if (pipe !== null) { + this.dispatcher = createDispatcherPipeInterface(pipe); + } + } + + public updateExperimentProfile(experimentProfile: ExperimentProfile, updateType: ProfileUpdateType): Promise { + if (this.readonly) { + return Promise.reject(new Error('Error: can not update experiment profile in readonly mode!')); + } + switch (updateType) { + case 'TRIAL_CONCURRENCY': + 
this.updateTrialConcurrency(experimentProfile.params.trialConcurrency); + break; + case 'MAX_EXEC_DURATION': + this.experimentProfile.params.maxExperimentDuration = experimentProfile.params.maxExperimentDuration; + break; + case 'SEARCH_SPACE': + this.updateSearchSpace(experimentProfile.params.searchSpace); + break; + case 'MAX_TRIAL_NUM': + this.experimentProfile.params.maxTrialNumber = experimentProfile.params.maxTrialNumber; + break; + default: + throw new Error('Error: unrecognized updateType'); + } + + return this.storeExperimentProfile(); + } + + public importData(data: string): Promise { + if (this.readonly) { + return Promise.reject(new Error('Error: can not import data in readonly mode!')); + } + if (this.dispatcher === undefined) { + return Promise.reject( + new Error('tuner has not been setup') + ); + } + this.dispatcher.sendCommand(IMPORT_DATA, data); + + return this.dataStore.storeTrialJobEvent('IMPORT_DATA', '', data); + } + + public getImportedData(): Promise { + return this.dataStore.getImportedData(); + } + + public async exportData(): Promise { + return this.dataStore.exportTrialHpConfigs(); + } + + public addCustomizedTrialJob(hyperParams: string): Promise { + if (this.readonly) { + return Promise.reject(new Error('Error: can not add customized trial job in readonly mode!')); + } + if (this.currSubmittedTrialNum >= this.maxTrialNum) { + return Promise.reject(new Error('reach maxTrialNum')); + } + + // TODO: NNI manager should not peek tuner's internal protocol, let's refactor this later + const packedParameter = { + parameter_id: null, // eslint-disable-line @typescript-eslint/camelcase + parameter_source: 'customized', // eslint-disable-line @typescript-eslint/camelcase + parameters: JSON.parse(hyperParams) + } + + const form: TrialJobApplicationForm = { + sequenceId: this.experimentProfile.nextSequenceId++, + hyperParameters: { + value: JSON.stringify(packedParameter), + index: 0 + } + }; + this.waitingTrials.push(form); + + // trial id has not been generated yet, thus use '' instead + this.dataStore.storeTrialJobEvent('ADD_CUSTOMIZED', '', hyperParams); + + return Promise.resolve(form.sequenceId); + } + + public async cancelTrialJobByUser(trialJobId: string): Promise { + if (this.readonly) { + return Promise.reject(new Error('Error: can not cancel trial job in readonly mode!')); + } + this.log.info(`User cancelTrialJob: ${trialJobId}`); + await this.trainingService.cancelTrialJob(trialJobId); + await this.dataStore.storeTrialJobEvent('USER_TO_CANCEL', trialJobId, ''); + } + + public async startExperiment(config: ExperimentConfig): Promise { + this.experimentProfile = { + params: config, + id: getExperimentId(), + execDuration: 0, + logDir: getExperimentRootDir(), + startTime: Date.now(), + endTime: undefined, + nextSequenceId: 0, + revision: 0 + }; + this.config = config; + this.log.info(`Starting experiment: ${this.experimentProfile.id}`); + await this.storeExperimentProfile(); + + if (this.trainingService === undefined) { + this.log.info('Setup training service...'); + this.trainingService = await this.initTrainingService(config); + } + + this.log.info('Setup tuner...'); + const dispatcherCommand: string = getMsgDispatcherCommand(config); + this.log.debug(`dispatcher command: ${dispatcherCommand}`); + const checkpointDir: string = await this.createCheckpointDir(); + this.setupTuner(dispatcherCommand, undefined, 'start', checkpointDir); + this.setStatus('RUNNING'); + await this.storeExperimentProfile(); + this.run().catch((err: Error) => { + this.criticalError(err); 
+ }); + + return this.experimentProfile.id; + } + + public async resumeExperiment(readonly: boolean): Promise { + //Fetch back the experiment profile + const experimentId: string = getExperimentId(); + this.log.info(`Resuming experiment: ${experimentId}`); + this.experimentProfile = await this.dataStore.getExperimentProfile(experimentId); + + const config: ExperimentConfig = this.experimentProfile.params; + this.config = config; + if (this.trainingService === undefined) { + this.log.info('Setup training service...'); + this.trainingService = await this.initTrainingService(config); + } + + this.readonly = readonly; + if (readonly) { + this.setStatus('VIEWED'); + return; + } + + this.log.info('Setup tuner...'); + const dispatcherCommand: string = getMsgDispatcherCommand(config); + this.log.debug(`dispatcher command: ${dispatcherCommand}`); + const checkpointDir: string = await this.createCheckpointDir(); + this.setupTuner(dispatcherCommand, undefined, 'resume', checkpointDir); + + const allTrialJobs: TrialJobInfo[] = await this.dataStore.listTrialJobs(); + + // Resume currSubmittedTrialNum + this.currSubmittedTrialNum = allTrialJobs.length; + + // Check the final status for WAITING and RUNNING jobs + await Promise.all(allTrialJobs + .filter((job: TrialJobInfo) => job.status === 'WAITING' || job.status === 'RUNNING') + .map((job: TrialJobInfo) => this.dataStore.storeTrialJobEvent('FAILED', job.trialJobId))); + + // Collect generated trials and imported trials + const finishedTrialData: string = await this.exportData(); + const importedData: string[] = await this.dataStore.getImportedData(); + let trialData: Record[] = JSON.parse(finishedTrialData); + for (const oneImportedData of importedData) { + // do not deduplicate + trialData = trialData.concat([]>JSON.parse(oneImportedData)); + } + this.trialDataForTuner = JSON.stringify(trialData); + + if (this.experimentProfile.execDuration < this.maxDuration && + this.currSubmittedTrialNum < this.maxTrialNum && + this.experimentProfile.endTime) { + delete this.experimentProfile.endTime; + } + this.setStatus('RUNNING'); + + // TO DO: update database record for resume event + this.run().catch((err: Error) => { + this.criticalError(err); + }); + } + + public getTrialJob(trialJobId: string): Promise { + return this.dataStore.getTrialJob(trialJobId); + } + + public async setClusterMetadata(key: string, value: string): Promise { + // Hack for supporting v2 config, need refactor + if (this.trainingService === undefined) { + this.log.info('Setup training service...'); + switch (key) { + case 'kubeflow_config': { + const kubeflowModule = await import('../training_service/kubernetes/kubeflow/kubeflowTrainingService'); + this.trainingService = new kubeflowModule.KubeflowTrainingService(); + break; + } + case 'frameworkcontroller_config': { + const fcModule = await import('../training_service/kubernetes/frameworkcontroller/frameworkcontrollerTrainingService'); + this.trainingService = new fcModule.FrameworkControllerTrainingService(); + break; + } + case 'adl_config': { + const adlModule = await import('../training_service/kubernetes/adl/adlTrainingService'); + this.trainingService = new adlModule.AdlTrainingService(); + break; + } + default: + throw new Error("Setup training service failed."); + } + } + await this.trainingService.setClusterMetadata(key, value); + } + + public getClusterMetadata(key: string): Promise { + return this.trainingService.getClusterMetadata(key); + } + + public async getTrialJobStatistics(): Promise { + return 
+    }
+
+    public async stopExperiment(): Promise<void> {
+        await this.stopExperimentTopHalf();
+        await this.stopExperimentBottomHalf();
+    }
+
+    public async stopExperimentTopHalf(): Promise<void> {
+        this.setStatus('STOPPING');
+        this.log.info('Stopping experiment, cleaning up ...');
+
+        if (this.dispatcher === undefined) {
+            this.log.error('Tuner has not been setup');
+            return;
+        }
+
+        this.trainingService.removeTrialJobMetricListener(this.trialJobMetricListener);
+        if (this.dispatcherPid > 0) {
+            this.dispatcher.sendCommand(TERMINATE);
+            // gracefully terminate tuner and assessor here, wait at most 30 seconds.
+            for (let i: number = 0; i < 30; i++) {
+                if (!await isAlive(this.dispatcherPid)) {
+                    break;
+                }
+                await delay(1000);
+            }
+            await killPid(this.dispatcherPid);
+        }
+        this.dispatcher = undefined;
+    }
+
+    public async stopExperimentBottomHalf(): Promise<void> {
+        try {
+            const trialJobList: TrialJobDetail[] = await this.trainingService.listTrialJobs();
+
+            // DON'T try to make it parallel, the training service may not handle it well.
+            // If there is a performance concern, consider supporting batch cancellation in the training service.
+            for (const trialJob of trialJobList) {
+                if (trialJob.status === 'RUNNING' ||
+                    trialJob.status === 'WAITING') {
+                    try {
+                        this.log.info(`cancelTrialJob: ${trialJob.id}`);
+                        await this.trainingService.cancelTrialJob(trialJob.id);
+                    } catch (error) {
+                        this.log.debug(`ignorable error on canceling trial ${trialJob.id}. ${error}`);
+                    }
+                }
+            }
+            await this.trainingService.cleanUp();
+        } catch (err) {
+            this.log.error(`${err.stack}`);
+        }
+        if (this.experimentProfile.endTime === undefined) {
+            this.setEndtime();
+        }
+        await this.storeExperimentProfile();
+        this.setStatus('STOPPED');
+        this.log.info('Experiment stopped.');
+
+        let hasError: boolean = false;
+        try {
+            await this.experimentManager.stop();
+            await component.get<TensorboardManager>(TensorboardManager).stop();
+            await this.dataStore.close();
+            await component.get<NNIRestServer>(NNIRestServer).stop();
+        } catch (err) {
+            hasError = true;
+            this.log.error(`${err.stack}`);
+        } finally {
+            stopLogging();
+            process.exit(hasError ? 1 : 0);
+        }
+    }
+
+    public async getMetricData(trialJobId?: string, metricType?: MetricType): Promise<MetricDataRecord[]> {
+        return this.dataStore.getMetricData(trialJobId, metricType);
+    }
+
+    public async getMetricDataByRange(minSeqId: number, maxSeqId: number): Promise<MetricDataRecord[]> {
+        const trialJobs = await this.dataStore.listTrialJobs();
+        const targetTrials = trialJobs.filter(trial => (
+            // FIXME: can this be undefined?
+            trial.sequenceId !== undefined && minSeqId <= trial.sequenceId && trial.sequenceId <= maxSeqId
+        ));
+        const targetTrialIds = new Set(targetTrials.map(trial => trial.trialJobId));
+
+        const allMetrics = await this.dataStore.getMetricData();
+        return allMetrics.filter(metric => targetTrialIds.has(metric.trialJobId));
+    }
+
+    public async getLatestMetricData(): Promise<MetricDataRecord[]> {
+        // FIXME: this can take a long time
+        const allMetrics: MetricDataRecord[] = await this.dataStore.getMetricData();
+        const finals: MetricDataRecord[] = [];
+        const latestIntermediates: Map<string, MetricDataRecord> = new Map<string, MetricDataRecord>();
+        for (const metric of allMetrics) {
+            if (metric.type !== 'PERIODICAL') {
+                finals.push(metric);
+            } else {
+                const old: MetricDataRecord | undefined = latestIntermediates.get(metric.trialJobId);
+                if (old === undefined || old.sequence <= metric.sequence) {
+                    latestIntermediates.set(metric.trialJobId, metric);
+                }
+            }
+        }
+        return finals.concat(Array.from(latestIntermediates.values()));
+        // FIXME: unit test
+    }
+
+    public async getTrialFile(trialJobId: string, fileName: string): Promise<Buffer | string> {
+        return this.trainingService.getTrialFile(trialJobId, fileName);
+    }
+
+    public getExperimentProfile(): Promise<ExperimentProfile> {
+        // TO DO: use Promise.resolve()
+        const deferred: Deferred<ExperimentProfile> = new Deferred<ExperimentProfile>();
+        deferred.resolve(this.experimentProfile);
+
+        return deferred.promise;
+    }
+
+    public getStatus(): NNIManagerStatus {
+        return this.status;
+    }
+
+    public async listTrialJobs(status?: TrialJobStatus): Promise<TrialJobInfo[]> {
+        return this.dataStore.listTrialJobs(status);
+    }
+
+    private get maxDuration(): number {
+        const value = this.experimentProfile.params.maxExperimentDuration;
+        return (value === undefined ? Infinity : toSeconds(value));
+    }
+
+    private get maxTrialNum(): number {
+        const value = this.experimentProfile.params.maxTrialNumber;
+        return (value === undefined ? Infinity : value);
+    }
+
+    private get maxTrialDuration(): number {
+        const value = this.experimentProfile.params.maxTrialDuration;
+        return (value === undefined ? Infinity : toSeconds(value));
+    }
+
+    private async initTrainingService(config: ExperimentConfig): Promise<TrainingService> {
+        let platform: string;
+        if (Array.isArray(config.trainingService)) {
+            platform = 'hybrid';
+        } else if (config.trainingService.platform) {
+            platform = config.trainingService.platform;
+        } else {
+            platform = (config as any).trainingServicePlatform;
+        }
+        if (!platform) {
+            throw new Error('Cannot detect training service platform');
+        }
+        const reuseMode = Array.isArray(config.trainingService) || (config.trainingService as any).reuseMode;
+
+        if (reuseMode) {
+            const module_ = await import('../training_service/reusable/routerTrainingService');
+            return await module_.RouterTrainingService.construct(config);
+        } else if (platform === 'local') {
+            const module_ = await import('../training_service/local/localTrainingService');
+            return new module_.LocalTrainingService(config.trainingService);
+        } else if (platform === 'kubeflow') {
+            const module_ = await import('../training_service/kubernetes/kubeflow/kubeflowTrainingService');
+            return new module_.KubeflowTrainingService();
+        } else if (platform === 'frameworkcontroller') {
+            const module_ = await import('../training_service/kubernetes/frameworkcontroller/frameworkcontrollerTrainingService');
+            return new module_.FrameworkControllerTrainingService();
+        } else if (platform === 'adl') {
+            const module_ = await import('../training_service/kubernetes/adl/adlTrainingService');
+            return new module_.AdlTrainingService();
+        } else {
+            const module_ = await import('../training_service/reusable/routerTrainingService');
+            return await module_.RouterTrainingService.construct(config);
+        }
+    }
+
+    private setupTuner(command: string, cwd: string | undefined, mode: 'start' | 'resume', dataDirectory: string): void {
+        if (this.dispatcher !== undefined) {
+            return;
+        }
+        const stdio: StdioOptions = ['ignore', process.stdout, process.stderr, 'pipe', 'pipe'];
+        let newCwd: string;
+        if (cwd === undefined || cwd === '') {
+            newCwd = getLogDir();
+        } else {
+            newCwd = cwd;
+        }
+        // TO DO: add HIP_VISIBLE_DEVICES
+        const includeIntermediateResultsEnv = !!(this.config.deprecated && this.config.deprecated.includeIntermediateResults);
+
+        const nniEnv = {
+            SDK_PROCESS: 'dispatcher',
+            NNI_MODE: mode,
+            NNI_CHECKPOINT_DIRECTORY: dataDirectory,
+            NNI_LOG_DIRECTORY: getLogDir(),
+            NNI_LOG_LEVEL: getLogLevel(),
+            NNI_INCLUDE_INTERMEDIATE_RESULTS: includeIntermediateResultsEnv,
+            HIP_VISIBLE_DEVICES: toCudaVisibleDevices(this.experimentProfile.params.tunerGpuIndices)
+        };
+        const newEnv = Object.assign({}, process.env, nniEnv);
+        const tunerProc: ChildProcess = getTunerProc(command, stdio, newCwd, newEnv);
+        this.dispatcherPid = tunerProc.pid!;
+        this.dispatcher = createDispatcherInterface(tunerProc);
+
+        return;
+    }
+
+    private updateTrialConcurrency(trialConcurrency: number): void {
+        // we assume trialConcurrency >= 0, which is checked by restserver
+        this.trialConcurrencyChange += (trialConcurrency - this.experimentProfile.params.trialConcurrency);
+        this.experimentProfile.params.trialConcurrency = trialConcurrency;
+
+        return;
+    }
+
+    private updateSearchSpace(searchSpace: object): void {
+        if (this.dispatcher === undefined) {
+            throw new Error('Error: tuner has not been setup');
+        }
+        this.log.info(`Updated search space ${searchSpace}`);
+        this.dispatcher.sendCommand(UPDATE_SEARCH_SPACE, JSON.stringify(searchSpace));
+        this.experimentProfile.params.searchSpace = searchSpace;
+
+        return;
+    }
+
+    private async periodicallyUpdateExecDuration(): Promise<void> {
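+        // Tick once per second while the experiment is alive; execDuration only
+        // accumulates in states where trials can actually run, and the profile is
+        // persisted every 10 ticks to bound datastore write traffic.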
+        let count: number = 1;
+        while (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
+            await delay(1000 * 1); // 1 second
+            if (['RUNNING', 'NO_MORE_TRIAL', 'TUNER_NO_MORE_TRIAL'].includes(this.status.status)) {
+                this.experimentProfile.execDuration += 1;
+                if (count % 10 === 0) {
+                    await this.storeExperimentProfile();
+                }
+            }
+            count += 1;
+        }
+    }
+
+    private async pingDispatcher(): Promise<void> {
+        if (this.dispatcher === undefined) {
+            throw new Error('Error: tuner has not been setup');
+        }
+        while (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
+            this.dispatcher.sendCommand(PING);
+            await delay(1000 * 5);
+        }
+    }
+
+    private async stopTrialIfOverMaxDurationLimit(): Promise<void> {
+        if (this.maxTrialDuration === Infinity) {
+            return;
+        }
+
+        for (const trialJobId of Array.from(this.trialJobs.keys())) {
+            const trialJobDetail: TrialJobDetail | undefined = this.trialJobs.get(trialJobId);
+            if (trialJobDetail !== undefined &&
+                trialJobDetail.status === 'RUNNING' &&
+                trialJobDetail.startTime !== undefined) {
+                const currentTrialDuration = (new Date().getTime() - trialJobDetail.startTime) / 1000;
+                if (currentTrialDuration > this.maxTrialDuration) {
+                    const isEarlyStopped = true;
+                    await this.trainingService.cancelTrialJob(trialJobId, isEarlyStopped);
+                    this.log.info(`Trial job ${trialJobDetail.id} has been canceled because it is over the max trial duration.`);
+                }
+            }
+        }
+    }
+
+    private async requestTrialJobsStatus(): Promise<number> {
+        let finishedTrialJobNum: number = 0;
+        if (this.dispatcher === undefined) {
+            throw new Error('Error: tuner has not been setup');
+        }
+        for (const trialJobId of Array.from(this.trialJobs.keys())) {
+            const trialJobDetail: TrialJobDetail = await this.trainingService.getTrialJob(trialJobId);
+            const oldTrialJobDetail: TrialJobDetail | undefined = this.trialJobs.get(trialJobId);
+            if (oldTrialJobDetail !== undefined && oldTrialJobDetail.status !== trialJobDetail.status) {
+                this.log.info(`Trial job ${trialJobDetail.id} status changed from ${oldTrialJobDetail.status} to ${trialJobDetail.status}`);
+                this.trialJobs.set(trialJobId, Object.assign({}, trialJobDetail));
+                await this.dataStore.storeTrialJobEvent(trialJobDetail.status, trialJobDetail.id, undefined, trialJobDetail);
+            }
+            const newTrialJobDetail: TrialJobDetail | undefined = this.trialJobs.get(trialJobId);
+            if (newTrialJobDetail !== undefined) {
+                newTrialJobDetail.message = trialJobDetail.message;
+            }
+            let hyperParams: string | undefined = undefined;
+            switch (trialJobDetail.status) {
+                case 'SUCCEEDED':
+                case 'USER_CANCELED':
+                case 'EARLY_STOPPED':
+                    this.trialJobs.delete(trialJobId);
+                    finishedTrialJobNum++;
+                    hyperParams = trialJobDetail.form.hyperParameters.value;
+                    this.dispatcher.sendCommand(TRIAL_END, JSON.stringify({
+                        trial_job_id: trialJobDetail.id, // eslint-disable-line @typescript-eslint/camelcase
+                        event: trialJobDetail.status,
+                        hyper_params: hyperParams // eslint-disable-line @typescript-eslint/camelcase
+                    }));
+                    break;
+                case 'FAILED':
+                case 'SYS_CANCELED':
+                    // In the current version, we do not retry
+                    // TO DO: push this job to queue for retry
+                    this.trialJobs.delete(trialJobId);
+                    finishedTrialJobNum++;
+                    hyperParams = trialJobDetail.form.hyperParameters.value;
+                    this.dispatcher.sendCommand(TRIAL_END, JSON.stringify({
+                        trial_job_id: trialJobDetail.id, // eslint-disable-line @typescript-eslint/camelcase
+                        event: trialJobDetail.status,
+                        hyper_params: hyperParams // eslint-disable-line @typescript-eslint/camelcase
+                    }));
+                    break;
+                case 'WAITING':
+                case 'RUNNING':
+                case 'UNKNOWN':
+                    // Do nothing
+                    break;
+                default:
+                    // TO DO: add warning in log
+            }
+        }
+
+        return finishedTrialJobNum;
+    }
+
+    private async manageTrials(): Promise<void> {
+        if (this.dispatcher === undefined) {
+            throw new Error('Error: tuner has not been setup');
+        }
+        let allFinishedTrialJobNum: number = this.currSubmittedTrialNum;
+        let waitSubmittedToFinish: number;
+        while (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
+            await this.stopTrialIfOverMaxDurationLimit();
+
+            const finishedTrialJobNum: number = await this.requestTrialJobsStatus();
+            allFinishedTrialJobNum += finishedTrialJobNum;
+
+            // requestTrialNum is the number of trials that will be requested from the tuner.
+            // If trialConcurrency does not change, requestTrialNum equals finishedTrialJobNum.
+            // If trialConcurrency changes, for example, trialConcurrency increases by 2 (trialConcurrencyChange=2), then
+            // requestTrialNum equals 2 + finishedTrialJobNum and trialConcurrencyChange becomes 0.
+            // If trialConcurrency changes, for example, trialConcurrency decreases by 4 (trialConcurrencyChange=-4) and
+            // finishedTrialJobNum is 2, then requestTrialNum becomes -2. No trial will be requested from the tuner,
+            // and trialConcurrencyChange becomes -2.
+            const requestTrialNum: number = this.trialConcurrencyChange + finishedTrialJobNum;
+            if (requestTrialNum >= 0) {
+                this.trialConcurrencyChange = 0;
+            } else {
+                this.trialConcurrencyChange = requestTrialNum;
+            }
+
+            // check maxTrialNum and maxDuration here
+            // NO_MORE_TRIAL is more like a subset of RUNNING, because during RUNNING the tuner
+            // might tell nnimanager that there are no more trials. In NO_MORE_TRIAL state, the experiment is viewed
+            // as still running. DONE can be transferred from RUNNING or NO_MORE_TRIAL.
+            assert(this.status.status === 'RUNNING' ||
+                this.status.status === 'DONE' ||
+                this.status.status === 'NO_MORE_TRIAL' ||
+                this.status.status === 'TUNER_NO_MORE_TRIAL', `Actual status: ${this.status.status}`);
+            if (this.experimentProfile.execDuration > this.maxDuration ||
+                this.currSubmittedTrialNum >= this.maxTrialNum) {
+                if (this.status.status !== 'DONE') {
+                    this.setStatus('NO_MORE_TRIAL');
+                    waitSubmittedToFinish = this.currSubmittedTrialNum;
+
+                    assert(allFinishedTrialJobNum <= waitSubmittedToFinish);
+                    if (allFinishedTrialJobNum >= waitSubmittedToFinish) {
+                        this.setStatus('DONE');
+                        this.setEndtime();
+                        await this.storeExperimentProfile();
+                        // write this log for travis CI
+                        this.log.info('Experiment done.');
+                    }
+                }
+            } else {
+                this.requestTrialJobs(requestTrialNum);
+
+                if (this.status.status === 'DONE') {
+                    delete this.experimentProfile.endTime;
+                    await this.storeExperimentProfile();
+                }
+                if (this.status.status !== 'TUNER_NO_MORE_TRIAL') {
+                    this.setStatus('RUNNING');
+                }
+                for (let i: number = this.trialJobs.size; i < this.experimentProfile.params.trialConcurrency; i++) {
+                    if (this.waitingTrials.length === 0 ||
+                        this.currSubmittedTrialNum >= this.maxTrialNum) {
+                        break;
+                    }
+                    const form = this.waitingTrials.shift() as TrialJobApplicationForm;
+                    this.currSubmittedTrialNum++;
+                    this.log.info('submitTrialJob: form:', form);
+                    const trialJobDetail: TrialJobDetail = await this.trainingService.submitTrialJob(form);
+                    const Snapshot: TrialJobDetail = Object.assign({}, trialJobDetail);
+                    await this.storeExperimentProfile();
+                    this.trialJobs.set(trialJobDetail.id, Snapshot);
+                    const trialJobDetailSnapshot: TrialJobDetail | undefined = this.trialJobs.get(trialJobDetail.id);
+                    if (trialJobDetailSnapshot !== undefined) {
+                        await this.dataStore.storeTrialJobEvent(
+                            trialJobDetailSnapshot.status, trialJobDetailSnapshot.id, form.hyperParameters.value, trialJobDetailSnapshot);
+                    } else {
+                        assert(false, `undefined trialJobDetail in trialJobs: ${trialJobDetail.id}`);
+                    }
+                }
+            }
+            await delay(1000 * 5); // 5 seconds
+        }
+    }
+
+    private storeExperimentProfile(): Promise<void> {
+        this.experimentProfile.revision += 1;
+
+        return this.dataStore.storeExperimentProfile(this.experimentProfile);
+    }
+
+    private async run(): Promise<void> {
+        assert(this.dispatcher !== undefined);
+
+        this.addEventListeners();
+
+        this.sendInitTunerCommands();
+
+        await Promise.all([
+            this.periodicallyUpdateExecDuration(),
+            this.pingDispatcher().catch((err: Error) => {
+                throw NNIError.FromError(err, 'Dispatcher error: ');
+            }),
+            this.trainingService.run().catch((err: Error) => {
+                throw NNIError.FromError(err, 'Training service error: ');
+            }),
+            this.manageTrials().catch((err: Error) => {
+                throw NNIError.FromError(err, 'Job management error: ');
+            })]);
+    }
+
+    private addEventListeners(): void {
+        this.log.info('Add event listeners');
+        // TO DO: cannot run this method more than once in one NNIManager instance
+        if (this.dispatcher === undefined) {
+            throw new Error('Error: tuner or job maintainer has not been setup');
+        }
+        this.trainingService.addTrialJobMetricListener(this.trialJobMetricListener);
+
+        this.dispatcher.onCommand((commandType: string, content: string) => {
+            this.onTunerCommand(commandType, content).catch((err: Error) => {
+                this.criticalError(NNIError.FromError(err, 'Tuner command event error: '));
+            });
+        });
+        this.dispatcher.onError((error: Error) => {
+            this.log.error(`Dispatcher error: ${error.message}`);
+            this.criticalError(new Error('Dispatcher stream error, tuner may have crashed.'));
+        });
+    }
+
+    private sendInitTunerCommands(): void {
+        if (this.dispatcher === undefined) {
+            throw new Error('Dispatcher error: tuner has not been setup');
+        }
+        this.log.debug(`Send tuner command: INITIALIZE: ${this.experimentProfile.params.searchSpace}`);
+        // The tuner needs to be initialized with the search space before generating any hyper parameters
+        this.dispatcher.sendCommand(INITIALIZE, JSON.stringify(this.experimentProfile.params.searchSpace));
+    }
+
+    private async onTrialJobMetrics(metric: TrialJobMetric): Promise<void> {
+        this.log.debug('NNIManager received trial job metrics:', metric);
+        if (this.trialJobs.has(metric.id)) {
+            await this.dataStore.storeMetricData(metric.id, metric.data);
+            if (this.dispatcher === undefined) {
+                throw new Error('Error: tuner has not been setup');
+            }
+            this.dispatcher.sendCommand(REPORT_METRIC_DATA, metric.data);
+        } else {
+            this.log.warning('NNIManager received metrics for a non-existent trial job:', metric);
+        }
+    }
+
+    private requestTrialJobs(jobNum: number): void {
+        if (jobNum < 1) {
+            return;
+        }
+        if (this.dispatcher === undefined) {
+            throw new Error('Dispatcher error: tuner has not been setup');
+        }
+        if (this.config.deprecated && this.config.deprecated.multiThread) {
+            // Send multiple requests to ensure multiple hyper parameters are generated in a non-blocking way.
+            // For a single REQUEST_TRIAL_JOBS request, hyper parameters are generated one by one
+            // sequentially.
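+            // For example, jobNum = 4 results in four REQUEST_TRIAL_JOBS '1' commands,
+            // so each set of hyper parameters is delivered as soon as it is generated
+            // instead of arriving as a single batch.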
+            for (let i: number = 0; i < jobNum; i++) {
+                this.dispatcher.sendCommand(REQUEST_TRIAL_JOBS, '1');
+            }
+        } else {
+            this.dispatcher.sendCommand(REQUEST_TRIAL_JOBS, String(jobNum));
+        }
+    }
+
+    private async onTunerCommand(commandType: string, content: string): Promise<void> {
+        this.log.info(`NNIManager received command from dispatcher: ${commandType}, ${content}`);
+        switch (commandType) {
+            case INITIALIZED: {
+                // Tuner is initialized, search space is set, request the tuner to generate hyper parameters
+                if (this.trialDataForTuner.length > 0) {
+                    if (this.dispatcher === undefined) {
+                        throw new Error('Dispatcher error: tuner has not been setup');
+                    }
+                    this.dispatcher.sendCommand(IMPORT_DATA, this.trialDataForTuner);
+                }
+                this.requestTrialJobs(this.experimentProfile.params.trialConcurrency);
+                break;
+            }
+            case NEW_TRIAL_JOB: {
+                if (this.status.status === 'TUNER_NO_MORE_TRIAL') {
+                    this.log.warning('It is not supposed to receive more trials after NO_MORE_TRIAL is set');
+                    this.setStatus('RUNNING');
+                }
+                const trialRequestContent: TrialCommandContent = JSON.parse(content);
+                const noneConstraint: PlacementConstraint = {type: 'None', gpus: []};
+                const form: TrialJobApplicationForm = {
+                    sequenceId: this.experimentProfile.nextSequenceId++,
+                    hyperParameters: {
+                        value: content,
+                        index: 0
+                    },
+                    placementConstraint: trialRequestContent.placement_constraint ? trialRequestContent.placement_constraint : noneConstraint
+                };
+                this.waitingTrials.push(form);
+                break;
+            }
+            case SEND_TRIAL_JOB_PARAMETER: {
+                const tunerCommand: any = JSON.parse(content);
+                assert(tunerCommand.parameter_index >= 0);
+                assert(tunerCommand.trial_job_id !== undefined);
+
+                const trialJobForm: TrialJobApplicationForm = {
+                    sequenceId: -1, // FIXME: multi-phase tuner should use sequence ID instead of trial job ID
+                    hyperParameters: {
+                        value: content,
+                        index: tunerCommand.parameter_index
+                    }
+                };
+                this.log.info('updateTrialJob: job id:', tunerCommand.trial_job_id, 'form:', trialJobForm);
+                await this.trainingService.updateTrialJob(tunerCommand.trial_job_id, trialJobForm);
+                if (tunerCommand['parameters'] !== null) {
+                    // the parameters field is set to an empty string if no more hyper parameters can be generated by the tuner.
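+                    // Only record an ADD_HYPERPARAMETER event when the tuner actually
+                    // produced parameters; an exhausted tuner sends nothing worth storing.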
+                    await this.dataStore.storeTrialJobEvent(
+                        'ADD_HYPERPARAMETER', tunerCommand.trial_job_id, content, undefined);
+                }
+                break;
+            }
+            case NO_MORE_TRIAL_JOBS: {
+                if (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
+                    this.setStatus('TUNER_NO_MORE_TRIAL');
+                }
+                break;
+            }
+            case KILL_TRIAL_JOB: {
+                this.log.info('cancelTrialJob:', content);
+                await this.trainingService.cancelTrialJob(JSON.parse(content), true);
+                break;
+            }
+            default:
+                throw new Error('Error: unsupported command type from tuner');
+        }
+    }
+
+    private criticalError(err: Error): void {
+        this.logError(err);
+        console.error(err);
+    }
+
+    private logError(err: Error): void {
+        if (err.stack !== undefined) {
+            this.log.error(err.stack);
+        }
+        this.status.errors.push(err.message);
+        this.setEndtime();
+        this.setStatus('ERROR');
+    }
+
+    private setStatus(status: ExperimentStatus): void {
+        if (status !== this.status.status) {
+            this.log.info(`Change NNIManager status from: ${this.status.status} to: ${status}`);
+            this.status.status = status;
+            this.experimentManager.setExperimentInfo(this.experimentProfile.id, 'status', this.status.status);
+        }
+    }
+
+    private setEndtime(): void {
+        this.experimentProfile.endTime = Date.now();
+        this.experimentManager.setExperimentInfo(this.experimentProfile.id, 'endTime', this.experimentProfile.endTime);
+    }
+
+    private async createCheckpointDir(): Promise<string> {
+        // TODO: test
+        const chkpDir: string = getCheckpointDir();
+        await mkDirP(chkpDir);
+        return chkpDir;
+    }
+
+    public async getTrialOutputLocalPath(trialJobId: string): Promise<string> {
+        return this.trainingService.getTrialOutputLocalPath(trialJobId);
+    }
+
+    public async fetchTrialOutput(trialJobId: string, subpath: string): Promise<void> {
+        return this.trainingService.fetchTrialOutput(trialJobId, subpath);
+    }
+}
+
+export { NNIManager };
diff --git a/ts/nni_manager/core/sqlDatabase.ts b/ts/nni_manager/core/sqlDatabase.ts
new file mode 100644
index 0000000000000000000000000000000000000000..01000dafb882c45b9ad00b1d20aa5bc7d0cd17e6
--- /dev/null
+++ b/ts/nni_manager/core/sqlDatabase.ts
@@ -0,0 +1,266 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
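+
+// SqlDB persists three kinds of records in a single SQLite file (nni.sqlite,
+// created under the experiment's database directory): experiment profile
+// snapshots (one row per revision), trial job lifecycle events, and metric
+// data. The callback-style sqlite3 API is adapted to promises via ts-deferred.
+//
+// A minimal usage sketch of this module's API (hypothetical caller and path;
+// the real call sites live in NNIDataStore):
+//
+//     const db = new SqlDB();
+//     await db.init(true, '/tmp/example-experiment/db');  // createNew = true
+//     await db.storeExperimentProfile(profile);
+//     const latest = await db.queryLatestExperimentProfile(profile.id);
+//     await db.close();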
+
+import assert from 'assert';
+import fs from 'fs';
+import path from 'path';
+import sqlite3 from 'sqlite3';
+import { Deferred } from 'ts-deferred';
+
+import {
+    Database,
+    MetricDataRecord,
+    MetricType,
+    TrialJobEvent,
+    TrialJobEventRecord
+} from '../common/datastore';
+import { getLogger, Logger } from '../common/log';
+import { ExperimentProfile } from '../common/manager';
+import { TrialJobDetail } from '../common/trainingService';
+
+
+const createTables: string = `
+create table TrialJobEvent (timestamp integer, trialJobId text, event text, data text, logPath text, sequenceId integer, message text);
+create index TrialJobEvent_trialJobId on TrialJobEvent(trialJobId);
+create index TrialJobEvent_event on TrialJobEvent(event);
+
+create table MetricData (timestamp integer, trialJobId text, parameterId text, type text, sequence integer, data text);
+create index MetricData_trialJobId on MetricData(trialJobId);
+create index MetricData_type on MetricData(type);
+
+create table ExperimentProfile (
+    params text,
+    id text,
+    execDuration integer,
+    startTime integer,
+    endTime integer,
+    logDir text,
+    nextSequenceId integer,
+    revision integer);
+create index ExperimentProfile_id on ExperimentProfile(id);
+`;
+
+function loadExperimentProfile(row: any): ExperimentProfile {
+    return {
+        params: JSON.parse(row.params),
+        id: row.id,
+        execDuration: row.execDuration,
+        startTime: row.startTime === null ? undefined : row.startTime,
+        endTime: row.endTime === null ? undefined : row.endTime,
+        logDir: row.logDir === null ? undefined : row.logDir,
+        nextSequenceId: row.nextSequenceId,
+        revision: row.revision
+    };
+}
+
+function loadTrialJobEvent(row: any): TrialJobEventRecord {
+    return {
+        timestamp: row.timestamp,
+        trialJobId: row.trialJobId,
+        event: row.event,
+        data: row.data === null ? undefined : row.data,
+        logPath: row.logPath === null ? undefined : row.logPath,
+        sequenceId: row.sequenceId === null ? undefined : row.sequenceId,
+        message: row.message === null ? undefined : row.message
+    };
+}
+
+function loadMetricData(row: any): MetricDataRecord {
+    return {
+        timestamp: row.timestamp,
+        trialJobId: row.trialJobId,
+        parameterId: row.parameterId,
+        type: row.type,
+        sequence: row.sequence,
+        data: row.data
+    };
+}
+
+class SqlDB implements Database {
+    private db!: sqlite3.Database;
+    private log: Logger = getLogger('SqlDB');
+    private initTask!: Deferred<void>;
+
+    public init(createNew: boolean, dbDir: string): Promise<void> {
+        if (this.initTask !== undefined) {
+            return this.initTask.promise;
+        }
+        this.initTask = new Deferred<void>();
+        this.log.debug(`Database directory: ${dbDir}`);
+        assert(fs.existsSync(dbDir));
+
+        const mode: number = createNew ? (sqlite3.OPEN_CREATE | sqlite3.OPEN_READWRITE) : sqlite3.OPEN_READWRITE;
+        const dbFileName: string = path.join(dbDir, 'nni.sqlite');
+
+        this.db = new sqlite3.Database(dbFileName, mode, (err: Error | null): void => {
+            if (err) {
+                this.resolve(this.initTask, err);
+            } else {
+                if (createNew) {
+                    this.db.exec(createTables, (error: Error | null) => {
+                        this.resolve(this.initTask, error);
+                    });
+                } else {
+                    this.initTask.resolve();
+                }
+            }
+        });
+
+        return this.initTask.promise;
+    }
+
+    public close(): Promise<void> {
+        const deferred: Deferred<void> = new Deferred<void>();
+        this.db.close((err: Error | null) => { this.resolve(deferred, err); });
+
+        return deferred.promise;
+    }
+
+    public storeExperimentProfile(exp: ExperimentProfile): Promise<void> {
+        const sql: string = 'insert into ExperimentProfile values (?,?,?,?,?,?,?,?)';
+        const args: any[] = [
+            JSON.stringify(exp.params),
+            exp.id,
+            exp.execDuration,
+            exp.startTime === undefined ? null : exp.startTime,
+            exp.endTime === undefined ? null : exp.endTime,
+            exp.logDir === undefined ? null : exp.logDir,
+            exp.nextSequenceId,
+            exp.revision
+        ];
+        this.log.trace(`storeExperimentProfile: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<void> = new Deferred<void>();
+        this.db.run(sql, args, (err: Error | null) => { this.resolve(deferred, err); });
+
+        return deferred.promise;
+    }
+
+    public queryExperimentProfile(experimentId: string, revision?: number): Promise<ExperimentProfile[]> {
+        let sql: string = '';
+        let args: any[] = [];
+        if (revision === undefined) {
+            sql = 'select * from ExperimentProfile where id=? order by revision DESC';
+            args = [experimentId];
+        } else {
+            sql = 'select * from ExperimentProfile where id=? and revision=?';
+            args = [experimentId, revision];
+        }
+        this.log.trace(`queryExperimentProfile: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<ExperimentProfile[]> = new Deferred<ExperimentProfile[]>();
+        this.db.all(sql, args, (err: Error | null, rows: any[]) => {
+            this.resolve(deferred, err, rows, loadExperimentProfile);
+        });
+
+        return deferred.promise;
+    }
+
+    public async queryLatestExperimentProfile(experimentId: string): Promise<ExperimentProfile> {
+        const profiles: ExperimentProfile[] = await this.queryExperimentProfile(experimentId);
+
+        return profiles[0];
+    }
+
+    public storeTrialJobEvent(
+        event: TrialJobEvent, trialJobId: string, timestamp: number, hyperParameter?: string, jobDetail?: TrialJobDetail): Promise<void> {
+        const sql: string = 'insert into TrialJobEvent values (?,?,?,?,?,?,?)';
+        const logPath: string | undefined = jobDetail === undefined ? undefined : jobDetail.url;
+        const sequenceId: number | undefined = jobDetail === undefined ? undefined : jobDetail.form.sequenceId;
+        const message: string | undefined = jobDetail === undefined ? undefined : jobDetail.message;
+        const args: any[] = [timestamp, trialJobId, event, hyperParameter, logPath, sequenceId, message];
+
+        this.log.trace(`storeTrialJobEvent: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<void> = new Deferred<void>();
+        this.db.run(sql, args, (err: Error | null) => { this.resolve(deferred, err); });
+
+        return deferred.promise;
+    }
+
+    public queryTrialJobEvent(trialJobId?: string, event?: TrialJobEvent): Promise<TrialJobEventRecord[]> {
+        let sql: string = '';
+        let args: any[] | undefined;
+        if (trialJobId === undefined && event === undefined) {
+            sql = 'select * from TrialJobEvent';
+        } else if (trialJobId === undefined) {
+            sql = 'select * from TrialJobEvent where event=?';
+            args = [event];
+        } else if (event === undefined) {
+            sql = 'select * from TrialJobEvent where trialJobId=?';
+            args = [trialJobId];
+        } else {
+            sql = 'select * from TrialJobEvent where trialJobId=? and event=?';
+            args = [trialJobId, event];
+        }
+
+        this.log.trace(`queryTrialJobEvent: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<TrialJobEventRecord[]> = new Deferred<TrialJobEventRecord[]>();
+        this.db.all(sql, args, (err: Error | null, rows: any[]) => {
+            this.resolve(deferred, err, rows, loadTrialJobEvent);
+        });
+
+        return deferred.promise;
+    }
+
+    public storeMetricData(_trialJobId: string, data: string): Promise<void> {
+        const sql: string = 'insert into MetricData values (?,?,?,?,?,?)';
+        const json: MetricDataRecord = JSON.parse(data);
+        const args: any[] = [Date.now(), json.trialJobId, json.parameterId, json.type, json.sequence, JSON.stringify(json.data)];
+
+        this.log.trace(`storeMetricData: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<void> = new Deferred<void>();
+        this.db.run(sql, args, (err: Error | null) => { this.resolve(deferred, err); });
+
+        return deferred.promise;
+    }
+
+    public queryMetricData(trialJobId?: string, metricType?: MetricType): Promise<MetricDataRecord[]> {
+        let sql: string = '';
+        let args: any[] | undefined;
+        if (metricType === undefined && trialJobId === undefined) {
+            sql = 'select * from MetricData';
+        } else if (trialJobId === undefined) {
+            sql = 'select * from MetricData where type=?';
+            args = [metricType];
+        } else if (metricType === undefined) {
+            sql = 'select * from MetricData where trialJobId=?';
+            args = [trialJobId];
+        } else {
+            sql = 'select * from MetricData where trialJobId=? and type=?';
+            args = [trialJobId, metricType];
+        }
+
+        this.log.trace(`queryMetricData: SQL: ${sql}, args:`, args);
+        const deferred: Deferred<MetricDataRecord[]> = new Deferred<MetricDataRecord[]>();
+        this.db.all(sql, args, (err: Error | null, rows: any[]) => {
+            this.resolve(deferred, err, rows, loadMetricData);
+        });
+
+        return deferred.promise;
+    }
+
+    private resolve<T>(
+        deferred: Deferred<void> | Deferred<T[]>,
+        error: Error | null,
+        rows?: any[],
+        rowLoader?: (row: any) => T
+    ): void {
+        if (error !== null) {
+            deferred.reject(error);
+
+            return;
+        }
+
+        if (rowLoader === undefined) {
+            (<Deferred<void>>deferred).resolve();
+        } else {
+            const data: T[] = [];
+            for (const row of (<any[]>rows)) {
+                data.push(rowLoader(row));
+            }
+            this.log.trace(`sql query result:`, data);
+            (<Deferred<T[]>>deferred).resolve(data);
+        }
+    }
+}
+
+export { SqlDB };
diff --git a/ts/nni_manager/main.ts b/ts/nni_manager/main.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5a231bae6109699021546ca57bb9262b3f3a86ed
--- /dev/null
+++ b/ts/nni_manager/main.ts
@@ -0,0 +1,145 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
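+
+// Entry point of the NNI manager process. It parses the command line flags
+// below, records the experiment startup info, binds the IoC singletons
+// (manager, SQL database, datastore, experiment and tensorboard managers),
+// and finally starts the REST server. A typical invocation (normally issued
+// by nnictl; the concrete values here are illustrative only):
+//
+//     node main.js --port 8080 --mode local --start_mode new \
+//         --experiment_id <id> --log_level info --foreground false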
+
+import 'app-module-path/register';
+import { Container, Scope } from 'typescript-ioc';
+
+import * as fs from 'fs';
+import * as path from 'path';
+import * as component from './common/component';
+import { Database, DataStore } from './common/datastore';
+import { setExperimentStartupInfo } from './common/experimentStartupInfo';
+import { getLogger, setLogLevel, startLogging } from './common/log';
+import { Manager, ExperimentStartUpMode } from './common/manager';
+import { ExperimentManager } from './common/experimentManager';
+import { TensorboardManager } from './common/tensorboardManager';
+import { getLogDir, mkDirP, parseArg } from './common/utils';
+import { NNIDataStore } from './core/nniDataStore';
+import { NNIManager } from './core/nnimanager';
+import { SqlDB } from './core/sqlDatabase';
+import { NNIExperimentsManager } from './core/nniExperimentsManager';
+import { NNITensorboardManager } from './core/nniTensorboardManager';
+import { NNIRestServer } from './rest_server/nniRestServer';
+
+
+function initStartupInfo(
+    startExpMode: string, experimentId: string, basePort: number, platform: string,
+    logDirectory: string, experimentLogLevel: string, readonly: boolean, dispatcherPipe: string, urlprefix: string): void {
+    const createNew: boolean = (startExpMode === ExperimentStartUpMode.NEW);
+    setExperimentStartupInfo(createNew, experimentId, basePort, platform, logDirectory, experimentLogLevel, readonly, dispatcherPipe, urlprefix);
+}
+
+async function initContainer(foreground: boolean, _platformMode: string, logFileName?: string): Promise<void> {
+    Container.bind(Manager)
+        .to(NNIManager)
+        .scope(Scope.Singleton);
+    Container.bind(Database)
+        .to(SqlDB)
+        .scope(Scope.Singleton);
+    Container.bind(DataStore)
+        .to(NNIDataStore)
+        .scope(Scope.Singleton);
+    Container.bind(ExperimentManager)
+        .to(NNIExperimentsManager)
+        .scope(Scope.Singleton);
+    Container.bind(TensorboardManager)
+        .to(NNITensorboardManager)
+        .scope(Scope.Singleton);
+    const DEFAULT_LOGFILE: string = path.join(getLogDir(), 'nnimanager.log');
+    if (!foreground) {
+        if (logFileName === undefined) {
+            startLogging(DEFAULT_LOGFILE);
+        } else {
+            startLogging(logFileName);
+        }
+    }
+    // eslint-disable-next-line @typescript-eslint/no-use-before-define
+    setLogLevel(logLevel);
+    const ds: DataStore = component.get(DataStore);
+
+    await ds.init();
+}
+
+function usage(): void {
+    console.info('usage: node main.js --port <port> --mode <training service platform> \
+--start_mode <new|resume> --experiment_id <id> --foreground <true|false>');
+}
+
+const strPort: string = parseArg(['--port', '-p']);
+if (!strPort || strPort.length === 0) {
+    usage();
+    process.exit(1);
+}
+
+const foregroundArg: string = parseArg(['--foreground', '-f']);
+if (foregroundArg && !['true', 'false'].includes(foregroundArg.toLowerCase())) {
+    console.log(`FATAL: foreground property should only be true or false`);
+    usage();
+    process.exit(1);
+}
+const foreground: boolean = (foregroundArg && foregroundArg.toLowerCase() === 'true') ?
true : false; + +const port: number = parseInt(strPort, 10); + +const mode: string = parseArg(['--mode', '-m']); + +const startMode: string = parseArg(['--start_mode', '-s']); +if (![ExperimentStartUpMode.NEW, ExperimentStartUpMode.RESUME].includes(startMode)) { + console.log(`FATAL: unknown start_mode: ${startMode}`); + usage(); + process.exit(1); +} + +const experimentId: string = parseArg(['--experiment_id', '-id']); +if (experimentId.trim().length < 1) { + console.log(`FATAL: cannot resume the experiment, invalid experiment_id: ${experimentId}`); + usage(); + process.exit(1); +} + +const logDir: string = parseArg(['--log_dir', '-ld']); +if (logDir.length > 0) { + if (!fs.existsSync(logDir)) { + console.log(`FATAL: log_dir ${logDir} does not exist`); + } +} + +const logLevel: string = parseArg(['--log_level', '-ll']); + +const readonlyArg: string = parseArg(['--readonly', '-r']); +if (readonlyArg && !['true', 'false'].includes(readonlyArg.toLowerCase())) { + console.log(`FATAL: readonly property should only be true or false`); + usage(); + process.exit(1); +} +const readonly = (readonlyArg && readonlyArg.toLowerCase() == 'true') ? true : false; + +const dispatcherPipe: string = parseArg(['--dispatcher_pipe']); + +const urlPrefix: string = parseArg(['--url_prefix']); + +initStartupInfo(startMode, experimentId, port, mode, logDir, logLevel, readonly, dispatcherPipe, urlPrefix); + +mkDirP(getLogDir()) + .then(async () => { + try { + await initContainer(foreground, mode); + const restServer: NNIRestServer = component.get(NNIRestServer); + await restServer.start(); + getLogger('main').info(`Rest server listening on: ${restServer.endPoint}`); + } catch (err) { + getLogger('main').error(`${err.stack}`); + throw err; + } + }) + .catch((err: Error) => { + console.error(`Failed to create log dir: ${err.stack}`); + }); + +function cleanUp(): void { + (component.get(Manager) as Manager).stopExperiment(); +} + +process.on('SIGTERM', cleanUp); +process.on('SIGBREAK', cleanUp); +process.on('SIGINT', cleanUp); diff --git a/ts/nni_manager/package.json b/ts/nni_manager/package.json new file mode 100644 index 0000000000000000000000000000000000000000..3b35d541795096161de31af5615403d479a7bc64 --- /dev/null +++ b/ts/nni_manager/package.json @@ -0,0 +1,109 @@ +{ + "name": "nni", + "version": "999.0.0-developing", + "main": "index.js", + "scripts": { + "build": "tsc", + "test": "nyc --reporter=cobertura --reporter=text mocha test/**/*.test.ts", + "start": "node dist/main.js", + "watch": "tsc --watch", + "eslint": "npx eslint ./ --ext .ts" + }, + "license": "MIT", + "dependencies": { + "app-module-path": "^2.2.0", + "azure-storage": "^2.10.6", + "child-process-promise": "^2.2.1", + "express": "^4.17.1", + "express-joi-validator": "^2.0.1", + "http-proxy": "^1.18.1", + "ignore": "^5.1.8", + "js-base64": "^3.6.1", + "kubernetes-client": "^6.12.1", + "lockfile": "^1.0.4", + "npm": ">=8.3.0", + "python-shell": "^3.0.0", + "rx": "^4.1.0", + "sqlite3": "5.0.2", + "ssh2": "^1.4.0", + "stream-buffers": "^3.0.2", + "tail-stream": "^0.3.4", + "tar": "^6.1.11", + "tree-kill": "^1.2.2", + "ts-deferred": "^1.0.4", + "typescript-ioc": "^1.2.6", + "typescript-string-operations": "^1.4.1", + "ws": "^7.4.6" + }, + "devDependencies": { + "@types/chai": "^4.2.18", + "@types/chai-as-promised": "^7.1.0", + "@types/express": "^4.17.2", + "@types/glob": "^7.1.3", + "@types/http-proxy": "^1.17.7", + "@types/js-base64": "^3.3.1", + "@types/js-yaml": "^4.0.1", + "@types/lockfile": "^1.0.0", + "@types/mocha": "^8.2.2", + "@types/node": 
"^15.12.1", + "@types/request": "^2.48.5", + "@types/rx": "^4.1.2", + "@types/sqlite3": "^3.1.7", + "@types/ssh2": "^0.5.46", + "@types/stream-buffers": "^3.0.3", + "@types/tar": "^4.0.4", + "@types/tmp": "^0.2.0", + "@types/ws": "^7.4.4", + "@typescript-eslint/eslint-plugin": "^2.10.0", + "@typescript-eslint/parser": "^4.26.0", + "chai": "^4.3.4", + "chai-as-promised": "^7.1.1", + "eslint": "^7.28.0", + "glob": "^7.1.7", + "mocha": "^9.0.2", + "nyc": "^15.1.0", + "request": "^2.88.2", + "rmdir": "^1.2.0", + "tmp": "^0.2.1", + "ts-node": "^10.0.0", + "typescript": "^4.3.2" + }, + "resolutions": { + "acorn": ">=8.3.0", + "hoek": ">=6.1.3", + "node.extend": ">=1.1.8", + "y18n": ">=5.0.8", + "yargs-parser": ">=20.2.7", + "joi": ">=17.4.0", + "node-forge": ">=0.10.0", + "glob-parent": ">=6.0.0", + "node-gyp": ">=8.4.1", + "strip-ansi": "=6.0.1", + "http-signature": ">=1.3.6" + }, + "engines": { + "node": "^16.3.0" + }, + "nyc": { + "include": [ + "**/*.ts" + ], + "exclude": [ + "**/test/*", + "./node_modules/" + ], + "extension": [ + ".ts", + ".tsx" + ], + "require": [ + "ts-node/register" + ], + "reporter": [ + "text-summary", + "html" + ], + "sourceMap": true, + "instrument": true + } +} diff --git a/ts/nni_manager/rest_server/nniRestServer.ts b/ts/nni_manager/rest_server/nniRestServer.ts new file mode 100644 index 0000000000000000000000000000000000000000..051c0a52dc9b5dba1d2fc502f588ea78a07d1065 --- /dev/null +++ b/ts/nni_manager/rest_server/nniRestServer.ts @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import bodyParser from 'body-parser'; +import express from 'express'; +import httpProxy from 'http-proxy'; +import path from 'path'; +import * as component from '../common/component'; +import { RestServer } from '../common/restServer' +import { getLogDir } from '../common/utils'; +import { createRestHandler } from './restHandler'; +import { getAPIRootUrl, getPrefixUrl } from '../common/experimentStartupInfo'; + +/** + * NNI Main rest server, provides rest API to support + * # nnictl CLI tool + * # NNI WebUI + * + */ +@component.Singleton +export class NNIRestServer extends RestServer { + private readonly LOGS_ROOT_URL: string = '/logs'; + protected netronProxy: any = null; + protected API_ROOT_URL: string = '/api/v1/nni'; + + /** + * constructor to provide NNIRestServer's own rest property, e.g. 
port + */ + constructor() { + super(); + this.API_ROOT_URL = getAPIRootUrl(); + this.netronProxy = httpProxy.createProxyServer(); + } + + /** + * NNIRestServer's own router registration + */ + protected registerRestHandler(): void { + this.app.use(getPrefixUrl(), express.static('static')); + this.app.use(bodyParser.json({limit: '50mb'})); + this.app.use(this.API_ROOT_URL, createRestHandler(this)); + this.app.use(this.LOGS_ROOT_URL, express.static(getLogDir())); + this.app.all('/netron/*', (req: express.Request, res: express.Response) => { + delete req.headers.host; + req.url = req.url.replace('/netron', '/'); + this.netronProxy.web(req, res, { + changeOrigin: true, + target: 'https://netron.app' + }); + }); + this.app.get(`${getPrefixUrl()}/*`, (_req: express.Request, res: express.Response) => { + res.sendFile(path.resolve('static/index.html')); + }); + } +} diff --git a/ts/nni_manager/rest_server/restHandler.ts b/ts/nni_manager/rest_server/restHandler.ts new file mode 100644 index 0000000000000000000000000000000000000000..5bcc9060cb2bca5631917ec67f787d2018030841 --- /dev/null +++ b/ts/nni_manager/rest_server/restHandler.ts @@ -0,0 +1,441 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Request, Response, Router } from 'express'; +import path from 'path'; + +import * as component from '../common/component'; +import { DataStore, MetricDataRecord, TrialJobInfo } from '../common/datastore'; +import { NNIError, NNIErrorNames } from '../common/errors'; +import { isNewExperiment, isReadonly } from '../common/experimentStartupInfo'; +import { getLogger, Logger } from '../common/log'; +import { ExperimentProfile, Manager, TrialJobStatistics } from '../common/manager'; +import { ExperimentManager } from '../common/experimentManager'; +import { TensorboardManager, TensorboardTaskInfo } from '../common/tensorboardManager'; +import { ValidationSchemas } from './restValidationSchemas'; +import { NNIRestServer } from './nniRestServer'; +import { getVersion } from '../common/utils'; +import { MetricType } from '../common/datastore'; +import { ProfileUpdateType } from '../common/manager'; +import { TrialJobStatus } from '../common/trainingService'; + +// TODO: fix expressJoi +//const expressJoi = require('express-joi-validator'); + +class NNIRestHandler { + private restServer: NNIRestServer; + private nniManager: Manager; + private experimentsManager: ExperimentManager; + private tensorboardManager: TensorboardManager; + private log: Logger; + + constructor(rs: NNIRestServer) { + this.nniManager = component.get(Manager); + this.experimentsManager = component.get(ExperimentManager); + this.tensorboardManager = component.get(TensorboardManager); + this.restServer = rs; + this.log = getLogger('NNIRestHandler'); + } + + public createRestHandler(): Router { + const router: Router = Router(); + + router.use((req: Request, res: Response, next) => { + this.log.debug(`${req.method}: ${req.url}: body:`, req.body); + res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept'); + res.header('Access-Control-Allow-Methods', 'PUT,POST,GET,DELETE,OPTIONS'); + + res.setHeader('Content-Type', 'application/json'); + next(); + }); + + this.version(router); + this.checkStatus(router); + this.getExperimentProfile(router); + this.getExperimentMetadata(router); + this.updateExperimentProfile(router); + this.importData(router); + this.getImportedData(router); + this.startExperiment(router); + this.getTrialJobStatistics(router); + 
this.setClusterMetaData(router); + this.listTrialJobs(router); + this.getTrialJob(router); + this.addTrialJob(router); + this.cancelTrialJob(router); + this.getMetricData(router); + this.getMetricDataByRange(router); + this.getLatestMetricData(router); + this.getTrialFile(router); + this.exportData(router); + this.getExperimentsInfo(router); + this.startTensorboardTask(router); + this.getTensorboardTask(router); + this.updateTensorboardTask(router); + this.stopTensorboardTask(router); + this.stopAllTensorboardTask(router); + this.listTensorboardTask(router); + this.stop(router); + + // Express-joi-validator configuration + router.use((err: any, _req: Request, res: Response, _next: any): any => { + if (err.isBoom) { + this.log.error(err.output.payload); + + return res.status(err.output.statusCode).json(err.output.payload); + } + }); + + return router; + } + + private handleError(err: Error, res: Response, isFatal: boolean = false, errorCode: number = 500): void { + if (err instanceof NNIError && err.name === NNIErrorNames.NOT_FOUND) { + res.status(404); + } else { + res.status(errorCode); + } + res.send({ + error: err.message + }); + + // If it's a fatal error, exit process + if (isFatal) { + this.log.fatal(err); + process.exit(1); + } else { + this.log.error(err); + } + } + + private version(router: Router): void { + router.get('/version', async (_req: Request, res: Response) => { + const version = await getVersion(); + res.send(version); + }); + } + + // TODO add validators for request params, query, body + private checkStatus(router: Router): void { + router.get('/check-status', (_req: Request, res: Response) => { + const ds: DataStore = component.get(DataStore); + ds.init().then(() => { + res.send(this.nniManager.getStatus()); + }).catch(async (err: Error) => { + this.handleError(err, res); + this.log.error(err.message); + this.log.error(`Datastore initialize failed, stopping rest server...`); + await this.restServer.stop(); + }); + }); + } + + private getExperimentProfile(router: Router): void { + router.get('/experiment', (_req: Request, res: Response) => { + this.nniManager.getExperimentProfile().then((profile: ExperimentProfile) => { + res.send(profile); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private updateExperimentProfile(router: Router): void { + router.put('/experiment', (req: Request, res: Response) => { + this.nniManager.updateExperimentProfile(req.body, req.query['update_type'] as ProfileUpdateType).then(() => { + res.send(); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private importData(router: Router): void { + router.post('/experiment/import-data', (req: Request, res: Response) => { + this.nniManager.importData(JSON.stringify(req.body)).then(() => { + res.send(); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private getImportedData(router: Router): void { + router.get('/experiment/imported-data', (_req: Request, res: Response) => { + this.nniManager.getImportedData().then((importedData: string[]) => { + res.send(JSON.stringify(importedData)); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private startExperiment(router: Router): void { + router.post('/experiment', (req: Request, res: Response) => { + if (isNewExperiment()) { + this.nniManager.startExperiment(req.body).then((eid: string) => { + res.send({ + experiment_id: eid // eslint-disable-line @typescript-eslint/camelcase + }); + }).catch((err: Error) => { + // Start experiment is 
a step of initialization, so any exception thrown is fatal
+                    this.handleError(err, res);
+                });
+            } else {
+                this.nniManager.resumeExperiment(isReadonly()).then(() => {
+                    res.send();
+                }).catch((err: Error) => {
+                    // Resume experiment is a step of initialization, so any exception thrown is fatal
+                    this.handleError(err, res);
+                });
+            }
+        });
+    }
+
+    private getTrialJobStatistics(router: Router): void {
+        router.get('/job-statistics', (_req: Request, res: Response) => {
+            this.nniManager.getTrialJobStatistics().then((statistics: TrialJobStatistics[]) => {
+                res.send(statistics);
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private setClusterMetaData(router: Router): void {
+        router.put(
+            '/experiment/cluster-metadata', //TODO: Fix validation expressJoi(ValidationSchemas.SETCLUSTERMETADATA),
+            async (req: Request, res: Response) => {
+                const metadata: any = req.body;
+                const keys: string[] = Object.keys(metadata);
+                try {
+                    for (const key of keys) {
+                        await this.nniManager.setClusterMetadata(key, JSON.stringify(metadata[key]));
+                    }
+                    res.send();
+                } catch (err) {
+                    // setClusterMetadata is a step of initialization, so any exception thrown is fatal
+                    this.handleError(NNIError.FromError(err), res, true);
+                }
+            });
+    }
+
+    private listTrialJobs(router: Router): void {
+        router.get('/trial-jobs', (req: Request, res: Response) => {
+            this.nniManager.listTrialJobs(req.query['status'] as TrialJobStatus).then((jobInfos: TrialJobInfo[]) => {
+                jobInfos.forEach((trialJob: TrialJobInfo) => {
+                    this.setErrorPathForFailedJob(trialJob);
+                });
+                res.send(jobInfos);
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private getTrialJob(router: Router): void {
+        router.get('/trial-jobs/:id', (req: Request, res: Response) => {
+            this.nniManager.getTrialJob(req.params['id']).then((jobDetail: TrialJobInfo) => {
+                const jobInfo: TrialJobInfo = this.setErrorPathForFailedJob(jobDetail);
+                res.send(jobInfo);
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private addTrialJob(router: Router): void {
+        router.post('/trial-jobs', async (req: Request, res: Response) => {
+            this.nniManager.addCustomizedTrialJob(JSON.stringify(req.body)).then((sequenceId: number) => {
+                res.send({sequenceId});
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private cancelTrialJob(router: Router): void {
+        router.delete('/trial-jobs/:id', async (req: Request, res: Response) => {
+            this.nniManager.cancelTrialJobByUser(req.params['id']).then(() => {
+                res.send();
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private getMetricData(router: Router): void {
+        router.get('/metric-data/:job_id*?', async (req: Request, res: Response) => {
+            this.nniManager.getMetricData(req.params['job_id'], req.query['type'] as MetricType).then((metricsData: MetricDataRecord[]) => {
+                res.send(metricsData);
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private getMetricDataByRange(router: Router): void {
+        router.get('/metric-data-range/:min_seq_id/:max_seq_id', async (req: Request, res: Response) => {
+            const minSeqId = Number(req.params['min_seq_id']);
+            const maxSeqId = Number(req.params['max_seq_id']);
+            this.nniManager.getMetricDataByRange(minSeqId, maxSeqId).then((metricsData: MetricDataRecord[]) => {
+                res.send(metricsData);
+            }).catch((err: Error) => {
+                this.handleError(err, res);
+            });
+        });
+    }
+
+    private getLatestMetricData(router: Router): void {
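+        // GET /metric-data-latest returns every final metric plus the most recent
+        // intermediate metric of each trial (see NNIManager.getLatestMetricData).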
router.get('/metric-data-latest/', async (_req: Request, res: Response) => { + this.nniManager.getLatestMetricData().then((metricsData: MetricDataRecord[]) => { + res.send(metricsData); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private getTrialFile(router: Router): void { + router.get('/trial-file/:id/:filename', async(req: Request, res: Response) => { + let encoding: string | null = null; + const filename = req.params['filename']; + if (!filename.includes('.') || filename.match(/.*\.(txt|log)/g)) { + encoding = 'utf8'; + } + this.nniManager.getTrialFile(req.params['id'], filename).then((content: Buffer | string) => { + const contentType = content instanceof Buffer ? 'application/octet-stream' : 'text/plain'; + res.header('Content-Type', contentType); + if (content === '') { + content = `${filename} is empty.`; // FIXME: this should be handled in front-end + } + res.send(content); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private exportData(router: Router): void { + router.get('/export-data', (_req: Request, res: Response) => { + this.nniManager.exportData().then((exportedData: string) => { + res.send(exportedData); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private getExperimentMetadata(router: Router): void { + router.get('/experiment-metadata', (_req: Request, res: Response) => { + Promise.all([ + this.nniManager.getExperimentProfile(), + this.experimentsManager.getExperimentsInfo() + ]).then(([profile, experimentInfo]) => { + for (const info of experimentInfo as any) { + if (info.id === profile.id) { + res.send(info); + break; + } + } + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private getExperimentsInfo(router: Router): void { + router.get('/experiments-info', (_req: Request, res: Response) => { + this.experimentsManager.getExperimentsInfo().then((experimentInfo: JSON) => { + res.send(JSON.stringify(experimentInfo)); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private startTensorboardTask(router: Router): void { + router.post('/tensorboard', (req: Request, res: Response) => { + this.tensorboardManager.startTensorboardTask(req.body).then((taskDetail: TensorboardTaskInfo) => { + this.log.info(taskDetail); + res.send(Object.assign({}, taskDetail)); + }).catch((err: Error) => { + this.handleError(err, res, false, 400); + }); + }); + } + + private getTensorboardTask(router: Router): void { + router.get('/tensorboard/:id', (req: Request, res: Response) => { + this.tensorboardManager.getTensorboardTask(req.params['id']).then((taskDetail: TensorboardTaskInfo) => { + res.send(Object.assign({}, taskDetail)); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private updateTensorboardTask(router: Router): void { + router.put('/tensorboard/:id', (req: Request, res: Response) => { + this.tensorboardManager.updateTensorboardTask(req.params['id']).then((taskDetail: TensorboardTaskInfo) => { + res.send(Object.assign({}, taskDetail)); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private stopTensorboardTask(router: Router): void { + router.delete('/tensorboard/:id', (req: Request, res: Response) => { + this.tensorboardManager.stopTensorboardTask(req.params['id']).then((taskDetail: TensorboardTaskInfo) => { + res.send(Object.assign({}, taskDetail)); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private stopAllTensorboardTask(router: 
Router): void { + router.delete('/tensorboard-tasks', (_req: Request, res: Response) => { + this.tensorboardManager.stopAllTensorboardTask().then(() => { + res.send(); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private listTensorboardTask(router: Router): void { + router.get('/tensorboard-tasks', (_req: Request, res: Response) => { + this.tensorboardManager.listTensorboardTasks().then((taskDetails: TensorboardTaskInfo[]) => { + res.send(taskDetails); + }).catch((err: Error) => { + this.handleError(err, res); + }); + }); + } + + private stop(router: Router): void { + router.delete('/experiment', (_req: Request, res: Response) => { + this.nniManager.stopExperimentTopHalf().then(() => { + res.send(); + this.nniManager.stopExperimentBottomHalf(); + }); + }); + } + + private setErrorPathForFailedJob(jobInfo: TrialJobInfo): TrialJobInfo { + if (jobInfo === undefined || jobInfo.status !== 'FAILED' || jobInfo.logPath === undefined) { + return jobInfo; + } + jobInfo.stderrPath = path.join(jobInfo.logPath, 'stderr'); + + return jobInfo; + } +} + +export function createRestHandler(rs: NNIRestServer): Router { + const handler: NNIRestHandler = new NNIRestHandler(rs); + + return handler.createRestHandler(); +} diff --git a/ts/nni_manager/rest_server/restValidationSchemas.ts b/ts/nni_manager/rest_server/restValidationSchemas.ts new file mode 100644 index 0000000000000000000000000000000000000000..ed184f59b6bbb378f420b330c4077f9a27e5300f --- /dev/null +++ b/ts/nni_manager/rest_server/restValidationSchemas.ts @@ -0,0 +1,281 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +const joi = require('joi'); + +export namespace ValidationSchemas { + export const SETCLUSTERMETADATA = { + body: { + machine_list: joi.array().items(joi.object({ // eslint-disable-line @typescript-eslint/camelcase + username: joi.string().required(), + ip: joi.string().hostname().required(), + port: joi.number().min(1).max(65535).required(), + passwd: joi.string(), + sshKeyPath: joi.string(), + passphrase: joi.string(), + gpuIndices: joi.string(), + maxTrialNumPerGpu: joi.number(), + useActiveGpu: joi.boolean(), + pythonPath: joi.string() + })), + local_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + gpuIndices: joi.string(), + maxTrialNumPerGpu: joi.number(), + useActiveGpu: joi.boolean(), + reuse: joi.boolean() + }), + trial_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + image: joi.string().min(1), + codeDir: joi.string().min(1).required(), + dataDir: joi.string(), + outputDir: joi.string(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + // ############## adl cpu and memory config ############### + memorySize: joi.string(), + // ######################################################## + gpuNum: joi.number().min(0), + command: joi.string().min(1), + virtualCluster: joi.string(), + shmMB: joi.number(), + authFile: joi.string(), + nniManagerNFSMountPath: joi.string().min(1), + containerNFSMountPath: joi.string().min(1), + paiConfigPath: joi.string(), + nodeCount: joi.number(), + paiStorageConfigName: joi.string().min(1), + nasMode: joi.string().valid('classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'), + portList: joi.array().items(joi.object({ + label: joi.string().required(), + beginAt: joi.number().required(), + portNumber: joi.number().required(), + })), + worker: joi.object({ + replicas: joi.number().min(1).required(), + image: joi.string().min(1), + privateRegistryAuthPath: 
joi.string().min(1), + outputDir: joi.string(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + gpuNum: joi.number().min(0).required(), + command: joi.string().min(1).required() + }), + ps: joi.object({ + replicas: joi.number().min(1).required(), + image: joi.string().min(1), + privateRegistryAuthPath: joi.string().min(1), + outputDir: joi.string(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + gpuNum: joi.number().min(0).required(), + command: joi.string().min(1).required() + }), + master: joi.object({ + replicas: joi.number().min(1).required(), + image: joi.string().min(1), + privateRegistryAuthPath: joi.string().min(1), + outputDir: joi.string(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + gpuNum: joi.number().min(0).required(), + command: joi.string().min(1).required() + }), + taskRoles: joi.array().items({ + name: joi.string().min(1), + taskNum: joi.number().min(1).required(), + image: joi.string().min(1), + privateRegistryAuthPath: joi.string().min(1), + outputDir: joi.string(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + shmMB: joi.number(), + gpuNum: joi.number().min(0).required(), + command: joi.string().min(1).required(), + frameworkAttemptCompletionPolicy: joi.object({ + minFailedTaskCount: joi.number(), + minSucceededTaskCount: joi.number() + }) + }), + imagePullSecrets: joi.array().items({ + name: joi.string().min(1).required() + }), + // ############## adl ############### + namespace: joi.string(), + adaptive: joi.boolean(), + checkpoint: joi.object({ + storageClass: joi.string().min(1).required(), + storageSize: joi.string().min(1).required() + }), + nfs: joi.object({ + server: joi.string().min(1).required(), + path: joi.string().min(1).required(), + containerMountPath: joi.string().min(1).required() + }) + }), + pai_yarn_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + userName: joi.string().min(1).required(), + passWord: joi.string().min(1), + token: joi.string().min(1), + host: joi.string().min(1).required() + }), + pai_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + userName: joi.string().min(1).required(), + token: joi.string().min(1), + host: joi.string().min(1).required(), + reuse: joi.boolean(), + cpuNum: joi.number().min(1), + memoryMB: joi.number().min(100), + gpuNum: joi.number().min(1), + maxTrialNumPerGpu: joi.number(), + useActiveGpu: joi.boolean(), + }), + adl_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + // hack for v2 configuration + }), + kubeflow_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + operator: joi.string().min(1).required(), + storage: joi.string().min(1), + apiVersion: joi.string().min(1), + nfs: joi.object({ + server: joi.string().min(1).required(), + path: joi.string().min(1).required() + }), + keyVault: joi.object({ + vaultName: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){1,127}$/), + name: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){1,127}$/) + }), + azureStorage: joi.object({ + accountName: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){3,31}$/), + azureShare: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){3,63}$/) + }), + uploadRetryCount: joi.number().min(1) + }), + frameworkcontroller_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + storage: joi.string().min(1), + serviceAccountName: joi.string().min(1), + pvc: joi.object({ + path: joi.string().min(1).required() + }), + configPath: joi.string().min(1), + nfs: joi.object({ + 
server: joi.string().min(1).required(), + path: joi.string().min(1).required() + }), + keyVault: joi.object({ + vaultName: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){1,127}$/), + name: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){1,127}$/) + }), + azureStorage: joi.object({ + accountName: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){3,31}$/), + azureShare: joi.string().regex(/^([0-9]|[a-z]|[A-Z]|-){3,63}$/) + }), + uploadRetryCount: joi.number().min(1), + namespace: joi.string().min(1) + }), + dlts_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + dashboard: joi.string().min(1), + + cluster: joi.string().min(1), + team: joi.string().min(1), + + email: joi.string().min(1), + password: joi.string().min(1) + }), + aml_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + subscriptionId: joi.string().min(1), + resourceGroup: joi.string().min(1), + workspaceName: joi.string().min(1), + computeTarget: joi.string().min(1), + maxTrialNumPerGpu: joi.number(), + useActiveGpu: joi.boolean() + }), + hybrid_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + trainingServicePlatforms: joi.array(), + }), + nni_manager_ip: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + nniManagerIp: joi.string().min(1) + }), + version_check: joi.boolean(), // eslint-disable-line @typescript-eslint/camelcase + log_collection: joi.string(), // eslint-disable-line @typescript-eslint/camelcase + remote_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + reuse: joi.boolean() + }), + shared_storage_config: joi.object({ // eslint-disable-line @typescript-eslint/camelcase + storageType: joi.string(), + localMountPoint: joi.string(), + remoteMountPoint: joi.string(), + nfsServer: joi.string(), + exportedDirectory: joi.string(), + storageAccountName: joi.string(), + storageAccountKey: joi.string(), + containerName: joi.string(), + localMounted: joi.string() + }) + } + }; + export const STARTEXPERIMENT = { + body: { + experimentName: joi.string().required(), + description: joi.string(), + authorName: joi.string(), + maxTrialNum: joi.number().min(0).required(), + trialConcurrency: joi.number().min(0).required(), + trainingServicePlatform: joi.string(), + searchSpace: joi.string().required(), + maxExecDuration: joi.number().min(0).required(), + maxTrialDuration: joi.number().min(0).required(), + multiPhase: joi.boolean(), + multiThread: joi.boolean(), + nniManagerIp: joi.string(), + versionCheck: joi.boolean(), + logCollection: joi.string(), + advisor: joi.object({ + builtinAdvisorName: joi.string(), + codeDir: joi.string(), + classFileName: joi.string(), + className: joi.string(), + classArgs: joi.any(), + checkpointDir: joi.string().allow(''), + gpuIndices: joi.string() + }), + tuner: joi.object({ + builtinTunerName: joi.string(), + codeDir: joi.string(), + classFileName: joi.string(), + className: joi.string(), + classArgs: joi.any(), + checkpointDir: joi.string().allow(''), + includeIntermediateResults: joi.boolean(), + gpuIndices: joi.string() + }), + assessor: joi.object({ + builtinAssessorName: joi.string(), + codeDir: joi.string(), + classFileName: joi.string(), + className: joi.string(), + classArgs: joi.any(), + checkpointDir: joi.string().allow('') + }), + clusterMetaData: joi.array().items(joi.object({ + key: joi.string(), + value: joi.any() + })) + } + }; + export const UPDATEEXPERIMENT = { + query: { + /* eslint-disable-next-line @typescript-eslint/camelcase */ + update_type: 
joi.string().required().valid('TRIAL_CONCURRENCY', 'MAX_EXEC_DURATION', 'SEARCH_SPACE', 'MAX_TRIAL_NUM') + }, + body: { + id: joi.string().required(), + revision: joi.number().min(0).required(), + params: joi.object(STARTEXPERIMENT.body), + execDuration: joi.number().required(), + startTime: joi.number(), + endTime: joi.number(), + logDir: joi.string(), + nextSequenceId: joi.number() + } + }; +} diff --git a/ts/nni_manager/test/common/getIpv4Address.test.ts b/ts/nni_manager/test/common/getIpv4Address.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..1a21b2f76974790fdc6935d1110e53e505161dab --- /dev/null +++ b/ts/nni_manager/test/common/getIpv4Address.test.ts @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import { getIPV4Address } from '../../common/utils'; + +it('getIpv4Address', async () => { + const ip1 = await getIPV4Address(); + const ip2 = await getIPV4Address(); + assert.match(ip1, /^\d+\.\d+\.\d+\.\d+$/); + assert.equal(ip1, ip2); +}); diff --git a/ts/nni_manager/test/core/assessor.py b/ts/nni_manager/test/core/assessor.py new file mode 100644 index 0000000000000000000000000000000000000000..004283cb51b5404172392cf0efb604bacdbd2a7a --- /dev/null +++ b/ts/nni_manager/test/core/assessor.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +_in_file = open(3, 'rb') +_out_file = open(4, 'wb') + + +def send(command, data): + command = command.encode('utf8') + data = data.encode('utf8') + msg = b'%b%14d%b' % (command, len(data), data) + _out_file.write(msg) + _out_file.flush() + + +def receive(): + header = _in_file.read(16) + l = int(header[2:]) + command = header[:2].decode('utf8') + data = _in_file.read(l).decode('utf8') + return command, data + + +print(receive()) + +send('KI', '') + +print(receive()) + +send('KI', 'hello') + +send('KI', '世界') diff --git a/ts/nni_manager/test/core/dataStore.test.ts b/ts/nni_manager/test/core/dataStore.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..cc3fa086f7d54f67e2b198a0adb7fb22b011948a --- /dev/null +++ b/ts/nni_manager/test/core/dataStore.test.ts @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
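+ +// Editor's note: assessor.py above talks to the manager over file descriptors 3/4 using +// the dispatcher framing: a 2-byte command, a 14-character space-padded decimal payload +// length, then the UTF-8 payload. A minimal sketch of a matching encoder on the +// TypeScript side (hypothetical helper, shown for illustration only, not part of this PR): +// +// function encodeCommand(command: string, data: string): Buffer { +//     const payload: Buffer = Buffer.from(data, 'utf8'); +//     const header: string = command + String(payload.length).padStart(14, ' '); +//     return Buffer.concat([Buffer.from(header, 'utf8'), payload]); +// } +// +// receive() in assessor.py parses the same 16-byte header with int(header[2:]).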
+ +'use strict'; + +import { expect } from 'chai'; +import { Container, Scope } from 'typescript-ioc'; + +import * as component from '../../common/component'; +import { Database, DataStore, TrialJobInfo } from '../../common/datastore'; +import { setExperimentStartupInfo } from '../../common/experimentStartupInfo'; +import { ExperimentProfile, TrialJobStatistics } from '../../common/manager'; +import { TrialJobStatus } from '../../common/trainingService'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { NNIDataStore } from '../../core/nniDataStore'; +import { SqlDB } from '../../core/sqlDatabase'; + +describe('Unit test for dataStore', () => { + let ds: DataStore; + before(async () => { + prepareUnitTest(); + Container.bind(Database).to(SqlDB).scope(Scope.Singleton); + Container.bind(DataStore).to(NNIDataStore).scope(Scope.Singleton); + ds = component.get(DataStore); + await ds.init(); + }); + + after(() => { + ds.close(); + cleanupUnitTest(); + }); + + it('test empty experiment profile', async () => { + const result: ExperimentProfile = await ds.getExperimentProfile('abc'); + expect(result).to.equal(undefined, 'Should not get any profile'); + }); + + it('test experiment profiles CRUD', async () => { + const profile: ExperimentProfile = { + params: { + experimentName: 'exp1', + trialConcurrency: 2, + maxExperimentDuration: '10s', + maxTrialNumber: 5, + trainingService: { + platform: 'local' + }, + searchSpace: `{ + "dropout_rate": { + "_type": "uniform", + "_value": [0.1, 0.5] + }, + "batch_size": { + "_type": "choice", + "_value": [50, 250, 500] + } + }`, + tuner: { + className: 'testTuner' + }, + trialCommand: '', + trialCodeDirectory: '', + debug: true + }, + id: 'exp123', + execDuration: 0, + logDir: '', + startTime: Date.now(), + endTime: Date.now(), + nextSequenceId: 0, + revision: 0 + }; + const id: string = profile.id; + for (let i: number = 0; i < 5; i++) { + await ds.storeExperimentProfile(profile); + profile.revision += 1; + } + const result: ExperimentProfile = await ds.getExperimentProfile(id); + expect(result.revision).to.equal(4); + }); + + const testEventRecords: { + event: string; + jobId: string; + data?: string; + }[] = [ + { + event: 'WAITING', + jobId: '111' + }, + { + event: 'WAITING', + jobId: '222' + }, + { + event: 'RUNNING', + jobId: '111' + }, + { + event: 'RUNNING', + jobId: '222' + }, + { + event: 'SUCCEEDED', + jobId: '111', + data: 'lr: 0.001' + }, + { + event: 'FAILED', + jobId: '222' + } + ]; + + const metricsData: any = [ + { + trial_job_id: '111', + parameter_id: 'abc', + type: 'PERIODICAL', + value: 'acc: 0.88', + timestamp: Date.now() + }, + { + trial_job_id: '111', + parameter_id: 'abc', + type: 'FINAL', + value: 'acc: 0.88', + timestamp: Date.now() + } + ]; + + it('test trial job events store/query', async () => { + for (const event of testEventRecords) { + await ds.storeTrialJobEvent(event.event, event.jobId, event.data); + } + for (const metrics of metricsData) { + await ds.storeMetricData(metrics.trial_job_id, JSON.stringify(metrics)); + } + const jobs: TrialJobInfo[] = await ds.listTrialJobs(); + expect(jobs.length).to.equals(2, 'There should be 2 jobs'); + + const statistics: TrialJobStatistics[] = await ds.getTrialJobStatistics(); + expect(statistics.length).to.equals(2, 'There should be 2 statistics'); + }); +}); diff --git a/ts/nni_manager/test/core/dummy_assessor.py b/ts/nni_manager/test/core/dummy_assessor.py new file mode 100644 index 
0000000000000000000000000000000000000000..a003c97baa4638f85cbbd5a2fa32831b4429c9de --- /dev/null +++ b/ts/nni_manager/test/core/dummy_assessor.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from nni.assessor import Assessor, AssessResult + +class DummyAssessor(Assessor): + def assess_trial(self, trial_job_id, trial_history): + return AssessResult.Good diff --git a/ts/nni_manager/test/core/dummy_tuner.py b/ts/nni_manager/test/core/dummy_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..1f118ac1da09f3b1bd1fd5332ec578e95fb3d4b3 --- /dev/null +++ b/ts/nni_manager/test/core/dummy_tuner.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from nni.tuner import Tuner + +class DummyTuner(Tuner): + def generate_parameters(self, parameter_id): + return 'unit-test-param' + + def generate_multiple_parameters(self, parameter_id_list): + return ['unit-test-param1', 'unit-test-param2'] + + def receive_trial_result(self, parameter_id, parameters, value): + pass + + def receive_customized_trial_result(self, parameter_id, parameters, value): + pass + + def update_search_space(self, search_space): + pass diff --git a/ts/nni_manager/test/core/experimentManager.test.ts b/ts/nni_manager/test/core/experimentManager.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..c1017a4404e4f08b1d863b0d5a894e6af76e578a --- /dev/null +++ b/ts/nni_manager/test/core/experimentManager.test.ts @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import { assert, expect } from 'chai'; +import * as fs from 'fs'; +import { Container, Scope } from 'typescript-ioc'; + +import * as component from '../../common/component'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { ExperimentManager } from '../../common/experimentManager'; +import { NNIExperimentsManager } from '../../core/nniExperimentsManager'; + + +describe('Unit test for experiment manager', function () { + let experimentManager: NNIExperimentsManager; + const mockedInfo = { + "test": { + "port": 8080, + "startTime": 1605246730756, + "endTime": "N/A", + "status": "INITIALIZED", + "platform": "local", + "experimentName": "testExp", + "tag": [], "pid": 11111, + "webuiUrl": [], + "logDir": null + } + } + + before(() => { + prepareUnitTest(); + fs.writeFileSync('.experiment.test', JSON.stringify(mockedInfo)); + Container.bind(ExperimentManager).to(NNIExperimentsManager).scope(Scope.Singleton); + experimentManager = component.get(NNIExperimentsManager); + experimentManager.setExperimentPath('.experiment.test'); + }); + + after(() => { + if (fs.existsSync('.experiment.test')) { + fs.unlinkSync('.experiment.test'); + } + cleanupUnitTest(); + }); + + it('test getExperimentsInfo', () => { + return experimentManager.getExperimentsInfo().then(function (experimentsInfo: {[key: string]: any}) { + for (const idx in experimentsInfo) { + if (experimentsInfo[idx]['id'] === 'test') { + expect(experimentsInfo[idx]['status']).to.be.oneOf(['STOPPED', 'ERROR']); + break; + } + } + }).catch((error) => { + assert.fail(error); + }) + }); +}); diff --git a/ts/nni_manager/test/core/import_all.test.ts b/ts/nni_manager/test/core/import_all.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..69126ba5c218c596d7dcc7b5f3d7229ec2d87453 --- /dev/null +++ b/ts/nni_manager/test/core/import_all.test.ts @@ -0,0 +1,15 @@ +import 
* as glob from 'glob'; + +// Istanbul only generates reports for used/imported files; files that are not used/imported +// by test cases are not included in code coverage reports. +// This is a workaround to import all files in order to show all source files in code coverage reports. + +glob.sync('**/*.ts').forEach((file) => { + if (file.indexOf('node_modules/') < 0 && file.indexOf('types/') < 0 + && file.indexOf('.test.ts') < 0 && file.indexOf('dlts') < 0 && file.indexOf('main.ts') < 0) { + try { + import('../../' + file); + } catch (err) { + // ignore: some modules cannot be imported outside a full NNI runtime + } + } +}) diff --git a/ts/nni_manager/test/core/ipcInterface.test.ts b/ts/nni_manager/test/core/ipcInterface.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..5de2153ca763fa103c0ce263f0ff0ad2605be13f --- /dev/null +++ b/ts/nni_manager/test/core/ipcInterface.test.ts @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import * as assert from 'assert'; +import { ChildProcess, spawn, StdioOptions } from 'child_process'; +import { Deferred } from 'ts-deferred'; +import { cleanupUnitTest, prepareUnitTest, getTunerProc, getCmdPy } from '../../common/utils'; +import * as CommandType from '../../core/commands'; +import { createDispatcherInterface, IpcInterface } from '../../core/ipcInterface'; +import { NNIError } from '../../common/errors'; + +let sentCommands: string[] = []; +const receivedCommands: { [key: string]: string }[] = []; + +let rejectCommandType: Error | undefined; + +function runProcess(): Promise<Error | null> { + // the process is intended to throw error, do not reject + const deferred: Deferred<Error | null> = new Deferred<Error | null>(); + + // create fake assessor process + const stdio: StdioOptions = ['ignore', 'pipe', process.stderr, 'pipe', 'pipe']; + const command: string = getCmdPy() + ' assessor.py'; + const proc: ChildProcess = getTunerProc(command, stdio, 'core/test', process.env); + // record its sent/received commands on exit + proc.on('error', (error: Error): void => { deferred.resolve(error); }); + proc.on('exit', (code: number): void => { + if (code !== 0) { + deferred.resolve(new Error(`return code: ${code}`)); + } else { + const str: string = proc.stdout!.read().toString(); + if (str.includes('\r\n')) { + sentCommands = str.split('\r\n'); + } else { + sentCommands = str.split('\n'); + } + deferred.resolve(null); + } + }); + + // create IPC interface + const dispatcher: IpcInterface = createDispatcherInterface(proc); + dispatcher.onCommand((commandType: string, content: string): void => { + receivedCommands.push({ commandType, content }); + }); + + // Command #1: ok + dispatcher.sendCommand('IN'); + + // Command #2: ok + dispatcher.sendCommand('ME', '123'); + + // Command #3: FE is not tuner/assessor command, test the exception type of send non-valid command + try { + dispatcher.sendCommand('FE', '1'); + } catch (error) { + rejectCommandType = error; + } + + return deferred.promise; +} + +/* FIXME +describe('core/protocol', (): void => { + + before(async () => { + prepareUnitTest(); + await runProcess(); + }); + + after(() => { + cleanupUnitTest(); + }); + + it('should have sent 2 successful commands', (): void => { + assert.equal(sentCommands.length, 3); + assert.equal(sentCommands[2], ''); + }); + + it('sendCommand() should work without content', (): void => { + assert.equal(sentCommands[0], "('IN', '')"); + }); + + it('sendCommand() should work with content', (): void => { + assert.equal(sentCommands[1], "('ME', '123')"); + }); + + it('sendCommand() should throw on wrong 
command type', (): void => { + assert.equal((rejectCommandType).name.split(' ')[0], 'AssertionError'); + }); + + it('should have received 3 commands', (): void => { + assert.equal(receivedCommands.length, 3); + }); + + it('onCommand() should work without content', (): void => { + assert.deepStrictEqual(receivedCommands[0], { + commandType: 'KI', + content: '' + }); + }); + + it('onCommand() should work with content', (): void => { + assert.deepStrictEqual(receivedCommands[1], { + commandType: 'KI', + content: 'hello' + }); + }); + + it('onCommand() should work with Unicode content', (): void => { + assert.deepStrictEqual(receivedCommands[2], { + commandType: 'KI', + content: '世界' + }); + }); + +}); +*/ diff --git a/ts/nni_manager/test/core/ipcInterfaceTerminate.test.ts b/ts/nni_manager/test/core/ipcInterfaceTerminate.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..e256dbf41f163c4eb770c0d4a896ee66edfbb0f1 --- /dev/null +++ b/ts/nni_manager/test/core/ipcInterfaceTerminate.test.ts @@ -0,0 +1,105 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import * as assert from 'assert'; +import { ChildProcess, spawn, StdioOptions } from 'child_process'; +import { Deferred } from 'ts-deferred'; +import { cleanupUnitTest, prepareUnitTest, getMsgDispatcherCommand, getTunerProc } from '../../common/utils'; +import * as CommandType from '../../core/commands'; +import { createDispatcherInterface, IpcInterface } from '../../core/ipcInterface'; + +let dispatcher: IpcInterface | undefined; +let procExit: boolean = false; +let procError: boolean = false; + +function startProcess(): void { + // create fake assessor process + const stdio: StdioOptions = ['ignore', 'pipe', process.stderr, 'pipe', 'pipe']; + + const dispatcherCmd: string = getMsgDispatcherCommand( + // Mock tuner config + { + experimentName: 'exp1', + maxExperimentDuration: '1h', + searchSpace: '', + trainingService: { + platform: 'local' + }, + trialConcurrency: 1, + maxTrialNumber: 5, + tuner: { + className: 'dummy_tuner.DummyTuner', + codeDirectory: '.' + }, + assessor: { + className: 'dummy_assessor.DummyAssessor', + codeDirectory: '.' 
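+ // NOTE: codeDirectory '.' is presumably resolved against the working directory + // handed to getTunerProc below ('core/test'), which is where dummy_tuner.py and + // dummy_assessor.py live.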
+ }, + trialCommand: '', + trialCodeDirectory: '', + debug: true + } + ); + const proc: ChildProcess = getTunerProc(dispatcherCmd, stdio, 'core/test', process.env); + proc.on('error', (_error: Error): void => { + procExit = true; + procError = true; + }); + proc.on('exit', (code: number): void => { + procExit = true; + procError = (code !== 0); + }); + + // create IPC interface + dispatcher = createDispatcherInterface(proc); + (dispatcher).onCommand((commandType: string, content: string): void => { + console.log(commandType, content); + }); +} + +/* FIXME +describe('core/ipcInterface.terminate', (): void => { + before(() => { + prepareUnitTest(); + startProcess(); + }); + + after(() => { + cleanupUnitTest(); + }); + + it('normal', () => { + (dispatcher).sendCommand( + CommandType.REPORT_METRIC_DATA, + '{"trial_job_id":"A","type":"PERIODICAL","value":1,"sequence":123}'); + + const deferred: Deferred = new Deferred(); + setTimeout( + () => { + assert.ok(!procExit); + assert.ok(!procError); + deferred.resolve(); + }, + 1000); + + return deferred.promise; + }); + + it('terminate', () => { + (dispatcher).sendCommand(CommandType.TERMINATE); + + const deferred: Deferred = new Deferred(); + setTimeout( + () => { + assert.ok(procExit); + assert.ok(!procError); + deferred.resolve(); + }, + 10000); + + return deferred.promise; + }); +}); +*/ diff --git a/ts/nni_manager/test/core/nnimanager.test.ts b/ts/nni_manager/test/core/nnimanager.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..20eb94910ca33ab83920615280502e2f705779c3 --- /dev/null +++ b/ts/nni_manager/test/core/nnimanager.test.ts @@ -0,0 +1,296 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import * as fs from 'fs'; +import * as os from 'os'; +import { assert, expect } from 'chai'; +import { Container, Scope } from 'typescript-ioc'; + +import * as component from '../../common/component'; +import { Database, DataStore } from '../../common/datastore'; +import { Manager, ExperimentProfile} from '../../common/manager'; +import { ExperimentManager } from '../../common/experimentManager'; +import { TrainingService } from '../../common/trainingService'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { NNIExperimentsManager } from '../../core/nniExperimentsManager'; +import { NNIManager } from '../../core/nnimanager'; +import { SqlDB } from '../../core/sqlDatabase'; +import { MockedTrainingService } from '../mock/trainingService'; +import { MockedDataStore } from '../mock/datastore'; +import { TensorboardManager } from '../../common/tensorboardManager'; +import { NNITensorboardManager } from '../../core/nniTensorboardManager'; +import * as path from 'path'; + +async function initContainer(): Promise { + prepareUnitTest(); + Container.bind(Manager).to(NNIManager).scope(Scope.Singleton); + Container.bind(Database).to(SqlDB).scope(Scope.Singleton); + Container.bind(DataStore).to(MockedDataStore).scope(Scope.Singleton); + Container.bind(ExperimentManager).to(NNIExperimentsManager).scope(Scope.Singleton); + Container.bind(TensorboardManager).to(NNITensorboardManager).scope(Scope.Singleton); + await component.get(DataStore).init(); +} + +// FIXME: timeout on macOS +describe('Unit test for nnimanager', function () { + + let nniManager: NNIManager; + + let ClusterMetadataKey = 'mockedMetadataKey'; + + let experimentParams: any = { + experimentName: 'naive_experiment', + trialConcurrency: 3, + maxExperimentDuration: '5s', + maxTrialNumber: 3, + 
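// NOTE: deliberately tiny budget ('5s', 3 trials) so the suite finishes quickly; + // the real training service is swapped for a mock right after startExperiment(). +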
trainingService: { + platform: 'local' + }, + searchSpace: {'lr': {'_type': 'choice', '_value': [0.01,0.001]}}, + tuner: { + name: 'TPE', + classArgs: { + optimize_mode: 'maximize' + } + }, + assessor: { + name: 'Medianstop' + }, + trialCommand: 'sleep 2', + trialCodeDirectory: '', + debug: true + } + + let updateExperimentParams = { + experimentName: 'another_experiment', + trialConcurrency: 2, + maxExperimentDuration: '6s', + maxTrialNumber: 2, + trainingService: { + platform: 'local' + }, + searchSpace: '{"lr": {"_type": "choice", "_value": [0.01,0.001]}}', + tuner: { + name: 'TPE', + classArgs: { + optimize_mode: 'maximize' + } + }, + assessor: { + name: 'Medianstop' + }, + trialCommand: 'sleep 2', + trialCodeDirectory: '', + debug: true + } + + let experimentProfile: any = { + params: updateExperimentParams, + id: 'test', + execDuration: 0, + logDir: '', + startTime: 0, + nextSequenceId: 0, + revision: 0 + } + + let mockedInfo = { + "unittest": { + "port": 8080, + "startTime": 1605246730756, + "endTime": "N/A", + "status": "INITIALIZED", + "platform": "local", + "experimentName": "testExp", + "tag": [], "pid": 11111, + "webuiUrl": [], + "logDir": null + } + } + + + before(async () => { + await initContainer(); + fs.writeFileSync('.experiment.test', JSON.stringify(mockedInfo)); + const experimentsManager: ExperimentManager = component.get(ExperimentManager); + experimentsManager.setExperimentPath('.experiment.test'); + nniManager = component.get(Manager); + + const expId: string = await nniManager.startExperiment(experimentParams); + assert.strictEqual(expId, 'unittest'); + + // TODO: + // In current architecture we cannot prevent NNI manager from creating a training service. + // The training service must be manually stopped here or its callbacks will block exit. + // I'm planning on a custom training service register system similar to custom tuner, + // and when that is done we can let NNI manager to use MockedTrainingService through config. 
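+ // Detach the metric listener before cleanUp(); as the comment above notes, leftover + // callbacks would otherwise block process exit. Both fields are private, hence the + // `as any` escape hatch below.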
+ const manager = nniManager as any; + manager.trainingService.removeTrialJobMetricListener(manager.trialJobMetricListener); + manager.trainingService.cleanUp(); + + manager.trainingService = new MockedTrainingService(); + }) + + after(async () => { + // FIXME + await nniManager.stopExperimentTopHalf(); + cleanupUnitTest(); + }) + + + + it('test addCustomizedTrialJob', () => { + return nniManager.addCustomizedTrialJob('"hyperParams"').then(() => { + + }).catch((error) => { + assert.fail(error); + }) + }) + + + it('test listTrialJobs', () => { + return nniManager.listTrialJobs().then(function (trialjobdetails) { + expect(trialjobdetails.length).to.be.equal(2); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test getTrialJob valid', () => { + //query a exist id + return nniManager.getTrialJob('1234').then(function (trialJobDetail) { + expect(trialJobDetail.trialJobId).to.be.equal('1234'); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test getTrialJob with invalid id', () => { + //query a not exist id, and the function should throw error, and should not process then() method + return nniManager.getTrialJob('4567').then((_jobid) => { + assert.fail(); + }).catch((_error) => { + assert.isTrue(true); + }) + }) + + it('test cancelTrialJobByUser', () => { + return nniManager.cancelTrialJobByUser('1234').then(() => { + + }).catch((error) => { + console.log(error); + assert.fail(error); + }) + }) + + it('test getExperimentProfile', () => { + return nniManager.getExperimentProfile().then((experimentProfile) => { + expect(experimentProfile.id).to.be.equal('unittest'); + expect(experimentProfile.logDir).to.be.equal(path.join(os.homedir(),'nni-experiments','unittest')); + + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test updateExperimentProfile TRIAL_CONCURRENCY', () => { + return nniManager.updateExperimentProfile(experimentProfile, 'TRIAL_CONCURRENCY').then(() => { + nniManager.getExperimentProfile().then((updateProfile) => { + expect(updateProfile.params.trialConcurrency).to.be.equal(2); + }); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test updateExperimentProfile MAX_EXEC_DURATION', () => { + return nniManager.updateExperimentProfile(experimentProfile, 'MAX_EXEC_DURATION').then(() => { + nniManager.getExperimentProfile().then((updateProfile) => { + expect(updateProfile.params.maxExperimentDuration).to.be.equal('6s'); + }); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test updateExperimentProfile SEARCH_SPACE', () => { + return nniManager.updateExperimentProfile(experimentProfile, 'SEARCH_SPACE').then(() => { + nniManager.getExperimentProfile().then((updateProfile) => { + expect(updateProfile.params.searchSpace).to.be.equal('{"lr": {"_type": "choice", "_value": [0.01,0.001]}}'); + }); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test updateExperimentProfile MAX_TRIAL_NUM', () => { + return nniManager.updateExperimentProfile(experimentProfile, 'MAX_TRIAL_NUM').then(() => { + nniManager.getExperimentProfile().then((updateProfile) => { + expect(updateProfile.params.maxTrialNumber).to.be.equal(2); + }); + }).catch((error: any) => { + assert.fail(error); + }) + }) + + it('test getStatus', () => { + assert.strictEqual(nniManager.getStatus().status,'RUNNING'); + }) + + it('test getMetricData with trialJobId', () => { + //query a exist trialJobId + return nniManager.getMetricData('4321', 'CUSTOM').then((metricData) => { + expect(metricData.length).to.be.equal(1); + 
expect(metricData[0].trialJobId).to.be.equal('4321'); + expect(metricData[0].parameterId).to.be.equal('param1'); + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test getMetricData with invalid trialJobId', () => { + //query an invalid trialJobId + return nniManager.getMetricData('43210', 'CUSTOM').then((_metricData) => { + assert.fail(); + }).catch((_error) => { + }) + }) + + it('test getTrialJobStatistics', () => { + // get 3 trial jobs (init, addCustomizedTrialJob, cancelTrialJobByUser) + return nniManager.getTrialJobStatistics().then(function (trialJobStatistics) { + expect(trialJobStatistics.length).to.be.equal(2); + if (trialJobStatistics[0].trialJobStatus === 'WAITING') { + expect(trialJobStatistics[0].trialJobNumber).to.be.equal(2); + expect(trialJobStatistics[1].trialJobNumber).to.be.equal(1); + } + else { + expect(trialJobStatistics[1].trialJobNumber).to.be.equal(2); + expect(trialJobStatistics[0].trialJobNumber).to.be.equal(1); + } + }).catch((error) => { + assert.fail(error); + }) + }) + + it('test addCustomizedTrialJob reach maxTrialNumber', () => { + // test currSubmittedTrialNum reach maxTrialNumber + return nniManager.addCustomizedTrialJob('"hyperParam"').then(() => { + nniManager.getTrialJobStatistics().then(function (trialJobStatistics) { + if (trialJobStatistics[0].trialJobStatus === 'WAITING') + expect(trialJobStatistics[0].trialJobNumber).to.be.equal(2); + else + expect(trialJobStatistics[1].trialJobNumber).to.be.equal(2); + }) + }).catch((error) => { + assert.fail(error); + }) + }) + + //it('test resumeExperiment', async () => { + //TODO: add resume experiment unit test + //}) + +}) diff --git a/ts/nni_manager/test/core/sqlDatabase.test.ts b/ts/nni_manager/test/core/sqlDatabase.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d1e8fa72919babb8bfa95871f03e4c6fe7a7ebe3 --- /dev/null +++ b/ts/nni_manager/test/core/sqlDatabase.test.ts @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
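+ +// Editor's note: the trailing `// 0`, `// 1`, ... comments on the fixture rows below are +// row indices; assertRecordsEqual(records, inputs, indices) uses them to pin exactly which +// fixture rows each query is expected to return, and in which order.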
+ +'use strict'; + +import * as assert from 'assert'; +import * as os from 'os'; +import * as path from 'path'; +import { Container } from 'typescript-ioc'; +import * as component from '../../common/component'; +import { Database, MetricDataRecord, TrialJobEvent, TrialJobEventRecord } from '../../common/datastore'; +import { setExperimentStartupInfo } from '../../common/experimentStartupInfo'; +import { ExperimentConfig, ExperimentProfile } from '../../common/manager'; +import { cleanupUnitTest, getDefaultDatabaseDir, mkDirP, prepareUnitTest } from '../../common/utils'; +import { SqlDB } from '../../core/sqlDatabase'; + +const expParams1: ExperimentConfig = { + experimentName: 'Exp1', + trialConcurrency: 3, + maxExperimentDuration: '100s', + maxTrialNumber: 5, + trainingService: { + platform: 'local' + }, + searchSpace: 'SS', + tuner: { + className: 'testTuner' + }, + trialCommand: '', + trialCodeDirectory: '', + debug: true +}; + +const expParams2: ExperimentConfig = { + experimentName: 'Exp2', + trialConcurrency: 5, + maxExperimentDuration: '1000s', + maxTrialNumber: 5, + trainingService: { + platform: 'local' + }, + searchSpace: '', + tuner: { + className: 'testTuner' + }, + assessor: { + className: 'testAssessor' + }, + trialCommand: '', + trialCodeDirectory: '', + debug: true +}; + +const profiles: ExperimentProfile[] = [ + { params: expParams1, id: '#1', execDuration: 0, logDir: '/log', startTime: Date.now(), endTime: undefined, nextSequenceId: 0, revision: 1,}, + { params: expParams1, id: '#1', execDuration: 0, logDir: '/log', startTime: Date.now(), endTime: Date.now(), nextSequenceId: 1, revision: 2 }, + { params: expParams2, id: '#2', execDuration: 0, logDir: '/log', startTime: Date.now(), endTime: Date.now(), nextSequenceId: 0, revision: 2 }, + { params: expParams2, id: '#2', execDuration: 0, logDir: '/log', startTime: Date.now(), endTime: Date.now(), nextSequenceId: 2, revision: 3 } +]; + +const events: TrialJobEventRecord[] = [ + { timestamp: Date.now(), event: 'WAITING', trialJobId: 'A', data: 'hello' }, // 0 + { timestamp: Date.now(), event: 'UNKNOWN', trialJobId: 'B', data: 'world' }, // 1 + { timestamp: Date.now(), event: 'RUNNING', trialJobId: 'B', data: undefined }, // 2 + { timestamp: Date.now(), event: 'RUNNING', trialJobId: 'A', data: '123' }, // 3 + { timestamp: Date.now(), event: 'FAILED', trialJobId: 'A', data: undefined } // 4 +]; + +const metrics: MetricDataRecord[] = [ + { timestamp: Date.now(), trialJobId: 'A', parameterId: '1', type: 'PERIODICAL', sequence: 0, data: 1.1 }, // 0 + { timestamp: Date.now(), trialJobId: 'B', parameterId: '2', type: 'PERIODICAL', sequence: 0, data: 2.1 }, // 1 + { timestamp: Date.now(), trialJobId: 'A', parameterId: '1', type: 'PERIODICAL', sequence: 1, data: 1.2 }, // 2 + { timestamp: Date.now(), trialJobId: 'A', parameterId: '1', type: 'FINAL', sequence: 0, data: 1.3 }, // 3 + { timestamp: Date.now(), trialJobId: 'C', parameterId: '2', type: 'PERIODICAL', sequence: 1, data: 2.1 }, // 4 + { timestamp: Date.now(), trialJobId: 'C', parameterId: '2', type: 'FINAL', sequence: 0, data: 2.2 } // 5 +]; + +function assertRecordEqual(record: any, value: any): void { + assert.ok(record.timestamp > new Date(2018, 6, 1).getTime()); + assert.ok(record.timestamp < Date.now()); + + for (const key in value) { + if (key !== 'timestamp') { + assert.equal(record[key], value[key]); + } + } +} + +function assertRecordsEqual(records: any[], inputs: any[], indices: number[]): void { + assert.equal(records.length, indices.length); + for (let i: number = 
0; i < records.length; i++) { + assertRecordEqual(records[i], inputs[indices[i]]); + } +} + +describe('core/sqlDatabase', () => { + let db: SqlDB | undefined; + + before(async () => { + prepareUnitTest(); + const dbDir: string = getDefaultDatabaseDir(); + await mkDirP(dbDir); + db = new SqlDB(); + await (db).init(true, dbDir); + for (const profile of profiles) { + await (db).storeExperimentProfile(profile); + } + for (const event of events) { + await (db).storeTrialJobEvent(event.event, event.trialJobId, Date.now(), event.data); + } + for (const metric of metrics) { + await (db).storeMetricData(metric.trialJobId, JSON.stringify(metric)); + } + }); + + after(() => { + cleanupUnitTest(); + }); + + it('queryExperimentProfile without revision', async () => { + const records: ExperimentProfile[] = await (db).queryExperimentProfile('#1'); + assert.equal(records.length, 2); + assert.deepEqual(records[0], profiles[1]); + assert.deepEqual(records[1], profiles[0]); + }); + + it('queryExperimentProfile with revision', async () => { + const records: ExperimentProfile[] = await (db).queryExperimentProfile('#1', 2); + assert.equal(records.length, 1); + assert.deepEqual(records[0], profiles[1]); + }); + + it('queryLatestExperimentProfile', async () => { + const record: ExperimentProfile = await (db).queryLatestExperimentProfile('#2'); + assert.deepEqual(record, profiles[3]); + }); + + it('queryTrialJobEventByEvent without trialJobId', async () => { + const records: TrialJobEventRecord[] = await (db).queryTrialJobEvent(undefined, 'RUNNING'); + assertRecordsEqual(records, events, [2, 3]); + }); + + it('queryTrialJobEventByEvent with trialJobId', async () => { + const records: TrialJobEventRecord[] = await (db).queryTrialJobEvent('A', 'RUNNING'); + assertRecordsEqual(records, events, [3]); + }); + + it('queryTrialJobEventById', async () => { + const records: TrialJobEventRecord[] = await (db).queryTrialJobEvent('B'); + assertRecordsEqual(records, events, [1, 2]); + }); + + it('queryMetricDataByType without trialJobId', async () => { + const records: MetricDataRecord[] = await (db).queryMetricData(undefined, 'FINAL'); + assertRecordsEqual(records, metrics, [3, 5]); + }); + + it('queryMetricDataByType with trialJobId', async () => { + const records: MetricDataRecord[] = await (db).queryMetricData('A', 'PERIODICAL'); + assertRecordsEqual(records, metrics, [0, 2]); + }); + + it('queryMetricDataById', async () => { + const records: MetricDataRecord[] = await (db).queryMetricData('B'); + assertRecordsEqual(records, metrics, [1]); + }); + + it('empty result', async () => { + const records: MetricDataRecord[] = await (db).queryMetricData('X'); + assert.equal(records.length, 0); + }); + +}); diff --git a/ts/nni_manager/test/mock/datastore.ts b/ts/nni_manager/test/mock/datastore.ts new file mode 100644 index 0000000000000000000000000000000000000000..1bea68d92da4d62b604fb79fa89432f26163e6a2 --- /dev/null +++ b/ts/nni_manager/test/mock/datastore.ts @@ -0,0 +1,270 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
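+ +// Editor's note: each SimpleDb below persists itself as JSON of the shape +// { name, data: [...], index: '[[key, dataIndex], ...]' } (the key->index Map is serialized +// via the [...map] spread). A minimal sketch of reading such a file back, assuming that +// shape (the mock itself never reloads it): +// +// const raw = JSON.parse(fs.readFileSync('./trial_jobs.json', 'utf8')); +// const index: Map<string, number> = new Map(JSON.parse(raw.index));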
+ +'use strict'; + +import { assert } from 'console'; +import * as fs from 'fs'; +import { Deferred } from 'ts-deferred'; + +import { DataStore, MetricData, MetricDataRecord, MetricType, + TrialJobEvent, TrialJobEventRecord, TrialJobInfo } from '../../common/datastore'; +import { ExperimentProfile, TrialJobStatistics } from '../../common/manager'; +import { TrialJobStatus } from '../../common/trainingService'; + +class SimpleDb { + private name: string = ''; + private fileName: string = ''; + + private db: Array<any> = new Array<any>(); + private map: Map<string, number> = new Map<string, number>(); // map key to data index + + constructor (name: string, filename: string) { + this.name = name; + this.fileName = filename; + } + + async saveData(data: any, key?: string): Promise<void> { + let index; + if (key && this.map.has(key)) { + index = this.map.get(key); + } + + if (index === undefined) { + index = this.db.push(data) - 1; + } else { + this.db[index] = data; + } + + if (key) { + this.map.set(key, index); + } + await this.persist(); + } + + listAllData(): Promise<Array<any>> { + const deferred = new Deferred<Array<any>>(); + deferred.resolve(this.db); + + return deferred.promise; + } + + getData(key: string): Promise<any> { + const deferred = new Deferred<any>(); + if (this.map.has(key)) { + const index = this.map.get(key); + if (index !== undefined && index >= 0) { + deferred.resolve(this.db[index]); + } else { + deferred.reject(new Error(`Key or index not found: ${this.name}, ${key}`)); + } + } else { + console.log(`Key not found: ${this.name}, ${key}`); + deferred.resolve(undefined); + } + return deferred.promise; + } + + persist(): Promise<void> { + const deferred = new Deferred<void>(); + fs.writeFileSync(this.fileName, JSON.stringify({ + name: this.name, + data: this.db, + index: JSON.stringify([...this.map]) + }, null, 4)); + deferred.resolve(); + return deferred.promise; + } +} + +class MockedDataStore implements DataStore { + + private dbExpProfile: SimpleDb = new SimpleDb('exp_profile', './exp_profile.json'); + private dbTrialJobs: SimpleDb = new SimpleDb('trial_jobs', './trial_jobs.json'); + private dbMetrics: SimpleDb = new SimpleDb('metrics', './metrics.json'); + + trialJob1 = { + event: 'ADD_CUSTOMIZED', + timestamp: Date.now(), + trialJobId: "4321", + data: '' + }; + + metrics1 = { + timestamp: Date.now(), + trialJobId: '4321', + parameterId: 'param1', + type: 'CUSTOM', + sequence: 21, + data: '' + }; + + init(): Promise<void> { + this.dbTrialJobs.saveData(this.trialJob1); + this.dbMetrics.saveData(this.metrics1); + return Promise.resolve(); + } + + close(): Promise<void> { + return Promise.resolve(); + } + + async storeExperimentProfile(experimentProfile: ExperimentProfile): Promise<void> { + await this.dbExpProfile.saveData(experimentProfile, experimentProfile.id); + } + + async getExperimentProfile(experimentId: string): Promise<ExperimentProfile> { + return await this.dbExpProfile.getData(experimentId); + } + + async storeTrialJobEvent(event: TrialJobEvent, trialJobId: string, data?: string | undefined): Promise<void> { + const dataRecord: TrialJobEventRecord = { + event: event, + timestamp: Date.now(), + trialJobId: trialJobId, + data: data + }; + await this.dbTrialJobs.saveData(dataRecord); + } + + async getTrialJobStatistics(): Promise<TrialJobStatistics[]> { + const result: TrialJobStatistics[] = []; + const jobs = await this.listTrialJobs(); + const map: Map<TrialJobStatus, number> = new Map<TrialJobStatus, number>(); + + jobs.forEach((value) => { + let n: number|undefined = map.get(value.status); + if (!n) { + n = 0; + } + map.set(value.status, n + 1); + }); + + map.forEach((value, key) => { + const statistics: TrialJobStatistics = { + trialJobStatus: key, + trialJobNumber: value + }; + result.push(statistics); + }); + return result; + } + + async listTrialJobs(status?: TrialJobStatus): Promise<TrialJobInfo[]> { + const trialJobEvents: TrialJobEventRecord[] = await this.dbTrialJobs.listAllData(); + const map: Map<string, TrialJobInfo> = this.getTrialJobsByReplayEvents(trialJobEvents); + const result: TrialJobInfo[] = []; + for (const key of map.keys()) { + const jobInfo = map.get(key); + if (jobInfo === undefined) { + continue; + } + if (!(status && jobInfo.status !== status)) { + if (jobInfo.status === 'SUCCEEDED') { + jobInfo.finalMetricData = await this.getFinalMetricData(jobInfo.trialJobId); + } + result.push(jobInfo); + } + } + return result; + } + + async storeMetricData(trialJobId: string, data: string): Promise<void> { + const metrics = JSON.parse(data) as MetricData; + assert(trialJobId === metrics.trial_job_id); + await this.dbMetrics.saveData({ + trialJobId: metrics.trial_job_id, + parameterId: metrics.parameter_id, + type: metrics.type, + data: metrics.value, + timestamp: Date.now() + }); + } + + async getMetricData(trialJobId: string, metricType: MetricType): Promise<MetricDataRecord[]> { + const result: MetricDataRecord[] = []; + const allMetrics = await this.dbMetrics.listAllData(); + allMetrics.forEach((value) => { + const metrics = value; + if (metrics.type === metricType && metrics.trialJobId === trialJobId) { + result.push(metrics); + } + }); + + return result; + } + + async exportTrialHpConfigs(): Promise<string> { + const ret: string = ''; + return Promise.resolve(ret); + } + + async getImportedData(): Promise<string[]> { + const ret: string[] = []; + return Promise.resolve(ret); + } + + public getTrialJob(_trialJobId: string): Promise<TrialJobInfo> { + return Promise.resolve({ + trialJobId: '1234', + status: 'SUCCEEDED', + startTime: Date.now(), + endTime: Date.now() + }); + } + + private async getFinalMetricData(trialJobId: string): Promise<MetricDataRecord | undefined> { + const metrics: MetricDataRecord[] = await this.getMetricData(trialJobId, "FINAL"); + assert(metrics.length <= 1); + if (metrics.length == 1) { + return metrics[0]; + } else { + return undefined; + } + } + + private getJobStatusByLatestEvent(event: TrialJobEvent): TrialJobStatus { + switch (event) { + case 'USER_TO_CANCEL': + return 'USER_CANCELED'; + case 'ADD_CUSTOMIZED': + return 'WAITING'; + } + return event as TrialJobStatus; + } + + private getTrialJobsByReplayEvents(trialJobEvents: TrialJobEventRecord[]): Map<string, TrialJobInfo> { + const map: Map<string, TrialJobInfo> = new Map<string, TrialJobInfo>(); + // assume data is stored by time ASC order + for (const record of trialJobEvents) { + let jobInfo: TrialJobInfo | undefined; + if (map.has(record.trialJobId)) { + jobInfo = map.get(record.trialJobId); + } else { + jobInfo = { + trialJobId: record.trialJobId, + status: this.getJobStatusByLatestEvent(record.event), + }; + } + if (!jobInfo) { + throw new Error('Empty JobInfo'); + } + switch (record.event) { + case 'RUNNING': + jobInfo.startTime = Date.now(); + break; + case 'SUCCEEDED': + case 'FAILED': + case 'USER_CANCELED': + case 'SYS_CANCELED': + case 'EARLY_STOPPED': + jobInfo.endTime = Date.now(); + } + jobInfo.status = this.getJobStatusByLatestEvent(record.event); + map.set(record.trialJobId, jobInfo); + } + return map; + } +} + +export { MockedDataStore }; diff --git a/ts/nni_manager/test/mock/experimentManager.ts b/ts/nni_manager/test/mock/experimentManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..a38148b99c4823271d639ff5ce65320893947709 --- /dev/null +++ b/ts/nni_manager/test/mock/experimentManager.ts @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
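+ +// Editor's note: this mock serves one canned experiment record (id 'test'); note that +// stop() returns a Promise that never settles, so callers must not await it.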
+ +'use strict'; + +import { ExperimentManager } from '../../common/experimentManager'; +import { Provider } from 'typescript-ioc'; + +export const testExperimentManagerProvider: Provider = { + get: (): ExperimentManager => { return new MockedExperimentManager(); } +}; + +export class MockedExperimentManager extends ExperimentManager { + public getExperimentsInfo(): Promise<JSON> { + const expInfo = JSON.parse(JSON.stringify({ + "test": { + "port": 8080, + "startTime": 1605246730756, + "endTime": "N/A", + "status": "RUNNING", + "platform": "local", + "experimentName": "testExp", + "tag": [], "pid": 11111, + "webuiUrl": [], + "logDir": null + } + })); + return new Promise<JSON>((resolve, _reject) => { + resolve(expInfo); + }); + } + + public setExperimentPath(_newPath: string): void { + return; + } + + public setExperimentInfo(_experimentId: string, _key: string, _value: any): void { + return; + } + + public stop(): Promise<void> { + return new Promise<void>(() => {}); + } +} diff --git a/ts/nni_manager/test/mock/mockedTrial.py b/ts/nni_manager/test/mock/mockedTrial.py new file mode 100644 index 0000000000000000000000000000000000000000..5f85934cea94ab0981a6a18c1c9cc72f2a77eeb4 --- /dev/null +++ b/ts/nni_manager/test/mock/mockedTrial.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import time + +METRICS_FILENAME = '.nni/metrics' +MAGIC = 'ME' + +def sdk_send_data(data): + out_dir = os.getenv('NNI_SYS_DIR') + if not os.path.isdir(out_dir): + raise Exception('Cannot find NNI_SYS_DIR: {}'.format(out_dir)) + + filename = os.path.join(out_dir, METRICS_FILENAME) + wrapped_data = data + '\n' + datalen = len(wrapped_data) + if datalen < 2: + return + with open(filename, 'a') as f: + f.write('{}{:06d}{}'.format(MAGIC, datalen, wrapped_data)) + +def user_code(): + + epochs = 20 + + val_acc = 0 + batch_size = 32 + for epoch in range(epochs): + # Training + time.sleep(1) + val_acc += 0.5 + metrics = 'epoch: {}, val accuracy: {:.2f}, batch size: {}'.format(epoch, val_acc, batch_size) + sdk_send_data(metrics) + +if __name__ == '__main__': + print('>>>start...') + user_code() + print('>>>end...') diff --git a/ts/nni_manager/test/mock/nniManager.ts b/ts/nni_manager/test/mock/nniManager.ts new file mode 100644 index 0000000000000000000000000000000000000000..58f06040b6051eb5f43aee4497f2767806e8eeb0 --- /dev/null +++ b/ts/nni_manager/test/mock/nniManager.ts @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
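+ +// Editor's note: tests wire this in with Container.bind(Manager).provider(testManagerProvider) +// (see restserver.test.ts), so REST-level assertions run against the canned trial ids +// ('1234', '3456') and experiment id ('2345') returned below.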
+ +'use strict'; + +import { Deferred } from 'ts-deferred'; +import { Provider } from 'typescript-ioc'; + +import { MetricDataRecord, MetricType, TrialJobInfo } from '../../common/datastore'; +import { MethodNotImplementedError } from '../../common/errors'; +import { + ExperimentConfig, ExperimentProfile, Manager, ProfileUpdateType, + TrialJobStatistics, NNIManagerStatus +} from '../../common/manager'; +import { + TrialJobApplicationForm, TrialJobDetail, TrialJobStatus +} from '../../common/trainingService'; + +export const testManagerProvider: Provider = { + get: (): Manager => { return new MockedNNIManager(); } +}; + +export class MockedNNIManager extends Manager { + public getStatus(): NNIManagerStatus { + return { + status: 'RUNNING', + errors: [] + } + } + public updateExperimentProfile(_experimentProfile: ExperimentProfile, _updateType: ProfileUpdateType): Promise { + return Promise.resolve(); + } + public importData(_data: string): Promise { + return Promise.resolve(); + } + public getImportedData(): Promise { + const ret: string[] = ["1", "2"]; + return Promise.resolve(ret); + } + public async exportData(): Promise { + const ret: string = ''; + return Promise.resolve(ret); + } + public getTrialJobStatistics(): Promise { + const deferred: Deferred = new Deferred(); + deferred.resolve([{ + trialJobStatus: 'RUNNING', + trialJobNumber: 2 + }, { + trialJobStatus: 'FAILED', + trialJobNumber: 1 + }]); + + return deferred.promise; + } + public addCustomizedTrialJob(_hyperParams: string): Promise { + return Promise.resolve(99); + } + + public resumeExperiment(): Promise { + return Promise.resolve(); + } + + public submitTrialJob(_form: TrialJobApplicationForm): Promise { + const deferred: Deferred = new Deferred(); + const jobDetail: TrialJobDetail = { + id: '1234', + status: 'RUNNING', + submitTime: Date.now(), + startTime: Date.now(), + endTime: Date.now(), + tags: ['test'], + url: 'http://test', + workingDirectory: '/tmp/mocked', + form: { + sequenceId: 0, + hyperParameters: { value: '', index: 0 } + } + }; + deferred.resolve(jobDetail); + + return deferred.promise; + } + + public cancelTrialJobByUser(_trialJobId: string): Promise { + return Promise.resolve(); + } + + public getClusterMetadata(_key: string): Promise { + return Promise.resolve('METAVALUE1'); + } + + public startExperiment(_experimentParams: ExperimentConfig): Promise { + return Promise.resolve('id-1234'); + } + + public setClusterMetadata(_key: string, _value: string): Promise { + return Promise.resolve(); + } + + public getTrialJob(_trialJobId: string): Promise { + const deferred: Deferred = new Deferred(); + const jobInfo: TrialJobInfo = { + trialJobId: '1234', + status: 'SUCCEEDED', + startTime: Date.now(), + endTime: Date.now() + }; + deferred.resolve(jobInfo); + + return deferred.promise; + } + + public stopExperiment(): Promise { + throw new MethodNotImplementedError(); + } + public stopExperimentTopHalf(): Promise { + throw new MethodNotImplementedError(); + } + public stopExperimentBottomHalf(): Promise { + throw new MethodNotImplementedError(); + } + public getMetricData(_trialJobId: string, _metricType: MetricType): Promise { + throw new MethodNotImplementedError(); + } + public getMetricDataByRange(_minSeqId: number, _maxSeqId: number): Promise { + throw new MethodNotImplementedError(); + } + public getLatestMetricData(): Promise { + throw new MethodNotImplementedError(); + } + public getTrialFile(_trialJobId: string, _fileName: string): Promise { + throw new MethodNotImplementedError(); + } + public 
getExperimentProfile(): Promise { + const profile: ExperimentProfile = { + params: { + experimentName: 'exp1', + trialConcurrency: 2, + maxExperimentDuration: '30s', + maxTrialNumber: 3, + trainingService: { + platform: 'local' + }, + searchSpace: '{lr: 0.01}', + tuner: { + className: 'testTuner', + }, + trialCommand: '', + trialCodeDirectory: '', + debug: true + }, + id: '2345', + execDuration: 0, + logDir: '', + startTime: Date.now(), + endTime: Date.now(), + nextSequenceId: 0, + revision: 0 + }; + + return Promise.resolve(profile); + } + public listTrialJobs(_status?: TrialJobStatus): Promise { + const job1: TrialJobInfo = { + trialJobId: '1234', + status: 'SUCCEEDED', + startTime: Date.now(), + endTime: Date.now(), + finalMetricData: [{ + timestamp: 0, + trialJobId: '3456', + parameterId: '123', + type: 'FINAL', + sequence: 0, + data: '0.2' + }] + }; + const job2: TrialJobInfo = { + trialJobId: '3456', + status: 'FAILED', + startTime: Date.now(), + endTime: Date.now(), + finalMetricData: [{ + timestamp: 0, + trialJobId: '3456', + parameterId: '123', + type: 'FINAL', + sequence: 0, + data: '0.2' + }] + }; + + return Promise.resolve([job1, job2]); + } + + public async getTrialOutputLocalPath(_trialJobId: string): Promise { + throw new MethodNotImplementedError(); + } + + public async fetchTrialOutput(_trialJobId: string, _subpath: string): Promise { + throw new MethodNotImplementedError(); + } +} diff --git a/ts/nni_manager/test/mock/trainingService.ts b/ts/nni_manager/test/mock/trainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..906c8d4c0853086fa8a1f205780a05a131c62d65 --- /dev/null +++ b/ts/nni_manager/test/mock/trainingService.ts @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
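+ +// Editor's note: this mock recognizes exactly two trial ids ('1234', '3456') and one +// metadata key ('mockedMetadataKey'); anything else rejects. submitTrialJob() returns +// a Promise that stays pending forever, so no newly submitted trial ever reaches a +// terminal state.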
+ +'use strict'; + +import { Deferred } from 'ts-deferred'; +import { Provider } from 'typescript-ioc'; + +import { MethodNotImplementedError } from '../../common/errors'; +import { TrainingService, TrialJobApplicationForm, TrialJobDetail, TrialJobMetric } from '../../common/trainingService'; + +const testTrainingServiceProvider: Provider = { + get: () => { return new MockedTrainingService(); } +}; + +class MockedTrainingService extends TrainingService { + public mockedMetaDataValue: string = "default"; + public jobDetail1: TrialJobDetail = { + id: '1234', + status: 'SUCCEEDED', + submitTime: Date.now(), + startTime: Date.now(), + endTime: Date.now(), + tags: ['test'], + url: 'http://test', + workingDirectory: '/tmp/mocked', + form: { + sequenceId: 0, + hyperParameters: { value: '', index: 0 } + }, + }; + public jobDetail2: TrialJobDetail = { + id: '3456', + status: 'SUCCEEDED', + submitTime: Date.now(), + startTime: Date.now(), + endTime: Date.now(), + tags: ['test'], + url: 'http://test', + workingDirectory: '/tmp/mocked', + form: { + sequenceId: 1, + hyperParameters: { value: '', index: 1 } + }, + }; + + public listTrialJobs(): Promise { + const deferred = new Deferred(); + + deferred.resolve([this.jobDetail1, this.jobDetail2]); + return deferred.promise; + } + + public getTrialJob(trialJobId: string): Promise { + const deferred = new Deferred(); + if(trialJobId === '1234'){ + deferred.resolve(this.jobDetail1); + }else if(trialJobId === '3456'){ + deferred.resolve(this.jobDetail2); + }else{ + deferred.reject(); + } + return deferred.promise; + } + + public getTrialFile(_trialJobId: string, _fileName: string): Promise { + throw new MethodNotImplementedError(); + } + + async run(): Promise { + + } + + public addTrialJobMetricListener(_listener: (_metric: TrialJobMetric) => void): void { + } + + public removeTrialJobMetricListener(_listener: (_metric: TrialJobMetric) => void): void { + } + + public submitTrialJob(_form: TrialJobApplicationForm): Promise { + const deferred = new Deferred(); + return deferred.promise; + } + + public updateTrialJob(_trialJobId: string, _form: TrialJobApplicationForm): Promise { + throw new MethodNotImplementedError(); + } + + public get isMultiPhaseJobSupported(): boolean { + return false; + } + + public cancelTrialJob(trialJobId: string, _isEarlyStopped: boolean = false): Promise { + const deferred = new Deferred(); + if(trialJobId === '1234' || trialJobId === '3456'){ + deferred.resolve(); + }else{ + deferred.reject('job id error'); + } + return deferred.promise; + } + + public setClusterMetadata(key: string, value: string): Promise { + const deferred = new Deferred(); + if(key == 'mockedMetadataKey'){ + this.mockedMetaDataValue = value; + deferred.resolve(); + }else{ + deferred.reject('key error'); + } + return deferred.promise; + } + + public getClusterMetadata(key: string): Promise { + const deferred = new Deferred(); + if(key == 'mockedMetadataKey'){ + deferred.resolve(this.mockedMetaDataValue); + }else{ + deferred.reject('key error'); + } + return deferred.promise; + } + + public cleanUp(): Promise { + return Promise.resolve(); + } + + public getTrialOutputLocalPath(_trialJobId: string): Promise { + throw new MethodNotImplementedError(); + } + + public fetchTrialOutput(_trialJobId: string, _subpath: string): Promise { + throw new MethodNotImplementedError(); + } +} + +export{MockedTrainingService, testTrainingServiceProvider} diff --git a/ts/nni_manager/test/register.js b/ts/nni_manager/test/register.js new file mode 100644 index 
0000000000000000000000000000000000000000..1a166b2f502c290d1a58e301f306fb0533bd2cf7 --- /dev/null +++ b/ts/nni_manager/test/register.js @@ -0,0 +1,2 @@ +require('ts-node/register'); +require('app-module-path/cwd'); diff --git a/ts/nni_manager/test/rest_server/restserver.test.ts b/ts/nni_manager/test/rest_server/restserver.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d7e95afc4d56ee3091d8c94913bf512832018421 --- /dev/null +++ b/ts/nni_manager/test/rest_server/restserver.test.ts @@ -0,0 +1,183 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import { assert, expect } from 'chai'; +import request from 'request'; +import { Container } from 'typescript-ioc'; + +import * as component from '../../common/component'; +import { DataStore } from '../../common/datastore'; +import { ExperimentProfile, Manager } from '../../common/manager'; +import { ExperimentManager } from '../../common/experimentManager' +import { TrainingService } from '../../common/trainingService'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { MockedDataStore } from '../mock/datastore'; +import { MockedTrainingService } from '../mock/trainingService'; +import { NNIRestServer } from '../../rest_server/nniRestServer'; +import { testManagerProvider } from '../mock/nniManager'; +import { testExperimentManagerProvider } from '../mock/experimentManager'; +import { TensorboardManager } from '../../common/tensorboardManager'; +import { NNITensorboardManager } from '../../core/nniTensorboardManager'; + +describe('Unit test for rest server', () => { + + let ROOT_URL: string; + + before((done: Mocha.Done) => { + prepareUnitTest(); + Container.bind(Manager).provider(testManagerProvider); + Container.bind(DataStore).to(MockedDataStore); + Container.bind(TrainingService).to(MockedTrainingService); + Container.bind(ExperimentManager).provider(testExperimentManagerProvider); + Container.bind(TensorboardManager).to(NNITensorboardManager); + const restServer: NNIRestServer = component.get(NNIRestServer); + restServer.start().then(() => { + ROOT_URL = `${restServer.endPoint}/api/v1/nni`; + done(); + }).catch((e: Error) => { + assert.fail(`Failed to start rest server: ${e.message}`); + }); + }); + + after(() => { + component.get(NNIRestServer).stop(); + cleanupUnitTest(); + }); + + it('Test GET check-status', (done: Mocha.Done) => { + request.get(`${ROOT_URL}/check-status`, (err: Error, res: request.Response) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(200); + } + done(); + }); + }); + + it('Test GET trial-jobs/:id', (done: Mocha.Done) => { + request.get(`${ROOT_URL}/trial-jobs/1234`, (err: Error, res: request.Response, body: any) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(200); + expect(JSON.parse(body).trialJobId).to.equal('1234'); + } + done(); + }); + }); + + it('Test GET experiment', (done: Mocha.Done) => { + request.get(`${ROOT_URL}/experiment`, (err: Error, res: request.Response) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(200); + } + done(); + }); + }); + + it('Test GET trial-jobs', (done: Mocha.Done) => { + request.get(`${ROOT_URL}/trial-jobs`, (err: Error, res: request.Response) => { + expect(res.statusCode).to.equal(200); + if (err) { + assert.fail(err.message); + } + done(); + }); + }); + + it('Test GET experiments-info', (done: Mocha.Done) => { + 
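+        // Served by the mocked ExperimentManager bound through
+        // testExperimentManagerProvider in before(); only the status code is
+        // asserted here, not the payload shape.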
request.get(`${ROOT_URL}/experiments-info`, (err: Error, res: request.Response) => { + expect(res.statusCode).to.equal(200); + if (err) { + assert.fail(err.message); + } + done(); + }); + }); + + it('Test change concurrent-trial-jobs', (done: Mocha.Done) => { + request.get(`${ROOT_URL}/experiment`, (err: Error, res: request.Response, body: any) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(200); + const profile: ExperimentProfile = JSON.parse(body); + if (profile.params && profile.params.trialConcurrency) { + profile.params.trialConcurrency = 10; + } + + const req: request.Options = { + uri: `${ROOT_URL}/experiment?update_type=TRIAL_CONCURRENCY`, + method: 'PUT', + json: true, + body: profile + }; + request(req, (error: Error, response: request.Response) => { + if (error) { + assert.fail(error.message); + } else { + expect(response.statusCode).to.equal(200); + } + done(); + }); + } + }); + }); + + /* FIXME + it('Test PUT experiment/cluster-metadata bad key', (done: Mocha.Done) => { + const req: request.Options = { + uri: `${ROOT_URL}/experiment/cluster-metadata`, + method: 'PUT', + json: true, + body: { + exception_test_key: 'test' + } + }; + request(req, (err: Error, res: request.Response) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(400); + } + done(); + }); + }); + */ + + /* FIXME + it('Test PUT experiment/cluster-metadata', (done: Mocha.Done) => { + const req: request.Options = { + uri: `${ROOT_URL}/experiment/cluster-metadata`, + method: 'PUT', + json: true, + body: { + machine_list: [{ + ip: '10.10.10.101', + port: 22, + username: 'test', + passwd: '1234' + }, { + ip: '10.10.10.102', + port: 22, + username: 'test', + passwd: '1234' + }] + } + }; + request(req, (err: Error, res: request.Response) => { + if (err) { + assert.fail(err.message); + } else { + expect(res.statusCode).to.equal(200); + } + done(); + }); + }); + */ +}); diff --git a/ts/nni_manager/test/training_service/adlTrainingService.test.ts b/ts/nni_manager/test/training_service/adlTrainingService.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..277e6797e425bc4fb178b327d78c0d23d8c227b3 --- /dev/null +++ b/ts/nni_manager/test/training_service/adlTrainingService.test.ts @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
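+//
+// These tests require a reachable Kubernetes cluster and skip themselves when
+// no kubeconfig is found (the hard-coded '/home/vsts/.kube/config' path is
+// where an Azure Pipelines hosted agent keeps it). Every test body starts
+// with `if (skip) { return; }`, so the suite is a no-op elsewhere.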
+ +'use strict'; + +import chai from 'chai'; +import chaiAsPromised from 'chai-as-promised'; +import fs from 'fs'; +import tmp from 'tmp'; +import * as component from '../../common/component'; +import { TrialJobApplicationForm, TrialJobDetail, TrainingService } from '../../common/trainingService'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { TrialConfigMetadataKey } from '../../training_service/common/trialConfigMetadataKey'; +import { AdlTrainingService } from '../../training_service/kubernetes/adl/adlTrainingService'; + +const localCodeDir: string = tmp.dirSync().name + +describe('Unit Test for AdlTrainingService', () => { + let skip: boolean = false; + try { + const testKubeflowConfig = fs.readFileSync('/home/vsts/.kube/config', 'utf8'); + } catch (err) { + console.log('Please have kubernetes cluster to enable its training service unit test.'); + skip = true; + } + + let testAdlTrialConfig: any = JSON.stringify({ + "command": "python3 /root/apps/nni_linear_regression/main.py", + "codeDir": ".", + "gpuNum": 0, + "image": "test.image:latest", + "imagePullSecrets": [ + { + "name": "stagingsecrets" + } + ], + "nfs": { + "server": "172.20.188.236", + "path": "/exports", + "containerMountPath": "/nfs" + }, + "memorySize": "1Gi", + "cpuNum": 1 + }); + let testAdlTrialConfig2: any = JSON.stringify({ + "command": "python3 /root/apps/nni_linear_regression/main.py", + "codeDir": ".", + "gpuNum": 0, + "image": "test.image:latest", + "imagePullSecrets": [ + { + "name": "stagingsecrets" + } + ], + "adaptive": true, + "checkpoint": { + "storageClass": "aws-efs", + "storageSize": "1Gi" + }, + "nfs": { + "server": "172.20.188.236", + "path": "/exports", + "containerMountPath": "/nfs" + } + }); + let testNniManagerIp: any = JSON.stringify({ + "nniManagerIp": "0.0.0.0" + }); + let adlTrainingService: AdlTrainingService; + console.log(tmp.dirSync().name); + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(() => { + if (skip) { + return; + } + adlTrainingService = component.get(AdlTrainingService); + adlTrainingService.run() + }); + + afterEach(() => { + if (skip) { + return; + } + adlTrainingService.cleanUp(); + }); + + it('Set and get cluster metadata', async () => { + if (skip) { + return; + } + await adlTrainingService.setClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG, testAdlTrialConfig2); + await adlTrainingService.setClusterMetadata(TrialConfigMetadataKey.NNI_MANAGER_IP, testNniManagerIp); + let data:string = await adlTrainingService.getClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG); + chai.expect(data).to.be.equals(testAdlTrialConfig2); + }); + + it('Submit job', async () => { + if (skip) { + return; + } + // job without given checkpoint, with resource config + await adlTrainingService.setClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG, testAdlTrialConfig); + let form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + } + }; + let jobDetail: TrialJobDetail = await adlTrainingService.submitTrialJob(form); + chai.expect(jobDetail.status).to.be.equals('WAITING'); + await adlTrainingService.cancelTrialJob(jobDetail.id); + chai.expect(jobDetail.status).to.be.equals('USER_CANCELED'); + // job with given checkpoint + await adlTrainingService.setClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG, testAdlTrialConfig2); + form = { + sequenceId: 0, + hyperParameters: { + value: 'mock 
hyperparameters', + index: 0 + } + }; + jobDetail = await adlTrainingService.submitTrialJob(form); + chai.expect(jobDetail.status).to.be.equals('WAITING'); + await adlTrainingService.cancelTrialJob(jobDetail.id); + chai.expect(jobDetail.status).to.be.equals('USER_CANCELED'); + }).timeout(3000000); +}); diff --git a/ts/nni_manager/test/training_service/fileUtility.test.ts b/ts/nni_manager/test/training_service/fileUtility.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..e0df4eacc18f6156eacd64a6571cc38ae28ec1db --- /dev/null +++ b/ts/nni_manager/test/training_service/fileUtility.test.ts @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; +import * as assert from 'assert'; +import * as chai from 'chai'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as tar from 'tar'; +import { execCopydir, tarAdd, validateCodeDir } from '../../training_service/common/util'; + +const deleteFolderRecursive = (filePath: string) => { + if (fs.existsSync(filePath)) { + fs.readdirSync(filePath).forEach((file, _index) => { + const curPath = path.join(filePath, file); + if (fs.lstatSync(curPath).isDirectory()) { // recurse + deleteFolderRecursive(curPath); + } else { // delete file + fs.unlinkSync(curPath); + } + }); + fs.rmdirSync(filePath); + } +}; + +describe('fileUtility', () => { + /* + Test file utilities, includes: + - Copy directory + - Ignore with ignore file + - Add to tar + */ + + const sourceDir = 'test-fileUtilityTestSource'; + const destDir = 'test-fileUtilityTestDest'; + + beforeEach(() => { + fs.mkdirSync(sourceDir); + fs.writeFileSync(path.join(sourceDir, '.nniignore'), 'abc\nxyz'); + fs.writeFileSync(path.join(sourceDir, 'abc'), '123'); + fs.writeFileSync(path.join(sourceDir, 'abcd'), '1234'); + fs.mkdirSync(path.join(sourceDir, 'xyz')); + fs.mkdirSync(path.join(sourceDir, 'xyy')); + fs.mkdirSync(path.join(sourceDir, 'www')); + fs.mkdirSync(path.join(sourceDir, 'xx')); // empty dir + fs.writeFileSync(path.join(sourceDir, 'xyy', '.nniignore'), 'qq'); // nested nniignore + fs.writeFileSync(path.join(sourceDir, 'xyy', 'abc'), '123'); + fs.writeFileSync(path.join(sourceDir, 'xyy', 'qq'), '1234'); + fs.writeFileSync(path.join(sourceDir, 'xyy', 'pp'), '1234'); + fs.writeFileSync(path.join(sourceDir, 'www', '.nniignore'), 'pp'); // pop nniignore + fs.writeFileSync(path.join(sourceDir, 'www', 'abc'), '123'); + fs.writeFileSync(path.join(sourceDir, 'www', 'qq'), '1234'); + fs.writeFileSync(path.join(sourceDir, 'www', 'pp'), '1234'); + }); + + afterEach(() => { + deleteFolderRecursive(sourceDir); + deleteFolderRecursive(destDir); + if (fs.existsSync(`${destDir}.tar`)) { + fs.unlinkSync(`${destDir}.tar`); + } + }); + + it('Test file copy', async () => { + await execCopydir(sourceDir, destDir); + const existFiles = [ + 'abcd', + 'xyy', + 'xx', + path.join('xyy', '.nniignore'), + path.join('xyy', 'pp'), + path.join('www', '.nniignore'), + path.join('www', 'qq'), + ] + const notExistFiles = [ + 'abc', + 'xyz', + path.join('xyy', 'abc'), + path.join('xyy', 'qq'), + path.join('www', 'pp'), + path.join('www', 'abc'), + ] + existFiles.forEach(d => assert.ok(fs.existsSync(path.join(destDir, d)))); + notExistFiles.forEach(d => assert.ok(!fs.existsSync(path.join(destDir, d)))); + }); + + it('Test file copy without ignore', async () => { + fs.unlinkSync(path.join(sourceDir, '.nniignore')); + await execCopydir(sourceDir, destDir); + assert.ok(fs.existsSync(path.join(destDir, 'abcd'))); + 
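+        // With the top-level .nniignore deleted, the previously ignored
+        // entries ('abc' and the 'xyz' directory) must now be copied as well.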
assert.ok(fs.existsSync(path.join(destDir, 'abc'))); + assert.ok(fs.existsSync(path.join(destDir, 'xyz'))); + assert.ok(fs.existsSync(path.join(destDir, 'xyy'))); + assert.ok(fs.existsSync(path.join(destDir, 'xx'))); + }); + + it('Test tar file', async () => { + const tarPath = `${destDir}.tar`; + await tarAdd(tarPath, sourceDir); + assert.ok(fs.existsSync(tarPath)); + fs.mkdirSync(destDir); + tar.extract({ + file: tarPath, + cwd: destDir, + sync: true + }) + assert.ok(fs.existsSync(path.join(destDir, 'abcd'))); + assert.ok(!fs.existsSync(path.join(destDir, 'abc'))); + }); + + it('Validate code ok', async () => { + assert.doesNotThrow(async () => validateCodeDir(sourceDir)); + }); + + it('Validate code too many files', async () => { + for (let i = 0; i < 2000; ++i) + fs.writeFileSync(path.join(sourceDir, `${i}.txt`), 'a'); + try { + await validateCodeDir(sourceDir); + } catch (error) { + chai.expect(error.message).to.contains('many files'); + return; + } + chai.expect.fail(null, null, 'Did not fail.'); + }); + + it('Validate code too many files ok', async() => { + for (let i = 0; i < 2000; ++i) + fs.writeFileSync(path.join(sourceDir, `${i}.txt`), 'a'); + fs.writeFileSync(path.join(sourceDir, '.nniignore'), '*.txt'); + assert.doesNotThrow(async () => validateCodeDir(sourceDir)); + }); +}); diff --git a/ts/nni_manager/test/training_service/kubeflowTrainingService.test.ts b/ts/nni_manager/test/training_service/kubeflowTrainingService.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..a75f7e6cbdbedd259f0339d40f97bfe0f7f07f0e --- /dev/null +++ b/ts/nni_manager/test/training_service/kubeflowTrainingService.test.ts @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import chai from 'chai'; +import chaiAsPromised from 'chai-as-promised'; +import fs from 'fs'; +import tmp from 'tmp'; +import * as component from '../../common/component'; +import { cleanupUnitTest, prepareUnitTest } from '../../common/utils'; +import { TrialConfigMetadataKey } from '../../training_service/common/trialConfigMetadataKey'; +import { KubeflowTrainingService } from '../../training_service/kubernetes/kubeflow/kubeflowTrainingService'; + +// TODO: copy mockedTrail.py to local folder +const localCodeDir: string = tmp.dirSync().name +const mockedTrialPath: string = './test/mock/mockedTrial.py' +fs.copyFileSync(mockedTrialPath, localCodeDir + '/mockedTrial.py') + +describe('Unit Test for KubeflowTrainingService', () => { + let skip: boolean = false; + let testKubeflowConfig: any; + let testKubeflowTrialConfig : any; + try { + testKubeflowConfig = JSON.parse(fs.readFileSync('../../.vscode/kubeflowCluster.json', 'utf8')); + testKubeflowTrialConfig = `{\"command\":\"python3 mnist.py\",\"codeDir\":\"/tmp/nni/examples/trials/mnist",\"gpuNum\":\"1\",\"cpuNum\":\"2\",\"memoryMB\":\"8196\",\"image\":\"msranni/nni:latest\"}`; + } catch (err) { + console.log('Please configure kubeflowCluster.json to enable kubeflow training service unit test.'); + skip = true; + } + + let kubeflowTrainingService: KubeflowTrainingService; + + console.log(tmp.dirSync().name); + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(() => { + if (skip) { + return; + } + kubeflowTrainingService = component.get(KubeflowTrainingService); + }); + + afterEach(() => { + if (skip) { + return; + } + kubeflowTrainingService.cleanUp(); + }); + + it('Set cluster metadata', async () => { 
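+        // Metadata round-trip only: submitting actual Kubeflow jobs would
+        // need a live cluster, which this unit test does not assume.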
+        if (skip) {
+            return;
+        }
+        await kubeflowTrainingService.setClusterMetadata(TrialConfigMetadataKey.KUBEFLOW_CLUSTER_CONFIG, testKubeflowConfig);
+        await kubeflowTrainingService.setClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG, testKubeflowTrialConfig);
+    });
+});
diff --git a/ts/nni_manager/test/training_service/localTrainingService.test.ts b/ts/nni_manager/test/training_service/localTrainingService.test.ts
new file mode 100644
index 0000000000000000000000000000000000000000..441e488cf72879d4c97b701753fea3df79251fe2
--- /dev/null
+++ b/ts/nni_manager/test/training_service/localTrainingService.test.ts
@@ -0,0 +1,143 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+'use strict';
+
+import chai from 'chai';
+import chaiAsPromised from 'chai-as-promised';
+import fs from 'fs';
+import path from 'path';
+import tmp from 'tmp';
+import * as component from '../../common/component';
+import { TrialJobApplicationForm, TrialJobDetail } from '../../common/trainingService';
+import { cleanupUnitTest, delay, prepareUnitTest, getExperimentRootDir } from '../../common/utils';
+import { TrialConfigMetadataKey } from '../../training_service/common/trialConfigMetadataKey';
+import { LocalTrainingService } from '../../training_service/local/localTrainingService';
+
+// copy mockedTrial.py to the local code directory used by the tests
+const localCodeDir: string = tmp.dirSync().name.split('\\').join('\\\\');
+const mockedTrialPath: string = './test/mock/mockedTrial.py';
+fs.copyFileSync(mockedTrialPath, localCodeDir + '/mockedTrial.py');
+
+describe('Unit Test for LocalTrainingService', () => {
+    const config = {
+        platform: 'local',
+        trialCommand: 'sleep 1h && echo hello',
+        trialCodeDirectory: `${localCodeDir}`,
+        trialGpuNumber: 0, // TODO: add test case for gpu?
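+        // 'sleep 1h && echo hello' keeps the trial process alive, so the
+        // cancellation tests below exercise cancelTrialJob() against a job
+        // that is still running.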
+ maxTrialNumberPerGpu: 1, + reuseMode: true, + }; + + const config2 = { + platform: 'local', + trialCommand: 'python3 mockedTrial.py', + trialCodeDirectory: `${localCodeDir}`, + trialGpuNumber: 0, + maxTrialNumberPerGpu: 1, + reuseMode: true, + }; + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + it('List empty trial jobs', async () => { + const localTrainingService = new LocalTrainingService(config); + localTrainingService.run(); + + //trial jobs should be empty, since there are no submitted jobs + chai.expect(await localTrainingService.listTrialJobs()).to.be.empty; + + localTrainingService.cleanUp(); + }); + + it('Submit job and Cancel job', async () => { + const localTrainingService = new LocalTrainingService(config); + localTrainingService.run(); + + // submit job + const form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + } + }; + const jobDetail: TrialJobDetail = await localTrainingService.submitTrialJob(form); + chai.expect(jobDetail.status).to.be.equals('WAITING'); + await localTrainingService.cancelTrialJob(jobDetail.id); + chai.expect(jobDetail.status).to.be.equals('USER_CANCELED'); + + localTrainingService.cleanUp(); + }).timeout(20000); + + it('Get trial log', async () => { + const localTrainingService = new LocalTrainingService(config); + localTrainingService.run(); + + // submit job + const form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + } + }; + + const jobDetail: TrialJobDetail = await localTrainingService.submitTrialJob(form); + + // get trial log + const rootDir: string = getExperimentRootDir() + fs.mkdirSync(path.join(rootDir, 'trials')) + fs.mkdirSync(jobDetail.workingDirectory) + fs.writeFileSync(path.join(jobDetail.workingDirectory, 'trial.log'), 'trial log') + fs.writeFileSync(path.join(jobDetail.workingDirectory, 'stderr'), 'trial stderr') + chai.expect(await localTrainingService.getTrialFile(jobDetail.id, 'trial.log')).to.be.equals('trial log'); + chai.expect(await localTrainingService.getTrialFile(jobDetail.id, 'stderr')).to.be.equals('trial stderr'); + fs.unlinkSync(path.join(jobDetail.workingDirectory, 'trial.log')) + fs.unlinkSync(path.join(jobDetail.workingDirectory, 'stderr')) + fs.rmdirSync(jobDetail.workingDirectory) + fs.rmdirSync(path.join(rootDir, 'trials')) + + await localTrainingService.cancelTrialJob(jobDetail.id); + localTrainingService.cleanUp(); + }).timeout(20000); + + it('Read metrics, Add listener, and remove listener', async () => { + const localTrainingService = new LocalTrainingService(config2); + localTrainingService.run(); + + // set meta data + // submit job + const form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + } + }; + const jobDetail: TrialJobDetail = await localTrainingService.submitTrialJob(form); + chai.expect(jobDetail.status).to.be.equals('WAITING'); + localTrainingService.listTrialJobs().then((jobList)=>{ + chai.expect(jobList.length).to.be.equals(1); + }); + // Add metrics listeners + const listener1 = function f1(metric: any) { + chai.expect(metric.id).to.be.equals(jobDetail.id); + } + localTrainingService.addTrialJobMetricListener(listener1); + // Wait to collect metric + await delay(1000); + + await localTrainingService.cancelTrialJob(jobDetail.id); + localTrainingService.removeTrialJobMetricListener(listener1); + 
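+        // detach the listener before cleanUp() so no metric callbacks fire
+        // while the service is shutting down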
localTrainingService.cleanUp(); + }).timeout(20000); +}); diff --git a/ts/nni_manager/test/training_service/remote/linuxCommands.test.ts b/ts/nni_manager/test/training_service/remote/linuxCommands.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..2a9c95e877921f013f095ce1f108e10d3b76e8a2 --- /dev/null +++ b/ts/nni_manager/test/training_service/remote/linuxCommands.test.ts @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import chai from 'chai'; +import chaiAsPromised from 'chai-as-promised'; +import * as component from '../../../common/component'; +import { cleanupUnitTest, prepareUnitTest } from '../../../common/utils'; +import { LinuxCommands } from '../../../training_service/remote_machine/extends/linuxCommands'; + + +describe('Unit Test for linuxCommands', () => { + + let linuxCommands: LinuxCommands + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(() => { + linuxCommands = component.get(LinuxCommands); + }); + + afterEach(() => { + }); + + it('joinPath', async () => { + chai.expect(linuxCommands.joinPath("/root/", "/first")).to.equal("/root/first"); + chai.expect(linuxCommands.joinPath("/root", "first")).to.equal("/root/first"); + chai.expect(linuxCommands.joinPath("/root/", "first")).to.equal("/root/first"); + chai.expect(linuxCommands.joinPath("root/", "first")).to.equal("root/first"); + chai.expect(linuxCommands.joinPath("root/")).to.equal("root/"); + chai.expect(linuxCommands.joinPath("root")).to.equal("root"); + chai.expect(linuxCommands.joinPath("./root")).to.equal("./root"); + chai.expect(linuxCommands.joinPath("")).to.equal("."); + chai.expect(linuxCommands.joinPath("..")).to.equal(".."); + }) + + it('createFolder', async () => { + chai.expect(linuxCommands.createFolder("test")).to.equal("mkdir -p 'test'"); + chai.expect(linuxCommands.createFolder("test", true)).to.equal("umask 0; mkdir -p 'test'"); + }) + + it('allowPermission', async () => { + chai.expect(linuxCommands.allowPermission(true, "test", "test1")).to.equal("chmod 777 -R 'test' 'test1'"); + chai.expect(linuxCommands.allowPermission(false, "test")).to.equal("chmod 777 'test'"); + }) + + it('removeFolder', async () => { + chai.expect(linuxCommands.removeFolder("test")).to.equal("rm -df 'test'"); + chai.expect(linuxCommands.removeFolder("test", true)).to.equal("rm -rf 'test'"); + chai.expect(linuxCommands.removeFolder("test", true, false)).to.equal("rm -r 'test'"); + chai.expect(linuxCommands.removeFolder("test", false, false)).to.equal("rm 'test'"); + }) + + it('removeFiles', async () => { + chai.expect(linuxCommands.removeFiles("test", "*.sh")).to.equal("rm 'test/*.sh'"); + chai.expect(linuxCommands.removeFiles("test", "")).to.equal("rm 'test'"); + }) + + it('readLastLines', async () => { + chai.expect(linuxCommands.readLastLines("test", 3)).to.equal("tail -n 3 'test'"); + }) + + it('isProcessAlive', async () => { + chai.expect(linuxCommands.isProcessAliveCommand("test")).to.equal("kill -0 `cat 'test'`"); + chai.expect(linuxCommands.isProcessAliveProcessOutput( + { + exitCode: 0, + stdout: "", + stderr: "" + } + )).to.equal(true); + chai.expect(linuxCommands.isProcessAliveProcessOutput( + { + exitCode: 10, + stdout: "", + stderr: "" + } + )).to.equal(false); + }) + + it('extractFile', async () => { + chai.expect(linuxCommands.extractFile("test.tar", "testfolder")).to.equal("tar -oxzf 'test.tar' -C 'testfolder'"); + }) + + 
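+
+    // Note on the flags asserted above: in GNU tar, '-o' during extraction
+    // skips restoring file ownership, so unpacking works for non-root users
+    // on the remote machine.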
it('executeScript', async () => {
+        chai.expect(linuxCommands.executeScript("test.sh", true)).to.equal("bash 'test.sh'");
+        chai.expect(linuxCommands.executeScript("test script'\"", false)).to.equal(`bash -c \"test script'\\""`);
+    })
+});
diff --git a/ts/nni_manager/test/training_service/remote/shellExecutor.test.ts b/ts/nni_manager/test/training_service/remote/shellExecutor.test.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3b30825750185e250b0807c45185a4d37c036ac4
--- /dev/null
+++ b/ts/nni_manager/test/training_service/remote/shellExecutor.test.ts
@@ -0,0 +1,114 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+'use strict';
+
+import cpp from 'child-process-promise';
+import fs from 'fs';
+import chai from 'chai';
+import chaiAsPromised from 'chai-as-promised';
+
+import { ShellExecutor } from '../../../training_service/remote_machine/shellExecutor';
+import { prepareUnitTest, cleanupUnitTest } from '../../../common/utils';
+
+const LOCALFILE: string = 'localSshUTData';
+const REMOTEFILE: string = 'remoteSshUTData';
+const REMOTEFOLDER: string = 'remoteSshUTFolder';
+
+async function copyFile(executor: ShellExecutor): Promise<void> {
+    const remoteFullName = executor.joinPath(executor.getTempPath(), REMOTEFILE);
+    await executor.copyFileToRemote(LOCALFILE, remoteFullName);
+}
+
+async function copyFileToRemoteLoop(executor: ShellExecutor): Promise<void> {
+    const remoteFullName = executor.joinPath(executor.getTempPath(), REMOTEFILE);
+    for (let i: number = 0; i < 3; i++) {
+        await executor.copyFileToRemote(LOCALFILE, remoteFullName);
+    }
+}
+
+async function getRemoteFileContentLoop(executor: ShellExecutor): Promise<void> {
+    const remoteFullName = executor.joinPath(executor.getTempPath(), REMOTEFILE);
+    for (let i: number = 0; i < 3; i++) {
+        await executor.getRemoteFileContent(remoteFullName);
+    }
+}
+
+describe('ShellExecutor test', () => {
+    let skip: boolean = false;
+    let isWindows: boolean;
+    let rmMeta: any;
+    try {
+        rmMeta = JSON.parse(fs.readFileSync('../../.vscode/rminfo.json', 'utf8'));
+        console.log(rmMeta);
+    } catch (err) {
+        console.log(`Please configure rminfo.json to enable remote machine test.
${err}`); + skip = true; + } + + before(async () => { + chai.should(); + chai.use(chaiAsPromised); + if (!fs.existsSync(LOCALFILE)){ + await cpp.exec(`echo '1234' > ${LOCALFILE}`); + } + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + fs.unlinkSync(LOCALFILE); + }); + + it('Test mkdir', async () => { + if (skip) { + return; + } + const executor: ShellExecutor = new ShellExecutor(); + await executor.initialize(rmMeta); + const remoteFullPath = executor.joinPath(executor.getTempPath(), REMOTEFOLDER); + let result = await executor.createFolder(remoteFullPath, false); + chai.expect(result).eq(true); + const commandResult = await executor.executeScript("dir"); + chai.expect(commandResult.exitCode).eq(0); + result = await executor.removeFolder(remoteFullPath); + chai.expect(result).eq(true); + await executor.close(); + }); + + it('Test ShellExecutor', async () => { + if (skip) { + return; + } + const executor: ShellExecutor = new ShellExecutor(); + await executor.initialize(rmMeta); + await copyFile(executor); + await copyFileToRemoteLoop(executor); + await getRemoteFileContentLoop(executor); + await executor.close(); + }); + + it('Test pythonPath-1', async () => { + if (skip) { + return; + } + const executor: ShellExecutor = new ShellExecutor(); + await executor.initialize(rmMeta); + const result = await executor.executeScript("ver", false, false); + isWindows = result.exitCode == 0 && result.stdout.search("Windows") > -1; + await executor.close(); + }); + + it('Test pythonPath-2', async () => { + if (skip) { + return; + } + const executor: ShellExecutor = new ShellExecutor(); + rmMeta.pythonPath = "test_python_path"; + await executor.initialize(rmMeta); + const command = isWindows ? "python -c \"import os; print(os.environ.get(\'PATH\'))\"" : "python3 -c \"import os; print(os.environ.get(\'PATH\'))\""; + const result = (await executor.executeScript(command, false, false)).stdout.replace(/[\ +\r\n]/g, ""); + chai.expect(result).contain("test_python_path"); + await executor.close(); + }); +}); diff --git a/ts/nni_manager/test/training_service/remote/windowsCommands.test.ts b/ts/nni_manager/test/training_service/remote/windowsCommands.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d39b09f47d4651a2f5598c530ce7942d1fa23c07 --- /dev/null +++ b/ts/nni_manager/test/training_service/remote/windowsCommands.test.ts @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +'use strict'; + +import chai from 'chai'; +import chaiAsPromised from 'chai-as-promised'; +import * as component from '../../../common/component'; +import { cleanupUnitTest, prepareUnitTest } from '../../../common/utils'; +import { WindowsCommands } from '../../../training_service/remote_machine/extends/windowsCommands'; + + +describe('Unit Test for Windows Commands', () => { + + let windowsCommands: WindowsCommands + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(() => { + windowsCommands = component.get(WindowsCommands); + }); + + afterEach(() => { + }); + + it('joinPath', async () => { + chai.expect(windowsCommands.joinPath("/root/", "\\first")).to.equal("\\root\\first"); + chai.expect(windowsCommands.joinPath("root/", "first")).to.equal("root\\first"); + chai.expect(windowsCommands.joinPath("\\root/", "\\first")).to.equal("\\root\\first"); + chai.expect(windowsCommands.joinPath("\\root\\", "\\first")).to.equal("\\root\\first"); + chai.expect(windowsCommands.joinPath("\\root", "first")).to.equal("\\root\\first"); + chai.expect(windowsCommands.joinPath("\\root\\", "first")).to.equal("\\root\\first"); + chai.expect(windowsCommands.joinPath("root\\", "first")).to.equal("root\\first"); + chai.expect(windowsCommands.joinPath("root\\")).to.equal("root\\"); + chai.expect(windowsCommands.joinPath("root")).to.equal("root"); + chai.expect(windowsCommands.joinPath(".\\root")).to.equal(".\\root"); + chai.expect(windowsCommands.joinPath("")).to.equal("."); + chai.expect(windowsCommands.joinPath("..")).to.equal(".."); + }) + + it('createFolder', async () => { + chai.expect(windowsCommands.createFolder("test")).to.equal("mkdir \"test\""); + chai.expect(windowsCommands.createFolder("test", true)).to.equal("mkdir \"test\"\r\nICACLS \"test\" /grant \"Users\":F"); + }) + + it('allowPermission', async () => { + chai.expect(windowsCommands.allowPermission(true, "test", "test1")).to.equal("ICACLS \"test\" /grant \"Users\":F /T\r\nICACLS \"test1\" /grant \"Users\":F /T\r\n"); + chai.expect(windowsCommands.allowPermission(false, "test")).to.equal("ICACLS \"test\" /grant \"Users\":F\r\n"); + }) + + it('removeFolder', async () => { + chai.expect(windowsCommands.removeFolder("test")).to.equal("rmdir /q \"test\""); + chai.expect(windowsCommands.removeFolder("test", true)).to.equal("rmdir /s /q \"test\""); + chai.expect(windowsCommands.removeFolder("test", true, false)).to.equal("rmdir /s \"test\""); + chai.expect(windowsCommands.removeFolder("test", false, false)).to.equal("rmdir \"test\""); + chai.expect(windowsCommands.removeFolder("test", true, true)).to.equal("rmdir /s /q \"test\""); + }) + + it('removeFiles', async () => { + chai.expect(windowsCommands.removeFiles("test", "*.sh")).to.equal("del \"test\\*.sh\""); + chai.expect(windowsCommands.removeFiles("test", "")).to.equal("del \"test\""); + }) + + it('readLastLines', async () => { + chai.expect(windowsCommands.readLastLines("test", 3)).to.equal("powershell.exe Get-Content \"test\" -Tail 3"); + }) + + it('isProcessAlive', async () => { + chai.expect(windowsCommands.isProcessAliveCommand("test")).to.equal("powershell.exe Get-Process -Id (get-content \"test\") -ErrorAction SilentlyContinue"); + chai.expect(windowsCommands.isProcessAliveProcessOutput( + { + exitCode: 0, + stdout: "", + stderr: "" + } + )).to.equal(true); + chai.expect(windowsCommands.isProcessAliveProcessOutput( + { + exitCode: 10, + stdout: "", + stderr: "" + } + )).to.equal(false); + }) + 
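+
+    // Windows counterpart of the `kill -0` liveness probe in
+    // linuxCommands.test.ts: a zero exit code is read as "process alive".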
+ it('extractFile', async () => { + chai.expect(windowsCommands.extractFile("test.tar", "testfolder")).to.equal("tar -xf \"test.tar\" -C \"testfolder\""); + }) + + it('executeScript', async () => { + chai.expect(windowsCommands.executeScript("test.sh", true)).to.equal("test.sh"); + chai.expect(windowsCommands.executeScript("test script'\"", false)).to.equal("test script'\""); + }) +}); diff --git a/ts/nni_manager/test/training_service/remoteMachineTrainingService.test.ts b/ts/nni_manager/test/training_service/remoteMachineTrainingService.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..c4fcf26265e8dc69639cf77ff631919b41232d62 --- /dev/null +++ b/ts/nni_manager/test/training_service/remoteMachineTrainingService.test.ts @@ -0,0 +1,158 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import assert from 'assert'; +import chai from 'chai'; +import chaiAsPromised from 'chai-as-promised'; +import fs from 'fs'; +import tmp from 'tmp'; +import * as component from '../../common/component'; +import { TrialJobApplicationForm, TrialJobDetail, TrainingService } from '../../common/trainingService'; +import { cleanupUnitTest, delay, prepareUnitTest } from '../../common/utils'; +import { TrialConfigMetadataKey } from '../../training_service/common/trialConfigMetadataKey'; +import { RemoteMachineTrainingService } from '../../training_service/remote_machine/remoteMachineTrainingService'; + +// copy mockedTrail.py to local folder +const localCodeDir: string = tmp.dirSync().name +const mockedTrialPath: string = './test/mock/mockedTrial.py' +fs.copyFileSync(mockedTrialPath, localCodeDir + '/mockedTrial.py') + +describe('Unit Test for RemoteMachineTrainingService', () => { + /* + To enable remote machine unit test, remote machine information needs to be configured in: + Default/.vscode/rminfo.json, whose content looks like: + { + "ip": "10.172.121.40", + "username": "user1", + "passwd": "mypassword" + } + */ + let skip: boolean = false; + let testRmInfo: any; + let machineList: any; + try { + testRmInfo = JSON.parse(fs.readFileSync('../../.vscode/rminfo.json', 'utf8')); + console.log(testRmInfo); + machineList = `[{\"ip\":\"${testRmInfo.ip}\",\"port\":22,\"username\":\"${testRmInfo.user}\",\"passwd\":\"${testRmInfo.password}\"}]`; + } catch (err) { + console.log('Please configure rminfo.json to enable remote machine unit test.'); + skip = true; + } + + let remoteMachineTrainingService: RemoteMachineTrainingService + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(() => { + if (skip) { + return; + } + remoteMachineTrainingService = component.get(RemoteMachineTrainingService); + remoteMachineTrainingService.run(); + }); + + afterEach(() => { + if (skip) { + return; + } + remoteMachineTrainingService.cleanUp(); + }); + + it('List trial jobs', async () => { + if (skip) { + return; + } + chai.expect(await remoteMachineTrainingService.listTrialJobs()).to.be.empty; + }); + + it('Set cluster metadata', async () => { + if (skip) { + return; + } + await remoteMachineTrainingService.setClusterMetadata(TrialConfigMetadataKey.MACHINE_LIST, machineList); + await remoteMachineTrainingService.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, `{"command":"sleep 1h && echo ","codeDir":"${localCodeDir}","gpuNum":1}`); + const form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + } + 
}; + const trialJob = await remoteMachineTrainingService.submitTrialJob(form); + + // After a job is cancelled, the status should be changed to 'USER_CANCELED' + await remoteMachineTrainingService.cancelTrialJob(trialJob.id); + + // After a job is cancelled, the status should be changed to 'USER_CANCELED' + const trialJob2 = await remoteMachineTrainingService.getTrialJob(trialJob.id); + chai.expect(trialJob2.status).to.be.equals('USER_CANCELED'); + + //Expect rejected if passing invalid trial job id + await remoteMachineTrainingService.cancelTrialJob(trialJob.id + 'ddd').should.eventually.be.rejected; + }); + + it('Submit job test', async () => { + if (skip) { + return; + } + }); + + it('Submit job and read metrics data', async () => { + if (skip) { + return; + } + // set machine list' + await remoteMachineTrainingService.setClusterMetadata(TrialConfigMetadataKey.MACHINE_LIST, machineList); + + // set meta data + const trialConfig: string = `{\"command\":\"python3 mockedTrial.py\", \"codeDir\":\"${localCodeDir}\",\"gpuNum\":0}` + await remoteMachineTrainingService.setClusterMetadata(TrialConfigMetadataKey.TRIAL_CONFIG, trialConfig); + + // submit job + const form: TrialJobApplicationForm = { + sequenceId: 0, + hyperParameters: { + value: 'mock hyperparameters', + index: 0 + }, + placementConstraint: { + type: "None", + gpus: [] + } + }; + const jobDetail: TrialJobDetail = await remoteMachineTrainingService.submitTrialJob(form); + // Add metrics listeners + const listener1 = function f1(_metric: any) { + } + + const listener2 = function f1(_metric: any) { + } + + remoteMachineTrainingService.addTrialJobMetricListener(listener1); + remoteMachineTrainingService.addTrialJobMetricListener(listener2); + await delay(10000); + // remove listender1 + remoteMachineTrainingService.removeTrialJobMetricListener(listener1); + await delay(5000); + }).timeout(30000); + + it('Test getTrialJob exception', async () => { + if (skip) { + return; + } + await remoteMachineTrainingService.getTrialJob('wrongid').catch((err) => { + assert(err !== undefined); + }); + }); +}); diff --git a/ts/nni_manager/test/training_service/reusable/amlClient.test.ts b/ts/nni_manager/test/training_service/reusable/amlClient.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2dbd2fceb4050a5a16862db46b7f89444500edf --- /dev/null +++ b/ts/nni_manager/test/training_service/reusable/amlClient.test.ts @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
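+//
+// parseContent extracts the value half of a 'key:value' line returned by the
+// AML run, e.g. (mirroring the assertions below):
+//
+//   amlClient.parseContent('test', 'test:1234')  // -> '1234'
+//   amlClient.parseContent('test', 'abcd:1234')  // -> ''   (key not found)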
+ +import * as chai from 'chai'; +import { cleanupUnitTest, prepareUnitTest } from '../../../common/utils'; +import chaiAsPromised = require("chai-as-promised"); +import { AMLClient } from '../../../training_service/reusable/aml/amlClient'; + + +describe('Unit Test for amlClient', () => { + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + }); + + after(() => { + cleanupUnitTest(); + }); + + it('test parseContent', async () => { + + let amlClient: AMLClient = new AMLClient('', '', '', '', '', '', '', ''); + + chai.assert.equal(amlClient.parseContent('test', 'test:1234'), '1234', "The content should be 1234"); + chai.assert.equal(amlClient.parseContent('test', 'abcd:1234'), '', "The content should be null"); + }); +}); diff --git a/ts/nni_manager/test/training_service/reusable/mountedStorageService.test.ts b/ts/nni_manager/test/training_service/reusable/mountedStorageService.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..cff262b64093db83df592889b80eca17a665ca34 --- /dev/null +++ b/ts/nni_manager/test/training_service/reusable/mountedStorageService.test.ts @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import * as chai from 'chai'; +import * as fs from 'fs'; +import * as path from 'path'; +import { getLogger, Logger } from "../../../common/log"; +import { cleanupUnitTest, prepareUnitTest } from '../../../common/utils'; +import { MountedStorageService } from "../../../training_service/reusable/storages/mountedStorageService"; +import chaiAsPromised = require("chai-as-promised"); + + +async function remove(removedPath: string, isDirectory: boolean, isRecursive: boolean): Promise { + if (isDirectory) { + if (isRecursive) { + const children = await fs.promises.readdir(removedPath); + for (const fileName of children) { + const filePath = path.join(removedPath, fileName); + const stat = await fs.promises.lstat(filePath); + await remove(filePath, stat.isDirectory(), isRecursive); + } + } + await fs.promises.rmdir(removedPath); + } else { + await fs.promises.unlink(removedPath); + } +} + +describe('Unit Test for MountedStorageService', () => { + + let service: MountedStorageService; + let log: Logger; + let localPath = "reusableut/local"; + let mountedPath = "reusableut/mounted"; + + const testPath = "testpath"; + const testFileName = "testfile.txt"; + let localCopiedPath: string; + let localFileName: string; + let mountedFileName: string; + + before(() => { + chai.should(); + chai.use(chaiAsPromised); + prepareUnitTest(); + log = getLogger(); + + const testRoot = path.dirname(__filename); + localPath = path.join(testRoot, localPath); + mountedPath = path.join(testRoot, mountedPath); + service = new MountedStorageService(); + service.initialize(localPath, mountedPath); + + localCopiedPath = path.join(localPath, testPath); + localFileName = path.join(localCopiedPath, testFileName); + mountedFileName = path.join(testPath, testFileName); + }); + + after(() => { + cleanupUnitTest(); + }); + + beforeEach(async () => { + if (!fs.existsSync(localPath)) { + await fs.promises.mkdir(localPath, { recursive: true }); + } + if (!fs.existsSync(mountedPath)) { + await fs.promises.mkdir(mountedPath, { recursive: true }); + } + log.info(`localFileName: ${localFileName}`); + + await fs.promises.mkdir(localCopiedPath, { recursive: true }); + await fs.promises.writeFile(localFileName, "hello world"); + }); + + afterEach(async () => { + const testRootPath = 
path.normalize(`${localPath}/../../reusableut`); + await remove(testRootPath, true, true); + }); + + it('copyAndRename', async () => { + await service.copyDirectory(localCopiedPath, "."); + chai.expect(fs.existsSync(mountedPath)); + + const newName = `${testFileName}new`; + await service.rename(mountedFileName, newName); + chai.assert.isFalse(fs.existsSync(testPath)); + const newTestPath = `${mountedFileName}new`; + chai.assert.isTrue(await service.exists(newTestPath)); + + await service.copyFileBack(newTestPath, "."); + const localNewFileName = `${localPath}/${newName}`; + chai.assert.isTrue(fs.existsSync(localNewFileName)); + + fs.unlinkSync(`${localFileName}`); + fs.rmdirSync(`${localPath}/${testPath}`); + await service.copyDirectoryBack(`${mountedPath}/${testPath}`, `.`); + const localNewName = `${localFileName}new`; + chai.assert.isTrue(fs.existsSync(localNewName)); + }) + + it('FileContentTest', async () => { + const savedFileName = "savedfile.txt"; + await service.save("01234", savedFileName); + chai.expect(fs.existsSync(savedFileName)); + + let content = await service.readFileContent(savedFileName, 0, -1); + chai.assert.equal(content, "01234"); + + await service.save("56789", savedFileName, true); + content = await service.readFileContent(savedFileName, 0, -1); + chai.assert.equal(content, "0123456789"); + + content = await service.readFileContent(savedFileName, -1, 1); + chai.assert.equal(content, "0"); + + content = await service.readFileContent(savedFileName, 5, 1); + chai.assert.equal(content, "5"); + + content = await service.readFileContent(savedFileName, 5, -1); + chai.assert.equal(content, "56789"); + }); +}); diff --git a/ts/nni_manager/test/training_service/reusable/trialDispatcher.test.ts b/ts/nni_manager/test/training_service/reusable/trialDispatcher.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..b31e22eab5d4d6df933d255f67aac39906f66956 --- /dev/null +++ b/ts/nni_manager/test/training_service/reusable/trialDispatcher.test.ts @@ -0,0 +1,714 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
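+//
+// Harness overview: UtEnvironmentService and UtCommandChannel stand in for a
+// real training platform, letting these tests drive the runner protocol by
+// hand. The message flow exercised by the helpers below (a sketch of the
+// exchange, not an API):
+//
+//   runner  -> dispatcher: INITIALIZED, then GPU_INFO for each node
+//   dispatcher -> runner : NEW_TRIAL_JOB once a trial is scheduled
+//   runner  -> dispatcher: TRIAL_END, whose return code decides
+//                          SUCCEEDED (0) vs FAILED (non-zero)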
+
+import * as chai from 'chai';
+import * as path from 'path';
+import { getLogger, Logger } from "../../../common/log";
+import { TrialJobApplicationForm, TrialJobStatus } from '../../../common/trainingService';
+import { cleanupUnitTest, delay, prepareUnitTest, uniqueString } from '../../../common/utils';
+import { INITIALIZED, KILL_TRIAL_JOB, NEW_TRIAL_JOB, SEND_TRIAL_JOB_PARAMETER, TRIAL_END, GPU_INFO } from '../../../core/commands';
+import { TrialConfigMetadataKey } from '../../../training_service/common/trialConfigMetadataKey';
+import { Command, CommandChannel } from '../../../training_service/reusable/commandChannel';
+import { Channel, EnvironmentInformation, EnvironmentService } from "../../../training_service/reusable/environment";
+import { TrialDetail } from '../../../training_service/reusable/trial';
+import { TrialDispatcher } from "../../../training_service/reusable/trialDispatcher";
+import { UtCommandChannel } from './utCommandChannel';
+import { UtEnvironmentService } from "./utEnvironmentService";
+import chaiAsPromised = require("chai-as-promised");
+import { promises } from 'fs';
+import { Deferred } from 'ts-deferred';
+import { NNIErrorNames, NNIError, MethodNotImplementedError } from '../../../common/errors';
+
+function createTrialForm(content: any = undefined): TrialJobApplicationForm {
+    if (content === undefined) {
+        content = {
+            "test": 1
+        };
+    }
+    const trialForm = {
+        sequenceId: 0,
+        hyperParameters: {
+            value: JSON.stringify(content),
+            index: 0
+        }
+    };
+    return trialForm;
+}
+
+async function waitResult<T>(callback: () => Promise<T | undefined>, waitMs: number = 1000, interval: number = 1, throwError: boolean = false): Promise<T | undefined> {
+    while (waitMs > 0) {
+        const result = await callback();
+        if (result !== undefined) {
+            return result;
+        }
+        await delay(interval);
+        waitMs -= interval;
+    }
+
+    if (throwError) {
+        throw new Error(`wait result timeout!\n${callback.toString()}`);
+    }
+
+    return undefined;
+}
+
+async function waitResultMust<T>(callback: () => Promise<T | undefined>, waitMs: number = 10000, interval: number = 1): Promise<T> {
+    const result = await waitResult(callback, waitMs, interval, true);
+    // this error should be thrown in waitResult already.
+    if (result === undefined) {
+        throw new Error(`wait result timeout!`);
+    }
+    return result;
+}
+
+async function newTrial(trialDispatcher: TrialDispatcher): Promise<TrialDetail> {
+    const trialDetail = await trialDispatcher.submitTrialJob(createTrialForm());
+
+    return trialDetail;
+}
+
+function newGpuInfo(gpuCount: number = 2, nodeId: string | undefined = undefined): any {
+    const gpuInfos = [];
+    for (let index = 0; index < gpuCount; index++) {
+        gpuInfos.push({
+            index: index,
+            activeProcessNum: 0,
+        });
+    }
+    const gpuInfo = {
+        gpuInfos: gpuInfos,
+        gpuCount: gpuInfos.length,
+        node: nodeId
+    };
+    return gpuInfo;
+}
+
+async function verifyTrialRunning(commandChannel: UtCommandChannel, trialDetail: TrialDetail): Promise<Command> {
+    const command = await waitResultMust<Command>(async () => {
+        return await commandChannel.testReceiveCommandFromTrialDispatcher();
+    });
+    chai.assert.equal(command.command, NEW_TRIAL_JOB, "verifyTrialRunning command type");
+    chai.assert.equal(command.data["trialId"], trialDetail.id, "verifyTrialRunning trialDetail.id should be equal.");
+
+    return command;
+}
+
+async function verifyTrialResult(commandChannel: UtCommandChannel, trialDetail: TrialDetail, returnCode: number = 0): Promise<void> {
+    const trialResult = {
+        trial: trialDetail.id,
+        code: returnCode,
+        timestamp: Date.now(),
+    };
+    if (trialDetail.environment === undefined) {
+        throw new Error(`environment shouldn't be undefined.`);
+    }
+
+    await commandChannel.testSendCommandToTrialDispatcher(trialDetail.environment, TRIAL_END, trialResult);
+    await waitResultMust(async () => {
+        return trialDetail.status !== 'RUNNING' ? true : undefined;
+    });
+    if (returnCode === 0) {
+        chai.assert.equal(trialDetail.status, 'SUCCEEDED', "trial should be succeeded");
+    } else {
+        chai.assert.equal(trialDetail.status, 'FAILED', "trial should be failed");
+    }
+}
+
+async function waitEnvironment(waitCount: number,
+    previousEnvironments: Map<string, EnvironmentInformation>,
+    environmentService: UtEnvironmentService, commandChannel: UtCommandChannel,
+    gpuCount: number = 2, nodeCount: number = 1,
+    callback: ((environment: EnvironmentInformation) => Promise<void>) | undefined = undefined): Promise<EnvironmentInformation> {
+    const waitRequestEnvironment = await waitResultMust<EnvironmentInformation>(async () => {
+        const environments = environmentService.testGetEnvironments();
+        if (environments.size === waitCount) {
+            for (const [id, environment] of environments) {
+                if (!previousEnvironments.has(id)) {
+                    previousEnvironments.set(id, environment);
+                    return environment;
+                }
+            }
+        }
+        return undefined;
+    });
+
+    if (waitRequestEnvironment === undefined) {
+        throw new Error(`waitRequestEnvironment is not defined.`);
+    }
+
+    const nodeIds: (string | undefined)[] = [];
+    waitRequestEnvironment.nodeCount = nodeCount;
+    if (nodeCount > 1) {
+        for (let index = 0; index < nodeCount; index++) {
+            nodeIds.push(uniqueString(5));
+        }
+    } else {
+        nodeIds.push(undefined);
+    }
+    for (const nodeId of nodeIds) {
+        // set runner is ready.
+        await commandChannel.testSendCommandToTrialDispatcher(waitRequestEnvironment, INITIALIZED, { node: nodeId });
+
+        if (gpuCount > 0) {
+            await commandChannel.testSendCommandToTrialDispatcher(waitRequestEnvironment, GPU_INFO, newGpuInfo(gpuCount, nodeId));
+        }
+    }
+
+    if (callback) {
+        await callback(waitRequestEnvironment);
+    }
+
+    // set env to running
+    environmentService.testSetEnvironmentStatus(waitRequestEnvironment, 'RUNNING');
+
+    await waitResultMust(async () => {
+        return waitRequestEnvironment.isRunnerReady ? true : undefined;
+    });
+
+    return waitRequestEnvironment;
+}
+
+const config: any = {
+    searchSpace: { },
+    trialCommand: 'echo hi',
+    trialCodeDirectory: path.dirname(__filename),
+    trialConcurrency: 0,
+    nniManagerIp: '127.0.0.1',
+    trainingService: {
+        platform: 'local'
+    },
+    debug: true
+};
+
+describe('Unit Test for TrialDispatcher', () => {
+
+    let trialRunPromise: Promise<void>;
+    let trialDispatcher: TrialDispatcher;
+    let commandChannel: UtCommandChannel;
+    let environmentService: UtEnvironmentService;
+    let log: Logger;
+    let previousEnvironments: Map<string, EnvironmentInformation> = new Map<string, EnvironmentInformation>();
+    const currentDir = path.dirname(__filename);
+
+    before(() => {
+        chai.should();
+        chai.use(chaiAsPromised);
+        prepareUnitTest();
+        log = getLogger();
+    });
+
+    after(() => {
+        cleanupUnitTest();
+    });
+
+    beforeEach(async () => {
+        trialDispatcher = await TrialDispatcher.construct(config);
+
+        // set ut environment
+        let environmentServiceList: EnvironmentService[] = [];
+        environmentService = new UtEnvironmentService();
+        environmentServiceList.push(environmentService);
+        trialDispatcher.environmentServiceList = environmentServiceList;
+        // set ut command channel
+        environmentService.initCommandChannel(trialDispatcher.commandEmitter);
+        commandChannel = environmentService.getCommandChannel as UtCommandChannel;
+        trialDispatcher.commandChannelSet = new Set<CommandChannel>().add(environmentService.getCommandChannel);
+        trialDispatcher.environmentMaintenceLoopInterval = 1000;
+
+        trialRunPromise = trialDispatcher.run();
+    });
+
+    afterEach(async () => {
+        previousEnvironments.clear();
+        await trialDispatcher.cleanUp();
+        environmentService.testReset();
+        await trialRunPromise;
+    });
+
+    it('reuse env', async () => {
+        let trialDetail = await newTrial(trialDispatcher);
+        await waitEnvironment(1, previousEnvironments, environmentService, commandChannel);
+        await verifyTrialRunning(commandChannel, trialDetail);
+        await verifyTrialResult(commandChannel, trialDetail, 0);
+
+        trialDetail = await newTrial(trialDispatcher);
+        await verifyTrialRunning(commandChannel, trialDetail);
+        await verifyTrialResult(commandChannel, trialDetail, -1);
+
+        chai.assert.equal(environmentService.testGetEnvironments().size, 1, "as env reused, so only 1 env should be here.");
+        const trials = await trialDispatcher.listTrialJobs();
+        chai.assert.equal(trials.length, 2, "there should be 2 trials");
+    });
+
+    it('not reusable env', async () => {
+        //trialDispatcher.setClusterMetadata(
+        //    TrialConfigMetadataKey.TRIAL_CONFIG,
+        //    JSON.stringify({
+        //        reuseEnvironment: false,
+        //        codeDir: currentDir,
+        //    }));
+
+        //let trialDetail = await newTrial(trialDispatcher);
+
+        //let environment = await waitEnvironment(1, previousEnvironments, environmentService, commandChannel);
+        //await verifyTrialRunning(commandChannel, trialDetail);
+        //await verifyTrialResult(commandChannel, trialDetail, 0);
+        //await waitResultMust(async () => {
+        //    return environment.status === 'USER_CANCELED' ?
true : undefined; + //}); + + //trialDetail = await newTrial(trialDispatcher); + + //await waitEnvironment(2, previousEnvironments, environmentService, commandChannel); + //await verifyTrialRunning(commandChannel, trialDetail); + //await verifyTrialResult(commandChannel, trialDetail, -1); + + //chai.assert.equal(environmentService.testGetEnvironments().size, 2, "as env not reused, so only 2 envs should be here."); + //const trials = await trialDispatcher.listTrialJobs(); + //chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + it('no more env', async () => { + + const trialDetail1 = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + + // set to no more environment + environmentService.testSetNoMoreEnvironment(false); + + const trialDetail2 = await newTrial(trialDispatcher); + + await verifyTrialRunning(commandChannel, trialDetail1); + await verifyTrialResult(commandChannel, trialDetail1, 0); + + await verifyTrialRunning(commandChannel, trialDetail2); + await verifyTrialResult(commandChannel, trialDetail2, -1); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1, "as env not reused, so only 1 envs should be here."); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + + it('2trial2env', async () => { + + let trialDetail1 = await newTrial(trialDispatcher); + let trialDetail2 = await newTrial(trialDispatcher); + + await waitEnvironment(2, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail1); + await verifyTrialResult(commandChannel, trialDetail1, 0); + await verifyTrialRunning(commandChannel, trialDetail2); + await verifyTrialResult(commandChannel, trialDetail2, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 2, "2 envs should be here."); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + it('3trial2env', async () => { + + let trialDetail1 = await newTrial(trialDispatcher); + let trialDetail2 = await newTrial(trialDispatcher); + + await waitEnvironment(2, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail1); + await verifyTrialResult(commandChannel, trialDetail1, 0); + await verifyTrialRunning(commandChannel, trialDetail2); + await verifyTrialResult(commandChannel, trialDetail2, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 2, "2 envs should be here."); + let trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + + + let trialDetail3 = await newTrial(trialDispatcher); + await verifyTrialRunning(commandChannel, trialDetail3); + await verifyTrialResult(commandChannel, trialDetail3, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 2, "2 envs should be here."); + trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 3, "there should be 2 trials"); + }); + + it('stop trial', async () => { + + let trialDetail1 = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail1); + await trialDispatcher.cancelTrialJob(trialDetail1.id, false); + + let command = await waitResultMust(async () => { + return await 
commandChannel.testReceiveCommandFromTrialDispatcher(); + }); + chai.assert.equal(command.command, KILL_TRIAL_JOB); + log.info(`command: ${JSON.stringify(command)}`); + chai.assert.equal(command.data, trialDetail1.id); + + await waitResultMust(async () => { + return trialDetail1.status !== 'RUNNING' ? true : undefined; + }); + + let trialDetail2 = await newTrial(trialDispatcher); + await verifyTrialRunning(commandChannel, trialDetail2); + await trialDispatcher.cancelTrialJob(trialDetail2.id, true); + command = await waitResultMust(async () => { + return await commandChannel.testReceiveCommandFromTrialDispatcher(); + }); + chai.assert.equal(command.command, KILL_TRIAL_JOB); + log.info(`command: ${JSON.stringify(command)}`); + chai.assert.equal(command.data, trialDetail2.id); + await waitResultMust(async () => { + return trialDetail2.status !== 'RUNNING' ? true : undefined; + }); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1, "both trials reuse the env, so one env"); + const trials = await trialDispatcher.listTrialJobs(); + + chai.assert.equal(trials.length, 2, "there should be 2 stopped trials"); + let trial = await trialDispatcher.getTrialJob(trialDetail1.id); + chai.assert.equal(trial.status, 'USER_CANCELED', `trial is canceled.`); + trial = await trialDispatcher.getTrialJob(trialDetail2.id); + chai.assert.equal(trial.status, 'EARLY_STOPPED', `trial is early stopped.`); + }); + + it('multi phase', async () => { + let trialDetail = await newTrial(trialDispatcher); + + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail); + + let content = { + test: 2, + } + await trialDispatcher.updateTrialJob(trialDetail.id, createTrialForm(content)); + + let command = await waitResultMust(async () => { + return await commandChannel.testReceiveCommandFromTrialDispatcher(); + }); + + chai.assert.equal(command.command, SEND_TRIAL_JOB_PARAMETER); + chai.assert.equal(command.data["trialId"], trialDetail.id); + chai.assert.equal(command.data.parameters.index, 0); + chai.assert.equal(command.data.parameters.value, JSON.stringify(content)); + + content = { + test: 3, + } + await trialDispatcher.updateTrialJob(trialDetail.id, createTrialForm(content)); + command = await waitResultMust(async () => { + return await commandChannel.testReceiveCommandFromTrialDispatcher(); + }); + chai.assert.equal(command.command, SEND_TRIAL_JOB_PARAMETER); + chai.assert.equal(command.data["trialId"], trialDetail.id); + chai.assert.equal(command.data.parameters.index, 0); + chai.assert.equal(command.data.parameters.value, JSON.stringify(content)); + + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1, "only one trial, so one env"); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 1, "there should be 1 trial"); + }); + + it('multi node', async () => { + let trialDetail = await newTrial(trialDispatcher); + + const environment = await waitEnvironment(1, previousEnvironments, environmentService, commandChannel, 2, 2); + log.debug(`environment ${JSON.stringify(environment)}`); + await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(environment.nodes.size, 2); + let command = await waitResultMust(async () => { + return await commandChannel.testReceiveCommandFromTrialDispatcher(); + }); + chai.assert.equal(command.command,
KILL_TRIAL_JOB); + chai.assert.equal(environmentService.testGetEnvironments().size, 1, "only one trial, so one env"); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 1, "there should be 1 stopped trial only"); + }); + + it('env timeout', async () => { + let trialDetail = await newTrial(trialDispatcher); + let environment = await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + environmentService.testSetEnvironmentStatus(environment, 'SUCCEEDED'); + await waitResultMust(async () => { + return environment.status === 'SUCCEEDED' ? true : undefined; + }); + trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(2, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(previousEnvironments.size, 2, "as an env timeout, so 2 envs should be here."); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + it('env failed with trial', async () => { + let trialDetail = await newTrial(trialDispatcher); + let environment = await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + await verifyTrialRunning(commandChannel, trialDetail); + + environmentService.testSetEnvironmentStatus(environment, 'FAILED'); + await waitResultMust(async () => { + return environment.status === 'FAILED' ? true : undefined; + }); + + await waitResultMust(async () => { + return trialDetail.status === 'FAILED' ? true : undefined; + }); + + chai.assert.equal(trialDetail.status, 'FAILED', "env failed, so trial also failed."); + }); + + /* FIXME: setClusterMetadata + it('GPUScheduler disabled gpuNum === undefined', async () => { + + let trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + const command = await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(command.data["gpuIndices"], undefined); + }); + + it('GPUScheduler disabled gpuNum === 0', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 0, + })); + + let trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + const command = await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(command.data["gpuIndices"], ""); + }); + + it('GPUScheduler enable no cluster gpu config', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 1, + })); + + let trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + const command = await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(command.data["gpuIndices"], "0"); + }); + + it('GPUScheduler skipped no GPU info', async () => { + trialDispatcher.setClusterMetadata( + 
TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + })); + + let trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + const command = await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(command.data["gpuIndices"], undefined); + }); + + it('GPUScheduler disabled multi-node', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 0, + })); + + let trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + const command = await verifyTrialRunning(commandChannel, trialDetail); + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(command.data["gpuIndices"], ""); + }); + + it('GPUScheduler enabled 2 gpus 2 trial', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 1, + })); + + const trialDetail1 = await newTrial(trialDispatcher); + const trialDetail2 = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + let command = await verifyTrialRunning(commandChannel, trialDetail1); + chai.assert.equal(command.data["gpuIndices"], "0"); + command = await verifyTrialRunning(commandChannel, trialDetail2); + chai.assert.equal(command.data["gpuIndices"], "1"); + + await verifyTrialResult(commandChannel, trialDetail1, 0); + await verifyTrialResult(commandChannel, trialDetail2, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + it('GPUScheduler enabled 4 gpus 2 trial(need 2 gpus)', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 2, + })); + + const trialDetail1 = await newTrial(trialDispatcher); + const trialDetail2 = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel, 4); + let command = await verifyTrialRunning(commandChannel, trialDetail1); + chai.assert.equal(command.data["gpuIndices"], "0,1"); + command = await verifyTrialRunning(commandChannel, trialDetail2); + chai.assert.equal(command.data["gpuIndices"], "2,3"); + + await verifyTrialResult(commandChannel, trialDetail1, 0); + await verifyTrialResult(commandChannel, trialDetail2, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, "there should be 2 trials"); + }); + + it('GPUScheduler enabled use 4 gpus but only 1 usable(4)', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 1, + })); + + const trialDetail = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel, 4, 1, async (environment) => { + environment.usableGpus = [3]; + }); + let command = await verifyTrialRunning(commandChannel, trialDetail); + 
chai.assert.equal(command.data["gpuIndices"], "3"); + + await verifyTrialResult(commandChannel, trialDetail, 0); + + chai.assert.equal(environmentService.testGetEnvironments().size, 1); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 1); + }); + + it('GPUScheduler enabled TMP_NO_AVAILABLE_GPU, request new env', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 1, + })); + + const trialDetail1 = await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel, 1); + let command = await verifyTrialRunning(commandChannel, trialDetail1); + chai.assert.equal(command.data["gpuIndices"], "0"); + + const trialDetail2 = await newTrial(trialDispatcher); + await waitEnvironment(2, previousEnvironments, environmentService, commandChannel, 1); + + await verifyTrialResult(commandChannel, trialDetail1, 0); + + command = await verifyTrialRunning(commandChannel, trialDetail2); + await verifyTrialResult(commandChannel, trialDetail2, 0); + chai.assert.equal(command.data["gpuIndices"], "0"); + + chai.assert.equal(environmentService.testGetEnvironments().size, 2, 'environments'); + const trials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(trials.length, 2, 'trials'); + }); + + it('GPUScheduler enabled REQUIRE_EXCEED_TOTAL, need fail', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 8, + })); + + await newTrial(trialDispatcher); + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel); + await chai.expect(trialRunPromise).rejectedWith(NNIError, "REQUIRE_EXCEED_TOTAL"); + const deferred = new Deferred(); + trialRunPromise = deferred.promise; + deferred.resolve(); + }); + + it('GPUScheduler enabled maxTrialNumberPerGpu=2, 4 trials, 2 gpus', async () => { + trialDispatcher.setClusterMetadata( + TrialConfigMetadataKey.TRIAL_CONFIG, + JSON.stringify({ + reuseEnvironment: false, + codeDir: currentDir, + gpuNum: 1, + })); + const trials = []; + + // last two trials shouldn't be in first environment. 
+ for (let index = 0; index < 6; index++) { + const trial = await newTrial(trialDispatcher); + trials.push(trial); + } + await waitEnvironment(1, previousEnvironments, environmentService, commandChannel, 2, 1, async (environment) => { + environment.maxTrialNumberPerGpu = 2; + }); + await waitEnvironment(2, previousEnvironments, environmentService, commandChannel, 2, 1, async (environment) => { + environment.maxTrialNumberPerGpu = 2; + }); + const gpuIndexMap = new Map(); + for (let index = 0; index < 6; index++) { + const trial = trials[index]; + let command = await verifyTrialRunning(commandChannel, trial); + const gpuIndex = command.data["gpuIndices"]; + const trialNumbers = gpuIndexMap.get(gpuIndex); + if (index < 4) { + if (undefined === trialNumbers) { + gpuIndexMap.set(gpuIndex, 1); + } else { + gpuIndexMap.set(gpuIndex, trialNumbers + 1); + } + } + } + chai.assert.equal(gpuIndexMap.size, 2); + chai.assert.equal(gpuIndexMap.get("0"), 2); + chai.assert.equal(gpuIndexMap.get("1"), 2); + + for (let index = 0; index < 6; index++) { + const trial = trials[index]; + await verifyTrialResult(commandChannel, trial, 0); + } + + chai.assert.equal(environmentService.testGetEnvironments().size, 2); + const listedTrials = await trialDispatcher.listTrialJobs(); + chai.assert.equal(listedTrials.length, 6); + }); + */ +}); diff --git a/ts/nni_manager/test/training_service/reusable/utCommandChannel.ts b/ts/nni_manager/test/training_service/reusable/utCommandChannel.ts new file mode 100644 index 0000000000000000000000000000000000000000..fdd879f149e612eea8a415df446d3d5ca4cce2a3 --- /dev/null +++ b/ts/nni_manager/test/training_service/reusable/utCommandChannel.ts @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { encodeCommand } from "../../../core/ipcInterface"; +import { Command, CommandChannel, RunnerConnection } from "../../../training_service/reusable/commandChannel"; +import { Channel, EnvironmentInformation } from "../../../training_service/reusable/environment"; + +class UtRunnerConnection extends RunnerConnection { + +} + +export class UtCommandChannel extends CommandChannel { + private readonly receivedCommands: Command[] = []; + + public get channelName(): Channel { + return "ut"; + } + + public async testSendCommandToTrialDispatcher(environment: EnvironmentInformation, commandType: string, commandData: any) { + const content = encodeCommand(commandType, JSON.stringify(commandData)); + this.log.debug(`UtCommandChannel: env ${environment.id} send test command ${content}`); + this.handleCommand(environment, content.toString("utf8")); + } + + public async testReceiveCommandFromTrialDispatcher(): Promise { + return this.receivedCommands.shift(); + } + + public async config(_key: string, _value: any): Promise { + // do nothing + } + + public async start(): Promise { + // do nothing + } + + public async stop(): Promise { + // do nothing + } + + public async run(): Promise { + // do nothing + } + + protected async sendCommandInternal(environment: EnvironmentInformation, message: string): Promise { + const parsedCommands = this.parseCommands(message); + for (const parsedCommand of parsedCommands) { + const command = new Command(environment, parsedCommand[0], parsedCommand[1]); + this.receivedCommands.push(command); + } + } + + protected createRunnerConnection(environment: EnvironmentInformation): RunnerConnection { + // do nothing + return new UtRunnerConnection(environment); + } +} diff --git 
a/ts/nni_manager/test/training_service/reusable/utEnvironmentService.ts b/ts/nni_manager/test/training_service/reusable/utEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..d0c887edce4ddb12cc58736efbf43109840e0f00 --- /dev/null +++ b/ts/nni_manager/test/training_service/reusable/utEnvironmentService.ts @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Channel, EnvironmentInformation, EnvironmentService, EnvironmentStatus } from "../../../training_service/reusable/environment"; +import { EventEmitter } from 'events'; +import { UtCommandChannel } from "./utCommandChannel"; + +export class UtEnvironmentService extends EnvironmentService { + private allEnvironments = new Map(); + private hasMoreEnvironmentsInternal = true; + + constructor() { + super(); + } + + public get hasStorageService(): boolean { + // storage service is tested by integration testing. + return false; + } + + public get useSharedStorage(): boolean { + return false; + } + + public get environmentMaintenceLoopInterval(): number { + return 1; + } + + public get getName(): string { + return 'ut'; + } + + public initCommandChannel(eventEmitter: EventEmitter): void { + this.commandChannel = new UtCommandChannel(eventEmitter); + } + + public testSetEnvironmentStatus(environment: EnvironmentInformation, newStatus: EnvironmentStatus): void { + environment.status = newStatus; + } + + public testReset(): void { + this.allEnvironments.clear(); + } + + public testGetEnvironments(): Map { + return this.allEnvironments; + } + + public testSetNoMoreEnvironment(hasMore: boolean): void { + this.hasMoreEnvironmentsInternal = hasMore; + } + + public get hasMoreEnvironments(): boolean { + return this.hasMoreEnvironmentsInternal; + } + + public async config(_key: string, _value: string): Promise { + // do nothing + } + + public async refreshEnvironmentsStatus(_environments: EnvironmentInformation[]): Promise { + // do nothing + } + + public async startEnvironment(environment: EnvironmentInformation): Promise { + if (!this.allEnvironments.has(environment.id)) { + this.allEnvironments.set(environment.id, environment); + environment.status = "WAITING"; + } + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise { + environment.status = "USER_CANCELED"; + } +} diff --git a/ts/nni_manager/training_service/common/clusterJobRestServer.ts b/ts/nni_manager/training_service/common/clusterJobRestServer.ts new file mode 100644 index 0000000000000000000000000000000000000000..6f26327ac5caef56c028bc9a18b56d6eea1f880c --- /dev/null +++ b/ts/nni_manager/training_service/common/clusterJobRestServer.ts @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import assert from 'assert'; +import bodyParser from 'body-parser'; +import { Request, Response, Router } from 'express'; +import fs from 'fs'; +import path from 'path'; +import { Writable } from 'stream'; +import { String } from 'typescript-string-operations'; +import * as component from 'common/component'; +import { getBasePort, getExperimentId } from 'common/experimentStartupInfo'; +import { RestServer } from 'common/restServer'; +import { getExperimentRootDir, mkDirPSync } from 'common/utils'; + +/** + * Cluster Job Training service Rest server, provides rest API to support Cluster job metrics update + * + */ +@component.Singleton +export abstract class ClusterJobRestServer extends RestServer { + private readonly API_ROOT_URL: string = '/api/v1/nni-pai'; + private readonly NNI_METRICS_PATTERN: string = `NNISDK_MEb'(?<metrics>.*?)'`; + + private readonly expId: string = getExperimentId(); + + private enableVersionCheck: boolean = true; // switch to enable version check + private versionCheckSuccess: boolean | undefined; + private errorMessage?: string; + + /** + * constructor to provide NNIRestServer's own rest property, e.g. port + */ + constructor() { + super(); + const basePort: number = getBasePort(); + assert(basePort !== undefined && basePort > 1024); + + this.port = basePort + 1; + } + + get apiRootUrl(): string { + return this.API_ROOT_URL; + } + + public get clusterRestServerPort(): number { + if (this.port === undefined) { + throw new Error('PAI Rest server port is undefined'); + } + + return this.port; + } + + public get getErrorMessage(): string | undefined { + return this.errorMessage; + } + + public set setEnableVersionCheck(versionCheck: boolean) { + this.enableVersionCheck = versionCheck; + } + + /** + * NNIRestServer's own router registration + */ + protected registerRestHandler(): void { + this.app.use(bodyParser.json()); + this.app.use(this.API_ROOT_URL, this.createRestHandler()); + } + + // Abstract method to handle trial metrics data + protected abstract handleTrialMetrics(jobId: string, trialMetrics: any[]): void; + + protected createRestHandler(): Router { + const router: Router = Router(); + + router.use((req: Request, res: Response, next: any) => { + this.log.info(`${req.method}: ${req.url}: body:`, req.body); + res.setHeader('Content-Type', 'application/json'); + next(); + }); + + router.post(`/version/${this.expId}/:trialId`, (req: Request, res: Response) => { + if (this.enableVersionCheck) { + try { + const checkResultSuccess: boolean = req.body.tag === 'VCSuccess' ?
true : false; + if (this.versionCheckSuccess !== undefined && this.versionCheckSuccess !== checkResultSuccess) { + this.errorMessage = 'Version check error, version check result is inconsistent!'; + this.log.error(this.errorMessage); + } else if (checkResultSuccess) { + this.log.info(`Version check in trialKeeper success!`); + this.versionCheckSuccess = true; + } else { + this.versionCheckSuccess = false; + this.errorMessage = req.body.msg; + } + } catch (err) { + this.log.error(`json parse metrics error: ${err}`); + res.status(500); + res.send(err.message); + } + } else { + this.log.info(`Skipping version check!`); + } + res.send(); + }); + + router.post(`/update-metrics/${this.expId}/:trialId`, (req: Request, res: Response) => { + try { + this.log.info(`Get update-metrics request, trial job id is ${req.params['trialId']}`); + this.log.info('update-metrics body is', req.body); + + this.handleTrialMetrics(req.body.jobId, req.body.metrics); + + res.send(); + } catch (err) { + this.log.error(`json parse metrics error: ${err}`); + res.status(500); + res.send(err.message); + } + }); + + router.post(`/stdout/${this.expId}/:trialId`, (req: Request, res: Response) => { + if (this.enableVersionCheck && (this.versionCheckSuccess === undefined || !this.versionCheckSuccess) + && this.errorMessage === undefined) { + this.errorMessage = `Version check failed, didn't get version check response from trialKeeper,` + + ` please check your NNI version in NNIManager and TrialKeeper!`; + } + const trialLogDir: string = path.join(getExperimentRootDir(), 'trials', req.params['trialId']); + mkDirPSync(trialLogDir); + const trialLogPath: string = path.join(trialLogDir, 'stdout_log_collection.log'); + try { + let skipLogging: boolean = false; + if (req.body.tag === 'trial' && req.body.msg !== undefined) { + const metricsContent: any = req.body.msg.match(this.NNI_METRICS_PATTERN); + if (metricsContent && metricsContent.groups) { + const key: string = 'metrics'; + this.handleTrialMetrics(req.params['trialId'], [metricsContent.groups[key]]); + skipLogging = true; + } + } + + if (!skipLogging) { + // Construct write stream to write remote trial's log into local file + const writeStream: Writable = fs.createWriteStream(trialLogPath, { + flags: 'a+', + encoding: 'utf8', + autoClose: true + }); + + writeStream.write(String.Format('{0}\n', req.body.msg)); + writeStream.end(); + } + res.send(); + } catch (err) { + this.log.error(`json parse stdout data error: ${err}`); + res.status(500); + res.send(err.message); + } + }); + + return router; + } +} diff --git a/ts/nni_manager/training_service/common/containerJobData.ts b/ts/nni_manager/training_service/common/containerJobData.ts new file mode 100644 index 0000000000000000000000000000000000000000..0ae84b5fb18a52c54a66620faabee83486ad3253 --- /dev/null +++ b/ts/nni_manager/training_service/common/containerJobData.ts @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
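// --- Editor's illustrative sketch (not part of the commit): how a trial-side
// process could report a metric to the ClusterJobRestServer defined above.
// The host, port, experiment id and trial id are placeholders; the route shape
// mirrors the `/update-metrics/:expId/:trialId` handler mounted under
// API_ROOT_URL ('/api/v1/nni-pai') in createRestHandler().
import http from 'http';

const payload = JSON.stringify({ jobId: 'trial0', metrics: ['{"default": 0.9}'] });
const req = http.request({
    host: '127.0.0.1',
    port: 51189, // assumed value; the server actually listens on basePort + 1
    method: 'POST',
    path: '/api/v1/nni-pai/update-metrics/EXP_ID/trial0',
    headers: { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(payload) },
});
req.end(payload); // the handler passes req.body.metrics to handleTrialMetrics()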
+ +export const CONTAINER_INSTALL_NNI_SHELL_FORMAT: string = +`#!/bin/bash +if python3 -c 'import nni' > /dev/null 2>&1; then + # nni module is already installed, skip + : +else + # Install nni + python3 -m pip install --user --upgrade nni +fi`; + +export const CONTAINER_INSTALL_NNI_SHELL_FORMAT_FOR_WIN: string = +`python -c "import nni" 2>$error +if ($error -ne ''){ + python -m pip install --user --upgrade nni +} +exit`; diff --git a/ts/nni_manager/training_service/common/gpuData.ts b/ts/nni_manager/training_service/common/gpuData.ts new file mode 100644 index 0000000000000000000000000000000000000000..f36efa4bac51fe6b21a9dcfbc05bc70c62fa1a7e --- /dev/null +++ b/ts/nni_manager/training_service/common/gpuData.ts @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export enum ScheduleResultType { + // Schedule succeeded + SUCCEED, + + // Temporarily, not enough GPUs are available right now + TMP_NO_AVAILABLE_GPU, + + // Cannot match the requirement even if all GPUs are available + REQUIRE_EXCEED_TOTAL +} + +/** + * GPU Information class + * Representing the dynamic and static information retrieved from nvidia-smi + */ +export class GPUInfo { + // The number of active processes running on this GPU + public activeProcessNum: number; + // Memory utilization of this GPU + public gpuMemUtil: number; + // GPU utilization of this GPU + public gpuUtil: number; + // the index number of this GPU (starting from 0) + public readonly index: number; + public gpuMemTotal: number; + public gpuMemFree: number; + public gpuMemUsed: number; + public gpuType: string; + + constructor(activeProcessNum: number, gpuMemUtil: number, gpuUtil: number, index: number, + gpuMemTotal: number, gpuMemFree: number, gpuMemUsed: number, gpuType: string) { + this.activeProcessNum = activeProcessNum; + this.gpuMemUtil = gpuMemUtil; + this.gpuUtil = gpuUtil; + this.index = index; + this.gpuMemTotal = gpuMemTotal; + this.gpuMemFree = gpuMemFree; + this.gpuMemUsed = gpuMemUsed; + this.gpuType = gpuType; + } +} + +/** + * GPU Summary for each machine + */ +export class GPUSummary { + // GPU count on the machine + public readonly gpuCount: number; + // The timestamp when the GPU summary data was queried + public readonly timestamp: string; + // The array of GPU information for each GPU card + public readonly gpuInfos: GPUInfo[]; + + constructor(gpuCount: number, timestamp: string, gpuInfos: GPUInfo[]) { + this.gpuCount = gpuCount; + this.timestamp = timestamp; + this.gpuInfos = gpuInfos; + } +} + + +export function parseGpuIndices(gpuIndices?: string): Set<number> | undefined { + if (gpuIndices === undefined) { + return undefined; + } + const indices: number[] = gpuIndices.split(',') + .map((x: string) => parseInt(x, 10)); + if (indices.length > 0) { + return new Set(indices); + } else { + throw new Error('gpuIndices cannot be empty if specified.'); + } +} + +export const GPU_INFO_COLLECTOR_FORMAT_WINDOWS: string = + ` +$env:METRIC_OUTPUT_DIR="{0}" +$app = Start-Process "python" -ArgumentList "-m nni.tools.gpu_tool.gpu_metrics_collector" -passthru -NoNewWindow \ +-redirectStandardOutput {0}\\stdout -redirectStandardError {0}\\stderr +Write $app.ID | Out-File {1} -NoNewline -encoding utf8 +`;
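// --- Editor's illustrative sketch (not part of the commit): minimal usage of
// the gpuData.ts helpers above; the import path assumes a sibling module.
import { GPUInfo, GPUSummary, parseGpuIndices } from './gpuData';

// "0,2" restricts scheduling to GPUs 0 and 2; undefined means no restriction.
const selected: Set<number> | undefined = parseGpuIndices('0,2'); // Set { 0, 2 }
const unrestricted = parseGpuIndices(undefined);                  // undefined

// One idle 16 GiB card (no active processes, 0% utilization, index 0),
// wrapped into a single-card summary for this machine.
const gpu0 = new GPUInfo(0, 0, 0, 0, 16384, 16384, 0, 'Tesla V100');
const summary = new GPUSummary(1, new Date().toISOString(), [gpu0]);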
diff --git a/ts/nni_manager/training_service/common/jobMetrics.ts b/ts/nni_manager/training_service/common/jobMetrics.ts new file mode 100644 index 0000000000000000000000000000000000000000..62b9484068448e753a13e61eefa00a51798294aa --- /dev/null +++ b/ts/nni_manager/training_service/common/jobMetrics.ts @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TrialJobStatus } from 'common/trainingService'; + +/** + * Trial job metrics class + * Representing trial job metrics properties + */ +export class JobMetrics { + public readonly jobId: string; + public readonly metrics: string[]; + public readonly jobStatus: TrialJobStatus; + public readonly endTimestamp: number; + + constructor(jobId: string, metrics: string[], jobStatus: TrialJobStatus, endTimestamp: number) { + this.jobId = jobId; + this.metrics = metrics; + this.jobStatus = jobStatus; + this.endTimestamp = endTimestamp; + } +} diff --git a/ts/nni_manager/training_service/common/trialConfig.ts b/ts/nni_manager/training_service/common/trialConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..f0ae7f88f1b1e1dfc6d3cb8bb8e33424ba5bbce3 --- /dev/null +++ b/ts/nni_manager/training_service/common/trialConfig.ts @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** + * Trial job configuration class + * Representing trial job configurable properties + */ +export class TrialConfig { + // Trial command + public readonly command: string; + + // Code directory + public readonly codeDir: string; + + // Required GPU number for trial job. The number should be in [0,100] + public readonly gpuNum: number; + + // this flag is used for UT now. + // in future, all environments should be reusable, and this can be made configurable by the user. + public reuseEnvironment: boolean | undefined = true; + + /** + * Constructor + * @param command Trial command + * @param codeDir Code directory + * @param gpuNum Required GPU number for trial job + */ + constructor(command: string, codeDir: string, gpuNum: number) { + this.command = command; + this.codeDir = codeDir; + this.gpuNum = gpuNum; + } +}
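// --- Editor's illustrative sketch (not part of the commit): constructing a
// TrialConfig from a parsed user config. The JSON literal is a made-up
// example; in NNI the value arrives as TRIAL_CONFIG metadata (see
// trialConfigMetadataKey.ts below).
import { TrialConfig } from './trialConfig';

const raw = JSON.parse('{"command": "python3 mnist.py", "codeDir": "/tmp/code", "gpuNum": 1}');
const trialConfig = new TrialConfig(raw.command, raw.codeDir, raw.gpuNum);
// reuseEnvironment defaults to true and may be overridden afterwards:
trialConfig.reuseEnvironment = false;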
diff --git a/ts/nni_manager/training_service/common/trialConfigMetadataKey.ts b/ts/nni_manager/training_service/common/trialConfigMetadataKey.ts new file mode 100644 index 0000000000000000000000000000000000000000..41c397f98d7d36056fe39bf15d83894e5b42db3d --- /dev/null +++ b/ts/nni_manager/training_service/common/trialConfigMetadataKey.ts @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** + * Enum of metadata keys for configuration + */ +export enum TrialConfigMetadataKey { + MACHINE_LIST = 'machine_list', + LOCAL_CONFIG = 'local_config', + TRIAL_CONFIG = 'trial_config', + REMOTE_CONFIG = 'remote_config', + HYBRID_CONFIG = 'hybrid_config', + EXPERIMENT_ID = 'experimentId', + MULTI_PHASE = 'multiPhase', + RANDOM_SCHEDULER = 'random_scheduler', + PAI_YARN_CLUSTER_CONFIG = 'pai_yarn_config', + PAI_CLUSTER_CONFIG = 'pai_config', + KUBEFLOW_CLUSTER_CONFIG = 'kubeflow_config', + NNI_MANAGER_IP = 'nni_manager_ip', + FRAMEWORKCONTROLLER_CLUSTER_CONFIG = 'frameworkcontroller_config', + DLTS_CLUSTER_CONFIG = 'dlts_config', + AML_CLUSTER_CONFIG = 'aml_config', + VERSION_CHECK = 'version_check', + LOG_COLLECTION = 'log_collection', + // Used to set platform for hybrid in reuse mode; + // temporarily changed, the config schema will be refactored in the future + PLATFORM_LIST = 'platform_list', + SHARED_STORAGE_CONFIG = 'shared_storage_config' +} diff --git a/ts/nni_manager/training_service/common/util.ts b/ts/nni_manager/training_service/common/util.ts new file mode 100644 index 0000000000000000000000000000000000000000..2b3a367256f8715013ee26c59084b92bc820fa53 --- /dev/null +++ b/ts/nni_manager/training_service/common/util.ts @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cpp from 'child-process-promise'; +import cp from 'child_process'; +import fs from 'fs'; +import ignore from 'ignore'; +import path from 'path'; +import tar from 'tar'; +import { getLogger } from 'common/log'; +import { String } from 'typescript-string-operations'; +import { GPU_INFO_COLLECTOR_FORMAT_WINDOWS } from './gpuData'; + +/** + * List all files in a directory except those ignored by .nniignore. + * @param root root directory of the walk + * @param relDir current directory being listed, relative to root + * @param ignoreFiles stack of .nniignore files collected while descending + */ +export function* listDirWithIgnoredFiles(root: string, relDir: string, ignoreFiles: string[]): Iterable<string> { + let ignoreFile = undefined; + const source = path.join(root, relDir); + if (fs.existsSync(path.join(source, '.nniignore'))) { + ignoreFile = path.join(source, '.nniignore'); + ignoreFiles.push(ignoreFile); + } + const ig = ignore(); + ignoreFiles.forEach((i) => ig.add(fs.readFileSync(i).toString())); + for (const d of fs.readdirSync(source)) { + const entry = path.join(relDir, d); + if (ig.ignores(entry)) + continue; + const entryStat = fs.statSync(path.join(root, entry)); + if (entryStat.isDirectory()) { + yield entry; + yield* listDirWithIgnoredFiles(root, entry, ignoreFiles); + } + else if (entryStat.isFile()) + yield entry; + } + if (ignoreFile !== undefined) { + ignoreFiles.pop(); + } +}
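// --- Editor's illustrative sketch (not part of the commit): calling the
// generator above, exactly as validateCodeDir below does. The directory is a
// placeholder; entries matched by any .nniignore on the way down are skipped.
for (const relPath of listDirWithIgnoredFiles('/tmp/my-trial-code', '', [])) {
    console.log(relPath); // e.g. 'model.py', then 'data', then 'data/train.csv'
}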
+ +/** + * Validate codeDir, calculate file count recursively under codeDir, and throw an error if any rule is broken + * + * @param codeDir codeDir in nni config file + * @returns file number under codeDir + */ +export async function validateCodeDir(codeDir: string): Promise<number> { + let fileCount: number = 0; + let fileTotalSize: number = 0; + for (const relPath of listDirWithIgnoredFiles(codeDir, '', [])) { + const d = path.join(codeDir, relPath); + fileCount += 1; + fileTotalSize += fs.statSync(d).size; + if (fileCount > 2000) { + throw new Error(`Too many files and directories (${fileCount} already scanned) in ${codeDir},` + + ` please check if it's a valid code dir`); + } + if (fileTotalSize > 300 * 1024 * 1024) { + throw new Error(`File total size too large in code dir (${fileTotalSize} bytes already scanned, exceeds 300MB).`); + } + // NOTE: We added this test in case any training service or shared storage (e.g. HDFS) does not support complex file names. + // If no bug is found for a long time, feel free to remove it. + const fileNameValid = relPath.split(path.sep).every(fpart => (fpart.match('^[a-z0-9A-Z._-]*$') !== null)); + if (!fileNameValid) { + const message = [ + `File ${relPath} in directory ${codeDir} contains spaces or special characters in its name.`, + 'This might cause problem when uploading to cloud or remote machine.', + 'If you encounter any error, please report an issue: https://github.com/microsoft/nni/issues' + ].join(' '); + getLogger('validateCodeDir').warning(message); + } + } + + return fileCount; +} + +/** + * create a new directory + * @param directory + */ +export async function execMkdir(directory: string, share: boolean = false): Promise<void> { + if (process.platform === 'win32') { + await cpp.exec(`powershell.exe New-Item -Path "${directory}" -ItemType "directory" -Force`); + } else if (share) { + await cpp.exec(`(umask 0; mkdir -p '${directory}')`); + } else { + await cpp.exec(`mkdir -p '${directory}'`); + } + + return Promise.resolve(); +} + +/** + * copy files to the directory + * @param source + * @param destination + */ +export async function execCopydir(source: string, destination: string): Promise<void> { + if (!fs.existsSync(destination)) + await fs.promises.mkdir(destination); + for (const relPath of listDirWithIgnoredFiles(source, '', [])) { + const sourcePath = path.join(source, relPath); + const destPath = path.join(destination, relPath); + if (fs.statSync(sourcePath).isDirectory()) { + if (!fs.existsSync(destPath)) { + await fs.promises.mkdir(destPath); + } + } else { + getLogger('execCopydir').debug(`Copying file from ${sourcePath} to ${destPath}`); + await fs.promises.copyFile(sourcePath, destPath); + } + } + + return Promise.resolve(); +} + +/** + * create a new file + * @param filename + */ +export async function execNewFile(filename: string): Promise<void> { + if (process.platform === 'win32') { + await cpp.exec(`powershell.exe New-Item -Path "${filename}" -ItemType "file" -Force`); + } else { + await cpp.exec(`touch '${filename}'`); + } + + return Promise.resolve(); +} + +/** + * run script using powershell or bash + * @param filePath + */ +export function runScript(filePath: string): cp.ChildProcess { + if (process.platform === 'win32') { + return cp.exec(`powershell.exe -ExecutionPolicy Bypass -file "${filePath}"`); + } else { + return cp.exec(`bash '${filePath}'`); + } +} + +/** + * output the last line of a file + * @param filePath + */ +export async function execTail(filePath: string): Promise<cpp.childProcessPromise.Result> { + let cmdresult: cpp.childProcessPromise.Result; + if (process.platform === 'win32') { + cmdresult = await cpp.exec(`powershell.exe Get-Content "${filePath}" -Tail 1`); + } else { + cmdresult = await cpp.exec(`tail -n 1 '${filePath}'`); + } + + return Promise.resolve(cmdresult); +} + +/** + * delete a directory + * @param directory + */ +export async function execRemove(directory: string): Promise<void> { + if (process.platform === 'win32') { + await cpp.exec(`powershell.exe Remove-Item "${directory}" -Recurse -Force`); + } else { + await cpp.exec(`rm -rf '${directory}'`); + } + + return Promise.resolve(); +} + +/** + * kill a process + * @param pid + */ +export async function execKill(pid: string): Promise<void> { + if (process.platform === 'win32') { + await cpp.exec(`cmd.exe /c taskkill /PID ${pid} /T /F`); + } else { + await cpp.exec(`pkill -P ${pid}`); + } + + return Promise.resolve(); +} + +/** + * get command of setting environment variable + * @param variable + *
@returns command string + */ +export function setEnvironmentVariable(variable: { key: string; value: string }): string { + if (process.platform === 'win32') { + return `$env:${variable.key}="${variable.value}"`; + } else { + return `export ${variable.key}='${variable.value}'`; + } +} + +/** + * Compress files in directory to tar file + * @param sourcePath + * @param tarPath + */ +export async function tarAdd(tarPath: string, sourcePath: string): Promise { + const fileList = []; + for (const d of listDirWithIgnoredFiles(sourcePath, '', [])) { + fileList.push(d); + } + tar.create( + { + gzip: true, + file: tarPath, + sync: true, + cwd: sourcePath, + }, + fileList + ); + return Promise.resolve(); +} + +/** + * generate script file name + * @param fileNamePrefix + */ +export function getScriptName(fileNamePrefix: string): string { + if (process.platform === 'win32') { + return String.Format('{0}.ps1', fileNamePrefix); + } else { + return String.Format('{0}.sh', fileNamePrefix); + } +} + +export function getGpuMetricsCollectorBashScriptContent(scriptFolder: string): string { + return `echo $$ > ${scriptFolder}/pid ; METRIC_OUTPUT_DIR=${scriptFolder} python3 -m nni.tools.gpu_tool.gpu_metrics_collector \ +1>${scriptFolder}/stdout 2>${scriptFolder}/stderr`; +} + +export function runGpuMetricsCollector(scriptFolder: string): void { + if (process.platform === 'win32') { + const scriptPath = path.join(scriptFolder, 'gpu_metrics_collector.ps1'); + const content = String.Format(GPU_INFO_COLLECTOR_FORMAT_WINDOWS, scriptFolder, path.join(scriptFolder, 'pid')); + fs.writeFile(scriptPath, content, { encoding: 'utf8' }, () => { runScript(scriptPath); }); + } else { + cp.exec(getGpuMetricsCollectorBashScriptContent(scriptFolder), { shell: '/bin/bash' }); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/adl/adlApiClient.ts b/ts/nni_manager/training_service/kubernetes/adl/adlApiClient.ts new file mode 100644 index 0000000000000000000000000000000000000000..a8d8607f57681d3a7baddb7e91bd65291daef64d --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/adl/adlApiClient.ts @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
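// --- Editor's illustrative sketch (not part of the commit): combining the
// cross-platform helpers from util.ts above; the import path is assumed.
import { getScriptName, setEnvironmentVariable } from '../common/util';

// 'run.ps1' on Windows, 'run.sh' elsewhere.
const scriptName = getScriptName('run');
// '$env:NNI_PLATFORM="local"' on Windows, "export NNI_PLATFORM='local'" elsewhere.
const exportLine = setEnvironmentVariable({ key: 'NNI_PLATFORM', value: 'local' });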
+ +import fs from 'fs'; +import { GeneralK8sClient, KubernetesCRDClient } from '../kubernetesApiClient'; + +/** + * Adl ClientV1 + */ +class AdlClientV1 extends KubernetesCRDClient { + /** + * constructor, to initialize adl CRD definition + */ + protected readonly namespace: string; + + public constructor(namespace: string) { + super(); + this.namespace = namespace; + this.crdSchema = JSON.parse(fs.readFileSync('./config/adl/adaptdl-crd-v1.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['adaptdl.petuum.com'].v1.namespaces(this.namespace).adaptdljobs; + } + + public get containerName(): string { + return 'main'; + } + + public async getKubernetesPods(jobName: string): Promise { + let result: Promise; + const response = await this.client.api.v1.namespaces(this.namespace).pods + .get({ qs: { labelSelector: `adaptdl/job=${jobName}` } }); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(response.body); + } else { + result = Promise.reject(`AdlClient getKubernetesPods failed, statusCode is ${response.statusCode}`); + } + return result; + } +} + +/** + * Adl Client + */ +class AdlClientFactory { + /** + * Factory method to generate operator client + */ + public static createClient(namespace: string): KubernetesCRDClient { + return new AdlClientV1(namespace); + } +} + +export { AdlClientFactory, GeneralK8sClient }; +export { AdlClientV1 } diff --git a/ts/nni_manager/training_service/kubernetes/adl/adlConfig.ts b/ts/nni_manager/training_service/kubernetes/adl/adlConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..90765ce0b8f41103d328aebb1d1b3a401bcb84f1 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/adl/adlConfig.ts @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
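// --- Editor's illustrative sketch (not part of the commit): the factory
// above hides the versioned CRD client. The namespace defaults to 'default'
// when the trial config does not set one (see adlTrainingService.ts below).
import { AdlClientFactory } from './adlApiClient';

const crdClient = AdlClientFactory.createClient('default');
// The concrete AdlClientV1 lists a job's pods via the label selector
// 'adaptdl/job=<jobName>', as shown in getKubernetesPods() above.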
+ +import {KubernetesTrialConfig} from "../kubernetesConfig"; + +/** + * Checkpoint Config + */ +export class CheckpointConfig { + public readonly storageClass: string; + + public readonly storageSize: string; + + constructor(storageClass: string, storageSize: string) { + this.storageClass = storageClass; + this.storageSize = storageSize; + } +} + +/** + * imagePullSecret Config + */ +export class ImagePullSecretConfig{ + public readonly name: string; + + constructor(name: string) { + this.name = name + } +} + +/** + * NFS Config + */ +export class NFSConfig { + public readonly server: string; + + public readonly path: string; + + public readonly containerMountPath: string; + + constructor(server: string, path: string, containerMountPath: string) { + this.server = server; + this.path = path; + this.containerMountPath = containerMountPath; + } +} + +/** + * Trial job configuration for Adl + */ +export class AdlTrialConfig extends KubernetesTrialConfig { + + public readonly command: string; + + public readonly gpuNum: number; + + public readonly image: string; + + public readonly namespace?: string; + + public readonly imagePullSecrets?: ImagePullSecretConfig[]; + + public readonly nfs?: NFSConfig; + + public readonly checkpoint?: CheckpointConfig; + + public readonly cpuNum?: number; + + public readonly memorySize?: string; + + public readonly adaptive?: boolean; // adaptive == preemptible + + constructor(codeDir: string, + command: string, gpuNum: number, + image: string, namespace?: string, + imagePullSecrets?: ImagePullSecretConfig[], + nfs?: NFSConfig, checkpoint?: CheckpointConfig, + cpuNum?: number, memorySize?: string, + adaptive?: boolean + ) { + super(codeDir); + this.command = command; + this.gpuNum = gpuNum; + this.image = image; + this.namespace = namespace; + this.imagePullSecrets = imagePullSecrets; + this.nfs = nfs; + this.checkpoint = checkpoint; + this.cpuNum = cpuNum; + this.memorySize = memorySize; + this.adaptive = adaptive; + } +} + +export type AdlJobStatus = "Pending" | "Running" | "Starting" | "Stopping" | "Failed" | "Succeeded"; diff --git a/ts/nni_manager/training_service/kubernetes/adl/adlJobInfoCollector.ts b/ts/nni_manager/training_service/kubernetes/adl/adlJobInfoCollector.ts new file mode 100644 index 0000000000000000000000000000000000000000..45bf6fd8beab19011b4efba1f8ccadc6e4413808 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/adl/adlJobInfoCollector.ts @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
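// --- Editor's illustrative sketch (not part of the commit): a minimal
// AdlTrialConfig using only the required constructor arguments; all values
// are placeholders.
import { AdlTrialConfig } from './adlConfig';

const adlConfig = new AdlTrialConfig(
    '/tmp/code',            // codeDir
    'python3 train.py',     // command
    1,                      // gpuNum
    'my-registry/train:v1'  // image
);
// Optional fields (namespace, imagePullSecrets, nfs, checkpoint, cpuNum,
// memorySize, adaptive) follow positionally when needed.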
+ +import { AdlClientV1 } from './adlApiClient'; +import { KubernetesTrialJobDetail} from '../kubernetesData'; +import { KubernetesJobInfoCollector } from '../kubernetesJobInfoCollector'; +import { AdlJobStatus } from './adlConfig'; + +/** + * Collect Adl job info from the Kubernetes cluster, and update adl job status locally + */ +export class AdlJobInfoCollector extends KubernetesJobInfoCollector { + constructor(jobMap: Map<string, KubernetesTrialJobDetail>) { + super(jobMap); + } + + protected async retrieveSingleTrialJobInfo(adlClient: AdlClientV1 | undefined, + kubernetesTrialJob: KubernetesTrialJobDetail): Promise<void> { + if (!this.statusesNeedToCheck.includes(kubernetesTrialJob.status)) { + return Promise.resolve(); + } + + if (adlClient === undefined) { + return Promise.reject('AdlClient is undefined'); + } + + let kubernetesJobInfo: any; + let kubernetesPodsInfo: any; + try { + kubernetesJobInfo = await adlClient.getKubernetesJob(kubernetesTrialJob.kubernetesJobName); + kubernetesPodsInfo = await adlClient.getKubernetesPods(kubernetesTrialJob.kubernetesJobName); + } catch (error) { + // Notice: it may not be a 'real' error, since canceling a trial job can also cause getKubernetesJob to fail. + this.log.error(`Get job ${kubernetesTrialJob.kubernetesJobName} info failed, error is ${error}`); + + // This is not treated as an error status + return Promise.resolve(); + } + /* eslint-disable require-atomic-updates */ + if (kubernetesJobInfo.status) { + const phase: AdlJobStatus = kubernetesJobInfo.status.phase; + switch (phase) { + case 'Pending': + case 'Starting': + kubernetesTrialJob.status = 'WAITING'; + if (kubernetesPodsInfo.items.length > 0){ + if (kubernetesPodsInfo.items[0].status.containerStatuses != undefined) { + const currState: any = kubernetesPodsInfo.items[0].status.containerStatuses[0].state; + if (currState.waiting != undefined) { + const msg: string = currState.waiting.reason; + if (msg == "ImagePullBackOff" || msg == "ErrImagePull") { + kubernetesTrialJob.status = 'FAILED'; + } + } + } + kubernetesTrialJob.message = kubernetesPodsInfo.items + .map((pod: any) => JSON.stringify(pod.status.containerStatuses)) + .join('\n'); + } + kubernetesTrialJob.startTime = Date.parse(kubernetesJobInfo.metadata.creationTimestamp); + break; + case 'Running': + case 'Stopping': + kubernetesTrialJob.status = 'RUNNING'; + kubernetesTrialJob.message = `Use 'nnictl log trial --trial_id ${kubernetesTrialJob.id}' to check the log stream.`; + if (kubernetesTrialJob.startTime === undefined) { + kubernetesTrialJob.startTime = Date.parse(kubernetesJobInfo.metadata.creationTimestamp); + } + break; + case 'Failed': + kubernetesTrialJob.status = 'FAILED'; + kubernetesTrialJob.message = kubernetesJobInfo.status.message; + if (kubernetesPodsInfo.items.length > 0) { + kubernetesTrialJob.message += " ; "; + kubernetesTrialJob.message += `Use 'nnictl log trial --trial_id ${kubernetesTrialJob.id}' for the path of the collected logs.`; + } + // undefined => NaN as endTime here + kubernetesTrialJob.endTime = Date.parse(kubernetesJobInfo.status.completionTimestamp); + break; + case 'Succeeded': + kubernetesTrialJob.status = 'SUCCEEDED'; + kubernetesTrialJob.endTime = Date.parse(kubernetesJobInfo.status.completionTimestamp); + kubernetesTrialJob.message = `Succeeded at ${kubernetesJobInfo.status.completionTimestamp}`; + break; + default: + } + } + /* eslint-enable require-atomic-updates */ + + return Promise.resolve(); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/adl/adlJobRestServer.ts
b/ts/nni_manager/training_service/kubernetes/adl/adlJobRestServer.ts new file mode 100644 index 0000000000000000000000000000000000000000..040942de6de0bf45961a5aef29b19f10a2e0c55c --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/adl/adlJobRestServer.ts @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import * as component from 'common/component'; +import { KubernetesJobRestServer } from '../kubernetesJobRestServer'; +import { AdlTrainingService } from './adlTrainingService'; + +/** + * Adl Training service Rest server, provides rest API to support adl job metrics update + * + */ +@component.Singleton +export class AdlJobRestServer extends KubernetesJobRestServer { + /** + * constructor to provide NNIRestServer's own rest property, e.g. port + */ + constructor() { + super(component.get(AdlTrainingService)); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/adl/adlTrainingService.ts b/ts/nni_manager/training_service/kubernetes/adl/adlTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..0c777e418ae19d9eb808296205c7e5cfea00c3c4 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/adl/adlTrainingService.ts @@ -0,0 +1,362 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import * as component from 'common/component'; + +import { String } from 'typescript-string-operations'; +import { getExperimentId } from 'common/experimentStartupInfo'; +import { + NNIManagerIpConfig, TrialJobApplicationForm, TrialJobDetail, TrialJobStatus +} from 'common/trainingService'; +import { delay, generateParamFileName, getVersion, uniqueString } from 'common/utils'; +import { TrialConfigMetadataKey } from 'training_service/common/trialConfigMetadataKey'; +import { KubernetesTrialJobDetail } from '../kubernetesData'; +import { KubernetesTrainingService } from '../kubernetesTrainingService'; +import { AdlClientFactory } from './adlApiClient' +import { AdlJobInfoCollector } from './adlJobInfoCollector'; +import { AdlJobRestServer } from './adlJobRestServer'; +import { AdlTrialConfig } from './adlConfig' + +/** + * Training Service implementation for Adl + */ +@component.Singleton +class AdlTrainingService extends KubernetesTrainingService implements KubernetesTrainingService { + private adlTrialConfig?: AdlTrialConfig; + private readonly adlJobInfoCollector: AdlJobInfoCollector; + private configmapTemplateStr: string; + private jobTemplateStr: string; + private pvcTemplateStr: string; + private tensorboardPvcTemplate: any; + private tensorboardDeploymentTemplate: any; + //TODO: change the logic here when we want to support multiple tensorboard + private tensorboardName: string = "adaptdl-tensorboard-" + getExperimentId().toLowerCase(); + + constructor() { + super(); + this.adlJobInfoCollector = new AdlJobInfoCollector(this.trialJobsMap); + this.experimentId = getExperimentId(); + this.configmapTemplateStr = fs.readFileSync( + './config/adl/adaptdl-nni-configmap-template.json', 'utf8'); + this.jobTemplateStr = fs.readFileSync('./config/adl/adaptdljob-template.json', 'utf8'); + this.pvcTemplateStr = fs.readFileSync('./config/adl/adaptdl-pvc-template.json', 'utf8'); + this.tensorboardPvcTemplate = JSON.parse( + fs.readFileSync('./config/adl/adaptdl-tensorboard-pvc-template.json', 'utf8')); + this.tensorboardDeploymentTemplate = JSON.parse( + fs.readFileSync('./config/adl/adaptdl-tensorboard-deployment-template.json', 'utf8')); + + 
this.log.info('Construct Adl training service.');
+    }
+
+    public async run(): Promise<void> {
+        this.log.info(this.tensorboardName);
+        this.log.info('Start tensorboard deployment.');
+        await this.launchTensorboard();
+
+        this.log.info('Run Adl training service.');
+        this.kubernetesJobRestServer = component.get(AdlJobRestServer);
+        if (this.kubernetesJobRestServer === undefined) {
+            throw new Error('kubernetesJobRestServer not initialized!');
+        }
+        await this.kubernetesJobRestServer.start();
+        this.kubernetesJobRestServer.setEnableVersionCheck = this.versionCheck;
+        this.log.info(`Adl Training service rest server listening on: ${this.kubernetesJobRestServer.endPoint}`);
+        while (!this.stopping) {
+            // collect metrics for Adl jobs by interacting with Kubernetes API server
+            await delay(3000);
+            await this.adlJobInfoCollector.retrieveTrialStatus(this.kubernetesCRDClient);
+            if (this.kubernetesJobRestServer.getErrorMessage !== undefined) {
+                throw new Error(this.kubernetesJobRestServer.getErrorMessage);
+            }
+        }
+        this.log.info('Adl training service exit.');
+    }
+    private async launchTensorboard(): Promise<void> {
+        // Start the tensorboard at the beginning of the experiment.
+        if (this.adlTrialConfig === undefined) {
+            throw new Error('Adl trial config is undefined');
+        }
+        // Create tensorboard deployment
+        this.tensorboardDeploymentTemplate.metadata.name = this.tensorboardName;
+        this.tensorboardDeploymentTemplate.metadata.labels.expId = this.experimentId;
+        this.tensorboardDeploymentTemplate.spec.selector.matchLabels.app = this.tensorboardName;
+        this.tensorboardDeploymentTemplate.spec.template.metadata.labels.app = this.tensorboardName;
+        this.tensorboardDeploymentTemplate.spec.template.spec.volumes[0]
+            .persistentVolumeClaim.claimName = this.tensorboardName;
+        const deploymentUid: string = await this.genericK8sClient.createDeployment(this.tensorboardDeploymentTemplate);
+        // Create pvc
+        this.tensorboardPvcTemplate.metadata.name = this.tensorboardName;
+        this.tensorboardPvcTemplate.metadata.ownerReferences[0].name = this.tensorboardName;
+        this.tensorboardPvcTemplate.metadata.ownerReferences[0].uid = deploymentUid;
+        if (this.adlTrialConfig.checkpoint !== undefined) {
+            this.tensorboardPvcTemplate.spec.resources.requests.storage = this.adlTrialConfig.checkpoint.storageSize;
+            this.tensorboardPvcTemplate.spec.storageClassName = this.adlTrialConfig.checkpoint.storageClass;
+        }
+        else {
+            this.tensorboardPvcTemplate.spec.resources.requests.storage = "1Gi";
+            this.tensorboardPvcTemplate.spec.storageClassName = await this.genericK8sClient.getStorageClass();
+        }
+        await this.genericK8sClient.createPersistentVolumeClaim(this.tensorboardPvcTemplate);
+
+        return Promise.resolve();
+    }
+
+    public async submitTrialJob(form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+        if (this.kubernetesCRDClient === undefined) {
+            throw new Error('Adl job operator client is undefined');
+        }
+
+        if (this.adlTrialConfig === undefined) {
+            throw new Error('Adl trial config is undefined');
+        }
+
+        if (this.kubernetesRestServerPort === undefined) {
+            const restServer: AdlJobRestServer = component.get(AdlJobRestServer);
+            this.kubernetesRestServerPort = restServer.clusterRestServerPort;
+        }
+
+        const trialJobId: string = uniqueString(5);
+        const adlJobName: string = `nni-exp-${this.experimentId}-trial-${trialJobId}`.toLowerCase();
+        const initStatus: TrialJobStatus = 'WAITING';
+        const codeDir = this.adlTrialConfig.codeDir;
+        const outputDir = "output";
+        const trialJobDetail: KubernetesTrialJobDetail = new KubernetesTrialJobDetail(
+            trialJobId,
+            initStatus,
+            Date.now(),
+            codeDir,
+            form,
+            adlJobName,
+            outputDir
+        );
+
+        // Create adljob
+        const job: any = JSON.parse(this.jobTemplateStr);
+        job.metadata.name = adlJobName;
+        job.metadata.labels.app = this.NNI_KUBERNETES_TRIAL_LABEL;
+        job.metadata.labels.expId = this.experimentId;
+        job.metadata.labels.trialId = trialJobId;
+        if (this.adlTrialConfig.adaptive !== undefined) {
+            job.spec.preemptible = this.adlTrialConfig.adaptive;
+        }
+        job.spec.template.spec.containers[0]
+            .image = this.adlTrialConfig.image;
+        job.spec.template.spec.volumes[0]
+            .persistentVolumeClaim.claimName = adlJobName;
+        job.spec.template.spec.volumes[1]
+            .persistentVolumeClaim.claimName = this.tensorboardName;
+        job.spec.template.spec.volumes[2]
+            .configMap.name = adlJobName;
+        // Handle Pod Resource
+        let cpu: number = 1;
+        let memory: string = "1Gi";
+        if (this.adlTrialConfig.cpuNum !== undefined) {
+            cpu = this.adlTrialConfig.cpuNum;
+        }
+        if (this.adlTrialConfig.memorySize !== undefined) {
+            memory = this.adlTrialConfig.memorySize;
+        }
+        job.spec.template.spec.containers[0]
+            .resources.requests.memory = memory;
+        job.spec.template.spec.containers[0]
+            .resources.requests.cpu = cpu;
+        job.spec.template.spec.containers[0]
+            .resources.limits["nvidia.com/gpu"] = this.adlTrialConfig.gpuNum;
+        // Handle imagePullSecrets
+        if (this.adlTrialConfig.imagePullSecrets !== undefined) {
+            job.spec.template.spec.imagePullSecrets = job.spec.template.spec
+                .imagePullSecrets.concat(this.adlTrialConfig.imagePullSecrets);
+        }
+        // Handle NFS
+        if (this.adlTrialConfig.nfs !== undefined) {
+            job.spec.template.spec.volumes.push({
+                "name": "nfs",
+                "nfs": {
+                    "server": this.adlTrialConfig.nfs.server,
+                    "path": this.adlTrialConfig.nfs.path,
+                    "readOnly": false
+                }
+            });
+            job.spec.template.spec.containers[0].volumeMounts.push({
+                "name": "nfs",
+                "mountPath": this.adlTrialConfig.nfs.containerMountPath
+            });
+        }
+        await this.kubernetesCRDClient.createKubernetesJob(job);
+        const k8sadlJob: any = await this.kubernetesCRDClient.getKubernetesJob(adlJobName);
+
+        // Create pvc
+        const pvc: any = JSON.parse(this.pvcTemplateStr);
+        pvc.metadata.name = adlJobName;
+        pvc.metadata.ownerReferences[0].name = adlJobName;
+        pvc.metadata.ownerReferences[0].uid = k8sadlJob.metadata.uid;
+        if (this.adlTrialConfig.checkpoint !== undefined) {
+            pvc.spec.resources.requests.storage = this.adlTrialConfig
+                .checkpoint.storageSize;
+            pvc.spec.storageClassName = this.adlTrialConfig.checkpoint.storageClass;
+        }
+        else {
+            pvc.spec.resources.requests.storage = "1Gi";
+            pvc.spec.storageClassName = await this.genericK8sClient.getStorageClass();
+        }
+        await this.genericK8sClient.createPersistentVolumeClaim(pvc);
+
+        // prepare the run script, convert it to a configmap, and mount it
+        const configmap: any = JSON.parse(this.configmapTemplateStr);
+        configmap.metadata.name = adlJobName;
+        configmap.metadata.ownerReferences[0].name = adlJobName;
+        configmap.metadata.ownerReferences[0].uid = k8sadlJob.metadata.uid;
+        configmap.data["run.sh"] = await this.prepareRunScript(
+            trialJobId, form, codeDir, outputDir);
+        const cleanupScriptTemplate: string =
+`#!/bin/bash
+ps aux | grep "python3 -m nni.tools.trial_tool.trial_keeper" | awk '{print $2}' | xargs kill -2
+while true;
+do
+    proc=\`ps aux | grep "python3 -m nni.tools.trial_tool.trial_keeper" | awk '{print $2}' | grep "" -c\`
+    if (( $proc == 1 )); then
+        exit 0
+    else
+        echo "waiting"
+    fi
+    sleep 1
+done
+`;
+        configmap.data["cleanup.sh"] = cleanupScriptTemplate;
+        await this.genericK8sClient.createConfigMap(configmap);
+
+        // Record the trial job detail now that the Adl job has been created successfully
+        this.trialJobsMap.set(trialJobId, trialJobDetail);
+
+        return Promise.resolve(trialJobDetail);
+    }
+
+    private async prepareRunScript(jobId: string,
+                                   form: TrialJobApplicationForm,
+                                   codeDir: string,
+                                   outputDir: string): Promise<string> {
+        if (this.adlTrialConfig === undefined) {
+            throw new Error('Adl trial config is undefined');
+        }
+
+        if (this.kubernetesRestServerPort === undefined) {
+            throw new Error('Adl rest server port is undefined');
+        }
+
+        if (this.nniManagerIpConfig === undefined) {
+            throw new Error('Adl nniManager ip config is undefined');
+        }
+
+        const expId: string = this.experimentId;
+        const seqId: string = form.sequenceId.toString();
+        const command: string = this.adlTrialConfig.command;
+        const hyperParameters: string = form.hyperParameters.value;
+        const hyperParametersFile: string = generateParamFileName(form.hyperParameters);
+        const nniManagerPort: string = this.kubernetesRestServerPort.toString();
+        const nniManagerIp: string = this.nniManagerIpConfig.nniManagerIp;
+        let nniManagerVersion: string = '';
+        if (this.versionCheck) {
+            nniManagerVersion = await getVersion();
+        }
+
+        let nvidiaScript: string = '';
+        if (this.adlTrialConfig.gpuNum === 0) {
+            nvidiaScript = 'export HIP_VISIBLE_DEVICES=';
+        }
+
+        const runScriptTemplate: string =
+`#!/bin/bash
+export NNI_PLATFORM=adl
+export MULTI_PHASE=false
+export NNI_SYS_DIR={0}
+export NNI_CODE_DIR={0}
+export NNI_OUTPUT_DIR={1}
+export NNI_TRIAL_JOB_ID={2}
+export NNI_EXP_ID={3}
+export NNI_TRIAL_SEQ_ID={4}
+mkdir -p $NNI_OUTPUT_DIR
+{5}
+echo '{6}' > $NNI_CODE_DIR/{7}
+python3 -m nni.tools.trial_tool.trial_keeper --trial_command '{8}' \
+--nnimanager_ip {9} --nnimanager_port {10} \
+--nni_manager_version '{11}' --log_collection '{12}'
+`;
+        const runScript = String.Format(
+            runScriptTemplate, codeDir, outputDir,
+            jobId, expId, seqId, nvidiaScript,
+            hyperParameters, hyperParametersFile, command,
+            nniManagerIp, nniManagerPort, nniManagerVersion,
+            this.logCollection);
+        return Promise.resolve(runScript);
+    }
+
+    public async cleanUp(): Promise<void> {
+        await super.cleanUp();
+
+        // Delete Tensorboard deployment
+        try {
+            await this.genericK8sClient.deleteDeployment("adaptdl-tensorboard-" + this.experimentId.toLowerCase());
+            this.log.info('tensorboard deployment deleted');
+        } catch (error) {
+            this.log.error(`tensorboard deployment deletion failed: ${error.message}`);
+        }
+    }
+
+    public async setClusterMetadata(key: string, value: string): Promise<void> {
+        this.log.info('SetCluster ' + key + ', ' + value);
+        switch (key) {
+            case TrialConfigMetadataKey.NNI_MANAGER_IP:
+                this.nniManagerIpConfig = JSON.parse(value);
+                break;
+            case TrialConfigMetadataKey.TRIAL_CONFIG: {
+                this.adlTrialConfig = JSON.parse(value);
+                let namespace: string = 'default';
+                if (this.adlTrialConfig.namespace !== undefined) {
+                    namespace = this.adlTrialConfig.namespace;
+                }
+                this.genericK8sClient.setNamespace = namespace;
+                this.kubernetesCRDClient = AdlClientFactory.createClient(namespace);
+                break;
+            }
+            case TrialConfigMetadataKey.VERSION_CHECK:
+                this.versionCheck = (value === 'true' || value === 'True');
+                break;
+            case TrialConfigMetadataKey.LOG_COLLECTION:
+                this.logCollection = value;
+                break;
+            default:
+        }
+
+        return Promise.resolve();
+    }
+
+    public getClusterMetadata(key: string): Promise<string> {
+        let result: string;
+        switch (key) {
+            case TrialConfigMetadataKey.TRIAL_CONFIG:
+                if (this.adlTrialConfig === undefined) {
+                    return Promise.reject(`${key} is not set yet`);
+                }
+
+                result = JSON.stringify(this.adlTrialConfig);
+                break;
+            case TrialConfigMetadataKey.NNI_MANAGER_IP:
+                if (this.nniManagerIpConfig === undefined) {
+                    return Promise.reject(`${key} is not set yet`);
+                }
+
+                result = JSON.stringify(this.nniManagerIpConfig);
+                break;
+            default:
+                return Promise.reject(`${key} not set`);
+        }
+
+        return Promise.resolve(result);
+    }
+
+    public async updateTrialJob(_1: any, _2: any): Promise<TrialJobDetail> {
+        throw new Error('not supported');
+    }
+}
+export { AdlTrainingService };
diff --git a/ts/nni_manager/training_service/kubernetes/azureStorageClientUtils.ts b/ts/nni_manager/training_service/kubernetes/azureStorageClientUtils.ts
new file mode 100644
index 0000000000000000000000000000000000000000..e77f52bb17f403dfa66426c8b7a2717565e51184
--- /dev/null
+++ b/ts/nni_manager/training_service/kubernetes/azureStorageClientUtils.ts
@@ -0,0 +1,218 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import azureStorage from 'azure-storage';
+import fs from 'fs';
+import path from 'path';
+import { Deferred } from 'ts-deferred';
+import { String } from 'typescript-string-operations';
+import { getLogger } from 'common/log';
+import { mkDirP } from 'common/utils';
+
+export namespace AzureStorageClientUtility {
+
+    /**
+     * create azure share
+     * @param fileServerClient
+     * @param azureShare
+     */
+    export async function createShare(fileServerClient: any, azureShare: any): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        fileServerClient.createShareIfNotExists(azureShare, (error: any, _result: any, _response: any) => {
+            if (error) {
+                getLogger('AzureStorageClientUtility')
+                    .error(`Create share failed: ${error}`);
+                deferred.resolve(false);
+            } else {
+                deferred.resolve(true);
+            }
+        });
+
+        return deferred.promise;
+    }
+
+    /**
+     * Create a new directory (NOT recursively) in azure file storage.
+     * @param fileServerClient
+     * @param azureFolder
+     * @param azureShare
+     */
+    export async function createDirectory(fileServerClient: azureStorage.FileService, azureFolder: any, azureShare: any): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        fileServerClient.createDirectoryIfNotExists(azureShare, azureFolder, (error: any, _result: any, _response: any) => {
+            if (error) {
+                getLogger('AzureStorageClientUtility')
+                    .error(`Create directory failed: ${error}`);
+                deferred.resolve(false);
+            } else {
+                deferred.resolve(true);
+            }
+        });
+        return deferred.promise;
+    }
+
+    /**
+     * Create a new directory recursively in azure file storage
+     * @param fileServerClient
+     * @param azureDirectory
+     */
+    export async function createDirectoryRecursive(fileServerClient: azureStorage.FileService, azureDirectory: string,
+                                                   azureShare: any): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        const directories: string[] = azureDirectory.split('/');
+        let rootDirectory: string = '';
+        for (const directory of directories) {
+            rootDirectory += directory;
+            const result: boolean = await createDirectory(fileServerClient, rootDirectory, azureShare);
+            if (!result) {
+                deferred.resolve(false);
+                return deferred.promise;
+            }
+            rootDirectory += '/';
+        }
+        deferred.resolve(true);
+
+        return deferred.promise;
+    }
+
+    /**
+     * upload a file to azure storage
+     * @param fileServerClient
+     * @param azureDirectory
+     * @param azureFileName
+     * @param azureShare
+     * @param localFilePath
+     */
+    async function uploadFileToAzure(fileServerClient: any, azureDirectory: string, azureFileName: any, azureShare: any,
+                                     localFilePath: string): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        await fileServerClient.createFileFromLocalFile(azureShare, azureDirectory, azureFileName, localFilePath,
+            (error: any, _result: any, _response: any) => {
+                if (error) {
+                    getLogger('AzureStorageClientUtility')
+                        .error(`Upload file failed: ${error}`);
+                    deferred.resolve(false);
+                } else {
+                    deferred.resolve(true);
+                }
+            });
+
+        return deferred.promise;
+    }
+
+    /**
+     * download a file from azure storage
+     * @param fileServerClient
+     * @param azureDirectory
+     * @param azureFileName
+     * @param azureShare
+     * @param localFilePath
+     */
+    async function downloadFile(fileServerClient: any, azureDirectory: string, azureFileName: any, azureShare: any,
+                                localFilePath: string): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        await fileServerClient.getFileToStream(azureShare, azureDirectory, azureFileName, fs.createWriteStream(localFilePath),
+            (error: any, _result: any, _response: any) => {
+                if (error) {
+                    getLogger('AzureStorageClientUtility')
+                        .error(`Download file failed: ${error}`);
+                    deferred.resolve(false);
+                } else {
+                    deferred.resolve(true);
+                }
+            });
+
+        return deferred.promise;
+    }
+
+    /**
+     * Upload a directory to azure file storage
+     * @param fileServerClient : the client of file server
+     * @param azureDirectory : the directory in azure file storage
+     * @param azureShare : the azure share used
+     * @param localDirectory : local directory to be uploaded
+     */
+    export async function uploadDirectory(fileServerClient: azureStorage.FileService, azureDirectory: string, azureShare: any,
+                                          localDirectory: string): Promise<boolean> {
+        const deferred: Deferred<boolean> = new Deferred<boolean>();
+        const fileNameArray: string[] = fs.readdirSync(localDirectory);
+        const result: boolean = await createDirectoryRecursive(fileServerClient, azureDirectory, azureShare);
+        if (!result) {
+            deferred.resolve(false);
+            return deferred.promise;
+        }
+        for (const fileName of fileNameArray) {
+            const fullFilePath: string = path.join(localDirectory, fileName);
+            try {
+                let resultUploadFile: boolean = true;
+                let resultUploadDir: boolean = true;
+                if (fs.lstatSync(fullFilePath)
+                    .isFile()) {
+                    resultUploadFile = await uploadFileToAzure(fileServerClient, azureDirectory, fileName, azureShare, fullFilePath);
+                } else {
+                    // If filePath is a directory, recursively copy it to azure
+                    resultUploadDir = await uploadDirectory(fileServerClient, String.Format('{0}/{1}', azureDirectory, fileName), azureShare, fullFilePath);
+                }
+                if (!(resultUploadFile && resultUploadDir)) {
+                    deferred.resolve(false);
+                    return deferred.promise;
+                }
+            } catch (error) {
+                deferred.resolve(false);
+
+                return deferred.promise;
+            }
+        }
+        // All files/directories are copied successfully, resolve
+        deferred.resolve(true);
+
+        return deferred.promise;
+    }
+
+    /**
+     * download a directory from azure
+     * @param fileServerClient
+     * @param azureDirectory
+     * @param azureShare
+     * @param localDirectory
+     */
+    export async function downloadDirectory(fileServerClient: any, azureDirectory: string, azureShare: any, localDirectory: string):
+        Promise<void> {
+        const deferred: Deferred<void> = new Deferred<void>();
+        await mkDirP(localDirectory);
+        fileServerClient.listFilesAndDirectoriesSegmented(azureShare, azureDirectory, 'null',
+            async (_error: any, result: any, _response: any) => {
+                if (('entries' in result) === false) {
+                    getLogger('AzureStorageClientUtility')
+                        .error(`list files failed, can't get entries in result`);
+                    throw new Error(`list files failed, can't get entries in result`);
+                }
+
+                if (('files' in result.entries) === false) {
+                    getLogger('AzureStorageClientUtility')
+                        .error(`list files failed, can't get files in result['entries']`);
+                    throw new Error(`list files failed, can't get files in result['entries']`);
+                }
+
+                if (('directories' in result.entries) === false) {
+                    getLogger('AzureStorageClientUtility')
+                        .error(`list files failed, can't get directories in result['entries']`);
+                    throw new Error(`list files failed, can't get directories in result['entries']`);
+                }
+
+                for (const fileName of result.entries.files) {
+                    const fullFilePath: string = path.join(localDirectory, fileName.name);
+                    await downloadFile(fileServerClient, azureDirectory, fileName.name, azureShare, fullFilePath);
+                }
+
+                for (const directoryName of result.entries.directories) {
+                    const fullDirectoryPath: string = path.join(localDirectory, directoryName.name);
+                    const fullAzureDirectory: string = path.join(azureDirectory, directoryName.name);
+                    await downloadDirectory(fileServerClient, fullAzureDirectory, azureShare, fullDirectoryPath);
+                }
+                deferred.resolve();
+            });
+
+        return deferred.promise;
+    }
+}
diff --git a/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerApiClient.ts b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerApiClient.ts
new file mode 100644
index 0000000000000000000000000000000000000000..64598f2ef5bbdb6fb289d46a8cc8684d6de05bcd
--- /dev/null
+++ b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerApiClient.ts
@@ -0,0 +1,43 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
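+//
+// Illustrative usage sketch (comment only; the job config variable name is
+// hypothetical): the factory below returns a namespaced CRD client, e.g.
+//   const client = FrameworkControllerClientFactory.createClient('nni');
+//   await client.createKubernetesJob(frameworkJobConfig);
+// where frameworkJobConfig is a Framework resource object like the one built
+// by the frameworkcontroller training service in this change.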
+ +import fs from 'fs'; +import {GeneralK8sClient, KubernetesCRDClient} from '../kubernetesApiClient'; + +/** + * FrameworkController ClientV1 + */ +class FrameworkControllerClientV1 extends KubernetesCRDClient { + /** + * constructor, to initialize frameworkcontroller CRD definition + */ + public namespace: string; + public constructor(namespace?: string) { + super(); + this.namespace = namespace ? namespace : "default" + this.crdSchema = JSON.parse(fs.readFileSync('./config/frameworkcontroller/frameworkcontrollerjob-crd-v1.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['frameworkcontroller.microsoft.com'].v1.namespaces(this.namespace).frameworks; + } + + public get containerName(): string { + return 'framework'; + } +} + +/** + * FrameworkController Client + */ +class FrameworkControllerClientFactory { + /** + * Factory method to generate operator client + */ + public static createClient(namespace?: string): KubernetesCRDClient { + return new FrameworkControllerClientV1(namespace); + } +} + +export {FrameworkControllerClientFactory, GeneralK8sClient}; diff --git a/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerConfig.ts b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..839ca2729d4acf486ab9441761319bcd103c9d95 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerConfig.ts @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; + +import { + AzureStorage, KeyVaultConfig, KubernetesClusterConfig, KubernetesClusterConfigAzure, KubernetesClusterConfigNFS, + KubernetesStorageKind, KubernetesTrialConfig, KubernetesTrialConfigTemplate, NFSConfig, StorageConfig +} from '../kubernetesConfig'; + +export class FrameworkAttemptCompletionPolicy { + public readonly minFailedTaskCount: number; + public readonly minSucceededTaskCount: number; + constructor(minFailedTaskCount: number, minSucceededTaskCount: number) { + this.minFailedTaskCount = minFailedTaskCount; + this.minSucceededTaskCount = minSucceededTaskCount; + } +} + +/** + * Trial job configuration for FrameworkController + */ +export class FrameworkControllerTrialConfigTemplate extends KubernetesTrialConfigTemplate { + public readonly frameworkAttemptCompletionPolicy: FrameworkAttemptCompletionPolicy; + public readonly name: string; + public readonly taskNum: number; + constructor(name: string, taskNum: number, command: string, gpuNum: number, + cpuNum: number, memoryMB: number, image: string, + frameworkAttemptCompletionPolicy: FrameworkAttemptCompletionPolicy, privateRegistryFilePath?: string | undefined) { + super(command, gpuNum, cpuNum, memoryMB, image, privateRegistryFilePath); + this.frameworkAttemptCompletionPolicy = frameworkAttemptCompletionPolicy; + this.name = name; + this.taskNum = taskNum; + } +} + +export class FrameworkControllerTrialConfig extends KubernetesTrialConfig { + public readonly taskRoles: FrameworkControllerTrialConfigTemplate[]; + public readonly codeDir: string; + constructor(codeDir: string, taskRoles: FrameworkControllerTrialConfigTemplate[]) { + super(codeDir); + this.taskRoles = taskRoles; + this.codeDir = codeDir; + } +} + +export class FrameworkControllerClusterConfig extends KubernetesClusterConfig { + public readonly serviceAccountName: string; + 
constructor(apiVersion: string, serviceAccountName: string, _configPath?: string, namespace?: string) { + super(apiVersion, undefined, namespace); + this.serviceAccountName = serviceAccountName; + } +} + +export class FrameworkControllerClusterConfigNFS extends KubernetesClusterConfigNFS { + public readonly serviceAccountName: string; + public readonly configPath?: string; + constructor( + serviceAccountName: string, + apiVersion: string, + nfs: NFSConfig, + storage?: KubernetesStorageKind, + namespace?: string, + configPath?: string + ) { + super(apiVersion, nfs, storage, namespace); + this.serviceAccountName = serviceAccountName; + this.configPath = configPath + } + + public static getInstance(jsonObject: object): FrameworkControllerClusterConfigNFS { + const kubernetesClusterConfigObjectNFS: FrameworkControllerClusterConfigNFS = jsonObject; + assert(kubernetesClusterConfigObjectNFS !== undefined); + + return new FrameworkControllerClusterConfigNFS( + kubernetesClusterConfigObjectNFS.serviceAccountName, + kubernetesClusterConfigObjectNFS.apiVersion, + kubernetesClusterConfigObjectNFS.nfs, + kubernetesClusterConfigObjectNFS.storage, + kubernetesClusterConfigObjectNFS.namespace + ); + } +} + +export class FrameworkControllerClusterConfigAzure extends KubernetesClusterConfigAzure { + public readonly serviceAccountName: string; + public readonly configPath?: string; + + constructor( + serviceAccountName: string, + apiVersion: string, + keyVault: KeyVaultConfig, + azureStorage: AzureStorage, + storage?: KubernetesStorageKind, + uploadRetryCount?: number, + namespace?: string, + configPath?: string + ) { + super(apiVersion, keyVault, azureStorage, storage, uploadRetryCount, namespace); + this.serviceAccountName = serviceAccountName; + this.configPath = configPath + } + + public static getInstance(jsonObject: object): FrameworkControllerClusterConfigAzure { + const kubernetesClusterConfigObjectAzure: FrameworkControllerClusterConfigAzure = jsonObject; + + return new FrameworkControllerClusterConfigAzure( + kubernetesClusterConfigObjectAzure.serviceAccountName, + kubernetesClusterConfigObjectAzure.apiVersion, + kubernetesClusterConfigObjectAzure.keyVault, + kubernetesClusterConfigObjectAzure.azureStorage, + kubernetesClusterConfigObjectAzure.storage, + kubernetesClusterConfigObjectAzure.uploadRetryCount, + kubernetesClusterConfigObjectAzure.namespace + ); + } +} + +export class FrameworkControllerClusterConfigFactory { + + public static generateFrameworkControllerClusterConfig(jsonObject: object): FrameworkControllerClusterConfig { + const storageConfig: StorageConfig = jsonObject; + if (storageConfig === undefined) { + throw new Error('Invalid json object as a StorageConfig instance'); + } + if (storageConfig.storage !== undefined && storageConfig.storage === 'azureStorage') { + return FrameworkControllerClusterConfigAzure.getInstance(jsonObject); + } else if (storageConfig.storage === undefined || storageConfig.storage === 'nfs') { + return FrameworkControllerClusterConfigNFS.getInstance(jsonObject); + } + throw new Error(`Invalid json object ${jsonObject}`); + } +} + +export type FrameworkControllerJobStatus = + 'AttemptRunning' | 'Completed' | 'AttemptCreationPending' | 'AttemptCreationRequested' | 'AttemptPreparing' | 'AttemptCompleted'; + +export type FrameworkControllerJobCompleteStatus = 'Succeeded' | 'Failed'; diff --git a/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobInfoCollector.ts 
b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobInfoCollector.ts
new file mode 100644
index 0000000000000000000000000000000000000000..44bf05cf47f1383aa77b8892ee5d9730aefccb9d
--- /dev/null
+++ b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobInfoCollector.ts
@@ -0,0 +1,74 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { KubernetesCRDClient } from '../kubernetesApiClient';
+import { KubernetesTrialJobDetail } from '../kubernetesData';
+import { KubernetesJobInfoCollector } from '../kubernetesJobInfoCollector';
+import { FrameworkControllerJobCompleteStatus, FrameworkControllerJobStatus } from './frameworkcontrollerConfig';
+
+/**
+ * Collect frameworkcontroller job info from the Kubernetes cluster, and update frameworkcontroller job status locally
+ */
+export class FrameworkControllerJobInfoCollector extends KubernetesJobInfoCollector {
+    constructor(jobMap: Map<string, KubernetesTrialJobDetail>) {
+        super(jobMap);
+    }
+
+    protected async retrieveSingleTrialJobInfo(kubernetesCRDClient: KubernetesCRDClient | undefined,
+        kubernetesTrialJob: KubernetesTrialJobDetail): Promise<void> {
+        if (!this.statusesNeedToCheck.includes(kubernetesTrialJob.status)) {
+            return Promise.resolve();
+        }
+
+        if (kubernetesCRDClient === undefined) {
+            return Promise.reject('kubernetesCRDClient is undefined');
+        }
+
+        let kubernetesJobInfo: any;
+        try {
+            kubernetesJobInfo = await kubernetesCRDClient.getKubernetesJob(kubernetesTrialJob.kubernetesJobName);
+        } catch (error) {
+            this.log.error(`Get job ${kubernetesTrialJob.kubernetesJobName} info failed, error is ${error}`);
+            // This is not treated as an error status
+
+            return Promise.resolve();
+        }
+
+        if (kubernetesJobInfo.status && kubernetesJobInfo.status.state) {
+            const frameworkJobType: FrameworkControllerJobStatus = kubernetesJobInfo.status.state;
+            /* eslint-disable require-atomic-updates */
+            switch (frameworkJobType) {
+                case 'AttemptCreationPending':
+                case 'AttemptCreationRequested':
+                case 'AttemptPreparing':
+                    kubernetesTrialJob.status = 'WAITING';
+                    break;
+                case 'AttemptRunning':
+                    kubernetesTrialJob.status = 'RUNNING';
+                    if (kubernetesTrialJob.startTime === undefined) {
+                        kubernetesTrialJob.startTime = Date.parse(kubernetesJobInfo.status.startTime);
+                    }
+                    break;
+                case 'Completed': {
+                    const completedJobType: FrameworkControllerJobCompleteStatus =
+                        kubernetesJobInfo.status.attemptStatus.completionStatus.type.name;
+                    switch (completedJobType) {
+                        case 'Succeeded':
+                            kubernetesTrialJob.status = 'SUCCEEDED';
+                            break;
+                        case 'Failed':
+                            kubernetesTrialJob.status = 'FAILED';
+                            break;
+                        default:
+                    }
+                    kubernetesTrialJob.endTime = Date.parse(kubernetesJobInfo.status.completionTime);
+                    break;
+                }
+                default:
+            }
+            /* eslint-enable require-atomic-updates */
+        }
+
+        return Promise.resolve();
+    }
+}
diff --git a/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobRestServer.ts b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobRestServer.ts
new file mode 100644
index 0000000000000000000000000000000000000000..9c66c6cccfc4d06575494b0f77d094fe44a30168
--- /dev/null
+++ b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerJobRestServer.ts
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
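+//
+// Note: both this server and the training service are container singletons,
+// so the component.get(FrameworkControllerTrainingService) call below resolves
+// the same training service instance that submits the trials; this rest
+// server only feeds trial metrics back to it.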
+ +import * as component from 'common/component'; +import { KubernetesJobRestServer } from '../kubernetesJobRestServer'; +import { FrameworkControllerTrainingService } from './frameworkcontrollerTrainingService'; + +/** + * frameworkcontroller Training service Rest server, provides rest API to support frameworkcontroller job metrics update + * + */ +@component.Singleton +export class FrameworkControllerJobRestServer extends KubernetesJobRestServer { + constructor() { + super(component.get(FrameworkControllerTrainingService)); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerTrainingService.ts b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..8683e6c6c2d0fb80ddd7417a77a45a6836d8eb9a --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/frameworkcontroller/frameworkcontrollerTrainingService.ts @@ -0,0 +1,557 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import cpp from 'child-process-promise'; +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; +import {getExperimentId} from 'common/experimentStartupInfo'; +import { + NNIManagerIpConfig, TrialJobApplicationForm, TrialJobDetail, TrialJobStatus +} from 'common/trainingService'; +import {delay, generateParamFileName, getExperimentRootDir, uniqueString} from 'common/utils'; +import {CONTAINER_INSTALL_NNI_SHELL_FORMAT} from 'training_service/common/containerJobData'; +import {TrialConfigMetadataKey} from 'training_service/common/trialConfigMetadataKey'; +import {validateCodeDir} from 'training_service/common/util'; +import {NFSConfig} from '../kubernetesConfig'; +import {KubernetesTrialJobDetail} from '../kubernetesData'; +import {KubernetesTrainingService} from '../kubernetesTrainingService'; +import {KubernetesJobRestServer} from '../kubernetesJobRestServer'; +import {FrameworkControllerClientFactory} from './frameworkcontrollerApiClient'; +import { + FrameworkControllerClusterConfig, + FrameworkControllerClusterConfigAzure, + FrameworkControllerClusterConfigFactory, + FrameworkControllerClusterConfigNFS, + FrameworkControllerTrialConfig, + FrameworkControllerTrialConfigTemplate, +} from './frameworkcontrollerConfig'; +import {FrameworkControllerJobInfoCollector} from './frameworkcontrollerJobInfoCollector'; +import {FrameworkControllerJobRestServer} from './frameworkcontrollerJobRestServer'; + +const yaml = require('js-yaml'); + +/** + * Training Service implementation for frameworkcontroller + */ +@component.Singleton +class FrameworkControllerTrainingService extends KubernetesTrainingService implements KubernetesTrainingService { + private fcTrialConfig?: FrameworkControllerTrialConfig; // frameworkcontroller trial configuration + private fcTemplate: any = undefined; // custom frameworkcontroller template + private readonly fcJobInfoCollector: FrameworkControllerJobInfoCollector; // frameworkcontroller job info collector + private readonly fcContainerPortMap: Map = new Map(); // store frameworkcontroller container port + private fcClusterConfig?: FrameworkControllerClusterConfig; + + constructor() { + super(); + this.fcJobInfoCollector = new FrameworkControllerJobInfoCollector(this.trialJobsMap); + this.experimentId = getExperimentId(); + } + + public async run(): Promise { + this.kubernetesJobRestServer = new KubernetesJobRestServer(this); + if 
(this.kubernetesJobRestServer === undefined) { + throw new Error('kubernetesJobRestServer not initialized!'); + } + await this.kubernetesJobRestServer.start(); + this.kubernetesJobRestServer.setEnableVersionCheck = this.versionCheck; + this.log.info(`frameworkcontroller Training service rest server listening on: ${this.kubernetesJobRestServer.endPoint}`); + while (!this.stopping) { + // collect metrics for frameworkcontroller jobs by interacting with Kubernetes API server + await delay(3000); + await this.fcJobInfoCollector.retrieveTrialStatus(this.kubernetesCRDClient); + if (this.kubernetesJobRestServer.getErrorMessage !== undefined) { + throw new Error(this.kubernetesJobRestServer.getErrorMessage); + } + } + } + private parseCustomTaskRoles(customTaskRoles: any[]): FrameworkControllerTrialConfigTemplate[] { + const taskRoles: FrameworkControllerTrialConfigTemplate[] = [] + customTaskRoles.map((x) => { + if (x.task === undefined || + x.task.pod === undefined || + x.task.pod.spec === undefined || + x.task.pod.spec.containers === undefined) { + throw new Error('invalid custom frameworkcontroller configuration') + } + if (x.task.pod.spec.containers.length > 1) { + throw new Error('custom config may only define one non-init container for tasks') + } + const defaultAttempt = { + minFailedTaskCount: 1, + minSucceededTaskCount: -1 + } + const trialConfig = { + name: x.name, + taskNum: x.taskNumber ? x.taskNumber : 1, + command: x.task.pod.spec.containers[0].command.join(" "), + gpuNum: x.task.gpuNum ? x.task.gpuNum : 0, + cpuNum: x.task.cpuNum ? x.task.cpuNum : 1, + memoryMB: x.task.memoryMB ? x.task.memoryMB : 8192, + image: x.task.pod.spec.containers[0].image, + frameworkAttemptCompletionPolicy: x.task.frameworkAttemptCompletionPolicy ? + x.task.frameworkAttemptCompletionPolicy : + defaultAttempt + } + taskRoles.push(trialConfig) + }) + return taskRoles + } + + public async submitTrialJob(form: TrialJobApplicationForm): Promise { + let configTaskRoles: any = undefined; + if (this.fcClusterConfig === undefined) { + throw new Error('frameworkcontrollerClusterConfig is not initialized'); + } + if (this.kubernetesCRDClient === undefined) { + throw new Error('kubernetesCRDClient is undefined'); + } + + if (this.fcTemplate === undefined) { + if (this.fcTrialConfig === undefined) { + throw new Error( + 'neither trialConfig nor fcTemplate is initialized' + ); + } + configTaskRoles = this.fcTrialConfig.taskRoles; + } else { + configTaskRoles = this.parseCustomTaskRoles(this.fcTemplate.spec.taskRoles) + } + const namespace = this.fcClusterConfig.namespace ? 
this.fcClusterConfig.namespace : "default"; + this.genericK8sClient.setNamespace = namespace; + + if (this.kubernetesRestServerPort === undefined) { + const restServer: FrameworkControllerJobRestServer = component.get(FrameworkControllerJobRestServer); + this.kubernetesRestServerPort = restServer.clusterRestServerPort; + } + + // wait upload of code Dir to finish + if (this.copyExpCodeDirPromise !== undefined) { + await this.copyExpCodeDirPromise; + } + + const trialJobId: string = uniqueString(5); + // Set trial's NFS working folder + const trialWorkingFolder: string = path.join(this.CONTAINER_MOUNT_PATH, 'nni', getExperimentId(), trialJobId); + const trialLocalTempFolder: string = path.join(getExperimentRootDir(), 'trials-local', trialJobId); + let frameworkcontrollerJobName: string = `nniexp${this.experimentId}trial${trialJobId}`.toLowerCase(); + + let frameworkcontrollerJobConfig: any; + + if (this.fcTemplate !== undefined) { + // Create frameworkcontroller job based on generated frameworkcontroller job resource config + frameworkcontrollerJobConfig = JSON.parse(JSON.stringify(this.fcTemplate)); + // add a custom name extension to the job name and apply it to the custom template + frameworkcontrollerJobName += "xx" + this.fcTemplate.metadata.name; + // Process custom task roles commands + configTaskRoles.map((x: any, i: number) => { + const scriptName = path.join(trialWorkingFolder, "run_" + x.name + ".sh") + frameworkcontrollerJobConfig.spec.taskRoles[i].task.pod.spec.containers[0].command = ["sh", scriptName] + }) + } + + //Generate the port used for taskRole + this.generateContainerPort(configTaskRoles); + await this.prepareRunScript(trialLocalTempFolder, trialJobId, trialWorkingFolder, form, configTaskRoles); + + //wait upload of script files to finish + const trialJobOutputUrl: string = await this.uploadFolder(trialLocalTempFolder, `nni/${getExperimentId()}/${trialJobId}`); + let initStatus: TrialJobStatus = 'WAITING'; + if (!trialJobOutputUrl) { + initStatus = 'FAILED'; + } + const trialJobDetail: KubernetesTrialJobDetail = new KubernetesTrialJobDetail( + trialJobId, + initStatus, + Date.now(), + trialWorkingFolder, + form, + frameworkcontrollerJobName, + trialJobOutputUrl + ); + + // Set trial job detail until create frameworkcontroller job successfully + this.trialJobsMap.set(trialJobId, trialJobDetail); + + if (this.fcTemplate !== undefined) { + frameworkcontrollerJobConfig = { + ...frameworkcontrollerJobConfig, + metadata: {...this.fcTemplate.metadata, name: frameworkcontrollerJobName} + }; + } else { + frameworkcontrollerJobConfig = await this.prepareFrameworkControllerConfig( + trialJobId, + trialWorkingFolder, + frameworkcontrollerJobName + ); + } + await this.kubernetesCRDClient.createKubernetesJob(frameworkcontrollerJobConfig); + + // Set trial job detail until create frameworkcontroller job successfully + this.trialJobsMap.set(trialJobId, trialJobDetail); + + return Promise.resolve(trialJobDetail); + } + + public async setClusterMetadata(key: string, value: string): Promise { + switch (key) { + case TrialConfigMetadataKey.NNI_MANAGER_IP: + this.nniManagerIpConfig = JSON.parse(value); + break; + case TrialConfigMetadataKey.FRAMEWORKCONTROLLER_CLUSTER_CONFIG: { + const frameworkcontrollerClusterJsonObject: any = JSON.parse(value); + let namespace: string | undefined; + this.fcClusterConfig = FrameworkControllerClusterConfigFactory + .generateFrameworkControllerClusterConfig(frameworkcontrollerClusterJsonObject); + if (this.fcClusterConfig.storageType === 'azureStorage') { 
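+                    // Azure storage branch: read the account and share names from
+                    // the cluster config, optionally load a custom framework
+                    // template from configPath, and create the Azure share before
+                    // any trial is submitted.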
+                    const azureFrameworkControllerClusterConfig: FrameworkControllerClusterConfigAzure =
+                        <FrameworkControllerClusterConfigAzure>this.fcClusterConfig;
+                    this.azureStorageAccountName = azureFrameworkControllerClusterConfig.azureStorage.accountName;
+                    this.azureStorageShare = azureFrameworkControllerClusterConfig.azureStorage.azureShare;
+                    if (azureFrameworkControllerClusterConfig.configPath !== undefined) {
+                        this.fcTemplate = yaml.safeLoad(
+                            fs.readFileSync(
+                                azureFrameworkControllerClusterConfig.configPath,
+                                'utf8'
+                            )
+                        );
+                    }
+                    await this.createAzureStorage(
+                        azureFrameworkControllerClusterConfig.keyVault.vaultName,
+                        azureFrameworkControllerClusterConfig.keyVault.name
+                    );
+                    namespace = azureFrameworkControllerClusterConfig.namespace;
+                } else if (this.fcClusterConfig.storageType === 'nfs') {
+                    const nfsFrameworkControllerClusterConfig: FrameworkControllerClusterConfigNFS =
+                        <FrameworkControllerClusterConfigNFS>this.fcClusterConfig;
+                    if (nfsFrameworkControllerClusterConfig.configPath !== undefined) {
+                        this.fcTemplate = yaml.safeLoad(
+                            fs.readFileSync(
+                                nfsFrameworkControllerClusterConfig.configPath,
+                                'utf8'
+                            )
+                        );
+                    }
+                    await this.createNFSStorage(
+                        nfsFrameworkControllerClusterConfig.nfs.server,
+                        nfsFrameworkControllerClusterConfig.nfs.path
+                    );
+                    namespace = nfsFrameworkControllerClusterConfig.namespace;
+                }
+                namespace = namespace ? namespace : "default";
+                this.kubernetesCRDClient = FrameworkControllerClientFactory.createClient(namespace);
+
+                break;
+            }
+            case TrialConfigMetadataKey.TRIAL_CONFIG: {
+                const frameworkcontrollerTrialJsonObject: any = JSON.parse(value);
+
+                this.fcTrialConfig = new FrameworkControllerTrialConfig(
+                    frameworkcontrollerTrialJsonObject.codeDir,
+                    frameworkcontrollerTrialJsonObject.taskRoles
+                );
+
+                // Validate to make sure codeDir doesn't have too many files
+                try {
+                    await validateCodeDir(this.fcTrialConfig.codeDir);
+                    // upload codeDir to storage
+                    this.copyExpCodeDirPromise = this.uploadFolder(this.fcTrialConfig.codeDir, `nni/${getExperimentId()}/nni-code`);
+                } catch (error) {
+                    this.log.error(error);
+
+                    return Promise.reject(new Error(error));
+                }
+                break;
+            }
+            case TrialConfigMetadataKey.VERSION_CHECK:
+                this.versionCheck = (value === 'true' || value === 'True');
+                break;
+            case TrialConfigMetadataKey.LOG_COLLECTION:
+                this.logCollection = value;
+                break;
+            default:
+        }
+
+        return Promise.resolve();
+    }
+
+    /**
+     * upload local folder to nfs or azureStorage
+     */
+    private async uploadFolder(srcDirectory: string, destDirectory: string): Promise<string> {
+        if (this.fcClusterConfig === undefined) {
+            throw new Error('frameworkcontroller Cluster config is not initialized');
+        }
+
+        assert(this.fcClusterConfig.storage === undefined ||
+            this.fcClusterConfig.storage === 'azureStorage' ||
+            this.fcClusterConfig.storage === 'nfs' ||
+            this.fcClusterConfig.storage === 'pvc'
+        );
+
+        if (this.fcClusterConfig.storage === 'azureStorage') {
+            if (this.azureStorageClient === undefined) {
+                throw new Error('azureStorageClient is not initialized');
+            }
+            const fcClusterConfigAzure: FrameworkControllerClusterConfigAzure = <FrameworkControllerClusterConfigAzure>this.fcClusterConfig;
+            return await this.uploadFolderToAzureStorage(srcDirectory, destDirectory, fcClusterConfigAzure.uploadRetryCount);
+        } else if (this.fcClusterConfig.storage === 'nfs' || this.fcClusterConfig.storage === undefined) {
+            await cpp.exec(`mkdir -p ${this.trialLocalTempFolder}/${destDirectory}`);
+            await cpp.exec(`cp -r ${srcDirectory}/* ${this.trialLocalTempFolder}/${destDirectory}/.`);
+            const fcClusterConfigNFS: FrameworkControllerClusterConfigNFS = <FrameworkControllerClusterConfigNFS>this.fcClusterConfig;
+            const nfsConfig: NFSConfig = fcClusterConfigNFS.nfs;
+            return `nfs://${nfsConfig.server}:${destDirectory}`;
+        } else if (this.fcClusterConfig.storage === 'pvc') {
+            await cpp.exec(`mkdir -p ${this.trialLocalTempFolder}/${destDirectory}`);
+            await cpp.exec(`cp -r ${srcDirectory}/* ${this.trialLocalTempFolder}/${destDirectory}/.`);
+            return `${this.trialLocalTempFolder}/${destDirectory}`;
+        }
+        return '';
+    }
+
+    /**
+     * generate trial's command for frameworkcontroller
+     * expose ports and execute injector.sh before executing user's command
+     * @param command
+     */
+    private generateCommandScript(taskRoles: FrameworkControllerTrialConfigTemplate[], command: string): string {
+        let portScript: string = '';
+        for (const taskRole of taskRoles) {
+            portScript += `FB_${taskRole.name.toUpperCase()}_PORT=${this.fcContainerPortMap.get(
+                taskRole.name
+            )} `;
+        }
+
+        return `${portScript} . /mnt/frameworkbarrier/injector.sh && ${command}`;
+    }
+
+    private async prepareRunScript(trialLocalTempFolder: string, trialJobId: string,
+        trialWorkingFolder: string, form: TrialJobApplicationForm,
+        configTaskRoles: FrameworkControllerTrialConfigTemplate[]
+    ): Promise<void> {
+        if (configTaskRoles === undefined) {
+            throw new Error(
+                'neither frameworkcontroller trial config nor template is initialized'
+            );
+        }
+
+        await cpp.exec(`mkdir -p ${trialLocalTempFolder}`);
+
+        const installScriptContent: string = CONTAINER_INSTALL_NNI_SHELL_FORMAT;
+        // Write NNI installation file to local tmp files
+        await fs.promises.writeFile(path.join(trialLocalTempFolder, 'install_nni.sh'), installScriptContent, {encoding: 'utf8'});
+        // Create tmp trial working folder locally.
+
+        for (const taskRole of configTaskRoles) {
+            const runScriptContent: string =
+                await this.generateRunScript('frameworkcontroller', trialJobId, trialWorkingFolder,
+                    this.generateCommandScript(configTaskRoles, taskRole.command), form.sequenceId.toString(),
+                    taskRole.name, taskRole.gpuNum ? 
taskRole.gpuNum : 0); + await fs.promises.writeFile(path.join(trialLocalTempFolder, `run_${taskRole.name}.sh`), runScriptContent, {encoding: 'utf8'}); + } + + // Write file content ( parameter.cfg ) to local tmp folders + if (form !== undefined) { + await fs.promises.writeFile(path.join(trialLocalTempFolder, generateParamFileName(form.hyperParameters)), + form.hyperParameters.value, {encoding: 'utf8'}); + } + } + + private async prepareFrameworkControllerConfig(trialJobId: string, trialWorkingFolder: string, frameworkcontrollerJobName: string): + Promise { + + if (this.fcTrialConfig === undefined) { + throw new Error('frameworkcontroller trial config is not initialized'); + } + + const podResources: any = []; + for (const taskRole of this.fcTrialConfig.taskRoles) { + const resource: any = {}; + resource.requests = this.generatePodResource(taskRole.memoryMB, taskRole.cpuNum, taskRole.gpuNum); + resource.limits = {...resource.requests}; + podResources.push(resource); + } + // Generate frameworkcontroller job resource config object + const frameworkcontrollerJobConfig: any = + await this.generateFrameworkControllerJobConfig(trialJobId, trialWorkingFolder, frameworkcontrollerJobName, podResources); + + return Promise.resolve(frameworkcontrollerJobConfig); + } + + private generateContainerPort(taskRoles: FrameworkControllerTrialConfigTemplate[]): void { + if (taskRoles === undefined) { + throw new Error('frameworkcontroller trial config is not initialized'); + } + + let port: number = 4000; //The default port used in container + for (const index of taskRoles.keys()) { + this.fcContainerPortMap.set(taskRoles[index].name, port); + port += 1; + } + } + + /** + * Generate frameworkcontroller resource config file + * @param trialJobId trial job id + * @param trialWorkingFolder working folder + * @param frameworkcontrollerJobName job name + * @param podResources pod template + */ + private async generateFrameworkControllerJobConfig(trialJobId: string, trialWorkingFolder: string, + frameworkcontrollerJobName: string, podResources: any): Promise { + if (this.fcClusterConfig === undefined) { + throw new Error('frameworkcontroller Cluster config is not initialized'); + } + + if (this.fcTrialConfig === undefined) { + throw new Error('frameworkcontroller trial config is not initialized'); + } + + const taskRoles: any = []; + for (const index of this.fcTrialConfig.taskRoles.keys()) { + const containerPort: number | undefined = this.fcContainerPortMap.get(this.fcTrialConfig.taskRoles[index].name); + if (containerPort === undefined) { + throw new Error('Container port is not initialized'); + } + + const taskRole: any = this.generateTaskRoleConfig( + trialWorkingFolder, + this.fcTrialConfig.taskRoles[index].image, + `run_${this.fcTrialConfig.taskRoles[index].name}.sh`, + podResources[index], + containerPort, + await this.createRegistrySecret(this.fcTrialConfig.taskRoles[index].privateRegistryAuthPath) + ); + taskRoles.push({ + name: this.fcTrialConfig.taskRoles[index].name, + taskNumber: this.fcTrialConfig.taskRoles[index].taskNum, + frameworkAttemptCompletionPolicy: { + minFailedTaskCount: this.fcTrialConfig.taskRoles[index].frameworkAttemptCompletionPolicy.minFailedTaskCount, + minSucceededTaskCount: this.fcTrialConfig.taskRoles[index].frameworkAttemptCompletionPolicy.minSucceededTaskCount + }, + task: taskRole + }); + } + + return Promise.resolve({ + apiVersion: `frameworkcontroller.microsoft.com/v1`, + kind: 'Framework', + metadata: { + name: frameworkcontrollerJobName, + namespace: 
this.fcClusterConfig.namespace ? this.fcClusterConfig.namespace : "default", + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: getExperimentId(), + trialId: trialJobId + } + }, + spec: { + executionType: 'Start', + taskRoles: taskRoles + } + }); + } + + private generateTaskRoleConfig(trialWorkingFolder: string, replicaImage: string, runScriptFile: string, + podResources: any, containerPort: number, privateRegistrySecretName: string | undefined): any { + if (this.fcClusterConfig === undefined) { + throw new Error('frameworkcontroller Cluster config is not initialized'); + } + + if (this.fcTrialConfig === undefined) { + throw new Error('frameworkcontroller trial config is not initialized'); + } + + const volumeSpecMap: Map = new Map(); + if (this.fcClusterConfig.storageType === 'azureStorage') { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + azureFile: { + secretName: `${this.azureStorageSecretName}`, + shareName: `${this.azureStorageShare}`, + readonly: false + } + }, { + name: 'frameworkbarrier-volume', + emptyDir: {} + }]); + } else { + const frameworkcontrollerClusterConfigNFS: FrameworkControllerClusterConfigNFS = + this.fcClusterConfig; + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + nfs: { + server: `${frameworkcontrollerClusterConfigNFS.nfs.server}`, + path: `${frameworkcontrollerClusterConfigNFS.nfs.path}` + } + }, { + name: 'frameworkbarrier-volume', + emptyDir: {} + }]); + } + + const containers: any = [ + { + name: 'framework', + image: replicaImage, + command: ['sh', `${path.join(trialWorkingFolder, runScriptFile)}`], + volumeMounts: [ + { + name: 'nni-vol', + mountPath: this.CONTAINER_MOUNT_PATH + }, { + name: 'frameworkbarrier-volume', + mountPath: '/mnt/frameworkbarrier' + }], + resources: podResources, + ports: [{ + containerPort: containerPort + }] + }]; + + const initContainers: any = [ + { + name: 'frameworkbarrier', + image: 'frameworkcontroller/frameworkbarrier', + volumeMounts: [ + { + name: 'frameworkbarrier-volume', + mountPath: '/mnt/frameworkbarrier' + }] + }]; + + const spec: any = { + containers: containers, + initContainers: initContainers, + restartPolicy: 'OnFailure', + volumes: volumeSpecMap.get('nniVolumes'), + hostNetwork: false + }; + if (privateRegistrySecretName) { + spec.imagePullSecrets = [ + { + name: privateRegistrySecretName + } + ] + } + + if (this.fcClusterConfig.serviceAccountName !== undefined) { + spec.serviceAccountName = this.fcClusterConfig.serviceAccountName; + } + + return { + pod: { + spec: spec + } + }; + } + + public async updateTrialJob(_1: any, _2: any): Promise { + throw new Error('not supported'); + } +} + +export {FrameworkControllerTrainingService}; diff --git a/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowApiClient.ts b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowApiClient.ts new file mode 100644 index 0000000000000000000000000000000000000000..978c9247868089a4fa17b34b4f39d3d3aea7c4f6 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowApiClient.ts @@ -0,0 +1,210 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
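+//
+// Illustrative usage sketch (comment only; tfJobConfig is a hypothetical
+// variable): the factory at the bottom of this file maps an
+// (operator, apiVersion) pair onto a concrete client, e.g.
+//   const crdClient = KubeflowOperatorClientFactory.createClient('tf-operator', 'v1');
+//   await crdClient.createKubernetesJob(tfJobConfig);
+// Unsupported combinations throw, so callers fail fast on bad configurations.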
+ +import fs from 'fs'; +import { GeneralK8sClient, KubernetesCRDClient } from '../kubernetesApiClient'; +import { KubeflowOperator } from './kubeflowConfig'; + + +class TFOperatorClientV1Alpha2 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/tfjob-crd-v1alpha2.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1alpha2.namespaces('default').tfjobs; + } + + public get containerName(): string { + return 'tensorflow'; + } +} + +class TFOperatorClientV1Beta1 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/tfjob-crd-v1beta1.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1beta1.namespaces('default').tfjobs; + } + + public get containerName(): string { + return 'tensorflow'; + } +} + +class TFOperatorClientV1Beta2 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/tfjob-crd-v1beta2.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1beta2.namespaces('default').tfjobs; + } + + public get containerName(): string { + return 'tensorflow'; + } +} + +class TFOperatorClientV1 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/tfjob-crd-v1.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1.namespaces('default').tfjobs; + } + + public get containerName(): string { + return 'tensorflow'; + } +} +class PyTorchOperatorClientV1 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/pytorchjob-crd-v1.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1.namespaces('default').pytorchjobs; + } + + public get containerName(): string { + return 'pytorch'; + } +} +class PyTorchOperatorClientV1Alpha2 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/pytorchjob-crd-v1alpha2.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1alpha2.namespaces('default').pytorchjobs; + } + + public get containerName(): string { + return 'pytorch'; + } +} + +class PyTorchOperatorClientV1Beta1 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/pytorchjob-crd-v1beta1.json', 'utf8')); + 
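// Register the loaded CRD schema so the dynamic client exposes the
+        // pytorchjobs resource used by the `operator` getter below.
+        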
this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1beta1.namespaces('default').pytorchjobs; + } + + public get containerName(): string { + return 'pytorch'; + } +} + +class PyTorchOperatorClientV1Beta2 extends KubernetesCRDClient { + /** + * constructor, to initialize tfjob CRD definition + */ + public constructor() { + super(); + this.crdSchema = JSON.parse(fs.readFileSync('./config/kubeflow/pytorchjob-crd-v1beta2.json', 'utf8')); + this.client.addCustomResourceDefinition(this.crdSchema); + } + + protected get operator(): any { + return this.client.apis['kubeflow.org'].v1beta2.namespaces('default').pytorchjobs; + } + + public get containerName(): string { + return 'pytorch'; + } +} + +/** + * KubeflowOperator Client + */ +class KubeflowOperatorClientFactory { + /** + * Factory method to generate operator client + */ + public static createClient(kubeflowOperator: KubeflowOperator, operatorApiVersion: string): KubernetesCRDClient { + switch (kubeflowOperator) { + case 'tf-operator': { + switch (operatorApiVersion) { + case 'v1alpha2': { + return new TFOperatorClientV1Alpha2(); + } + case 'v1beta1': { + return new TFOperatorClientV1Beta1(); + } + case 'v1beta2': { + return new TFOperatorClientV1Beta2(); + } + case 'v1': { + return new TFOperatorClientV1(); + } + default: + throw new Error(`Invalid tf-operator apiVersion ${operatorApiVersion}`); + } + } + case 'pytorch-operator': { + switch (operatorApiVersion) { + case 'v1alpha2': { + return new PyTorchOperatorClientV1Alpha2(); + } + case 'v1beta1': { + return new PyTorchOperatorClientV1Beta1(); + } + case 'v1beta2': { + return new PyTorchOperatorClientV1Beta2(); + } + case 'v1': { + return new PyTorchOperatorClientV1(); + } + default: + throw new Error(`Invalid pytorch-operator apiVersion ${operatorApiVersion}`); + } + } + default: + throw new Error(`Invalid operator ${kubeflowOperator}`); + } + } +} + +export { KubeflowOperatorClientFactory, GeneralK8sClient }; diff --git a/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowConfig.ts b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..cffc88d376fc9a3f04201ef0a9489fb71db43ede --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowConfig.ts @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
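+//
+// Shape note (illustrative values): KubeflowClusterConfigFactory below
+// dispatches on the optional `storage` field of the parsed cluster config, e.g.
+//   {"operator": "tf-operator", "apiVersion": "v1", "storage": "nfs",
+//    "nfs": {"server": "10.0.0.1", "path": "/exports/nni"}}
+// selects KubeflowClusterConfigNFS, while "azureStorage" selects the Azure
+// variant; any other value is rejected.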
+ +import assert from 'assert'; +import { MethodNotImplementedError } from 'common/errors'; +import { AzureStorage, KeyVaultConfig, KubernetesClusterConfig, KubernetesClusterConfigAzure, KubernetesClusterConfigNFS, + KubernetesStorageKind, KubernetesTrialConfig, KubernetesTrialConfigTemplate, NFSConfig, StorageConfig +} from '../kubernetesConfig'; + +// operator types that kubeflow supported +export type KubeflowOperator = 'tf-operator' | 'pytorch-operator' ; +export type DistTrainRole = 'worker' | 'ps' | 'master'; +export type KubeflowJobStatus = 'Created' | 'Running' | 'Failed' | 'Succeeded'; +export type OperatorApiVersion = 'v1alpha2' | 'v1beta1' | 'v1beta2' | 'v1'; + +/** + * Kubeflow Cluster Configuration + */ +export class KubeflowClusterConfig extends KubernetesClusterConfig { + public readonly operator: KubeflowOperator; + constructor(apiVersion: string, operator: KubeflowOperator) { + super(apiVersion); + this.operator = operator; + } +} + +export class KubeflowClusterConfigNFS extends KubernetesClusterConfigNFS { + public readonly operator: KubeflowOperator; + constructor( + operator: KubeflowOperator, + apiVersion: string, + nfs: NFSConfig, + storage?: KubernetesStorageKind + ) { + super(apiVersion, nfs, storage); + this.operator = operator; + } + + public get storageType(): KubernetesStorageKind { + return 'nfs'; + } + + public static getInstance(jsonObject: object): KubeflowClusterConfigNFS { + const kubeflowClusterConfigObjectNFS: KubeflowClusterConfigNFS = jsonObject; + assert (kubeflowClusterConfigObjectNFS !== undefined); + + return new KubeflowClusterConfigNFS( + kubeflowClusterConfigObjectNFS.operator, + kubeflowClusterConfigObjectNFS.apiVersion, + kubeflowClusterConfigObjectNFS.nfs, + kubeflowClusterConfigObjectNFS.storage + ); + } +} + +export class KubeflowClusterConfigAzure extends KubernetesClusterConfigAzure { + public readonly operator: KubeflowOperator; + + constructor( + operator: KubeflowOperator, + apiVersion: string, + keyVault: KeyVaultConfig, + azureStorage: AzureStorage, + storage?: KubernetesStorageKind + ) { + super(apiVersion, keyVault, azureStorage, storage); + this.operator = operator; + } + + public get storageType(): KubernetesStorageKind { + return 'azureStorage'; + } + + public static getInstance(jsonObject: object): KubeflowClusterConfigAzure { + const kubeflowClusterConfigObjectAzure: KubeflowClusterConfigAzure = jsonObject; + + return new KubeflowClusterConfigAzure( + kubeflowClusterConfigObjectAzure.operator, + kubeflowClusterConfigObjectAzure.apiVersion, + kubeflowClusterConfigObjectAzure.keyVault, + kubeflowClusterConfigObjectAzure.azureStorage, + kubeflowClusterConfigObjectAzure.storage + ); + } +} + +export class KubeflowClusterConfigFactory { + + public static generateKubeflowClusterConfig(jsonObject: object): KubeflowClusterConfig { + const storageConfig: StorageConfig = jsonObject; + if (storageConfig === undefined) { + throw new Error('Invalid json object as a StorageConfig instance'); + } + if (storageConfig.storage !== undefined && storageConfig.storage === 'azureStorage') { + return KubeflowClusterConfigAzure.getInstance(jsonObject); + } else if (storageConfig.storage === undefined || storageConfig.storage === 'nfs') { + return KubeflowClusterConfigNFS.getInstance(jsonObject); + } + throw new Error(`Invalid json object ${jsonObject}`); + } +} + +export class KubeflowTrialConfig extends KubernetesTrialConfig { + constructor(codeDir: string) { + super(codeDir); + } + + public get operatorType(): KubeflowOperator { + throw new 
MethodNotImplementedError(); + } +} + +export class KubeflowTrialConfigTemplate extends KubernetesTrialConfigTemplate { + public readonly replicas: number; + constructor(replicas: number, command: string, gpuNum: number, + cpuNum: number, memoryMB: number, image: string, privateRegistryAuthPath?: string) { + super(command, gpuNum, cpuNum, memoryMB, image, privateRegistryAuthPath); + this.replicas = replicas; + } +} + +export class KubeflowTrialConfigTensorflow extends KubeflowTrialConfig { + public readonly ps?: KubeflowTrialConfigTemplate; + public readonly worker: KubeflowTrialConfigTemplate; + + constructor(codeDir: string, worker: KubeflowTrialConfigTemplate, ps?: KubeflowTrialConfigTemplate) { + super(codeDir); + this.ps = ps; + this.worker = worker; + } + + public get operatorType(): KubeflowOperator { + return 'tf-operator'; + } +} + +export class KubeflowTrialConfigPytorch extends KubeflowTrialConfig { + public readonly master: KubeflowTrialConfigTemplate; + public readonly worker?: KubeflowTrialConfigTemplate; + + constructor(codeDir: string, master: KubeflowTrialConfigTemplate, worker?: KubeflowTrialConfigTemplate) { + super(codeDir); + this.master = master; + this.worker = worker; + } + + public get operatorType(): KubeflowOperator { + return 'pytorch-operator'; + } +} + +export class KubeflowTrialConfigFactory { + public static generateKubeflowTrialConfig(jsonObject: object, operator: KubeflowOperator): KubeflowTrialConfig { + if (operator === 'tf-operator') { + const kubeflowTrialConfigObject: KubeflowTrialConfigTensorflow = jsonObject; + + return new KubeflowTrialConfigTensorflow( + kubeflowTrialConfigObject.codeDir, + kubeflowTrialConfigObject.worker, + kubeflowTrialConfigObject.ps + ); + } else if (operator === 'pytorch-operator') { + const kubeflowTrialConfigObject: KubeflowTrialConfigPytorch = jsonObject; + + return new KubeflowTrialConfigPytorch( + kubeflowTrialConfigObject.codeDir, + kubeflowTrialConfigObject.master, + kubeflowTrialConfigObject.worker + ); + } + throw new Error(`Invalid json object ${jsonObject}`); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobInfoCollector.ts b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobInfoCollector.ts new file mode 100644 index 0000000000000000000000000000000000000000..85ab91adb3d59e72d724ab1f689c76c72841ab2d --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobInfoCollector.ts @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
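+//
+// Status mapping note: the collector below reads the *last* entry of
+// status.conditions on the TFJob/PyTorchJob object and maps it onto NNI's
+// trial status: Created -> WAITING, Running -> RUNNING, Failed -> FAILED,
+// Succeeded -> SUCCEEDED; unknown condition types leave the status unchanged.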
+
+import { KubernetesCRDClient } from '../kubernetesApiClient';
+import { KubernetesTrialJobDetail } from '../kubernetesData';
+import { KubernetesJobInfoCollector } from '../kubernetesJobInfoCollector';
+import { KubeflowJobStatus } from './kubeflowConfig';
+
+/**
+ * Collect Kubeflow job info from the Kubernetes cluster, and update Kubeflow job status locally
+ */
+export class KubeflowJobInfoCollector extends KubernetesJobInfoCollector {
+    constructor(jobMap: Map<string, KubernetesTrialJobDetail>) {
+        super(jobMap);
+    }
+
+    protected async retrieveSingleTrialJobInfo(kubernetesCRDClient: KubernetesCRDClient | undefined,
+        kubernetesTrialJob: KubernetesTrialJobDetail): Promise<void> {
+        if (!this.statusesNeedToCheck.includes(kubernetesTrialJob.status)) {
+            return Promise.resolve();
+        }
+
+        if (kubernetesCRDClient === undefined) {
+            return Promise.reject('kubernetesCRDClient is undefined');
+        }
+
+        let kubernetesJobInfo: any;
+        try {
+            kubernetesJobInfo = await kubernetesCRDClient.getKubernetesJob(kubernetesTrialJob.kubernetesJobName);
+        } catch (error) {
+            // Notice: it may not be a 'real' error, since cancelling a trial job can also cause getKubernetesJob to fail.
+            this.log.error(`Get job ${kubernetesTrialJob.kubernetesJobName} info failed, error is ${error}`);
+
+            // This is not treated as an error status
+            return Promise.resolve();
+        }
+        /* eslint-disable require-atomic-updates */
+        if (kubernetesJobInfo.status && kubernetesJobInfo.status.conditions) {
+            const latestCondition: any = kubernetesJobInfo.status.conditions[kubernetesJobInfo.status.conditions.length - 1];
+            const tfJobType: KubeflowJobStatus = latestCondition.type;
+            switch (tfJobType) {
+                case 'Created':
+                    kubernetesTrialJob.status = 'WAITING';
+                    kubernetesTrialJob.startTime = Date.parse(latestCondition.lastUpdateTime);
+                    break;
+                case 'Running':
+                    kubernetesTrialJob.status = 'RUNNING';
+                    if (kubernetesTrialJob.startTime === undefined) {
+                        kubernetesTrialJob.startTime = Date.parse(latestCondition.lastUpdateTime);
+                    }
+                    break;
+                case 'Failed':
+                    kubernetesTrialJob.status = 'FAILED';
+                    kubernetesTrialJob.endTime = Date.parse(latestCondition.lastUpdateTime);
+                    break;
+                case 'Succeeded':
+                    kubernetesTrialJob.status = 'SUCCEEDED';
+                    kubernetesTrialJob.endTime = Date.parse(latestCondition.lastUpdateTime);
+                    break;
+                default:
+            }
+        }
+        /* eslint-enable require-atomic-updates */
+
+        return Promise.resolve();
+    }
+}
diff --git a/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobRestServer.ts b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobRestServer.ts
new file mode 100644
index 0000000000000000000000000000000000000000..cee55400766835d1474dfea1ecd8d16665e620c5
--- /dev/null
+++ b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowJobRestServer.ts
@@ -0,0 +1,20 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import * as component from 'common/component';
+import { KubernetesJobRestServer } from '../kubernetesJobRestServer';
+import { KubeflowTrainingService } from './kubeflowTrainingService';
+
+/**
+ * Kubeflow Training service Rest server, provides rest API to support kubeflow job metrics update
+ *
+ */
+@component.Singleton
+export class KubeflowJobRestServer extends KubernetesJobRestServer {
+    /**
+     * constructor to provide NNIRestServer's own rest property, e.g. 
port + */ + constructor() { + super(component.get(KubeflowTrainingService)); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowTrainingService.ts b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..7fee90b2ffbeb4be4ccd1c6cc75e14fc48126ad8 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubeflow/kubeflowTrainingService.ts @@ -0,0 +1,469 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import assert from 'assert'; +import cpp from 'child-process-promise'; +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; + +import { getExperimentId } from 'common/experimentStartupInfo'; +import { + NNIManagerIpConfig, TrialJobApplicationForm, TrialJobDetail, TrialJobStatus +} from 'common/trainingService'; +import { delay, generateParamFileName, getExperimentRootDir, uniqueString } from 'common/utils'; +import { CONTAINER_INSTALL_NNI_SHELL_FORMAT } from 'training_service/common/containerJobData'; +import { TrialConfigMetadataKey } from 'training_service/common/trialConfigMetadataKey'; +import { validateCodeDir } from 'training_service/common/util'; +import { NFSConfig } from '../kubernetesConfig'; +import { KubernetesTrialJobDetail } from '../kubernetesData'; +import { KubernetesJobRestServer } from '../kubernetesJobRestServer'; +import { KubernetesTrainingService } from '../kubernetesTrainingService'; +import { KubeflowOperatorClientFactory } from './kubeflowApiClient'; +import { KubeflowClusterConfig, KubeflowClusterConfigAzure, KubeflowClusterConfigFactory, KubeflowClusterConfigNFS, + KubeflowTrialConfig, KubeflowTrialConfigFactory, KubeflowTrialConfigPytorch, KubeflowTrialConfigTensorflow +} from './kubeflowConfig'; +import { KubeflowJobInfoCollector } from './kubeflowJobInfoCollector'; +import { KubeflowJobRestServer } from './kubeflowJobRestServer'; + +/** + * Training Service implementation for Kubeflow + * Refer https://github.com/kubeflow/kubeflow for more info about Kubeflow + */ +@component.Singleton +class KubeflowTrainingService extends KubernetesTrainingService implements KubernetesTrainingService { + private kubeflowClusterConfig?: KubeflowClusterConfig; + private kubeflowTrialConfig?: KubeflowTrialConfig; + private readonly kubeflowJobInfoCollector: KubeflowJobInfoCollector; + + constructor() { + super(); + this.kubeflowJobInfoCollector = new KubeflowJobInfoCollector(this.trialJobsMap); + this.experimentId = getExperimentId(); + this.log.info('Construct Kubeflow training service.'); + } + + public async run(): Promise { + this.log.info('Run Kubeflow training service.'); + this.kubernetesJobRestServer = new KubernetesJobRestServer(this); + if (this.kubernetesJobRestServer === undefined) { + throw new Error('kubernetesJobRestServer not initialized!'); + } + await this.kubernetesJobRestServer.start(); + this.kubernetesJobRestServer.setEnableVersionCheck = this.versionCheck; + this.log.info(`Kubeflow Training service rest server listening on: ${this.kubernetesJobRestServer.endPoint}`); + while (!this.stopping) { + // collect metrics for Kubeflow jobs by interacting with Kubernetes API server + await delay(3000); + await this.kubeflowJobInfoCollector.retrieveTrialStatus(this.kubernetesCRDClient); + if (this.kubernetesJobRestServer.getErrorMessage !== undefined) { + throw new Error(this.kubernetesJobRestServer.getErrorMessage); + } + } + this.log.info('Kubeflow training service 
exit.'); + } + + public async submitTrialJob(form: TrialJobApplicationForm): Promise { + if (this.kubernetesCRDClient === undefined) { + throw new Error('Kubeflow job operator client is undefined'); + } + + if (this.kubernetesRestServerPort === undefined) { + const restServer: KubeflowJobRestServer = component.get(KubeflowJobRestServer); + this.kubernetesRestServerPort = restServer.clusterRestServerPort; + } + + // upload code Dir to storage + if (this.copyExpCodeDirPromise !== undefined) { + await this.copyExpCodeDirPromise; + } + + const trialJobId: string = uniqueString(5); + const trialWorkingFolder: string = path.join(this.CONTAINER_MOUNT_PATH, 'nni', getExperimentId(), trialJobId); + const kubeflowJobName: string = `nni-exp-${this.experimentId}-trial-${trialJobId}`.toLowerCase(); + const trialLocalTempFolder: string = path.join(getExperimentRootDir(), 'trials-local', trialJobId); + //prepare the runscript + await this.prepareRunScript(trialLocalTempFolder, trialJobId, trialWorkingFolder, form); + //upload script files to sotrage + const trialJobOutputUrl: string = await this.uploadFolder(trialLocalTempFolder, `nni/${getExperimentId()}/${trialJobId}`); + let initStatus: TrialJobStatus = 'WAITING'; + if (!trialJobOutputUrl) { + initStatus = 'FAILED'; + } + const trialJobDetail: KubernetesTrialJobDetail = new KubernetesTrialJobDetail( + trialJobId, + initStatus, + Date.now(), + trialWorkingFolder, + form, + kubeflowJobName, + trialJobOutputUrl + ); + + // Generate kubeflow job resource config object + const kubeflowJobConfig: any = await this.prepareKubeflowConfig(trialJobId, trialWorkingFolder, kubeflowJobName); + // Create kubeflow job based on generated kubeflow job resource config + await this.kubernetesCRDClient.createKubernetesJob(kubeflowJobConfig); + + // Set trial job detail until create Kubeflow job successfully + this.trialJobsMap.set(trialJobId, trialJobDetail); + + return Promise.resolve(trialJobDetail); + } + + public async setClusterMetadata(key: string, value: string): Promise { + switch (key) { + case TrialConfigMetadataKey.NNI_MANAGER_IP: + this.nniManagerIpConfig = JSON.parse(value); + break; + + case TrialConfigMetadataKey.KUBEFLOW_CLUSTER_CONFIG: { + const kubeflowClusterJsonObject: object = JSON.parse(value); + this.kubeflowClusterConfig = KubeflowClusterConfigFactory.generateKubeflowClusterConfig(kubeflowClusterJsonObject); + if (this.kubeflowClusterConfig.storageType === 'azureStorage') { + const azureKubeflowClusterConfig: KubeflowClusterConfigAzure = this.kubeflowClusterConfig; + this.azureStorageAccountName = azureKubeflowClusterConfig.azureStorage.accountName; + this.azureStorageShare = azureKubeflowClusterConfig.azureStorage.azureShare; + await this.createAzureStorage( + azureKubeflowClusterConfig.keyVault.vaultName, + azureKubeflowClusterConfig.keyVault.name + ); + } else if (this.kubeflowClusterConfig.storageType === 'nfs') { + const nfsKubeflowClusterConfig: KubeflowClusterConfigNFS = this.kubeflowClusterConfig; + await this.createNFSStorage( + nfsKubeflowClusterConfig.nfs.server, + nfsKubeflowClusterConfig.nfs.path + ); + } + this.kubernetesCRDClient = KubeflowOperatorClientFactory.createClient( + this.kubeflowClusterConfig.operator, this.kubeflowClusterConfig.apiVersion); + break; + } + case TrialConfigMetadataKey.TRIAL_CONFIG: { + if (this.kubeflowClusterConfig === undefined) { + this.log.error('kubeflow cluster config is not initialized'); + + return Promise.reject(new Error('kubeflow cluster config is not initialized')); + } + + 
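+                // KUBEFLOW_CLUSTER_CONFIG must arrive before TRIAL_CONFIG: the trial config
+                // factory below needs the operator type to pick the right config schema.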
assert(this.kubeflowClusterConfig !== undefined);
+                const kubeflowTrialJsonObject: object = JSON.parse(value);
+                this.kubeflowTrialConfig = KubeflowTrialConfigFactory.generateKubeflowTrialConfig(
+                    kubeflowTrialJsonObject,
+                    this.kubeflowClusterConfig.operator
+                );
+
+                // Validate to make sure codeDir doesn't have too many files
+                try {
+                    await validateCodeDir(this.kubeflowTrialConfig.codeDir);
+                    //upload codeDir to storage
+                    this.copyExpCodeDirPromise = this.uploadFolder(this.kubeflowTrialConfig.codeDir, `nni/${getExperimentId()}/nni-code`);
+                } catch (error) {
+                    this.log.error(error);
+
+                    return Promise.reject(new Error(error));
+                }
+                break;
+            }
+            case TrialConfigMetadataKey.VERSION_CHECK:
+                this.versionCheck = (value === 'true' || value === 'True');
+                break;
+            case TrialConfigMetadataKey.LOG_COLLECTION:
+                this.logCollection = value;
+                break;
+            default:
+        }
+
+        return Promise.resolve();
+    }
+
+    /**
+     * upload local folder to NFS or azureStorage
+     */
+    private async uploadFolder(srcDirectory: string, destDirectory: string): Promise<string> {
+        if (this.kubeflowClusterConfig === undefined) {
+            throw new Error('Kubeflow Cluster config is not initialized');
+        }
+
+        if (this.kubeflowTrialConfig === undefined) {
+            throw new Error('Kubeflow Trial config is not initialized');
+        }
+
+        assert(this.kubeflowClusterConfig.storage === undefined
+            || this.kubeflowClusterConfig.storage === 'azureStorage'
+            || this.kubeflowClusterConfig.storage === 'nfs');
+
+        if (this.kubeflowClusterConfig.storage === 'azureStorage') {
+            if (this.azureStorageClient === undefined) {
+                throw new Error('azureStorageClient is not initialized');
+            }
+            const azureKubeflowClusterConfig: KubeflowClusterConfigAzure = this.kubeflowClusterConfig;
+            return await this.uploadFolderToAzureStorage(srcDirectory, destDirectory, azureKubeflowClusterConfig.uploadRetryCount);
+        } else if (this.kubeflowClusterConfig.storage === 'nfs' || this.kubeflowClusterConfig.storage === undefined) {
+            await cpp.exec(`mkdir -p ${this.trialLocalTempFolder}/${destDirectory}`);
+            await cpp.exec(`cp -r ${srcDirectory}/* ${this.trialLocalTempFolder}/${destDirectory}/.`);
+            const nfsKubeflowClusterConfig: KubeflowClusterConfigNFS = this.kubeflowClusterConfig;
+            const nfsConfig: NFSConfig = nfsKubeflowClusterConfig.nfs;
+            return `nfs://${nfsConfig.server}:${destDirectory}`;
+        }
+        return '';
+    }
+
+    private async prepareRunScript(trialLocalTempFolder: string, trialJobId: string, trialWorkingFolder: string,
+        form: TrialJobApplicationForm): Promise<void> {
+        if (this.kubeflowClusterConfig === undefined) {
+            throw new Error('Kubeflow Cluster config is not initialized');
+        }
+
+        // initialize kubeflow trial config to specific type
+        let kubeflowTrialConfig: any;
+        if (this.kubeflowClusterConfig.operator === 'tf-operator') {
+            kubeflowTrialConfig = this.kubeflowTrialConfig;
+        } else if (this.kubeflowClusterConfig.operator === 'pytorch-operator') {
+            kubeflowTrialConfig = this.kubeflowTrialConfig;
+        } else {
+            throw Error(`operator ${this.kubeflowClusterConfig.operator} is invalid`);
+        }
+
+        //create tmp trial working folder locally.
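+        // Files staged here (install_nni.sh, the role run scripts, and the parameter file)
+        // are later synced to NFS or Azure storage by uploadFolder().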
+ await cpp.exec(`mkdir -p ${trialLocalTempFolder}`); + const runScriptContent: string = CONTAINER_INSTALL_NNI_SHELL_FORMAT; + // Write NNI installation file to local tmp files + await fs.promises.writeFile(path.join(trialLocalTempFolder, 'install_nni.sh'), runScriptContent, { encoding: 'utf8' }); + + // Write worker file content run_worker.sh to local tmp folders + if (kubeflowTrialConfig.worker !== undefined) { + const workerRunScriptContent: string = await this.generateRunScript('kubeflow', trialJobId, trialWorkingFolder, + kubeflowTrialConfig.worker.command, + form.sequenceId.toString(), 'worker', + kubeflowTrialConfig.worker.gpuNum); + await fs.promises.writeFile(path.join(trialLocalTempFolder, 'run_worker.sh'), workerRunScriptContent, { encoding: 'utf8' }); + } + // Write parameter server file content run_ps.sh to local tmp folders + if (this.kubeflowClusterConfig.operator === 'tf-operator') { + const tensorflowTrialConfig: KubeflowTrialConfigTensorflow = this.kubeflowTrialConfig; + if (tensorflowTrialConfig.ps !== undefined) { + const psRunScriptContent: string = await this.generateRunScript('kubeflow', trialJobId, trialWorkingFolder, + tensorflowTrialConfig.ps.command, + form.sequenceId.toString(), + 'ps', tensorflowTrialConfig.ps.gpuNum); + await fs.promises.writeFile(path.join(trialLocalTempFolder, 'run_ps.sh'), psRunScriptContent, { encoding: 'utf8' }); + } + } else if (this.kubeflowClusterConfig.operator === 'pytorch-operator') { + const pytorchTrialConfig: KubeflowTrialConfigPytorch = this.kubeflowTrialConfig; + if (pytorchTrialConfig.master !== undefined) { + const masterRunScriptContent: string = await this.generateRunScript('kubeflow', trialJobId, trialWorkingFolder, + pytorchTrialConfig.master.command, + form.sequenceId.toString(), 'master', + pytorchTrialConfig.master.gpuNum); + await fs.promises.writeFile(path.join(trialLocalTempFolder, 'run_master.sh'), masterRunScriptContent, { encoding: 'utf8' }); + } + } + // Write file content ( parameter.cfg ) to local tmp folders + if (form !== undefined) { + await fs.promises.writeFile(path.join(trialLocalTempFolder, generateParamFileName(form.hyperParameters)), + form.hyperParameters.value, { encoding: 'utf8' }); + } + } + + private async prepareKubeflowConfig(trialJobId: string, trialWorkingFolder: string, kubeflowJobName: string): Promise { + if (this.kubeflowClusterConfig === undefined) { + throw new Error('Kubeflow Cluster config is not initialized'); + } + + if (this.kubeflowTrialConfig === undefined) { + throw new Error('Kubeflow trial config is not initialized'); + } + + // initialize kubeflow trial config to specific type + let kubeflowTrialConfig: any; + if (this.kubeflowClusterConfig.operator === 'tf-operator') { + kubeflowTrialConfig = this.kubeflowTrialConfig; + } else if (this.kubeflowClusterConfig.operator === 'pytorch-operator') { + kubeflowTrialConfig = this.kubeflowTrialConfig; + } else { + throw Error(`operator ${this.kubeflowClusterConfig.operator} is invalid`); + } + + const workerPodResources: any = {}; + if (kubeflowTrialConfig.worker !== undefined) { + workerPodResources.requests = this.generatePodResource(kubeflowTrialConfig.worker.memoryMB, kubeflowTrialConfig.worker.cpuNum, + kubeflowTrialConfig.worker.gpuNum); + } + workerPodResources.limits = {...workerPodResources.requests}; + + const nonWorkerResources: any = {}; + if (this.kubeflowClusterConfig.operator === 'tf-operator') { + const tensorflowTrialConfig: KubeflowTrialConfigTensorflow = this.kubeflowTrialConfig; + if (tensorflowTrialConfig.ps !== 
undefined) { + nonWorkerResources.requests = this.generatePodResource(tensorflowTrialConfig.ps.memoryMB, tensorflowTrialConfig.ps.cpuNum, + tensorflowTrialConfig.ps.gpuNum); + nonWorkerResources.limits = {...nonWorkerResources.requests}; + } + } else if (this.kubeflowClusterConfig.operator === 'pytorch-operator') { + const pyTorchTrialConfig: KubeflowTrialConfigPytorch = this.kubeflowTrialConfig; + nonWorkerResources.requests = this.generatePodResource(pyTorchTrialConfig.master.memoryMB, pyTorchTrialConfig.master.cpuNum, + pyTorchTrialConfig.master.gpuNum); + nonWorkerResources.limits = {...nonWorkerResources.requests}; + } + + // Generate kubeflow job resource config object + const kubeflowJobConfig: any = await this.generateKubeflowJobConfig(trialJobId, trialWorkingFolder, kubeflowJobName, workerPodResources, + nonWorkerResources); + + return Promise.resolve(kubeflowJobConfig); + } + + /** + * Generate kubeflow resource config file + * @param trialJobId trial job id + * @param trialWorkingFolder working folder + * @param kubeflowJobName job name + * @param workerPodResources worker pod template + * @param nonWorkerPodResources non-worker pod template, like ps or master + */ + private async generateKubeflowJobConfig(trialJobId: string, trialWorkingFolder: string, kubeflowJobName: string, workerPodResources: any, + nonWorkerPodResources?: any): Promise { + if (this.kubeflowClusterConfig === undefined) { + throw new Error('Kubeflow Cluster config is not initialized'); + } + + if (this.kubeflowTrialConfig === undefined) { + throw new Error('Kubeflow trial config is not initialized'); + } + + if (this.kubernetesCRDClient === undefined) { + throw new Error('Kubeflow operator client is not initialized'); + } + + const replicaSpecsObj: any = {}; + const replicaSpecsObjMap: Map = new Map(); + if (this.kubeflowTrialConfig.operatorType === 'tf-operator') { + const tensorflowTrialConfig: KubeflowTrialConfigTensorflow = this.kubeflowTrialConfig; + const privateRegistrySecretName = await this.createRegistrySecret(tensorflowTrialConfig.worker.privateRegistryAuthPath); + replicaSpecsObj.Worker = this.generateReplicaConfig(trialWorkingFolder, tensorflowTrialConfig.worker.replicas, + tensorflowTrialConfig.worker.image, 'run_worker.sh', workerPodResources, privateRegistrySecretName); + if (tensorflowTrialConfig.ps !== undefined) { + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(tensorflowTrialConfig.ps.privateRegistryAuthPath); + replicaSpecsObj.Ps = this.generateReplicaConfig(trialWorkingFolder, tensorflowTrialConfig.ps.replicas, + tensorflowTrialConfig.ps.image, 'run_ps.sh', nonWorkerPodResources, privateRegistrySecretName); + } + replicaSpecsObjMap.set(this.kubernetesCRDClient.jobKind, {tfReplicaSpecs: replicaSpecsObj}); + } else if (this.kubeflowTrialConfig.operatorType === 'pytorch-operator') { + const pytorchTrialConfig: KubeflowTrialConfigPytorch = this.kubeflowTrialConfig; + if (pytorchTrialConfig.worker !== undefined) { + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(pytorchTrialConfig.worker.privateRegistryAuthPath); + replicaSpecsObj.Worker = this.generateReplicaConfig(trialWorkingFolder, pytorchTrialConfig.worker.replicas, + pytorchTrialConfig.worker.image, 'run_worker.sh', workerPodResources, privateRegistrySecretName); + } + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(pytorchTrialConfig.master.privateRegistryAuthPath); + replicaSpecsObj.Master = 
this.generateReplicaConfig(trialWorkingFolder, pytorchTrialConfig.master.replicas, + pytorchTrialConfig.master.image, 'run_master.sh', nonWorkerPodResources, privateRegistrySecretName); + + replicaSpecsObjMap.set(this.kubernetesCRDClient.jobKind, {pytorchReplicaSpecs: replicaSpecsObj}); + } + + return Promise.resolve({ + apiVersion: `kubeflow.org/${this.kubernetesCRDClient.apiVersion}`, + kind: this.kubernetesCRDClient.jobKind, + metadata: { + name: kubeflowJobName, + namespace: 'default', + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: getExperimentId(), + trialId: trialJobId + } + }, + spec: replicaSpecsObjMap.get(this.kubernetesCRDClient.jobKind) + }); + } + + /** + * Generate tf-operator's tfjobs replica config section + * @param trialWorkingFolder trial working folder + * @param replicaNumber replica number + * @param replicaImage image + * @param runScriptFile script file name + * @param podResources pod resource config section + */ + private generateReplicaConfig(trialWorkingFolder: string, replicaNumber: number, replicaImage: string, runScriptFile: string, + podResources: any, privateRegistrySecretName: string | undefined): any { + if (this.kubeflowClusterConfig === undefined) { + throw new Error('Kubeflow Cluster config is not initialized'); + } + + if (this.kubeflowTrialConfig === undefined) { + throw new Error('Kubeflow trial config is not initialized'); + } + + if (this.kubernetesCRDClient === undefined) { + throw new Error('Kubeflow operator client is not initialized'); + } + // The config spec for volume field + const volumeSpecMap: Map = new Map(); + if (this.kubeflowClusterConfig.storageType === 'azureStorage') { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + azureFile: { + secretName: `${this.azureStorageSecretName}`, + shareName: `${this.azureStorageShare}`, + readonly: false + } + }]); + } else { + const nfsKubeflowClusterConfig: KubeflowClusterConfigNFS = this.kubeflowClusterConfig; + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + nfs: { + server: `${nfsKubeflowClusterConfig.nfs.server}`, + path: `${nfsKubeflowClusterConfig.nfs.path}` + } + }]); + } + // The config spec for container field + const containersSpecMap: Map = new Map(); + containersSpecMap.set('containers', [ + { + // Kubeflow tensorflow operator requires that containers' name must be tensorflow + // TODO: change the name based on operator's type + name: this.kubernetesCRDClient.containerName, + image: replicaImage, + args: ['sh', `${path.join(trialWorkingFolder, runScriptFile)}`], + volumeMounts: [ + { + name: 'nni-vol', + mountPath: this.CONTAINER_MOUNT_PATH + }], + resources: podResources + } + ]); + const spec: any = { + containers: containersSpecMap.get('containers'), + restartPolicy: 'ExitCode', + volumes: volumeSpecMap.get('nniVolumes') + } + if (privateRegistrySecretName) { + spec.imagePullSecrets = [ + { + name: privateRegistrySecretName + }] + } + return { + replicas: replicaNumber, + template: { + metadata: { + creationTimestamp: null + }, + spec: spec + } + } + } + + public async updateTrialJob(_1: any, _2: any): Promise { + throw new Error('not supported'); + } +} +export { KubeflowTrainingService }; diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesApiClient.ts b/ts/nni_manager/training_service/kubernetes/kubernetesApiClient.ts new file mode 100644 index 0000000000000000000000000000000000000000..f4a735822b4329e9c0fd2e54f2423563ca006980 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesApiClient.ts @@ -0,0 +1,238 @@ 
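Putting `generateKubeflowJobConfig` and `generateReplicaConfig` together, the custom resource submitted to the cluster has roughly the following shape. This is a hedged sketch for the tf-operator case; all concrete values (names, ids, counts) are hypothetical:

```typescript
// Approximate manifest produced for a tf-operator trial; values illustrative only.
const exampleTfJob = {
    apiVersion: 'kubeflow.org/v1',
    kind: 'TFJob',
    metadata: {
        name: 'nni-exp-abc123-trial-x1y2z',
        namespace: 'default',
        labels: { app: 'nni-kubernetes-trial', expId: 'abc123', trialId: 'x1y2z' }
    },
    spec: {
        tfReplicaSpecs: {
            Worker: {
                replicas: 2,
                template: {
                    metadata: { creationTimestamp: null },
                    spec: {
                        containers: [/* name, image, args: ['sh', '.../run_worker.sh'], resources */],
                        restartPolicy: 'ExitCode',
                        volumes: [/* 'nni-vol', backed by nfs or azureFile */]
                    }
                }
            }
            // Ps: { ... } is added when a parameter-server section is configured;
            // pytorch-operator uses pytorchReplicaSpecs with Master/Worker instead.
        }
    }
};
```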
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// eslint-disable-next-line @typescript-eslint/camelcase
+import {Client1_10, config} from 'kubernetes-client';
+import {getLogger, Logger} from 'common/log';
+
+/**
+ * This function uses the environment variable
+ * 'KUBERNETES_SERVICE_HOST' to determine whether
+ * the code is running from within a kubernetes container.
+ * If it is, it returns the in-cluster config
+ * instead of the kubeconfig.
+ */
+function getKubernetesConfig(): any {
+    if ('KUBERNETES_SERVICE_HOST' in process.env) {
+        return config.getInCluster();
+    } else {
+        return config.fromKubeconfig();
+    }
+}
+
+/**
+ * Generic Kubernetes client, target version >= 1.9
+ */
+class GeneralK8sClient {
+    protected readonly client: any;
+    protected readonly log: Logger = getLogger('GeneralK8sClient');
+    protected namespace: string = 'default';
+
+    constructor() {
+        this.client = new Client1_10({config: getKubernetesConfig(), version: '1.9'});
+        this.client.loadSpec();
+    }
+
+    public set setNamespace(namespace: string) {
+        this.namespace = namespace;
+    }
+    public get getNamespace(): string {
+        return this.namespace;
+    }
+
+    private matchStorageClass(response: any): string {
+        const adlSupportedProvisioners: RegExp[] = [
+            new RegExp("microk8s.io/hostpath"),
+            new RegExp(".*cephfs.csi.ceph.com"),
+            new RegExp(".*azure.*"),
+            new RegExp("\\b" + "efs" + "\\b")
+        ];
+        const templateLen = adlSupportedProvisioners.length,
+            responseLen = response.items.length;
+        // The inner loop index must restart for every storage class; otherwise only
+        // the first item would be checked against all provisioner patterns.
+        for (let i = 0; i < responseLen; i++) {
+            const provisioner: string = response.items[i].provisioner;
+            for (let j = 0; j < templateLen; j++) {
+                if (provisioner.match(adlSupportedProvisioners[j])) {
+                    return response.items[i].metadata.name;
+                }
+            }
+        }
+        return "Not Found!";
+    }
+
+    public async getStorageClass(): Promise<string> {
+        let result: Promise<string>;
+        const response: any = await this.client.apis["storage.k8s.io"].v1beta1.storageclasses.get();
+        const storageClassType: string = this.matchStorageClass(response.body);
+        if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) {
+            if (storageClassType != "Not Found!") {
+                result = Promise.resolve(storageClassType);
+            }
+            else {
+                result = Promise.reject("No StorageClasses are supported!");
+            }
+        } else {
+            result = Promise.reject(`List storageclasses failed, statusCode is ${response.statusCode}`);
+        }
+        return result;
+    }
+
+    public async createDeployment(deploymentManifest: any): Promise<string> {
+        let result: Promise<string>;
+        const response: any = await this.client.apis.apps.v1.namespaces(this.namespace)
+            .deployments.post({body: deploymentManifest});
+        if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) {
+            result = Promise.resolve(response.body.metadata.uid);
+        } else {
+            result = Promise.reject(`Create deployment failed, statusCode is ${response.statusCode}`);
+        }
+        return result;
+    }
+
+    public async deleteDeployment(deploymentName: string): Promise<boolean> {
+        let result: Promise<boolean>;
+        // TODO: change this hard coded deployment name after demo
+        const response: any = await this.client.apis.apps.v1.namespaces(this.namespace)
+            .deployment(deploymentName).delete();
+        if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) {
+            result = Promise.resolve(true);
+        } else {
+            result = Promise.reject(`Delete deployment failed, statusCode is ${response.statusCode}`);
+        }
+        return result;
+    }
+
+    public async createConfigMap(configMapManifest: any): Promise<boolean> {
+        let result: Promise<boolean>;
+        const
response: any = await this.client.api.v1.namespaces(this.namespace) + .configmaps.post({body: configMapManifest}); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(true); + } else { + result = Promise.reject(`Create configMap failed, statusCode is ${response.statusCode}`); + } + + return result; + } + + public async createPersistentVolumeClaim(pvcManifest: any): Promise { + let result: Promise; + const response: any = await this.client.api.v1.namespaces(this.namespace) + .persistentvolumeclaims.post({body: pvcManifest}); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(true); + } else { + result = Promise.reject(`Create pvc failed, statusCode is ${response.statusCode}`); + } + return result; + } + + public async createSecret(secretManifest: any): Promise { + let result: Promise; + const response: any = await this.client.api.v1.namespaces(this.namespace) + .secrets.post({body: secretManifest}); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(true); + } else { + result = Promise.reject(`Create secrets failed, statusCode is ${response.statusCode}`); + } + + return result; + } +} + +/** + * Kubernetes CRD client + */ +abstract class KubernetesCRDClient { + protected readonly client: any; + protected readonly log: Logger = getLogger('KubernetesCRDClient'); + protected crdSchema: any; + + constructor() { + this.client = new Client1_10({config: getKubernetesConfig()}); + this.client.loadSpec(); + } + + protected abstract get operator(): any; + + public abstract get containerName(): string; + + public get jobKind(): string { + if (this.crdSchema + && this.crdSchema.spec + && this.crdSchema.spec.names + && this.crdSchema.spec.names.kind) { + return this.crdSchema.spec.names.kind; + } else { + throw new Error('KubeflowOperatorClient: getJobKind failed, kind is undefined in crd schema!'); + } + } + + public get apiVersion(): string { + if (this.crdSchema + && this.crdSchema.spec + && this.crdSchema.spec.version) { + return this.crdSchema.spec.version; + } else { + throw new Error('KubeflowOperatorClient: get apiVersion failed, version is undefined in crd schema!'); + } + } + + public async createKubernetesJob(jobManifest: any): Promise { + let result: Promise; + const response: any = await this.operator.post({body: jobManifest}); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(true); + } else { + result = Promise.reject(`KubernetesApiClient createKubernetesJob failed, statusCode is ${response.statusCode}`); + } + + return result; + } + + //TODO : replace any + public async getKubernetesJob(kubeflowJobName: string): Promise { + let result: Promise; + const response: any = await this.operator(kubeflowJobName) + .get(); + if (response.statusCode && (response.statusCode >= 200 && response.statusCode <= 299)) { + result = Promise.resolve(response.body); + } else { + result = Promise.reject(`KubernetesApiClient getKubernetesJob failed, statusCode is ${response.statusCode}`); + } + + return result; + } + + public async deleteKubernetesJob(labels: Map): Promise { + let result: Promise; + // construct match query from labels for deleting tfjob + const matchQuery: string = Array.from(labels.keys()) + .map((labelKey: string) => `${labelKey}=${labels.get(labelKey)}`) + .join(','); + try { + const deleteResult: any = await this.operator() + 
.delete({ + qs: { + labelSelector: matchQuery, + propagationPolicy: 'Background' + } + }); + if (deleteResult.statusCode && deleteResult.statusCode >= 200 && deleteResult.statusCode <= 299) { + result = Promise.resolve(true); + } else { + result = Promise.reject( + `KubernetesApiClient, delete labels ${matchQuery} get wrong statusCode ${deleteResult.statusCode}`); + } + } catch (err) { + result = Promise.reject(err); + } + + return result; + } +} + +export {KubernetesCRDClient, GeneralK8sClient}; diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesConfig.ts b/ts/nni_manager/training_service/kubernetes/kubernetesConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..e13921a841aef521162bf1f8259e92398a148dac --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesConfig.ts @@ -0,0 +1,240 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export type KubernetesStorageKind = 'nfs' | 'azureStorage' | 'pvc'; +import {MethodNotImplementedError} from 'common/errors'; + +export abstract class KubernetesClusterConfig { + public readonly storage?: KubernetesStorageKind; + public readonly apiVersion: string; + public readonly namespace?: string; + + constructor(apiVersion: string, storage?: KubernetesStorageKind, namespace?: string) { + this.storage = storage; + this.apiVersion = apiVersion; + this.namespace = namespace + } + + public get storageType(): KubernetesStorageKind { + throw new MethodNotImplementedError(); + } +} + +export class StorageConfig { + public readonly storage?: KubernetesStorageKind; + + constructor(storage?: KubernetesStorageKind) { + this.storage = storage; + } +} + +export class KubernetesClusterConfigNFS extends KubernetesClusterConfig { + public readonly nfs: NFSConfig; + + constructor( + apiVersion: string, + nfs: NFSConfig, + storage?: KubernetesStorageKind, + namespace?: string + ) { + super(apiVersion, storage, namespace); + this.nfs = nfs; + } + + public get storageType(): KubernetesStorageKind { + return 'nfs'; + } + + public static getInstance(jsonObject: object): KubernetesClusterConfigNFS { + const kubernetesClusterConfigObjectNFS: KubernetesClusterConfigNFS = jsonObject; + + return new KubernetesClusterConfigNFS( + kubernetesClusterConfigObjectNFS.apiVersion, + kubernetesClusterConfigObjectNFS.nfs, + kubernetesClusterConfigObjectNFS.storage, + kubernetesClusterConfigObjectNFS.namespace + ); + } +} + +export class KubernetesClusterConfigAzure extends KubernetesClusterConfig { + public readonly keyVault: KeyVaultConfig; + public readonly azureStorage: AzureStorage; + public readonly uploadRetryCount: number | undefined; + + constructor( + apiVersion: string, + keyVault: KeyVaultConfig, + azureStorage: AzureStorage, + storage?: KubernetesStorageKind, + uploadRetryCount?: number, + namespace?: string, + + ) { + super(apiVersion, storage, namespace); + this.keyVault = keyVault; + this.azureStorage = azureStorage; + this.uploadRetryCount = uploadRetryCount; + } + + public get storageType(): KubernetesStorageKind { + return 'azureStorage'; + } + + public static getInstance(jsonObject: object): KubernetesClusterConfigAzure { + const kubernetesClusterConfigObjectAzure: KubernetesClusterConfigAzure = jsonObject; + + return new KubernetesClusterConfigAzure( + kubernetesClusterConfigObjectAzure.apiVersion, + kubernetesClusterConfigObjectAzure.keyVault, + kubernetesClusterConfigObjectAzure.azureStorage, + kubernetesClusterConfigObjectAzure.storage, + 
kubernetesClusterConfigObjectAzure.uploadRetryCount,
+            kubernetesClusterConfigObjectAzure.namespace
+        );
+    }
+}
+
+export class KubernetesClusterConfigPVC extends KubernetesClusterConfig {
+    public readonly pvc: PVCConfig;
+
+    constructor(
+        apiVersion: string,
+        pvc: PVCConfig,
+        storage?: KubernetesStorageKind,
+        namespace?: string,
+    ) {
+        super(apiVersion, storage, namespace);
+        this.pvc = pvc;
+    }
+
+    public get storageType(): KubernetesStorageKind {
+        return 'pvc';
+    }
+
+    public static getInstance(jsonObject: object): KubernetesClusterConfigPVC {
+        const kubernetesClusterConfigObjectPVC: KubernetesClusterConfigPVC =
+            jsonObject;
+        return new KubernetesClusterConfigPVC(
+            kubernetesClusterConfigObjectPVC.apiVersion,
+            kubernetesClusterConfigObjectPVC.pvc,
+            kubernetesClusterConfigObjectPVC.storage,
+            kubernetesClusterConfigObjectPVC.namespace
+        );
+    }
+}
+export class KubernetesClusterConfigFactory {
+    public static generateKubernetesClusterConfig(jsonObject: object): KubernetesClusterConfig {
+        const storageConfig: StorageConfig = jsonObject;
+        switch (storageConfig.storage) {
+            case 'azureStorage':
+                return KubernetesClusterConfigAzure.getInstance(jsonObject);
+            case 'pvc':
+                return KubernetesClusterConfigPVC.getInstance(jsonObject);
+            case 'nfs':
+            case undefined:
+                return KubernetesClusterConfigNFS.getInstance(jsonObject);
+            default:
+                throw new Error(`Invalid json object ${jsonObject}`);
+        }
+    }
+}
+
+/**
+ * NFS configuration to store Kubeflow job related files
+ */
+export class NFSConfig {
+    // IP address of the NFS server
+    public readonly server: string;
+    // exported NFS path on the NFS server
+    public readonly path: string;
+
+    constructor(server: string, path: string) {
+        this.server = server;
+        this.path = path;
+    }
+}
+
+/**
+ * PVC configuration to store Kubernetes job related files
+ */
+export class PVCConfig {
+    // Path of the mounted pvc
+    public readonly path: string;
+
+    constructor(path: string) {
+        this.path = path;
+    }
+}
+
+/**
+ * KeyVault configuration to store the key of Azure Storage Service
+ * Refer https://docs.microsoft.com/en-us/azure/key-vault/key-vault-manage-with-cli2
+ */
+export class KeyVaultConfig {
+    // The vault-name to specify vault
+    public readonly vaultName: string;
+    // The name to specify private key
+    public readonly name: string;
+
+    constructor(vaultName: string, name: string) {
+        this.vaultName = vaultName;
+        this.name = name;
+    }
+}
+
+/**
+ * Azure Storage Service
+ */
+export class AzureStorage {
+    // The Azure file share used to store files
+    public readonly azureShare: string;
+
+    // The account name of the storage service
+    public readonly accountName: string;
+    constructor(azureShare: string, accountName: string) {
+        this.azureShare = azureShare;
+        this.accountName = accountName;
+    }
+}
+
+/**
+ * Trial job configuration for Kubernetes
+ */
+export class KubernetesTrialConfigTemplate {
+    // CPU number
+    public readonly cpuNum: number;
+
+    // Memory
+    public readonly memoryMB: number;
+
+    // Docker image
+    public readonly image: string;
+
+    // Private registry config file path used to download the Docker image
+    public readonly privateRegistryAuthPath?: string;
+
+    // Trial command
+    public readonly command: string;
+
+    // Required GPU number for trial job.
The number should be in [0,100] + public readonly gpuNum: number; + + constructor(command: string, gpuNum: number, + cpuNum: number, memoryMB: number, image: string, privateRegistryAuthPath?: string) { + this.command = command; + this.gpuNum = gpuNum; + this.cpuNum = cpuNum; + this.memoryMB = memoryMB; + this.image = image; + this.privateRegistryAuthPath = privateRegistryAuthPath; + } +} + +export class KubernetesTrialConfig { + public readonly codeDir: string; + + constructor(codeDir: string) { + this.codeDir = codeDir; + } +} diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesData.ts b/ts/nni_manager/training_service/kubernetes/kubernetesData.ts new file mode 100644 index 0000000000000000000000000000000000000000..baa76d514d0402793b3114b5401d1e750c942649 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesData.ts @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TrialJobApplicationForm, TrialJobDetail, TrialJobStatus } from 'common/trainingService'; + +/** + * KubeflowTrialJobDetail + */ +export class KubernetesTrialJobDetail implements TrialJobDetail { + public id: string; + public status: TrialJobStatus; + public message?: string; + public submitTime: number; + public startTime?: number; + public endTime?: number; + public tags?: string[]; + public url?: string; + public workingDirectory: string; + public form: TrialJobApplicationForm; + public kubernetesJobName: string; + public queryJobFailedCount: number; + + constructor(id: string, status: TrialJobStatus, submitTime: number, + workingDirectory: string, form: TrialJobApplicationForm, + kubernetesJobName: string, url: string) { + this.id = id; + this.status = status; + this.message = 'Pending for creating the trial job.'; + this.submitTime = submitTime; + this.workingDirectory = workingDirectory; + this.form = form; + this.kubernetesJobName = kubernetesJobName; + this.tags = []; + this.queryJobFailedCount = 0; + this.url = url; + } +} + +export const kubernetesScriptFormat: string = +`#!/bin/bash +export NNI_PLATFORM={0} +export NNI_SYS_DIR={1} +export NNI_OUTPUT_DIR={2} +export MULTI_PHASE=false +export NNI_TRIAL_JOB_ID={3} +export NNI_EXP_ID={4} +export NNI_CODE_DIR={5} +export NNI_TRIAL_SEQ_ID={6} +{7} +mkdir -p $NNI_SYS_DIR/code +mkdir -p $NNI_OUTPUT_DIR +cp -r $NNI_CODE_DIR/. $NNI_SYS_DIR/code +sh $NNI_SYS_DIR/install_nni.sh +cd $NNI_SYS_DIR/code +python3 -m nni.tools.trial_tool.trial_keeper --trial_command '{8}' --nnimanager_ip {9} --nnimanager_port {10} \ +--nni_manager_version '{11}' --log_collection '{12}' 1>$NNI_OUTPUT_DIR/trialkeeper_stdout 2>$NNI_OUTPUT_DIR/trialkeeper_stderr`; diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesJobInfoCollector.ts b/ts/nni_manager/training_service/kubernetes/kubernetesJobInfoCollector.ts new file mode 100644 index 0000000000000000000000000000000000000000..1a6ade5e31cf8c5b8996a104ba7c4d301724afb6 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesJobInfoCollector.ts @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
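The `kubernetesScriptFormat` template above is instantiated with `String.Format` from typescript-string-operations (see `generateRunScript` in kubernetesTrainingService.ts later in this diff). A minimal sketch of the call; every argument value here is hypothetical, and the import path assumes a file next to kubernetesData.ts:

```typescript
import { String } from 'typescript-string-operations';
import { kubernetesScriptFormat } from './kubernetesData';

// Arguments follow the {0}..{12} placeholders of the template.
const runScript: string = String.Format(
    kubernetesScriptFormat,
    'kubeflow',                                          // {0} NNI_PLATFORM
    '/tmp/mount/nni/abc123/x1y2z',                       // {1} NNI_SYS_DIR (trial working folder)
    '/tmp/mount/nni/abc123/x1y2z/output/worker_output',  // {2} NNI_OUTPUT_DIR
    'x1y2z',                                             // {3} trial job id
    'abc123',                                            // {4} experiment id
    '/tmp/mount/nni/abc123/nni-code',                    // {5} shared code dir
    '0',                                                 // {6} trial sequence id
    'export HIP_VISIBLE_DEVICES=',                       // {7} GPU visibility line (used when gpuNum is 0)
    'python3 mnist.py',                                  // {8} trial command
    '10.0.0.4',                                          // {9} NNI manager IP
    8081,                                                // {10} NNI manager port
    'v2.0',                                              // {11} version-check string
    'none'                                               // {12} log collection mode
);
```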
+ +import assert from 'assert'; +import { MethodNotImplementedError, NNIError, NNIErrorNames } from 'common/errors'; +import { getLogger, Logger } from 'common/log'; +import { TrialJobStatus } from 'common/trainingService'; +import { KubernetesCRDClient } from './kubernetesApiClient'; +import { KubernetesTrialJobDetail } from './kubernetesData'; + +/** + * Collector Kubeflow jobs info from Kubernetes cluster, and update kubeflow job status locally + */ +export class KubernetesJobInfoCollector { + protected readonly trialJobsMap: Map; + protected readonly log: Logger = getLogger('KubernetesJobInfoCollector'); + protected readonly statusesNeedToCheck: TrialJobStatus[]; + + constructor(jobMap: Map) { + this.trialJobsMap = jobMap; + this.statusesNeedToCheck = ['RUNNING', 'WAITING']; + } + + public async retrieveTrialStatus(kubernetesCRDClient: KubernetesCRDClient | undefined): Promise { + assert(kubernetesCRDClient !== undefined); + const updateKubernetesTrialJobs: Promise[] = []; + for (const [trialJobId, kubernetesTrialJob] of this.trialJobsMap) { + if (kubernetesTrialJob === undefined) { + throw new NNIError(NNIErrorNames.NOT_FOUND, `trial job id ${trialJobId} not found`); + } + updateKubernetesTrialJobs.push(this.retrieveSingleTrialJobInfo(kubernetesCRDClient, kubernetesTrialJob)); + } + return Promise.all(updateKubernetesTrialJobs); + } + + protected async retrieveSingleTrialJobInfo(_kubernetesCRDClient: KubernetesCRDClient | undefined, + _kubernetesTrialJob: KubernetesTrialJobDetail): Promise { + throw new MethodNotImplementedError(); + } +} diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesJobRestServer.ts b/ts/nni_manager/training_service/kubernetes/kubernetesJobRestServer.ts new file mode 100644 index 0000000000000000000000000000000000000000..cfa2d2f3de4e3dfcf02bdb075d04e384cc6030db --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesJobRestServer.ts @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Inject } from 'typescript-ioc'; +import * as component from 'common/component'; +import { ClusterJobRestServer } from '../common/clusterJobRestServer'; +import { KubernetesTrainingService } from './kubernetesTrainingService'; + +/** + * Kubeflow Training service Rest server, provides rest API to support kubeflow job metrics update + * + */ +@component.Singleton +export class KubernetesJobRestServer extends ClusterJobRestServer { + @Inject + private readonly kubernetesTrainingService? : KubernetesTrainingService; + /** + * constructor to provide NNIRestServer's own rest property, e.g. 
port + */ + constructor(kubernetesTrainingService: KubernetesTrainingService) { + super(); + this.kubernetesTrainingService = kubernetesTrainingService; + } + + protected handleTrialMetrics(jobId: string, metrics: any[]): void { + if (this.kubernetesTrainingService === undefined) { + throw Error('kubernetesTrainingService not initialized!'); + } + // Split metrics array into single metric, then emit + // Warning: If not split metrics into single ones, the behavior will be UNKNOWN + for (const singleMetric of metrics) { + this.kubernetesTrainingService.MetricsEmitter.emit('metric', { + id : jobId, + data : singleMetric + }); + } + } +} diff --git a/ts/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts b/ts/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..17ccde336b1fe7f9f502a81ce9bc91bf94c081c2 --- /dev/null +++ b/ts/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts @@ -0,0 +1,403 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cpp from 'child-process-promise'; +import path from 'path'; + +import azureStorage from 'azure-storage'; +import {EventEmitter} from 'events'; +import {Base64} from 'js-base64'; +import {String} from 'typescript-string-operations'; +import {getExperimentId} from 'common/experimentStartupInfo'; +import {getLogger, Logger} from 'common/log'; +import {MethodNotImplementedError} from 'common/errors'; +import { + NNIManagerIpConfig, TrialJobDetail, TrialJobMetric +} from 'common/trainingService'; +import {delay, getExperimentRootDir, getIPV4Address, getJobCancelStatus, getVersion, uniqueString} from 'common/utils'; +import {AzureStorageClientUtility} from './azureStorageClientUtils'; +import {GeneralK8sClient, KubernetesCRDClient} from './kubernetesApiClient'; +import {KubernetesClusterConfig} from './kubernetesConfig'; +import {kubernetesScriptFormat, KubernetesTrialJobDetail} from './kubernetesData'; +import {KubernetesJobRestServer} from './kubernetesJobRestServer'; + +const fs = require('fs'); + +/** + * Training Service implementation for Kubernetes + */ +abstract class KubernetesTrainingService { + protected readonly NNI_KUBERNETES_TRIAL_LABEL: string = 'nni-kubernetes-trial'; + protected readonly log!: Logger; + protected readonly metricsEmitter: EventEmitter; + protected readonly trialJobsMap: Map; + // experiment root dir in NFS + protected readonly trialLocalTempFolder: string; + protected stopping: boolean = false; + protected experimentId!: string; + protected kubernetesRestServerPort?: number; + protected readonly CONTAINER_MOUNT_PATH: string; + protected azureStorageClient?: azureStorage.FileService; + protected azureStorageShare?: string; + protected azureStorageSecretName?: string; + protected azureStorageAccountName?: string; + protected nniManagerIpConfig?: NNIManagerIpConfig; + protected readonly genericK8sClient: GeneralK8sClient; + protected kubernetesCRDClient?: KubernetesCRDClient; + protected kubernetesJobRestServer?: KubernetesJobRestServer; + protected kubernetesClusterConfig?: KubernetesClusterConfig; + protected versionCheck: boolean = true; + protected logCollection: string; + protected copyExpCodeDirPromise?: Promise; + protected expContainerCodeFolder: string; + + constructor() { + this.log = getLogger('KubernetesTrainingService'); + this.metricsEmitter = new EventEmitter(); + this.trialJobsMap = new Map(); + this.trialLocalTempFolder = path.join(getExperimentRootDir(), 
'trials-nfs-tmp'); + this.experimentId = getExperimentId(); + this.CONTAINER_MOUNT_PATH = '/tmp/mount'; + this.expContainerCodeFolder = path.join(this.CONTAINER_MOUNT_PATH, 'nni', this.experimentId, 'nni-code'); + this.genericK8sClient = new GeneralK8sClient(); + this.logCollection = 'none'; + } + + public generatePodResource(memory: number, cpuNum: number, gpuNum: number): any { + const resources: any = { + memory: `${memory}Mi`, + cpu: `${cpuNum}` + }; + + if (gpuNum !== 0) { + resources['nvidia.com/gpu'] = `${gpuNum}`; + } + + return resources; + } + + public async listTrialJobs(): Promise { + const jobs: TrialJobDetail[] = []; + + for (const key of this.trialJobsMap.keys()) { + jobs.push(await this.getTrialJob(key)); + } + + return Promise.resolve(jobs); + } + + public async getTrialJob(trialJobId: string): Promise { + + const kubernetesTrialJob: TrialJobDetail | undefined = this.trialJobsMap.get(trialJobId); + + if (kubernetesTrialJob === undefined) { + return Promise.reject(`trial job ${trialJobId} not found`); + } + + return Promise.resolve(kubernetesTrialJob); + } + + public async getTrialFile(_trialJobId: string, _filename: string): Promise { + throw new MethodNotImplementedError(); + } + + public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.on('metric', listener); + } + + public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.off('metric', listener); + } + + public get isMultiPhaseJobSupported(): boolean { + return false; + } + + public getClusterMetadata(_key: string): Promise { + return Promise.resolve(''); + } + + public get MetricsEmitter(): EventEmitter { + return this.metricsEmitter; + } + + public async cancelTrialJob(trialJobId: string, isEarlyStopped: boolean = false): Promise { + const trialJobDetail: KubernetesTrialJobDetail | undefined = this.trialJobsMap.get(trialJobId); + if (trialJobDetail === undefined) { + const errorMessage: string = `CancelTrialJob: trial job id ${trialJobId} not found`; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); + } + if (this.kubernetesCRDClient === undefined) { + const errorMessage: string = `CancelTrialJob: trial job id ${trialJobId} failed because operatorClient is undefined`; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); + } + + try { + await this.kubernetesCRDClient.deleteKubernetesJob(new Map( + [ + ['app', this.NNI_KUBERNETES_TRIAL_LABEL], + ['expId', getExperimentId()], + ['trialId', trialJobId] + ] + )); + } catch (err) { + const errorMessage: string = `Delete trial ${trialJobId} failed: ${err}`; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); + } + + trialJobDetail.endTime = Date.now(); + trialJobDetail.status = getJobCancelStatus(isEarlyStopped); + + return Promise.resolve(); + } + + public async cleanUp(): Promise { + this.stopping = true; + + // First, cancel all running kubernetes jobs + for (const [trialJobId, kubernetesTrialJob] of this.trialJobsMap) { + if (['RUNNING', 'WAITING', 'UNKNOWN'].includes(kubernetesTrialJob.status)) { + try { + await this.cancelTrialJob(trialJobId); + } catch (error) { + // DONT throw error during cleanup + } + kubernetesTrialJob.status = 'SYS_CANCELED'; + } + } + + // Delete all kubernetes jobs whose expId label is current experiment id + try { + if (this.kubernetesCRDClient !== undefined) { + await this.kubernetesCRDClient.deleteKubernetesJob(new Map( + [ + ['app', this.NNI_KUBERNETES_TRIAL_LABEL], + 
['expId', getExperimentId()] + ] + )); + } + } catch (error) { + this.log.error(`Delete kubernetes job with label: app=${this.NNI_KUBERNETES_TRIAL_LABEL},\ + expId=${getExperimentId()} failed, error is ${error}`); + } + + // Unmount NFS + try { + await cpp.exec(`sudo umount ${this.trialLocalTempFolder}`); + } catch (error) { + this.log.error(`Unmount ${this.trialLocalTempFolder} failed, error is ${error}`); + } + + // Stop kubernetes rest server + if (this.kubernetesJobRestServer === undefined) { + throw new Error('kubernetesJobRestServer not initialized!'); + } + try { + await this.kubernetesJobRestServer.stop(); + this.log.info('Kubernetes Training service rest server stopped successfully.'); + } catch (error) { + this.log.error(`Kubernetes Training service rest server stopped failed, error: ${error.message}`); + + return Promise.reject(error); + } + + return Promise.resolve(); + } + + protected async createAzureStorage(vaultName: string, valutKeyName: string): Promise { + try { + const result: any = await cpp.exec(`az keyvault secret show --name ${valutKeyName} --vault-name ${vaultName}`); + if (result.stderr) { + const errorMessage: string = result.stderr; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); + } + const storageAccountKey: any = JSON.parse(result.stdout).value; + if (this.azureStorageAccountName === undefined) { + throw new Error('azureStorageAccountName not initialized!'); + } + //create storage client + this.azureStorageClient = azureStorage.createFileService(this.azureStorageAccountName, storageAccountKey); + await AzureStorageClientUtility.createShare(this.azureStorageClient, this.azureStorageShare); + //create sotrage secret + this.azureStorageSecretName = String.Format('nni-secret-{0}', uniqueString(8) + .toLowerCase()); + + const namespace = this.genericK8sClient.getNamespace ? this.genericK8sClient.getNamespace : "default" + await this.genericK8sClient.createSecret( + { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: this.azureStorageSecretName, + namespace: namespace, + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: getExperimentId() + } + }, + type: 'Opaque', + data: { + azurestorageaccountname: Base64.encode(this.azureStorageAccountName), + azurestorageaccountkey: Base64.encode(storageAccountKey) + } + } + ); + } catch (error) { + this.log.error(error); + + return Promise.reject(error); + } + + return Promise.resolve(); + } + + /** + * Genereate run script for different roles(like worker or ps) + * @param trialJobId trial job id + * @param trialWorkingFolder working folder + * @param command command + * @param trialSequenceId sequence id + */ + protected async generateRunScript(platform: string, trialJobId: string, trialWorkingFolder: string, + command: string, trialSequenceId: string, roleName: string, gpuNum: number): Promise { + let nvidiaScript: string = ''; + // Nvidia devcie plugin for K8S has a known issue that requesting zero GPUs allocates all GPUs + // Refer https://github.com/NVIDIA/k8s-device-plugin/issues/61 + // So we have to explicitly set HIP_VISIBLE_DEVICES to empty if user sets gpuNum to 0 in NNI config file + if (gpuNum === 0) { + nvidiaScript = 'export HIP_VISIBLE_DEVICES='; + } + const nniManagerIp: string = this.nniManagerIpConfig ? this.nniManagerIpConfig.nniManagerIp : await getIPV4Address(); + const version: string = this.versionCheck ? 
await getVersion() : ''; + const runScript: string = String.Format( + kubernetesScriptFormat, + platform, + trialWorkingFolder, + path.join(trialWorkingFolder, 'output', `${roleName}_output`), + trialJobId, + getExperimentId(), + this.expContainerCodeFolder, + trialSequenceId, + nvidiaScript, + command, + nniManagerIp, + this.kubernetesRestServerPort, + version, + this.logCollection + ); + + return Promise.resolve(runScript); + } + protected async createNFSStorage(nfsServer: string, nfsPath: string): Promise { + await cpp.exec(`mkdir -p ${this.trialLocalTempFolder}`); + try { + await cpp.exec(`sudo mount ${nfsServer}:${nfsPath} ${this.trialLocalTempFolder}`); + } catch (error) { + const mountError: string = `Mount NFS ${nfsServer}:${nfsPath} to ${this.trialLocalTempFolder} failed, error is ${error}`; + this.log.error(mountError); + + return Promise.reject(mountError); + } + + return Promise.resolve(); + } + protected async createPVCStorage(pvcPath: string): Promise { + try { + await cpp.exec(`mkdir -p ${pvcPath}`); + await cpp.exec(`sudo ln -s ${pvcPath} ${this.trialLocalTempFolder}`); + } catch (error) { + const linkError: string = `Linking ${pvcPath} to ${this.trialLocalTempFolder} failed, error is ${error}`; + this.log.error(linkError); + + return Promise.reject(linkError); + } + + return Promise.resolve(); + } + + protected async createRegistrySecret(filePath: string | undefined): Promise { + if (filePath === undefined || filePath === '') { + return undefined; + } + const body = fs.readFileSync(filePath).toString('base64'); + const registrySecretName = String.Format('nni-secret-{0}', uniqueString(8) + .toLowerCase()); + const namespace = this.genericK8sClient.getNamespace ? this.genericK8sClient.getNamespace : "default" + await this.genericK8sClient.createSecret( + { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: registrySecretName, + namespace: namespace, + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: getExperimentId() + } + }, + type: 'kubernetes.io/dockerconfigjson', + data: { + '.dockerconfigjson': body + } + } + ); + return registrySecretName; + } + + /** + * upload local directory to azureStorage + * @param srcDirectory the source directory of local folder + * @param destDirectory the target directory in azure + * @param uploadRetryCount the retry time when upload failed + */ + protected async uploadFolderToAzureStorage(srcDirectory: string, destDirectory: string, uploadRetryCount: number | undefined): Promise { + if (this.azureStorageClient === undefined) { + throw new Error('azureStorageClient is not initialized'); + } + let retryCount: number = 1; + if (uploadRetryCount) { + retryCount = uploadRetryCount; + } + let uploadSuccess: boolean = false; + let folderUriInAzure = ''; + try { + do { + uploadSuccess = await AzureStorageClientUtility.uploadDirectory( + this.azureStorageClient, + `${destDirectory}`, + this.azureStorageShare, + `${srcDirectory}`); + if (!uploadSuccess) { + //wait for 5 seconds to re-upload files + await delay(5000); + this.log.info('Upload failed, Retry: upload files to azure-storage'); + } else { + folderUriInAzure = `https://${this.azureStorageAccountName}.file.core.windows.net/${this.azureStorageShare}/${destDirectory}`; + break; + } + } while (retryCount-- >= 0) + } catch (error) { + this.log.error(error); + //return a empty url when got error + return Promise.resolve(''); + } + return Promise.resolve(folderUriInAzure); + } + + public getTrialOutputLocalPath(_trialJobId: string): Promise { + throw new 
MethodNotImplementedError(); + } + + public fetchTrialOutput(_trialJobId: string, _subpath: string): Promise { + throw new MethodNotImplementedError(); + } +} +export {KubernetesTrainingService}; diff --git a/ts/nni_manager/training_service/local/gpuScheduler.ts b/ts/nni_manager/training_service/local/gpuScheduler.ts new file mode 100644 index 0000000000000000000000000000000000000000..32e1f1eb449447987fb054cd43aef0aa763f3cc3 --- /dev/null +++ b/ts/nni_manager/training_service/local/gpuScheduler.ts @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cpp from 'child-process-promise'; +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { getLogger, Logger } from 'common/log'; +import { delay } from 'common/utils'; +import { GPUInfo, GPUSummary } from '../common/gpuData'; +import { execKill, execMkdir, execRemove, execTail, runGpuMetricsCollector } from '../common/util'; + +/** + * GPUScheduler for local training service + */ +class GPUScheduler { + + private gpuSummary!: GPUSummary; + private stopping: boolean; + private readonly log: Logger; + private readonly gpuMetricCollectorScriptFolder: string; + + constructor() { + this.stopping = false; + this.log = getLogger('GPUScheduler'); + this.gpuMetricCollectorScriptFolder = `${os.tmpdir()}/${os.userInfo().username}/nni/script`; + } + + public async run(): Promise { + await this.runGpuMetricsCollectorScript(); + while (!this.stopping) { + try { + await this.updateGPUSummary(); + } catch (error) { + this.log.error('Read GPU summary failed with error: ', error); + } + if (this.gpuSummary !== undefined && this.gpuSummary.gpuCount === 0) { + throw new Error('GPU not available. Please check your HIP configuration'); + } + await delay(5000); + } + } + + public getAvailableGPUIndices(useActiveGpu: boolean | undefined, occupiedGpuIndexNumMap: Map): number[] { + if (this.gpuSummary !== undefined) { + if (process.platform === 'win32' || useActiveGpu) { + return this.gpuSummary.gpuInfos.map((info: GPUInfo) => info.index); + } else { + return this.gpuSummary.gpuInfos.filter((info: GPUInfo) => + occupiedGpuIndexNumMap.get(info.index) === undefined && info.activeProcessNum === 0 || + occupiedGpuIndexNumMap.get(info.index) !== undefined) + .map((info: GPUInfo) => info.index); + } + } + + return []; + } + + public getSystemGpuCount(): number | undefined{ + if (this.gpuSummary !== undefined) { + return this.gpuSummary.gpuCount; + } + + return undefined; + } + + public async stop(): Promise { + this.stopping = true; + try { + const pid: string = await fs.promises.readFile(path.join(this.gpuMetricCollectorScriptFolder, 'pid'), 'utf8'); + await execKill(pid); + await execRemove(this.gpuMetricCollectorScriptFolder); + } catch (error) { + this.log.error(`GPU scheduler error: ${error}`); + } + } + + /** + * Generate gpu metric collector shell script in local machine, + * used to run in remote machine, and will be deleted after uploaded from local. 
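+     * For the local training service the script simply runs in place and appends to the
+     * gpu_metrics file that updateGPUSummary() polls.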
+ */ + private async runGpuMetricsCollectorScript(): Promise { + await execMkdir(this.gpuMetricCollectorScriptFolder, true); + runGpuMetricsCollector(this.gpuMetricCollectorScriptFolder); + } + + private async updateGPUSummary(): Promise { + const gpuMetricPath: string = path.join(this.gpuMetricCollectorScriptFolder, 'gpu_metrics'); + if (fs.existsSync(gpuMetricPath)) { + const cmdresult: cpp.childProcessPromise.Result = await execTail(gpuMetricPath); + if (cmdresult !== undefined && cmdresult.stdout !== undefined) { + this.gpuSummary = JSON.parse(cmdresult.stdout); + } else { + this.log.error('Could not get gpu metrics information!'); + } + } else { + this.log.warning('gpu_metrics file does not exist!'); + } + } +} + +export { GPUScheduler }; diff --git a/ts/nni_manager/training_service/local/localTrainingService.ts b/ts/nni_manager/training_service/local/localTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..02763ab51d0abd95b63efc251d9a18d8dc2b4e2d --- /dev/null +++ b/ts/nni_manager/training_service/local/localTrainingService.ts @@ -0,0 +1,511 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cp from 'child_process'; +import { EventEmitter } from 'events'; +import fs from 'fs'; +import path from 'path'; +import ts from 'tail-stream'; +import tkill from 'tree-kill'; +import { NNIError, NNIErrorNames } from 'common/errors'; +import { getExperimentId } from 'common/experimentStartupInfo'; +import { getLogger, Logger } from 'common/log'; +import { powershellString } from 'common/shellUtils'; +import { + HyperParameters, TrainingService, TrialJobApplicationForm, + TrialJobDetail, TrialJobMetric, TrialJobStatus +} from 'common/trainingService'; +import { + delay, generateParamFileName, getExperimentRootDir, getJobCancelStatus, getNewLine, isAlive, uniqueString +} from 'common/utils'; +import { LocalConfig } from 'common/experimentConfig'; +import { execMkdir, execNewFile, getScriptName, runScript, setEnvironmentVariable } from '../common/util'; +import { GPUScheduler } from './gpuScheduler'; + +/** + * Decode a command + * @param Buffer binary incoming data + * @returns a tuple of (success, commandType, content, remain) + * success: true if the buffer contains at least one complete command; otherwise false + * remain: remaining data after the first command + */ +function decodeCommand(data: Buffer): [boolean, string, string, Buffer] { + if (data.length < 8) { + return [false, '', '', data]; + } + const commandType: string = data.slice(0, 2).toString(); + const contentLength: number = parseInt(data.slice(2, 8).toString(), 10); + if (data.length < contentLength + 8) { + return [false, '', '', data]; + } + const content: string = data.slice(8, contentLength + 8).toString(); + const remain: Buffer = data.slice(contentLength + 8); + + return [true, commandType, content, remain]; +} + +/** + * LocalTrialJobDetail + */ +class LocalTrialJobDetail implements TrialJobDetail { + public id: string; + public status: TrialJobStatus; + public submitTime: number; + public startTime?: number; + public endTime?: number; + public tags?: string[]; + public url?: string; + public workingDirectory: string; + public form: TrialJobApplicationForm; + public pid?: number; + public gpuIndices?: number[]; + + constructor( + id: string, status: TrialJobStatus, submitTime: number, + workingDirectory: string, form: TrialJobApplicationForm) { + this.id = id; + this.status = status; + this.submitTime = submitTime; + this.workingDirectory = 
workingDirectory;
+        this.form = form;
+        this.url = `file://localhost:${workingDirectory}`;
+        this.gpuIndices = [];
+    }
+}
+
+/**
+ * Local machine training service
+ */
+class LocalTrainingService implements TrainingService {
+    private readonly config: LocalConfig;
+    private readonly eventEmitter: EventEmitter;
+    private readonly jobMap: Map<string, LocalTrialJobDetail>;
+    private readonly jobQueue: string[];
+    private initialized: boolean;
+    private stopping: boolean;
+    private rootDir!: string;
+    private readonly experimentId!: string;
+    private gpuScheduler!: GPUScheduler;
+    private readonly occupiedGpuIndexNumMap: Map<number, number>;
+    private readonly log: Logger;
+    private readonly jobStreamMap: Map<string, ts.Stream>;
+
+    constructor(config: LocalConfig) {
+        this.config = config;
+        this.eventEmitter = new EventEmitter();
+        this.jobMap = new Map();
+        this.jobQueue = [];
+        this.stopping = false;
+        this.log = getLogger('LocalTrainingService');
+        this.experimentId = getExperimentId();
+        this.jobStreamMap = new Map();
+        this.log.info('Construct local machine training service.');
+        this.occupiedGpuIndexNumMap = new Map();
+
+        if (this.config.trialGpuNumber !== undefined && this.config.trialGpuNumber > 0) {
+            this.gpuScheduler = new GPUScheduler();
+        }
+
+        // Note: compare by length; an array literal is never strictly equal to another array.
+        if (this.config.gpuIndices !== undefined && this.config.gpuIndices.length === 0) {
+            throw new Error('gpuIndices cannot be empty when specified.');
+        }
+
+        this.rootDir = getExperimentRootDir();
+        if (!fs.existsSync(this.rootDir)) {
+            throw new Error('root dir not created');
+        }
+        this.initialized = true;
+    }
+
+    public async run(): Promise<void> {
+        this.log.info('Run local machine training service.');
+        const longRunningTasks: Promise<void>[] = [this.runJobLoop()];
+        if (this.gpuScheduler !== undefined) {
+            longRunningTasks.push(this.gpuScheduler.run());
+        }
+        await Promise.all(longRunningTasks);
+        this.log.info('Local machine training service exit.');
+    }
+
+    public async listTrialJobs(): Promise<TrialJobDetail[]> {
+        const jobs: TrialJobDetail[] = [];
+        for (const key of this.jobMap.keys()) {
+            const trialJob: TrialJobDetail = await this.getTrialJob(key);
+            jobs.push(trialJob);
+        }
+
+        return jobs;
+    }
+
+    public async getTrialJob(trialJobId: string): Promise<TrialJobDetail> {
+        const trialJob: LocalTrialJobDetail | undefined = this.jobMap.get(trialJobId);
+        if (trialJob === undefined) {
+            throw new NNIError(NNIErrorNames.NOT_FOUND, 'Trial job not found');
+        }
+        if (trialJob.status === 'RUNNING') {
+            const alive: boolean = await isAlive(trialJob.pid);
+            if (!alive) {
+                trialJob.endTime = Date.now();
+                this.setTrialJobStatus(trialJob, 'FAILED');
+                try {
+                    const state: string = await fs.promises.readFile(path.join(trialJob.workingDirectory, '.nni', 'state'), 'utf8');
+                    const match: RegExpMatchArray | null = state.trim()
+                        .match(/^(\d+)\s+(\d+)/);
+                    if (match !== null) {
+                        const { 1: code, 2: timestamp } = match;
+                        if (parseInt(code, 10) === 0) {
+                            this.setTrialJobStatus(trialJob, 'SUCCEEDED');
+                        }
+                        trialJob.endTime = parseInt(timestamp, 10);
+                    }
+                } catch (error) {
+                    //ignore
+                }
+                this.log.debug(`trialJob status update: ${trialJobId}, ${trialJob.status}`);
+            }
+        }
+
+        return trialJob;
+    }
+
+    public async getTrialFile(trialJobId: string, fileName: string): Promise<string | Buffer> {
+        // check filename here for security
+        if (!['trial.log', 'stderr', 'model.onnx', 'stdout'].includes(fileName)) {
+            throw new Error(`File not accessible: ${fileName}`);
+        }
+        let encoding: string | null = null;
+        if (!fileName.includes('.') || fileName.match(/.*\.(txt|log)/g)) {
+            encoding = 'utf8';
+        }
+        const logPath = path.join(this.rootDir, 'trials', trialJobId, fileName);
+        if (!fs.existsSync(logPath)) {
+            throw new
Error(`File not found: ${logPath}`); + } + return fs.promises.readFile(logPath, {encoding: encoding as any}); + } + + public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.eventEmitter.on('metric', listener); + } + + public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.eventEmitter.off('metric', listener); + } + + public submitTrialJob(form: TrialJobApplicationForm): Promise { + const trialJobId: string = uniqueString(5); + const trialJobDetail: LocalTrialJobDetail = new LocalTrialJobDetail( + trialJobId, + 'WAITING', + Date.now(), + path.join(this.rootDir, 'trials', trialJobId), + form + ); + this.jobQueue.push(trialJobId); + this.jobMap.set(trialJobId, trialJobDetail); + + this.log.debug('submitTrialJob: return:', trialJobDetail); + + return Promise.resolve(trialJobDetail); + } + + /** + * Update trial job for multi-phase + * @param trialJobId trial job id + * @param form job application form + */ + public async updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise { + const trialJobDetail: undefined | TrialJobDetail = this.jobMap.get(trialJobId); + if (trialJobDetail === undefined) { + throw new Error(`updateTrialJob failed: ${trialJobId} not found`); + } + await this.writeParameterFile(trialJobDetail.workingDirectory, form.hyperParameters); + + return trialJobDetail; + } + + public async cancelTrialJob(trialJobId: string, isEarlyStopped: boolean = false): Promise { + const trialJob: LocalTrialJobDetail | undefined = this.jobMap.get(trialJobId); + if (trialJob === undefined) { + throw new NNIError(NNIErrorNames.NOT_FOUND, 'Trial job not found'); + } + if (trialJob.pid === undefined) { + this.setTrialJobStatus(trialJob, 'USER_CANCELED'); + + return Promise.resolve(); + } + tkill(trialJob.pid, 'SIGTERM'); + this.setTrialJobStatus(trialJob, getJobCancelStatus(isEarlyStopped)); + + const startTime = Date.now(); + while(await isAlive(trialJob.pid)) { + if (Date.now() - startTime > 4999) { + tkill(trialJob.pid, 'SIGKILL', (err) => { + if (err) { + this.log.error(`kill trial job error: ${err}`); + } + }); + break; + } + await delay(500); + } + + return Promise.resolve(); + } + + public async setClusterMetadata(_key: string, _value: string): Promise { return; } + public async getClusterMetadata(_key: string): Promise { return ''; } + + public async cleanUp(): Promise { + this.log.info('Stopping local machine training service...'); + this.stopping = true; + for (const stream of this.jobStreamMap.values()) { + stream.end(0); + stream.emit('end'); + } + if (this.gpuScheduler !== undefined) { + await this.gpuScheduler.stop(); + } + + return Promise.resolve(); + } + + private onTrialJobStatusChanged(trialJob: LocalTrialJobDetail, oldStatus: TrialJobStatus): void { + //if job is not running, destory job stream + if (['SUCCEEDED', 'FAILED', 'USER_CANCELED', 'SYS_CANCELED', 'EARLY_STOPPED'].includes(trialJob.status)) { + if (this.jobStreamMap.has(trialJob.id)) { + const stream: ts.Stream | undefined = this.jobStreamMap.get(trialJob.id); + if (stream === undefined) { + throw new Error(`Could not find stream in trial ${trialJob.id}`); + } + //Refer https://github.com/Juul/tail-stream/issues/20 + setTimeout(() => { + stream.end(0); + stream.emit('end'); + this.jobStreamMap.delete(trialJob.id); + }, 5000); + } + } + if (trialJob.gpuIndices !== undefined && trialJob.gpuIndices.length > 0 && this.gpuScheduler !== undefined) { + if (oldStatus === 'RUNNING' && trialJob.status !== 'RUNNING') { + for (const index 
of trialJob.gpuIndices) { + const num: number | undefined = this.occupiedGpuIndexNumMap.get(index); + if (num === undefined) { + throw new Error(`gpu resource schedule error`); + } else if (num === 1) { + this.occupiedGpuIndexNumMap.delete(index); + } else { + this.occupiedGpuIndexNumMap.set(index, num - 1); + } + } + } + } + } + + private getEnvironmentVariables( + trialJobDetail: TrialJobDetail, + resource: { gpuIndices: number[] }, + gpuNum: number | undefined): { key: string; value: string }[] { + const envVariables: { key: string; value: string }[] = [ + { key: 'NNI_PLATFORM', value: 'local' }, + { key: 'NNI_EXP_ID', value: this.experimentId }, + { key: 'NNI_SYS_DIR', value: trialJobDetail.workingDirectory }, + { key: 'NNI_TRIAL_JOB_ID', value: trialJobDetail.id }, + { key: 'NNI_OUTPUT_DIR', value: trialJobDetail.workingDirectory }, + { key: 'NNI_TRIAL_SEQ_ID', value: trialJobDetail.form.sequenceId.toString() }, + { key: 'NNI_CODE_DIR', value: this.config.trialCodeDirectory} + ]; + if (gpuNum !== undefined) { + envVariables.push({ + key: 'HIP_VISIBLE_DEVICES', + value: this.gpuScheduler === undefined ? '-1' : resource.gpuIndices.join(',') + }); + } + + return envVariables; + } + + private setExtraProperties(trialJobDetail: LocalTrialJobDetail, resource: { gpuIndices: number[] }): void { + trialJobDetail.gpuIndices = resource.gpuIndices; + } + + private tryGetAvailableResource(): [boolean, { gpuIndices: number[]}] { + const resource: { gpuIndices: number[] } = { gpuIndices: [] }; + if (this.gpuScheduler === undefined) { + return [true, resource]; + } + + let selectedGPUIndices: number[] = []; + const availableGpuIndices: number[] = this.gpuScheduler.getAvailableGPUIndices(this.config.useActiveGpu, this.occupiedGpuIndexNumMap); + for (const index of availableGpuIndices) { + const num: number | undefined = this.occupiedGpuIndexNumMap.get(index); + if (num === undefined || num < this.config.maxTrialNumberPerGpu) { + selectedGPUIndices.push(index); + } + } + + if (this.config.gpuIndices !== undefined) { + this.checkSpecifiedGpuIndices(); + selectedGPUIndices = selectedGPUIndices.filter((index: number) => this.config.gpuIndices!.includes(index)); + } + + if (selectedGPUIndices.length < this.config.trialGpuNumber!) 
{ + return [false, resource]; + } + + selectedGPUIndices.splice(this.config.trialGpuNumber!); + Object.assign(resource, { gpuIndices: selectedGPUIndices }); + + return [true, resource]; + } + + private checkSpecifiedGpuIndices(): void { + const gpuCount: number | undefined = this.gpuScheduler.getSystemGpuCount(); + if (this.config.gpuIndices !== undefined && gpuCount !== undefined) { + for (const index of this.config.gpuIndices) { + if (index >= gpuCount) { + throw new Error(`Specified GPU index not found: ${index}`); + } + } + } + } + + private occupyResource(resource: {gpuIndices: number[]}): void { + if (this.gpuScheduler !== undefined) { + for (const index of resource.gpuIndices) { + const num: number | undefined = this.occupiedGpuIndexNumMap.get(index); + if (num === undefined) { + this.occupiedGpuIndexNumMap.set(index, 1); + } else { + this.occupiedGpuIndexNumMap.set(index, num + 1); + } + } + } + } + + private async runJobLoop(): Promise { + while (!this.stopping) { + while (!this.stopping && this.jobQueue.length !== 0) { + const trialJobId: string = this.jobQueue[0]; + const trialJobDetail: LocalTrialJobDetail | undefined = this.jobMap.get(trialJobId); + if (trialJobDetail !== undefined && trialJobDetail.status === 'WAITING') { + const [success, resource] = this.tryGetAvailableResource(); + if (!success) { + break; + } + + this.occupyResource(resource); + await this.runTrialJob(trialJobId, resource); + } + this.jobQueue.shift(); + } + await delay(5000); + } + } + + private setTrialJobStatus(trialJob: LocalTrialJobDetail, newStatus: TrialJobStatus): void { + if (trialJob.status !== newStatus) { + const oldStatus: TrialJobStatus = trialJob.status; + trialJob.status = newStatus; + this.onTrialJobStatusChanged(trialJob, oldStatus); + } + } + + private getScript(workingDirectory: string): string[] { + const script: string[] = []; + if (process.platform === 'win32') { + script.push(`$PSDefaultParameterValues = @{'Out-File:Encoding' = 'utf8'}`); + script.push(`cd $env:NNI_CODE_DIR`); + script.push( + `cmd.exe /c ${this.config.trialCommand} 1>${path.join(workingDirectory, 'stdout')} 2>${path.join(workingDirectory, 'stderr')}`, + `$NOW_DATE = [int64](([datetime]::UtcNow)-(get-date "1/1/1970")).TotalSeconds`, + `$NOW_DATE = "$NOW_DATE" + (Get-Date -Format fff).ToString()`, + `Write $LASTEXITCODE " " $NOW_DATE | Out-File "${path.join(workingDirectory, '.nni', 'state')}" -NoNewline -encoding utf8`); + } else { + script.push(`cd $NNI_CODE_DIR`); + script.push(`eval ${this.config.trialCommand} 1>${path.join(workingDirectory, 'stdout')} 2>${path.join(workingDirectory, 'stderr')}`); + if (process.platform === 'darwin') { + // https://superuser.com/questions/599072/how-to-get-bash-execution-time-in-milliseconds-under-mac-os-x + // Considering the worst case, write 999 to avoid negative duration + script.push(`echo $? \`date +%s999\` >'${path.join(workingDirectory, '.nni', 'state')}'`); + } else { + script.push(`echo $? 
\`date +%s%3N\` >'${path.join(workingDirectory, '.nni', 'state')}'`); + } + } + + return script; + } + + private async runTrialJob(trialJobId: string, resource: {gpuIndices: number[]}): Promise { + const trialJobDetail: LocalTrialJobDetail = this.jobMap.get(trialJobId); + const variables: { key: string; value: string }[] = this.getEnvironmentVariables(trialJobDetail, resource, this.config.trialGpuNumber); + + const runScriptContent: string[] = []; + if (process.platform !== 'win32') { + runScriptContent.push('#!/bin/bash'); + } else { + runScriptContent.push(`$env:PATH=${powershellString(process.env['path']!)}`) + } + for (const variable of variables) { + runScriptContent.push(setEnvironmentVariable(variable)); + } + const scripts: string[] = this.getScript(trialJobDetail.workingDirectory); + scripts.forEach((script: string) => { + runScriptContent.push(script); + }); + await execMkdir(trialJobDetail.workingDirectory); + await execMkdir(path.join(trialJobDetail.workingDirectory, '.nni')); + await execNewFile(path.join(trialJobDetail.workingDirectory, '.nni', 'metrics')); + const scriptName: string = getScriptName('run'); + await fs.promises.writeFile(path.join(trialJobDetail.workingDirectory, scriptName), + runScriptContent.join(getNewLine()), { encoding: 'utf8', mode: 0o777 }); + await this.writeParameterFile(trialJobDetail.workingDirectory, trialJobDetail.form.hyperParameters); + const trialJobProcess: cp.ChildProcess = runScript(path.join(trialJobDetail.workingDirectory, scriptName)); + this.setTrialJobStatus(trialJobDetail, 'RUNNING'); + trialJobDetail.startTime = Date.now(); // eslint-disable-line require-atomic-updates + trialJobDetail.pid = trialJobProcess.pid; // eslint-disable-line require-atomic-updates + this.setExtraProperties(trialJobDetail, resource); + + let buffer: Buffer = Buffer.alloc(0); + const stream: ts.Stream = ts.createReadStream(path.join(trialJobDetail.workingDirectory, '.nni', 'metrics')); + stream.on('data', (data: Buffer) => { + buffer = Buffer.concat([buffer, data]); + while (buffer.length > 0) { + const [success, , content, remain] = decodeCommand(buffer); + if (!success) { + break; + } + this.eventEmitter.emit('metric', { + id: trialJobDetail.id, + data: content + }); + this.log.debug(`Sending metrics, job id: ${trialJobDetail.id}, metrics: ${content}`); + buffer = remain; + } + }); + this.jobStreamMap.set(trialJobDetail.id, stream); + } + + private async writeParameterFile(directory: string, hyperParameters: HyperParameters): Promise { + const filepath: string = path.join(directory, generateParamFileName(hyperParameters)); + await fs.promises.writeFile(filepath, hyperParameters.value, { encoding: 'utf8' }); + } + + public async getTrialOutputLocalPath(trialJobId: string): Promise { + return Promise.resolve(path.join(this.rootDir, 'trials', trialJobId)); + } + + public async fetchTrialOutput(trialJobId: string, subpath: string): Promise { + let trialLocalPath = await this.getTrialOutputLocalPath(trialJobId); + if (subpath !== undefined) { + trialLocalPath = path.join(trialLocalPath, subpath); + } + if (fs.existsSync(trialLocalPath)) { + return Promise.resolve(); + } else { + return Promise.reject(new Error('Trial local path not exist.')); + } + } +} + +export { LocalTrainingService }; diff --git a/ts/nni_manager/training_service/pai/paiConfig.ts b/ts/nni_manager/training_service/pai/paiConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..dfbff936d67e25272eebc3b53cf9ec83bb37e69e --- /dev/null +++ 
b/ts/nni_manager/training_service/pai/paiConfig.ts @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TrialJobApplicationForm, TrialJobDetail, TrialJobStatus } from 'common/trainingService'; +import {TrialConfig} from '../common/trialConfig'; + +export class PAIClusterConfig { + public readonly userName: string; + public readonly passWord?: string; + public host: string; + public readonly token?: string; + public readonly reuse?: boolean; + + public cpuNum?: number; + public memoryMB?: number; + public gpuNum?: number; + + public useActiveGpu?: boolean; + public maxTrialNumPerGpu?: number; + + /** + * Constructor + * @param userName User name of PAI Cluster + * @param passWord password of PAI Cluster + * @param host Host IP of PAI Cluster + * @param token PAI token of PAI Cluster + * @param reuse If job is reusable for multiple trials + */ + constructor(userName: string, host: string, passWord?: string, token?: string, reuse?: boolean, + cpuNum?: number, memoryMB?: number, gpuNum?: number) { + this.userName = userName; + this.passWord = passWord; + this.host = host; + this.token = token; + this.reuse = reuse; + this.cpuNum = cpuNum; + this.memoryMB = memoryMB; + this.gpuNum = gpuNum; + } +} + +/** + * PAI trial job detail + */ +export class PAITrialJobDetail implements TrialJobDetail { + public id: string; + public status: TrialJobStatus; + public paiJobName: string; + public submitTime: number; + public startTime?: number; + public endTime?: number; + public tags?: string[]; + public url?: string; + public workingDirectory: string; + public form: TrialJobApplicationForm; + public logPath: string; + public isEarlyStopped?: boolean; + public paiJobDetailUrl?: string; + + constructor(id: string, status: TrialJobStatus, paiJobName: string, + submitTime: number, workingDirectory: string, form: TrialJobApplicationForm, logPath: string, paiJobDetailUrl?: string) { + this.id = id; + this.status = status; + this.paiJobName = paiJobName; + this.submitTime = submitTime; + this.workingDirectory = workingDirectory; + this.form = form; + this.tags = []; + this.logPath = logPath; + this.paiJobDetailUrl = paiJobDetailUrl; + } +} + +export const PAI_TRIAL_COMMAND_FORMAT: string = +`export NNI_PLATFORM=pai NNI_SYS_DIR={0} NNI_OUTPUT_DIR={1} NNI_TRIAL_JOB_ID={2} NNI_EXP_ID={3} NNI_TRIAL_SEQ_ID={4} MULTI_PHASE={5} \ +&& NNI_CODE_DIR={6} && mkdir -p $NNI_SYS_DIR/code && cp -r $NNI_CODE_DIR/. 
$NNI_SYS_DIR/code && sh $NNI_SYS_DIR/install_nni.sh \ +&& cd $NNI_SYS_DIR/code && python3 -m nni.tools.trial_tool.trial_keeper --trial_command '{7}' --nnimanager_ip '{8}' --nnimanager_port '{9}' \ +--nni_manager_version '{10}' --log_collection '{11}' | tee $NNI_OUTPUT_DIR/trial.log`; + +/** + * PAI trial configuration + */ +export class NNIPAITrialConfig extends TrialConfig { + public readonly cpuNum: number; + public readonly memoryMB: number; + public readonly image: string; + public virtualCluster?: string; + public readonly nniManagerNFSMountPath: string; + public readonly containerNFSMountPath: string; + public readonly paiStorageConfigName: string; + public readonly paiConfigPath?: string; + + constructor(command: string, codeDir: string, gpuNum: number, cpuNum: number, memoryMB: number, + image: string, nniManagerNFSMountPath: string, containerNFSMountPath: string, + paiStorageConfigName: string, virtualCluster?: string, paiConfigPath?: string) { + super(command, codeDir, gpuNum); + this.cpuNum = cpuNum; + this.memoryMB = memoryMB; + this.image = image; + this.virtualCluster = virtualCluster; + this.nniManagerNFSMountPath = nniManagerNFSMountPath; + this.containerNFSMountPath = containerNFSMountPath; + this.paiStorageConfigName = paiStorageConfigName; + this.paiConfigPath = paiConfigPath; + } +} diff --git a/ts/nni_manager/training_service/pai/paiJobInfoCollector.ts b/ts/nni_manager/training_service/pai/paiJobInfoCollector.ts new file mode 100644 index 0000000000000000000000000000000000000000..7043dff23f6527a54303c1c609836118490d128c --- /dev/null +++ b/ts/nni_manager/training_service/pai/paiJobInfoCollector.ts @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import request from 'request'; +import { Deferred } from 'ts-deferred'; +import { NNIError, NNIErrorNames } from 'common/errors'; +import { getLogger, Logger } from 'common/log'; +import { TrialJobStatus } from 'common/trainingService'; +import { OpenpaiConfig } from 'common/experimentConfig'; +import { PAITrialJobDetail } from './paiConfig'; + +/** + * Collector PAI jobs info from PAI cluster, and update pai job status locally + */ +export class PAIJobInfoCollector { + private readonly trialJobsMap: Map; + private readonly log: Logger = getLogger('PAIJobInfoCollector'); + private readonly statusesNeedToCheck: TrialJobStatus[]; + private readonly finalStatuses: TrialJobStatus[]; + + constructor(jobMap: Map) { + this.trialJobsMap = jobMap; + this.statusesNeedToCheck = ['RUNNING', 'UNKNOWN', 'WAITING']; + this.finalStatuses = ['SUCCEEDED', 'FAILED', 'USER_CANCELED', 'SYS_CANCELED', 'EARLY_STOPPED']; + } + + public async retrieveTrialStatus(protocol: string, token? 
: string, config?: OpenpaiConfig): Promise<void> {
+        if (config === undefined || token === undefined) {
+            return Promise.resolve();
+        }
+
+        const updatePaiTrialJobs: Promise<void>[] = [];
+        for (const [trialJobId, paiTrialJob] of this.trialJobsMap) {
+            if (paiTrialJob === undefined) {
+                throw new NNIError(NNIErrorNames.NOT_FOUND, `trial job id ${trialJobId} not found`);
+            }
+            updatePaiTrialJobs.push(this.getSinglePAITrialJobInfo(protocol, paiTrialJob, token, config));
+        }
+
+        await Promise.all(updatePaiTrialJobs);
+    }
+
+    private getSinglePAITrialJobInfo(_protocol: string, paiTrialJob: PAITrialJobDetail, paiToken: string, config: OpenpaiConfig): Promise<void> {
+        const deferred: Deferred<void> = new Deferred<void>();
+        if (!this.statusesNeedToCheck.includes(paiTrialJob.status)) {
+            deferred.resolve();
+
+            return deferred.promise;
+        }
+
+        // Rest call to get PAI job info and update status
+        // Refer https://github.com/Microsoft/pai/blob/master/docs/rest-server/API.md for more detail about PAI Rest API
+        const getJobInfoRequest: request.Options = {
+            uri: `${config.host}/rest-server/api/v2/jobs/${config.username}~${paiTrialJob.paiJobName}`,
+            method: 'GET',
+            json: true,
+            headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${paiToken}`
+            }
+        };
+
+        //TODO: pass in request timeout param?
+        request(getJobInfoRequest, (error: Error, response: request.Response, _body: any) => {
+            // Status code 200 for success
+            if ((error !== undefined && error !== null) || response.statusCode >= 400) {
+                // The job refresh time could be earlier than job submission, so it might return a 404 error code; needs refactoring.
+                // Querying PAI job info failed; set job status to UNKNOWN
+                if (paiTrialJob.status === 'WAITING' || paiTrialJob.status === 'RUNNING') {
+                    paiTrialJob.status = 'UNKNOWN';
+                }
+            } else {
+                if (response.body.jobStatus && response.body.jobStatus.state) {
+                    switch (response.body.jobStatus.state) {
+                        case 'WAITING':
+                            paiTrialJob.status = 'WAITING';
+                            break;
+                        case 'RUNNING':
+                            paiTrialJob.status = 'RUNNING';
+                            if (paiTrialJob.startTime === undefined) {
+                                paiTrialJob.startTime = response.body.jobStatus.appLaunchedTime;
+                            }
+                            if (paiTrialJob.url === undefined) {
+                                if (response.body.jobStatus.appTrackingUrl) {
+                                    paiTrialJob.url = response.body.jobStatus.appTrackingUrl;
+                                } else {
+                                    paiTrialJob.url = paiTrialJob.paiJobDetailUrl;
+                                }
+                            }
+                            break;
+                        case 'SUCCEEDED':
+                            paiTrialJob.status = 'SUCCEEDED';
+                            break;
+                        case 'STOPPED':
+                        case 'STOPPING':
+                            if (paiTrialJob.isEarlyStopped !== undefined) {
+                                paiTrialJob.status = paiTrialJob.isEarlyStopped === true ?
+                                    'EARLY_STOPPED' : 'USER_CANCELED';
+                            } else {
+                                /* if paiTrialJob's isEarlyStopped is undefined, that means we didn't stop it via cancellation,
+                                 * so mark it as SYS_CANCELED by PAI
+                                 */
+                                paiTrialJob.status = 'SYS_CANCELED';
+                            }
+                            break;
+                        case 'FAILED':
+                            paiTrialJob.status = 'FAILED';
+                            break;
+                        default:
+                            paiTrialJob.status = 'UNKNOWN';
+                    }
+                    // For final job statuses, update startTime, endTime and url
+                    if (this.finalStatuses.includes(paiTrialJob.status)) {
+                        if (paiTrialJob.startTime === undefined) {
+                            paiTrialJob.startTime = response.body.jobStatus.appLaunchedTime;
+                        }
+                        if (paiTrialJob.endTime === undefined) {
+                            paiTrialJob.endTime = response.body.jobStatus.completedTime;
+                        }
+                        // Set pai trial job's url to WebHDFS output path
+                        if (paiTrialJob.logPath !== undefined) {
+                            if (paiTrialJob.url && paiTrialJob.url !== paiTrialJob.logPath) {
+                                paiTrialJob.url += `,${paiTrialJob.logPath}`;
+                            } else {
+                                paiTrialJob.url = `${paiTrialJob.logPath}`;
+                            }
+                        }
+                    }
+                }
+            }
+            deferred.resolve();
+        });
+
+        return deferred.promise;
+    }
+}
diff --git a/ts/nni_manager/training_service/pai/paiJobRestServer.ts b/ts/nni_manager/training_service/pai/paiJobRestServer.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5c9dfdfc1f32ced75ff4f8cf382a312b7e932249
--- /dev/null
+++ b/ts/nni_manager/training_service/pai/paiJobRestServer.ts
@@ -0,0 +1,70 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { Request, Response, Router } from 'express';
+import { ClusterJobRestServer } from '../common/clusterJobRestServer';
+import { PAITrainingService } from './paiTrainingService';
+
+export interface ParameterFileMeta {
+    readonly experimentId: string;
+    readonly trialId: string;
+    readonly filePath: string;
+}
+
+/**
+ * PAI training service REST server; provides a REST API to support PAI job metrics updates
+ *
+ */
+export class PAIJobRestServer extends ClusterJobRestServer {
+    protected parameterFileMetaList: ParameterFileMeta[] = [];
+
+    protected readonly paiTrainingService: PAITrainingService;
+
+    /**
+     * constructor to provide NNIRestServer's own rest property, e.g.
port + */ + constructor (paiTrainingService: PAITrainingService) { + super(); + this.paiTrainingService = paiTrainingService; + } + + protected handleTrialMetrics(jobId: string, metrics: any[]): void { + // Split metrics array into single metric, then emit + // Warning: If not split metrics into single ones, the behavior will be UNKNOWN + for (const singleMetric of metrics) { + this.paiTrainingService.MetricsEmitter.emit('metric', { + id : jobId, + data : singleMetric + }); + } + } + + protected createRestHandler(): Router { + const router: Router = super.createRestHandler(); + + router.post(`/parameter-file-meta`, (req: Request, res: Response) => { + try { + this.log.info('POST /parameter-file-meta, body is', req.body); + this.parameterFileMetaList.push(req.body); + res.send(); + } catch (err) { + this.log.error(`POST parameter-file-meta error: ${err}`); + res.status(500); + res.send(err.message); + } + }); + + router.get(`/parameter-file-meta`, (_req: Request, res: Response) => { + try { + this.log.info(`GET /parameter-file-meta`); + res.send(this.parameterFileMetaList); + } catch (err) { + this.log.error(`GET parameter-file-meta error: ${err}`); + res.status(500); + res.send(err.message); + } + }); + + return router; + } +} diff --git a/ts/nni_manager/training_service/pai/paiTrainingService.ts b/ts/nni_manager/training_service/pai/paiTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..3b0fee940a435d60043ce16a0038c1f53c17a196 --- /dev/null +++ b/ts/nni_manager/training_service/pai/paiTrainingService.ts @@ -0,0 +1,442 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import path from 'path'; +import request from 'request'; +import * as component from 'common/component'; + +import { EventEmitter } from 'events'; +import { Deferred } from 'ts-deferred'; +import { getExperimentId } from 'common/experimentStartupInfo'; +import { getLogger, Logger } from 'common/log'; +import { MethodNotImplementedError } from 'common/errors'; +import { + HyperParameters, NNIManagerIpConfig, TrainingService, + TrialJobApplicationForm, TrialJobDetail, TrialJobMetric +} from 'common/trainingService'; +import { delay } from 'common/utils'; +import { OpenpaiConfig, toMegaBytes } from 'common/experimentConfig'; +import { PAIJobInfoCollector } from './paiJobInfoCollector'; +import { PAIJobRestServer } from './paiJobRestServer'; +import { PAITrialJobDetail, PAI_TRIAL_COMMAND_FORMAT } from './paiConfig'; +import { String } from 'typescript-string-operations'; +import { generateParamFileName, getIPV4Address, uniqueString } from 'common/utils'; +import { CONTAINER_INSTALL_NNI_SHELL_FORMAT } from '../common/containerJobData'; +import { execMkdir, validateCodeDir, execCopydir } from '../common/util'; + +const yaml = require('js-yaml'); + +/** + * Training Service implementation for OpenPAI (Open Platform for AI) + * Refer https://github.com/Microsoft/pai for more info about OpenPAI + */ +@component.Singleton +class PAITrainingService implements TrainingService { + private readonly log!: Logger; + private readonly metricsEmitter: EventEmitter; + private readonly trialJobsMap: Map; + private readonly expRootDir: string; + private readonly jobQueue: string[]; + private stopping: boolean = false; + private paiToken?: string; + private paiTokenUpdateTime?: number; + private readonly paiTokenUpdateInterval: number; + private readonly experimentId!: string; + private readonly paiJobCollector: PAIJobInfoCollector; + private 
paiRestServerPort?: number; + private nniManagerIpConfig?: NNIManagerIpConfig; + private versionCheck: boolean = true; + private logCollection: string = 'none'; + private paiJobRestServer?: PAIJobRestServer; + private protocol: string; + private copyExpCodeDirPromise?: Promise; + private paiJobConfig: any; + private nniVersion: string | undefined; + private config: OpenpaiConfig; + + constructor(config: OpenpaiConfig) { + this.log = getLogger('PAITrainingService'); + this.metricsEmitter = new EventEmitter(); + this.trialJobsMap = new Map(); + this.jobQueue = []; + this.expRootDir = path.join('/nni-experiments', getExperimentId()); + this.experimentId = getExperimentId(); + this.paiJobCollector = new PAIJobInfoCollector(this.trialJobsMap); + this.paiTokenUpdateInterval = 7200000; //2hours + this.log.info('Construct paiBase training service.'); + this.config = config; + this.versionCheck = !this.config.debug; + this.paiJobRestServer = new PAIJobRestServer(this); + this.paiToken = this.config.token; + this.protocol = this.config.host.toLowerCase().startsWith('https://') ? 'https' : 'http'; + this.copyExpCodeDirPromise = this.copyTrialCode(); + } + + private async copyTrialCode(): Promise { + await validateCodeDir(this.config.trialCodeDirectory); + const nniManagerNFSExpCodeDir = path.join(this.config.localStorageMountPoint, this.experimentId, 'nni-code'); + await execMkdir(nniManagerNFSExpCodeDir); + this.log.info(`Starting copy codeDir data from ${this.config.trialCodeDirectory} to ${nniManagerNFSExpCodeDir}`); + await execCopydir(this.config.trialCodeDirectory, nniManagerNFSExpCodeDir); + } + + public async run(): Promise { + this.log.info('Run PAI training service.'); + if (this.paiJobRestServer === undefined) { + throw new Error('paiJobRestServer not initialized!'); + } + await this.paiJobRestServer.start(); + this.paiJobRestServer.setEnableVersionCheck = this.versionCheck; + this.log.info(`PAI Training service rest server listening on: ${this.paiJobRestServer.endPoint}`); + await Promise.all([ + this.statusCheckingLoop(), + this.submitJobLoop()]); + this.log.info('PAI training service exit.'); + } + + protected async submitJobLoop(): Promise { + while (!this.stopping) { + while (!this.stopping && this.jobQueue.length > 0) { + const trialJobId: string = this.jobQueue[0]; + if (await this.submitTrialJobToPAI(trialJobId)) { + // Remove trial job with trialJobId from job queue + this.jobQueue.shift(); + } else { + // Break the while loop since failed to submitJob + break; + } + } + await delay(3000); + } + } + + public async listTrialJobs(): Promise { + const jobs: TrialJobDetail[] = []; + + for (const key of this.trialJobsMap.keys()) { + jobs.push(await this.getTrialJob(key)); + } + + return jobs; + } + + public async getTrialFile(_trialJobId: string, _fileName: string): Promise { + throw new MethodNotImplementedError(); + } + + public async getTrialJob(trialJobId: string): Promise { + const paiTrialJob: PAITrialJobDetail | undefined = this.trialJobsMap.get(trialJobId); + + if (paiTrialJob === undefined) { + throw new Error(`trial job ${trialJobId} not found`); + } + + return paiTrialJob; + } + + public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.on('metric', listener); + } + + public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.off('metric', listener); + } + + public cancelTrialJob(trialJobId: string, isEarlyStopped: boolean = false): Promise { + const trialJobDetail: 
PAITrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+        if (trialJobDetail === undefined) {
+            return Promise.reject(new Error(`cancelTrialJob: trial job id ${trialJobId} not found`));
+        }
+
+        if (trialJobDetail.status === 'UNKNOWN') {
+            trialJobDetail.status = 'USER_CANCELED';
+            return Promise.resolve();
+        }
+
+        const stopJobRequest: request.Options = {
+            uri: `${this.config.host}/rest-server/api/v2/jobs/${this.config.username}~${trialJobDetail.paiJobName}/executionType`,
+            method: 'PUT',
+            json: true,
+            body: { value: 'STOP' },
+            headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${this.paiToken}`
+            }
+        };
+
+        // Set trialJobDetail's isEarlyStopped field to mark the job's cancellation source
+        trialJobDetail.isEarlyStopped = isEarlyStopped;
+        const deferred: Deferred<void> = new Deferred<void>();
+
+        request(stopJobRequest, (error: Error, response: request.Response, _body: any) => {
+            // Status code 202 for success.
+            if ((error !== undefined && error !== null) || response.statusCode >= 400) {
+                this.log.error(`PAI Training service: stopping trial ${trialJobId} on PAI cluster failed!`);
+                deferred.reject((error !== undefined && error !== null) ? error.message :
+                    `Stop trial failed, http code: ${response.statusCode}`);
+            } else {
+                deferred.resolve();
+            }
+        });
+
+        return deferred.promise;
+    }
+
+    public async cleanUp(): Promise<void> {
+        this.log.info('Stopping PAI training service...');
+        this.stopping = true;
+
+        if (this.paiJobRestServer === undefined) {
+            throw new Error('paiJobRestServer not initialized!');
+        }
+
+        try {
+            await this.paiJobRestServer.stop();
+            this.log.info('PAI Training service rest server stopped successfully.');
+        } catch (error) {
+            this.log.error(`PAI Training service rest server stop failed, error: ${error.message}`);
+        }
+    }
+
+    public get MetricsEmitter(): EventEmitter {
+        return this.metricsEmitter;
+    }
+
+    protected formatPAIHost(host: string): string {
+        // If the user's host starts with 'http://' or 'https://', record the protocol
+        // and return the host with the scheme prefix stripped; otherwise return it unchanged.
+        if (host.startsWith('http://')) {
+            this.protocol = 'http';
+            return host.replace('http://', '');
+        } else if (host.startsWith('https://')) {
+            this.protocol = 'https';
+            return host.replace('https://', '');
+        } else {
+            return host;
+        }
+    }
+
+    protected async statusCheckingLoop(): Promise<void> {
+        while (!this.stopping) {
+            await this.paiJobCollector.retrieveTrialStatus(this.protocol, this.paiToken, this.config);
+            if (this.paiJobRestServer === undefined) {
+                throw new Error('paiJobRestServer not initialized!');
+            }
+            if (this.paiJobRestServer.getErrorMessage !== undefined) {
+                throw new Error(this.paiJobRestServer.getErrorMessage);
+            }
+            await delay(3000);
+        }
+    }
+
+    public async setClusterMetadata(_key: string, _value: string): Promise<void> { return; }
+    public async getClusterMetadata(_key: string): Promise<string> { return ''; }
+
+    // update trial parameters for multi-phase
+    public async updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+        const trialJobDetail: PAITrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+        if (trialJobDetail === undefined) {
+            throw new Error(`updateTrialJob failed: ${trialJobId} not found`);
+        }
+        // Write file content ( parameter.cfg ) to working folders
+        await this.writeParameterFile(trialJobDetail.logPath, form.hyperParameters);
+
+        return trialJobDetail;
+    }
+
+    public async submitTrialJob(form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+        this.log.info('submitTrialJob: form:', form);
+
+        const trialJobId: string =
uniqueString(5); + //TODO: use HDFS working folder instead + const trialWorkingFolder: string = path.join(this.expRootDir, 'trials', trialJobId); + const paiJobName: string = `nni_exp_${this.experimentId}_trial_${trialJobId}`; + const logPath: string = path.join(this.config.localStorageMountPoint, this.experimentId, trialJobId); + const paiJobDetailUrl: string = `${this.config.host}/job-detail.html?username=${this.config.username}&jobName=${paiJobName}`; + const trialJobDetail: PAITrialJobDetail = new PAITrialJobDetail( + trialJobId, + 'WAITING', + paiJobName, + Date.now(), + trialWorkingFolder, + form, + logPath, + paiJobDetailUrl); + + this.trialJobsMap.set(trialJobId, trialJobDetail); + this.jobQueue.push(trialJobId); + + return trialJobDetail; + } + + private async generateNNITrialCommand(trialJobDetail: PAITrialJobDetail, command: string): Promise { + const containerNFSExpCodeDir = `${this.config.containerStorageMountPoint}/${this.experimentId}/nni-code`; + const containerWorkingDir: string = `${this.config.containerStorageMountPoint}/${this.experimentId}/${trialJobDetail.id}`; + const nniPaiTrialCommand: string = String.Format( + PAI_TRIAL_COMMAND_FORMAT, + `${containerWorkingDir}`, + `${containerWorkingDir}/nnioutput`, + trialJobDetail.id, + this.experimentId, + trialJobDetail.form.sequenceId, + false, // multi-phase + containerNFSExpCodeDir, + command, + this.config.nniManagerIp || await getIPV4Address(), + this.paiRestServerPort, + this.nniVersion, + this.logCollection + ) + .replace(/\r\n|\n|\r/gm, ''); + + return nniPaiTrialCommand; + + } + + private async generateJobConfigInYamlFormat(trialJobDetail: PAITrialJobDetail): Promise { + const jobName = `nni_exp_${this.experimentId}_trial_${trialJobDetail.id}` + + let nniJobConfig: any = undefined; + if (this.config.openpaiConfig !== undefined) { + nniJobConfig = JSON.parse(JSON.stringify(this.config.openpaiConfig)); //Trick for deep clone in Typescript + nniJobConfig.name = jobName; + // Each taskRole will generate new command in NNI's command format + // Each command will be formatted to NNI style + for (const taskRoleIndex in nniJobConfig.taskRoles) { + const commands = nniJobConfig.taskRoles[taskRoleIndex].commands + const nniTrialCommand = await this.generateNNITrialCommand(trialJobDetail, commands.join(" && ").replace(/(["'$`\\])/g, '\\$1')); + nniJobConfig.taskRoles[taskRoleIndex].commands = [nniTrialCommand] + } + + } else { + nniJobConfig = { + protocolVersion: 2, + name: jobName, + type: 'job', + jobRetryCount: 0, + prerequisites: [ + { + type: 'dockerimage', + uri: this.config.dockerImage, + name: 'docker_image_0' + } + ], + taskRoles: { + taskrole: { + instances: 1, + completion: { + minFailedInstances: 1, + minSucceededInstances: -1 + }, + taskRetryCount: 0, + dockerImage: 'docker_image_0', + resourcePerInstance: { + gpu: this.config.trialGpuNumber, + cpu: this.config.trialCpuNumber, + memoryMB: toMegaBytes(this.config.trialMemorySize) + }, + commands: [ + await this.generateNNITrialCommand(trialJobDetail, this.config.trialCommand) + ] + } + }, + extras: { + 'storages': [ + { + name: this.config.storageConfigName + } + ], + submitFrom: 'submit-job-v2' + } + } + if (this.config.virtualCluster) { + nniJobConfig.defaults = { + virtualCluster: this.config.virtualCluster + } + } + } + return yaml.safeDump(nniJobConfig); + } + + protected async submitTrialJobToPAI(trialJobId: string): Promise { + const deferred: Deferred = new Deferred(); + const trialJobDetail: PAITrialJobDetail | undefined = 
this.trialJobsMap.get(trialJobId); + + if (trialJobDetail === undefined) { + throw new Error(`Failed to find PAITrialJobDetail for job ${trialJobId}`); + } + + if (this.paiJobRestServer === undefined) { + throw new Error('paiJobRestServer is not initialized'); + } + + // Make sure experiment code files is copied from local to NFS + if (this.copyExpCodeDirPromise !== undefined) { + await this.copyExpCodeDirPromise; + this.log.info(`Copy codeDir data finished.`); + // All trials share same destination NFS code folder, only copy codeDir once for an experiment. + // After copy data finished, set copyExpCodeDirPromise be undefined to avoid log content duplicated. + this.copyExpCodeDirPromise = undefined; + } + + this.paiRestServerPort = this.paiJobRestServer.clusterRestServerPort; + + // Step 1. Prepare PAI job configuration + //create trial local working folder locally. + await execMkdir(trialJobDetail.logPath); + // Write NNI installation file to local files + await fs.promises.writeFile(path.join(trialJobDetail.logPath, 'install_nni.sh'), CONTAINER_INSTALL_NNI_SHELL_FORMAT, { encoding: 'utf8' }); + + // Write file content ( parameter.cfg ) to local working folders + if (trialJobDetail.form !== undefined) { + await this.writeParameterFile(trialJobDetail.logPath, trialJobDetail.form.hyperParameters); + } + + //Generate Job Configuration in yaml format + const paiJobConfig = await this.generateJobConfigInYamlFormat(trialJobDetail); + this.log.debug(paiJobConfig); + // Step 2. Submit PAI job via Rest call + // Refer https://github.com/Microsoft/pai/blob/master/docs/rest-server/API.md for more detail about PAI Rest API + const submitJobRequest: request.Options = { + uri: `${this.config.host}/rest-server/api/v2/jobs`, + method: 'POST', + body: paiJobConfig, + followAllRedirects: true, + headers: { + 'Content-Type': 'text/yaml', + Authorization: `Bearer ${this.paiToken}` + } + }; + request(submitJobRequest, (error: Error, response: request.Response, body: any) => { + // If submit success, will get status code 202. refer: https://github.com/microsoft/pai/blob/master/src/rest-server/docs/swagger.yaml + if ((error !== undefined && error !== null) || response.statusCode >= 400) { + const errorMessage: string = (error !== undefined && error !== null) ? 
error.message : + `Submit trial ${trialJobId} failed, http code:${response.statusCode}, http body: ${body}`; + this.log.error(errorMessage); + trialJobDetail.status = 'FAILED'; + deferred.reject(errorMessage); + } else { + trialJobDetail.submitTime = Date.now(); + } + deferred.resolve(true); + }); + + return deferred.promise; + } + + private async writeParameterFile(directory: string, hyperParameters: HyperParameters): Promise { + const filepath: string = path.join(directory, generateParamFileName(hyperParameters)); + await fs.promises.writeFile(filepath, hyperParameters.value, { encoding: 'utf8' }); + } + + public getTrialOutputLocalPath(_trialJobId: string): Promise { + throw new MethodNotImplementedError(); + } + + public fetchTrialOutput(_trialJobId: string, _subpath: string): Promise { + throw new MethodNotImplementedError(); + } +} + +export { PAITrainingService }; diff --git a/ts/nni_manager/training_service/remote_machine/extends/linuxCommands.ts b/ts/nni_manager/training_service/remote_machine/extends/linuxCommands.ts new file mode 100644 index 0000000000000000000000000000000000000000..45f42a91804555ebec93046a7ed03fdbaa8b9b20 --- /dev/null +++ b/ts/nni_manager/training_service/remote_machine/extends/linuxCommands.ts @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { OsCommands } from "../osCommands"; +import { RemoteCommandResult } from "../remoteMachineData"; + +class LinuxCommands extends OsCommands { + + public getScriptExt(): string { + return "sh"; + } + + public generateStartScript(workingDirectory: string, trialJobId: string, experimentId: string, + trialSequenceId: string, isMultiPhase: boolean, jobIdFileName: string, + command: string, nniManagerAddress: string, nniManagerPort: number, + nniManagerVersion: string, logCollection: string, exitCodeFile: string, + codeDir: string, cudaVisibleSetting: string): string { + + return `#!/bin/bash + export NNI_PLATFORM=remote NNI_SYS_DIR=${workingDirectory} NNI_OUTPUT_DIR=${workingDirectory} NNI_TRIAL_JOB_ID=${trialJobId} \ + NNI_EXP_ID=${experimentId} NNI_TRIAL_SEQ_ID=${trialSequenceId} NNI_CODE_DIR=${codeDir} + export MULTI_PHASE=${isMultiPhase} + mkdir -p $NNI_SYS_DIR/code + cp -r $NNI_CODE_DIR/. $NNI_SYS_DIR/code + sh $NNI_SYS_DIR/install_nni.sh + cd $NNI_SYS_DIR/code + python3 -m nni.tools.trial_tool.trial_keeper --trial_command '${cudaVisibleSetting} ${command}' --nnimanager_ip '${nniManagerAddress}' \ + --nnimanager_port '${nniManagerPort}' --nni_manager_version '${nniManagerVersion}' \ + --job_id_file ${jobIdFileName} \ + --log_collection '${logCollection}' 1>$NNI_OUTPUT_DIR/trialkeeper_stdout 2>$NNI_OUTPUT_DIR/trialkeeper_stderr + echo $? 
\`date +%s%3N\` >${exitCodeFile}`;
+    }
+
+    public generateGpuStatsScript(scriptFolder: string): string {
+        return `echo $$ > ${scriptFolder}/pid ; METRIC_OUTPUT_DIR=${scriptFolder} python3 -m nni.tools.gpu_tool.gpu_metrics_collector`;
+    }
+
+    public createFolder(folderName: string, sharedFolder: boolean = false): string {
+        let command;
+        if (sharedFolder) {
+            command = `umask 0; mkdir -p '${folderName}'`;
+        } else {
+            command = `mkdir -p '${folderName}'`;
+        }
+        return command;
+    }
+
+    public allowPermission(isRecursive: boolean = false, ...folders: string[]): string {
+        const folderString = folders.join("' '");
+        let command;
+
+        if (isRecursive) {
+            command = `chmod 777 -R '${folderString}'`;
+        } else {
+            command = `chmod 777 '${folderString}'`;
+        }
+        return command;
+    }
+
+    public removeFolder(folderName: string, isRecursive: boolean = false, isForce: boolean = true): string {
+        let flags = '';
+        if (isForce || isRecursive) {
+            flags = `-${isRecursive ? 'r' : 'd'}${isForce ? 'f' : ''} `;
+        }
+
+        const command = `rm ${flags}'${folderName}'`;
+        return command;
+    }
+
+    public removeFiles(folderName: string, filePattern: string): string {
+        const files = this.joinPath(folderName, filePattern);
+        const command = `rm '${files}'`;
+        return command;
+    }
+
+    public readLastLines(fileName: string, lineCount: number = 1): string {
+        const command = `tail -n ${lineCount} '${fileName}'`;
+        return command;
+    }
+
+    public isProcessAliveCommand(pidFileName: string): string {
+        const command = `kill -0 \`cat '${pidFileName}'\``;
+        return command;
+    }
+
+    public isProcessAliveProcessOutput(commandResult: RemoteCommandResult): boolean {
+        let result = true;
+        if (commandResult.exitCode !== 0) {
+            result = false;
+        }
+        return result;
+    }
+
+    public killChildProcesses(pidFileName: string, killSelf: boolean): string {
+        // prevent the trial keeper from being killed, so it can save the exit code.
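+        // Illustrative example (assumed process tree, not from the original code): if the pid
+        // file holds trial keeper PID 1000, which spawned processes 1001 and 1002, then
+        // `list_descendants 1000` echoes 1001 and 1002 via the recursive `ps -o pid= --ppid`
+        // walk below, so the generated script kills the descendants while sparing 1000 itself
+        // unless killSelf is true.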
+ let command = `list_descendants () + { + local children=$(ps -o pid= --ppid "$1") + + for pid in $children + do + list_descendants "$pid" + done + + echo "$children" + } + kill $(list_descendants \`cat '${pidFileName}'\`)` + if (killSelf) { + command += `\nkill \`cat '${pidFileName}'\`` + } + return command; + } + + public extractFile(tarFileName: string, targetFolder: string): string { + const command = `tar -oxzf '${tarFileName}' -C '${targetFolder}'`; + return command; + } + + public executeScript(script: string, isFile: boolean): string { + let command: string; + if (isFile) { + command = `bash '${script}'`; + } else { + script = script.replace(/"/g, '\\"'); + const result = script.match(/[^\\]\\\\"/g); + if (result) { + result.forEach((res) => { + script = script.replace(res, res.replace(/"$/g, '\\"')); + }) + } + command = `bash -c "${script}"`; + } + return command; + } + + public setPythonPath(pythonPath: string | undefined, command: string | undefined): string | undefined{ + if (command === undefined || command === '' || pythonPath === undefined || pythonPath === ''){ + return command; + } else { + return `export PATH=${pythonPath}:$PATH && ${command}`; + } + } + + public fileExistCommand(filePath: string): string { + return `test -e ${filePath} && echo True || echo False`; + } + + public getCurrentPath(): string { + return `pwd`; + } +} + +export { LinuxCommands }; diff --git a/ts/nni_manager/training_service/remote_machine/extends/windowsCommands.ts b/ts/nni_manager/training_service/remote_machine/extends/windowsCommands.ts new file mode 100644 index 0000000000000000000000000000000000000000..c36cbd76bf929ce5250abb4a3704569a98b81117 --- /dev/null +++ b/ts/nni_manager/training_service/remote_machine/extends/windowsCommands.ts @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { OsCommands } from "../osCommands"; +import { RemoteCommandResult } from "../remoteMachineData"; + +class WindowsCommands extends OsCommands { + + protected pathSpliter: string = '\\'; + + public getScriptExt(): string { + return "cmd"; + } + public generateStartScript(workingDirectory: string, trialJobId: string, experimentId: string, + trialSequenceId: string, isMultiPhase: boolean, jobIdFileName: string, + command: string, nniManagerAddress: string, nniManagerPort: number, + nniManagerVersion: string, logCollection: string, exitCodeFile: string, + codeDir: string, cudaVisibleSetting: string): string { + return `echo off + set NNI_PLATFORM=remote + set NNI_SYS_DIR=${workingDirectory} + set NNI_OUTPUT_DIR=${workingDirectory} + set NNI_TRIAL_JOB_ID=${trialJobId} + set NNI_EXP_ID=${experimentId} + set NNI_TRIAL_SEQ_ID=${trialSequenceId} + set MULTI_PHASE=${isMultiPhase} + set NNI_CODE_DIR=${codeDir} + ${cudaVisibleSetting !== "" ? "set " + cudaVisibleSetting : ""} + md %NNI_SYS_DIR%/code + robocopy /s %NNI_CODE_DIR%/. 
%NNI_SYS_DIR%/code + cd %NNI_SYS_DIR%/code + python -c "import nni" 2>nul + if not %ERRORLEVEL% EQU 0 ( + echo installing NNI as exit code of "import nni" is %ERRORLEVEL% + python -m pip install --user --upgrade nni + ) + + echo starting script + python -m nni.tools.trial_tool.trial_keeper --trial_command "${command}" --nnimanager_ip "${nniManagerAddress}" --nnimanager_port "${nniManagerPort}" --nni_manager_version "${nniManagerVersion}" --log_collection "${logCollection}" --job_id_file ${jobIdFileName} 1>%NNI_OUTPUT_DIR%/trialkeeper_stdout 2>%NNI_OUTPUT_DIR%/trialkeeper_stderr + + echo save exit code(%ERRORLEVEL%) and time + echo|set /p="%ERRORLEVEL% " > ${exitCodeFile} + powershell -command "Write (((New-TimeSpan -Start (Get-Date "01/01/1970") -End (Get-Date).ToUniversalTime()).TotalMilliseconds).ToString("0")) | Out-file ${exitCodeFile} -Append -NoNewline -encoding utf8"`; + } + + public generateGpuStatsScript(scriptFolder: string): string { + return `powershell -command $setEnv = Start-Job -script {$env:Path=If($env:prePath){$env:prePath}Else{$env:Path};$env:METRIC_OUTPUT_DIR='${scriptFolder}'};wait-job $setEnv;$app = Start-Process -FilePath python -NoNewWindow -passthru -ArgumentList '-m nni.tools.gpu_tool.gpu_metrics_collector' -RedirectStandardOutput ${scriptFolder}\\scriptstdout -RedirectStandardError ${scriptFolder}\\scriptstderr;Write $PID ^| Out-File ${scriptFolder}\\pid -NoNewline -encoding utf8;wait-process $app.ID`; + } + + public createFolder(folderName: string, sharedFolder: boolean = false): string { + let command; + if (sharedFolder) { + command = `mkdir "${folderName}"\r\nICACLS "${folderName}" /grant "Users":F`; + } else { + command = `mkdir "${folderName}"`; + } + return command; + } + + public allowPermission(isRecursive: boolean = false, ...folders: string[]): string { + let commands: string = ""; + + folders.forEach(folder => { + commands += `ICACLS "${folder}" /grant "Users":F${isRecursive ? " /T" : ""}\r\n` + }); + return commands; + } + + public removeFolder(folderName: string, isRecursive: boolean = false, isForce: boolean = true): string { + let flags = ''; + if (isForce || isRecursive) { + flags = `${isRecursive ? ' /s' : ''}${isForce ? 
' /q' : ''}`; + } + + const command = `rmdir${flags} "${folderName}"`; + return command; + } + + public removeFiles(folderName: string, filePattern: string): string { + const files = this.joinPath(folderName, filePattern); + const command = `del "${files}"`; + return command; + } + + public readLastLines(fileName: string, lineCount: number = 1): string { + const command = `powershell.exe Get-Content "${fileName}" -Tail ${lineCount}`; + return command; + } + + public isProcessAliveCommand(pidFileName: string): string { + const command = `powershell.exe Get-Process -Id (get-content "${pidFileName}") -ErrorAction SilentlyContinue`; + return command; + } + + public isProcessAliveProcessOutput(commandResult: RemoteCommandResult): boolean { + let result = true; + if (commandResult.exitCode !== 0) { + result = false; + } + return result; + } + + public killChildProcesses(pidFileName: string, killSelf: boolean): string { + let command = `powershell "$ppid=(type ${pidFileName}); function Kill-Tree {Param([int]$subppid);` + + `Get-CimInstance Win32_Process | Where-Object { $_.ParentProcessId -eq $subppid } | ForEach-Object { Kill-Tree $_.ProcessId }; ` + + `if ($subppid -ne $ppid){Stop-Process -Id $subppid -Force"}}` + + `kill-tree $ppid"`; + if (killSelf){ + command += `;Stop-Process -Id $ppid`; + } + return command; + } + + public extractFile(tarFileName: string, targetFolder: string): string { + const command = `tar -xf "${tarFileName}" -C "${targetFolder}"`; + return command; + } + + public executeScript(script: string, _isFile: boolean): string { + const command = `${script}`; + return command; + } + + public setPythonPath(pythonPath: string | undefined, command: string | undefined): string | undefined{ + if (command === undefined || command === '' || pythonPath === undefined || pythonPath === ''){ + return command; + } else { + return `set path=${pythonPath};%path% && set prePath=%path% && ${command}`; + } + } + + public fileExistCommand(filePath: string): string { + return `powershell Test-Path ${filePath} -PathType Leaf`; + } + + public getCurrentPath(): string { + return `chdir`; + } +} + +export { WindowsCommands }; diff --git a/ts/nni_manager/training_service/remote_machine/gpuScheduler.ts b/ts/nni_manager/training_service/remote_machine/gpuScheduler.ts new file mode 100644 index 0000000000000000000000000000000000000000..1ba1513cbb562472851ae6d8cafc84b324a94c91 --- /dev/null +++ b/ts/nni_manager/training_service/remote_machine/gpuScheduler.ts @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
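+
+// Reader's note (illustrative sketch, not part of the original file): the scheduler below
+// picks a machine according to its policyName ('random' | 'round-robin'); the default
+// 'round-robin' choice is, in spirit, the following self-contained routine, where `items`
+// stands for the qualified machines and `state.index` for roundRobinIndex:
+//
+//     function roundRobin<T>(items: T[], state: { index: number }): T {
+//         const chosen = items[state.index % items.length];
+//         state.index += 1;
+//         return chosen;
+//     }
+//
+// Each call advances the cursor, so successive trials are spread evenly across machines.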
+
+import assert from 'assert';
+import { getLogger, Logger } from 'common/log';
+import { randomSelect } from 'common/utils';
+import { RemoteMachineConfig } from 'common/experimentConfig';
+import { GPUInfo, ScheduleResultType } from '../common/gpuData';
+import { ExecutorManager, RemoteMachineMeta, RemoteMachineScheduleResult, RemoteMachineTrialJobDetail } from './remoteMachineData';
+
+type SCHEDULE_POLICY_NAME = 'random' | 'round-robin';
+
+/**
+ * A simple GPU scheduler implementation
+ */
+export class GPUScheduler {
+
+ private readonly machineExecutorMap: Map<RemoteMachineConfig, ExecutorManager>;
+ private readonly log: Logger = getLogger('GPUScheduler');
+ private readonly policyName: SCHEDULE_POLICY_NAME = 'round-robin';
+ private roundRobinIndex: number = 0;
+ private configuredRMs: RemoteMachineMeta[] = [];
+
+ /**
+ * Constructor
+ * @param machineExecutorMap map from remote machine config to executor manager
+ */
+ constructor(machineExecutorMap: Map<RemoteMachineConfig, ExecutorManager>) {
+ assert(machineExecutorMap.size > 0);
+ this.machineExecutorMap = machineExecutorMap;
+ this.configuredRMs = Array.from(machineExecutorMap.values(), manager => manager.rmMeta);
+ }
+
+ /**
+ * Schedule a machine according to the constraints (requiredGPUNum)
+ * @param requiredGPUNum required GPU number
+ */
+ public scheduleMachine(requiredGPUNum: number | undefined, trialJobDetail: RemoteMachineTrialJobDetail): RemoteMachineScheduleResult {
+ if (requiredGPUNum === undefined) {
+ requiredGPUNum = 0;
+ }
+ assert(requiredGPUNum >= 0);
+ const allRMs: RemoteMachineMeta[] = Array.from(this.machineExecutorMap.values(), manager => manager.rmMeta);
+ assert(allRMs.length > 0);
+
+ // Step 1: Check whether the required GPU number exceeds the GPU count of every machine
+ const eligibleRM: RemoteMachineMeta[] = allRMs.filter((rmMeta: RemoteMachineMeta) =>
+ rmMeta.gpuSummary === undefined || requiredGPUNum === 0 || (requiredGPUNum !== undefined && rmMeta.gpuSummary.gpuCount >= requiredGPUNum));
+ if (eligibleRM.length === 0) {
+ // The required GPU number exceeds the upper limit of every machine's GPU count,
+ // so return REQUIRE_EXCEED_TOTAL directly
+ return ({
+ resultType: ScheduleResultType.REQUIRE_EXCEED_TOTAL,
+ scheduleInfo: undefined
+ });
+ }
+
+ // Step 2: Allocate a host/GPU for the specified trial job.
+ // Currently the requiredGPUNum parameter is identical for all trial jobs.
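+ // Illustrative example: with requiredGPUNum = 2 and two machines reporting 1 and 4
+ // GPUs, the 1-GPU machine was filtered out above, so the GPU branch below can only
+ // reserve devices on the 4-GPU machine.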
+ if (requiredGPUNum > 0) {
+ // Trial job requires GPU
+ const result: RemoteMachineScheduleResult | undefined = this.scheduleGPUHost(requiredGPUNum, trialJobDetail);
+ if (result !== undefined) {
+ return result;
+ }
+ } else {
+ // Trial job does not need GPU
+ const allocatedRm: RemoteMachineMeta = this.selectMachine(allRMs);
+
+ return this.allocateHost(requiredGPUNum, allocatedRm, [], trialJobDetail);
+ }
+ this.log.warning(`Scheduler: trialJob id ${trialJobDetail.id}, no machine can be scheduled, return TMP_NO_AVAILABLE_GPU `);
+
+ return {
+ resultType: ScheduleResultType.TMP_NO_AVAILABLE_GPU,
+ scheduleInfo: undefined
+ };
+ }
+
+ /**
+ * Remove the job's GPU reservation
+ */
+ public removeGpuReservation(trialJobId: string, trialJobMap: Map<string, RemoteMachineTrialJobDetail>): void {
+ const trialJobDetail: RemoteMachineTrialJobDetail | undefined = trialJobMap.get(trialJobId);
+ if (trialJobDetail === undefined) {
+ throw new Error(`could not get trialJobDetail by id ${trialJobId}`);
+ }
+ if (trialJobDetail.rmMeta !== undefined &&
+ trialJobDetail.rmMeta.occupiedGpuIndexMap !== undefined &&
+ trialJobDetail.gpuIndices !== undefined &&
+ trialJobDetail.gpuIndices.length > 0) {
+ for (const gpuInfo of trialJobDetail.gpuIndices) {
+ const num: number | undefined = trialJobDetail.rmMeta.occupiedGpuIndexMap.get(gpuInfo.index);
+ if (num !== undefined) {
+ if (num === 1) {
+ trialJobDetail.rmMeta.occupiedGpuIndexMap.delete(gpuInfo.index);
+ } else {
+ trialJobDetail.rmMeta.occupiedGpuIndexMap.set(gpuInfo.index, num - 1);
+ }
+ }
+ }
+ }
+ trialJobDetail.gpuIndices = [];
+ trialJobMap.set(trialJobId, trialJobDetail);
+ }
+
+ private scheduleGPUHost(requiredGPUNum: number, trialJobDetail: RemoteMachineTrialJobDetail): RemoteMachineScheduleResult | undefined {
+ const totalResourceMap: Map<RemoteMachineMeta, GPUInfo[]> = this.gpuResourceDetection();
+ const qualifiedRMs: RemoteMachineMeta[] = [];
+ totalResourceMap.forEach((gpuInfos: GPUInfo[], rmMeta: RemoteMachineMeta) => {
+ if (gpuInfos !== undefined && gpuInfos.length >= requiredGPUNum) {
+ qualifiedRMs.push(rmMeta);
+ }
+ });
+ if (qualifiedRMs.length > 0) {
+ const allocatedRm: RemoteMachineMeta = this.selectMachine(qualifiedRMs);
+ const gpuInfos: GPUInfo[] | undefined = totalResourceMap.get(allocatedRm);
+ if (gpuInfos !== undefined) { // should always be true
+ return this.allocateHost(requiredGPUNum, allocatedRm, gpuInfos, trialJobDetail);
+ } else {
+ assert(false, 'gpuInfos is undefined');
+ }
+ }
+ return undefined;
+ }
+
+ /**
+ * Detect the available GPU resource of each connected remote machine
+ * @returns a map from remote machine metadata to its currently allocatable GPUs
+ */
+ private gpuResourceDetection(): Map<RemoteMachineMeta, GPUInfo[]> {
+ const totalResourceMap: Map<RemoteMachineMeta, GPUInfo[]> = new Map<RemoteMachineMeta, GPUInfo[]>();
+ this.machineExecutorMap.forEach((executorManager: ExecutorManager, machineConfig: RemoteMachineConfig) => {
+ const rmMeta = executorManager.rmMeta;
+ // Assign the total GPU count as the initial available GPU number
+ if (rmMeta.gpuSummary !== undefined) {
+ const availableGPUs: GPUInfo[] = [];
+ const designatedGpuIndices: number[] | undefined = machineConfig.gpuIndices;
+ if (designatedGpuIndices !== undefined) {
+ for (const gpuIndex of designatedGpuIndices) {
+ if (gpuIndex >= rmMeta.gpuSummary.gpuCount) {
+ throw new Error(`Specified GPU index not found: ${gpuIndex}`);
+ }
+ }
+ }
+ this.log.debug(`designated gpu indices: ${designatedGpuIndices}`);
+ rmMeta.gpuSummary.gpuInfos.forEach((gpuInfo: GPUInfo) => {
+ // Do NOT allocate this GPU if it has an active process, or is reserved by a job,
+ // or its index is not in the gpuIndices configuration of machineList,
+ // or the number of trials on it has reached maxTrialNumberPerGpu.
+ // If the user sets useActiveGpu, use the GPU regardless of other active processes.
+ if (designatedGpuIndices === undefined || designatedGpuIndices.includes(gpuInfo.index)) {
+ if (rmMeta.occupiedGpuIndexMap !== undefined) {
+ const num: number | undefined = rmMeta.occupiedGpuIndexMap.get(gpuInfo.index);
+ if ((num === undefined && (!machineConfig.useActiveGpu && gpuInfo.activeProcessNum === 0 || machineConfig.useActiveGpu)) ||
+ (num !== undefined && num < machineConfig.maxTrialNumberPerGpu)) {
+ availableGPUs.push(gpuInfo);
+ }
+ } else {
+ throw new Error(`occupiedGpuIndexMap initialize error!`);
+ }
+ }
+ });
+ totalResourceMap.set(rmMeta, availableGPUs);
+ }
+ });
+
+ return totalResourceMap;
+ }
+
+ private selectMachine(rmMetas: RemoteMachineMeta[]): RemoteMachineMeta {
+ assert(rmMetas !== undefined && rmMetas.length > 0);
+
+ if (this.policyName === 'random') {
+ return randomSelect(rmMetas);
+ } else if (this.policyName === 'round-robin') {
+ return this.roundRobinSelect(rmMetas);
+ } else {
+ throw new Error(`Unsupported schedule policy: ${this.policyName}`);
+ }
+ }
+
+ private roundRobinSelect(rmMetas: RemoteMachineMeta[]): RemoteMachineMeta {
+ while (!rmMetas.includes(this.configuredRMs[this.roundRobinIndex % this.configuredRMs.length])) {
+ this.roundRobinIndex++;
+ }
+
+ return this.configuredRMs[this.roundRobinIndex++ % this.configuredRMs.length];
+ }
+
+ private selectGPUsForTrial(gpuInfos: GPUInfo[], requiredGPUNum: number): GPUInfo[] {
+ // Sequentially allocate GPUs
+ return gpuInfos.slice(0, requiredGPUNum);
+ }
+
+ private allocateHost(requiredGPUNum: number, rmMeta: RemoteMachineMeta,
+ gpuInfos: GPUInfo[], trialJobDetail: RemoteMachineTrialJobDetail): RemoteMachineScheduleResult {
+ assert(gpuInfos.length >= requiredGPUNum);
+ const allocatedGPUs: GPUInfo[] = this.selectGPUsForTrial(gpuInfos, requiredGPUNum);
+ allocatedGPUs.forEach((gpuInfo: GPUInfo) => {
+ if (rmMeta.occupiedGpuIndexMap !== undefined) {
+ let num: number | undefined = rmMeta.occupiedGpuIndexMap.get(gpuInfo.index);
+ if (num === undefined) {
+ num = 0;
+ }
+ rmMeta.occupiedGpuIndexMap.set(gpuInfo.index, num + 1);
+ } else {
+ throw new Error(`Machine ${rmMeta.config.host} occupiedGpuIndexMap initialize error!`);
+ }
+ });
+ trialJobDetail.gpuIndices = allocatedGPUs;
+ trialJobDetail.rmMeta = rmMeta;
+
+ return {
+ resultType: ScheduleResultType.SUCCEED,
+ scheduleInfo: {
+ rmMeta: rmMeta,
+ cudaVisibleDevice: allocatedGPUs
+ .map((gpuInfo: GPUInfo) => {
+ return gpuInfo.index;
+ })
+ .join(',')
+ }
+ };
+ }
+}
diff --git a/ts/nni_manager/training_service/remote_machine/osCommands.ts b/ts/nni_manager/training_service/remote_machine/osCommands.ts
new file mode 100644
index 0000000000000000000000000000000000000000..05df0b0bd0be34c444083913e583aff1f49c7610
--- /dev/null
+++ b/ts/nni_manager/training_service/remote_machine/osCommands.ts
@@ -0,0 +1,47 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
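+
+// OsCommands is the per-OS command-generation contract: LinuxCommands and
+// WindowsCommands (under ./extends) implement it by returning shell or
+// cmd/PowerShell command strings. An illustrative call, assuming the Linux
+// implementation's '/' separator:
+//   osCommands.joinPath('nni-experiments', experimentId, 'trials')
+//   // -> 'nni-experiments/<experimentId>/trials'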
+ +import { RemoteCommandResult } from "./remoteMachineData"; + +abstract class OsCommands { + + protected pathSpliter: string = '/'; + protected multiplePathSpliter: RegExp = new RegExp(`[\\\\/]{2,}`); + protected normalizePath: RegExp = new RegExp(`[\\\\/]`); + + public abstract getScriptExt(): string; + public abstract generateStartScript(workingDirectory: string, trialJobId: string, experimentId: string, + trialSequenceId: string, isMultiPhase: boolean, jobIdFileName: string, + command: string, nniManagerAddress: string, nniManagerPort: number, + nniManagerVersion: string, logCollection: string, exitCodeFile: string, + codeDir: string, cudaVisibleSetting: string): string; + public abstract generateGpuStatsScript(scriptFolder: string): string; + public abstract createFolder(folderName: string, sharedFolder: boolean): string; + public abstract allowPermission(isRecursive: boolean, ...folders: string[]): string; + public abstract removeFolder(folderName: string, isRecursive: boolean, isForce: boolean): string; + public abstract removeFiles(folderOrFileName: string, filePattern: string): string; + public abstract readLastLines(fileName: string, lineCount: number): string; + public abstract isProcessAliveCommand(pidFileName: string): string; + public abstract isProcessAliveProcessOutput(result: RemoteCommandResult): boolean; + public abstract killChildProcesses(pidFileName: string, killSelf: boolean): string; + public abstract extractFile(tarFileName: string, targetFolder: string): string; + public abstract executeScript(script: string, isFile: boolean): string; + public abstract setPythonPath(pythonPath: string | undefined, command: string | undefined): string | undefined; + public abstract fileExistCommand(filePath: string): string | undefined; + public abstract getCurrentPath(): string; + + public joinPath(...paths: string[]): string { + let dir: string = paths.filter((path: any) => path !== '').join(this.pathSpliter); + if (dir === '') { + dir = '.'; + } else { + // normalize + dir = dir.replace(this.normalizePath, this.pathSpliter); + // reduce duplicate ones + dir = dir.replace(this.multiplePathSpliter, this.pathSpliter); + } + return dir; + } +} + +export { OsCommands }; diff --git a/ts/nni_manager/training_service/remote_machine/remoteMachineData.ts b/ts/nni_manager/training_service/remote_machine/remoteMachineData.ts new file mode 100644 index 0000000000000000000000000000000000000000..40145f4af8c03842706a5da1232e913264b24bcf --- /dev/null +++ b/ts/nni_manager/training_service/remote_machine/remoteMachineData.ts @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
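+
+// Rough sketch of the executor pooling defined below (illustrative): each
+// ShellExecutor multiplexes up to maxUsageCount users of one SSH connection
+// (see shellExecutor.ts), so a manager serves trials like:
+//   const manager = new ExecutorManager(machineConfig);
+//   const executor = await manager.getExecutor(trialId); // reuse or open a connection
+//   manager.releaseExecutor(trialId);                    // decrement the usage count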
+
+import { TrialJobApplicationForm, TrialJobDetail, TrialJobStatus } from 'common/trainingService';
+import { RemoteMachineConfig } from 'common/experimentConfig';
+import { GPUInfo, GPUSummary, ScheduleResultType } from '../common/gpuData';
+import { ShellExecutor } from './shellExecutor';
+
+/**
+ * Metadata of remote machine for configuration and status query
+ */
+export class RemoteMachineMeta {
+ public readonly config: RemoteMachineConfig;
+ public gpuSummary: GPUSummary | undefined;
+ public occupiedGpuIndexMap: Map<number, number>;
+
+ constructor(config: RemoteMachineConfig) {
+ this.config = config;
+ this.occupiedGpuIndexMap = new Map<number, number>();
+ }
+}
+
+/**
+ * The execution result of a command executed on a remote machine
+ */
+export class RemoteCommandResult {
+ public readonly stdout: string;
+ public readonly stderr: string;
+ public readonly exitCode: number;
+
+ constructor(stdout: string, stderr: string, exitCode: number) {
+ this.stdout = stdout;
+ this.stderr = stderr;
+ this.exitCode = exitCode;
+ }
+}
+
+/**
+ * RemoteMachineTrialJobDetail
+ */
+export class RemoteMachineTrialJobDetail implements TrialJobDetail {
+ public id: string;
+ public status: TrialJobStatus;
+ public submitTime: number;
+ public startTime?: number;
+ public endTime?: number;
+ public tags?: string[];
+ public url?: string;
+ public workingDirectory: string;
+ public form: TrialJobApplicationForm;
+ public rmMeta?: RemoteMachineMeta;
+ public isEarlyStopped?: boolean;
+ public gpuIndices: GPUInfo[];
+
+ constructor(id: string, status: TrialJobStatus, submitTime: number,
+ workingDirectory: string, form: TrialJobApplicationForm) {
+ this.id = id;
+ this.status = status;
+ this.submitTime = submitTime;
+ this.workingDirectory = workingDirectory;
+ this.form = form;
+ this.tags = [];
+ this.gpuIndices = [];
+ }
+}
+
+/**
+ * The remote machine executor manager
+ */
+export class ExecutorManager {
+ public readonly rmMeta: RemoteMachineMeta;
+ private readonly executorMap: Map<string, ShellExecutor> = new Map<string, ShellExecutor>();
+
+ private executors: ShellExecutor[] = [];
+
+ constructor(config: RemoteMachineConfig) {
+ this.rmMeta = new RemoteMachineMeta(config);
+ }
+
+ public async getExecutor(id: string): Promise<ShellExecutor> {
+ let isFound = false;
+ let executor: ShellExecutor | undefined;
+
+ // already assigned
+ if (this.executorMap.has(id)) {
+ executor = this.executorMap.get(id);
+ if (executor === undefined) {
+ throw new Error("executor shouldn't be undefined before return!");
+ }
+ return executor;
+ }
+
+ for (const candidateExecutor of this.executors) {
+ if (candidateExecutor.addUsage()) {
+ isFound = true;
+ executor = candidateExecutor;
+ break;
+ }
+ }
+ // initialize a new executor if no free one is available.
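+ // (addUsage() fails once an executor already serves maxUsageCount users,
+ // which is what forces the creation of a fresh SSH connection here.)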
+ if (!isFound) {
+ executor = await this.createShellExecutor();
+ }
+
+ if (executor === undefined) {
+ throw new Error("executor shouldn't be undefined before set!");
+ }
+ this.executorMap.set(id, executor);
+
+ return executor;
+ }
+
+ /**
+ * Close all executors
+ */
+ public releaseAllExecutor(): void {
+ this.executorMap.clear();
+ for (const executor of this.executors) {
+ executor.close();
+ }
+ this.executors = [];
+ }
+
+ /**
+ * Release the executor assigned to the given id, decreasing its usage count by one
+ * @param id the id the executor was assigned to
+ */
+ public releaseExecutor(id: string): void {
+ const executor = this.executorMap.get(id);
+ if (executor === undefined) {
+ throw new Error(`executor for ${id} is not found`);
+ }
+ executor.releaseUsage();
+ this.executorMap.delete(id);
+ }
+
+ /**
+ * Create a new connection executor and initialize it
+ */
+ private async createShellExecutor(): Promise<ShellExecutor> {
+ const executor = new ShellExecutor();
+ await executor.initialize(this.rmMeta);
+ if (!executor.addUsage()) {
+ throw new Error("failed to add usage on newly created executor! It's a weird bug!");
+ }
+ this.executors.push(executor);
+ return executor;
+ }
+}
+
+export type RemoteMachineScheduleResult = { scheduleInfo: RemoteMachineScheduleInfo | undefined; resultType: ScheduleResultType };
+
+export type RemoteMachineScheduleInfo = { rmMeta: RemoteMachineMeta; cudaVisibleDevice: string };
diff --git a/ts/nni_manager/training_service/remote_machine/remoteMachineJobRestServer.ts b/ts/nni_manager/training_service/remote_machine/remoteMachineJobRestServer.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1657b784503780af1d1db8fafe50a89fb4f8ee64
--- /dev/null
+++ b/ts/nni_manager/training_service/remote_machine/remoteMachineJobRestServer.ts
@@ -0,0 +1,32 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { ClusterJobRestServer } from '../common/clusterJobRestServer';
+import { RemoteMachineTrainingService } from './remoteMachineTrainingService';
+
+/**
+ * RemoteMachine training service REST server; provides a REST API to support remote machine job metrics updates
+ */
+export class RemoteMachineJobRestServer extends ClusterJobRestServer {
+ private readonly remoteMachineTrainingService: RemoteMachineTrainingService;
+
+ /**
+ * constructor to provide NNIRestServer's own rest property, e.g. port
+ */
+ constructor(remoteMachineTrainingService: RemoteMachineTrainingService) {
+ super();
+ this.remoteMachineTrainingService = remoteMachineTrainingService;
+ }
+
+ protected handleTrialMetrics(jobId: string, metrics: any[]): void {
+ // Split the metrics array into single metrics, then emit each one.
+ // Warning: if the metrics are not split into single ones, the behavior will be UNKNOWN
+ for (const singleMetric of metrics) {
+ this.remoteMachineTrainingService.MetricsEmitter.emit('metric', {
+ id : jobId,
+ data : singleMetric
+ });
+ }
+ }
+}
diff --git a/ts/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts b/ts/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6cbb216658156caabdf6969ff9e362864646ca0e
--- /dev/null
+++ b/ts/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts
@@ -0,0 +1,603 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
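+
+// Rough lifecycle (illustrative): the NNI manager constructs this service with a
+// RemoteConfig and calls run(); submitTrialJob() only queues a job id, while the
+// run() loop schedules a GPU, copies the experiment code, and launches the trial:
+//   const service = new RemoteMachineTrainingService(config);
+//   void service.run();
+//   const detail = await service.submitTrialJob(form);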
+
+import assert from 'assert';
+import { EventEmitter } from 'events';
+import fs from 'fs';
+import path from 'path';
+import { ShellExecutor } from 'training_service/remote_machine/shellExecutor';
+import { Deferred } from 'ts-deferred';
+import * as component from 'common/component';
+import { NNIError, NNIErrorNames, MethodNotImplementedError } from 'common/errors';
+import { getExperimentId } from 'common/experimentStartupInfo';
+import { getLogger, Logger } from 'common/log';
+import { ObservableTimer } from 'common/observableTimer';
+import {
+ HyperParameters, TrainingService, TrialJobApplicationForm,
+ TrialJobDetail, TrialJobMetric
+} from 'common/trainingService';
+import {
+ delay, generateParamFileName, getExperimentRootDir, getIPV4Address, getJobCancelStatus,
+ getVersion, uniqueString
+} from 'common/utils';
+import { RemoteConfig, RemoteMachineConfig } from 'common/experimentConfig';
+import { CONTAINER_INSTALL_NNI_SHELL_FORMAT } from '../common/containerJobData';
+import { GPUSummary, ScheduleResultType } from '../common/gpuData';
+import { execMkdir, validateCodeDir } from '../common/util';
+import { GPUScheduler } from './gpuScheduler';
+import {
+ ExecutorManager, RemoteMachineScheduleInfo, RemoteMachineScheduleResult, RemoteMachineTrialJobDetail
+} from './remoteMachineData';
+import { RemoteMachineJobRestServer } from './remoteMachineJobRestServer';
+
+/**
+ * Training service implementation for remote machines (Linux and Windows)
+ */
+@component.Singleton
+class RemoteMachineTrainingService implements TrainingService {
+ private readonly initExecutorId = "initConnection";
+ private readonly machineExecutorManagerMap: Map<RemoteMachineConfig, ExecutorManager>; // machine executor map
+ private readonly machineCopyExpCodeDirPromiseMap: Map<RemoteMachineConfig, Promise<void>>;
+ private readonly trialExecutorManagerMap: Map<string, ExecutorManager>; // trial executor map
+ private readonly trialJobsMap: Map<string, RemoteMachineTrialJobDetail>;
+ private readonly expRootDir: string;
+ private gpuScheduler?: GPUScheduler;
+ private readonly jobQueue: string[];
+ private readonly timer: ObservableTimer;
+ private stopping: boolean = false;
+ private readonly metricsEmitter: EventEmitter;
+ private readonly log: Logger;
+ private remoteRestServerPort?: number;
+ private versionCheck: boolean = true;
+ private logCollection: string = 'none';
+ private sshConnectionPromises: any[];
+ private config: RemoteConfig;
+
+ constructor(config: RemoteConfig) {
+ this.metricsEmitter = new EventEmitter();
+ this.trialJobsMap = new Map<string, RemoteMachineTrialJobDetail>();
+ this.trialExecutorManagerMap = new Map<string, ExecutorManager>();
+ this.machineCopyExpCodeDirPromiseMap = new Map<RemoteMachineConfig, Promise<void>>();
+ this.machineExecutorManagerMap = new Map<RemoteMachineConfig, ExecutorManager>();
+ this.jobQueue = [];
+ this.sshConnectionPromises = [];
+ this.expRootDir = getExperimentRootDir();
+ this.timer = component.get(ObservableTimer);
+ this.log = getLogger('RemoteMachineTrainingService');
+ this.log.info('Construct remote machine training service.');
+ this.config = config;
+
+ if (!fs.lstatSync(this.config.trialCodeDirectory).isDirectory()) {
+ throw new Error(`codeDir ${this.config.trialCodeDirectory} is not a directory`);
+ }
+ validateCodeDir(this.config.trialCodeDirectory);
+
+ this.sshConnectionPromises = this.config.machineList.map(
+ machine => this.initRemoteMachineOnConnected(machine)
+ );
+ }
+
+ /**
+ * Loop to launch trial jobs and collect trial metrics
+ */
+ public async run(): Promise<void> {
+ const restServer = new RemoteMachineJobRestServer(this);
+ await restServer.start();
+ restServer.setEnableVersionCheck = this.versionCheck;
+ this.log.info('Run remote machine training service.');
+ if (this.sshConnectionPromises.length > 0) {
+ await Promise.all(this.sshConnectionPromises);
+ this.log.info('ssh connection initialized!');
+ // set sshConnectionPromises to [] to avoid duplicated log information
+ this.sshConnectionPromises = [];
+ // initialize gpuScheduler
+ this.gpuScheduler = new GPUScheduler(this.machineExecutorManagerMap);
+ // Copy codeDir to each remote machine
+ for (const [machineConfig, executorManager] of this.machineExecutorManagerMap.entries()) {
+ const executor: ShellExecutor = await executorManager.getExecutor(this.initExecutorId);
+ if (executor !== undefined) {
+ this.machineCopyExpCodeDirPromiseMap.set(
+ machineConfig,
+ executor.copyDirectoryToRemote(this.config.trialCodeDirectory, executor.getRemoteCodePath(getExperimentId()))
+ );
+ }
+ }
+ }
+ while (!this.stopping) {
+ while (this.jobQueue.length > 0) {
+ this.updateGpuReservation();
+ const trialJobId: string = this.jobQueue[0];
+ const prepareResult: boolean = await this.prepareTrialJob(trialJobId);
+ if (prepareResult) {
+ // Remove the trial job with trialJobId from the job queue
+ this.jobQueue.shift();
+ } else {
+ // Break the while loop since no GPU resource is available right now;
+ // wait to schedule the job in the next iteration
+ break;
+ }
+ }
+ if (restServer.getErrorMessage !== undefined) {
+ this.stopping = true;
+ throw new Error(restServer.getErrorMessage);
+ }
+ await delay(3000);
+ }
+ this.log.info('RemoteMachineTrainingService run loop exited.');
+ }
+
+ /**
+ * Assign an executor manager to a trial
+ * @param trial remote machine trial job detail
+ */
+ public allocateExecutorManagerForTrial(trial: RemoteMachineTrialJobDetail): void {
+ if (trial.rmMeta === undefined) {
+ throw new Error(`rmMeta not set in trial ${trial.id}`);
+ }
+ const executorManager: ExecutorManager | undefined = this.machineExecutorManagerMap.get(trial.rmMeta.config);
+ if (executorManager === undefined) {
+ throw new Error(`executorManager not initialized`);
+ }
+ this.trialExecutorManagerMap.set(trial.id, executorManager);
+ }
+
+ /**
+ * If a trial is finished, release the connection resource
+ * @param trial remote machine trial job detail
+ */
+ public releaseTrialResource(trial: RemoteMachineTrialJobDetail): void {
+ if (trial.rmMeta === undefined) {
+ throw new Error(`rmMeta not set in trial ${trial.id}`);
+ }
+ const executorManager = this.trialExecutorManagerMap.get(trial.id);
+ if (executorManager === undefined) {
+ throw new Error(`ExecutorManager is not assigned for trial ${trial.id}`);
+ }
+ // Note: it still keeps a reference in trialExecutorManagerMap, as there may be subsequent requests from the NNI manager.
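+ // (releaseExecutor() removes this trial's entry from the manager's executorMap
+ // and decrements the usage count of the underlying SSH connection.)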
+ executorManager.releaseExecutor(trial.id);
+ }
+
+ /**
+ * List submitted trial jobs
+ */
+ public async listTrialJobs(): Promise<TrialJobDetail[]> {
+ const jobs: TrialJobDetail[] = [];
+ const deferred: Deferred<TrialJobDetail[]> = new Deferred<TrialJobDetail[]>();
+
+ for (const [key,] of this.trialJobsMap) {
+ jobs.push(await this.getTrialJob(key));
+ }
+ deferred.resolve(jobs);
+
+ return deferred.promise;
+ }
+
+ /**
+ * Get trial job detail information
+ * @param trialJobId ID of trial job
+ */
+ public async getTrialJob(trialJobId: string): Promise<TrialJobDetail> {
+ const trialJob: RemoteMachineTrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+ if (trialJob === undefined) {
+ throw new NNIError(NNIErrorNames.NOT_FOUND, `trial job id ${trialJobId} not found`);
+ }
+ // TODO: add another job status, and design new job status change logic
+ if (trialJob.status === 'RUNNING' || trialJob.status === 'UNKNOWN') {
+ // Get the executor where the job is running
+ if (trialJob.rmMeta === undefined) {
+ throw new Error(`rmMeta not set for submitted job ${trialJobId}`);
+ }
+ const executor = await this.getExecutor(trialJob.id);
+
+ return this.updateTrialJobStatus(trialJob, executor);
+ } else {
+ return trialJob;
+ }
+ }
+
+ /**
+ * Get a trial job file
+ * @param _trialJobId ID of trial job
+ * @param _fileName name of the file to fetch
+ */
+ public async getTrialFile(_trialJobId: string, _fileName: string): Promise<string | Buffer> {
+ throw new MethodNotImplementedError();
+ }
+
+ /**
+ * Add job metrics listener
+ * @param listener callback listener
+ */
+ public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
+ this.metricsEmitter.on('metric', listener);
+ }
+
+ /**
+ * Remove job metrics listener
+ * @param listener callback listener
+ */
+ public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
+ this.metricsEmitter.off('metric', listener);
+ }
+
+ /**
+ * Submit trial job
+ * @param form trial job description form
+ */
+ public async submitTrialJob(form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+ // Generate a trial job id (random)
+ const trialJobId: string = uniqueString(5);
+
+ const trialJobDetail: RemoteMachineTrialJobDetail = new RemoteMachineTrialJobDetail(
+ trialJobId,
+ 'WAITING',
+ Date.now(),
+ "unset",
+ form
+ );
+ this.jobQueue.push(trialJobId);
+ this.trialJobsMap.set(trialJobId, trialJobDetail);
+
+ return Promise.resolve(trialJobDetail);
+ }
+
+ /**
+ * Update trial job for multi-phase
+ * @param trialJobId trial job id
+ * @param form job application form
+ */
+ public async updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+ const trialJobDetail: undefined | TrialJobDetail = this.trialJobsMap.get(trialJobId);
+ if (trialJobDetail === undefined) {
+ throw new Error(`updateTrialJob failed: ${trialJobId} not found`);
+ }
+ await this.writeParameterFile(trialJobId, form.hyperParameters);
+
+ return trialJobDetail;
+ }
+
+ /**
+ * Cancel trial job
+ * @param trialJobId ID of trial job
+ */
+ public async cancelTrialJob(trialJobId: string, isEarlyStopped: boolean = false): Promise<void> {
+ const trialJob: RemoteMachineTrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+ if (trialJob === undefined) {
+ throw new Error(`trial job id ${trialJobId} not found`);
+ }
+
+ // Remove the job with trialJobId from the job queue
+ const index: number = this.jobQueue.indexOf(trialJobId);
+ if (index >= 0) {
+ this.jobQueue.splice(index, 1);
+ }
+
+ // Get the executor where the job is running
+ if (trialJob.rmMeta !== undefined) {
+ // If the trial job is already scheduled, check its status and kill the trial process on the remote machine
+ const executor = await this.getExecutor(trialJob.id);
+
+ if (trialJob.status === 'UNKNOWN') {
+ trialJob.status = 'USER_CANCELED';
+ this.releaseTrialResource(trialJob);
+ return;
+ }
+
+ const jobpidPath: string = this.getJobPidPath(executor, trialJob.id);
+ try {
+ // Mark the toEarlyStop tag here
+ trialJob.isEarlyStopped = isEarlyStopped;
+ await executor.killChildProcesses(jobpidPath);
+ this.releaseTrialResource(trialJob);
+ } catch (error) {
+ // Do not handle the error, since a failed kill will not impact the trial job's current status
+ this.log.error(`remoteTrainingService.cancelTrialJob: ${error}`);
+ }
+ } else {
+ // Job is not scheduled yet, so set the status to 'USER_CANCELED' directly
+ assert(isEarlyStopped === false, 'isEarlyStopped is not supposed to be true here.');
+ trialJob.status = getJobCancelStatus(isEarlyStopped);
+ }
+ }
+
+ public async setClusterMetadata(_key: string, _value: string): Promise<void> { return; }
+ public async getClusterMetadata(_key: string): Promise<string> { return ''; }
+
+ /**
+ * cleanUp() has a timeout of 10s to clean up remote connections
+ */
+ public async cleanUp(): Promise<void> {
+ this.log.info('Stopping remote machine training service...');
+ this.stopping = true;
+ await this.cleanupConnections();
+ }
+
+ private async getExecutor(trialId: string): Promise<ShellExecutor> {
+ const executorManager = this.trialExecutorManagerMap.get(trialId);
+ if (executorManager === undefined) {
+ throw new Error(`ExecutorManager is not assigned for trial ${trialId}`);
+ }
+ return await executorManager.getExecutor(trialId);
+ }
+
+ /**
+ * Remove the GPU reservation of jobs that are no longer running
+ */
+ private updateGpuReservation(): void {
+ if (this.gpuScheduler) {
+ for (const [key, value] of this.trialJobsMap) {
+ if (!['WAITING', 'RUNNING'].includes(value.status)) {
+ this.gpuScheduler.removeGpuReservation(key, this.trialJobsMap);
+ }
+ }
+ }
+ }
+
+ /**
+ * stop the gpu_metrics_collector process on each remote machine and remove unused scripts
+ */
+ private async cleanupConnections(): Promise<void> {
+ try {
+ for (const executorManager of this.machineExecutorManagerMap.values()) {
+ const executor = await executorManager.getExecutor(this.initExecutorId);
+ if (executor !== undefined) {
+ this.log.info(`killing gpu metric collector on ${executor.name}`);
+ const gpuJobPidPath: string = executor.joinPath(executor.getRemoteScriptsPath(getExperimentId()), 'pid');
+ await executor.killChildProcesses(gpuJobPidPath, true);
+ }
+ executorManager.releaseAllExecutor();
+ }
+ } catch (error) {
+ // ignore the error; this function is called to clean up remote connections when the experiment is stopping
+ this.log.error(`Cleanup connection exception, error is ${error}`);
+ }
+ }
+
+ private async initRemoteMachineOnConnected(machineConfig: RemoteMachineConfig): Promise<void> {
+ const executorManager: ExecutorManager = new ExecutorManager(machineConfig);
+ this.log.info(`connecting to ${machineConfig.user}@${machineConfig.host}:${machineConfig.port}`);
+ const executor: ShellExecutor = await executorManager.getExecutor(this.initExecutorId);
+ this.log.debug(`reached ${executor.name}`);
+ this.machineExecutorManagerMap.set(machineConfig, executorManager);
+ this.log.debug(`initializing ${executor.name}`);
+
+ // Create the root working directory after the executor is ready
+ const nniRootDir: string = executor.joinPath(executor.getTempPath(), 'nni');
+ await executor.createFolder(executor.getRemoteExperimentRootDir(getExperimentId()));
+
+ // the directory to store temp scripts on the remote machine
+ const remoteGpuScriptCollectorDir: string = executor.getRemoteScriptsPath(getExperimentId());
+
+ // clean up previous results.
+ await executor.createFolder(remoteGpuScriptCollectorDir, true);
+ await executor.allowPermission(true, nniRootDir);
+
+ // Begin to execute the gpu_metrics_collector script
+ const script = executor.generateGpuStatsScript(getExperimentId());
+ executor.executeScript(script, false, true);
+ // The timer triggers every second, which would cause multiple collections to run
+ // on the server at once, so reduce its frequency: only allow one run at a time.
+ const collectingCount: boolean[] = [];
+
+ const disposable: Rx.IDisposable = this.timer.subscribe(
+ async () => {
+ if (collectingCount.length == 0) {
+ collectingCount.push(true);
+ const cmdresult = await executor.readLastLines(executor.joinPath(remoteGpuScriptCollectorDir, 'gpu_metrics'));
+ if (cmdresult !== "") {
+ executorManager.rmMeta.gpuSummary = JSON.parse(cmdresult);
+ if (executorManager.rmMeta.gpuSummary.gpuCount === 0) {
+ this.log.warning(`No GPU found on remote machine ${machineConfig.host}`);
+ this.timer.unsubscribe(disposable);
+ }
+ }
+ if (this.stopping) {
+ this.timer.unsubscribe(disposable);
+ this.log.debug(`Stopped GPU collector on ${machineConfig.host}, since experiment is exiting.`);
+ }
+ collectingCount.pop();
+ }
+ }
+ );
+ }
+
+ private async prepareTrialJob(trialJobId: string): Promise<boolean> {
+ const deferred: Deferred<boolean> = new Deferred<boolean>();
+
+ if (this.gpuScheduler === undefined) {
+ throw new Error('gpuScheduler is not initialized');
+ }
+ const trialJobDetail: RemoteMachineTrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+ if (trialJobDetail === undefined) {
+ throw new NNIError(NNIErrorNames.INVALID_JOB_DETAIL, `Invalid job detail information for trial job ${trialJobId}`);
+ }
+ // If the job is not WAITING, don't prepare it and resolve true immediately
+ if (trialJobDetail.status !== 'WAITING') {
+ deferred.resolve(true);
+
+ return deferred.promise;
+ }
+ // get a machine from the scheduler
+ const rmScheduleResult: RemoteMachineScheduleResult = this.gpuScheduler.scheduleMachine(this.config.trialGpuNumber, trialJobDetail);
+ if (rmScheduleResult.resultType === ScheduleResultType.REQUIRE_EXCEED_TOTAL) {
+ const errorMessage: string = `Required GPU number ${this.config.trialGpuNumber} is too large, no machine can meet it`;
+ this.log.error(errorMessage);
+ deferred.reject();
+ throw new NNIError(NNIErrorNames.RESOURCE_NOT_AVAILABLE, errorMessage);
+ } else if (rmScheduleResult.resultType === ScheduleResultType.SUCCEED
+ && rmScheduleResult.scheduleInfo !== undefined) {
+ const rmScheduleInfo: RemoteMachineScheduleInfo = rmScheduleResult.scheduleInfo;
+
+ trialJobDetail.rmMeta = rmScheduleInfo.rmMeta;
+ const copyExpCodeDirPromise = this.machineCopyExpCodeDirPromiseMap.get(rmScheduleInfo.rmMeta.config);
+ if (copyExpCodeDirPromise !== undefined) {
+ await copyExpCodeDirPromise;
+ }
+
+ this.allocateExecutorManagerForTrial(trialJobDetail);
+ const executor = await this.getExecutor(trialJobDetail.id);
+
+ trialJobDetail.workingDirectory = executor.joinPath(executor.getRemoteExperimentRootDir(getExperimentId()), 'trials', trialJobDetail.id);
+
+ await this.launchTrialOnScheduledMachine(
+ trialJobId, trialJobDetail.form, rmScheduleInfo);
+
+ trialJobDetail.status = 'RUNNING';
+ trialJobDetail.url = `file://${rmScheduleInfo.rmMeta.config.host}:${trialJobDetail.workingDirectory}`;
+ trialJobDetail.startTime = Date.now();
+
+ this.trialJobsMap.set(trialJobId, trialJobDetail);
+ deferred.resolve(true);
+ } else if (rmScheduleResult.resultType === ScheduleResultType.TMP_NO_AVAILABLE_GPU) {
+ this.log.info(`Right now no available GPU can be allocated for trial ${trialJobId}, will try to schedule later`);
+ deferred.resolve(false);
+ } else {
+ deferred.reject(`Invalid schedule result type: ${rmScheduleResult.resultType}`);
+ }
+
+ return deferred.promise;
+ }
+
+ private async launchTrialOnScheduledMachine(trialJobId: string, form: TrialJobApplicationForm,
+ rmScheduleInfo: RemoteMachineScheduleInfo): Promise<void> {
+ const cudaVisibleDevice: string = rmScheduleInfo.cudaVisibleDevice;
+ const executor = await this.getExecutor(trialJobId);
+ const trialJobDetail: RemoteMachineTrialJobDetail | undefined = this.trialJobsMap.get(trialJobId);
+ if (trialJobDetail === undefined) {
+ throw new Error(`Can not get trial job detail for job: ${trialJobId}`);
+ }
+
+ const trialLocalTempFolder: string = path.join(this.expRootDir, 'trials-local', trialJobId);
+
+ await executor.createFolder(executor.joinPath(trialJobDetail.workingDirectory, '.nni'));
+
+ // The run script is generated from the OS-specific template;
+ // see generateStartScript in extends/linuxCommands.ts and extends/windowsCommands.ts
+
+ let cudaVisible: string;
+ // Set the HIP_VISIBLE_DEVICES environment variable based on cudaVisibleDevice.
+ // If no valid cudaVisibleDevice is defined, set HIP_VISIBLE_DEVICES to an empty string to hide the GPU devices.
+ // If gpuNum is undefined, HIP_VISIBLE_DEVICES will not be set in the script.
+ if (this.config.trialGpuNumber === undefined) {
+ cudaVisible = "";
+ } else {
+ if (typeof cudaVisibleDevice === 'string' && cudaVisibleDevice.length > 0) {
+ cudaVisible = `HIP_VISIBLE_DEVICES=${cudaVisibleDevice}`;
+ } else {
+ cudaVisible = `HIP_VISIBLE_DEVICES=" "`;
+ }
+ }
+ const nniManagerIp: string = this.config.nniManagerIp ? this.config.nniManagerIp : await getIPV4Address();
+ if (this.remoteRestServerPort === undefined) {
+ const restServer: RemoteMachineJobRestServer = component.get(RemoteMachineJobRestServer);
+ this.remoteRestServerPort = restServer.clusterRestServerPort;
+ }
+ const version: string = this.versionCheck ? await getVersion() : '';
+ const runScriptTrialContent: string = executor.generateStartScript(
+ trialJobDetail.workingDirectory,
+ trialJobId,
+ getExperimentId(),
+ trialJobDetail.form.sequenceId.toString(),
+ false, // multi-phase
+ this.config.trialCommand,
+ nniManagerIp,
+ this.remoteRestServerPort,
+ version,
+ this.logCollection,
+ cudaVisible);
+
+ // Create a tmp trial working folder locally.
+ await execMkdir(path.join(trialLocalTempFolder, '.nni'));
+
+ // Write install_nni.sh; it is not used on the Windows platform.
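+ // Three artifacts are staged locally and then shipped together by
+ // copyDirectoryToRemote (tar over SFTP): the install_nni script, the run script
+ // generated above, and the parameter file written by writeParameterFile().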
+ await fs.promises.writeFile(path.join(trialLocalTempFolder, executor.getScriptName("install_nni")), CONTAINER_INSTALL_NNI_SHELL_FORMAT, { encoding: 'utf8' });
+ // Write the file contents (run.sh and parameter.cfg) to local tmp files
+ await fs.promises.writeFile(path.join(trialLocalTempFolder, executor.getScriptName("run")), runScriptTrialContent, { encoding: 'utf8' });
+ await this.writeParameterFile(trialJobId, form.hyperParameters);
+ // Copy the files in codeDir to the remote working directory
+ await executor.copyDirectoryToRemote(trialLocalTempFolder, trialJobDetail.workingDirectory);
+ // Execute the run script on the remote machine
+ executor.executeScript(executor.joinPath(trialJobDetail.workingDirectory, executor.getScriptName("run")), true, true);
+ }
+
+ private async updateTrialJobStatus(trialJob: RemoteMachineTrialJobDetail, executor: ShellExecutor): Promise<TrialJobDetail> {
+ const deferred: Deferred<TrialJobDetail> = new Deferred<TrialJobDetail>();
+ const jobpidPath: string = this.getJobPidPath(executor, trialJob.id);
+ const trialReturnCodeFilePath: string = executor.joinPath(executor.getRemoteExperimentRootDir(getExperimentId()), 'trials', trialJob.id, '.nni', 'code');
+ /* eslint-disable require-atomic-updates */
+ try {
+ const isAlive = await executor.isProcessAlive(jobpidPath);
+ // if the process of jobpid is not alive any more
+ if (!isAlive) {
+ const trialReturnCode: string = await executor.getRemoteFileContent(trialReturnCodeFilePath);
+ this.log.debug(`trial job ${trialJob.id} return code: ${trialReturnCode}`);
+ const match: RegExpMatchArray | null = trialReturnCode.trim()
+ .match(/^-?(\d+)\s+(\d+)$/);
+ if (match !== null) {
+ const { 1: code, 2: timestamp } = match;
+ // Update the trial job's status based on the result code
+ if (parseInt(code, 10) === 0) {
+ trialJob.status = 'SUCCEEDED';
+ } else {
+ // isEarlyStopped was never set, meaning the trial was not cancelled by NNI;
+ // so if the process exit code > 0, mark it as FAILED
+ if (trialJob.isEarlyStopped === undefined) {
+ trialJob.status = 'FAILED';
+ } else {
+ trialJob.status = getJobCancelStatus(trialJob.isEarlyStopped);
+ }
+ }
+ trialJob.endTime = parseInt(timestamp, 10);
+ this.releaseTrialResource(trialJob);
+ }
+ this.log.debug(`trial job status update: ${trialJob.id}, ${trialJob.status}`);
+ }
+ deferred.resolve(trialJob);
+ } catch (error) {
+ this.log.debug(`(mostly ignorable) Update job status exception, error is ${error.message}`);
+ if (error instanceof NNIError && error.name === NNIErrorNames.NOT_FOUND) {
+ deferred.resolve(trialJob);
+ } else {
+ trialJob.status = 'UNKNOWN';
+ deferred.resolve(trialJob);
+ }
+ }
+ /* eslint-enable require-atomic-updates */
+ return deferred.promise;
+ }
+
+ public get MetricsEmitter(): EventEmitter {
+ return this.metricsEmitter;
+ }
+
+ private getJobPidPath(executor: ShellExecutor, jobId: string): string {
+ const trialJobDetail: RemoteMachineTrialJobDetail | undefined = this.trialJobsMap.get(jobId);
+ if (trialJobDetail === undefined) {
+ throw new NNIError(NNIErrorNames.INVALID_JOB_DETAIL, `Invalid job detail information for trial job ${jobId}`);
+ }
+
+ return executor.joinPath(trialJobDetail.workingDirectory, '.nni', 'jobpid');
+ }
+
+ private async writeParameterFile(trialJobId: string, hyperParameters: HyperParameters): Promise<void> {
+ const executor = await this.getExecutor(trialJobId);
+
+ const trialWorkingFolder: string = executor.joinPath(executor.getRemoteExperimentRootDir(getExperimentId()), 'trials', trialJobId);
+ const trialLocalTempFolder: string = path.join(this.expRootDir, 'trials-local', trialJobId);
+
+ const fileName: string = generateParamFileName(hyperParameters);
+ const localFilepath: string = path.join(trialLocalTempFolder, fileName);
+ await fs.promises.writeFile(localFilepath, hyperParameters.value, { encoding: 'utf8' });
+
+ await executor.copyFileToRemote(localFilepath, executor.joinPath(trialWorkingFolder, fileName));
+ }
+
+ public getTrialOutputLocalPath(_trialJobId: string): Promise<string> {
+ throw new MethodNotImplementedError();
+ }
+
+ public fetchTrialOutput(_trialJobId: string, _subpath: string): Promise<void> {
+ throw new MethodNotImplementedError();
+ }
+}
+
+export { RemoteMachineTrainingService };
diff --git a/ts/nni_manager/training_service/remote_machine/shellExecutor.ts b/ts/nni_manager/training_service/remote_machine/shellExecutor.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c815653a102bea033ec3f648b641d3813df5fff1
--- /dev/null
+++ b/ts/nni_manager/training_service/remote_machine/shellExecutor.ts
@@ -0,0 +1,439 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import assert from 'assert';
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
+import { Client, ClientChannel, ConnectConfig, SFTPWrapper } from 'ssh2';
+import stream from 'stream';
+import { Deferred } from "ts-deferred";
+import { getLogger, Logger } from 'common/log';
+import { uniqueString, randomInt } from 'common/utils';
+import { execRemove, tarAdd } from '../common/util';
+import { LinuxCommands } from "./extends/linuxCommands";
+import { WindowsCommands } from './extends/windowsCommands';
+import { OsCommands } from "./osCommands";
+import { RemoteCommandResult, RemoteMachineMeta } from "./remoteMachineData";
+import { NNIError, NNIErrorNames } from 'common/errors';
+
+class ShellExecutor {
+ public name: string = "";
+
+ private readonly lineBreaker = new RegExp(`[\r\n]+`);
+ private readonly maxUsageCount = 5;
+
+ private osCommands: OsCommands | undefined;
+ private usedCount: number = 0; // count the number of users of this connection
+ private readonly sshClient: Client;
+ private readonly log: Logger;
+ private tempPath: string = "";
+ private channelDefaultOutputs: string[] = [];
+ private pythonPath: string | undefined;
+
+ public isWindows: boolean = false;
+
+ constructor() {
+ this.log = getLogger('ShellExecutor');
+ this.sshClient = new Client();
+ }
+
+ public async initialize(rmMeta: RemoteMachineMeta): Promise<void> {
+ const deferred: Deferred<void> = new Deferred<void>();
+
+ const connectConfig: ConnectConfig = {
+ host: rmMeta.config.host,
+ port: rmMeta.config.port,
+ username: rmMeta.config.user,
+ tryKeyboard: true,
+ };
+ this.pythonPath = rmMeta.config.pythonPath;
+ this.name = `${rmMeta.config.user}@${rmMeta.config.host}:${rmMeta.config.port}`;
+ if (rmMeta.config.password !== undefined) {
+ connectConfig.password = rmMeta.config.password;
+ } else if (rmMeta.config.sshKeyFile !== undefined) {
+ if (!fs.existsSync(rmMeta.config.sshKeyFile)) {
+ // SSH key path is not a valid file, reject
+ deferred.reject(new Error(`${rmMeta.config.sshKeyFile} does not exist.`));
+ }
+ const privateKey: string = fs.readFileSync(rmMeta.config.sshKeyFile, 'utf8');
+
+ connectConfig.privateKey = privateKey;
+ connectConfig.passphrase = rmMeta.config.sshPassphrase;
+ } else {
+ deferred.reject(new Error(`No valid password or sshKeyFile is configured.`));
+ }
+
+ this.sshClient.on('ready', async () => {
+ // check OS type: Windows or else. "ver" succeeds and mentions "Windows" on a
+ // Windows node; on Linux it fails or prints nothing matching, so fall through.
+ const result = await this.execute("ver");
+ if (result.exitCode == 0 && result.stdout.search("Windows") > -1) {
+ this.osCommands = new WindowsCommands();
+ this.isWindows = true;
+
+ // Detect default output and try to remove it under Windows.
+ // Anaconda has this kind of output.
+ let defaultResult = await this.execute("");
+ if (defaultResult.stdout !== "") {
+ deferred.reject(new Error(`The Windows remote node shouldn't output a welcome message; the content below should be removed from the command window! \n` +
+ `${defaultResult.stdout}`));
+ }
+ defaultResult = await this.execute("powershell -command \"\"");
+ if (defaultResult.stdout !== "") {
+ this.channelDefaultOutputs.push(defaultResult.stdout);
+ }
+ this.log.debug(`set channelDefaultOutput to "${this.channelDefaultOutputs}"`);
+
+ // parse the temp folder to expand possible environment variables.
+ const commandResult = await this.execute("echo %TEMP%");
+ this.tempPath = commandResult.stdout.replace(this.lineBreaker, "");
+ } else {
+ this.osCommands = new LinuxCommands();
+ // It is not stable to get the tmp path with a Linux command like "echo /tmp";
+ // sometimes it returns empty, so hard-code the tmp path here.
+ this.tempPath = "/tmp";
+ }
+
+ deferred.resolve();
+ }).on('error', (err: Error) => {
+ // SSH connection error, reject with error message
+ deferred.reject(new Error(err.message));
+ }).on("keyboard-interactive", (_name, _instructions, _lang, _prompts, finish) => {
+ finish([rmMeta.config.password || '']);
+ }).connect(connectConfig);
+
+ return deferred.promise;
+ }
+
+ public close(): void {
+ this.sshClient.end();
+ }
+
+ public addUsage(): boolean {
+ let isAddedSuccess = false;
+ if (this.usedCount < this.maxUsageCount) {
+ this.usedCount++;
+ isAddedSuccess = true;
+ }
+ return isAddedSuccess;
+ }
+
+ public releaseUsage(): boolean {
+ let canBeReleased = false;
+ if (this.usedCount > 0) {
+ this.usedCount--;
+ }
+ if (this.usedCount == 0) {
+ canBeReleased = true;
+ }
+ return canBeReleased;
+ }
+
+ public getScriptName(mainName: string): string {
+ if (this.osCommands === undefined) {
+ throw new Error("osCommands must be initialized!");
+ }
+ return `${mainName}.${this.osCommands.getScriptExt()}`;
+ }
+
+ public generateStartScript(workingDirectory: string, trialJobId: string, experimentId: string,
+ trialSequenceId: string, isMultiPhase: boolean,
+ command: string, nniManagerAddress: string, nniManagerPort: number,
+ nniManagerVersion: string, logCollection: string, cudaVisibleSetting: string): string {
+ if (this.osCommands === undefined) {
+ throw new Error("osCommands must be initialized!");
+ }
+ const jobIdFileName = this.joinPath(workingDirectory, '.nni', 'jobpid');
+ const exitCodeFile = this.joinPath(workingDirectory, '.nni', 'code');
+ const codeDir = this.getRemoteCodePath(experimentId);
+
+ return this.osCommands.generateStartScript(workingDirectory, trialJobId, experimentId,
+ trialSequenceId, isMultiPhase, jobIdFileName, command,
+ nniManagerAddress, nniManagerPort, nniManagerVersion,
+ logCollection, exitCodeFile, codeDir, cudaVisibleSetting);
+ }
+
+ public generateGpuStatsScript(experimentId: string): string {
+ if (this.osCommands === undefined) {
+ throw new Error("osCommands must be initialized!");
+ }
+ return this.osCommands.generateGpuStatsScript(this.getRemoteScriptsPath(experimentId));
+ }
+
+ public getTempPath(): string {
+ if (this.tempPath === "") {
+ throw new Error("tempPath must be initialized!");
+ }
+ return this.tempPath;
+ }
+
+ public async getCurrentPath(): Promise<string> {
+ const commandText = this.osCommands && this.osCommands.getCurrentPath();
+ const commandResult = await this.execute(commandText);
+ if (commandResult.exitCode == 0) {
+ return commandResult.stdout;
+ } else {
+ throw Error(commandResult.stderr);
+ }
+ }
+
+ public getRemoteScriptsPath(experimentId: string): string {
+ return this.joinPath(this.getRemoteExperimentRootDir(experimentId), 'scripts');
+ }
+
+ public getRemoteCodePath(experimentId: string): string {
+ return this.joinPath(this.getRemoteExperimentRootDir(experimentId), 'nni-code');
+ }
+
+ public getRemoteExperimentRootDir(experimentId: string): string {
+ return this.joinPath(this.tempPath, 'nni-experiments', experimentId);
+ }
+
+ public joinPath(...paths: string[]): string {
+ if (!this.osCommands) {
+ throw new Error("osCommands must be initialized!");
+ }
+ return this.osCommands.joinPath(...paths);
+ }
+
+ public async createFolder(folderName: string, sharedFolder: boolean = false): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.createFolder(folderName, sharedFolder);
+ const commandResult = await this.execute(commandText);
+ const result = commandResult.exitCode == 0;
+ return result;
+ }
+
+ public async allowPermission(isRecursive: boolean = false, ...folders: string[]): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.allowPermission(isRecursive, ...folders);
+ const commandResult = await this.execute(commandText);
+ const result = commandResult.exitCode == 0;
+ return result;
+ }
+
+ public async removeFolder(folderName: string, isRecursive: boolean = false, isForce: boolean = true): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.removeFolder(folderName, isRecursive, isForce);
+ const commandResult = await this.execute(commandText);
+ const result = commandResult.exitCode == 0;
+ return result;
+ }
+
+ public async removeFiles(folderOrFileName: string, filePattern: string = ""): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.removeFiles(folderOrFileName, filePattern);
+ const commandResult = await this.execute(commandText);
+ const result = commandResult.exitCode == 0;
+ return result;
+ }
+
+ public async readLastLines(fileName: string, lineCount: number = 1): Promise<string> {
+ const commandText = this.osCommands && this.osCommands.readLastLines(fileName, lineCount);
+ const commandResult = await this.execute(commandText);
+ let result: string = "";
+ if (commandResult !== undefined && commandResult.stdout !== undefined && commandResult.stdout.length > 0) {
+ result = commandResult.stdout;
+ }
+ return result;
+ }
+
+ public async isProcessAlive(pidFileName: string): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.isProcessAliveCommand(pidFileName);
+ const commandResult = await this.execute(commandText);
+ const result = this.osCommands && this.osCommands.isProcessAliveProcessOutput(commandResult);
+ return result !== undefined ?
result : false;
+ }
+
+ public async killChildProcesses(pidFileName: string, killSelf: boolean = false): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.killChildProcesses(pidFileName, killSelf);
+ const commandResult = await this.execute(commandText);
+ return commandResult.exitCode == 0;
+ }
+
+ public async fileExist(filePath: string): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.fileExistCommand(filePath);
+ const commandResult = await this.execute(commandText);
+ return commandResult.stdout !== undefined && commandResult.stdout.trim() === 'True';
+ }
+
+ public async extractFile(tarFileName: string, targetFolder: string): Promise<boolean> {
+ const commandText = this.osCommands && this.osCommands.extractFile(tarFileName, targetFolder);
+ const commandResult = await this.execute(commandText);
+ return commandResult.exitCode == 0;
+ }
+
+ public async executeScript(script: string, isFile: boolean = false, isInteractive: boolean = false): Promise<RemoteCommandResult> {
+ const commandText = this.osCommands && this.osCommands.executeScript(script, isFile);
+ const commandResult = await this.execute(commandText, undefined, isInteractive);
+ return commandResult;
+ }
+
+ /**
+ * Copy a local file to a remote path
+ * @param localFilePath the path of the local file
+ * @param remoteFilePath the target path on the remote machine
+ */
+ public async copyFileToRemote(localFilePath: string, remoteFilePath: string): Promise<boolean> {
+ // commandIndex only tags log lines so that concurrent transfers can be correlated.
+ const commandIndex = randomInt(10000);
+ this.log.debug(`copyFileToRemote(${commandIndex}): localFilePath: ${localFilePath}, remoteFilePath: ${remoteFilePath}`);
+
+ const deferred: Deferred<boolean> = new Deferred<boolean>();
+ this.sshClient.sftp((err: Error | undefined, sftp: SFTPWrapper) => {
+ if (err !== undefined && err !== null) {
+ this.log.error(`copyFileToRemote(${commandIndex}): ${err}`);
+ deferred.reject(err);
+
+ return;
+ }
+ assert(sftp !== undefined);
+ sftp.fastPut(localFilePath, remoteFilePath, (fastPutErr: Error) => {
+ sftp.end();
+ if (fastPutErr !== undefined && fastPutErr !== null) {
+ this.log.error(`copyFileToRemote(${commandIndex}) fastPutErr: ${fastPutErr}, ${localFilePath}, ${remoteFilePath}`);
+ deferred.reject(fastPutErr);
+ } else {
+ deferred.resolve(true);
+ }
+ });
+ });
+
+ return deferred.promise;
+ }
+
+ /**
+ * Copy the files and directories in a local directory recursively to a remote directory
+ * @param localDirectory local directory
+ * @param remoteDirectory remote directory
+ */
+ public async copyDirectoryToRemote(localDirectory: string, remoteDirectory: string): Promise<void> {
+ const tmpSuffix: string = uniqueString(5);
+ const localTarPath: string = path.join(os.tmpdir(), `nni_tmp_local_${tmpSuffix}.tar.gz`);
+ if (!this.osCommands) {
+ throw new Error("osCommands must be initialized!");
+ }
+ const remoteTarPath: string = this.osCommands.joinPath(this.tempPath, `nni_tmp_remote_${tmpSuffix}.tar.gz`);
+
+ // Create the remote directory
+ await this.createFolder(remoteDirectory);
+ // Compress the files in the local directory into a temporary tarball
+ await tarAdd(localTarPath, localDirectory);
+ // Copy the compressed file to the remote machine and delete the local copy
+ await this.copyFileToRemote(localTarPath, remoteTarPath);
+ await execRemove(localTarPath);
+ // Decompress the remote compressed file and delete it
+ await this.extractFile(remoteTarPath, remoteDirectory);
+ await this.removeFiles(remoteTarPath);
+ }
+
+ public async getRemoteFileContent(filePath: string): Promise<string> {
+ const commandIndex = randomInt(10000);
+ this.log.debug(`getRemoteFileContent(${commandIndex}): filePath: ${filePath}`);
+ const deferred: Deferred<string> = new Deferred<string>();
+ this.sshClient.sftp((err: Error | undefined, sftp: SFTPWrapper) => {
+ if (err !== undefined && err !== null) {
+ this.log.error(`getRemoteFileContent(${commandIndex}) sftp: ${err}`);
+ deferred.reject(new Error(`SFTP error: ${err}`));
+
+ return;
+ }
+ try {
+ const sftpStream: stream.Readable = sftp.createReadStream(filePath);
+
+ let dataBuffer: string = '';
+ sftpStream.on('data', (data: Buffer | string) => {
+ dataBuffer += data;
+ })
+ .on('error', (streamErr: Error) => {
+ sftp.end();
+ deferred.reject(new NNIError(NNIErrorNames.NOT_FOUND, streamErr.message));
+ })
+ .on('end', () => {
+ // the sftp connection needs to be released manually once the operation is done
+ sftp.end();
+ deferred.resolve(dataBuffer);
+ });
+ } catch (error) {
+ this.log.error(`getRemoteFileContent(${commandIndex}): ${error.message}`);
+ sftp.end();
+ deferred.reject(new Error(`SFTP error: ${error.message}`));
+ }
+ });
+
+ return deferred.promise;
+ }
+
+ private async execute(command: string | undefined, processOutput: ((input: RemoteCommandResult) => RemoteCommandResult) | undefined = undefined, useShell: boolean = false): Promise<RemoteCommandResult> {
+ const deferred: Deferred<RemoteCommandResult> = new Deferred<RemoteCommandResult>();
+ let stdout: string = '';
+ let stderr: string = '';
+ let exitCode: number;
+
+ const commandIndex = randomInt(10000);
+ if (this.osCommands !== undefined) {
+ command = this.osCommands.setPythonPath(this.pythonPath, command);
+ }
+ this.log.debug(`remoteExeCommand(${commandIndex}): [${command}]`);
+
+ // Windows always uses a shell, so shell mode needs to be disabled there to make this work.
+ useShell = useShell && !this.isWindows;
+
+ const callback = (err: Error | undefined, channel: ClientChannel): void => {
+ if (err !== undefined && err !== null) {
+ this.log.error(`remoteExeCommand(${commandIndex}): ${err.message}`);
+ deferred.reject(err);
+ return;
+ }
+
+ channel.on('data', (data: any) => {
+ stdout += data;
+ });
+ channel.on('exit', (code: any) => {
+ exitCode = code;
+
+ // remove the default output to get a correct stdout.
+ if (this.channelDefaultOutputs.length > 0) {
+ let modifiedStdout = stdout;
+ this.channelDefaultOutputs.forEach(defaultOutput => {
+ if (modifiedStdout.startsWith(defaultOutput)) {
+ if (modifiedStdout.length > defaultOutput.length) {
+ modifiedStdout = modifiedStdout.substr(defaultOutput.length);
+ } else if (modifiedStdout.length === defaultOutput.length) {
+ modifiedStdout = "";
+ }
+ }
+ });
+ stdout = modifiedStdout;
+ }
+
+ this.log.debug(`remoteExeCommand(${commandIndex}) exit(${exitCode})\nstdout: ${stdout}\nstderr: ${stderr}`);
+ let result = {
+ stdout: stdout,
+ stderr: stderr,
+ exitCode: exitCode
+ };
+
+ if (processOutput != undefined) {
+ result = processOutput(result);
+ }
+ deferred.resolve(result);
+ });
+ channel.stderr.on('data', function (data: any) {
+ stderr += data;
+ });
+
+ if (useShell) {
+ channel.stdin.write(`${command}\n`);
+ channel.end("exit\n");
+ }
+
+ return;
+ };
+
+ if (useShell) {
+ this.sshClient.shell(callback);
+ } else {
+ this.sshClient.exec(command !== undefined ?
command : "", callback); + } + + return deferred.promise; + } +} + +export { ShellExecutor }; diff --git a/ts/nni_manager/training_service/reusable/aml/amlClient.ts b/ts/nni_manager/training_service/reusable/aml/amlClient.ts new file mode 100644 index 0000000000000000000000000000000000000000..a93eb767ad92aa5aa036bf0bd24ae99cb38756ad --- /dev/null +++ b/ts/nni_manager/training_service/reusable/aml/amlClient.ts @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Deferred } from 'ts-deferred'; +import { PythonShell } from 'python-shell'; + +export class AMLClient { + public subscriptionId: string; + public resourceGroup: string; + public workspaceName: string; + public experimentId: string; + public image: string; + public scriptName: string; + public pythonShellClient: undefined | PythonShell; + public codeDir: string; + public computeTarget: string; + + constructor( + subscriptionId: string, + resourceGroup: string, + workspaceName: string, + experimentId: string, + computeTarget: string, + image: string, + scriptName: string, + codeDir: string, + ) { + this.subscriptionId = subscriptionId; + this.resourceGroup = resourceGroup; + this.workspaceName = workspaceName; + this.experimentId = experimentId; + this.image = image; + this.scriptName = scriptName; + this.codeDir = codeDir; + this.computeTarget = computeTarget; + } + + public submit(): Promise { + const deferred: Deferred = new Deferred(); + this.pythonShellClient = new PythonShell('amlUtil.py', { + scriptPath: './config/aml', + pythonPath: process.platform === 'win32' ? 'python' : 'python3', + pythonOptions: ['-u'], // get print results in real-time + args: [ + '--subscription_id', this.subscriptionId, + '--resource_group', this.resourceGroup, + '--workspace_name', this.workspaceName, + '--compute_target', this.computeTarget, + '--docker_image', this.image, + '--experiment_name', `nni_exp_${this.experimentId}`, + '--script_dir', this.codeDir, + '--script_name', this.scriptName + ] + }); + this.pythonShellClient.on('message', function (envId: any) { + // received a message sent from the Python script (a simple "print" statement) + deferred.resolve(envId); + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public stop(): Promise { + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + const deferred: Deferred = new Deferred(); + this.pythonShellClient.send('stop'); + this.pythonShellClient.on('message', (result: any) => { + const stopResult = this.parseContent('stop_result', result); + if (stopResult === 'success') { + deferred.resolve(true); + } else if (stopResult === 'failed') { + deferred.resolve(false); + } + }); + return deferred.promise; + } + + public getTrackingUrl(): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('tracking_url'); + this.pythonShellClient.on('message', (status: any) => { + const trackingUrl = this.parseContent('tracking_url', status); + if (trackingUrl !== '') { + deferred.resolve(trackingUrl); + } + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public updateStatus(oldStatus: string): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + 
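// The Python helper is expected to answer with a 'status:{value}' line; parseContent below extracts the value, and an empty result keeps the old status. +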
this.pythonShellClient.send('update_status'); + this.pythonShellClient.on('message', (status: any) => { + let newStatus = this.parseContent('status', status); + if (newStatus === '') { + newStatus = oldStatus; + } + deferred.resolve(newStatus); + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public sendCommand(message: string): void { + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send(`command:${message}`); + } + + public receiveCommand(): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('receive'); + this.pythonShellClient.on('message', (command: any) => { + const message = this.parseContent('receive', command); + if (message !== '') { + deferred.resolve(JSON.parse(message)) + } + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + // Monitor error information in aml python shell client + private monitorError(pythonShellClient: PythonShell, deferred: Deferred): void { + pythonShellClient.on('error', function (error: any) { + deferred.reject(error); + }); + pythonShellClient.on('close', function (error: any) { + deferred.reject(error); + }); + } + + // Parse command content, command format is {head}:{content} + public parseContent(head: string, command: string): string { + const items = command.split(':'); + if (items[0] === head) { + return command.slice(head.length + 1); + } + return ''; + } +} diff --git a/ts/nni_manager/training_service/reusable/aml/amlConfig.ts b/ts/nni_manager/training_service/reusable/aml/amlConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..17e0bd8deb0ee618f2ef84ef4673635adc21af4a --- /dev/null +++ b/ts/nni_manager/training_service/reusable/aml/amlConfig.ts @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
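+// Configuration objects for the AML training service: cluster settings, trial settings, and per-environment state (the AML client handle plus a cursor over received messages).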
+ +import { TrialConfig } from 'training_service/common/trialConfig'; +import { EnvironmentInformation } from '../environment'; +import { AMLClient } from '../aml/amlClient'; + +export class AMLClusterConfig { + public readonly subscriptionId: string; + public readonly resourceGroup: string; + public readonly workspaceName: string; + public readonly computeTarget: string; + public maxTrialNumPerGpu?: number; + + constructor(subscriptionId: string, resourceGroup: string, workspaceName: string, computeTarget: string, + maxTrialNumPerGpu?: number) { + this.subscriptionId = subscriptionId; + this.resourceGroup = resourceGroup; + this.workspaceName = workspaceName; + this.computeTarget = computeTarget; + this.maxTrialNumPerGpu = maxTrialNumPerGpu; + } +} + +export class AMLTrialConfig extends TrialConfig { + public readonly image: string; + public readonly command: string; + public readonly codeDir: string; + + constructor(codeDir: string, command: string, image: string) { + super("", codeDir, 0); + this.codeDir = codeDir; + this.command = command; + this.image = image; + } +} + +export class AMLEnvironmentInformation extends EnvironmentInformation { + public amlClient?: AMLClient; + public currentMessageIndex: number = -1; +} diff --git a/ts/nni_manager/training_service/reusable/channels/amlCommandChannel.ts b/ts/nni_manager/training_service/reusable/channels/amlCommandChannel.ts new file mode 100644 index 0000000000000000000000000000000000000000..e8745d4b124314a989b0430f4fc875eb5aaaf724 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/channels/amlCommandChannel.ts @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { delay } from "common/utils"; +import { AMLEnvironmentInformation } from '../aml/amlConfig'; +import { CommandChannel, RunnerConnection } from "../commandChannel"; +import { Channel, EnvironmentInformation } from "../environment"; + +class AMLRunnerConnection extends RunnerConnection { +} + +export class AMLCommandChannel extends CommandChannel { + private stopping: boolean = false; + private sendQueues: [EnvironmentInformation, string][] = []; + + public get channelName(): Channel { + return "aml"; + } + + public async config(_key: string, _value: any): Promise { + // do nothing + } + + public async start(): Promise { + // do nothing + } + + public async stop(): Promise { + this.stopping = true; + } + + public async run(): Promise { + // start command loops + await Promise.all([ + this.receiveLoop(), + this.sendLoop() + ]); + } + + protected async sendCommandInternal(environment: EnvironmentInformation, message: string): Promise { + this.sendQueues.push([environment, message]); + } + + protected createRunnerConnection(environment: EnvironmentInformation): RunnerConnection { + return new AMLRunnerConnection(environment); + } + + private async sendLoop(): Promise { + const intervalSeconds = 0.5; + while (!this.stopping) { + const start = new Date(); + if (this.sendQueues.length > 0) { + while (this.sendQueues.length > 0) { + const item = this.sendQueues.shift(); + if (item === undefined) { + break; + } + const environment = item[0]; + const message = item[1]; + const amlClient = (environment as AMLEnvironmentInformation).amlClient; + if (!amlClient) { + throw new Error('aml client not initialized!'); + } + amlClient.sendCommand(message); + } + } + + const end = new Date(); + const delayMs = intervalSeconds * 1000 - (end.valueOf() - start.valueOf()); + if (delayMs > 0) { + await delay(delayMs); + } + } + } + + private 
async receiveLoop(): Promise { + const intervalSeconds = 2; + + while (!this.stopping) { + const start = new Date(); + const runnerConnections = [...this.runnerConnections.values()] as AMLRunnerConnection[]; + for (const runnerConnection of runnerConnections) { + // to loop all commands + const amlEnvironmentInformation: AMLEnvironmentInformation = runnerConnection.environment as AMLEnvironmentInformation; + const amlClient = amlEnvironmentInformation.amlClient; + let currentMessageIndex = amlEnvironmentInformation.currentMessageIndex; + if (!amlClient) { + throw new Error('AML client not initialized!'); + } + const command = await amlClient.receiveCommand(); + if (command && Object.prototype.hasOwnProperty.call(command, "trial_runner")) { + const messages = command['trial_runner']; + if (messages) { + if (messages instanceof Object && currentMessageIndex < messages.length - 1) { + for (let index = currentMessageIndex + 1; index < messages.length; index++) { + this.handleCommand(runnerConnection.environment, messages[index]); + } + currentMessageIndex = messages.length - 1; + } else if (currentMessageIndex === -1) { + this.handleCommand(runnerConnection.environment, messages); + currentMessageIndex += 1; + } + amlEnvironmentInformation.currentMessageIndex = currentMessageIndex; + } + } + } + + const end = new Date(); + const delayMs = intervalSeconds * 1000 - (end.valueOf() - start.valueOf()); + if (delayMs > 0) { + await delay(delayMs); + } + } + } +} diff --git a/ts/nni_manager/training_service/reusable/channels/fileCommandChannel.ts b/ts/nni_manager/training_service/reusable/channels/fileCommandChannel.ts new file mode 100644 index 0000000000000000000000000000000000000000..f42655f5f8fc36dba6a7d57828ab1397026a9e3c --- /dev/null +++ b/ts/nni_manager/training_service/reusable/channels/fileCommandChannel.ts @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import * as component from "common/component"; +import { delay } from "common/utils"; +import { CommandChannel, RunnerConnection } from "../commandChannel"; +import { Channel, EnvironmentInformation } from "../environment"; +import { StorageService } from "../storageService"; + +class FileHandler { + public fileName: string; + public offset: number = 0; + + constructor(fileName: string) { + this.fileName = fileName; + } +} + + +class FileRunnerConnection extends RunnerConnection { + public handlers: Map = new Map(); +} + +export class FileCommandChannel extends CommandChannel { + private readonly commandPath = "commands"; + private stopping: boolean = false; + // make sure no concurrent issue when sending commands. 
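+    // sendLoop below drains this queue roughly every 0.5 seconds, so sendCommandInternal only needs to enqueue.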
+ private sendQueues: [EnvironmentInformation, string][] = []; + + public get channelName(): Channel { + return "file"; + } + + public async config(_key: string, _value: any): Promise { + // do nothing + } + + public async start(): Promise { + // do nothing + } + + public async stop(): Promise { + this.stopping = true; + } + + public async run(): Promise { + // start command loops + await Promise.all([ + this.receiveLoop(), + this.sendLoop() + ]); + } + + protected async sendCommandInternal(environment: EnvironmentInformation, message: string): Promise { + this.sendQueues.push([environment, message]); + } + + protected createRunnerConnection(environment: EnvironmentInformation): RunnerConnection { + return new FileRunnerConnection(environment); + } + + private async sendLoop(): Promise { + const intervalSeconds = 0.5; + while (!this.stopping) { + const start = new Date(); + + if (this.sendQueues.length > 0) { + const storageService = component.get(StorageService); + + while (this.sendQueues.length > 0) { + const item = this.sendQueues.shift(); + if (item === undefined) { + break; + } + const environment = item[0]; + const message = `${item[1]}\n`; + + const fileName = storageService.joinPath(environment.workingFolder, this.commandPath, `manager_commands.txt`); + await storageService.save(message, fileName, true); + } + } + + const end = new Date(); + const delayMs = intervalSeconds * 1000 - (end.valueOf() - start.valueOf()); + if (delayMs > 0) { + await delay(delayMs); + } + } + } + + private async receiveLoop(): Promise { + const intervalSeconds = 2; + const storageService = component.get(StorageService); + + while (!this.stopping) { + const start = new Date(); + + const runnerConnections = [...this.runnerConnections.values()] as FileRunnerConnection[]; + for (const runnerConnection of runnerConnections) { + const envCommandFolder = storageService.joinPath(runnerConnection.environment.workingFolder, this.commandPath); + // open new command files + if (runnerConnection.handlers.size < runnerConnection.environment.nodeCount) { + // to find all node commands file + const commandFileNames = await storageService.listDirectory(envCommandFolder); + const toAddedFileNames = []; + for (const commandFileName of commandFileNames) { + if (commandFileName.startsWith("runner_commands") && !runnerConnection.handlers.has(commandFileName)) { + toAddedFileNames.push(commandFileName); + } + } + + for (const toAddedFileName of toAddedFileNames) { + const fullPath = storageService.joinPath(envCommandFolder, toAddedFileName); + const fileHandler: FileHandler = new FileHandler(fullPath); + runnerConnection.handlers.set(toAddedFileName, fileHandler); + this.log.debug(`FileCommandChannel: added fileHandler env ${runnerConnection.environment.id} ${toAddedFileName}`); + } + } + + // to loop all commands + for (const fileHandler of runnerConnection.handlers.values()) { + const newContent = await storageService.readFileContent(fileHandler.fileName, fileHandler.offset, undefined); + if (newContent.length > 0) { + const commands = newContent.split('\n'); + for (const command of commands) { + this.handleCommand(runnerConnection.environment, command); + } + fileHandler.offset += newContent.length; + } + } + } + + const end = new Date(); + const delayMs = intervalSeconds * 1000 - (end.valueOf() - start.valueOf()); + if (delayMs > 0) { + await delay(delayMs); + } + } + } +} diff --git a/ts/nni_manager/training_service/reusable/channels/webCommandChannel.ts 
b/ts/nni_manager/training_service/reusable/channels/webCommandChannel.ts new file mode 100644 index 0000000000000000000000000000000000000000..315170f51719826beca9501ae7eecfdd27e344ca --- /dev/null +++ b/ts/nni_manager/training_service/reusable/channels/webCommandChannel.ts @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Server as SocketServer } from "ws"; +import { getBasePort, getExperimentId } from "common/experimentStartupInfo"; +import { INITIALIZED } from 'core/commands'; +import { CommandChannel, RunnerConnection } from "../commandChannel"; +import { Channel, EnvironmentInformation } from "../environment"; +import { EventEmitter } from "events"; + +class WebRunnerConnection extends RunnerConnection { + public readonly clients: WebSocket[] = []; + + public async close(): Promise { + await super.close(); + while (this.clients.length > 0) { + const client = this.clients.shift(); + if (client !== undefined) { + client.close(); + } + } + } + + public AddClient(client: WebSocket): void { + this.clients.push(client); + } +} + +export class WebCommandChannel extends CommandChannel { + private readonly expId: string = getExperimentId(); + private static commandChannel: WebCommandChannel; + private webSocketServer: SocketServer | undefined; + private clients: Map = new Map(); + + public get channelName(): Channel { + return "web"; + } + + public async config(_key: string, _value: any): Promise { + // do nothing + } + + // Set WebCommandChannel as singleton mode, one experiment could only start one webCommandChannel instance + private constructor(commandEmitter: EventEmitter) { + super(commandEmitter); + } + + public static getInstance(commandEmitter: EventEmitter): CommandChannel { + if (!this.commandChannel) { + this.commandChannel = new WebCommandChannel(commandEmitter); + } + return this.commandChannel; + } + + public async start(): Promise { + const port = getBasePort() + 1; + this.webSocketServer = new SocketServer({ port }); + + this.webSocketServer.on('connection', (client: WebSocket) => { + this.log.debug(`WebCommandChannel: received connection`); + client.onerror = (event): void => { + this.log.error('error on client', event); + } + + this.clients.set(client, undefined); + client.onmessage = (message): void => { + this.receivedWebSocketMessage(client, message); + }; + }).on('error', (error) => { + this.log.error(`error on websocket server ${error}`); + }); + } + + public async stop(): Promise { + if (this.webSocketServer !== undefined) { + this.webSocketServer.close(); + } + } + + public async run(): Promise{ + // do nothing + } + + protected async sendCommandInternal(environment: EnvironmentInformation, message: string): Promise { + if (this.webSocketServer === undefined) { + throw new Error(`WebCommandChannel: uninitialized!`) + } + const runnerConnection = this.runnerConnections.get(environment.id) as WebRunnerConnection; + if (runnerConnection !== undefined) { + for (const client of runnerConnection.clients) { + client.send(message); + } + } else { + this.log.warning(`WebCommandChannel: cannot find client for env ${environment.id}, message is ignored.`); + } + } + + protected createRunnerConnection(environment: EnvironmentInformation): RunnerConnection { + return new WebRunnerConnection(environment); + } + + private receivedWebSocketMessage(client: WebSocket, message: MessageEvent): void { + let connection = this.clients.get(client) as WebRunnerConnection | undefined; + const rawCommands = message.data.toString(); + + if 
(connection === undefined) {
+            // undefined means the first message from this client is expected to be the initializing message.
+            const commands = this.parseCommands(rawCommands);
+            let isValid = false;
+            this.log.debug('WebCommandChannel: received initialize message:', rawCommands);
+
+            if (commands.length > 0) {
+                const commandType = commands[0][0];
+                const result = commands[0][1];
+                if (commandType === INITIALIZED &&
+                    result.expId === this.expId &&
+                    this.runnerConnections.has(result.runnerId)
+                ) {
+                    const runnerConnection = this.runnerConnections.get(result.runnerId) as WebRunnerConnection;
+                    this.clients.set(client, runnerConnection);
+                    runnerConnection.AddClient(client);
+                    connection = runnerConnection;
+                    isValid = true;
+                    this.log.debug(`WebCommandChannel: client of env ${runnerConnection.environment.id} initialized`);
+                } else {
+                    this.log.warning(`WebCommandChannel: client is not initialized, runnerId: ${result.runnerId}, command: ${commandType}, expId: ${this.expId}, exists: ${this.runnerConnections.has(result.runnerId)}`);
+                }
+            }
+
+            if (!isValid) {
+                this.log.warning(`WebCommandChannel: rejected client with invalid init message ${rawCommands}`);
+                client.close();
+                this.clients.delete(client);
+            }
+        }
+
+        if (connection !== undefined) {
+            this.handleCommand(connection.environment, rawCommands);
+        }
+    }
+}
diff --git a/ts/nni_manager/training_service/reusable/commandChannel.ts b/ts/nni_manager/training_service/reusable/commandChannel.ts
new file mode 100644
index 0000000000000000000000000000000000000000..af08e86b363d383b52d9a0575e451a999df8e5aa
--- /dev/null
+++ b/ts/nni_manager/training_service/reusable/commandChannel.ts
@@ -0,0 +1,130 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { EventEmitter } from "events";
+import { getLogger, Logger } from "common/log";
+import { TRIAL_COMMANDS } from "core/commands";
+import { encodeCommand } from "core/ipcInterface";
+import { Channel, EnvironmentInformation } from "./environment";
+
+const acceptedCommands: Set<string> = new Set<string>(TRIAL_COMMANDS);
+
+export class Command {
+    public readonly environment: EnvironmentInformation;
+    public readonly command: string;
+    public readonly data: any;
+
+    constructor(environment: EnvironmentInformation, command: string, data: any) {
+        if (!acceptedCommands.has(command)) {
+            throw new Error(`unaccepted command ${command}`);
+        }
+        this.environment = environment;
+        this.command = command;
+        this.data = data;
+    }
+}
+
+export class RunnerConnection {
+    public readonly environment: EnvironmentInformation;
+
+    constructor(environment: EnvironmentInformation) {
+        this.environment = environment;
+    }
+
+    public async open(): Promise<void> {
+        // do nothing
+    }
+
+    public async close(): Promise<void> {
+        // do nothing
+    }
+}
+
+export abstract class CommandChannel {
+    protected readonly log: Logger;
+    protected runnerConnections: Map<string, RunnerConnection> = new Map<string, RunnerConnection>();
+    protected readonly commandEmitter: EventEmitter;
+
+    private readonly commandPattern: RegExp = /(?<type>[\w]{2})(?<length>[\d]{14})(?<data>.*)\n?/gm;
+
+    public constructor(commandEmitter: EventEmitter) {
+        this.log = getLogger('CommandChannel');
+        this.commandEmitter = commandEmitter;
+    }
+
+    public abstract get channelName(): Channel;
+    public abstract config(key: string, value: any): Promise<void>;
+    public abstract start(): Promise<void>;
+    public abstract stop(): Promise<void>;
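+    // An illustrative example of the wire format accepted by commandPattern above
+    // (hypothetical values): a metric command carrying a 16-character JSON payload
+    // would travel as `ME00000000000016{"parameter": 1}`, i.e. a 2-char type,
+    // a zero-padded 14-digit length, then the payload parsed by parseCommands below.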
+
+    // Pull-based command channels need a polling loop to check for messages; the loop should be started with await here.
+    public abstract run(): Promise<void>;
+
+    protected abstract sendCommandInternal(environment: EnvironmentInformation, message: string): Promise<void>;
+    protected abstract createRunnerConnection(environment: EnvironmentInformation): RunnerConnection;
+
+    public async sendCommand(environment: EnvironmentInformation, commandType: string, data: any): Promise<void> {
+        const command = encodeCommand(commandType, JSON.stringify(data));
+        this.log.debug(`CommandChannel: env ${environment.id} sending command: ${command}`);
+        await this.sendCommandInternal(environment, command.toString("utf8"));
+    }
+
+    public async open(environment: EnvironmentInformation): Promise<void> {
+        if (this.runnerConnections.has(environment.id)) {
+            throw new Error(`CommandChannel: env ${environment.id} is opened already, shouldn't be opened again.`);
+        }
+        const connection = this.createRunnerConnection(environment);
+        this.runnerConnections.set(environment.id, connection);
+        await connection.open();
+    }
+
+    public async close(environment: EnvironmentInformation): Promise<void> {
+        if (this.runnerConnections.has(environment.id)) {
+            const connection = this.runnerConnections.get(environment.id);
+            this.runnerConnections.delete(environment.id);
+            if (connection !== undefined) {
+                await connection.close();
+            }
+        }
+    }
+
+    protected parseCommands(content: string): [string, any][] {
+        const commands: [string, any][] = [];
+
+        let matches = this.commandPattern.exec(content);
+
+        while (matches) {
+            if (undefined !== matches.groups) {
+                const commandType = matches.groups["type"];
+                const dataLength = parseInt(matches.groups["length"]);
+                const data: any = matches.groups["data"];
+                if (dataLength !== data.length) {
+                    throw new Error(`dataLength ${dataLength} not equal to actual length ${data.length}: ${data}`);
+                }
+                try {
+                    const finalData = JSON.parse(data);
+                    // to handle encode('utf8') of Python
+                    commands.push([commandType, finalData]);
+                } catch (error) {
+                    this.log.error(`CommandChannel: error on parseCommands ${error}, original: ${matches.groups["data"]}`);
+                    throw error;
+                }
+            }
+            matches = this.commandPattern.exec(content);
+        }
+
+        return commands;
+    }
+
+    protected handleCommand(environment: EnvironmentInformation, content: string): void {
+        const parsedResults = this.parseCommands(content);
+
+        for (const parsedResult of parsedResults) {
+            const commandType = parsedResult[0];
+            const data = parsedResult[1];
+            const command = new Command(environment, commandType, data);
+            this.commandEmitter.emit("command", command);
+            this.log.trace(`CommandChannel: env ${environment.id} emit command: ${commandType}, ${data}.`);
+        }
+    }
+}
diff --git a/ts/nni_manager/training_service/reusable/dlc/dlcClient.ts b/ts/nni_manager/training_service/reusable/dlc/dlcClient.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6f919edaa4c960ae1e15bd19a1d7e02bcf000f92
--- /dev/null
+++ b/ts/nni_manager/training_service/reusable/dlc/dlcClient.ts
@@ -0,0 +1,167 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
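+// DlcClient mirrors AMLClient above: it drives a Python helper (dlcUtil.py) through python-shell and exchanges simple '{head}:{content}' lines with it.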
+ +import { Deferred } from 'ts-deferred'; +import { PythonShell } from 'python-shell'; +import { getLogger, Logger } from 'common/log'; + +export class DlcClient { + private log: Logger; + public type: string; + public image: string; + public jobType: string; + public podCount: number; + public ecsSpec: string; + public region: string; + // e.g., data1e6vg1tu0zi7, to generate it, go to 'Dataset Config' page of DLC + // create a NAS data and copy the 'DataSet ConfigurationID' + public nasDataSourceId: string; + public accessKeyId: string; + public accessKeySecret: string; + public experimentId: string; + public environmentId: string; + public userCommand: string; + public pythonShellClient: undefined | PythonShell; + + constructor( + type: string, + image: string, + jobType: string, + podCount: number, + experimentId: string, + environmentId: string, + ecsSpec: string, + region: string, + nasDataSourceId: string, + accessKeyId: string, + accessKeySecret: string, + userCommand: string, + ) { + this.log = getLogger('DlcClient'); + this.type = type; + this.image = image; + this.jobType = jobType; + this.podCount = podCount; + this.ecsSpec = ecsSpec; + this.image = image; + this.region = region; + this.nasDataSourceId = nasDataSourceId; + this.accessKeyId = accessKeyId; + this.accessKeySecret = accessKeySecret + this.experimentId = experimentId; + this.environmentId = environmentId; + this.userCommand = userCommand; + } + + public submit(): Promise { + const deferred: Deferred = new Deferred(); + this.pythonShellClient = new PythonShell('dlcUtil.py', { + scriptPath: './config/dlc', + pythonPath: 'python3', + pythonOptions: ['-u'], // get print results in real-time + args: [ + '--type', this.type, + '--image', this.image, + '--job_type', this.jobType, + '--pod_count', String(this.podCount), + '--ecs_spec', this.ecsSpec, + '--region', this.region, + '--nas_data_source_id', this.nasDataSourceId, + '--access_key_id', this.accessKeyId, + '--access_key_secret', this.accessKeySecret, + '--experiment_name', `nni_exp_${this.experimentId}_env_${this.environmentId}`, + '--user_command', this.userCommand, + ] + }); + this.log.debug(this.pythonShellClient.command); + this.pythonShellClient.on('message', function (envId: any) { + // received a message sent from the Python script (a simple "print" statement) + deferred.resolve(envId); + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public stop(): void { + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('stop'); + } + + public getTrackingUrl(): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('tracking_url'); + this.pythonShellClient.on('message', (status: any) => { + const trackingUrl = this.parseContent('tracking_url', status); + if (trackingUrl !== '') { + deferred.resolve(trackingUrl); + } + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public updateStatus(oldStatus: string): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('update_status'); + this.pythonShellClient.on('message', (status: any) => { + let newStatus = this.parseContent('status', status); + if (newStatus === '') { + newStatus = 
oldStatus; + } + deferred.resolve(newStatus); + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + public sendCommand(message: string): void { + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.log.debug(`command:${message}`); + this.pythonShellClient.send(`command:${message}`); + } + + public receiveCommand(): Promise { + const deferred: Deferred = new Deferred(); + if (this.pythonShellClient === undefined) { + throw Error('python shell client not initialized!'); + } + this.pythonShellClient.send('receive'); + this.pythonShellClient.on('message', (command: any) => { + const message = this.parseContent('receive', command); + if (message !== '') { + deferred.resolve(JSON.parse(message)) + } + }); + this.monitorError(this.pythonShellClient, deferred); + return deferred.promise; + } + + // Monitor error information in dlc python shell client + private monitorError(pythonShellClient: PythonShell, deferred: Deferred): void { + pythonShellClient.on('error', function (error: any) { + deferred.reject(error); + }); + pythonShellClient.on('close', function (error: any) { + deferred.reject(error); + }); + } + + // Parse command content, command format is {head}:{content} + public parseContent(head: string, command: string): string { + const items = command.split(':'); + if (items[0] === head) { + return command.slice(head.length + 1); + } + return ''; + } +} diff --git a/ts/nni_manager/training_service/reusable/dlc/dlcConfig.ts b/ts/nni_manager/training_service/reusable/dlc/dlcConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..00c0b63b85bbad4fa39759f74c33ae206bfc4f3e --- /dev/null +++ b/ts/nni_manager/training_service/reusable/dlc/dlcConfig.ts @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TrialConfig } from 'training_service/common/trialConfig'; +import { EnvironmentInformation } from '../environment'; +import { DlcClient } from '../dlc/dlcClient'; + +export class DlcClusterConfig { + public readonly type: string; + public readonly image: string; + public readonly podCount: number; + public readonly ecsSpec: string; + + constructor(type: string, image: string, podCount: number, ecsSpec: string) { + this.type = type; + this.image = image; + this.podCount = podCount; + this.ecsSpec = ecsSpec; + } +} + +export class DlcTrialConfig extends TrialConfig { + public readonly image: string; + public readonly command: string; + public readonly codeDir: string; + + constructor(codeDir: string, command: string, image: string) { + super("", codeDir, 0); + this.codeDir = codeDir; + this.command = command; + this.image = image; + } +} + +export class DlcEnvironmentInformation extends EnvironmentInformation { + public dlcClient?: DlcClient; + public currentMessageIndex: number = -1; +} diff --git a/ts/nni_manager/training_service/reusable/environment.ts b/ts/nni_manager/training_service/reusable/environment.ts new file mode 100644 index 0000000000000000000000000000000000000000..2874ab2104e3bda329a459a79696d4d32d8eea70 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environment.ts @@ -0,0 +1,201 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
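+// Core abstractions of the reusable training service: environment status and state, per-node GPU summaries, and the EnvironmentService base class that platform services extend.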
+
+import { getLogger, Logger } from "common/log";
+import { TrialJobStatus } from "common/trainingService";
+import { GPUInfo } from "training_service/common/gpuData";
+import { CommandChannel } from "./commandChannel";
+import { WebCommandChannel } from './channels/webCommandChannel';
+import { EventEmitter } from "events";
+
+
+export type EnvironmentStatus = 'UNKNOWN' | 'WAITING' | 'RUNNING' | 'SUCCEEDED' | 'FAILED' | 'USER_CANCELED';
+export type Channel = "web" | "file" | "aml" | "ut";
+
+
+export class TrialGpuSummary {
+    // GPU count on the machine
+    public gpuCount: number;
+    // The timestamp when the GPU summary data was queried
+    public timestamp: string;
+    // The array of GPU information for each GPU card
+    public gpuInfos: GPUInfo[];
+    // GPU assignment status
+    public assignedGpuIndexMap: Map<number, number> = new Map<number, number>();
+
+    constructor(gpuCount: number, timestamp: string, gpuInfos: GPUInfo[]) {
+        this.gpuCount = gpuCount;
+        this.timestamp = timestamp;
+        this.gpuInfos = gpuInfos;
+    }
+}
+
+export class EnvironmentInformation {
+    // a node id is 5 chars, so it won't conflict with this default.
+    private readonly defaultNodeId = "default";
+    private log: Logger;
+    private isNoGpuWarned: boolean = false;
+
+    // key states
+    // true: environment is running, waiting, or unknown.
+    public isAlive: boolean = true;
+    // true: runner is initialized and can receive trials.
+    public isRunnerReady: boolean = false;
+    // don't set status in the environment directly, use the setFinalState function to set a final state.
+    public status: EnvironmentStatus = "UNKNOWN";
+
+    // the number of trials currently running on this environment.
+    public runningTrialCount: number = 0;
+    // counts how many trials have been assigned to this environment.
+    // it can be used in many scenarios, but for now it is used for reuse.
+    public assignedTrialCount: number = 0;
+    // used to compute the environment's idle time interval
+    public latestTrialReleasedTime: number = -1;
+
+    // NNI environment ID
+    public id: string;
+    // training platform unique job ID.
+    public envId: string;
+    // training platform job friendly name, in case it's different from the job ID.
+    public name: string;
+    public trackingUrl: string = "";
+    public workingFolder: string = "";
+    public runnerWorkingFolder: string = "";
+    public command: string = "";
+    public nodeCount: number = 1;
+
+    // used to aggregate node status for a multi-node trial
+    public nodes: Map<string, NodeInformation>;
+    public gpuSummaries: Map<string, TrialGpuSummary> = new Map<string, TrialGpuSummary>();
+
+    // the user can specify which GPUs can be used by NNI.
+    // it's usable for sharable environments like remote machines.
+    public usableGpus?: number[];
+    // the user can specify how to use GPU resources for an environment, like local and remote.
+    public maxTrialNumberPerGpu?: number;
+    public useActiveGpu?: boolean;
+
+    public environmentService?: EnvironmentService;
+
+    public useSharedStorage?: boolean;
+
+    constructor(id: string, name: string, envId?: string) {
+        this.log = getLogger('EnvironmentInformation');
+        this.id = id;
+        this.name = name;
+        this.envId = envId ? envId : name;
+        this.nodes = new Map<string, NodeInformation>();
+    }
+
+    public setStatus(status: EnvironmentStatus): void {
+        if (this.status !== status) {
+            this.log.info(`EnvironmentInformation: ${this.envId} change status from ${this.status} to ${status}.`);
+            this.status = status;
+        }
+    }
+
+    public setGpuSummary(nodeId: string, newGpuSummary: TrialGpuSummary): void {
+        if (nodeId === null || nodeId === undefined) {
+            nodeId = this.defaultNodeId;
+        }
+
+        const originalGpuSummary = this.gpuSummaries.get(nodeId);
+        if (undefined === originalGpuSummary) {
+            newGpuSummary.assignedGpuIndexMap = new Map<number, number>();
+            this.gpuSummaries.set(nodeId, newGpuSummary);
+        } else {
+            originalGpuSummary.gpuCount = newGpuSummary.gpuCount;
+            originalGpuSummary.timestamp = newGpuSummary.timestamp;
+            originalGpuSummary.gpuInfos = newGpuSummary.gpuInfos;
+        }
+    }
+
+    public get defaultGpuSummary(): TrialGpuSummary | undefined {
+        const gpuSummary = this.gpuSummaries.get(this.defaultNodeId);
+        if (gpuSummary === undefined) {
+            if (false === this.isNoGpuWarned) {
+                this.log.warning(`EnvironmentInformation: ${this.envId} no default gpu found. current gpu info`, this.gpuSummaries);
+                this.isNoGpuWarned = true;
+            }
+        } else {
+            this.isNoGpuWarned = false;
+        }
+        return gpuSummary;
+    }
+}
+
+export abstract class EnvironmentService {
+
+    public async init(): Promise<void> {
+        return;
+    }
+
+    public abstract get hasStorageService(): boolean;
+    public abstract refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void>;
+    public abstract stopEnvironment(environment: EnvironmentInformation): Promise<void>;
+    public abstract startEnvironment(environment: EnvironmentInformation): Promise<void>;
+    // Make public for ut
+    protected commandChannel: CommandChannel | undefined;
+
+    // It is used to set the prefetched environment count; the default value is 0 for OpenPAI and AML mode.
+    // In remote mode, this value is set to the length of the machine list.
+    public get prefetchedEnvironmentCount(): number {
+        return 0;
+    }
+
+    public abstract get getName(): string;
+
+    // Initialize the command channel, using WebCommandChannel as the default command channel
+    public initCommandChannel(eventEmitter: EventEmitter): void {
+        this.commandChannel = WebCommandChannel.getInstance(eventEmitter);
+    }
+
+    public get getCommandChannel(): CommandChannel {
+        if (this.commandChannel === undefined) {
+            throw new Error("Command channel not initialized!");
+        }
+        return this.commandChannel;
+    }
+
+    // The interval depends on platform pressure and settings;
+    // for example, OpenPAI relies on API calls that are rate limited, so its interval needs to be bigger.
+    public get environmentMaintenceLoopInterval(): number {
+        return 5000;
+    }
+
+    // This is needed in two scenarios:
+    // 1. remote machines are a fixed set, so it can return false once all environments are assigned.
+    // 2. there are consistent errors on requested environments, for example, authentication failures on the platform.
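+    // A platform service can override this getter to return false in those cases, so that no more environments are requested.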
+ public get hasMoreEnvironments(): boolean { + return true; + } + + public createEnvironmentInformation(envId: string, envName: string): EnvironmentInformation { + return new EnvironmentInformation(envId, envName); + } +} + +export class NodeInformation { + public id: string; + public status: TrialJobStatus = "UNKNOWN"; + public endTime?: number; + + constructor(id: string) { + this.id = id; + } +} + +export class RunnerSettings { + public experimentId: string = ""; + public platform: string = ""; + public nniManagerIP: string = ""; + public nniManagerPort: number = 8081; + public nniManagerVersion: string = ""; + public logCollection: string = "none"; + public command: string = ""; + public enableGpuCollector: boolean = true; + + // specify which communication channel is used by runner. + // supported channel includes: rest, storage, aml + public commandChannel: Channel = "file"; +} diff --git a/ts/nni_manager/training_service/reusable/environments/amlEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/amlEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..30a24b4641876f3465f395d0996aae4f3ef4716a --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/amlEnvironmentService.ts @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; +import { getLogger, Logger } from 'common/log'; +import { AmlConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { validateCodeDir } from 'training_service/common/util'; +import { AMLClient } from '../aml/amlClient'; +import { AMLEnvironmentInformation } from '../aml/amlConfig'; +import { EnvironmentInformation, EnvironmentService } from '../environment'; +import { EventEmitter } from "events"; +import { AMLCommandChannel } from '../channels/amlCommandChannel'; +import { SharedStorageService } from '../sharedStorage' + +/** + * Collector AML jobs info from AML cluster, and update aml job status locally + */ +@component.Singleton +export class AMLEnvironmentService extends EnvironmentService { + + private readonly log: Logger = getLogger('AMLEnvironmentService'); + private experimentId: string; + private experimentRootDir: string; + private config: AmlConfig; + + constructor(config: AmlConfig, info: ExperimentStartupInfo) { + super(); + this.experimentId = info.experimentId; + this.experimentRootDir = info.logDir; + this.config = config; + validateCodeDir(this.config.trialCodeDirectory); + } + + public get hasStorageService(): boolean { + return false; + } + + public initCommandChannel(eventEmitter: EventEmitter): void { + this.commandChannel = new AMLCommandChannel(eventEmitter); + } + + public createEnvironmentInformation(envId: string, envName: string): EnvironmentInformation { + return new AMLEnvironmentInformation(envId, envName); + } + + public get getName(): string { + return 'aml'; + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise { + environments.forEach(async (environment) => { + const amlClient = (environment as AMLEnvironmentInformation).amlClient; + if (!amlClient) { + return Promise.reject('AML client not initialized!'); + } + const newStatus = await amlClient.updateStatus(environment.status); + switch (newStatus.toUpperCase()) { + case 'WAITING': + case 'QUEUED': + environment.setStatus('WAITING'); + break; + case 
'RUNNING': + environment.setStatus('RUNNING'); + break; + case 'COMPLETED': + case 'SUCCEEDED': + environment.setStatus('SUCCEEDED'); + break; + case 'FAILED': + environment.setStatus('FAILED'); + return Promise.reject(`AML: job ${environment.envId} is failed!`); + case 'STOPPED': + case 'STOPPING': + environment.setStatus('USER_CANCELED'); + break; + default: + environment.setStatus('UNKNOWN'); + } + }); + } + + public async startEnvironment(environment: EnvironmentInformation): Promise { + const amlEnvironment: AMLEnvironmentInformation = environment as AMLEnvironmentInformation; + const environmentLocalTempFolder = path.join(this.experimentRootDir, "environment-temp"); + if (!fs.existsSync(environmentLocalTempFolder)) { + await fs.promises.mkdir(environmentLocalTempFolder, {recursive: true}); + } + if (amlEnvironment.useSharedStorage) { + const environmentRoot = component.get(SharedStorageService).remoteWorkingRoot; + const remoteMountCommand = component.get(SharedStorageService).remoteMountCommand; + amlEnvironment.command = `${remoteMountCommand} && cd ${environmentRoot} && ${amlEnvironment.command}`.replace(/"/g, `\\"`); + } else { + amlEnvironment.command = `mv envs outputs/envs && cd outputs && ${amlEnvironment.command}`; + } + amlEnvironment.command = `import os\nos.system('${amlEnvironment.command}')`; + amlEnvironment.maxTrialNumberPerGpu = this.config.maxTrialNumberPerGpu; + + await fs.promises.writeFile(path.join(environmentLocalTempFolder, 'nni_script.py'), amlEnvironment.command, { encoding: 'utf8' }); + const amlClient = new AMLClient( + this.config.subscriptionId, + this.config.resourceGroup, + this.config.workspaceName, + this.experimentId, + this.config.computeTarget, + this.config.dockerImage, + 'nni_script.py', + environmentLocalTempFolder + ); + amlEnvironment.id = await amlClient.submit(); + this.log.debug('aml: before getTrackingUrl'); + amlEnvironment.trackingUrl = await amlClient.getTrackingUrl(); + this.log.debug('aml: after getTrackingUrl'); + amlEnvironment.amlClient = amlClient; + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise { + const amlEnvironment: AMLEnvironmentInformation = environment as AMLEnvironmentInformation; + const amlClient = amlEnvironment.amlClient; + if (!amlClient) { + throw new Error('AML client not initialized!'); + } + const result = await amlClient.stop(); + if (result) { + this.log.info(`Stop aml run ${environment.id} success!`); + } else { + this.log.info(`Stop aml run ${environment.id} failed!`); + } + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/dlcEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/dlcEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..ac92938462c20d6a3d0393c28f557f2aaee785eb --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/dlcEnvironmentService.ts @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
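+// The DLC counterpart of the AML environment service above; note it wires up the FileCommandChannel and a MountedStorageService rather than an AML-specific channel.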
+ +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; +import { getLogger, Logger } from 'common/log'; +import { DlcConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { DlcClient } from '../dlc/dlcClient'; +import { DlcEnvironmentInformation } from '../dlc/dlcConfig'; +import { EnvironmentInformation, EnvironmentService } from '../environment'; +import { EventEmitter } from "events"; +import { FileCommandChannel } from '../channels/fileCommandChannel'; +import { MountedStorageService } from '../storages/mountedStorageService'; +import { Scope } from 'typescript-ioc'; +import { StorageService } from '../storageService'; + +/** + * Collector DLC jobs info from DLC cluster, and update dlc job status locally + */ +@component.Singleton +export class DlcEnvironmentService extends EnvironmentService { + + private readonly log: Logger = getLogger('dlcEnvironmentService'); + private experimentId: string; + private config: DlcConfig; + + constructor(config: DlcConfig, info: ExperimentStartupInfo) { + super(); + this.experimentId = info.experimentId; + this.config = config; + component.Container.bind(StorageService).to(MountedStorageService).scope(Scope.Singleton); + const storageService = component.get(StorageService) + const remoteRoot = storageService.joinPath(this.config.localStorageMountPoint, 'nni-experiments', this.experimentId); + const localRoot = storageService.joinPath(this.config.localStorageMountPoint, 'nni-experiments'); + storageService.initialize(localRoot, remoteRoot); + } + + public get hasStorageService(): boolean { + return true; + } + + public initCommandChannel(eventEmitter: EventEmitter): void { + this.commandChannel = new FileCommandChannel(eventEmitter); + } + + public createEnvironmentInformation(envId: string, envName: string): EnvironmentInformation { + return new DlcEnvironmentInformation(envId, envName); + } + + public get getName(): string { + return 'dlc'; + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise { + environments.forEach(async (environment) => { + const dlcClient = (environment as DlcEnvironmentInformation).dlcClient; + if (!dlcClient) { + return Promise.reject('DLC client not initialized!'); + } + const newStatus = await dlcClient.updateStatus(environment.status); + switch (newStatus.toUpperCase()) { + case 'CREATING': + case 'CREATED': + case 'WAITING': + case 'QUEUED': + environment.setStatus('WAITING'); + break; + case 'RUNNING': + environment.setStatus('RUNNING'); + break; + case 'COMPLETED': + case 'SUCCEEDED': + environment.setStatus('SUCCEEDED'); + break; + case 'FAILED': + environment.setStatus('FAILED'); + return Promise.reject(`DLC: job ${environment.envId} is failed!`); + case 'STOPPED': + case 'STOPPING': + environment.setStatus('USER_CANCELED'); + break; + default: + environment.setStatus('UNKNOWN'); + } + }); + } + + public async startEnvironment(environment: EnvironmentInformation): Promise { + const dlcEnvironment: DlcEnvironmentInformation = environment as DlcEnvironmentInformation; + + const environmentRoot = path.join(this.config.containerStorageMountPoint, `/nni-experiments/${this.experimentId}`); + const localRoot = path.join(this.config.localStorageMountPoint, `/nni-experiments/${this.experimentId}`); + + dlcEnvironment.workingFolder = `${localRoot}/envs/${environment.id}`; + dlcEnvironment.runnerWorkingFolder = `${environmentRoot}/envs/${environment.id}`; + + // environment id dir 
and command dir, folder created on DLC side can't be accessed on DSW. + if (!fs.existsSync(`${dlcEnvironment.workingFolder}/commands`)) { + await fs.promises.mkdir(`${dlcEnvironment.workingFolder}/commands`, {recursive: true}); + } + + environment.command = `cd ${environmentRoot} && ${environment.command} 1>${environment.runnerWorkingFolder}/trialrunner_stdout 2>${environment.runnerWorkingFolder}/trialrunner_stderr`; + + const dlcClient = new DlcClient( + this.config.type, + this.config.image, + this.config.jobType, + this.config.podCount, + this.experimentId, + environment.id, + this.config.ecsSpec, + this.config.region, + this.config.nasDataSourceId, + this.config.accessKeyId, + this.config.accessKeySecret, + environment.command, + ); + + dlcEnvironment.id = await dlcClient.submit(); + this.log.debug('dlc: before getTrackingUrl'); + dlcEnvironment.trackingUrl = await dlcClient.getTrackingUrl(); + this.log.debug(`dlc trackingUrl: ${dlcEnvironment.trackingUrl}`); + dlcEnvironment.dlcClient = dlcClient; + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise { + const dlcEnvironment: DlcEnvironmentInformation = environment as DlcEnvironmentInformation; + const dlcClient = dlcEnvironment.dlcClient; + if (!dlcClient) { + throw new Error('DLC client not initialized!'); + } + dlcClient.stop(); + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/environmentServiceFactory.ts b/ts/nni_manager/training_service/reusable/environments/environmentServiceFactory.ts new file mode 100644 index 0000000000000000000000000000000000000000..69a9c301b37a47a82992409366eec95dc6aa54a0 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/environmentServiceFactory.ts @@ -0,0 +1,42 @@ +import { AMLEnvironmentService } from './amlEnvironmentService'; +import { OpenPaiEnvironmentService } from './openPaiEnvironmentService'; +import { LocalEnvironmentService } from './localEnvironmentService'; +import { RemoteEnvironmentService } from './remoteEnvironmentService'; +import { KubeflowEnvironmentService } from './kubernetes/kubeflowEnvironmentService'; +import { FrameworkControllerEnvironmentService } from './kubernetes/frameworkcontrollerEnvironmentService'; +import { EnvironmentService } from '../environment'; +import { TrainingServiceConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { getCustomEnvironmentServiceConfig } from 'common/nniConfig'; +import { importModule } from 'common/utils'; +import { DlcEnvironmentService } from './dlcEnvironmentService'; + +export async function createEnvironmentService(config: TrainingServiceConfig): Promise { + const info = ExperimentStartupInfo.getInstance(); + const configAsAny: any = config; // environment services have different config types, skip type check + + switch (config.platform) { + case 'local': + return new LocalEnvironmentService(configAsAny, info); + case 'remote': + return new RemoteEnvironmentService(configAsAny, info); + case 'aml': + return new AMLEnvironmentService(configAsAny, info); + case 'openpai': + return new OpenPaiEnvironmentService(configAsAny, info); + case 'kubeflow': + return new KubeflowEnvironmentService(configAsAny, info); + case 'frameworkcontroller': + return new FrameworkControllerEnvironmentService(configAsAny, info); + case 'dlc': + return new DlcEnvironmentService(configAsAny, info); + } + + const esConfig = await getCustomEnvironmentServiceConfig(config.platform); + if (esConfig === null) { + throw new 
Error(`${config.platform} is not a supported training service!`); + } + const esModule = importModule(esConfig.nodeModulePath); + const esClass = esModule[esConfig.nodeClassName] as any; + return new esClass(configAsAny, info); +} diff --git a/ts/nni_manager/training_service/reusable/environments/kubernetes/frameworkcontrollerEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/kubernetes/frameworkcontrollerEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..c823b5ca933230be4caee8868868f3a899d573aa --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/kubernetes/frameworkcontrollerEnvironmentService.ts @@ -0,0 +1,338 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +'use strict'; + +import * as fs from 'fs'; +import * as path from 'path'; +import * as component from '../../../../common/component'; +import { FrameworkControllerConfig, FrameworkControllerTaskRoleConfig, toMegaBytes } from '../../../../common/experimentConfig'; +import { ExperimentStartupInfo } from '../../../../common/experimentStartupInfo'; +import { EnvironmentInformation } from '../../environment'; +import { KubernetesEnvironmentService } from './kubernetesEnvironmentService'; +import { FrameworkControllerClientFactory } from '../../../kubernetes/frameworkcontroller/frameworkcontrollerApiClient'; +import { FrameworkControllerClusterConfigAzure, FrameworkControllerJobStatus, FrameworkControllerTrialConfigTemplate, + FrameworkControllerJobCompleteStatus } from '../../../kubernetes/frameworkcontroller/frameworkcontrollerConfig'; +import { KeyVaultConfig, AzureStorage } from '../../../kubernetes/kubernetesConfig'; + +@component.Singleton +export class FrameworkControllerEnvironmentService extends KubernetesEnvironmentService { + + private config: FrameworkControllerConfig; + private createStoragePromise?: Promise; + private readonly fcContainerPortMap: Map = new Map(); // store frameworkcontroller container port + + + constructor(config: FrameworkControllerConfig, info: ExperimentStartupInfo) { + super(config, info); + this.experimentId = info.experimentId; + this.config = config; + // Create kubernetesCRDClient + this.kubernetesCRDClient = FrameworkControllerClientFactory.createClient(this.config.namespace); + // Create storage + if (this.config.storage.storageType === 'azureStorage') { + if (this.config.storage.azureShare === undefined || + this.config.storage.azureAccount === undefined || + this.config.storage.keyVaultName === undefined || + this.config.storage.keyVaultKey === undefined) { + throw new Error("Azure storage configuration error!"); + } + this.azureStorageAccountName = this.config.storage.azureAccount; + this.azureStorageShare = this.config.storage.azureShare; + this.createStoragePromise = this.createAzureStorage(this.config.storage.keyVaultName, this.config.storage.keyVaultKey); + } else if (this.config.storage.storageType === 'nfs') { + if (this.config.storage.server === undefined || + this.config.storage.path === undefined) { + throw new Error("NFS storage configuration error!"); + } + this.createStoragePromise = this.createNFSStorage(this.config.storage.server, this.config.storage.path); + } + } + + public get environmentMaintenceLoopInterval(): number { + return 5000; + } + + public get hasStorageService(): boolean { + return false; + } + + public get getName(): string { + return 'frameworkcontroller'; + } + + public async startEnvironment(environment: EnvironmentInformation): Promise { + 
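// Flow summary: stage run.sh to storage via uploadFolder, then submit a Framework job built by generateFrameworkControllerJobConfig below. +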
if (this.kubernetesCRDClient === undefined) {
+            throw new Error("kubernetesCRDClient not initialized!");
+        }
+        if (this.createStoragePromise) {
+            await this.createStoragePromise;
+        }
+        const configTaskRoles: any = this.config.taskRoles;
+        // generate the ports used by the task roles
+        this.generateContainerPort(configTaskRoles);
+
+        const expFolder = `${this.CONTAINER_MOUNT_PATH}/nni/${this.experimentId}`;
+        environment.command = `cd ${expFolder} && ${environment.command} \
+1>${expFolder}/envs/${environment.id}/trialrunner_stdout 2>${expFolder}/envs/${environment.id}/trialrunner_stderr`;
+        environment.maxTrialNumberPerGpu = this.config.maxTrialNumberPerGpu;
+
+        const frameworkcontrollerJobName: string = `nniexp${this.experimentId}env${environment.id}`.toLowerCase();
+        const command = this.generateCommandScript(this.config.taskRoles, environment.command);
+        await fs.promises.writeFile(path.join(this.environmentLocalTempFolder, "run.sh"), command, { encoding: 'utf8' });
+
+        // upload script files to storage
+        const trialJobOutputUrl: string = await this.uploadFolder(this.environmentLocalTempFolder, `nni/${this.experimentId}`);
+        environment.trackingUrl = trialJobOutputUrl;
+        // generate the frameworkcontroller job resource config object
+        const frameworkcontrollerJobConfig: any = await this.prepareFrameworkControllerConfig(
+            environment.id,
+            this.environmentWorkingFolder,
+            frameworkcontrollerJobName
+        );
+        // create the frameworkcontroller job based on the generated resource config
+        await this.kubernetesCRDClient.createKubernetesJob(frameworkcontrollerJobConfig);
+    }
+
+    /**
+     * upload a local folder to NFS or Azure storage
+     */
+    private async uploadFolder(srcDirectory: string, destDirectory: string): Promise<string> {
+        if (this.config.storage.storageType === 'azureStorage') {
+            if (this.azureStorageClient === undefined) {
+                throw new Error('azureStorageClient is not initialized');
+            }
+            return await this.uploadFolderToAzureStorage(srcDirectory, destDirectory, 2);
+        } else {
+            // no need to upload files to the NFS server, the temp folder is already mounted to NFS
+            return `nfs://${this.config.storage.server}:${destDirectory}`;
+        }
+    }
+
+    /**
+     * generate the trial's command for frameworkcontroller:
+     * expose the ports and execute injector.sh before executing the user's command
+     * @param command
+     */
+    private generateCommandScript(taskRoles: FrameworkControllerTaskRoleConfig[], command: string): string {
+        let portScript: string = '';
+        for (const taskRole of taskRoles) {
+            portScript += `FB_${taskRole.name.toUpperCase()}_PORT=${this.fcContainerPortMap.get(
+                taskRole.name
+            )} `;
+        }
+        return `${portScript} . 
/mnt/frameworkbarrier/injector.sh && ${command}`; + } + + private async prepareFrameworkControllerConfig(envId: string, trialWorkingFolder: string, frameworkcontrollerJobName: string): + Promise<any> { + const podResources: any = []; + for (const taskRole of this.config.taskRoles) { + const resource: any = {}; + resource.requests = this.generatePodResource(toMegaBytes(taskRole.memorySize), taskRole.cpuNumber, taskRole.gpuNumber); + resource.limits = {...resource.requests}; + podResources.push(resource); + } + // Generate frameworkcontroller job resource config object + const frameworkcontrollerJobConfig: any = + await this.generateFrameworkControllerJobConfig(envId, trialWorkingFolder, frameworkcontrollerJobName, podResources); + + return Promise.resolve(frameworkcontrollerJobConfig); + } + + private generateContainerPort(taskRoles: FrameworkControllerTrialConfigTemplate[]): void { + if (taskRoles === undefined) { + throw new Error('frameworkcontroller trial config is not initialized'); + } + + let port: number = 4000; // the default port used in the container + for (const index of taskRoles.keys()) { + this.fcContainerPortMap.set(taskRoles[index].name, port); + port += 1; + } + } + + /** + * Generate the frameworkcontroller resource config + * @param envId environment id + * @param trialWorkingFolder working folder + * @param frameworkcontrollerJobName job name + * @param podResources pod template + */ + private async generateFrameworkControllerJobConfig(envId: string, trialWorkingFolder: string, + frameworkcontrollerJobName: string, podResources: any): Promise<any> { + + const taskRoles: any = []; + for (const index of this.config.taskRoles.keys()) { + const containerPort: number | undefined = this.fcContainerPortMap.get(this.config.taskRoles[index].name); + if (containerPort === undefined) { + throw new Error('Container port is not initialized'); + } + + const taskRole: any = this.generateTaskRoleConfig( + trialWorkingFolder, + this.config.taskRoles[index].dockerImage, + `run.sh`, + podResources[index], + containerPort, + await this.createRegistrySecret(this.config.taskRoles[index].privateRegistryAuthPath) + ); + taskRoles.push({ + name: this.config.taskRoles[index].name, + taskNumber: this.config.taskRoles[index].taskNumber, + frameworkAttemptCompletionPolicy: { + minFailedTaskCount: this.config.taskRoles[index].frameworkAttemptCompletionPolicy.minFailedTaskCount, + minSucceededTaskCount: this.config.taskRoles[index].frameworkAttemptCompletionPolicy.minSucceedTaskCount + }, + task: taskRole + }); + } + + return Promise.resolve({ + apiVersion: `frameworkcontroller.microsoft.com/v1`, + kind: 'Framework', + metadata: { + name: frameworkcontrollerJobName, + namespace: this.config.namespace ? 
this.config.namespace : "default", + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: this.experimentId, + envId: envId + } + }, + spec: { + executionType: 'Start', + taskRoles: taskRoles + } + }); + } + + private generateTaskRoleConfig(trialWorkingFolder: string, replicaImage: string, runScriptFile: string, + podResources: any, containerPort: number, privateRegistrySecretName: string | undefined): any { + + const volumeSpecMap: Map<string, object> = new Map<string, object>(); + if (this.config.storage.storageType === 'azureStorage') { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + azureFile: { + secretName: `${this.azureStorageSecretName}`, + shareName: `${this.azureStorageShare}`, + readonly: false + } + }, { + name: 'frameworkbarrier-volume', + emptyDir: {} + }]); + } else { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + nfs: { + server: `${this.config.storage.server}`, + path: `${this.config.storage.path}` + } + }, { + name: 'frameworkbarrier-volume', + emptyDir: {} + }]); + } + + const containers: any = [ + { + name: 'framework', + image: replicaImage, + command: ['sh', `${path.join(trialWorkingFolder, runScriptFile)}`], + volumeMounts: [ + { + name: 'nni-vol', + mountPath: this.CONTAINER_MOUNT_PATH + }, { + name: 'frameworkbarrier-volume', + mountPath: '/mnt/frameworkbarrier' + }], + resources: podResources, + ports: [{ + containerPort: containerPort + }] + }]; + + const initContainers: any = [ + { + name: 'frameworkbarrier', + image: 'frameworkcontroller/frameworkbarrier', + volumeMounts: [ + { + name: 'frameworkbarrier-volume', + mountPath: '/mnt/frameworkbarrier' + }] + }]; + + const spec: any = { + containers: containers, + initContainers: initContainers, + restartPolicy: 'OnFailure', + volumes: volumeSpecMap.get('nniVolumes'), + hostNetwork: false + }; + if (privateRegistrySecretName) { + spec.imagePullSecrets = [ + { + name: privateRegistrySecretName + } + ]; + } + + if (this.config.serviceAccountName !== undefined) { + spec.serviceAccountName = this.config.serviceAccountName; + } + + return { + pod: { + spec: spec + } + }; + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void> { + environments.forEach(async (environment) => { + if (this.kubernetesCRDClient === undefined) { + throw new Error("kubernetesCRDClient undefined"); + } + const frameworkJobName: string = `nniexp${this.experimentId}env${environment.id}`.toLowerCase(); + const kubernetesJobInfo = await this.kubernetesCRDClient.getKubernetesJob(frameworkJobName); + + if (kubernetesJobInfo.status && kubernetesJobInfo.status.state) { + const frameworkJobType: FrameworkControllerJobStatus = kubernetesJobInfo.status.state; + /* eslint-disable require-atomic-updates */ + switch (frameworkJobType) { + case 'AttemptCreationPending': + case 'AttemptCreationRequested': + case 'AttemptPreparing': + environment.setStatus('WAITING'); + break; + case 'AttemptRunning': + environment.setStatus('RUNNING'); + break; + case 'Completed': { + const completedJobType: FrameworkControllerJobCompleteStatus = + kubernetesJobInfo.status.attemptStatus.completionStatus.type.name; + switch (completedJobType) { + case 'Succeeded': + environment.setStatus('SUCCEEDED'); + break; + case 'Failed': + environment.setStatus('FAILED'); + break; + default: + } + break; + } + default: + } + /* eslint-enable require-atomic-updates */ + } + }); + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/kubernetes/kubeflowEnvironmentService.ts 
b/ts/nni_manager/training_service/reusable/environments/kubernetes/kubeflowEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..2a0b84dde098a9ec98cfcb4a548fd2ab036cfd31 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/kubernetes/kubeflowEnvironmentService.ts @@ -0,0 +1,272 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; +import { KubeflowConfig, toMegaBytes } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { EnvironmentInformation } from 'training_service/reusable/environment'; +import { KubernetesEnvironmentService } from './kubernetesEnvironmentService'; +import { KubeflowOperatorClientFactory } from 'training_service/kubernetes/kubeflow/kubeflowApiClient'; +import { KubeflowClusterConfigAzure } from 'training_service/kubernetes/kubeflow/kubeflowConfig'; +import { KeyVaultConfig, AzureStorage } from 'training_service/kubernetes/kubernetesConfig'; + +@component.Singleton +export class KubeflowEnvironmentService extends KubernetesEnvironmentService { + + private config: KubeflowConfig; + private createStoragePromise?: Promise<void>; + + + constructor(config: KubeflowConfig, info: ExperimentStartupInfo) { + super(config, info); + this.experimentId = info.experimentId; + this.config = config; + // Create kubernetesCRDClient + this.kubernetesCRDClient = KubeflowOperatorClientFactory.createClient( + this.config.operator, this.config.apiVersion); + // Create storage + if (this.config.storage.storageType === 'azureStorage') { + if (this.config.storage.azureShare === undefined || + this.config.storage.azureAccount === undefined || + this.config.storage.keyVaultName === undefined || + this.config.storage.keyVaultKey === undefined) { + throw new Error("Azure storage configuration error!"); + } + + const azureStorage: AzureStorage = new AzureStorage(this.config.storage.azureShare, this.config.storage.azureAccount); + const keyVaultConfig: KeyVaultConfig = new KeyVaultConfig(this.config.storage.keyVaultName, this.config.storage.keyVaultKey); + const azureKubeflowClusterConfig: KubeflowClusterConfigAzure = new KubeflowClusterConfigAzure( + this.config.operator, this.config.apiVersion, keyVaultConfig, azureStorage); + this.azureStorageAccountName = azureKubeflowClusterConfig.azureStorage.accountName; + this.azureStorageShare = azureKubeflowClusterConfig.azureStorage.azureShare; + + this.createStoragePromise = this.createAzureStorage( + azureKubeflowClusterConfig.keyVault.vaultName, + azureKubeflowClusterConfig.keyVault.name + ); + } else if (this.config.storage.storageType === 'nfs') { + if (this.config.storage.server === undefined || + this.config.storage.path === undefined) { + throw new Error("NFS storage configuration error!"); + } + this.createStoragePromise = this.createNFSStorage( + this.config.storage.server, + this.config.storage.path + ); + } + } + + public get environmentMaintenceLoopInterval(): number { + return 5000; + } + + public get hasStorageService(): boolean { + return false; + } + + public get getName(): string { + return 'kubeflow'; + } + + public async startEnvironment(environment: EnvironmentInformation): Promise<void> { + if (this.kubernetesCRDClient === undefined) { + throw new Error("kubernetesCRDClient not initialized!"); + } + if (this.createStoragePromise) { + await this.createStoragePromise; + } + const expFolder = 
`${this.CONTAINER_MOUNT_PATH}/nni/${this.experimentId}`; + environment.command = `cd ${expFolder} && ${environment.command} \ +1>${expFolder}/envs/${environment.id}/trialrunner_stdout 2>${expFolder}/envs/${environment.id}/trialrunner_stderr`; + environment.maxTrialNumberPerGpu = this.config.maxTrialNumberPerGpu; + + const kubeflowJobName: string = `nniexp${this.experimentId}env${environment.id}`.toLowerCase(); + + await fs.promises.writeFile(path.join(this.environmentLocalTempFolder, "run.sh"), environment.command, { encoding: 'utf8' }); + + // upload script files to storage + const trialJobOutputUrl: string = await this.uploadFolder(this.environmentLocalTempFolder, `nni/${this.experimentId}`); + environment.trackingUrl = trialJobOutputUrl; + // Generate kubeflow job resource config object + const kubeflowJobConfig: any = await this.prepareKubeflowConfig(environment.id, kubeflowJobName); + // Create kubeflow job based on the generated resource config + await this.kubernetesCRDClient.createKubernetesJob(kubeflowJobConfig); + } + + /** + * upload local folder to nfs or azureStorage + */ + private async uploadFolder(srcDirectory: string, destDirectory: string): Promise<string> { + if (this.config.storage.storageType === 'azureStorage') { + if (this.azureStorageClient === undefined) { + throw new Error('azureStorageClient is not initialized'); + } + return await this.uploadFolderToAzureStorage(srcDirectory, destDirectory, 2); + } else { + // do not need to upload files to the nfs server, the temp folder is already mounted to nfs + return `nfs://${this.config.storage.server}:${destDirectory}`; + } + } + + private async prepareKubeflowConfig(envId: string, kubeflowJobName: string): Promise<any> { + const workerPodResources: any = {}; + if (this.config.worker !== undefined) { + workerPodResources.requests = this.generatePodResource(toMegaBytes(this.config.worker.memorySize), + this.config.worker.cpuNumber, this.config.worker.gpuNumber); + } + workerPodResources.limits = {...workerPodResources.requests}; + + const nonWorkerResources: any = {}; + if (this.config.operator === 'tf-operator') { + if (this.config.ps !== undefined) { + nonWorkerResources.requests = this.generatePodResource(toMegaBytes(this.config.ps.memorySize), + this.config.ps.cpuNumber, this.config.ps.gpuNumber); + nonWorkerResources.limits = {...nonWorkerResources.requests}; + } + } else if (this.config.operator === 'pytorch-operator') { + if (this.config.master !== undefined) { + nonWorkerResources.requests = this.generatePodResource(toMegaBytes(this.config.master.memorySize), + this.config.master.cpuNumber, this.config.master.gpuNumber); + nonWorkerResources.limits = {...nonWorkerResources.requests}; + } + } + + // Generate kubeflow job resource config object + const kubeflowJobConfig: any = await this.generateKubeflowJobConfig(envId, kubeflowJobName, workerPodResources, nonWorkerResources); + + return Promise.resolve(kubeflowJobConfig); + } + + /** + * Generate the kubeflow resource config + * @param envId environment id + * @param kubeflowJobName job name + * @param workerPodResources worker pod template + * @param nonWorkerPodResources non-worker pod template, like ps or master + */ + private async generateKubeflowJobConfig(envId: string, kubeflowJobName: string, workerPodResources: any, + nonWorkerPodResources?: any): Promise<any> { + + if (this.kubernetesCRDClient === undefined) { + throw new Error('Kubeflow operator client is not initialized'); + } + + const replicaSpecsObj: any = {}; + const replicaSpecsObjMap: Map<string, object> = new Map<string, object>(); + if (this.config.operator === 'tf-operator') {
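+            // Shape sketch of the object assembled below (tf-operator branch; the pytorch-operator branch is analogous, with Master in place of Ps): +            //   spec: { tfReplicaSpecs: { Worker: { replicas, template: {...} }, Ps: { replicas, template: {...} } } }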
+ if (this.config.worker) { + const privateRegistrySecretName = await this.createRegistrySecret(this.config.worker.privateRegistryAuthPath); + replicaSpecsObj.Worker = this.generateReplicaConfig(this.config.worker.replicas, + this.config.worker.dockerImage, 'run.sh', workerPodResources, privateRegistrySecretName); + } + if (this.config.ps !== undefined) { + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(this.config.ps.privateRegistryAuthPath); + replicaSpecsObj.Ps = this.generateReplicaConfig(this.config.ps.replicas, + this.config.ps.dockerImage, 'run.sh', nonWorkerPodResources, privateRegistrySecretName); + } + replicaSpecsObjMap.set(this.kubernetesCRDClient.jobKind, {tfReplicaSpecs: replicaSpecsObj}); + } else if (this.config.operator === 'pytorch-operator') { + if (this.config.worker !== undefined) { + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(this.config.worker.privateRegistryAuthPath); + replicaSpecsObj.Worker = this.generateReplicaConfig(this.config.worker.replicas, + this.config.worker.dockerImage, 'run.sh', workerPodResources, privateRegistrySecretName); + } + if (this.config.master !== undefined) { + const privateRegistrySecretName: string | undefined = await this.createRegistrySecret(this.config.master.privateRegistryAuthPath); + replicaSpecsObj.Master = this.generateReplicaConfig(this.config.master.replicas, + this.config.master.dockerImage, 'run.sh', nonWorkerPodResources, privateRegistrySecretName); + + } + replicaSpecsObjMap.set(this.kubernetesCRDClient.jobKind, {pytorchReplicaSpecs: replicaSpecsObj}); + } + + return Promise.resolve({ + apiVersion: `kubeflow.org/${this.kubernetesCRDClient.apiVersion}`, + kind: this.kubernetesCRDClient.jobKind, + metadata: { + name: kubeflowJobName, + namespace: 'default', + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: this.experimentId, + envId: envId + } + }, + spec: replicaSpecsObjMap.get(this.kubernetesCRDClient.jobKind) + }); + } + + /** + * Generate the tf-operator/pytorch-operator job's replica config section + * @param replicaNumber replica number + * @param replicaImage image + * @param runScriptFile script file name + * @param podResources pod resource config section + */ + private generateReplicaConfig(replicaNumber: number, replicaImage: string, runScriptFile: string, + podResources: any, privateRegistrySecretName: string | undefined): any { + if (this.kubernetesCRDClient === undefined) { + throw new Error('Kubeflow operator client is not initialized'); + } + // The config spec for the volume field + const volumeSpecMap: Map<string, object> = new Map<string, object>(); + if (this.config.storage.storageType === 'azureStorage') { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + azureFile: { + secretName: `${this.azureStorageSecretName}`, + shareName: `${this.azureStorageShare}`, + readonly: false + } + }]); + } else { + volumeSpecMap.set('nniVolumes', [ + { + name: 'nni-vol', + nfs: { + server: `${this.config.storage.server}`, + path: `${this.config.storage.path}` + } + }]); + } + // The config spec for the container field + const containersSpecMap: Map<string, object> = new Map<string, object>(); + containersSpecMap.set('containers', [ + { + // The Kubeflow tensorflow operator requires the container name to be 'tensorflow' + // TODO: change the name based on the operator's type + name: this.kubernetesCRDClient.containerName, + image: replicaImage, + args: ['sh', `${path.join(this.environmentWorkingFolder, runScriptFile)}`], + volumeMounts: [ + { + name: 'nni-vol', + mountPath: this.CONTAINER_MOUNT_PATH + }], + 
resources: podResources + } + ]); + const spec: any = { + containers: containersSpecMap.get('containers'), + restartPolicy: 'ExitCode', + volumes: volumeSpecMap.get('nniVolumes') + }; + if (privateRegistrySecretName) { + spec.imagePullSecrets = [ + { + name: privateRegistrySecretName + }]; + } + return { + replicas: replicaNumber, + template: { + metadata: { + creationTimestamp: null + }, + spec: spec + } + }; + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/kubernetes/kubernetesEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/kubernetes/kubernetesEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..95b6d1c6098b42412724e099970c2a98abdd5877 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/kubernetes/kubernetesEnvironmentService.ts @@ -0,0 +1,271 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cpp from 'child-process-promise'; +import path from 'path'; +import azureStorage from 'azure-storage'; +import {Base64} from 'js-base64'; +import {String} from 'typescript-string-operations'; +import { ExperimentConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { getLogger, Logger } from 'common/log'; +import { EnvironmentInformation, EnvironmentService } from 'training_service/reusable/environment'; +import {GeneralK8sClient, KubernetesCRDClient} from 'training_service/kubernetes/kubernetesApiClient'; +import {AzureStorageClientUtility} from 'training_service/kubernetes/azureStorageClientUtils'; +import { KubeflowJobStatus } from 'training_service/kubernetes/kubeflow/kubeflowConfig'; +import {delay, uniqueString} from 'common/utils'; +const fs = require('fs'); + +export class KubernetesEnvironmentService extends EnvironmentService { + protected azureStorageClient?: azureStorage.FileService; + protected azureStorageShare?: string; + protected azureStorageSecretName?: string; + protected azureStorageAccountName?: string; + protected genericK8sClient: GeneralK8sClient; + protected kubernetesCRDClient?: KubernetesCRDClient; + protected experimentRootDir: string; + protected experimentId: string; + + // experiment root dir in NFS + protected environmentLocalTempFolder: string; + protected NNI_KUBERNETES_TRIAL_LABEL: string = 'nni-kubernetes-trial'; + protected CONTAINER_MOUNT_PATH: string; + protected log: Logger = getLogger('KubernetesEnvironmentService'); + protected environmentWorkingFolder: string; + + constructor(_config: any, info: ExperimentStartupInfo) { + super(); + this.CONTAINER_MOUNT_PATH = '/tmp/mount'; + this.genericK8sClient = new GeneralK8sClient(); + this.experimentRootDir = info.logDir; + this.environmentLocalTempFolder = path.join(this.experimentRootDir, "environment-temp"); + this.experimentId = info.experimentId; + this.environmentWorkingFolder = path.join(this.CONTAINER_MOUNT_PATH, 'nni', this.experimentId); + } + + public get environmentMaintenceLoopInterval(): number { + return 5000; + } + + public get hasStorageService(): boolean { + return false; + } + + public get getName(): string { + return 'kubernetes'; + } + + protected async createAzureStorage(vaultName: string, vaultKeyName: string): Promise<void> { + try { + const result: any = await cpp.exec(`az keyvault secret show --name ${vaultKeyName} --vault-name ${vaultName}`); + if (result.stderr) { + const errorMessage: string = result.stderr; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); 
+ } + const storageAccountKey: any = JSON.parse(result.stdout).value; + if (this.azureStorageAccountName === undefined) { + throw new Error('azureStorageAccountName not initialized!'); + } + // create the storage client + this.azureStorageClient = azureStorage.createFileService(this.azureStorageAccountName, storageAccountKey); + await AzureStorageClientUtility.createShare(this.azureStorageClient, this.azureStorageShare); + // create the storage secret + this.azureStorageSecretName = String.Format('nni-secret-{0}', uniqueString(8) + .toLowerCase()); + if (this.genericK8sClient === undefined) { + throw new Error("genericK8sClient undefined!"); + } + const namespace = this.genericK8sClient.getNamespace ? this.genericK8sClient.getNamespace : "default"; + await this.genericK8sClient.createSecret( + { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: this.azureStorageSecretName, + namespace: namespace, + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: this.experimentId + } + }, + type: 'Opaque', + data: { + azurestorageaccountname: Base64.encode(this.azureStorageAccountName), + azurestorageaccountkey: Base64.encode(storageAccountKey) + } + } + ); + } catch (error) { + this.log.error(error); + + return Promise.reject(error); + } + + return Promise.resolve(); + } + + /** + * upload a local directory to azureStorage + * @param srcDirectory the source directory of the local folder + * @param destDirectory the target directory in azure + * @param uploadRetryCount the number of retries when the upload fails + */ + protected async uploadFolderToAzureStorage(srcDirectory: string, destDirectory: string, uploadRetryCount: number | undefined): Promise<string> { + if (this.azureStorageClient === undefined) { + throw new Error('azureStorageClient is not initialized'); + } + let retryCount: number = 1; + if (uploadRetryCount) { + retryCount = uploadRetryCount; + } + let uploadSuccess: boolean = false; + let folderUriInAzure = ''; + try { + do { + uploadSuccess = await AzureStorageClientUtility.uploadDirectory( + this.azureStorageClient, + `${destDirectory}`, + this.azureStorageShare, + `${srcDirectory}`); + if (!uploadSuccess) { + // wait 5 seconds, then re-upload the files + await delay(5000); + this.log.info('Upload failed, Retry: upload files to azure-storage'); + } else { + folderUriInAzure = `https://${this.azureStorageAccountName}.file.core.windows.net/${this.azureStorageShare}/${destDirectory}`; + break; + } + } while (retryCount-- >= 0) + } catch (error) { + this.log.error(error); + // return an empty url on error + return Promise.resolve(''); + } + return Promise.resolve(folderUriInAzure); + } + + protected async createNFSStorage(nfsServer: string, nfsPath: string): Promise<void> { + await cpp.exec(`mkdir -p ${this.environmentLocalTempFolder}`); + try { + await cpp.exec(`sudo mount ${nfsServer}:${nfsPath} ${this.environmentLocalTempFolder}`); + } catch (error) { + const mountError: string = `Mount NFS ${nfsServer}:${nfsPath} to ${this.environmentLocalTempFolder} failed, error is ${error}`; + this.log.error(mountError); + + return Promise.reject(mountError); + } + + return Promise.resolve(); + } + protected async createPVCStorage(pvcPath: string): Promise<void> { + try { + await cpp.exec(`mkdir -p ${pvcPath}`); + await cpp.exec(`sudo ln -s ${pvcPath} ${this.environmentLocalTempFolder}`); + } catch (error) { + const linkError: string = `Linking ${pvcPath} to ${this.environmentLocalTempFolder} failed, error is ${error}`; + this.log.error(linkError); + + return Promise.reject(linkError); + } + + return Promise.resolve(); + } + + 
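+    /** +     * Create a kubernetes.io/dockerconfigjson secret from a local docker auth file, so that pods can pull images from a private registry. +     * Returns the generated secret name, or undefined when no auth file is configured. +     */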
protected async createRegistrySecret(filePath: string | undefined): Promise<string | undefined> { + if (filePath === undefined || filePath === '') { + return undefined; + } + const body = fs.readFileSync(filePath).toString('base64'); + const registrySecretName = String.Format('nni-secret-{0}', uniqueString(8) + .toLowerCase()); + const namespace = this.genericK8sClient.getNamespace ? this.genericK8sClient.getNamespace : "default"; + await this.genericK8sClient.createSecret( + { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: registrySecretName, + namespace: namespace, + labels: { + app: this.NNI_KUBERNETES_TRIAL_LABEL, + expId: this.experimentId + } + }, + type: 'kubernetes.io/dockerconfigjson', + data: { + '.dockerconfigjson': body + } + } + ); + return registrySecretName; + } + + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void> { + environments.forEach(async (environment) => { + if (this.kubernetesCRDClient === undefined) { + throw new Error("kubernetesCRDClient undefined"); + } + const kubeflowJobName: string = `nniexp${this.experimentId}env${environment.id}`.toLowerCase(); + const kubernetesJobInfo = await this.kubernetesCRDClient.getKubernetesJob(kubeflowJobName); + if (kubernetesJobInfo.status && kubernetesJobInfo.status.conditions) { + const latestCondition: any = kubernetesJobInfo.status.conditions[kubernetesJobInfo.status.conditions.length - 1]; + const tfJobType: KubeflowJobStatus = latestCondition.type; + switch (tfJobType) { + case 'Created': + environment.setStatus('WAITING'); + break; + case 'Running': + environment.setStatus('RUNNING'); + break; + case 'Failed': + environment.setStatus('FAILED'); + break; + case 'Succeeded': + environment.setStatus('SUCCEEDED'); + break; + default: + } + } + }); + } + + public async startEnvironment(_environment: EnvironmentInformation): Promise<void> { + throw new Error("Not implemented"); + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise<void> { + if (this.kubernetesCRDClient === undefined) { + throw new Error('kubernetesCRDClient not initialized!'); + } + try { + await this.kubernetesCRDClient.deleteKubernetesJob(new Map( + [ + ['app', this.NNI_KUBERNETES_TRIAL_LABEL], + ['expId', this.experimentId], + ['envId', environment.id] + ] + )); + } catch (err) { + const errorMessage: string = `Delete env ${environment.id} failed: ${err}`; + this.log.error(errorMessage); + + return Promise.reject(errorMessage); + } + } + + public generatePodResource(memory: number, cpuNum: number, gpuNum: number): any { + const resources: any = { + memory: `${memory}Mi`, + cpu: `${cpuNum}` + }; + + if (gpuNum !== 0) { + resources['nvidia.com/gpu'] = `${gpuNum}`; + } + + return resources; + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/localEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/localEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..20f29e6229e44bc3cb151ed70978ba369a2b19c9 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/localEnvironmentService.ts @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
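+ +// LocalEnvironmentService runs trial runners as child processes on the machine that hosts the NNI manager: it writes a platform-specific run script (PowerShell on Windows, shell otherwise), launches it, and tracks liveness through the pid/code files the runner leaves behind.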
+ +import fs from 'fs'; +import path from 'path'; +import tkill from 'tree-kill'; +import * as component from 'common/component'; +import { getLogger, Logger } from 'common/log'; +import { ExperimentConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { powershellString } from 'common/shellUtils'; +import { EnvironmentInformation, EnvironmentService } from '../environment'; +import { isAlive, getNewLine } from 'common/utils'; +import { execMkdir, runScript, getScriptName, execCopydir } from 'training_service/common/util'; +import { SharedStorageService } from '../sharedStorage'; + +@component.Singleton +export class LocalEnvironmentService extends EnvironmentService { + + private readonly log: Logger = getLogger('LocalEnvironmentService'); + private experimentRootDir: string; + private experimentId: string; + + constructor(_config: ExperimentConfig, info: ExperimentStartupInfo) { + super(); + this.experimentId = info.experimentId; + this.experimentRootDir = info.logDir; + } + + public get environmentMaintenceLoopInterval(): number { + return 100; + } + + public get hasStorageService(): boolean { + return false; + } + + public get getName(): string { + return 'local'; + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void> { + environments.forEach(async (environment) => { + const jobpidPath: string = `${path.join(environment.runnerWorkingFolder, 'pid')}`; + const runnerReturnCodeFilePath: string = `${path.join(environment.runnerWorkingFolder, 'code')}`; + /* eslint-disable require-atomic-updates */ + try { + // check whether the pid file exists + const pidExist = fs.existsSync(jobpidPath); + if (!pidExist) { + return; + } + const pid: string = await fs.promises.readFile(jobpidPath, 'utf8'); + const alive: boolean = await isAlive(pid); + environment.status = 'RUNNING'; + // if the process of jobpid is not alive any more + if (!alive) { + if (fs.existsSync(runnerReturnCodeFilePath)) { + const runnerReturnCode: string = await fs.promises.readFile(runnerReturnCodeFilePath, 'utf8'); + const match: RegExpMatchArray | null = runnerReturnCode.trim() + .match(/^-?(\d+)\s+(\d+)$/); + if (match !== null) { + const { 1: code } = match; + // Update the trial job's status based on the result code + if (parseInt(code, 10) === 0) { + environment.setStatus('SUCCEEDED'); + } else { + environment.setStatus('FAILED'); + } + } + } + } + } catch (error) { + this.log.error(`Update job status exception, error is ${error.message}`); + } + }); + } + + private getScript(environment: EnvironmentInformation): string[] { + const script: string[] = []; + if (process.platform === 'win32') { + script.push(`$env:PATH=${powershellString(process.env['path']!)}`); + script.push(`cd $env:${this.experimentRootDir}`); + script.push(`New-Item -ItemType "directory" -Path ${path.join(this.experimentRootDir, 'envs', environment.id)} -Force`); + script.push(`cd envs\\${environment.id}`); + environment.command = `python -m nni.tools.trial_tool.trial_runner`; + script.push( + `cmd.exe /c ${environment.command} --job_pid_file ${path.join(environment.runnerWorkingFolder, 'pid')} 2>&1 | Out-File "${path.join(environment.runnerWorkingFolder, 'trial_runner.log')}" -encoding utf8`, + `$NOW_DATE = [int64](([datetime]::UtcNow)-(get-date "1/1/1970")).TotalSeconds`, + `$NOW_DATE = "$NOW_DATE" + (Get-Date -Format fff).ToString()`, + `Write $LASTEXITCODE " " $NOW_DATE | Out-File "${path.join(environment.runnerWorkingFolder, 'code')}" -NoNewline -encoding 
utf8`); + } else { + script.push(`cd ${this.experimentRootDir}`); + script.push(`eval ${environment.command} --job_pid_file ${environment.runnerWorkingFolder}/pid 1>${environment.runnerWorkingFolder}/trialrunner_stdout 2>${environment.runnerWorkingFolder}/trialrunner_stderr`); + if (process.platform === 'darwin') { + // https://superuser.com/questions/599072/how-to-get-bash-execution-time-in-milliseconds-under-mac-os-x + // Considering the worst case, write 999 to avoid negative durations + script.push(`echo $? \`date +%s999\` >'${environment.runnerWorkingFolder}/code'`); + } else { + script.push(`echo $? \`date +%s%3N\` >'${environment.runnerWorkingFolder}/code'`); + } + } + + return script; + } + + public async startEnvironment(environment: EnvironmentInformation): Promise<void> { + // Needs refactoring: this temp folder path is not appropriate, because the experiment id appears twice in it + const sharedStorageService = component.get<SharedStorageService>(SharedStorageService); + if (environment.useSharedStorage && sharedStorageService.canLocalMounted) { + this.experimentRootDir = sharedStorageService.localWorkingRoot; + } + const localEnvCodeFolder: string = path.join(this.experimentRootDir, "envs"); + if (environment.useSharedStorage && !sharedStorageService.canLocalMounted) { + await sharedStorageService.storageService.copyDirectoryBack("envs", localEnvCodeFolder); + } else if (!environment.useSharedStorage) { + const localTempFolder: string = path.join(this.experimentRootDir, "environment-temp", "envs"); + await execCopydir(localTempFolder, localEnvCodeFolder); + } + environment.runnerWorkingFolder = path.join(localEnvCodeFolder, environment.id); + await execMkdir(environment.runnerWorkingFolder); + environment.command = this.getScript(environment).join(getNewLine()); + const scriptName: string = getScriptName('run'); + await fs.promises.writeFile(path.join(localEnvCodeFolder, scriptName), + environment.command, { encoding: 'utf8', mode: 0o777 }); + + // Execute the command on the local machine + runScript(path.join(localEnvCodeFolder, scriptName)); + environment.trackingUrl = `${environment.runnerWorkingFolder}`; + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise<void> { + if (environment.isAlive === false) { + return Promise.resolve(); + } + + const jobpidPath: string = `${path.join(environment.runnerWorkingFolder, 'pid')}`; + const pid: string = await fs.promises.readFile(jobpidPath, 'utf8'); + tkill(Number(pid), 'SIGKILL'); + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/openPaiEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/openPaiEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..212ca9283f81d5ecfbd9842cee1dc102909c14dc --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/openPaiEnvironmentService.ts @@ -0,0 +1,328 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
+ +import yaml from 'js-yaml'; +import request from 'request'; +import { Container, Scope } from 'typescript-ioc'; +import { Deferred } from 'ts-deferred'; +import * as component from 'common/component'; +import { OpenpaiConfig, toMegaBytes } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { getLogger, Logger } from 'common/log'; +import { PAIClusterConfig } from 'training_service/pai/paiConfig'; +import { NNIPAITrialConfig } from 'training_service/pai/paiConfig'; +import { EnvironmentInformation, EnvironmentService } from '../environment'; +import { SharedStorageService } from '../sharedStorage'; +import { MountedStorageService } from '../storages/mountedStorageService'; +import { StorageService } from '../storageService'; + +/** + * Collects PAI job info from the PAI cluster, and updates PAI job status locally + */ +@component.Singleton +export class OpenPaiEnvironmentService extends EnvironmentService { + + private readonly log: Logger = getLogger('OpenPaiEnvironmentService'); + private paiClusterConfig: PAIClusterConfig | undefined; + private paiTrialConfig: NNIPAITrialConfig | undefined; + private paiToken: string; + private protocol: string; + private experimentId: string; + private config: OpenpaiConfig; + + constructor(config: OpenpaiConfig, info: ExperimentStartupInfo) { + super(); + this.experimentId = info.experimentId; + this.config = config; + this.paiToken = this.config.token; + this.protocol = this.config.host.toLowerCase().startsWith('https://') ? 'https' : 'http'; + Container.bind(StorageService) + .to(MountedStorageService) + .scope(Scope.Singleton); + const storageService = component.get<StorageService>(StorageService); + const remoteRoot = storageService.joinPath(this.config.localStorageMountPoint, this.experimentId); + storageService.initialize(this.config.localStorageMountPoint, remoteRoot); + } + + public get environmentMaintenceLoopInterval(): number { + return 5000; + } + + public get hasStorageService(): boolean { + return true; + } + + public get getName(): string { + return 'pai'; + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void> { + const deferred: Deferred<void> = new Deferred<void>(); + + if (this.paiToken === undefined) { + throw new Error('PAI token is not initialized'); + } + + const getJobInfoRequest: request.Options = { + uri: `${this.config.host}/rest-server/api/v2/jobs?username=${this.config.username}`, + method: 'GET', + json: true, + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.paiToken}` + } + }; + + request(getJobInfoRequest, async (error: any, response: request.Response, body: any) => { + // Status code 200 for success + if ((error !== undefined && error !== null) || response.statusCode >= 400) { + const errorMessage: string = (error !== undefined && error !== null) ? 
error.message : + `OpenPAI: get environment list from PAI cluster failed! http code: ${response.statusCode}, http body: ${JSON.stringify(body)}`; + this.log.error(`${errorMessage}`); + deferred.reject(errorMessage); + } else { + const jobInfos = new Map<string, any>(); + body.forEach((jobInfo: any) => { + jobInfos.set(jobInfo.name, jobInfo); + }); + + environments.forEach((environment) => { + if (jobInfos.has(environment.envId)) { + const jobResponse = jobInfos.get(environment.envId); + if (jobResponse && jobResponse.state) { + const oldEnvironmentStatus = environment.status; + switch (jobResponse.state) { + case 'RUNNING': + case 'WAITING': + case 'SUCCEEDED': + environment.setStatus(jobResponse.state); + break; + case 'FAILED': + environment.setStatus(jobResponse.state); + deferred.reject(`OpenPAI: job ${environment.envId} is failed!`); + break; + case 'STOPPED': + case 'STOPPING': + environment.setStatus('USER_CANCELED'); + break; + default: + this.log.error(`OpenPAI: job ${environment.envId} returns unknown state ${jobResponse.state}.`); + environment.setStatus('UNKNOWN'); + } + if (oldEnvironmentStatus !== environment.status) { + this.log.debug(`OpenPAI: job ${environment.envId} changed status from ${oldEnvironmentStatus} to ${environment.status} because the job is ${jobResponse.state}.`); + } + } else { + this.log.error(`OpenPAI: job ${environment.envId} has no state returned. body:`, jobResponse); + // some error happened, so mark this environment failed + environment.status = 'FAILED'; + } + } else { + this.log.error(`OpenPAI job ${environment.envId} is not found in the job list.`); + environment.status = 'UNKNOWN'; + } + }); + deferred.resolve(); + } + }); + return deferred.promise; + } + + public async startEnvironment(environment: EnvironmentInformation): Promise<void> { + const deferred: Deferred<void> = new Deferred<void>(); + + if (this.paiToken === undefined) { + throw new Error('PAI token is not initialized'); + } + // Step 1. Prepare the PAI job configuration + let environmentRoot: string; + if (environment.useSharedStorage) { + environmentRoot = component.get<SharedStorageService>(SharedStorageService).remoteWorkingRoot; + environment.command = `${component.get<SharedStorageService>(SharedStorageService).remoteMountCommand.replace(/echo -e /g, `echo `).replace(/echo /g, `echo -e `)} && cd ${environmentRoot} && ${environment.command}`; + } else { + environmentRoot = `${this.config.containerStorageMountPoint}/${this.experimentId}`; + environment.command = `cd ${environmentRoot} && ${environment.command}`; + } + environment.runnerWorkingFolder = `${environmentRoot}/envs/${environment.id}`; + environment.trackingUrl = `${this.config.host}/job-detail.html?username=${this.config.username}&jobName=${environment.envId}`; + environment.useActiveGpu = false; // does OpenPAI support this? + environment.maxTrialNumberPerGpu = 1; + + // Step 2. Generate the job configuration in yaml format + const paiJobConfig = this.generateJobConfigInYamlFormat(environment); + this.log.debug(`generated paiJobConfig: ${paiJobConfig}`); + + // Step 3. 
Submit the PAI job via a REST call + const submitJobRequest: request.Options = { + uri: `${this.config.host}/rest-server/api/v2/jobs`, + method: 'POST', + body: paiJobConfig, + followAllRedirects: true, + headers: { + 'Content-Type': 'text/yaml', + Authorization: `Bearer ${this.paiToken}` + } + }; + request(submitJobRequest, (error, response, body) => { + // Status code 202 for success, refer to https://github.com/microsoft/pai/blob/master/src/rest-server/docs/swagger.yaml + if ((error !== undefined && error !== null) || response.statusCode >= 400) { + const errorMessage: string = (error !== undefined && error !== null) ? error.message : + `start environment ${environment.envId} failed, http code: ${response.statusCode}, http body: ${body}`; + + this.log.error(errorMessage); + environment.status = 'FAILED'; + deferred.reject(errorMessage); + } + deferred.resolve(); + }); + + return deferred.promise; + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise<void> { + const deferred: Deferred<void> = new Deferred<void>(); + + if (environment.isAlive === false) { + return Promise.resolve(); + } + if (this.paiToken === undefined) { + return Promise.reject(Error('PAI token is not initialized')); + } + + const stopJobRequest: request.Options = { + uri: `${this.config.host}/rest-server/api/v2/jobs/${this.config.username}~${environment.envId}/executionType`, + method: 'PUT', + json: true, + body: { value: 'STOP' }, + time: true, + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.paiToken}` + } + }; + + this.log.debug(`stopping OpenPAI environment ${environment.envId}, ${stopJobRequest.uri}`); + + try { + request(stopJobRequest, (error, response, _body) => { + try { + // Status code 202 for success. + if ((error !== undefined && error !== null) || (response && response.statusCode >= 400)) { + const errorMessage: string = (error !== undefined && error !== null) ? error.message : + `OpenPAI: stop job ${environment.envId} failed, http code: ${response.statusCode}, http body: ${_body}`; + this.log.error(`${errorMessage}`); + deferred.reject((error !== undefined && error !== null) ? 
error : + `Stop trial failed, http code: ${response.statusCode}`); + } else { + this.log.info(`OpenPAI job ${environment.envId} stopped.`); + } + deferred.resolve(); + } catch (error) { + this.log.error(`OpenPAI error while stopping environment (inner): ${error}`); + deferred.reject(error); + } + }); + } catch (error) { + this.log.error(`OpenPAI error while stopping environment: ${error}`); + return Promise.reject(error); + } + + return deferred.promise; + } + + private generateJobConfigInYamlFormat(environment: EnvironmentInformation): any { + const jobName = environment.envId; + + let nniJobConfig: any = undefined; + if (this.config.openpaiConfig !== undefined) { + nniJobConfig = JSON.parse(JSON.stringify(this.config.openpaiConfig)); // trick for a deep clone in TypeScript + nniJobConfig.name = jobName; + if (nniJobConfig.taskRoles) { + + environment.nodeCount = 0; + // count instances + for (const taskRoleName in nniJobConfig.taskRoles) { + const taskRole = nniJobConfig.taskRoles[taskRoleName]; + let instanceCount = 1; + if (taskRole.instances) { + instanceCount = taskRole.instances; + } + environment.nodeCount += instanceCount; + } + + // Each taskRole will generate a new command in NNI's command format + // Each command will be formatted to NNI style + for (const taskRoleName in nniJobConfig.taskRoles) { + const taskRole = nniJobConfig.taskRoles[taskRoleName]; + // replace every ' with '\'' + const joinedCommand = taskRole.commands.join(" && ").replace(/'/g, "'\\''").trim(); + const nniTrialCommand = `${environment.command} --node_count ${environment.nodeCount} --trial_command '${joinedCommand}'`; + this.log.debug(`replace command ${taskRole.commands} to ${[nniTrialCommand]}`); + taskRole.commands = [nniTrialCommand]; + } + } + + } else { + nniJobConfig = { + protocolVersion: 2, + name: jobName, + type: 'job', + jobRetryCount: 0, + prerequisites: [ + { + type: 'dockerimage', + uri: this.config.dockerImage, + name: 'docker_image_0' + } + ], + taskRoles: { + taskrole: { + instances: 1, + completion: { + minFailedInstances: 1, + minSucceededInstances: -1 + }, + taskRetryCount: 0, + dockerImage: 'docker_image_0', + resourcePerInstance: { + gpu: this.config.trialGpuNumber === undefined ? 0 : this.config.trialGpuNumber, + cpu: this.config.trialCpuNumber, + memoryMB: toMegaBytes(this.config.trialMemorySize) + }, + commands: [ + environment.command + ] + } + }, + extras: { + 'storages': [ + { + name: this.config.storageConfigName + } + ], + submitFrom: 'submit-job-v2' + } + }; + if (this.config.virtualCluster) { + nniJobConfig.defaults = { + virtualCluster: this.config.virtualCluster + }; + } + } + return yaml.dump(nniJobConfig); + } + + protected formatPAIHost(host: string): string { + // If the user's host starts with 'http://' or 'https://', record the protocol and strip the scheme; + // otherwise return the host unchanged. + if (host.startsWith('http://')) { + this.protocol = 'http'; + return host.replace('http://', ''); + } else if (host.startsWith('https://')) { + this.protocol = 'https'; + return host.replace('https://', ''); + } else { + return host; + } + } +} diff --git a/ts/nni_manager/training_service/reusable/environments/remoteEnvironmentService.ts b/ts/nni_manager/training_service/reusable/environments/remoteEnvironmentService.ts new file mode 100644 index 0000000000000000000000000000000000000000..a5fed303b8876e405043f9e0174026c8a849203b --- /dev/null +++ b/ts/nni_manager/training_service/reusable/environments/remoteEnvironmentService.ts @@ -0,0 +1,300 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
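+ +// RemoteEnvironmentService maps each configured SSH machine to at most one reusable environment: machines are marked busy in remoteMachineMetaOccupiedMap when scheduled, and marked free again once the trial runner process on them exits.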
+ +import fs from 'fs'; +import path from 'path'; +import * as component from 'common/component'; +import { getLogger, Logger } from 'common/log'; +import { EnvironmentInformation, EnvironmentService } from '../environment'; +import { getLogLevel } from 'common/utils'; +import { RemoteConfig, RemoteMachineConfig } from 'common/experimentConfig'; +import { ExperimentStartupInfo } from 'common/experimentStartupInfo'; +import { execMkdir } from 'training_service/common/util'; +import { ExecutorManager } from 'training_service/remote_machine/remoteMachineData'; +import { ShellExecutor } from 'training_service/remote_machine/shellExecutor'; +import { RemoteMachineEnvironmentInformation } from '../remote/remoteConfig'; +import { SharedStorageService } from '../sharedStorage'; + +@component.Singleton +export class RemoteEnvironmentService extends EnvironmentService { + + private readonly initExecutorId = "initConnection"; + private readonly machineExecutorManagerMap: Map<RemoteMachineConfig, ExecutorManager>; + private readonly environmentExecutorManagerMap: Map<string, ExecutorManager>; + private readonly remoteMachineMetaOccupiedMap: Map<RemoteMachineConfig, boolean>; + private readonly log: Logger; + private sshConnectionPromises: Promise<void[]>; + private experimentRootDir: string; + private remoteExperimentRootDir: string = ""; + private experimentId: string; + private config: RemoteConfig; + + constructor(config: RemoteConfig, info: ExperimentStartupInfo) { + super(); + this.experimentId = info.experimentId; + this.environmentExecutorManagerMap = new Map(); + this.machineExecutorManagerMap = new Map(); + this.remoteMachineMetaOccupiedMap = new Map(); + this.experimentRootDir = info.logDir; + this.log = getLogger('RemoteEnvironmentService'); + this.config = config; + + // if codeDir is not a valid directory, throw an Error + if (!fs.lstatSync(this.config.trialCodeDirectory).isDirectory()) { + throw new Error(`codeDir ${this.config.trialCodeDirectory} is not a directory`); + } + + this.sshConnectionPromises = Promise.all(this.config.machineList.map( + machine => this.initRemoteMachineOnConnected(machine) + )); + } + + public async init(): Promise<void> { + await this.sshConnectionPromises; + this.log.info('ssh connection initialized!'); + Array.from(this.machineExecutorManagerMap.keys()).forEach(rmMeta => { + // initialize remoteMachineMetaOccupiedMap, false means not occupied + this.remoteMachineMetaOccupiedMap.set(rmMeta, false); + }); + } + + public get prefetchedEnvironmentCount(): number { + return this.machineExecutorManagerMap.size; + } + + public get environmentMaintenceLoopInterval(): number { + return 5000; + } + + public get hasMoreEnvironments(): boolean { + return false; + } + + public get hasStorageService(): boolean { + return false; + } + + public get getName(): string { + return 'remote'; + } + + private scheduleMachine(): RemoteMachineConfig | undefined { + for (const [rmMeta, occupied] of this.remoteMachineMetaOccupiedMap) { + if (!occupied) { + this.remoteMachineMetaOccupiedMap.set(rmMeta, true); + return rmMeta; + } + } + return undefined; + } + + private async initRemoteMachineOnConnected(rmMeta: RemoteMachineConfig): Promise<void> { + const executorManager: ExecutorManager = new ExecutorManager(rmMeta); + this.log.info(`connecting to ${rmMeta.user}@${rmMeta.host}:${rmMeta.port}`); + const executor: ShellExecutor = await executorManager.getExecutor(this.initExecutorId); + this.log.debug(`reached ${executor.name}`); + this.machineExecutorManagerMap.set(rmMeta, executorManager); + this.log.debug(`initializing ${executor.name}`); + + // Create the root working directory after the executor is ready
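+        // (Illustrative: with a Linux temp path this typically resolves to something like /tmp/nni-experiments/<experimentId>; the exact layout comes from the executor's getTempPath() and getRemoteExperimentRootDir() helpers.)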
+ const nniRootDir: string = executor.joinPath(executor.getTempPath(), 'nni-experiments'); + await executor.createFolder(executor.getRemoteExperimentRootDir(this.experimentId)); + + // the directory to store temp scripts on the remote machine + const remoteGpuScriptCollectorDir: string = executor.getRemoteScriptsPath(this.experimentId); + + // clean up previous results. + await executor.createFolder(remoteGpuScriptCollectorDir, true); + await executor.allowPermission(true, nniRootDir); + } + + public async refreshEnvironmentsStatus(environments: EnvironmentInformation[]): Promise<void> { + const tasks = environments.map(environment => this.refreshEnvironment(environment)); + await Promise.all(tasks); + } + + private async refreshEnvironment(environment: EnvironmentInformation): Promise<void> { + const executor = await this.getExecutor(environment.id); + const jobpidPath: string = `${environment.runnerWorkingFolder}/pid`; + const runnerReturnCodeFilePath: string = `${environment.runnerWorkingFolder}/code`; + /* eslint-disable require-atomic-updates */ + try { + // check whether the pid file exists + const pidExist = await executor.fileExist(jobpidPath); + if (!pidExist) { + return; + } + const isAlive = await executor.isProcessAlive(jobpidPath); + environment.status = 'RUNNING'; + // if the process of jobpid is not alive any more + if (!isAlive) { + const remoteEnvironment: RemoteMachineEnvironmentInformation = environment as RemoteMachineEnvironmentInformation; + if (remoteEnvironment.rmMachineMeta === undefined) { + throw new Error(`${remoteEnvironment.id} machine meta not initialized!`); + } + this.log.info(`pid in ${remoteEnvironment.rmMachineMeta.host}:${jobpidPath} is not alive!`); + if (fs.existsSync(runnerReturnCodeFilePath)) { + const runnerReturnCode: string = await executor.getRemoteFileContent(runnerReturnCodeFilePath); + const match: RegExpMatchArray | null = runnerReturnCode.trim() + .match(/^-?(\d+)\s+(\d+)$/); + if (match !== null) { + const { 1: code } = match; + // Update the trial job's status based on the result code + if (parseInt(code, 10) === 0) { + environment.setStatus('SUCCEEDED'); + } else { + environment.setStatus('FAILED'); + } + await this.releaseEnvironmentResource(environment); + } + } + } + } catch (error) { + this.log.error(`Update job status exception, error is ${error.message}`); + } + } + + /** + * If an environment has finished, release its connection resource + * @param environment remote machine environment job detail + */ + private async releaseEnvironmentResource(environment: EnvironmentInformation): Promise<void> { + if (environment.useSharedStorage) { + const executor = await this.getExecutor(environment.id); + const remoteUmountCommand = component.get<SharedStorageService>(SharedStorageService).remoteUmountCommand; + const result = await executor.executeScript(remoteUmountCommand, false, false); + if (result.exitCode !== 0) { + this.log.error(`Umount shared storage on remote machine failed.\n ERROR: ${result.stderr}`); + } + } + + const executorManager = this.environmentExecutorManagerMap.get(environment.id); + if (executorManager === undefined) { + throw new Error(`ExecutorManager is not assigned for environment ${environment.id}`); + } + + // Note: it still keeps a reference in environmentExecutorManagerMap, as there may be subsequent requests from the nni manager. 
+ executorManager.releaseExecutor(environment.id); + const remoteEnvironment: RemoteMachineEnvironmentInformation = environment as RemoteMachineEnvironmentInformation; + if (remoteEnvironment.rmMachineMeta === undefined) { + throw new Error(`${remoteEnvironment.id} rmMachineMeta not initialized!`); + } + this.remoteMachineMetaOccupiedMap.set(remoteEnvironment.rmMachineMeta, false); + } + + private async getScript(environment: EnvironmentInformation): Promise<string> { + const executor = await this.getExecutor(environment.id); + const isDebug = getLogLevel() == "debug"; + let script: string = environment.command; + environment.runnerWorkingFolder = executor.joinPath(this.remoteExperimentRootDir, 'envs', environment.id); + + let codeScript = `echo $? \`date +%s%3N\` >${environment.runnerWorkingFolder}/code`; + if (executor.isWindows) { + const prepare = `mkdir envs\\${environment.id} 2>NUL & cd envs\\${environment.id}`; + const startrun = `powershell ..\\install_nni.ps1 && python -m nni.tools.trial_tool.trial_runner`; + const developingScript = "IF EXIST nni_trial_tool (ECHO \"nni_trial_tool exists already\") ELSE (mkdir nni_trial_tool && tar -xof ../nni_trial_tool.tar.gz -C ./nni_trial_tool) && pip3 install websockets"; + + script = isDebug ? `${prepare} && ${developingScript} && ${startrun}` : `${prepare} && ${startrun}`; + codeScript = `powershell -command "Write $? " " (((New-TimeSpan -Start (Get-Date "01/01/1970") -End (Get-Date).ToUniversalTime()).TotalMilliseconds).ToString("0")) | Out-file ${path.join(environment.runnerWorkingFolder, 'code')} -Append -NoNewline -encoding utf8"`; + } + + script = `cd ${this.remoteExperimentRootDir} && \ + ${script} --job_pid_file ${environment.runnerWorkingFolder}/pid \ + 1>${environment.runnerWorkingFolder}/trialrunner_stdout 2>${environment.runnerWorkingFolder}/trialrunner_stderr \ + && ${codeScript}`; + + return script; + } + + public async startEnvironment(environment: EnvironmentInformation): Promise<void> { + const remoteEnvironment: RemoteMachineEnvironmentInformation = environment as RemoteMachineEnvironmentInformation; + remoteEnvironment.status = 'WAITING'; + // schedule a machine for the environment and generate the command + await this.prepareEnvironment(remoteEnvironment); + // launch the runner process on the machine + await this.launchRunner(environment); + } + + private async prepareEnvironment(environment: RemoteMachineEnvironmentInformation): Promise<boolean> { + // get an executor from the scheduler + const rmMachineMeta: RemoteMachineConfig | undefined = this.scheduleMachine(); + if (rmMachineMeta === undefined) { + this.log.warning(`No available machine!`); + return Promise.resolve(false); + } else { + environment.rmMachineMeta = rmMachineMeta; + const executorManager: ExecutorManager | undefined = this.machineExecutorManagerMap.get(environment.rmMachineMeta); + if (executorManager === undefined) { + throw new Error(`executorManager not initialized`); + } + this.environmentExecutorManagerMap.set(environment.id, executorManager); + const executor = await this.getExecutor(environment.id); + if (environment.useSharedStorage) { + this.remoteExperimentRootDir = component.get<SharedStorageService>(SharedStorageService).remoteWorkingRoot; + if (!this.remoteExperimentRootDir.startsWith('/')) { + this.remoteExperimentRootDir = executor.joinPath((await executor.getCurrentPath()).trim(), this.remoteExperimentRootDir); + } + const remoteMountCommand = component.get<SharedStorageService>(SharedStorageService).remoteMountCommand.replace(/echo -e /g, `echo `).replace(/echo /g, `echo -e `).replace(/\\\$/g, `\\\\\\$`); + const result = 
await executor.executeScript(remoteMountCommand, false, false); + if (result.exitCode !== 0) { + throw new Error(`Mount shared storage on remote machine failed.\n ERROR: ${result.stderr}`); + } + } else { + this.remoteExperimentRootDir = executor.getRemoteExperimentRootDir(this.experimentId); + } + + environment.command = await this.getScript(environment); + environment.useActiveGpu = rmMachineMeta.useActiveGpu; + return Promise.resolve(true); + } + } + + private async launchRunner(environment: RemoteMachineEnvironmentInformation): Promise<void> { + const executor = await this.getExecutor(environment.id); + const environmentLocalTempFolder: string = + path.join(this.experimentRootDir, "environment-temp"); + await executor.createFolder(environment.runnerWorkingFolder); + await execMkdir(environmentLocalTempFolder); + await fs.promises.writeFile(path.join(environmentLocalTempFolder, executor.getScriptName("run")), + environment.command, { encoding: 'utf8' }); + // Copy the files in codeDir to the remote working directory + await executor.copyDirectoryToRemote(environmentLocalTempFolder, this.remoteExperimentRootDir); + // Execute the command on the remote machine; set isInteractive=true to run the script in the conda environment + executor.executeScript(executor.joinPath(this.remoteExperimentRootDir, + executor.getScriptName("run")), true, true); + if (environment.rmMachineMeta === undefined) { + throw new Error(`${environment.id} rmMachineMeta not initialized!`); + } + environment.trackingUrl = `file://${environment.rmMachineMeta.host}:${environment.runnerWorkingFolder}`; + } + + private async getExecutor(environmentId: string): Promise<ShellExecutor> { + const executorManager = this.environmentExecutorManagerMap.get(environmentId); + if (executorManager === undefined) { + throw new Error(`ExecutorManager is not assigned for environment ${environmentId}`); + } + return await executorManager.getExecutor(environmentId); + } + + public async stopEnvironment(environment: EnvironmentInformation): Promise<void> { + if (environment.isAlive === false) { + return; + } + + const executor = await this.getExecutor(environment.id); + + if (environment.status === 'UNKNOWN') { + environment.status = 'USER_CANCELED'; + await this.releaseEnvironmentResource(environment); + return; + } + + const jobpidPath: string = `${environment.runnerWorkingFolder}/pid`; + try { + await executor.killChildProcesses(jobpidPath); + await this.releaseEnvironmentResource(environment); + } catch (error) { + this.log.error(`stopEnvironment: ${error}`); + } + } +} diff --git a/ts/nni_manager/training_service/reusable/gpuScheduler.ts b/ts/nni_manager/training_service/reusable/gpuScheduler.ts new file mode 100644 index 0000000000000000000000000000000000000000..d685c9a25437646669ecefd3845cc7438900c480 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/gpuScheduler.ts @@ -0,0 +1,354 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
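+ +// GpuScheduler assigns trials to environments under three placement-constraint types: 'None' (fall back to the default GPU count), 'GPUNumber' (an explicit count), and 'Device' (pin specific host/GPU-index pairs, currently limited to a single host).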
+
+import assert from 'assert';
+import { PlacementConstraint } from 'common/trainingService';
+import { getLogger, Logger } from 'common/log';
+import { randomSelect } from 'common/utils';
+import { GPUInfo, ScheduleResultType } from '../common/gpuData';
+import { EnvironmentInformation } from './environment';
+import { RemoteMachineEnvironmentInformation } from './remote/remoteConfig';
+import { TrialDetail } from './trial';
+
+type SCHEDULE_POLICY_NAME = 'random' | 'round-robin' | 'recently-idle';
+
+export class GpuSchedulerSetting {
+    public useActiveGpu: boolean = false;
+    public maxTrialNumberPerGpu: number = 1;
+}
+
+export type GpuScheduleResult = {
+    resultType: ScheduleResultType;
+    environment: EnvironmentInformation | undefined;
+    gpuIndices: GPUInfo[] | undefined;
+};
+
+/**
+ * A simple GPU scheduler implementation
+ */
+export class GpuScheduler {
+
+    // private readonly machineExecutorMap: Set;
+    private readonly log: Logger = getLogger('GpuScheduler');
+    private readonly policyName: SCHEDULE_POLICY_NAME = 'recently-idle';
+    private defaultSetting: GpuSchedulerSetting;
+    private roundRobinIndex: number = 0;
+
+    /**
+     * Constructor
+     * @param gpuSchedulerSetting default settings for GPU scheduling
+     */
+    constructor(gpuSchedulerSetting: GpuSchedulerSetting | undefined = undefined) {
+        if (undefined === gpuSchedulerSetting) {
+            gpuSchedulerSetting = new GpuSchedulerSetting();
+        }
+        this.defaultSetting = gpuSchedulerSetting;
+    }
+
+    public setSettings(gpuSchedulerSetting: GpuSchedulerSetting): void {
+        this.defaultSetting = gpuSchedulerSetting;
+    }
+
+    /**
+     * Schedule a machine according to the constraints (requiredGPUNum)
+     * @param defaultRequiredGPUNum the default required GPU number when constraint.type === 'None'
+     */
+    public scheduleMachine(environments: EnvironmentInformation[], constraint: PlacementConstraint,
+        defaultRequiredGPUNum: number | undefined, trialDetail: TrialDetail): GpuScheduleResult {
+        if (constraint.type == 'None' || constraint.type == 'GPUNumber') {
+            let requiredGPUNum = 0;
+            if (constraint.type == 'None') {
+                if (defaultRequiredGPUNum === undefined) {
+                    requiredGPUNum = 0;
+                } else {
+                    requiredGPUNum = defaultRequiredGPUNum;
+                }
+            } else if (constraint.type == 'GPUNumber') {
+                const gpus = constraint.gpus as Array<number>;
+                // TODO: remove the following constraint when supporting distributed trial
+                if (gpus.length != 1) {
+                    throw new Error("Placement constraint of GPUNumber must have exactly one number.");
+                }
+                requiredGPUNum = gpus[0];
+            }
+
+            assert(requiredGPUNum >= 0);
+            // Step 1: Check that the required GPU number does not exceed the GPU count of every machine
+            const eligibleEnvironments: EnvironmentInformation[] = environments.filter((environment: EnvironmentInformation) =>
+                environment.defaultGpuSummary === undefined || requiredGPUNum === 0 ||
+                (requiredGPUNum !== undefined && environment.defaultGpuSummary.gpuCount >= requiredGPUNum));
+            if (eligibleEnvironments.length === 0) {
+                // If the required GPU number exceeds the upper limit of every machine's GPU count,
+                // return REQUIRE_EXCEED_TOTAL directly
+                return ({
+                    resultType: ScheduleResultType.REQUIRE_EXCEED_TOTAL,
+                    gpuIndices: undefined,
+                    environment: undefined,
+                });
+            }
+
+            // Step 2: Allocate Host/GPU for the specified trial job.
+            // Currently the requiredGPUNum parameter is identical for all trial jobs.
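// Editor's illustration (not in the original diff): with hosts reporting
// gpuCount 2, 4 and 8, a GPUNumber constraint of [6] leaves only the 8-GPU
// host in eligibleEnvironments after step 1, while [16] empties the list and
// returns REQUIRE_EXCEED_TOTAL immediately, before any per-GPU bookkeeping.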
+            if (requiredGPUNum > 0) {
+                // Trial job requires GPU
+                const result: GpuScheduleResult | undefined = this.scheduleGPUHost(environments, requiredGPUNum, trialDetail);
+                if (result !== undefined) {
+                    return result;
+                }
+            } else {
+                // Trial job does not need GPU
+                const allocatedRm: EnvironmentInformation = this.selectMachine(environments, environments);
+
+                return this.allocateHost(requiredGPUNum, allocatedRm, [], trialDetail);
+            }
+
+            return {
+                resultType: ScheduleResultType.TMP_NO_AVAILABLE_GPU,
+                gpuIndices: undefined,
+                environment: undefined,
+            };
+        } else {
+            assert(constraint.type === 'Device');
+            if (constraint.gpus.length == 0) {
+                throw new Error("Device constraint is used but no device is specified.");
+            }
+            const gpus = constraint.gpus as Array<[string, number]>;
+            const selectedHost = gpus[0][0];
+
+            const differentHosts: Array<[string, number]> = gpus.filter((gpuTuple: [string, number]) => gpuTuple[0] != selectedHost);
+            if (differentHosts.length >= 1) {
+                // TODO: remove this constraint when supporting multi-host placement
+                throw new Error("Device constraint does not support using multiple hosts");
+            }
+            if (environments.length == 0) {
+                return {
+                    resultType: ScheduleResultType.TMP_NO_AVAILABLE_GPU,
+                    gpuIndices: undefined,
+                    environment: undefined,
+                };
+            }
+            for (const environment of environments) {
+                if (!('rmMachineMeta' in environment)) {
+                    // TODO: remove this constraint when supporting other training services
+                    throw new Error(`Device placement constraint only supports the remote training service for now.`);
+                }
+            }
+            const eligibleEnvironments: EnvironmentInformation[] = environments.filter(
+                (environment: EnvironmentInformation) =>
+                    (environment as RemoteMachineEnvironmentInformation).rmMachineMeta != undefined &&
+                    (environment as RemoteMachineEnvironmentInformation).rmMachineMeta?.host == selectedHost);
+            if (eligibleEnvironments.length === 0) {
+                throw new Error(`The required host (host: ${selectedHost}) is not found.`);
+            }
+            const selectedEnvironment = eligibleEnvironments[0];
+            const availableResources = this.gpuResourceDetection([selectedEnvironment]);
+            const selectedGPUs: Array<GPUInfo> = [];
+
+            if (selectedEnvironment.defaultGpuSummary === undefined) {
+                // GPU summary may not be ready; retry until it is ready
+                return {
+                    resultType: ScheduleResultType.TMP_NO_AVAILABLE_GPU,
+                    gpuIndices: undefined,
+                    environment: undefined,
+                };
+            }
+            for (const gpuTuple of gpus) {
+                const gpuIdx: number = gpuTuple[1];
+                if (gpuIdx >= selectedEnvironment.defaultGpuSummary.gpuCount) {
+                    throw new Error(`The gpuIdx ${gpuIdx} of the placement constraint exceeds the gpuCount of host ${selectedHost}`);
+                }
+
+                if (availableResources.has(selectedEnvironment)) {
+                    for (const gpuInfo of availableResources.get(selectedEnvironment)!) {
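// Editor's illustration (not in the original diff): a Device constraint such as
//   { type: 'Device', gpus: [['10.0.0.1', 0], ['10.0.0.1', 2]] }
// pins the trial to GPUs 0 and 2 on host 10.0.0.1; the loop below succeeds only
// if every requested index is still in the host's free-GPU list.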
{ + if (gpuInfo.index === gpuIdx) { + selectedGPUs.push(gpuInfo); + } + } + } + } + if (selectedGPUs.length === constraint.gpus.length) { + for (const gpuInfo of selectedGPUs) { + let num = selectedEnvironment.defaultGpuSummary?.assignedGpuIndexMap.get(gpuInfo.index); + if (num === undefined) { + num = 0; + } + selectedEnvironment.defaultGpuSummary?.assignedGpuIndexMap.set(gpuInfo.index, num + 1); + } + return { + resultType: ScheduleResultType.SUCCEED, + environment: selectedEnvironment, + gpuIndices: selectedGPUs, + }; + } else { + return { + resultType: ScheduleResultType.TMP_NO_AVAILABLE_GPU, + gpuIndices: undefined, + environment: undefined, + }; + } + } + } + + /** + * remove the job's gpu reversion + */ + public removeGpuReservation(trial: TrialDetail): void { + if (trial.environment !== undefined && + trial.environment.defaultGpuSummary !== undefined && + trial.assignedGpus !== undefined && + trial.assignedGpus.length > 0) { + + for (const gpuInfo of trial.assignedGpus) { + const defaultGpuSummary = trial.environment.defaultGpuSummary; + const num: number | undefined = defaultGpuSummary.assignedGpuIndexMap.get(gpuInfo.index); + if (num !== undefined) { + if (num === 1) { + defaultGpuSummary.assignedGpuIndexMap.delete(gpuInfo.index); + } else { + defaultGpuSummary.assignedGpuIndexMap.set(gpuInfo.index, num - 1); + } + } + } + } + } + + private scheduleGPUHost(environments: EnvironmentInformation[], requiredGPUNumber: number, trial: TrialDetail): GpuScheduleResult | undefined { + const totalResourceMap: Map = this.gpuResourceDetection(environments); + const qualifiedEnvironments: EnvironmentInformation[] = []; + totalResourceMap.forEach((gpuInfos: GPUInfo[], environment: EnvironmentInformation) => { + if (gpuInfos !== undefined && gpuInfos.length >= requiredGPUNumber) { + qualifiedEnvironments.push(environment); + } + }); + if (qualifiedEnvironments.length > 0) { + const allocatedEnvironment: EnvironmentInformation = this.selectMachine(qualifiedEnvironments, environments); + const gpuInfos: GPUInfo[] | undefined = totalResourceMap.get(allocatedEnvironment); + if (gpuInfos !== undefined) { // should always true + return this.allocateHost(requiredGPUNumber, allocatedEnvironment, gpuInfos, trial); + } else { + assert(false, 'gpuInfos is undefined'); + } + } + return undefined; + } + + /** + * Detect available GPU resource for an environment + * @returns Available GPUs on environments + */ + private gpuResourceDetection(environments: EnvironmentInformation[]): Map { + const totalResourceMap: Map = new Map(); + environments.forEach((environment: EnvironmentInformation) => { + // Assgin totoal GPU count as init available GPU number + if (environment.defaultGpuSummary !== undefined) { + const defaultGpuSummary = environment.defaultGpuSummary; + const availableGPUs: GPUInfo[] = []; + const designatedGpuIndices: Set = new Set(environment.usableGpus); + if (designatedGpuIndices.size > 0) { + for (const gpuIndex of designatedGpuIndices) { + if (gpuIndex >= environment.defaultGpuSummary.gpuCount) { + throw new Error(`Specified GPU index not found: ${gpuIndex}`); + } + } + } + + if (undefined !== defaultGpuSummary.gpuInfos) { + defaultGpuSummary.gpuInfos.forEach((gpuInfo: GPUInfo) => { + // if the GPU has active process, OR be reserved by a job, + // or index not in gpuIndices configuration in machineList, + // or trial number on a GPU reach max number, + // We should NOT allocate this GPU + // if users set useActiveGpu, use the gpu whether there is another activeProcess + if 
(designatedGpuIndices.size === 0 || designatedGpuIndices.has(gpuInfo.index)) { + if (defaultGpuSummary.assignedGpuIndexMap !== undefined) { + const num: number | undefined = defaultGpuSummary.assignedGpuIndexMap.get(gpuInfo.index); + const maxTrialNumberPerGpu: number = environment.maxTrialNumberPerGpu ? environment.maxTrialNumberPerGpu : this.defaultSetting.maxTrialNumberPerGpu; + const useActiveGpu: boolean = environment.useActiveGpu ? environment.useActiveGpu : this.defaultSetting.useActiveGpu; + if ((num === undefined && (!useActiveGpu && gpuInfo.activeProcessNum === 0 || useActiveGpu)) || + (num !== undefined && num < maxTrialNumberPerGpu)) { + availableGPUs.push(gpuInfo); + } + } else { + throw new Error(`occupiedGpuIndexMap is undefined!`); + } + } + }); + } + totalResourceMap.set(environment, availableGPUs); + } + }); + + return totalResourceMap; + } + + private selectMachine(qualifiedEnvironments: EnvironmentInformation[], allEnvironments: EnvironmentInformation[]): EnvironmentInformation { + assert(qualifiedEnvironments !== undefined && qualifiedEnvironments.length > 0); + + if (this.policyName === 'random') { + return randomSelect(qualifiedEnvironments); + } else if (this.policyName === 'round-robin') { + return this.roundRobinSelect(qualifiedEnvironments, allEnvironments); + } else if (this.policyName === 'recently-idle') { + return this.recentlyIdleSelect(qualifiedEnvironments, allEnvironments); + } else { + throw new Error(`Unsupported schedule policy: ${this.policyName}`); + } + } + + // Select the environment which is idle most recently. If all environments are not idle, use round robin to select an environment. + private recentlyIdleSelect(qualifiedEnvironments: EnvironmentInformation[], allEnvironments: EnvironmentInformation[]): EnvironmentInformation { + const now = Date.now(); + let selectedEnvironment: EnvironmentInformation | undefined = undefined; + let minTimeInterval = Number.MAX_SAFE_INTEGER; + for (const environment of qualifiedEnvironments) { + if (environment.latestTrialReleasedTime > 0 && (now - environment.latestTrialReleasedTime) < minTimeInterval) { + selectedEnvironment = environment; + minTimeInterval = now - environment.latestTrialReleasedTime; + } + } + if (selectedEnvironment === undefined) { + return this.roundRobinSelect(qualifiedEnvironments, allEnvironments); + } + selectedEnvironment.latestTrialReleasedTime = -1; + return selectedEnvironment; + } + + private roundRobinSelect(qualifiedEnvironments: EnvironmentInformation[], allEnvironments: EnvironmentInformation[]): EnvironmentInformation { + while (!qualifiedEnvironments.includes(allEnvironments[this.roundRobinIndex % allEnvironments.length])) { + this.roundRobinIndex++; + } + + return allEnvironments[this.roundRobinIndex++ % allEnvironments.length]; + } + + private selectGPUsForTrial(gpuInfos: GPUInfo[], requiredGPUNum: number): GPUInfo[] { + // Sequentially allocate GPUs + return gpuInfos.slice(0, requiredGPUNum); + } + + private allocateHost(requiredGPUNum: number, environment: EnvironmentInformation, + gpuInfos: GPUInfo[], trialDetails: TrialDetail): GpuScheduleResult { + assert(gpuInfos.length >= requiredGPUNum); + const allocatedGPUs: GPUInfo[] = this.selectGPUsForTrial(gpuInfos, requiredGPUNum); + const defaultGpuSummary = environment.defaultGpuSummary; + if (undefined === defaultGpuSummary) { + throw new Error(`Environment ${environment.id} defaultGpuSummary shouldn't be undefined!`); + } + + allocatedGPUs.forEach((gpuInfo: GPUInfo) => { + let num: number | undefined = 
defaultGpuSummary.assignedGpuIndexMap.get(gpuInfo.index); + if (num === undefined) { + num = 0; + } + defaultGpuSummary.assignedGpuIndexMap.set(gpuInfo.index, num + 1); + }); + trialDetails.assignedGpus = allocatedGPUs; + + return { + resultType: ScheduleResultType.SUCCEED, + environment: environment, + gpuIndices: allocatedGPUs, + }; + } +} diff --git a/ts/nni_manager/training_service/reusable/heterogenous/heterogenousConfig.ts b/ts/nni_manager/training_service/reusable/heterogenous/heterogenousConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..2c3012bb12c1cefcbb09f9c8ca46e9f0c0894cdb --- /dev/null +++ b/ts/nni_manager/training_service/reusable/heterogenous/heterogenousConfig.ts @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + + +export class HeterogenousConfig { + public readonly trainingServicePlatforms: string[]; + + constructor(trainingServicePlatforms: string[]) { + this.trainingServicePlatforms = trainingServicePlatforms; + } +} diff --git a/ts/nni_manager/training_service/reusable/remote/remoteConfig.ts b/ts/nni_manager/training_service/reusable/remote/remoteConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..d7a481ab0ec6e19bc0a43a93ef3fc1b357479581 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/remote/remoteConfig.ts @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { EnvironmentInformation } from '../environment'; +import { RemoteMachineConfig } from 'common/experimentConfig'; + +/** + * RemoteMachineEnvironmentInformation + */ +export class RemoteMachineEnvironmentInformation extends EnvironmentInformation { + public rmMachineMeta?: RemoteMachineConfig; +} diff --git a/ts/nni_manager/training_service/reusable/routerTrainingService.ts b/ts/nni_manager/training_service/reusable/routerTrainingService.ts new file mode 100644 index 0000000000000000000000000000000000000000..1ded550292b01721c7be00c74878316fd066abb0 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/routerTrainingService.ts @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { getLogger, Logger } from 'common/log'; +import { MethodNotImplementedError } from 'common/errors'; +import { ExperimentConfig, RemoteConfig, OpenpaiConfig, KubeflowConfig, FrameworkControllerConfig } from 'common/experimentConfig'; +import { TrainingService, TrialJobApplicationForm, TrialJobDetail, TrialJobMetric } from 'common/trainingService'; +import { delay } from 'common/utils'; +import { PAITrainingService } from '../pai/paiTrainingService'; +import { RemoteMachineTrainingService } from '../remote_machine/remoteMachineTrainingService'; +import { KubeflowTrainingService } from '../kubernetes/kubeflow/kubeflowTrainingService'; +import { FrameworkControllerTrainingService } from '../kubernetes/frameworkcontroller/frameworkcontrollerTrainingService'; +import { TrialDispatcher } from './trialDispatcher'; + + +/** + * It's a intermedia implementation to support reusable training service. + * The final goal is to support reusable training job in higher level than training service. 
+ */
+class RouterTrainingService implements TrainingService {
+    private log!: Logger;
+    private internalTrainingService!: TrainingService;
+
+    public static async construct(config: ExperimentConfig): Promise<RouterTrainingService> {
+        const instance = new RouterTrainingService();
+        instance.log = getLogger('RouterTrainingService');
+        const platform = Array.isArray(config.trainingService) ? 'hybrid' : config.trainingService.platform;
+        if (platform === 'remote' && (config.trainingService as RemoteConfig).reuseMode === false) {
+            instance.internalTrainingService = new RemoteMachineTrainingService(config.trainingService as RemoteConfig);
+        } else if (platform === 'openpai' && (config.trainingService as OpenpaiConfig).reuseMode === false) {
+            instance.internalTrainingService = new PAITrainingService(config.trainingService as OpenpaiConfig);
+        } else if (platform === 'kubeflow' && (config.trainingService as KubeflowConfig).reuseMode === false) {
+            instance.internalTrainingService = new KubeflowTrainingService();
+        } else if (platform === 'frameworkcontroller' && (config.trainingService as FrameworkControllerConfig).reuseMode === false) {
+            instance.internalTrainingService = new FrameworkControllerTrainingService();
+        } else {
+            instance.internalTrainingService = await TrialDispatcher.construct(config);
+        }
+        return instance;
+    }
+
+    // eslint-disable-next-line @typescript-eslint/no-empty-function
+    private constructor() { }
+
+    public async listTrialJobs(): Promise<TrialJobDetail[]> {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        return await this.internalTrainingService.listTrialJobs();
+    }
+
+    public async getTrialJob(trialJobId: string): Promise<TrialJobDetail> {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        return await this.internalTrainingService.getTrialJob(trialJobId);
+    }
+
+    public async getTrialFile(_trialJobId: string, _fileName: string): Promise<Buffer | string> {
+        throw new MethodNotImplementedError();
+    }
+
+    public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        this.internalTrainingService.addTrialJobMetricListener(listener);
+    }
+
+    public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        this.internalTrainingService.removeTrialJobMetricListener(listener);
+    }
+
+    public async submitTrialJob(form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        return await this.internalTrainingService.submitTrialJob(form);
+    }
+
+    public async updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise<TrialJobDetail> {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        return await this.internalTrainingService.updateTrialJob(trialJobId, form);
+    }
+
+    public async cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean | undefined): Promise<void> {
+        if (this.internalTrainingService === undefined) {
+            throw new Error("TrainingService is not assigned!");
+        }
+        await this.internalTrainingService.cancelTrialJob(trialJobId, isEarlyStopped);
+    }
+
+    public async setClusterMetadata(_key: string, _value: string): Promise<void> { return; }
+    public async getClusterMetadata(_key: string): Promise<string> { return ''; }
+
+    public async cleanUp(): Promise<void> {
+        if (this.internalTrainingService === undefined) {
+            throw new
Error("TrainingService is not assigned!"); + } + await this.internalTrainingService.cleanUp(); + } + + public async run(): Promise { + // wait internal training service is assigned. + // It will be assigned after set metadata of paiConfig + while (this.internalTrainingService === undefined) { + await delay(100); + } + return await this.internalTrainingService.run(); + } + + public async getTrialOutputLocalPath(trialJobId: string): Promise { + if (this.internalTrainingService === undefined) { + throw new Error("TrainingService is not assigned!"); + } + return this.internalTrainingService.getTrialOutputLocalPath(trialJobId); + } + + public async fetchTrialOutput(trialJobId: string, subpath: string): Promise { + if (this.internalTrainingService === undefined) { + throw new Error("TrainingService is not assigned!"); + } + return this.internalTrainingService.fetchTrialOutput(trialJobId, subpath); + } +} + +export { RouterTrainingService }; diff --git a/ts/nni_manager/training_service/reusable/sharedStorage.ts b/ts/nni_manager/training_service/reusable/sharedStorage.ts new file mode 100644 index 0000000000000000000000000000000000000000..361db5987e84be7fc3c1842ceb62fbecf9dda2a5 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/sharedStorage.ts @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { SharedStorageConfig } from 'common/experimentConfig'; +import { StorageService } from './storageService' + +export type SharedStorageType = 'NFS' | 'AzureBlob' +export type LocalMountedType = 'usermount' | 'nnimount' | 'nomount' + +export abstract class SharedStorageService { + public abstract config(config: SharedStorageConfig): Promise; + public abstract get canLocalMounted(): boolean; + public abstract get storageService(): StorageService; + public abstract get localMountCommand(): string; + public abstract get remoteMountCommand(): string; + public abstract get remoteUmountCommand(): string; + public abstract get localWorkingRoot(): string; + public abstract get remoteWorkingRoot(): string; + public abstract cleanUp(): Promise; +} diff --git a/ts/nni_manager/training_service/reusable/shared_storages/azureblobStorageService.ts b/ts/nni_manager/training_service/reusable/shared_storages/azureblobStorageService.ts new file mode 100644 index 0000000000000000000000000000000000000000..fda888d36ea2db6d986a9d263c9d6e1af4f6a577 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/shared_storages/azureblobStorageService.ts @@ -0,0 +1,189 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import cpp from 'child-process-promise'; +import path from 'path'; + +import { SharedStorageService, SharedStorageType } from '../sharedStorage' +import { MountedStorageService } from '../storages/mountedStorageService'; +import { getLogger, Logger } from 'common/log'; +import { getExperimentId } from 'common/experimentStartupInfo'; +import { AzureBlobConfig } from 'common/experimentConfig'; + +const INSTALL_BLOBFUSE = ` +#!/bin/bash +if [ -n "$(command -v blobfuse)" ] +then + exit 0 +fi + +if [ -n "$(command -v apt-get)" ] +then + sudo apt-get update + sudo apt-get install -y lsb-release +elif [ -n "$(command -v yum)" ] +then + sudo yum install -y redhat-lsb +else + echo "Unknown package management." 
+ exit 1 +fi + +id=$(lsb_release -i | cut -c16- | sed s/[[:space:]]//g) +version=$(lsb_release -r | cut -c9- | sed s/[[:space:]]//g) + +if [ "$id" = "Ubuntu" ] +then + wget https://packages.microsoft.com/config/ubuntu/$version/packages-microsoft-prod.deb + sudo DEBIAN_FRONTEND=noninteractive dpkg -i packages-microsoft-prod.deb + sudo apt-get update + sudo apt-get install -y blobfuse fuse +elif [ "$id" = "CentOS" ] || [ "$id" = "RHEL" ] +then + sudo rpm -Uvh https://packages.microsoft.com/config/rhel/$(echo $version | cut -c1)/packages-microsoft-prod.rpm + sudo yum install -y blobfuse fuse +else + echo "Not support distributor." + exit 1 +fi +` + +export class AzureBlobSharedStorageService extends SharedStorageService { + private log: Logger; + private internalStorageService: MountedStorageService; + private experimentId: string; + private localMounted?: string; + + private storageType?: SharedStorageType; + private storageAccountName?: string; + private storageAccountKey?: string; + private containerName?: string; + + private localMountPoint?: string; + private remoteMountPoint?: string; + + constructor() { + super(); + this.log = getLogger('AzureBlobSharedStorageService'); + this.internalStorageService = new MountedStorageService(); + this.experimentId = getExperimentId(); + } + + public async config(azureblobConfig: AzureBlobConfig): Promise { + this.localMountPoint = azureblobConfig.localMountPoint; + this.remoteMountPoint = azureblobConfig.remoteMountPoint; + + this.storageType = azureblobConfig.storageType as SharedStorageType; + this.storageAccountName = azureblobConfig.storageAccountName; + this.containerName = azureblobConfig.containerName; + if (azureblobConfig.storageAccountKey !== undefined) { + this.storageAccountKey = azureblobConfig.storageAccountKey; + } else { + const errorMessage = `${this.storageType} Shared Storage: must set 'storageAccountKey'.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + this.localMounted = azureblobConfig.localMounted; + if (this.localMounted === 'nnimount') { + await this.helpLocalMount(); + } else if (this.localMounted === 'nomount') { + const errorMessage = `${this.storageType} Shared Storage: ${this.storageType} not Support 'nomount' yet.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + if (this.canLocalMounted && this.localMountPoint) { + this.internalStorageService.initialize(this.localMountPoint, path.join(this.localMountPoint, 'nni', this.experimentId)); + } + } + + public get canLocalMounted(): boolean{ + return true; + } + + public get storageService(): MountedStorageService { + return this.internalStorageService; + } + + public get localMountCommand(): string { + if (this.localMountPoint) { + return this.getCommand(this.localMountPoint); + } else { + this.log.error(`${this.storageType} Shared Storage: localMountPoint is not initialized.`); + return ''; + } + } + + public get remoteMountCommand(): string { + if (this.remoteMountPoint) { + return this.getCommand(this.remoteMountPoint); + } else { + this.log.error(`${this.storageType} Shared Storage: remoteMountPoint is not initialized.`); + return ''; + } + } + + public get remoteUmountCommand(): string { + if (this.remoteMountPoint) { + return `sudo umount -l ${this.remoteMountPoint}`; + } else { + this.log.error(`${this.storageType} Shared Storage: remoteMountPoint is not initialized.`); + return ''; + } + } + + private getCommand(mountPoint: string): string { + const install = `rm -f nni_install_fuseblob.sh && touch 
nni_install_fuseblob.sh && echo "${INSTALL_BLOBFUSE.replace(/\$/g, `\\$`).replace(/\n/g, `\\n`).replace(/"/g, `\\"`)}" >> nni_install_fuseblob.sh && bash nni_install_fuseblob.sh`; + const prepare = `sudo mkdir /mnt/resource/nniblobfusetmp -p && rm -f nni_fuse_connection.cfg && touch nni_fuse_connection.cfg && echo "accountName ${this.storageAccountName}\\naccountKey ${this.storageAccountKey}\\ncontainerName ${this.containerName}" >> nni_fuse_connection.cfg`; + const mount = `mkdir -p ${mountPoint} && sudo blobfuse ${mountPoint} --tmp-path=/mnt/resource/nniblobfusetmp --config-file=$(pwd)/nni_fuse_connection.cfg -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other`; + const clean = `rm -f nni_install_fuseblob.sh nni_fuse_connection.cfg`; + return `${install} && ${prepare} && ${mount} && ${clean}`; + } + + public get localWorkingRoot(): string { + return `${this.localMountPoint}/nni/${this.experimentId}`; + } + + public get remoteWorkingRoot(): string { + return `${this.remoteMountPoint}/nni/${this.experimentId}`; + } + + private async helpLocalMount(): Promise { + if (process.platform === 'win32') { + const errorMessage = `${this.storageType} Shared Storage: ${this.storageType} do not support mount under Windows yet.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + try { + this.log.debug(`Local mount command is: ${this.localMountCommand}`); + const result = await cpp.exec(this.localMountCommand); + if (result.stderr) { + throw new Error(result.stderr); + } + } catch (error) { + const errorMessage: string = `${this.storageType} Shared Storage: Mount ${this.storageAccountName}/${this.containerName} to ${this.localMountPoint} failed, error is ${error}`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + return Promise.resolve(); + } + + public async cleanUp(): Promise { + if (this.localMounted !== 'nnimount') { + return Promise.resolve(); + } + try { + const result = await cpp.exec(`sudo umount -l ${this.localMountPoint}`); + if (result.stderr) { + throw new Error(result.stderr); + } + } catch (error) { + const errorMessage: string = `${this.storageType} Shared Storage: Umount ${this.localMountPoint} failed, error is ${error}`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + return Promise.resolve(); + } +} diff --git a/ts/nni_manager/training_service/reusable/shared_storages/nfsStorageService.ts b/ts/nni_manager/training_service/reusable/shared_storages/nfsStorageService.ts new file mode 100644 index 0000000000000000000000000000000000000000..eda6d6127bce6f9e0265ea27598ba82a4c141d4d --- /dev/null +++ b/ts/nni_manager/training_service/reusable/shared_storages/nfsStorageService.ts @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
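(Editor's sketch, not part of the diff: the remote mount command that getCommand() above assembles for Azure Blob is a single `&&` chain; a hypothetical helper with the same flags makes the stages easier to see.)

    function blobfuseMountCommand(account: string, key: string, container: string, mountPoint: string): string {
        // 1. connection config consumed by blobfuse (written next to the install script)
        const config = `accountName ${account}\\naccountKey ${key}\\ncontainerName ${container}`;
        const prepare = `sudo mkdir -p /mnt/resource/nniblobfusetmp && echo "${config}" > nni_fuse_connection.cfg`;
        // 2. mount the container with the cache/timeout options used above
        const mount = `mkdir -p ${mountPoint} && sudo blobfuse ${mountPoint} --tmp-path=/mnt/resource/nniblobfusetmp ` +
            `--config-file=$(pwd)/nni_fuse_connection.cfg -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other`;
        // 3. remove the key material once mounted
        return `${prepare} && ${mount} && rm -f nni_fuse_connection.cfg`;
    }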
+ +import cpp from 'child-process-promise'; +import path from 'path'; + +import { SharedStorageService, SharedStorageType } from '../sharedStorage' +import { MountedStorageService } from '../storages/mountedStorageService'; +import { getLogger, Logger } from 'common/log'; +import { getExperimentId } from 'common/experimentStartupInfo'; +import { NfsConfig } from 'common/experimentConfig'; + +const INSTALL_NFS_CLIENT = ` +#!/bin/bash +if [ -n "$(command -v nfsstat)" ] +then + exit 0 +fi + +if [ -n "$(command -v apt-get)" ] +then + sudo apt-get update + sudo apt-get install -y nfs-common +elif [ -n "$(command -v yum)" ] +then + sudo yum install -y nfs-utils +elif [ -n "$(command -v dnf)" ] +then + sudo dnf install -y nfs-utils +else + echo "Unknown package management." + exit 1 +fi +` + +export class NFSSharedStorageService extends SharedStorageService { + private log: Logger; + private internalStorageService: MountedStorageService; + private experimentId: string; + private localMounted?: string; + + private storageType?: SharedStorageType; + private nfsServer?: string; + private exportedDirectory?: string; + + private localMountPoint?: string; + private remoteMountPoint?: string; + + constructor() { + super(); + this.log = getLogger('NFSSharedStorageService'); + this.internalStorageService = new MountedStorageService(); + this.experimentId = getExperimentId(); + } + + public async config(nfsConfig: NfsConfig): Promise { + this.localMountPoint = nfsConfig.localMountPoint; + this.remoteMountPoint = nfsConfig.remoteMountPoint; + + this.storageType = nfsConfig.storageType; + this.nfsServer = nfsConfig.nfsServer; + this.exportedDirectory = nfsConfig.exportedDirectory; + this.localMounted = nfsConfig.localMounted; + if (this.localMounted === 'nnimount') { + await this.helpLocalMount(); + } else if (this.localMounted === 'nomount') { + const errorMessage = `${this.storageType} Shared Storage: ${this.storageType} not Support 'nomount'.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + this.internalStorageService.initialize(this.localMountPoint, path.join(this.localMountPoint, 'nni', this.experimentId)); + return Promise.resolve(); + } + + public get canLocalMounted(): boolean{ + return true; + } + + public get storageService(): MountedStorageService { + return this.internalStorageService; + } + + public get localMountCommand(): string { + if (this.localMountPoint) { + return this.getCommand(this.localMountPoint); + } else { + this.log.error(`${this.storageType} Shared Storage: localMountPoint is not initialized.`); + return ''; + } + } + + public get remoteMountCommand(): string { + if (this.remoteMountPoint) { + return this.getCommand(this.remoteMountPoint); + } else { + this.log.error(`${this.storageType} Shared Storage: remoteMountPoint is not initialized.`); + return ''; + } + } + + public get remoteUmountCommand(): string { + if (this.remoteMountPoint) { + return `sudo umount -f -l ${this.remoteMountPoint}`; + } else { + this.log.error(`${this.storageType} Shared Storage: remoteMountPoint is not initialized.`); + return ''; + } + } + + private getCommand(mountPoint: string): string { + const install = `rm -f nni_install_nfsclient.sh && touch nni_install_nfsclient.sh && echo "${INSTALL_NFS_CLIENT.replace(/\$/g, `\\$`).replace(/\n/g, `\\n`).replace(/"/g, `\\"`)}" >> nni_install_nfsclient.sh && bash nni_install_nfsclient.sh`; + const mount = `mkdir -p ${mountPoint} && sudo mount ${this.nfsServer}:${this.exportedDirectory} ${mountPoint}`; + const clean = `rm -f 
nni_install_nfsclient.sh`; + return `${install} && ${mount} && ${clean}`; + } + + public get localWorkingRoot(): string { + return `${this.localMountPoint}/nni/${this.experimentId}`; + } + + public get remoteWorkingRoot(): string { + return `${this.remoteMountPoint}/nni/${this.experimentId}`; + } + + private async helpLocalMount(): Promise { + if (process.platform === 'win32') { + const errorMessage = `${this.storageType} Shared Storage: NNI not support auto mount ${this.storageType} under Windows yet.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + try { + const result = await cpp.exec(this.localMountCommand); + if (result.stderr) { + throw new Error(result.stderr); + } + } catch (error) { + const errorMessage: string = `${this.storageType} Shared Storage: Mount ${this.nfsServer}:${this.exportedDirectory} to ${this.localMountPoint} failed, error is ${error}`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + + return Promise.resolve(); + } + + public async cleanUp(): Promise { + if (this.localMounted !== 'nnimount') { + return Promise.resolve(); + } + try { + const result = await cpp.exec(`sudo umount -f -l ${this.localMountPoint}`); + if (result.stderr) { + throw new Error(result.stderr); + } + } catch (error) { + const errorMessage: string = `${this.storageType} Shared Storage: Umount ${this.localMountPoint} failed, error is ${error}`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + return Promise.resolve(); + } +} diff --git a/ts/nni_manager/training_service/reusable/storageService.ts b/ts/nni_manager/training_service/reusable/storageService.ts new file mode 100644 index 0000000000000000000000000000000000000000..9148d7ae70fa257bc7d00fe4b96c0ab366e9a3a8 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/storageService.ts @@ -0,0 +1,183 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
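(Editor's sketch: both shared-storage services ship their install script by echoing it into a file on the target machine, and the escaping chain is the same in each. A hypothetical helper equivalent to the inline `.replace(...)` calls above:)

    function quoteForRemoteEcho(script: string): string {
        return script
            .replace(/\$/g, '\\$')   // keep $vars and $(...) for the remote shell to expand
            .replace(/\n/g, '\\n')   // fold the multi-line script into one echo argument
            .replace(/"/g, '\\"');   // survive the surrounding double quotes
    }

Note that the `\n` escapes only expand when the command runs as `echo -e`, which is what the `replace(/echo /g, 'echo -e ')` rewrite in prepareEnvironment earlier in this diff arranges before executing the mount command remotely.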
+ +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { getLogger, Logger } from 'common/log'; +import { uniqueString } from 'common/utils'; +import { tarAdd } from '../common/util'; + +export abstract class StorageService { + protected localRoot: string = ""; + protected remoteRoot: string = ""; + protected logger: Logger; + + protected abstract internalConfig(key: string, value: string): void; + protected abstract internalRemove(remotePath: string, isDirectory: boolean, isRecursive: boolean): Promise; + protected abstract internalRename(remotePath: string, newName: string): Promise; + protected abstract internalMkdir(remotePath: string): Promise; + protected abstract internalCopy(sourcePath: string, targetPath: string, isDirectory: boolean, isFromRemote: boolean, isToRemote: boolean): Promise; + protected abstract internalExists(remotePath: string): Promise; + protected abstract internalRead(remotePath: string, offset: number, length: number): Promise; + protected abstract internalList(remotePath: string): Promise; + protected abstract internalAttach(remotePath: string, content: string): Promise; + protected abstract internalIsRelativePath(path: string): boolean; + protected abstract internalJoin(...paths: string[]): string; + protected abstract internalDirname(...paths: string[]): string; + protected abstract internalBasename(...paths: string[]): string; + + constructor() { + this.logger = getLogger('StorageService'); + } + + public initialize(localRoot: string, remoteRoot: string): void { + this.logger.debug(`Initializing storage to local: ${localRoot} remote: ${remoteRoot}`); + this.localRoot = localRoot; + this.remoteRoot = remoteRoot; + } + + public async rename(remotePath: string, newName: string): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`rename remotePath: ${remotePath} to: ${newName}`); + await this.internalRename(remotePath, newName); + } + + public async createDirectory(remotePath: string): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`create remotePath: ${remotePath}`); + await this.internalMkdir(remotePath); + } + + public async copyDirectory(localPath: string, remotePath: string, asGzip: boolean = false): Promise { + localPath = this.expandPath(false, localPath); + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`copy localPath: ${localPath} to remotePath: ${remotePath}, asGzip ${asGzip}`); + if (!await this.internalExists(remotePath)) { + await this.internalMkdir(remotePath); + } + + if (asGzip) { + const localPathBaseName = path.basename(localPath); + const tempTarFileName = `nni_tmp_${localPathBaseName}_${uniqueString(5)}.tar.gz`; + const tarFileName = `${localPathBaseName}.tar.gz`; + const localTarPath: string = path.join(os.tmpdir(), tempTarFileName); + await tarAdd(localTarPath, localPath); + await this.internalCopy(localTarPath, remotePath, false, false, true); + const remoteFileName = this.internalJoin(remotePath, tempTarFileName); + await this.internalRename(remoteFileName, tarFileName); + await fs.promises.unlink(localTarPath); + + remotePath = this.internalJoin(remotePath, tarFileName); + } else { + await this.internalCopy(localPath, remotePath, true, false, true); + remotePath = this.internalJoin(remotePath, path.basename(localPath)); + } + + return remotePath; + } + + public async copyDirectoryBack(remotePath: string, localPath: string): Promise { + localPath = this.expandPath(false, localPath); + remotePath = this.expandPath(true, 
remotePath); + this.logger.debug(`copy remotePath: ${remotePath} to localPath: ${localPath}`); + return await this.internalCopy(remotePath, localPath, true, true, false); + } + + public async removeDirectory(remotePath: string, isRecursive: boolean): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`remove remotePath: ${remotePath}`); + await this.internalRemove(remotePath, true, isRecursive); + } + + public async readFileContent(remotePath: string, offset: number = -1, length: number = -1): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`read remote file: ${remotePath}, offset: ${offset}, length: ${length}`); + return this.internalRead(remotePath, offset, length); + } + + public async listDirectory(remotePath: string): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`list remotePath: ${remotePath}`); + return await this.internalList(remotePath); + } + + public async exists(remotePath: string): Promise { + remotePath = this.expandPath(true, remotePath); + const exists = await this.internalExists(remotePath); + this.logger.debug(`exists remotePath: ${remotePath} is ${exists}`); + return exists + } + + public async save(content: string, remotePath: string, isAttach: boolean = false): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`saving content to remotePath: ${remotePath}, length: ${content.length}, isAttach: ${isAttach}`); + const remoteDir = this.internalDirname(remotePath); + + if (isAttach) { + if (await this.internalExists(remoteDir) === false) { + await this.internalMkdir(remoteDir); + } + const result = await this.internalAttach(remotePath, content); + if (false === result) { + throw new Error("this.internalAttach doesn't support"); + } + } else { + const fileName = this.internalBasename(remotePath); + const tempFileName = `temp_${uniqueString(4)}_${fileName}`; + const localTempFileName = path.join(os.tmpdir(), tempFileName); + const remoteTempFile = this.internalJoin(remoteDir, tempFileName); + + if (await this.internalExists(remotePath) === true) { + await this.internalRemove(remotePath, false, false); + } + await fs.promises.writeFile(localTempFileName, content); + await this.internalCopy(localTempFileName, remoteDir, false, false, true); + await this.rename(remoteTempFile, fileName); + await fs.promises.unlink(localTempFileName); + } + } + + public async copyFile(localPath: string, remotePath: string): Promise { + localPath = this.expandPath(false, localPath); + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`copying file localPath: ${localPath} to remotePath: ${remotePath}`); + await this.internalCopy(localPath, remotePath, false, false, true); + } + + public async copyFileBack(remotePath: string, localPath: string): Promise { + localPath = this.expandPath(false, localPath); + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`copy file remotePath: ${remotePath} to localPath: ${localPath}`); + await this.internalCopy(remotePath, localPath, false, true, false); + } + + public async removeFile(remotePath: string): Promise { + remotePath = this.expandPath(true, remotePath); + this.logger.debug(`remove file remotePath: ${remotePath}`); + await this.internalRemove(remotePath, false, false); + } + + public joinPath(...paths: string[]): string { + let fullPath = this.internalJoin(...paths); + if (this.internalIsRelativePath(fullPath) === true && this.remoteRoot !== "") { + fullPath = this.internalJoin(this.remoteRoot, 
fullPath); + } + return fullPath; + } + + private expandPath(isRemote: boolean, ...paths: string[]): string { + let normalizedPath: string; + + if (isRemote) { + normalizedPath = this.joinPath(...paths); + } else { + normalizedPath = path.join(...paths); + if (!path.isAbsolute(normalizedPath) && this.localRoot !== "") { + normalizedPath = path.join(this.localRoot, normalizedPath); + } + } + + return normalizedPath; + } +} diff --git a/ts/nni_manager/training_service/reusable/storages/mountedStorageService.ts b/ts/nni_manager/training_service/reusable/storages/mountedStorageService.ts new file mode 100644 index 0000000000000000000000000000000000000000..f5aafabf26d6361cc7a9b83b1c42472b9d3bc861 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/storages/mountedStorageService.ts @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import fs from 'fs'; +import path from 'path'; +import { Deferred } from "ts-deferred"; +import { StorageService } from "../storageService"; + +export class MountedStorageService extends StorageService { + + protected internalConfig(_key: string, _value: string): void { + // nothing to config + } + + protected async internalRemove(path: string, isDirectory: boolean, isRecursive: boolean): Promise { + if (isDirectory) { + if (isRecursive) { + const children = await fs.promises.readdir(path); + for (const file of children) { + const filePath = this.internalJoin(path, file); + const stat = await fs.promises.lstat(filePath); + await this.internalRemove(filePath, stat.isDirectory(), isRecursive); + } + } + await fs.promises.rmdir(path); + } else { + await fs.promises.unlink(path); + } + } + + protected async internalRename(remotePath: string, newName: string): Promise { + const dirName = path.dirname(remotePath); + newName = this.internalJoin(dirName, newName); + + await fs.promises.rename(remotePath, newName); + } + + protected async internalMkdir(remotePath: string): Promise { + if (!fs.existsSync(remotePath)) { + await fs.promises.mkdir(remotePath, { recursive: true }); + } + } + + protected async internalCopy(sourcePath: string, targetPath: string, isDirectory: boolean, isFromRemote: boolean = false, isToRemote: boolean = true): Promise { + if (sourcePath === targetPath) { + return targetPath; + } + + this.logger.debug(`copying ${sourcePath} to ${targetPath}, dir ${isDirectory}, isFromRemote ${isFromRemote}, isToRemote: ${isToRemote}`); + if (isDirectory) { + const basename = isFromRemote ? 
this.internalBasename(sourcePath) : path.basename(sourcePath);
+            if (isToRemote) {
+                targetPath = this.internalJoin(targetPath, basename);
+                await this.internalMkdir(targetPath);
+            } else {
+                targetPath = path.join(targetPath, basename);
+                await fs.promises.mkdir(targetPath);
+            }
+            const children = await fs.promises.readdir(sourcePath);
+            for (const child of children) {
+                const childSourcePath = this.internalJoin(sourcePath, child);
+                const stat = await fs.promises.lstat(childSourcePath);
+                await this.internalCopy(childSourcePath, targetPath, stat.isDirectory(), isFromRemote, isToRemote);
+            }
+            return targetPath;
+        } else {
+            // This behavior may not be consistent across platforms, so normalize it here.
+            await this.internalMkdir(targetPath);
+            const targetFileName = path.join(targetPath, path.basename(sourcePath));
+            await fs.promises.copyFile(sourcePath, targetFileName);
+            return targetFileName;
+        }
+    }
+
+    protected async internalExists(remotePath: string): Promise<boolean> {
+        const deferred = new Deferred<boolean>();
+        fs.exists(remotePath, (exists) => {
+            deferred.resolve(exists);
+        });
+        return deferred.promise;
+    }
+
+    protected async internalRead(remotePath: string, offset?: number, length?: number): Promise<string> {
+        const deferred = new Deferred<string>();
+        // set a maximum length of 1 MB for performance reasons.
+        const maxLength = 1024 * 1024;
+        if (offset === undefined) {
+            offset = -1;
+        }
+        const current: number = offset < 0 ? 0 : offset;
+        if (length === undefined) {
+            length = -1;
+        }
+        const readLength: number = length < 0 ? maxLength : length;
+        let result: string = "";
+
+        const stream = fs.createReadStream(remotePath,
+            {
+                encoding: "utf8",
+                start: current,
+                end: readLength + current - 1,
+            }).on("data", (data) => {
+                result += data;
+            }).on("end", () => {
+                stream.close();
+                deferred.resolve(result);
+            }).on("error", (err) => {
+                deferred.reject(err);
+            });
+
+        return deferred.promise;
+    }
+
+    protected async internalList(remotePath: string): Promise<string[]> {
+        let results: string[] = [];
+
+        if (await this.internalExists(remotePath) === true) {
+            results = await fs.promises.readdir(remotePath);
+        }
+
+        return results;
+    }
+
+    protected async internalAttach(remotePath: string, content: string): Promise<boolean> {
+        await fs.promises.appendFile(remotePath, content, {
+            encoding: "utf8",
+            flag: "a",
+        });
+        return true;
+    }
+
+    protected internalIsRelativePath(remotePath: string): boolean {
+        return !path.isAbsolute(remotePath);
+    }
+
+    protected internalJoin(...paths: string[]): string {
+        return path.join(...paths);
+    }
+
+    protected internalDirname(remotePath: string): string {
+        return path.dirname(remotePath);
+    }
+
+    protected internalBasename(remotePath: string): string {
+        return path.basename(remotePath);
+    }
+}
diff --git a/ts/nni_manager/training_service/reusable/trial.ts b/ts/nni_manager/training_service/reusable/trial.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4476716b2ef812828f02f6c5c66170d2733e43d4
--- /dev/null
+++ b/ts/nni_manager/training_service/reusable/trial.ts
@@ -0,0 +1,41 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
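(Editor's sketch, assumptions labeled: a standalone rendering of the windowed-read pattern used by internalRead() above, with the same 1 MB default cap and Node's inclusive stream bounds.)

    import fs from 'fs';

    // read at most `length` bytes of utf8 text starting at `offset`
    function readWindow(file: string, offset = -1, length = -1): Promise<string> {
        const start = offset < 0 ? 0 : offset;
        const size = length < 0 ? 1024 * 1024 : length;  // default cap: 1 MB
        return new Promise((resolve, reject) => {
            let result = '';
            fs.createReadStream(file, { encoding: 'utf8', start, end: start + size - 1 })
                .on('data', (chunk) => { result += chunk; })
                .on('end', () => resolve(result))
                .on('error', reject);
        });
    }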
+ +import { TrialJobApplicationForm, TrialJobDetail, TrialJobStatus } from "common/trainingService"; +import { GPUInfo } from "training_service/common/gpuData"; +import { EnvironmentInformation, NodeInformation } from "./environment"; + +export class TrialDetail implements TrialJobDetail { + public id: string; + public status: TrialJobStatus; + public submitTime: number; + public startTime?: number; + public endTime?: number; + public tags?: string[]; + public url?: string; + public workingDirectory: string; + public form: TrialJobApplicationForm; + public isEarlyStopped?: boolean; + public environment?: EnvironmentInformation; + public message?: string; + + // init settings of trial + public settings = {}; + // it's used to aggregate node status for multiple node trial + public nodes: Map; + // assigned GPUs for multi-trial scheduled. + public assignedGpus: GPUInfo[] | undefined; + + public readonly TRIAL_METADATA_DIR = ".nni"; + + constructor(id: string, status: TrialJobStatus, submitTime: number, + workingDirectory: string, form: TrialJobApplicationForm) { + this.id = id; + this.status = status; + this.submitTime = submitTime; + this.workingDirectory = workingDirectory; + this.form = form; + this.tags = []; + this.nodes = new Map(); + } +} diff --git a/ts/nni_manager/training_service/reusable/trialDispatcher.ts b/ts/nni_manager/training_service/reusable/trialDispatcher.ts new file mode 100644 index 0000000000000000000000000000000000000000..4efc4f2b7ab9a99533048d6b595fe59c1c4a61b8 --- /dev/null +++ b/ts/nni_manager/training_service/reusable/trialDispatcher.ts @@ -0,0 +1,965 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { EventEmitter } from 'events'; +import fs from 'fs'; +import path from 'path'; +import { Writable } from 'stream'; +import { Container, Scope } from 'typescript-ioc'; +import { String } from 'typescript-string-operations'; +import * as component from 'common/component'; +import { NNIError, NNIErrorNames, MethodNotImplementedError } from 'common/errors'; +import { getBasePort, getExperimentId } from 'common/experimentStartupInfo'; +import { getLogger, Logger } from 'common/log'; +import { TrainingService, TrialJobApplicationForm, TrialJobMetric, TrialJobStatus } from 'common/trainingService'; +import { delay, getExperimentRootDir, getIPV4Address, getLogLevel, getVersion, mkDirPSync, randomSelect, uniqueString } from 'common/utils'; +import { ExperimentConfig, SharedStorageConfig } from 'common/experimentConfig'; +import { GPU_INFO, INITIALIZED, KILL_TRIAL_JOB, NEW_TRIAL_JOB, REPORT_METRIC_DATA, SEND_TRIAL_JOB_PARAMETER, STDOUT, TRIAL_END, VERSION_CHECK } from 'core/commands'; +import { ScheduleResultType } from 'training_service/common/gpuData'; +import { CONTAINER_INSTALL_NNI_SHELL_FORMAT } from '../common/containerJobData'; +import { CONTAINER_INSTALL_NNI_SHELL_FORMAT_FOR_WIN } from '../common/containerJobData'; +import { TrialConfig } from '../common/trialConfig'; +import { validateCodeDir } from '../common/util'; +import { Command, CommandChannel } from './commandChannel'; +import { EnvironmentInformation, EnvironmentService, NodeInformation, RunnerSettings, TrialGpuSummary } from './environment'; +import { createEnvironmentService } from './environments/environmentServiceFactory'; +import { GpuScheduler } from './gpuScheduler'; +import { MountedStorageService } from './storages/mountedStorageService'; +import { StorageService } from './storageService'; +import { SharedStorageService } from './sharedStorage'; +import { 
NFSSharedStorageService } from './shared_storages/nfsStorageService'
+import { AzureBlobSharedStorageService } from './shared_storages/azureblobStorageService'
+import { TrialDetail } from './trial';
+
+/**
+ * It is used to manage jobs on training platforms,
+ * and to expose each trial as a trial job to the upper level.
+**/
+@component.Singleton
+class TrialDispatcher implements TrainingService {
+    private log: Logger;
+    private isDeveloping: boolean = false;
+    private stopping: boolean = false;
+
+    private metricsEmitter: EventEmitter;
+    private experimentId: string;
+    private experimentRootDir: string;
+
+    private enableVersionCheck: boolean = true;
+
+    private trialConfig: TrialConfig | undefined;
+
+    private trials: Map<string, TrialDetail>;
+    private environments: Map<string, EnvironmentInformation>;
+    // make public for ut
+    public environmentServiceList: EnvironmentService[] = [];
+    public commandChannelSet: Set<CommandChannel>;
+    public commandEmitter: EventEmitter;
+    public environmentMaintenceLoopInterval: number = -1;
+
+    // used to accelerate the trial manager loop:
+    // true means there are updates and the trial loop should run a cycle immediately.
+    private shouldUpdateTrials: boolean = true;
+    // used to decide the environment assignment strategy:
+    // true means the GPU scheduler decides whether there is free resource for a new trial,
+    // false means one environment runs only one trial at a time.
+    private enableGpuScheduler: boolean = false;
+    // used to record whether the user wants to reuse environments
+    private reuseEnvironment: boolean = true;
+    private logCollection: string = 'none';
+
+    private gpuScheduler: GpuScheduler;
+
+    // used to reduce repeated log messages.
+    private isLoggedNoMoreEnvironment: boolean = false;
+    private isLoggedNoGpuAvailable: boolean = false;
+
+    // used to mark whether shared storage is used
+    private useSharedStorage: boolean = false;
+    private fileCopyCompleted: boolean = false;
+
+    private config: ExperimentConfig;
+
+    public static async construct(config: ExperimentConfig): Promise<TrialDispatcher> {
+        const instance = new TrialDispatcher(config);
+        await instance.asyncConstructor(config);
+        return instance;
+    }
+
+    private constructor(config: ExperimentConfig) {
+        this.log = getLogger('TrialDispatcher');
+        this.trials = new Map();
+        this.environments = new Map();
+        this.metricsEmitter = new EventEmitter();
+        this.experimentId = getExperimentId();
+        this.experimentRootDir = getExperimentRootDir();
+        this.commandChannelSet = new Set();
+
+        const logLevel = getLogLevel();
+        this.log.debug(`current folder ${__dirname}`);
+        // different source folder in Linux and Windows
+        if (logLevel == "debug" && (fs.existsSync("../../../src/nni_manager") || __dirname.endsWith("src\\nni_manager\\dist\\training_service\\reusable"))) {
+            this.log.debug("log level is debug and the source folder exists, so entering developing mode.");
+            this.isDeveloping = true;
+        }
+
+        this.commandEmitter = new EventEmitter();
+
+        this.gpuScheduler = new GpuScheduler();
+
+        this.config = config;
+
+        this.enableGpuScheduler = !!config.trialGpuNumber;
+        if (this.enableGpuScheduler) {
+            this.log.info(`TrialDispatcher: GPU scheduler is enabled.`);
+        }
+    }
+
+    private async asyncConstructor(config: ExperimentConfig): Promise<void> {
+        await validateCodeDir(config.trialCodeDirectory);
+
+        const serviceConfigs = Array.isArray(config.trainingService) ?
config.trainingService : [ config.trainingService ]; + const servicePromises = serviceConfigs.map(serviceConfig => createEnvironmentService(serviceConfig)); + this.environmentServiceList = await Promise.all(servicePromises); + + this.environmentMaintenceLoopInterval = Math.max( + ...this.environmentServiceList.map((env) => env.environmentMaintenceLoopInterval) + ); + + for (const env of this.environmentServiceList) { + env.initCommandChannel(this.commandEmitter); + this.commandChannelSet.add(env.getCommandChannel); + } + + if (this.config.sharedStorage !== undefined) { + await this.initializeSharedStorage(this.config.sharedStorage); + } + } + + public async listTrialJobs(): Promise { + const trials: TrialDetail[] = []; + + for (const key of this.trials.keys()) { + trials.push(await this.getTrialJob(key)); + } + + return trials; + } + + public async getTrialJob(trialJobId: string): Promise { + const trial: TrialDetail | undefined = this.trials.get(trialJobId); + if (trial === undefined) { + throw new Error(`trial job ${trialJobId} not found`); + } + + return trial; + } + + public async getTrialFile(_trialJobId: string, _fileName: string): Promise { + throw new MethodNotImplementedError(); + } + + public async submitTrialJob(form: TrialJobApplicationForm): Promise { + const trialId: string = uniqueString(5); + + const trialJobDetail: TrialDetail = new TrialDetail(trialId, "WAITING", Date.now(), "", form); + + this.trials.set(trialId, trialJobDetail); + + return trialJobDetail; + } + + // to support multi phase + public async updateTrialJob(trialJobId: string, form: TrialJobApplicationForm): Promise { + const trialDetail = await this.getTrialJob(trialJobId); + const environment = trialDetail.environment; + if (environment === undefined) { + throw new Error(`TrialDispatcher: trial ${trialJobId}'s env shouldn't be undefined in updateTrialJob.`); + } + if (environment.environmentService === undefined) { + throw new Error(`Environment ${environment.id} does not assigned environment service.`); + } + + const message = { + "trialId": trialJobId, + "parameters": form.hyperParameters, + } + await environment.environmentService.getCommandChannel.sendCommand(environment, SEND_TRIAL_JOB_PARAMETER, message); + + return trialDetail; + } + + public async cancelTrialJob(trialJobId: string, isEarlyStopped?: boolean | undefined): Promise { + const trial = await this.getTrialJob(trialJobId); + switch (trial.status) { + case "RUNNING": + case "WAITING": + case "UNKNOWN": + { + const environment = trial.environment; + if (environment && environment.environmentService) { + await environment.environmentService.getCommandChannel.sendCommand(environment, KILL_TRIAL_JOB, trial.id); + trial.isEarlyStopped = isEarlyStopped; + trial.status = trial.isEarlyStopped === true ? 
+ 'EARLY_STOPPED' : 'USER_CANCELED'; + this.releaseEnvironment(trial); + } + } + break; + } + } + + private getStorageService(environmentService: EnvironmentService): StorageService { + let storageService: StorageService; + if (this.useSharedStorage) { + this.log.debug(`TrialDispatcher: use shared storage service.`); + storageService = component.get(SharedStorageService).storageService; + } else if (environmentService.hasStorageService) { + this.log.debug(`TrialDispatcher: use existing storage service.`); + storageService = component.get(StorageService); + } else { + this.log.debug(`TrialDispatcher: create temp storage service in temp folder.`); + storageService = new MountedStorageService(); + const environmentLocalTempFolder = path.join(this.experimentRootDir, "environment-temp"); + storageService.initialize(this.config.trialCodeDirectory, environmentLocalTempFolder); + } + return storageService; + } + public async run(): Promise<void> { + await Promise.all(this.environmentServiceList.map(env => env.init())); + for (const environmentService of this.environmentServiceList) { + await environmentService.getCommandChannel.start(); + this.log.info(`TrialDispatcher: started channel: ${environmentService.getCommandChannel.constructor.name}`); + + this.log.info(`TrialDispatcher: copying code.`); + if (this.useSharedStorage) { + if (this.fileCopyCompleted) { + continue; + } + } + const storageService: StorageService = this.getStorageService(environmentService); + + // Copy the compressed file to remoteDirectory and delete it + const codeDir = path.resolve(this.config.trialCodeDirectory); + const envDir = storageService.joinPath("envs"); + const codeFileName = await storageService.copyDirectory(codeDir, envDir, true); + storageService.rename(codeFileName, "nni-code.tar.gz"); + + const installFileName = storageService.joinPath(envDir, `install_nni.sh`); + const installFileNameForWin = storageService.joinPath(envDir, `install_nni.ps1`); + await storageService.save(CONTAINER_INSTALL_NNI_SHELL_FORMAT, installFileName); + await storageService.save(CONTAINER_INSTALL_NNI_SHELL_FORMAT_FOR_WIN, installFileNameForWin); + + if (this.isDeveloping) { + let trialToolsPath = path.join(__dirname, "../../../../../tools/nni_trial_tool"); + if (false === fs.existsSync(trialToolsPath)) { + trialToolsPath = path.join(__dirname, "..\\..\\..\\..\\..\\tools\\nni_trial_tool"); + } + await storageService.copyDirectory(trialToolsPath, envDir, true); + } + + if (this.useSharedStorage) { + this.fileCopyCompleted = true; + } + } + // start handling commands from the channel + this.commandEmitter.on("command", (command: Command): void => { + this.handleCommand(command).catch((err: Error) => { + this.log.error(`TrialDispatcher: error on handling env ${command.environment.id} command: ${command.command}, data: ${command.data}, error: ${err}`); + }) + }); + await this.prefetchEnvironments(); + this.log.info(`TrialDispatcher: run loop started.`); + const promiseList: Promise<void>[] = []; + for (const commandChannel of this.commandChannelSet) { + promiseList.push(commandChannel.run()); + } + promiseList.push(this.environmentMaintenanceLoop()); + promiseList.push(this.trialManagementLoop()); + await Promise.all(promiseList); + } + + public addTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.on('metric', listener); + } + + public removeTrialJobMetricListener(listener: (metric: TrialJobMetric) => void): void { + this.metricsEmitter.off('metric', listener); + } + + public async setClusterMetadata(_key: string, _value:
string): Promise<void> { return; } + public async getClusterMetadata(_key: string): Promise<string> { return ""; } + + public async stopEnvironment(environment: EnvironmentInformation): Promise<void> { + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environmentService!`); + } + this.log.info(`stopping environment ${environment.id}...`); + await environment.environmentService.stopEnvironment(environment); + this.log.info(`stopped environment ${environment.id}.`); + return; + } + + public async cleanUp(): Promise<void> { + if (this.commandEmitter === undefined) { + throw new Error(`TrialDispatcher: commandEmitter shouldn't be undefined in cleanUp.`); + } + this.stopping = true; + this.shouldUpdateTrials = true; + const environments = [...this.environments.values()]; + + const stopEnvironmentPromise: Promise<void>[] = []; + for (let index = 0; index < environments.length; index++) { + stopEnvironmentPromise.push(this.stopEnvironment(environments[index])); + } + await Promise.all(stopEnvironmentPromise); + this.commandEmitter.off("command", this.handleCommand); + for (const commandChannel of this.commandChannelSet) { + await commandChannel.stop(); + } + if (this.useSharedStorage) { + this.log.info(`stopping shared storage...`); + await component.get(SharedStorageService).cleanUp(); + this.log.info(`shared storage stopped.`); + } + } + + private async environmentMaintenanceLoop(): Promise<void> { + while (!this.stopping) { + const environments: EnvironmentInformation[] = []; + for (const environment of this.environments.values()) { + if (environment.isAlive === true) { + environments.push(environment); + } else { + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + await environment.environmentService.getCommandChannel.close(environment); + } + } + // Group environments according to environmentService + const environmentServiceDict: Map<EnvironmentService, EnvironmentInformation[]> = + new Map<EnvironmentService, EnvironmentInformation[]>(); + for (const environment of environments) { + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + if (!environmentServiceDict.has(environment.environmentService)) { + environmentServiceDict.set(environment.environmentService, [environment]); + } else { + const environmentsList: EnvironmentInformation[] | undefined = environmentServiceDict.get(environment.environmentService); + if (environmentsList === undefined) { + throw new Error(`Environment list not initialized!`); + } + environmentsList.push(environment); + environmentServiceDict.set(environment.environmentService, environmentsList); + } + } + // Refresh all environments + const taskList: Promise<void>[] = []; + for (const environmentService of environmentServiceDict.keys()) { + const environmentsList: EnvironmentInformation[] | undefined = environmentServiceDict.get(environmentService); + if (environmentsList) { + taskList.push(environmentService.refreshEnvironmentsStatus(environmentsList)); + } + } + await Promise.all(taskList); + + for (const environment of environments) { + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + const oldIsAlive = environment.isAlive; + switch (environment.status) { + case 'WAITING': + case 'RUNNING': + case 'UNKNOWN': + environment.isAlive = true; + break; + default: + environment.isAlive = false; + break; + } + if (oldIsAlive !== environment.isAlive) { + this.log.debug(`set environment ${environment.id} isAlive
from ${oldIsAlive} to ${environment.isAlive} because its status is ${environment.status}.`); + } + } + this.shouldUpdateTrials = true; + if (this.environmentMaintenceLoopInterval === -1) { + throw new Error("environmentMaintenceLoopInterval not initialized!"); + } + await delay(this.environmentMaintenceLoopInterval); + } + } + + private async trialManagementLoop(): Promise<void> { + const interval = 1; + + while (!this.stopping) { + let totalInterval = 1000; + while (totalInterval > 0) { + if (this.shouldUpdateTrials) { + this.shouldUpdateTrials = false; + break; + } + totalInterval -= interval; + await delay(interval); + } + + const toRefreshedTrials: TrialDetail[] = []; + for (const trial of this.trials.values()) { + if (trial.status === "RUNNING" || trial.status === "WAITING" || trial.status === "UNKNOWN") { + toRefreshedTrials.push(trial); + } + } + + if (toRefreshedTrials.length === 0) { + continue; + } + + let waitingTrials: TrialDetail[] = []; + let liveTrialsCount = 0; + for (const trial of toRefreshedTrials) { + const currentStatus = trial.status; + switch (currentStatus) { + case "RUNNING": + { + const environment = trial.environment; + + if (environment === undefined) { + this.log.error(`found running trial ${trial.id} with no environment, set trial to UNKNOWN.`); + trial.status = "UNKNOWN"; + liveTrialsCount++; + continue; + } + + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + trial.url = environment.trackingUrl; + const environmentStatus = environment.status; + + // if any node exits, make sure the whole trial is stopped. + if (trial.nodes.size > 0) { + const completedCount = trial.nodes.size; + let finalStatus: TrialJobStatus = "SUCCEEDED"; + let lastTimestamp: number | undefined; + this.log.debug(`found ${completedCount} completed trial node(s), nodeCount: ${environment.nodeCount}`); + + // if some trial process doesn't exit, kill it so the environment can take the next trial. + // for example, in horovod it's just a sleep command, with no impact on the trial result. + if (environment.nodeCount > completedCount) { + this.log.info(`stop partially completed trial ${trial.id}`); + await environment.environmentService.getCommandChannel.sendCommand(environment, KILL_TRIAL_JOB, trial.id); + } + for (const node of trial.nodes.values()) { + if (node.status === "FAILED") { + finalStatus = "FAILED"; + } + if (node.endTime !== undefined) { + if (lastTimestamp === undefined) { + lastTimestamp = node.endTime; + } else { + lastTimestamp = Math.max(node.endTime, lastTimestamp); + } + } + } + trial.status = finalStatus; + if (lastTimestamp !== undefined) { + trial.endTime = lastTimestamp; + } + this.releaseEnvironment(trial); + } else if (environmentStatus !== "RUNNING") { + this.log.error(`found running trial ${trial.id} on '${environment.envId}' with '${environmentStatus}', set trial to environment status.`); + this.releaseEnvironment(trial); + trial.status = environmentStatus; + } else { + liveTrialsCount++; + } + } + break; + case "WAITING": + case "UNKNOWN": + // deal with it later, when there is a free environment.
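+ // waiting/unknown trials are only queued here; the actual assignment happens below, once the reusable environments for this cycle have been collected.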
+ waitingTrials.push(trial); + liveTrialsCount++; + break; + } + } + + let liveEnvironmentsCount = 0; + const reusableEnvironments: EnvironmentInformation[] = []; + for (const environment of this.environments.values()) { + if (environment.isAlive === true) { + liveEnvironmentsCount++; + if (environment.status === "RUNNING" && environment.isRunnerReady) { + // if the environment is not reusable and has been used, stop it and do not count it as idle; + const reuseMode = Array.isArray(this.config.trainingService) || (this.config.trainingService as any).reuseMode; + if ( + 0 === environment.runningTrialCount && + reuseMode === false && + environment.assignedTrialCount > 0 + ) { + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + await environment.environmentService.stopEnvironment(environment); + continue; + } + + // if the GPU scheduler is not enabled and there is a running trial, skip the environment. + if (false === this.enableGpuScheduler && environment.runningTrialCount > 0) { + continue; + } + + reusableEnvironments.push(environment); + } + } + } + + let neededEnvironmentCount = 0; + if (true === this.enableGpuScheduler) { + let noGpuAvailable: boolean = false; + while (waitingTrials.length > 0) { + // skip the following trials if the first trial cannot find an available GPU. + if (true === noGpuAvailable) { + // break the loop to try again next time. + break; + } + const trial = waitingTrials.shift(); + if (undefined === trial) { + throw new Error(`TrialDispatcher: waiting trial shouldn't be undefined!`); + } + const defaultGpuNum = this.config.trialGpuNumber; + const result = this.gpuScheduler.scheduleMachine(reusableEnvironments, trial.form.placementConstraint!, defaultGpuNum, trial); + switch (result.resultType) { + case ScheduleResultType.REQUIRE_EXCEED_TOTAL: + { + if (liveEnvironmentsCount === 0) { + this.log.debug(`TrialDispatcher: no live environment, so request one.`); + neededEnvironmentCount = 1; + waitingTrials = []; + this.isLoggedNoGpuAvailable = false; + } else if (reusableEnvironments.length > 0) { + const errorMessage: string = `TrialDispatcher: REQUIRE_EXCEED_TOTAL: required GPU number ${defaultGpuNum} is too large, no machine can meet it`; + this.log.error(errorMessage); + throw new NNIError(NNIErrorNames.RESOURCE_NOT_AVAILABLE, errorMessage); + } else { + if (false === this.isLoggedNoGpuAvailable) { + this.log.debug(`TrialDispatcher: wait GPU, live environment ${liveEnvironmentsCount}, no reusable, REQUIRE_EXCEED_TOTAL.`); + this.isLoggedNoGpuAvailable = true; + } + } + break; + } + case ScheduleResultType.TMP_NO_AVAILABLE_GPU: + { + if (false === this.isLoggedNoGpuAvailable) { + this.log.debug(`TrialDispatcher: wait GPU, live environment ${liveEnvironmentsCount}, reusable ${reusableEnvironments.length}, TMP_NO_AVAILABLE_GPU.`); + this.isLoggedNoGpuAvailable = true; + } + + // if some environments are alive but not ready, there is no need to create more.
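+ // i.e. only request a new environment when every live environment is already booted and counted as reusable, yet none of them has a free GPU.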
+ if (liveEnvironmentsCount <= reusableEnvironments.length) { + neededEnvironmentCount = 1; + this.isLoggedNoGpuAvailable = false; + this.log.info(`TrialDispatcher: ${liveEnvironmentsCount} live env(s) and ${reusableEnvironments.length} reusable, but no GPU available, so request a new one.`); + } + noGpuAvailable = true; + } + break; + case ScheduleResultType.SUCCEED: + { + const environment = result.environment; + if (undefined === environment) { + throw new Error(`TrialDispatcher: scheduled env shouldn't be undefined!`); + } + trial.assignedGpus = result.gpuIndices; + await this.allocateEnvironment(trial, environment); + this.isLoggedNoGpuAvailable = false; + } + break; + default: + throw new Error(`TrialDispatcher: unknown GPU scheduler result type: ${result.resultType}`); + } + } + } else { + while (reusableEnvironments.length > 0 && waitingTrials.length > 0) { + const trial = waitingTrials.shift(); + const idleEnvironment = reusableEnvironments.shift(); + if (trial !== undefined && idleEnvironment !== undefined) { + await this.allocateEnvironment(trial, idleEnvironment); + } + } + neededEnvironmentCount = liveTrialsCount - liveEnvironmentsCount; + } + + if (neededEnvironmentCount > 0) { + let requestedCount = 0; + let hasMoreEnvironments = false; + for (let index = 0; index < neededEnvironmentCount; index++) { + const environmentService: EnvironmentService | undefined = this.selectEnvironmentService(); + if (environmentService !== undefined) { + hasMoreEnvironments = true; + await this.requestEnvironment(environmentService); + requestedCount++; + this.isLoggedNoMoreEnvironment = false; + } else { + if (this.isLoggedNoMoreEnvironment === false) { + this.isLoggedNoMoreEnvironment = true; + this.log.info(`no more environments available so far, so skip requesting an environment.`); + } + } + } + if (hasMoreEnvironments === true || requestedCount > 0) { + this.log.info(`requested new environment, live trials: ${liveTrialsCount}, ` + + `live environments: ${liveEnvironmentsCount}, neededEnvironmentCount: ${neededEnvironmentCount}, ` + + `requestedCount: ${requestedCount}`); + } + } + + } + } + + // Select an environment service (platform) for a new environment + private selectEnvironmentService(): EnvironmentService | undefined { + const validEnvironmentServiceList = []; + for (const environmentService of this.environmentServiceList) { + if (environmentService.hasMoreEnvironments) { + validEnvironmentServiceList.push(environmentService); + } + } + if (validEnvironmentServiceList.length === 0) { + return undefined; + } + // Random scheduler + return randomSelect(validEnvironmentServiceList); + } + + private async prefetchEnvironments(): Promise<void> { + for (const environmentService of this.environmentServiceList) { + const number = environmentService.prefetchedEnvironmentCount; + this.log.info(`Initialize environments, total number: ${number}`); + for (let index = 0; index < number; index++) { + await this.requestEnvironment(environmentService); + } + } + } + + private async setEnvironmentSetting(environment: EnvironmentInformation): Promise<void> { + if (environment.environmentService === undefined) { + throw new Error(`Environment service for ${environment.id} not initialized!`); + } + const environmentService = environment.environmentService; + const runnerSettings: RunnerSettings = new RunnerSettings(); + runnerSettings.nniManagerIP = this.config.nniManagerIp === undefined ?
await getIPV4Address() : this.config.nniManagerIp; + runnerSettings.nniManagerPort = getBasePort() + 1; + runnerSettings.commandChannel = environmentService.getCommandChannel.channelName; + runnerSettings.enableGpuCollector = this.enableGpuScheduler; + runnerSettings.command = this.config.trialCommand; + runnerSettings.nniManagerVersion = this.enableVersionCheck ? await getVersion() : ''; + runnerSettings.logCollection = this.logCollection; + runnerSettings.platform = environmentService.getName; + runnerSettings.experimentId = this.experimentId; + const storageService: StorageService = this.getStorageService(environmentService); + const envDir = storageService.joinPath("envs"); + const runnerSettingsConfig = storageService.joinPath(envDir, environment.id, "settings.json"); + await storageService.save(JSON.stringify(runnerSettings), runnerSettingsConfig); + } + + private async requestEnvironment(environmentService: EnvironmentService): Promise<void> { + if (this.stopping) { + this.log.info(`Experiment is stopping, stop creating new environments.`); + return; + } + const envId = uniqueString(5); + const envName = `nni_exp_${this.experimentId}_env_${envId}`; + const environment = environmentService.createEnvironmentInformation(envId, envName); + environment.environmentService = environmentService; + this.log.info(`Assign environment service ${environmentService.getName} to environment ${envId}`); + environment.command = `sh ../install_nni.sh && python3 -m nni.tools.trial_tool.trial_runner`; + + if (this.isDeveloping) { + environment.command = "[ -d \"nni_trial_tool\" ] && echo \"nni_trial_tool exists already\" || (mkdir ./nni_trial_tool && tar -xof ../nni_trial_tool.tar.gz -C ./nni_trial_tool) && pip3 install websockets && " + environment.command; + } + + environment.command = `mkdir -p envs/${envId} && cd envs/${envId} && ${environment.command}`; + + environment.useSharedStorage = this.useSharedStorage; + // Generate a settings.json file per environment to avoid conflicts + await this.setEnvironmentSetting(environment); + + await environmentService.startEnvironment(environment); + this.environments.set(environment.id, environment); + + if (environment.status === "FAILED") { + environment.isAlive = false; + throw new Error(`error on requesting environment ${environment.envId}, please check the log for more details.`); + } else { + environment.isAlive = true; + } + await environment.environmentService.getCommandChannel.open(environment); + this.log.info(`requested environment ${environment.id} and job id is ${environment.envId}.`); + } + + private async allocateEnvironment(trial: TrialDetail, environment: EnvironmentInformation): Promise<void> { + if (trial.environment) { + throw new Error(`TrialDispatcher: trial ${trial.id} has already been assigned environment ${trial.environment.id}, cannot assign it to ${environment.id}!`); + } + if (environment.runningTrialCount > 0 && false === this.enableGpuScheduler) { + throw new Error(`TrialDispatcher: environment ${environment.id} has a running trial, and the GPU scheduler is not enabled, so it cannot be assigned again!`); + } + this.log.info(`assigning environment ${environment.id} to trial ${trial.id}.`); + + // convert assigned GPUs to a string for the NVIDIA visible-devices setting; + // undefined means no constraint, [] means no GPU is visible.
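+ // e.g. assignedGpus with indices [0, 2] becomes "0,2", which the trial runner can apply as a CUDA_VISIBLE_DEVICES-style setting.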
+ let gpuIndices: string | undefined = undefined; + if (undefined !== this.config.trialGpuNumber) { + const gpuArray: number[] = []; + if (undefined !== trial.assignedGpus) { + trial.assignedGpus.forEach((value) => { + gpuArray.push(value.index); + }); + } + gpuIndices = gpuArray.join(','); + } + + environment.runningTrialCount++; + environment.assignedTrialCount++; + trial.environment = environment; + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} environmentService not initialized!`); + } + trial.message = `Platform: ${environment.environmentService.getName}, environment: ${environment.id}`; + if (this.useSharedStorage) { + const storageService = component.get(SharedStorageService).storageService; + trial.workingDirectory = storageService.joinPath('trials', trial.id); + } else if (environment.environmentService.hasStorageService) { + const storageService = component.get(StorageService); + trial.workingDirectory = storageService.joinPath('trials', trial.id); + } + trial.settings = { + trialId: trial.id, + gpuIndices: gpuIndices, + sequenceId: trial.form.sequenceId, + parameter: trial.form.hyperParameters, + } + trial.startTime = Date.now(); + trial.status = "RUNNING"; + if (environment.environmentService === undefined) { + throw new Error(`${environment.id} does not have an environment service!`); + } + await environment.environmentService.getCommandChannel.sendCommand(trial.environment, NEW_TRIAL_JOB, trial.settings); + } + + /** + * Release the environment resources assigned to the trial. + * @param trial + */ + private releaseEnvironment(trial: TrialDetail): void { + if (true === this.enableGpuScheduler) { + this.gpuScheduler.removeGpuReservation(trial); + } + if (trial.environment !== undefined) { + if (trial.environment.runningTrialCount <= 0) { + throw new Error(`TrialDispatcher: environment ${trial.environment.id} has no counted running trial!`); + } + trial.environment.runningTrialCount--; + trial.environment.latestTrialReleasedTime = Date.now(); + trial.environment = undefined; + } + } + + private async handleMetricData(trialId: string, data: any): Promise<void> { + if (Array.isArray(data)) { + for (const subItem of data) { + this.metricsEmitter.emit('metric', { + id: trialId, + data: subItem + }); + } + } else { + this.metricsEmitter.emit('metric', { + id: trialId, + data: data + }); + } + } + + private async handleStdout(commandData: any): Promise<void> { + const metricPattern: RegExp = /NNISDK_MEb'(?<metrics>.*a?)'$/gm; + const trialLogDir: string = path.join(getExperimentRootDir(), 'trials', commandData["trial"]); + mkDirPSync(trialLogDir); + const trialLogPath: string = path.join(trialLogDir, 'stdout_log_collection.log'); + try { + let skipLogging: boolean = false; + if (commandData["tag"] === 'trial' && commandData["msg"] !== undefined) { + const message: string = commandData["msg"]; + let metricsContent = metricPattern.exec(message); + while (metricsContent && metricsContent.groups) { + const key: string = 'metrics'; + const data = metricsContent.groups[key]; + await this.handleMetricData(commandData["trial"], data); + metricsContent = metricPattern.exec(message); + skipLogging = true; + } + } + + if (!skipLogging) { + // Construct a write stream to write the remote trial's log into a local file + const writeStream: Writable = fs.createWriteStream(trialLogPath, { + flags: 'a+', + encoding: 'utf8', + autoClose: true + }); + + writeStream.write(String.Format('{0}\n', commandData["msg"])); + writeStream.end(); + } + } catch (err) { + this.log.error(`TrialDispatcher:
handleStdout error: ${err}`); + } + } + + private async handleCommand(command: Command): Promise<void> { + this.log.debug(`TrialDispatcher: env ${command.environment.id} received command ${command.command}.`); + const environment = command.environment; + const data = command.data; + const nodeId = data["node"]; + switch (command.command) { + case REPORT_METRIC_DATA: + this.log.error(`TrialDispatcher: TODO: handling a direct REPORT_METRIC_DATA command is not implemented yet.`); + break; + case STDOUT: + await this.handleStdout(data); + break; + case INITIALIZED: + { + let isAllReady = true; + if (environment.nodeCount > 1) { + let node = environment.nodes.get(nodeId); + if (node === undefined) { + node = new NodeInformation(nodeId); + environment.nodes.set(nodeId, node); + } + const oldNodeStatus = node.status; + if (oldNodeStatus === "UNKNOWN" || oldNodeStatus === "WAITING") { + node.status = "RUNNING"; + } + + if (environment.nodes.size === environment.nodeCount) { + for (const node of environment.nodes.values()) { + if (node.status !== "RUNNING") { + isAllReady = false; + break; + } + } + } else { + isAllReady = false; + } + } + + // a single node is always ready, so set the env status + if (isAllReady) { + environment.isRunnerReady = true; + this.log.info(`TrialDispatcher: env ${environment.id} received initialized message and runner is ready, env status: ${environment.status}.`); + } + } + break; + case VERSION_CHECK: + { + if (this.enableVersionCheck) { + const checkResultSuccess: boolean = data["tag"] === 'VCSuccess'; + if (checkResultSuccess) { + this.log.info(`TrialDispatcher: version check in trialKeeper succeeded!`); + } else { + const errorMessage = `TrialDispatcher: version check error, ${data["msg"]}!`; + this.log.error(errorMessage); + } + } + } + break; + case GPU_INFO: + { + const gpuData = (data) as TrialGpuSummary; + environment.setGpuSummary(nodeId, gpuData); + } + break; + case TRIAL_END: + { + const trialId = data["trial"]; + const trial = await this.getTrialJob(trialId); + const code = parseInt(data["code"]); + const timestamp = parseInt(data["time"]); + let exitStatus: TrialJobStatus = "SUCCEEDED"; + if (code !== 0) { + exitStatus = "FAILED"; + } + + let node = trial.nodes.get(nodeId); + if (node === undefined) { + node = new NodeInformation(nodeId); + trial.nodes.set(nodeId, node); + } + if (undefined === node) { + throw new Error("node is impossible to be undefined (see above code), but make eslint happy!"); + } + node.status = exitStatus; + node.endTime = timestamp; + } + break; + } + this.shouldUpdateTrials = true; + } + + private async initializeSharedStorage(config: SharedStorageConfig): Promise<void> { + switch (config.storageType) { + case 'NFS': + Container.bind(SharedStorageService) + .to(NFSSharedStorageService) + .scope(Scope.Singleton); + break; + case 'AzureBlob': + Container.bind(SharedStorageService) + .to(AzureBlobSharedStorageService) + .scope(Scope.Singleton); + break; + default: { + const errorMessage = `Shared storage type '${config.storageType}' is not supported.`; + this.log.error(errorMessage); + return Promise.reject(errorMessage); + } + } + await component.get(SharedStorageService).config(config); + this.useSharedStorage = true; + return Promise.resolve(); + } + + public async getTrialOutputLocalPath(trialJobId: string): Promise<string> { + // TODO: support non-shared storage + if (this.useSharedStorage) { + const localWorkingRoot = component.get(SharedStorageService).localWorkingRoot; + return Promise.resolve(path.join(localWorkingRoot, 'trials', trialJobId)); + } else { +
return Promise.reject(new Error('Only shared storage is supported right now.')); + } + } + + public async fetchTrialOutput(trialJobId: string, subpath: string | undefined): Promise<void> { + // TODO: support non-shared storage + let trialLocalPath = await this.getTrialOutputLocalPath(trialJobId); + if (subpath !== undefined) { + trialLocalPath = path.join(trialLocalPath, subpath); + } + if (fs.existsSync(trialLocalPath)) { + return Promise.resolve(); + } else { + return Promise.reject(new Error('Trial local path does not exist.')); + } + } +} + +export { TrialDispatcher }; diff --git a/ts/nni_manager/tsconfig.json b/ts/nni_manager/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..4ec304054000d856f6d45cb3b759faaba25638e8 --- /dev/null +++ b/ts/nni_manager/tsconfig.json @@ -0,0 +1,31 @@ +{ + "include": [ + "common/**/*", + "core/**/*", + "rest_server/**/*", + "training_service/**/*", + "main.ts" + ], + "compilerOptions": { + "allowUnreachableCode": false, + "noImplicitReturns": true, + "noPropertyAccessFromIndexSignature": true, + "noUnusedParameters": true, + "strict": true, + "baseUrl": ".", + "module": "CommonJS", + "typeRoots": [ + "node_modules/@types", + "types" + ], + "outDir": "dist", + "removeComments": true, + "allowSyntheticDefaultImports": true, + "esModuleInterop": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "target": "ESNext", + "incremental": true, + "tsBuildInfoFile": "dist/nni_manager.tsbuildinfo" + } +} diff --git a/ts/nni_manager/types/child-process-promise/index.d.ts b/ts/nni_manager/types/child-process-promise/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0d0765e7e77c7e8b2bdc21281712dd4fc4dc8118 --- /dev/null +++ b/ts/nni_manager/types/child-process-promise/index.d.ts @@ -0,0 +1,11 @@ +declare module 'child-process-promise' { + export function exec(command: string): Promise<childProcessPromise.Result>; + + export namespace childProcessPromise { + interface Result { + stdout: string; + stderr: string; + message: string; + } + } +} \ No newline at end of file diff --git a/ts/nni_manager/types/tail-stream/index.d.ts b/ts/nni_manager/types/tail-stream/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f7f3abb376c75a0cedbeb7fb4b2710f322df0db1 --- /dev/null +++ b/ts/nni_manager/types/tail-stream/index.d.ts @@ -0,0 +1,8 @@ +declare module 'tail-stream' { + export interface Stream { + on(type: 'data', callback: (data: Buffer) => void): void; + end(data: number): void; + emit(data: string): void; + } + export function createReadStream(path: string): Stream; +} \ No newline at end of file diff --git a/ts/nni_manager/types/webhdfs/index.d.ts b/ts/nni_manager/types/webhdfs/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7ee13b37f090804c1d2a4c99f610f3ab8bf84d84 --- /dev/null +++ b/ts/nni_manager/types/webhdfs/index.d.ts @@ -0,0 +1,3 @@ +declare module 'webhdfs' { + export function createClient(arg: any): any; +} \ No newline at end of file diff --git a/ts/nni_manager/yarn.lock b/ts/nni_manager/yarn.lock new file mode 100644 index 0000000000000000000000000000000000000000..45e28d0e637483d9f242d23127face6037f794ba --- /dev/null +++ b/ts/nni_manager/yarn.lock @@ -0,0 +1,5787 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1 + + +"@babel/code-frame@7.12.11": + version "7.12.11" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.11.tgz#f4ad435aa263db935b8f10f2c552d23fb716a63f" + integrity sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw== + dependencies: + "@babel/highlight" "^7.10.4" + +"@babel/code-frame@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.13.tgz#dcfc826beef65e75c50e21d3837d7d95798dd658" + integrity sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g== + dependencies: + "@babel/highlight" "^7.12.13" + +"@babel/compat-data@^7.14.4": + version "7.14.4" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.14.4.tgz#45720fe0cecf3fd42019e1d12cc3d27fadc98d58" + integrity sha512-i2wXrWQNkH6JplJQGn3Rd2I4Pij8GdHkXwHMxm+zV5YG/Jci+bCNrWZEWC4o+umiDkRrRs4dVzH3X4GP7vyjQQ== + +"@babel/core@^7.7.5": + version "7.14.3" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.14.3.tgz#5395e30405f0776067fbd9cf0884f15bfb770a38" + integrity sha512-jB5AmTKOCSJIZ72sd78ECEhuPiDMKlQdDI/4QRI6lzYATx5SSogS1oQA2AoPecRCknm30gHi2l+QVvNUu3wZAg== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.14.3" + "@babel/helper-compilation-targets" "^7.13.16" + "@babel/helper-module-transforms" "^7.14.2" + "@babel/helpers" "^7.14.0" + "@babel/parser" "^7.14.3" + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.14.2" + "@babel/types" "^7.14.2" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.1.2" + semver "^6.3.0" + source-map "^0.5.0" + +"@babel/generator@^7.14.2", "@babel/generator@^7.14.3": + version "7.14.3" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.14.3.tgz#0c2652d91f7bddab7cccc6ba8157e4f40dcedb91" + integrity sha512-bn0S6flG/j0xtQdz3hsjJ624h3W0r3llttBMfyHX3YrZ/KtLYr15bjA0FXkgW7FpvrDuTuElXeVjiKlYRpnOFA== + dependencies: + "@babel/types" "^7.14.2" + jsesc "^2.5.1" + source-map "^0.5.0" + +"@babel/helper-compilation-targets@^7.13.16": + version "7.14.4" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.14.4.tgz#33ebd0ffc34248051ee2089350a929ab02f2a516" + integrity sha512-JgdzOYZ/qGaKTVkn5qEDV/SXAh8KcyUVkCoSWGN8T3bwrgd6m+/dJa2kVGi6RJYJgEYPBdZ84BZp9dUjNWkBaA== + dependencies: + "@babel/compat-data" "^7.14.4" + "@babel/helper-validator-option" "^7.12.17" + browserslist "^4.16.6" + semver "^6.3.0" + +"@babel/helper-function-name@^7.14.2": + version "7.14.2" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.14.2.tgz#397688b590760b6ef7725b5f0860c82427ebaac2" + integrity sha512-NYZlkZRydxw+YT56IlhIcS8PAhb+FEUiOzuhFTfqDyPmzAhRge6ua0dQYT/Uh0t/EDHq05/i+e5M2d4XvjgarQ== + dependencies: + "@babel/helper-get-function-arity" "^7.12.13" + "@babel/template" "^7.12.13" + "@babel/types" "^7.14.2" + +"@babel/helper-get-function-arity@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz#bc63451d403a3b3082b97e1d8b3fe5bd4091e583" + integrity sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-member-expression-to-functions@^7.13.12": + version "7.13.12" + resolved 
"https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz#dfe368f26d426a07299d8d6513821768216e6d72" + integrity sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw== + dependencies: + "@babel/types" "^7.13.12" + +"@babel/helper-module-imports@^7.13.12": + version "7.13.12" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz#c6a369a6f3621cb25da014078684da9196b61977" + integrity sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA== + dependencies: + "@babel/types" "^7.13.12" + +"@babel/helper-module-transforms@^7.14.2": + version "7.14.2" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.14.2.tgz#ac1cc30ee47b945e3e0c4db12fa0c5389509dfe5" + integrity sha512-OznJUda/soKXv0XhpvzGWDnml4Qnwp16GN+D/kZIdLsWoHj05kyu8Rm5kXmMef+rVJZ0+4pSGLkeixdqNUATDA== + dependencies: + "@babel/helper-module-imports" "^7.13.12" + "@babel/helper-replace-supers" "^7.13.12" + "@babel/helper-simple-access" "^7.13.12" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/helper-validator-identifier" "^7.14.0" + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.14.2" + "@babel/types" "^7.14.2" + +"@babel/helper-optimise-call-expression@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz#5c02d171b4c8615b1e7163f888c1c81c30a2aaea" + integrity sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-replace-supers@^7.13.12": + version "7.14.4" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.14.4.tgz#b2ab16875deecfff3ddfcd539bc315f72998d836" + integrity sha512-zZ7uHCWlxfEAAOVDYQpEf/uyi1dmeC7fX4nCf2iz9drnCwi1zvwXL3HwWWNXUQEJ1k23yVn3VbddiI9iJEXaTQ== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.13.12" + "@babel/helper-optimise-call-expression" "^7.12.13" + "@babel/traverse" "^7.14.2" + "@babel/types" "^7.14.4" + +"@babel/helper-simple-access@^7.13.12": + version "7.13.12" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz#dd6c538afb61819d205a012c31792a39c7a5eaf6" + integrity sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA== + dependencies: + "@babel/types" "^7.13.12" + +"@babel/helper-split-export-declaration@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz#e9430be00baf3e88b0e13e6f9d4eaf2136372b05" + integrity sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-validator-identifier@^7.14.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz#d26cad8a47c65286b15df1547319a5d0bcf27288" + integrity sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A== + +"@babel/helper-validator-option@^7.12.17": + version "7.12.17" + resolved 
"https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz#d1fbf012e1a79b7eebbfdc6d270baaf8d9eb9831" + integrity sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw== + +"@babel/helpers@^7.14.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.14.0.tgz#ea9b6be9478a13d6f961dbb5f36bf75e2f3b8f62" + integrity sha512-+ufuXprtQ1D1iZTO/K9+EBRn+qPWMJjZSw/S0KlFrxCw4tkrzv9grgpDHkY9MeQTjTY8i2sp7Jep8DfU6tN9Mg== + dependencies: + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.14.0" + "@babel/types" "^7.14.0" + +"@babel/highlight@^7.10.4", "@babel/highlight@^7.12.13": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.14.0.tgz#3197e375711ef6bf834e67d0daec88e4f46113cf" + integrity sha512-YSCOwxvTYEIMSGaBQb5kDDsCopDdiUGsqpatp3fOlI4+2HQSkTmEVWnVuySdAC5EWCqSWWTv0ib63RjR7dTBdg== + dependencies: + "@babel/helper-validator-identifier" "^7.14.0" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@babel/parser@^7.12.13", "@babel/parser@^7.14.2", "@babel/parser@^7.14.3": + version "7.14.4" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.14.4.tgz#a5c560d6db6cd8e6ed342368dea8039232cbab18" + integrity sha512-ArliyUsWDUqEGfWcmzpGUzNfLxTdTp6WU4IuP6QFSp9gGfWS6boxFCkJSJ/L4+RG8z/FnIU3WxCk6hPL9SSWeA== + +"@babel/template@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.12.13.tgz#530265be8a2589dbb37523844c5bcb55947fb327" + integrity sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/parser" "^7.12.13" + "@babel/types" "^7.12.13" + +"@babel/traverse@^7.14.0", "@babel/traverse@^7.14.2": + version "7.14.2" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.14.2.tgz#9201a8d912723a831c2679c7ebbf2fe1416d765b" + integrity sha512-TsdRgvBFHMyHOOzcP9S6QU0QQtjxlRpEYOy3mcCO5RgmC305ki42aSAmfZEMSSYBla2oZ9BMqYlncBaKmD/7iA== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.14.2" + "@babel/helper-function-name" "^7.14.2" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/parser" "^7.14.2" + "@babel/types" "^7.14.2" + debug "^4.1.0" + globals "^11.1.0" + +"@babel/types@^7.12.13", "@babel/types@^7.13.12", "@babel/types@^7.14.0", "@babel/types@^7.14.2", "@babel/types@^7.14.4": + version "7.14.4" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.14.4.tgz#bfd6980108168593b38b3eb48a24aa026b919bc0" + integrity sha512-lCj4aIs0xUefJFQnwwQv2Bxg7Omd6bgquZ6LGC+gGMh6/s5qDVfjuCMlDmYQ15SLsWHd9n+X3E75lKIhl5Lkiw== + dependencies: + "@babel/helper-validator-identifier" "^7.14.0" + to-fast-properties "^2.0.0" + +"@eslint/eslintrc@^0.4.2": + version "0.4.2" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-0.4.2.tgz#f63d0ef06f5c0c57d76c4ab5f63d3835c51b0179" + integrity sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg== + dependencies: + ajv "^6.12.4" + debug "^4.1.1" + espree "^7.3.0" + globals "^13.9.0" + ignore "^4.0.6" + import-fresh "^3.2.1" + js-yaml "^3.13.1" + minimatch "^3.0.4" + strip-json-comments "^3.1.1" + +"@gar/promisify@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.2.tgz#30aa825f11d438671d585bd44e7fd564535fc210" + integrity sha512-82cpyJyKRoQoRi+14ibCeGPu0CwypgtBAdBhq1WfvagpCZNKqwXbKwXllYSMG91DhmG4jt9gN8eP6lGOtozuaw== + 
+"@hapi/hoek@^9.0.0": + version "9.2.0" + resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.2.0.tgz#f3933a44e365864f4dad5db94158106d511e8131" + integrity sha512-sqKVVVOe5ivCaXDWivIJYVSaEgdQK9ul7a4Kity5Iw7u9+wBAPbX1RMSnLLmp7O4Vzj0WOWwMAJsTL00xwaNug== + +"@hapi/topo@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.0.0.tgz#c19af8577fa393a06e9c77b60995af959be721e7" + integrity sha512-tFJlT47db0kMqVm3H4nQYgn6Pwg10GTZHb1pwmSiv1K4ks6drQOtfEF5ZnPjkvC+y4/bUPHK+bc87QvLcL+WMw== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@isaacs/string-locale-compare@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@isaacs/string-locale-compare/-/string-locale-compare-1.1.0.tgz#291c227e93fd407a96ecd59879a35809120e432b" + integrity sha512-SQ7Kzhh9+D+ZW9MA0zkYv3VXhIDNx+LzM6EJ+/65I3QY+enU6Itte7E5XX7EWrqLW2FN4n06GWzBnPoC3th2aQ== + +"@istanbuljs/load-nyc-config@^1.0.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" + integrity sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ== + dependencies: + camelcase "^5.3.1" + find-up "^4.1.0" + get-package-type "^0.1.0" + js-yaml "^3.13.1" + resolve-from "^5.0.0" + +"@istanbuljs/schema@^0.1.2": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" + integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + +"@nodelib/fs.walk@^1.2.3": + version "1.2.7" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.7.tgz#94c23db18ee4653e129abd26fb06f870ac9e1ee2" + integrity sha512-BTIhocbPBSrRmHxOAJFtR18oLhxTtAFDAvL8hY1S3iU8k+E60W/YFs4jrixGzQjMpF4qPXxIQHcjVD9dz1C2QA== + dependencies: + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" + +"@npmcli/arborist@^4.0.0", "@npmcli/arborist@^4.1.1": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-4.1.1.tgz#a36202f85b0b8d47f5fc0e056e9836282bc5a38c" + integrity sha512-sASzHngGWt8l6ic1VP0Qf3+ral/RL8L+MculTp2w8NYjjkDiurByOT39KiYmLwpeJ2GQoDR/rdhEwnII8wZQ9g== + dependencies: + "@isaacs/string-locale-compare" "^1.1.0" + "@npmcli/installed-package-contents" "^1.0.7" + "@npmcli/map-workspaces" "^2.0.0" + "@npmcli/metavuln-calculator" "^2.0.0" + "@npmcli/move-file" "^1.1.0" + "@npmcli/name-from-folder" "^1.0.1" + "@npmcli/node-gyp" "^1.0.3" + "@npmcli/package-json" "^1.0.1" + "@npmcli/run-script" "^2.0.0" + bin-links "^2.3.0" + cacache "^15.0.3" + common-ancestor-path "^1.0.1" + json-parse-even-better-errors "^2.3.1" + json-stringify-nice "^1.1.4" + mkdirp "^1.0.4" + mkdirp-infer-owner "^2.0.0" + npm-install-checks "^4.0.0" + npm-package-arg "^8.1.5" + npm-pick-manifest "^6.1.0" + npm-registry-fetch "^11.0.0" + 
pacote "^12.0.2" + parse-conflict-json "^2.0.1" + proc-log "^1.0.0" + promise-all-reject-late "^1.0.0" + promise-call-limit "^1.0.1" + read-package-json-fast "^2.0.2" + readdir-scoped-modules "^1.1.0" + rimraf "^3.0.2" + semver "^7.3.5" + ssri "^8.0.1" + treeverse "^1.0.4" + walk-up-path "^1.0.0" + +"@npmcli/ci-detect@^1.3.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@npmcli/ci-detect/-/ci-detect-1.3.0.tgz#6c1d2c625fb6ef1b9dea85ad0a5afcbef85ef22a" + integrity sha512-oN3y7FAROHhrAt7Rr7PnTSwrHrZVRTS2ZbyxeQwSSYD0ifwM3YNgQqbaRmjcWoPyq77MjchusjJDspbzMmip1Q== + +"@npmcli/ci-detect@^1.4.0": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@npmcli/ci-detect/-/ci-detect-1.4.0.tgz#18478bbaa900c37bfbd8a2006a6262c62e8b0fe1" + integrity sha512-3BGrt6FLjqM6br5AhWRKTr3u5GIVkjRYeAFrMp3HjnfICrg4xOrVRwFavKT6tsp++bq5dluL5t8ME/Nha/6c1Q== + +"@npmcli/config@^2.3.2": + version "2.3.2" + resolved "https://registry.yarnpkg.com/@npmcli/config/-/config-2.3.2.tgz#6027efc132fcc809abef749c2f2e13dc4dcd6e0b" + integrity sha512-2/9dj143BFgQR8qxJbYptd8k+4+Po2uHYq3H6498ynZcRu4LrsDlngov5HGrvo2+f0pe0fBJwDEP2rRtaW8bkw== + dependencies: + ini "^2.0.0" + mkdirp-infer-owner "^2.0.0" + nopt "^5.0.0" + semver "^7.3.4" + walk-up-path "^1.0.0" + +"@npmcli/disparity-colors@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/disparity-colors/-/disparity-colors-1.0.1.tgz#b23c864c9658f9f0318d5aa6d17986619989535c" + integrity sha512-kQ1aCTTU45mPXN+pdAaRxlxr3OunkyztjbbxDY/aIcPS5CnCUrx+1+NvA6pTcYR7wmLZe37+Mi5v3nfbwPxq3A== + dependencies: + ansi-styles "^4.3.0" + +"@npmcli/fs@^1.0.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-1.1.0.tgz#bec1d1b89c170d40e1b73ad6c943b0b75e7d2951" + integrity sha512-VhP1qZLXcrXRIaPoqb4YA55JQxLNF3jNR4T55IdOJa3+IFJKNYHtPvtXx8slmeMavj37vCzCfrqQM1vWLsYKLA== + dependencies: + "@gar/promisify" "^1.0.1" + semver "^7.3.5" + +"@npmcli/git@^2.0.1", "@npmcli/git@^2.0.7": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-2.0.9.tgz#915bbfe66300e67b4da5ef765a4475ffb2ca5b6b" + integrity sha512-hTMbMryvOqGLwnmMBKs5usbPsJtyEsMsgXwJbmNrsEuQQh1LAIMDU77IoOrwkCg+NgQWl+ySlarJASwM3SutCA== + dependencies: + "@npmcli/promise-spawn" "^1.3.2" + lru-cache "^6.0.0" + mkdirp "^1.0.4" + npm-pick-manifest "^6.1.1" + promise-inflight "^1.0.1" + promise-retry "^2.0.1" + semver "^7.3.5" + which "^2.0.2" + +"@npmcli/git@^2.1.0": + version "2.1.0" + resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-2.1.0.tgz#2fbd77e147530247d37f325930d457b3ebe894f6" + integrity sha512-/hBFX/QG1b+N7PZBFs0bi+evgRZcK9nWBxQKZkGoXUT5hJSwl5c4d7y8/hm+NQZRPhQ67RzFaj5UM9YeyKoryw== + dependencies: + "@npmcli/promise-spawn" "^1.3.2" + lru-cache "^6.0.0" + mkdirp "^1.0.4" + npm-pick-manifest "^6.1.1" + promise-inflight "^1.0.1" + promise-retry "^2.0.1" + semver "^7.3.5" + which "^2.0.2" + +"@npmcli/installed-package-contents@^1.0.6", "@npmcli/installed-package-contents@^1.0.7": + version "1.0.7" + resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-1.0.7.tgz#ab7408c6147911b970a8abe261ce512232a3f4fa" + integrity sha512-9rufe0wnJusCQoLpV9ZPKIVP55itrM5BxOXs10DmdbRfgWtHy1LDyskbwRnBghuB0PrF7pNPOqREVtpz4HqzKw== + dependencies: + npm-bundled "^1.1.1" + npm-normalize-package-bin "^1.0.1" + +"@npmcli/map-workspaces@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-2.0.0.tgz#e342efbbdd0dad1bba5d7723b674ca668bf8ac5a" + integrity 
sha512-QBJfpCY1NOAkkW3lFfru9VTdqvMB2TN0/vrevl5xBCv5Fi0XDVcA6rqqSau4Ysi4Iw3fBzyXV7hzyTBDfadf7g== + dependencies: + "@npmcli/name-from-folder" "^1.0.1" + glob "^7.1.6" + minimatch "^3.0.4" + read-package-json-fast "^2.0.1" + +"@npmcli/metavuln-calculator@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/metavuln-calculator/-/metavuln-calculator-2.0.0.tgz#70937b8b5a5cad5c588c8a7b38c4a8bd6f62c84c" + integrity sha512-VVW+JhWCKRwCTE+0xvD6p3uV4WpqocNYYtzyvenqL/u1Q3Xx6fGTJ+6UoIoii07fbuEO9U3IIyuGY0CYHDv1sg== + dependencies: + cacache "^15.0.5" + json-parse-even-better-errors "^2.3.1" + pacote "^12.0.0" + semver "^7.3.2" + +"@npmcli/move-file@^1.0.1", "@npmcli/move-file@^1.1.0": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-1.1.2.tgz#1a82c3e372f7cae9253eb66d72543d6b8685c674" + integrity sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg== + dependencies: + mkdirp "^1.0.4" + rimraf "^3.0.2" + +"@npmcli/name-from-folder@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/name-from-folder/-/name-from-folder-1.0.1.tgz#77ecd0a4fcb772ba6fe927e2e2e155fbec2e6b1a" + integrity sha512-qq3oEfcLFwNfEYOQ8HLimRGKlD8WSeGEdtUa7hmzpR8Sa7haL1KVQrvgO6wqMjhWFFVjgtrh1gIxDz+P8sjUaA== + +"@npmcli/node-gyp@^1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-1.0.2.tgz#3cdc1f30e9736dbc417373ed803b42b1a0a29ede" + integrity sha512-yrJUe6reVMpktcvagumoqD9r08fH1iRo01gn1u0zoCApa9lnZGEigVKUd2hzsCId4gdtkZZIVscLhNxMECKgRg== + +"@npmcli/node-gyp@^1.0.3": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-1.0.3.tgz#a912e637418ffc5f2db375e93b85837691a43a33" + integrity sha512-fnkhw+fmX65kiLqk6E3BFLXNC26rUhK90zVwe2yncPliVT/Qos3xjhTLE59Df8KnPlcwIERXKVlU1bXoUQ+liA== + +"@npmcli/package-json@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-1.0.1.tgz#1ed42f00febe5293c3502fd0ef785647355f6e89" + integrity sha512-y6jnu76E9C23osz8gEMBayZmaZ69vFOIk8vR1FJL/wbEJ54+9aVG9rLTjQKSXfgYZEr50nw1txBBFfBZZe+bYg== + dependencies: + json-parse-even-better-errors "^2.3.1" + +"@npmcli/promise-spawn@^1.2.0", "@npmcli/promise-spawn@^1.3.2": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-1.3.2.tgz#42d4e56a8e9274fba180dabc0aea6e38f29274f5" + integrity sha512-QyAGYo/Fbj4MXeGdJcFzZ+FkDkomfRBrPM+9QYJSg+PxgAUL+LU3FneQk37rKR2/zjqkCV1BLHccX98wRXG3Sg== + dependencies: + infer-owner "^1.0.4" + +"@npmcli/run-script@^1.8.2": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-1.8.5.tgz#f250a0c5e1a08a792d775a315d0ff42fc3a51e1d" + integrity sha512-NQspusBCpTjNwNRFMtz2C5MxoxyzlbuJ4YEhxAKrIonTiirKDtatsZictx9RgamQIx6+QuHMNmPl0wQdoESs9A== + dependencies: + "@npmcli/node-gyp" "^1.0.2" + "@npmcli/promise-spawn" "^1.3.2" + infer-owner "^1.0.4" + node-gyp "^7.1.0" + read-package-json-fast "^2.0.1" + +"@npmcli/run-script@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-2.0.0.tgz#9949c0cab415b17aaac279646db4f027d6f1e743" + integrity sha512-fSan/Pu11xS/TdaTpTB0MRn9guwGU8dye+x56mEVgBEd/QsybBbYcAL0phPXi8SGWFEChkQd6M9qL4y6VOpFig== + dependencies: + "@npmcli/node-gyp" "^1.0.2" + "@npmcli/promise-spawn" "^1.3.2" + node-gyp "^8.2.0" + read-package-json-fast "^2.0.1" + +"@sideway/address@^4.1.0": + version "4.1.2" + resolved 
"https://registry.yarnpkg.com/@sideway/address/-/address-4.1.2.tgz#811b84333a335739d3969cfc434736268170cad1" + integrity sha512-idTz8ibqWFrPU8kMirL0CoPH/A29XOzzAzpyN3zQ4kAWnzmNfFmRaoMNN6VI8ske5M73HZyhIaW4OuSFIdM4oA== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@sideway/formula@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.0.tgz#fe158aee32e6bd5de85044be615bc08478a0a13c" + integrity sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg== + +"@sideway/pinpoint@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" + integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== + +"@sindresorhus/is@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" + integrity sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow== + +"@tootallnate/once@1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-1.1.2.tgz#ccb91445360179a04e7fe6aff78c00ffc1eeaf82" + integrity sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw== + +"@tsconfig/node10@^1.0.7": + version "1.0.7" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.7.tgz#1eb1de36c73478a2479cc661ef5af1c16d86d606" + integrity sha512-aBvUmXLQbayM4w3A8TrjwrXs4DZ8iduJnuJLLRGdkWlyakCf1q6uHZJBzXoRA/huAEknG5tcUyQxN3A+In5euQ== + +"@tsconfig/node12@^1.0.7": + version "1.0.7" + resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.7.tgz#677bd9117e8164dc319987dd6ff5fc1ba6fbf18b" + integrity sha512-dgasobK/Y0wVMswcipr3k0HpevxFJLijN03A8mYfEPvWvOs14v0ZlYTR4kIgMx8g4+fTyTFv8/jLCIfRqLDJ4A== + +"@tsconfig/node14@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.0.tgz#5bd046e508b1ee90bc091766758838741fdefd6e" + integrity sha512-RKkL8eTdPv6t5EHgFKIVQgsDapugbuOptNd9OOunN/HAkzmmTnZELx1kNCK0rSdUYGmiFMM3rRQMAWiyp023LQ== + +"@tsconfig/node16@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.1.tgz#a6ca6a9a0ff366af433f42f5f0e124794ff6b8f1" + integrity sha512-FTgBI767POY/lKNDNbIzgAX6miIDBs6NTCbdlDb8TrWovHsSvaVIZDlTqym29C6UqhzwcJx4CYr+AlrMywA0cA== + +"@types/body-parser@*": + version "1.19.0" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.0.tgz#0685b3c47eb3006ffed117cdd55164b61f80538f" + integrity sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ== + dependencies: + "@types/connect" "*" + "@types/node" "*" + +"@types/caseless@*": + version "0.12.2" + resolved "https://registry.yarnpkg.com/@types/caseless/-/caseless-0.12.2.tgz#f65d3d6389e01eeb458bd54dc8f52b95a9463bc8" + integrity sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w== + +"@types/chai-as-promised@^7.1.0": + version "7.1.4" + resolved "https://registry.yarnpkg.com/@types/chai-as-promised/-/chai-as-promised-7.1.4.tgz#caf64e76fb056b8c8ced4b761ed499272b737601" + integrity sha512-1y3L1cHePcIm5vXkh1DSGf/zQq5n5xDKG1fpCvf18+uOkpce0Z1ozNFPkyWsVswK7ntN1sZBw3oU6gmN+pDUcA== + dependencies: + "@types/chai" "*" + +"@types/chai@*", "@types/chai@^4.2.18": + version "4.2.18" + resolved 
"https://registry.yarnpkg.com/@types/chai/-/chai-4.2.18.tgz#0c8e298dbff8205e2266606c1ea5fbdba29b46e4" + integrity sha512-rS27+EkB/RE1Iz3u0XtVL5q36MGDWbgYe7zWiodyKNUnthxY0rukK5V36eiUCtCisB7NN8zKYH6DO2M37qxFEQ== + +"@types/connect@*": + version "3.4.34" + resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.34.tgz#170a40223a6d666006d93ca128af2beb1d9b1901" + integrity sha512-ePPA/JuI+X0vb+gSWlPKOY0NdNAie/rPUqX2GUPpbZwiKTkSPhjXWuee47E4MtE54QVzGCQMQkAL6JhV2E1+cQ== + dependencies: + "@types/node" "*" + +"@types/express-serve-static-core@^4.17.18": + version "4.17.21" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.21.tgz#a427278e106bca77b83ad85221eae709a3414d42" + integrity sha512-gwCiEZqW6f7EoR8TTEfalyEhb1zA5jQJnRngr97+3pzMaO1RKoI1w2bw07TK72renMUVWcWS5mLI6rk1NqN0nA== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" + +"@types/express@^4.17.2": + version "4.17.12" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.12.tgz#4bc1bf3cd0cfe6d3f6f2853648b40db7d54de350" + integrity sha512-pTYas6FrP15B1Oa0bkN5tQMNqOcVXa9j4FTFtO8DWI9kppKib+6NJtfTOOLcwxuuYvcX2+dVG6et1SxW/Kc17Q== + dependencies: + "@types/body-parser" "*" + "@types/express-serve-static-core" "^4.17.18" + "@types/qs" "*" + "@types/serve-static" "*" + +"@types/glob@^7.1.3": + version "7.1.3" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.3.tgz#e6ba80f36b7daad2c685acd9266382e68985c183" + integrity sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + +"@types/http-proxy@^1.17.7": + version "1.17.7" + resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.7.tgz#30ea85cc2c868368352a37f0d0d3581e24834c6f" + integrity sha512-9hdj6iXH64tHSLTY+Vt2eYOGzSogC+JQ2H7bdPWkuh7KXP5qLllWx++t+K9Wk556c3dkDdPws/SpMRi0sdCT1w== + dependencies: + "@types/node" "*" + +"@types/js-base64@^3.3.1": + version "3.3.1" + resolved "https://registry.yarnpkg.com/@types/js-base64/-/js-base64-3.3.1.tgz#36c2d6dc126277ea28a4d0599d0cafbf547b51e6" + integrity sha512-Zw33oQNAvDdAN9b0IE5stH0y2MylYvtU7VVTKEJPxhyM2q57CVaNJhtJW258ah24NRtaiA23tptUmVn3dmTKpw== + dependencies: + js-base64 "*" + +"@types/js-yaml@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/js-yaml/-/js-yaml-4.0.1.tgz#5544730b65a480b18ace6b6ce914e519cec2d43b" + integrity sha512-xdOvNmXmrZqqPy3kuCQ+fz6wA0xU5pji9cd1nDrflWaAWtYLLGk5ykW0H6yg5TVyehHP1pfmuuSaZkhP+kspVA== + +"@types/json-schema@^7.0.3": + version "7.0.7" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.7.tgz#98a993516c859eb0d5c4c8f098317a9ea68db9ad" + integrity sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA== + +"@types/lockfile@^1.0.0": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@types/lockfile/-/lockfile-1.0.1.tgz#434a3455e89843312f01976e010c60f1bcbd56f7" + integrity sha512-65WZedEm4AnOsBDdsapJJG42MhROu3n4aSSiu87JXF/pSdlubxZxp3S1yz3kTfkJ2KBPud4CpjoHVAptOm9Zmw== + +"@types/mime@^1": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" + integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== + +"@types/minimatch@*": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.4.tgz#f0ec25dbf2f0e4b18647313ac031134ca5b24b21" + 
integrity sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA== + +"@types/minipass@*": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@types/minipass/-/minipass-2.2.0.tgz#51ad404e8eb1fa961f75ec61205796807b6f9651" + integrity sha512-wuzZksN4w4kyfoOv/dlpov4NOunwutLA/q7uc00xU02ZyUY+aoM5PWIXEKBMnm0NHd4a+N71BMjq+x7+2Af1fg== + dependencies: + "@types/node" "*" + +"@types/mocha@^8.2.2": + version "8.2.2" + resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-8.2.2.tgz#91daa226eb8c2ff261e6a8cbf8c7304641e095e0" + integrity sha512-Lwh0lzzqT5Pqh6z61P3c3P5nm6fzQK/MMHl9UKeneAeInVflBSz1O2EkX6gM6xfJd7FBXBY5purtLx7fUiZ7Hw== + +"@types/node@*": + version "15.12.1" + resolved "https://registry.yarnpkg.com/@types/node/-/node-15.12.1.tgz#9b60797dee1895383a725f828a869c86c6caa5c2" + integrity sha512-zyxJM8I1c9q5sRMtVF+zdd13Jt6RU4r4qfhTd7lQubyThvLfx6yYekWSQjGCGV2Tkecgxnlpl/DNlb6Hg+dmEw== + +"@types/node@^15.12.1": + version "15.12.2" + resolved "https://registry.yarnpkg.com/@types/node/-/node-15.12.2.tgz#1f2b42c4be7156ff4a6f914b2fb03d05fa84e38d" + integrity sha512-zjQ69G564OCIWIOHSXyQEEDpdpGl+G348RAKY0XXy9Z5kU9Vzv1GMNnkar/ZJ8dzXB3COzD9Mo9NtRZ4xfgUww== + +"@types/qs@*": + version "6.9.6" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.6.tgz#df9c3c8b31a247ec315e6996566be3171df4b3b1" + integrity sha512-0/HnwIfW4ki2D8L8c9GVcG5I72s9jP5GSLVF0VIXDW00kmIpA6O33G7a8n59Tmh7Nz0WUC3rSb7PTY/sdW2JzA== + +"@types/range-parser@*": + version "1.2.3" + resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.3.tgz#7ee330ba7caafb98090bece86a5ee44115904c2c" + integrity sha512-ewFXqrQHlFsgc09MK5jP5iR7vumV/BYayNC6PgJO2LPe8vrnNFyjQjSppfEngITi0qvfKtzFvgKymGheFM9UOA== + +"@types/request@^2.48.5": + version "2.48.5" + resolved "https://registry.yarnpkg.com/@types/request/-/request-2.48.5.tgz#019b8536b402069f6d11bee1b2c03e7f232937a0" + integrity sha512-/LO7xRVnL3DxJ1WkPGDQrp4VTV1reX9RkC85mJ+Qzykj2Bdw+mG15aAfDahc76HtknjzE16SX/Yddn6MxVbmGQ== + dependencies: + "@types/caseless" "*" + "@types/node" "*" + "@types/tough-cookie" "*" + form-data "^2.5.0" + +"@types/rx-core-binding@*": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/rx-core-binding/-/rx-core-binding-4.0.4.tgz#d969d32f15a62b89e2862c17b3ee78fe329818d3" + integrity sha512-5pkfxnC4w810LqBPUwP5bg7SFR/USwhMSaAeZQQbEHeBp57pjKXRlXmqpMrLJB4y1oglR/c2502853uN0I+DAQ== + dependencies: + "@types/rx-core" "*" + +"@types/rx-core@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-core/-/rx-core-4.0.3.tgz#0b3354b1238cedbe2b74f6326f139dbc7a591d60" + integrity sha1-CzNUsSOM7b4rdPYybxOdvHpZHWA= + +"@types/rx-lite-aggregates@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-lite-aggregates/-/rx-lite-aggregates-4.0.3.tgz#6efb2b7f3d5f07183a1cb2bd4b1371d7073384c2" + integrity sha512-MAGDAHy8cRatm94FDduhJF+iNS5//jrZ/PIfm+QYw9OCeDgbymFHChM8YVIvN2zArwsRftKgE33QfRWvQk4DPg== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-async@*": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@types/rx-lite-async/-/rx-lite-async-4.0.2.tgz#27fbf0caeff029f41e2d2aae638b05e91ceb600c" + integrity sha512-vTEv5o8l6702ZwfAM5aOeVDfUwBSDOs+ARoGmWAKQ6LOInQ8J4/zjM7ov12fuTpktUKdMQjkeCp07Vd73mPkxw== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-backpressure@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-lite-backpressure/-/rx-lite-backpressure-4.0.3.tgz#05abb19bdf87cc740196c355e5d0b37bb50b5d56" + integrity 
sha512-Y6aIeQCtNban5XSAF4B8dffhIKu6aAy/TXFlScHzSxh6ivfQBQw6UjxyEJxIOt3IT49YkS+siuayM2H/Q0cmgA== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-coincidence@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-lite-coincidence/-/rx-lite-coincidence-4.0.3.tgz#80bd69acc4054a15cdc1638e2dc8843498cd85c0" + integrity sha512-1VNJqzE9gALUyMGypDXZZXzR0Tt7LC9DdAZQ3Ou/Q0MubNU35agVUNXKGHKpNTba+fr8GdIdkC26bRDqtCQBeQ== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-experimental@*": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/rx-lite-experimental/-/rx-lite-experimental-4.0.1.tgz#c532f5cbdf3f2c15da16ded8930d1b2984023cbd" + integrity sha1-xTL1y98/LBXaFt7Ykw0bKYQCPL0= + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-joinpatterns@*": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/rx-lite-joinpatterns/-/rx-lite-joinpatterns-4.0.1.tgz#f70fe370518a8432f29158cc92ffb56b4e4afc3e" + integrity sha1-9w/jcFGKhDLykVjMkv+1a05K/D4= + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-testing@*": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/rx-lite-testing/-/rx-lite-testing-4.0.1.tgz#21b19d11f4dfd6ffef5a9d1648e9c8879bfe21e9" + integrity sha1-IbGdEfTf1v/vWp0WSOnIh5v+Iek= + dependencies: + "@types/rx-lite-virtualtime" "*" + +"@types/rx-lite-time@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-lite-time/-/rx-lite-time-4.0.3.tgz#0eda65474570237598f3448b845d2696f2dbb1c4" + integrity sha512-ukO5sPKDRwCGWRZRqPlaAU0SKVxmWwSjiOrLhoQDoWxZWg6vyB9XLEZViKOzIO6LnTIQBlk4UylYV0rnhJLxQw== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite-virtualtime@*": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/rx-lite-virtualtime/-/rx-lite-virtualtime-4.0.3.tgz#4b30cacd0fe2e53af29f04f7438584c7d3959537" + integrity sha512-3uC6sGmjpOKatZSVHI2xB1+dedgml669ZRvqxy+WqmGJDVusOdyxcKfyzjW0P3/GrCiN4nmRkLVMhPwHCc5QLg== + dependencies: + "@types/rx-lite" "*" + +"@types/rx-lite@*": + version "4.0.6" + resolved "https://registry.yarnpkg.com/@types/rx-lite/-/rx-lite-4.0.6.tgz#3c02921c4244074234f26b772241bcc20c18c253" + integrity sha512-oYiDrFIcor9zDm0VDUca1UbROiMYBxMLMaM6qzz4ADAfOmA9r1dYEcAFH+2fsPI5BCCjPvV9pWC3X3flbrvs7w== + dependencies: + "@types/rx-core" "*" + "@types/rx-core-binding" "*" + +"@types/rx@^4.1.2": + version "4.1.2" + resolved "https://registry.yarnpkg.com/@types/rx/-/rx-4.1.2.tgz#a4061b3d72b03cf11a38d69e2022a17334c54dc0" + integrity sha512-1r8ZaT26Nigq7o4UBGl+aXB2UMFUIdLPP/8bLIP0x3d0pZL46ybKKjhWKaJQWIkLl5QCLD0nK3qTOO1QkwdFaA== + dependencies: + "@types/rx-core" "*" + "@types/rx-core-binding" "*" + "@types/rx-lite" "*" + "@types/rx-lite-aggregates" "*" + "@types/rx-lite-async" "*" + "@types/rx-lite-backpressure" "*" + "@types/rx-lite-coincidence" "*" + "@types/rx-lite-experimental" "*" + "@types/rx-lite-joinpatterns" "*" + "@types/rx-lite-testing" "*" + "@types/rx-lite-time" "*" + "@types/rx-lite-virtualtime" "*" + +"@types/serve-static@*": + version "1.13.9" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.9.tgz#aacf28a85a05ee29a11fb7c3ead935ac56f33e4e" + integrity sha512-ZFqF6qa48XsPdjXV5Gsz0Zqmux2PerNd3a/ktL45mHpa19cuMi/cL8tcxdAx497yRh+QtYPuofjT9oWw9P7nkA== + dependencies: + "@types/mime" "^1" + "@types/node" "*" + +"@types/sqlite3@^3.1.7": + version "3.1.7" + resolved "https://registry.yarnpkg.com/@types/sqlite3/-/sqlite3-3.1.7.tgz#84fbc65946603d15cff4968d0cb283d1879dd156" + integrity 
sha512-8FHV/8Uzd7IwdHm5mvmF2Aif4aC/gjrt4axWD9SmfaxITnOjtOhCbOSTuqv/VbH1uq0QrwlaTj9aTz3gmR6u4w== + dependencies: + "@types/node" "*" + +"@types/ssh2-streams@*": + version "0.1.8" + resolved "https://registry.yarnpkg.com/@types/ssh2-streams/-/ssh2-streams-0.1.8.tgz#142af404dae059931aea7fcd1511b5478964feb6" + integrity sha512-I7gixRPUvVIyJuCEvnmhr3KvA2dC0639kKswqD4H5b4/FOcnPtNU+qWLiXdKIqqX9twUvi5j0U1mwKE5CUsrfA== + dependencies: + "@types/node" "*" + +"@types/ssh2@^0.5.46": + version "0.5.46" + resolved "https://registry.yarnpkg.com/@types/ssh2/-/ssh2-0.5.46.tgz#e12341a242aea0e98ac2dec89e039bf421fd3584" + integrity sha512-1pC8FHrMPYdkLoUOwTYYifnSEPzAFZRsp3JFC/vokQ+dRrVI+hDBwz0SNmQ3pL6h39OSZlPs0uCG7wKJkftnaA== + dependencies: + "@types/node" "*" + "@types/ssh2-streams" "*" + +"@types/stream-buffers@^3.0.3": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/stream-buffers/-/stream-buffers-3.0.3.tgz#34e565bf64e3e4bdeee23fd4aa58d4636014a02b" + integrity sha512-NeFeX7YfFZDYsCfbuaOmFQ0OjSmHreKBpp7MQ4alWQBHeh2USLsj7qyMyn9t82kjqIX516CR/5SRHnARduRtbQ== + dependencies: + "@types/node" "*" + +"@types/tar@^4.0.4": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/tar/-/tar-4.0.4.tgz#d680de60855e7778a51c672b755869a3b8d2889f" + integrity sha512-0Xv+xcmkTsOZdIF4yCnd7RkOOyfyqPaqJ7RZFKnwdxfDbkN3eAAE9sHl8zJFqBz4VhxolW9EErbjR1oyH7jK2A== + dependencies: + "@types/minipass" "*" + "@types/node" "*" + +"@types/tmp@^0.2.0": + version "0.2.0" + resolved "https://registry.yarnpkg.com/@types/tmp/-/tmp-0.2.0.tgz#e3f52b4d7397eaa9193592ef3fdd44dc0af4298c" + integrity sha512-flgpHJjntpBAdJD43ShRosQvNC0ME97DCfGvZEDlAThQmnerRXrLbX6YgzRBQCZTthET9eAWFAMaYP0m0Y4HzQ== + +"@types/tough-cookie@*": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/tough-cookie/-/tough-cookie-4.0.0.tgz#fef1904e4668b6e5ecee60c52cc6a078ffa6697d" + integrity sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A== + +"@types/ws@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-7.4.4.tgz#93e1e00824c1de2608c30e6de4303ab3b4c0c9bc" + integrity sha512-d/7W23JAXPodQNbOZNXvl2K+bqAQrCMwlh/nuQsPSQk6Fq0opHoPrUw43aHsvSbIiQPr8Of2hkFbnz1XBFVyZQ== + dependencies: + "@types/node" "*" + +"@typescript-eslint/eslint-plugin@^2.10.0": + version "2.34.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-2.34.0.tgz#6f8ce8a46c7dea4a6f1d171d2bb8fbae6dac2be9" + integrity sha512-4zY3Z88rEE99+CNvTbXSyovv2z9PNOVffTWD2W8QF5s2prBQtwN2zadqERcrHpcR7O/+KMI3fcTAmUUhK/iQcQ== + dependencies: + "@typescript-eslint/experimental-utils" "2.34.0" + functional-red-black-tree "^1.0.1" + regexpp "^3.0.0" + tsutils "^3.17.1" + +"@typescript-eslint/experimental-utils@2.34.0": + version "2.34.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/experimental-utils/-/experimental-utils-2.34.0.tgz#d3524b644cdb40eebceca67f8cf3e4cc9c8f980f" + integrity sha512-eS6FTkq+wuMJ+sgtuNTtcqavWXqsflWcfBnlYhg/nS4aZ1leewkXGbvBhaapn1q6qf4M71bsR1tez5JTRMuqwA== + dependencies: + "@types/json-schema" "^7.0.3" + "@typescript-eslint/typescript-estree" "2.34.0" + eslint-scope "^5.0.0" + eslint-utils "^2.0.0" + +"@typescript-eslint/parser@^4.26.0": + version "4.26.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-4.26.0.tgz#31b6b732c9454f757b020dab9b6754112aa5eeaf" + integrity sha512-b4jekVJG9FfmjUfmM4VoOItQhPlnt6MPOBUL0AQbiTmm+SSpSdhHYlwayOm4IW9KLI/4/cRKtQCmDl1oE2OlPg== + dependencies: + "@typescript-eslint/scope-manager" 
"4.26.0" + "@typescript-eslint/types" "4.26.0" + "@typescript-eslint/typescript-estree" "4.26.0" + debug "^4.3.1" + +"@typescript-eslint/scope-manager@4.26.0": + version "4.26.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-4.26.0.tgz#60d1a71df162404e954b9d1c6343ff3bee496194" + integrity sha512-G6xB6mMo4xVxwMt5lEsNTz3x4qGDt0NSGmTBNBPJxNsrTXJSm21c6raeYroS2OwQsOyIXqKZv266L/Gln1BWqg== + dependencies: + "@typescript-eslint/types" "4.26.0" + "@typescript-eslint/visitor-keys" "4.26.0" + +"@typescript-eslint/types@4.26.0": + version "4.26.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-4.26.0.tgz#7c6732c0414f0a69595f4f846ebe12616243d546" + integrity sha512-rADNgXl1kS/EKnDr3G+m7fB9yeJNnR9kF7xMiXL6mSIWpr3Wg5MhxyfEXy/IlYthsqwBqHOr22boFbf/u6O88A== + +"@typescript-eslint/typescript-estree@2.34.0": + version "2.34.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-2.34.0.tgz#14aeb6353b39ef0732cc7f1b8285294937cf37d5" + integrity sha512-OMAr+nJWKdlVM9LOqCqh3pQQPwxHAN7Du8DR6dmwCrAmxtiXQnhHJ6tBNtf+cggqfo51SG/FCwnKhXCIM7hnVg== + dependencies: + debug "^4.1.1" + eslint-visitor-keys "^1.1.0" + glob "^7.1.6" + is-glob "^4.0.1" + lodash "^4.17.15" + semver "^7.3.2" + tsutils "^3.17.1" + +"@typescript-eslint/typescript-estree@4.26.0": + version "4.26.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-4.26.0.tgz#aea17a40e62dc31c63d5b1bbe9a75783f2ce7109" + integrity sha512-GHUgahPcm9GfBuy3TzdsizCcPjKOAauG9xkz9TR8kOdssz2Iz9jRCSQm6+aVFa23d5NcSpo1GdHGSQKe0tlcbg== + dependencies: + "@typescript-eslint/types" "4.26.0" + "@typescript-eslint/visitor-keys" "4.26.0" + debug "^4.3.1" + globby "^11.0.3" + is-glob "^4.0.1" + semver "^7.3.5" + tsutils "^3.21.0" + +"@typescript-eslint/visitor-keys@4.26.0": + version "4.26.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-4.26.0.tgz#26d2583169222815be4dcd1da4fe5459bc3bcc23" + integrity sha512-cw4j8lH38V1ycGBbF+aFiLUls9Z0Bw8QschP3mkth50BbWzgFS33ISIgBzUMuQ2IdahoEv/rXstr8Zhlz4B1Zg== + dependencies: + "@typescript-eslint/types" "4.26.0" + eslint-visitor-keys "^2.0.0" + +"@ungap/promise-all-settled@1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz#aa58042711d6e3275dd37dc597e5d31e8c290a44" + integrity sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q== + +abbrev@1, abbrev@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + +accepts@~1.3.7: + version "1.3.7" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd" + integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA== + dependencies: + mime-types "~2.1.24" + negotiator "0.6.2" + +acorn-jsx@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.1.tgz#fc8661e11b7ac1539c47dbfea2e72b3af34d267b" + integrity sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng== + +acorn@>=8.3.0, acorn@^7.4.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.3.0.tgz#1193f9b96c4e8232f00b11a9edff81b2c8b98b88" + integrity 
sha512-tqPKHZ5CaBJw0Xmy0ZZvLs1qTV+BNFSyvn77ASXkpBNfIRk8ev26fKrD9iLGwGA9zedPao52GSHzq8lyZG0NUw== + +agent-base@6, agent-base@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-6.0.2.tgz#49fff58577cfee3f37176feab4c22e00f86d7f77" + integrity sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ== + dependencies: + debug "4" + +agentkeepalive@^4.1.3: + version "4.1.4" + resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.1.4.tgz#d928028a4862cb11718e55227872e842a44c945b" + integrity sha512-+V/rGa3EuU74H6wR04plBb7Ks10FbtUQgRj/FQOG7uUIEuaINI+AiqJR1k6t3SVNs7o7ZjIdus6706qqzVq8jQ== + dependencies: + debug "^4.1.0" + depd "^1.1.2" + humanize-ms "^1.2.1" + +aggregate-error@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" + integrity sha1-iINE2tAiCnLjr1CQYRf0h3GSX6w= + dependencies: + clean-stack "^1.0.0" + indent-string "^3.0.0" + +aggregate-error@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" + integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA== + dependencies: + clean-stack "^2.0.0" + indent-string "^4.0.0" + +ajv@^6.10.0, ajv@^6.12.3, ajv@^6.12.4: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +ajv@^8.0.1: + version "8.6.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.6.0.tgz#60cc45d9c46a477d80d92c48076d972c342e5720" + integrity sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ== + dependencies: + fast-deep-equal "^3.1.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.2.2" + +ansi-colors@4.1.1, ansi-colors@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.1.tgz#cbb9ae256bf750af1eab344f229aa27fe94ba348" + integrity sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA== + +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0, ansi-styles@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +ansicolors@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979" + integrity 
sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk= + +ansistyles@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539" + integrity sha1-XeYEFb2gcbs3EnhUyGT0GyMlRTk= + +anymatch@~3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" + integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + +app-module-path@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/app-module-path/-/app-module-path-2.2.0.tgz#641aa55dfb7d6a6f0a8141c4b9c0aa50b6c24dd5" + integrity sha1-ZBqlXft9am8KgUHEucCqULbCTdU= + +append-transform@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-2.0.0.tgz#99d9d29c7b38391e6f428d28ce136551f0b77e12" + integrity sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg== + dependencies: + default-require-extensions "^3.0.0" + +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== + +"aproba@^1.0.3 || ^2.0.0", aproba@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-2.0.0.tgz#52520b8ae5b569215b354efc0caa3fe1e45a8adc" + integrity sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ== + +archy@^1.0.0, archy@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40" + integrity sha1-+cjBN1fMHde8N5rHeyxipcKGjEA= + +are-we-there-yet@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz#372e0e7bd279d8e94c653aaa1f67200884bf3e1c" + integrity sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw== + dependencies: + delegates "^1.0.0" + readable-stream "^3.6.0" + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w== + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +arg@^4.1.0: + version "4.1.3" + resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" + integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +arr-union@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" + integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ= + 
+array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= + dependencies: + array-uniq "^1.0.1" + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= + +asap@^2.0.0: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= + +asn1@^0.2.4, asn1@~0.2.3: + version "0.2.4" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" + integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg== + dependencies: + safer-buffer "~2.1.0" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= + +assertion-error@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.1.0.tgz#e60b6b0e8f301bd97e5375215bda406c85118c0b" + integrity sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw== + +astral-regex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-2.0.0.tgz#483143c567aeed4785759c0865786dc77d7d2e31" + integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ== + +async-limiter@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" + integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= + +aws4@^1.8.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" + integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== + +azure-storage@^2.10.6: + version "2.10.6" + resolved "https://registry.yarnpkg.com/azure-storage/-/azure-storage-2.10.6.tgz#de502d8db8697b10254774a4ce1c66528cd2a4ba" + integrity sha512-14e7wUMXlvQuvDeCqJda5TnPfQ//6+5HgxvZpMy8qfY8jQz0W/0EiN/qvm9wYKjLV+nrfOFrsJdtE4EPRC6u1A== + dependencies: + browserify-mime "~1.2.9" + extend "^3.0.2" + json-edm-parser "0.1.2" + json-schema "^0.4.0" + md5.js "1.3.4" + readable-stream "~2.0.0" + request 
"^2.86.0" + underscore "^1.12.1" + uuid "^3.0.0" + validator "~13.7.0" + xml2js "0.2.8" + xmlbuilder "^9.0.7" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + +base64url@^3.0.0, base64url@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/base64url/-/base64url-3.0.1.tgz#6399d572e2bc3f90a9a8b22d5dbb0a32d33f788d" + integrity sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A== + +bcrypt-pbkdf@^1.0.0, bcrypt-pbkdf@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= + dependencies: + tweetnacl "^0.14.3" + +bin-links@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/bin-links/-/bin-links-2.3.0.tgz#1ff241c86d2c29b24ae52f49544db5d78a4eb967" + integrity sha512-JzrOLHLwX2zMqKdyYZjkDgQGT+kHDkIhv2/IK2lJ00qLxV4TmFoHi8drDBb6H5Zrz1YfgHkai4e2MGPqnoUhqA== + dependencies: + cmd-shim "^4.0.1" + mkdirp-infer-owner "^2.0.0" + npm-normalize-package-bin "^1.0.0" + read-cmd-shim "^2.0.0" + rimraf "^3.0.0" + write-file-atomic "^3.0.3" + +binary-extensions@^2.0.0, binary-extensions@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" + integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== + +body-parser@1.19.0: + version "1.19.0" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.0.tgz#96b2709e57c9c4e09a6fd66a8fd979844f69f08a" + integrity sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw== + dependencies: + bytes "3.1.0" + content-type "~1.0.4" + debug "2.6.9" + depd "~1.1.2" + http-errors "1.7.2" + iconv-lite "0.4.24" + on-finished "~2.3.0" + qs "6.7.0" + raw-body "2.4.0" + type-is "~1.6.17" + +boom@2.6.x: + version "2.6.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-2.6.1.tgz#4dc8ef9b6dfad9c43bbbfbe71fa4c21419f22753" + integrity sha1-Tcjvm2362cQ7u/vnH6TCFBnyJ1M= + dependencies: + hoek "2.x.x" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^3.0.1, braces@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +browser-stdout@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" + integrity 
sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +browserify-mime@~1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/browserify-mime/-/browserify-mime-1.2.9.tgz#aeb1af28de6c0d7a6a2ce40adb68ff18422af31f" + integrity sha1-rrGvKN5sDXpqLOQK22j/GEIq8x8= + +browserify-zlib@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" + integrity sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA== + dependencies: + pako "~1.0.5" + +browserslist@^4.16.6: + version "4.16.6" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.6.tgz#d7901277a5a88e554ed305b183ec9b0c08f66fa2" + integrity sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ== + dependencies: + caniuse-lite "^1.0.30001219" + colorette "^1.2.2" + electron-to-chromium "^1.3.723" + escalade "^3.1.1" + node-releases "^1.1.71" + +buffer-from@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== + +buffer@^5.5.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" + integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.1.13" + +builtins@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88" + integrity sha1-y5T662HIaWRR2zZTThQi+U8K7og= + +bytes@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6" + integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg== + +cacache@^15.0.3, cacache@^15.0.5, cacache@^15.2.0: + version "15.2.0" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-15.2.0.tgz#73af75f77c58e72d8c630a7a2858cb18ef523389" + integrity sha512-uKoJSHmnrqXgthDFx/IU6ED/5xd+NNGe+Bb+kLZy7Ku4P+BaiWEUflAKPZ7eAzsYGcsAGASJZsybXp+quEcHTw== + dependencies: + "@npmcli/move-file" "^1.0.1" + chownr "^2.0.0" + fs-minipass "^2.0.0" + glob "^7.1.4" + infer-owner "^1.0.4" + lru-cache "^6.0.0" + minipass "^3.1.1" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.2" + mkdirp "^1.0.3" + p-map "^4.0.0" + promise-inflight "^1.0.1" + rimraf "^3.0.2" + ssri "^8.0.1" + tar "^6.0.2" + unique-filename "^1.1.1" + +cacache@^15.3.0: + version "15.3.0" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-15.3.0.tgz#dc85380fb2f556fe3dda4c719bfa0ec875a7f1eb" + integrity sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ== + dependencies: + "@npmcli/fs" "^1.0.0" + "@npmcli/move-file" "^1.0.1" + chownr "^2.0.0" + fs-minipass "^2.0.0" + glob "^7.1.4" + infer-owner "^1.0.4" + lru-cache "^6.0.0" + minipass "^3.1.1" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.2" + mkdirp "^1.0.3" + p-map "^4.0.0" + promise-inflight "^1.0.1" + rimraf "^3.0.2" + ssri "^8.0.1" + tar "^6.0.2" + unique-filename "^1.1.1" + +cacheable-request@^2.1.1: + version "2.1.4" + resolved 
"https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" + integrity sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0= + dependencies: + clone-response "1.0.2" + get-stream "3.0.0" + http-cache-semantics "3.8.1" + keyv "3.0.0" + lowercase-keys "1.0.0" + normalize-url "2.0.1" + responselike "1.0.2" + +caching-transform@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/caching-transform/-/caching-transform-4.0.0.tgz#00d297a4206d71e2163c39eaffa8157ac0651f0f" + integrity sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA== + dependencies: + hasha "^5.0.0" + make-dir "^3.0.0" + package-hash "^4.0.0" + write-file-atomic "^3.0.0" + +call-bind@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" + integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== + dependencies: + function-bind "^1.1.1" + get-intrinsic "^1.0.2" + +callsites@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-1.0.1.tgz#c14c24188ce8e1d6a030b4c3c942e6ba895b6a1a" + integrity sha1-wUwkGIzo4dagMLTDyULmuolbaho= + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camelcase@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +camelcase@^6.0.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.2.0.tgz#924af881c9d525ac9d87f40d964e5cea982a1809" + integrity sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg== + +caniuse-lite@^1.0.30001219: + version "1.0.30001235" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001235.tgz#ad5ca75bc5a1f7b12df79ad806d715a43a5ac4ed" + integrity sha512-zWEwIVqnzPkSAXOUlQnPW2oKoYb2aLQ4Q5ejdjBcnH63rfypaW34CxaeBn1VMya2XaEU3P/R2qHpWyj+l0BT1A== + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= + +chai-as-promised@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.1.tgz#08645d825deb8696ee61725dbf590c012eb00ca0" + integrity sha512-azL6xMoi+uxu6z4rhWQ1jbdUhOMhis2PvscD/xjLqNMkv3BPPp2JyyuTHOrf9BOosGpNQ11v6BKv/g57RXbiaA== + dependencies: + check-error "^1.0.2" + +chai@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.3.4.tgz#b55e655b31e1eac7099be4c08c21964fce2e6c49" + integrity sha512-yS5H68VYOCtN1cjfwumDSuzn/9c+yza4f3reKXlE5rUg7SFcCEy90gJvydNgOYtblyf4Zi6jIWRnXOgErta0KA== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.2" + deep-eql "^3.0.1" + get-func-name "^2.0.0" + pathval "^1.1.1" + type-detect "^4.0.5" + +chalk@^2.0.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" 
+ escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.0.0, chalk@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" + integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +check-error@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.2.tgz#574d312edd88bb5dd8912e9286dd6c0aed4aac82" + integrity sha1-V00xLt2Iu13YkS6Sht1sCu1KrII= + +child-process-promise@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/child-process-promise/-/child-process-promise-2.2.1.tgz#4730a11ef610fad450b8f223c79d31d7bdad8074" + integrity sha1-RzChHvYQ+tRQuPIjx50x172tgHQ= + dependencies: + cross-spawn "^4.0.2" + node-version "^1.0.0" + promise-polyfill "^6.0.1" + +chokidar@3.5.2: + version "3.5.2" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.2.tgz#dba3976fcadb016f66fd365021d91600d01c1e75" + integrity sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ== + dependencies: + anymatch "~3.1.2" + braces "~3.0.2" + glob-parent "~5.1.2" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.6.0" + optionalDependencies: + fsevents "~2.3.2" + +chownr@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" + integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg== + +chownr@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" + integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== + +cidr-regex@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/cidr-regex/-/cidr-regex-3.1.1.tgz#ba1972c57c66f61875f18fd7dd487469770b571d" + integrity sha512-RBqYd32aDwbCMFJRL6wHOlDNYJsPNTt8vC82ErHF5vKt8QQzxm1FrkW8s/R5pVrXMf17sba09Uoy91PKiddAsw== + dependencies: + ip-regex "^4.1.0" + +clean-stack@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-1.3.0.tgz#9e821501ae979986c46b1d66d2d432db2fd4ae31" + integrity sha1-noIVAa6XmYbEax1m0tQy2y/UrjE= + +clean-stack@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== + +cli-columns@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cli-columns/-/cli-columns-4.0.0.tgz#9fe4d65975238d55218c41bd2ed296a7fa555646" + integrity sha512-XW2Vg+w+L9on9wtwKpyzluIPCWXjaBahI7mTcYjx+BVIYD9c3yqcv/yKC7CmdCZat4rq2yiE1UMSJC5ivKfMtQ== + dependencies: + string-width "^4.2.3" + strip-ansi "^6.0.1" + +cli-table3@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.0.tgz#b7b1bc65ca8e7b5cef9124e13dc2b21e2ce4faee" + integrity 
sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ== + dependencies: + object-assign "^4.1.0" + string-width "^4.2.0" + optionalDependencies: + colors "^1.1.2" + +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^6.2.0" + +cliui@^7.0.2: + version "7.0.4" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" + integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^7.0.0" + +clone-deep@^0.2.4: + version "0.2.4" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.2.4.tgz#4e73dd09e9fb971cc38670c5dced9c1896481cc6" + integrity sha1-TnPdCen7lxzDhnDF3O2cGJZIHMY= + dependencies: + for-own "^0.1.3" + is-plain-object "^2.0.1" + kind-of "^3.0.2" + lazy-cache "^1.0.3" + shallow-clone "^0.1.2" + +clone-response@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= + dependencies: + mimic-response "^1.0.0" + +clone@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= + +cmd-shim@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-4.1.0.tgz#b3a904a6743e9fede4148c6f3800bf2a08135bdd" + integrity sha512-lb9L7EM4I/ZRVuljLPEtUJOP+xiQVknZ4ZMpMgEp4JzNldPb27HU03hi6K1/6CoIuit/Zm/LQXySErFeXxDprw== + dependencies: + mkdirp-infer-owner "^2.0.0" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +color-support@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-support/-/color-support-1.1.3.tgz#93834379a1cc9a0c61f82f52f0d04322251bd5a2" + integrity sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg== + +colorette@^1.2.2: + 
version "1.2.2" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.2.tgz#cbcc79d5e99caea2dbf10eb3a26fd8b3e6acfa94" + integrity sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w== + +colors@^1.1.2: + version "1.4.0" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" + integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== + +columnify@~1.5.4: + version "1.5.4" + resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb" + integrity sha1-Rzfd8ce2mop8NAVweC6UfuyOeLs= + dependencies: + strip-ansi "^3.0.0" + wcwidth "^1.0.0" + +combined-stream@^1.0.6, combined-stream@~1.0.6: + version "1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +common-ancestor-path@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/common-ancestor-path/-/common-ancestor-path-1.0.1.tgz#4f7d2d1394d91b7abdf51871c62f71eadb0182a7" + integrity sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w== + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +console-control-strings@^1.0.0, console-control-strings@^1.1.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= + +content-disposition@0.5.3: + version "0.5.3" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd" + integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g== + dependencies: + safe-buffer "5.1.2" + +content-type@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== + +convert-source-map@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442" + integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA== + dependencies: + safe-buffer "~5.1.1" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw= + +cookie@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba" + integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg== + +core-util-is@1.0.2, core-util-is@~1.0.0: + version 
"1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= + +cpu-features@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.2.tgz#9f636156f1155fd04bdbaa028bb3c2fbef3cea7a" + integrity sha512-/2yieBqvMcRj8McNzkycjW2v3OIUOibBfd2dLEJ0nWts8NobAxwiyw9phVNS6oDL8x8tz9F7uNVFEVpJncQpeA== + dependencies: + nan "^2.14.1" + +create-require@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" + integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== + +cross-spawn@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-4.0.2.tgz#7b9247621c23adfdd3856004a823cbe397424d41" + integrity sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE= + dependencies: + lru-cache "^4.0.1" + which "^1.2.9" + +cross-spawn@^7.0.0, cross-spawn@^7.0.2: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= + dependencies: + assert-plus "^1.0.0" + +debug@2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@4, debug@4.3.1, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" + integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== + dependencies: + ms "2.1.2" + +debug@^3.2.6: + version "3.2.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" + integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== + dependencies: + ms "^2.1.1" + +debuglog@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492" + integrity sha1-qiT/uaw9+aI1GDfPstJ5NgzXhJI= + +decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= + +decamelize@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" + integrity sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ== + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= + +decompress-response@^3.3.0: + version "3.3.0" + resolved 
"https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= + dependencies: + mimic-response "^1.0.0" + +deep-eql@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-3.0.1.tgz#dfc9404400ad1c8fe023e7da1df1c147c4b444df" + integrity sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw== + dependencies: + type-detect "^4.0.0" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deep-is@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + +deepmerge@^3.2.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-3.3.0.tgz#d3c47fd6f3a93d517b14426b0628a17b0125f5f7" + integrity sha512-GRQOafGHwMHpjPx9iCvTgpu9NojZ49q794EEL94JVEw6VaeA8XTUyBKvAkOOjBX9oJNiV6G3P+T+tihFjo2TqA== + +default-require-extensions@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/default-require-extensions/-/default-require-extensions-3.0.0.tgz#e03f93aac9b2b6443fc52e5e4a37b3ad9ad8df96" + integrity sha512-ek6DpXq/SCpvjhpFsLFRVtIxJCRw6fUR42lYMVZuUMK7n8eMz4Uh5clckdBjEpLhn/gEBZo7hDJnJcwdKLKQjg== + dependencies: + strip-bom "^4.0.0" + +defaults@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" + integrity sha1-xlYFHpgX2f8I7YgUd/P+QBnz730= + dependencies: + clone "^1.0.2" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= + +depd@^1.1.2, depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA= + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups= + +dezalgo@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456" + integrity sha1-f3Qt4Gb8dIvI24IFad3c5Jvw1FY= + dependencies: + asap "^2.0.0" + wrappy "1" + +diff@5.0.0, diff@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.0.0.tgz#7ed6ad76d859d030787ec35855f5b1daf31d852b" + integrity sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w== + +diff@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" + integrity 
sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== + dependencies: + esutils "^2.0.2" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= + +ecc-jsbn@~0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk= + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= + +electron-to-chromium@^1.3.723: + version "1.3.749" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.749.tgz#0ecebc529ceb49dd2a7c838ae425236644c3439a" + integrity sha512-F+v2zxZgw/fMwPz/VUGIggG4ZndDsYy0vlpthi3tjmDZlcfbhN5mYW0evXUsBr2sUtuDANFtle410A9u/sd/4A== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k= + +encoding@^0.1.12: + version "0.1.13" + resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.13.tgz#56574afdd791f54a8e9b2785c0582a2d26210fa9" + integrity sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A== + dependencies: + iconv-lite "^0.6.2" + +enquirer@^2.3.5: + version "2.3.6" + resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" + integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg== + dependencies: + ansi-colors "^4.1.1" + +env-paths@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/env-paths/-/env-paths-2.2.1.tgz#420399d416ce1fbe9bc0a07c62fa68d67fd0f8f2" + integrity sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A== + +err-code@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" + integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA== + +es6-error@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/es6-error/-/es6-error-4.1.1.tgz#9e3af407459deed47e9a91f9b885a84eb05c561d" + integrity sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg== + +es6-promise@^4.2.8: + version "4.2.8" + resolved 
"https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.8.tgz#4eb21594c972bc40553d276e510539143db53e0a" + integrity sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w== + +escalade@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" + integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg= + +escape-string-regexp@4.0.0, escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +eslint-scope@^5.0.0, eslint-scope@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + +eslint-utils@^2.0.0, eslint-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27" + integrity sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg== + dependencies: + eslint-visitor-keys "^1.1.0" + +eslint-visitor-keys@^1.1.0, eslint-visitor-keys@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e" + integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ== + +eslint-visitor-keys@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz#f65328259305927392c938ed44eb0a5c9b2bd303" + integrity sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw== + +eslint@^7.28.0: + version "7.28.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-7.28.0.tgz#435aa17a0b82c13bb2be9d51408b617e49c1e820" + integrity sha512-UMfH0VSjP0G4p3EWirscJEQ/cHqnT/iuH6oNZOB94nBjWbMnhGEPxsZm1eyIW0C/9jLI0Fow4W5DXLjEI7mn1g== + dependencies: + "@babel/code-frame" "7.12.11" + "@eslint/eslintrc" "^0.4.2" + ajv "^6.10.0" + chalk "^4.0.0" + cross-spawn "^7.0.2" + debug "^4.0.1" + doctrine "^3.0.0" + enquirer "^2.3.5" + escape-string-regexp "^4.0.0" + eslint-scope "^5.1.1" + eslint-utils "^2.1.0" + eslint-visitor-keys "^2.0.0" + espree "^7.3.1" + esquery "^1.4.0" + esutils "^2.0.2" + fast-deep-equal "^3.1.3" + file-entry-cache "^6.0.1" + functional-red-black-tree "^1.0.1" + glob-parent "^5.1.2" + globals "^13.6.0" + ignore "^4.0.6" + import-fresh "^3.0.0" + imurmurhash "^0.1.4" + is-glob "^4.0.0" + js-yaml "^3.13.1" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.4.1" + lodash.merge "^4.6.2" + minimatch "^3.0.4" + natural-compare "^1.4.0" + optionator "^0.9.1" + 
progress "^2.0.0" + regexpp "^3.1.0" + semver "^7.2.1" + strip-ansi "^6.0.0" + strip-json-comments "^3.1.0" + table "^6.0.9" + text-table "^0.2.0" + v8-compile-cache "^2.0.3" + +espree@^7.3.0, espree@^7.3.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/espree/-/espree-7.3.1.tgz#f2df330b752c6f55019f8bd89b7660039c1bbbb6" + integrity sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g== + dependencies: + acorn "^7.4.0" + acorn-jsx "^5.3.1" + eslint-visitor-keys "^1.3.0" + +esprima@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +esquery@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.4.0.tgz#2148ffc38b82e8c7057dfed48425b3e61f0f24a5" + integrity sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w== + dependencies: + estraverse "^5.1.0" + +esrecurse@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== + dependencies: + estraverse "^5.2.0" + +estraverse@^4.1.1: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +estraverse@^5.1.0, estraverse@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.2.0.tgz#307df42547e6cc7324d3cf03c155d5cdb8c53880" + integrity sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= + +eventemitter3@^4.0.0: + version "4.0.7" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" + integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== + +express-joi-validator@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/express-joi-validator/-/express-joi-validator-2.0.1.tgz#4ac524f27a2afcd56fea973c318256b8993b82a8" + integrity sha512-oBrvkbmYutFnnT8neddWVa/duRrVHQujl2T3WBhF6/riEkeRE1oKvtdp/bfGEaOysdICyHkg5OCnrAlzkFB0aQ== + dependencies: + boom "2.6.x" + extend "2.0.x" + joi "6.x.x" + +express@^4.17.1: + version "4.17.1" + resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134" + integrity sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g== + dependencies: + accepts "~1.3.7" + array-flatten "1.1.1" + body-parser "1.19.0" + content-disposition "0.5.3" + content-type "~1.0.4" + cookie "0.4.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "~1.1.2" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler 
"~1.1.2" + fresh "0.5.2" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.5" + qs "6.7.0" + range-parser "~1.2.1" + safe-buffer "5.1.2" + send "0.17.1" + serve-static "1.14.1" + setprototypeof "1.1.1" + statuses "~1.5.0" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + +extend@2.0.x: + version "2.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-2.0.2.tgz#1b74985400171b85554894459c978de6ef453ab7" + integrity sha512-AgFD4VU+lVLP6vjnlNfF7OeInLTyeyckCNPEsuxz1vi786UuK/nk6ynPuhn/h+Ju9++TQyr5EpLRI14fc1QtTQ== + +extend@^3.0.2, extend@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= + +extsprintf@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" + integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= + +fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-glob@^3.1.1: + version "3.2.5" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.5.tgz#7939af2a656de79a4f1901903ee8adcaa7cb9661" + integrity sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.0" + merge2 "^1.3.0" + micromatch "^4.0.2" + picomatch "^2.2.1" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-levenshtein@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + +fastest-levenshtein@^1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz#9990f7d3a88cc5a9ffd1f1745745251700d497e2" + integrity sha512-On2N+BpYJ15xIC974QNVuYGMOlEVt4s0EOI3wwMqOmK1fdDY+FN/zltPV8vosq4ad4c/gJ1KHScUn/6AWIgiow== + +fastq@^1.6.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.11.0.tgz#bb9fb955a07130a918eb63c1f5161cc32a5d0858" + integrity sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g== + dependencies: + reusify "^1.0.4" + +file-entry-cache@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" + integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg== + dependencies: + flat-cache "^3.0.4" + +fill-range@^7.0.1: + version "7.0.1" + resolved 
"https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +finalhandler@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d" + integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.3" + statuses "~1.5.0" + unpipe "~1.0.0" + +find-cache-dir@^3.2.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-3.3.1.tgz#89b33fad4a4670daa94f855f7fbe31d6d84fe880" + integrity sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ== + dependencies: + commondir "^1.0.1" + make-dir "^3.0.2" + pkg-dir "^4.1.0" + +find-up@5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + +find-up@^4.0.0, find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +flat-cache@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.0.4.tgz#61b0338302b2fe9f957dcc32fc2a87f1c3048b11" + integrity sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg== + dependencies: + flatted "^3.1.0" + rimraf "^3.0.2" + +flat@^5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" + integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== + +flatted@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.1.1.tgz#c4b489e80096d9df1dfc97c79871aea7c617c469" + integrity sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA== + +follow-redirects@^1.0.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.1.tgz#d9114ded0a1cfdd334e164e6662ad02bfd91ff43" + integrity sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg== + +for-in@^0.1.3: + version "0.1.8" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1" + integrity sha1-2Hc5COMSVhCZUrH9ubP6hn0ndeE= + +for-in@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= + +for-own@^0.1.3: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + integrity sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4= + dependencies: + for-in "^1.0.1" + +foreground-child@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/foreground-child/-/foreground-child-2.0.0.tgz#71b32800c9f15aa8f2f83f4a6bd9bff35d861a53" + integrity sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA== + dependencies: + cross-spawn "^7.0.0" + signal-exit "^3.0.2" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= + +form-data@^2.5.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" + integrity sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +form-data@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" + integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +forwarded@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" + integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= + +from2@^2.1.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + integrity sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8= + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fromentries@^1.2.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/fromentries/-/fromentries-1.3.2.tgz#e4bca6808816bf8f93b52750f1127f5a6fd86e3a" + integrity sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg== + +fs-minipass@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.7.tgz#ccff8570841e7fe4265693da88936c55aed7f7c7" + integrity sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA== + dependencies: + minipass "^2.6.0" + +fs-minipass@^2.0.0, fs-minipass@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" + integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== + dependencies: + minipass "^3.0.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= + +fsevents@~2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + 
+functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= + +gauge@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-4.0.0.tgz#afba07aa0374a93c6219603b1fb83eaa2264d8f8" + integrity sha512-F8sU45yQpjQjxKkm1UOAhf0U/O0aFt//Fl7hsrNVto+patMHjs7dPI9mFOGUKbhrgKm0S3EjW3scMFuQmWSROw== + dependencies: + ansi-regex "^5.0.1" + aproba "^1.0.3 || ^2.0.0" + color-support "^1.1.2" + console-control-strings "^1.0.0" + has-unicode "^2.0.1" + signal-exit "^3.0.0" + string-width "^4.2.3" + strip-ansi "^6.0.1" + wide-align "^1.1.2" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c= + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-caller-file@^2.0.1, get-caller-file@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-func-name@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/get-func-name/-/get-func-name-2.0.0.tgz#ead774abee72e20409433a066366023dd6887a41" + integrity sha1-6td0q+5y4gQJQzoGY2YCPdaIekE= + +get-intrinsic@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.1.tgz#15f59f376f855c446963948f0d24cd3637b4abc6" + integrity sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.1" + +get-package-type@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" + integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== + +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + integrity sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ= + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= + dependencies: + assert-plus "^1.0.0" + +glob-parent@>=6.0.0, glob-parent@^3.0.0, glob-parent@^5.1.0, glob-parent@^5.1.2, glob-parent@~5.1.2: + version "6.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.0.tgz#f851b59b388e788f3a44d63fab50382b2859c33c" + integrity sha512-Hdd4287VEJcZXUwv1l8a+vXC1GjOQqXe+VS30w/ypihpcnu9M1n3xeYeJu5CBpeEQj2nAab2xxz28GuA3vp4Ww== + dependencies: + is-glob "^4.0.1" + +glob@7.1.7, glob@^7.0.3, glob@^7.1.1, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.1.7: + version "7.1.7" + resolved 
"https://registry.yarnpkg.com/glob/-/glob-7.1.7.tgz#3b193e9233f01d42d0b3f78294bbeeb418f94a90" + integrity sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" + integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globals@^13.6.0, globals@^13.9.0: + version "13.9.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-13.9.0.tgz#4bf2bf635b334a173fb1daf7c5e6b218ecdc06cb" + integrity sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA== + dependencies: + type-fest "^0.20.2" + +globby@^11.0.3: + version "11.0.3" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.3.tgz#9b1f0cb523e171dd1ad8c7b2a9fb4b644b9593cb" + integrity sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + +globby@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" + integrity sha1-9abXDoOV4hyFj7BInWTfAkJNUGw= + dependencies: + array-union "^1.0.1" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +got@^8.3.2: + version "8.3.2" + resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" + integrity sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw== + dependencies: + "@sindresorhus/is" "^0.7.0" + cacheable-request "^2.1.1" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + into-stream "^3.1.0" + is-retry-allowed "^1.1.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + mimic-response "^1.0.0" + p-cancelable "^0.4.0" + p-timeout "^2.0.1" + pify "^3.0.0" + safe-buffer "^5.1.1" + timed-out "^4.0.1" + url-parse-lax "^3.0.0" + url-to-options "^1.0.1" + +graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.2.6: + version "4.2.6" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" + integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== + +graceful-fs@^4.2.8: + version "4.2.8" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.8.tgz#e412b8d33f5e006593cbd3cee6df9f2cebbe802a" + integrity sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg== + +growl@1.10.5: + version "1.10.5" + resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" + integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== + +har-schema@^2.0.0: + version "2.0.0" + 
resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= + +har-validator@~5.1.3: + version "5.1.5" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd" + integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w== + dependencies: + ajv "^6.12.3" + har-schema "^2.0.0" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + integrity sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw== + +has-symbols@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423" + integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + integrity sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw== + dependencies: + has-symbol-support-x "^1.4.1" + +has-unicode@^2.0.0, has-unicode@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +hash-base@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.1.0.tgz#55c381d9e06e1d2997a883b4a3fddfe7f0d3af33" + integrity sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA== + dependencies: + inherits "^2.0.4" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +hasha@^5.0.0: + version "5.2.2" + resolved "https://registry.yarnpkg.com/hasha/-/hasha-5.2.2.tgz#a48477989b3b327aea3c04f53096d816d97522a1" + integrity sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ== + dependencies: + is-stream "^2.0.0" + type-fest "^0.8.0" + +he@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +hoek@2.x.x, hoek@>=6.1.3: + version "6.1.3" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-6.1.3.tgz#73b7d33952e01fe27a38b0457294b79dd8da242c" + integrity sha512-YXXAAhmF9zpQbC7LEcREFtXfGq5K1fmd+4PHkBq8NUqmzW3G+Dq10bI/i0KucLRwss3YYFQ0fSfoxBZYiGUqtQ== + +hosted-git-info@^4.0.1, 
hosted-git-info@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-4.0.2.tgz#5e425507eede4fea846b7262f0838456c4209961" + integrity sha512-c9OGXbZ3guC/xOlCg1Ci/VgWlwsqDv1yMQL1CWqXDL0hDjXuNcq0zuR4xqPSuasI3kqFDhqSyTjREz5gzq0fXg== + dependencies: + lru-cache "^6.0.0" + +html-escaper@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" + integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== + +http-cache-semantics@3.8.1: + version "3.8.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" + integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w== + +http-cache-semantics@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" + integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + +http-errors@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f" + integrity sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg== + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.1" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-errors@~1.7.2: + version "1.7.3" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.3.tgz#6c619e4f9c60308c38519498c14fbb10aacebb06" + integrity sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw== + dependencies: + depd "~1.1.2" + inherits "2.0.4" + setprototypeof "1.1.1" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-proxy-agent@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz#8a8c8ef7f5932ccf953c296ca8291b95aa74aa3a" + integrity sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg== + dependencies: + "@tootallnate/once" "1" + agent-base "6" + debug "4" + +http-proxy@^1.18.1: + version "1.18.1" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549" + integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ== + dependencies: + eventemitter3 "^4.0.0" + follow-redirects "^1.0.0" + requires-port "^1.0.0" + +http-signature@>=1.3.6, http-signature@~1.2.0: + version "1.3.6" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.3.6.tgz#cb6fbfdf86d1c974f343be94e87f7fc128662cf9" + integrity sha512-3adrsD6zqo4GsTqtO7FyrejHNv+NgiIfAfv68+jVlFmSr9OGy7zrxONceFRLKvnnZA5jbxQBX1u9PpB6Wi32Gw== + dependencies: + assert-plus "^1.0.0" + jsprim "^2.0.2" + sshpk "^1.14.1" + +https-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz#e2a90542abb68a762e0a0850f6c9edadfd8506b2" + integrity sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA== + dependencies: + agent-base "6" + debug "4" + +humanize-ms@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed" + integrity 
sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0= + dependencies: + ms "^2.0.0" + +iconv-lite@0.4.24, iconv-lite@^0.4.4: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +iconv-lite@^0.6.2: + version "0.6.3" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +ieee754@^1.1.13: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +ignore-walk@^3.0.1, ignore-walk@^3.0.3: + version "3.0.4" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.4.tgz#c9a09f69b7c7b479a5d74ac1a3c0d4236d2a6335" + integrity sha512-PY6Ii8o1jMRA1z4F2hRkH/xN59ox43DavKvD3oDpfurRlOJyAHpifIwpbdv1n4jt4ov0jSpw3kQ4GhJnpBL6WQ== + dependencies: + minimatch "^3.0.4" + +ignore-walk@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-4.0.1.tgz#fc840e8346cf88a3a9380c5b17933cd8f4d39fa3" + integrity sha512-rzDQLaW4jQbh2YrOFlJdCtX8qgJTehFRYiUB2r1osqTeDzV/3+Jh8fz1oAPzUThf3iku8Ds4IDqawI5d8mUiQw== + dependencies: + minimatch "^3.0.4" + +ignore@^4.0.6: + version "4.0.6" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" + integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== + +ignore@^5.1.4, ignore@^5.1.8: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + +import-fresh@^3.0.0, import-fresh@^3.2.1: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +indent-string@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + integrity sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok= + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +infer-owner@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" + integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A== + +inflight@^1.0.4: + version "1.0.6" + resolved 
"https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= + +ini@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ini/-/ini-2.0.0.tgz#e5fd556ecdd5726be978fa1001862eacb0a94bc5" + integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== + +ini@~1.3.0: + version "1.3.8" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + +init-package-json@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-2.0.5.tgz#78b85f3c36014db42d8f32117252504f68022646" + integrity sha512-u1uGAtEFu3VA6HNl/yUWw57jmKEMx8SKOxHhxjGnOFUiIlFnohKDFg4ZrPpv9wWqk44nDxGJAtqjdQFm+9XXQA== + dependencies: + npm-package-arg "^8.1.5" + promzard "^0.3.0" + read "~1.0.1" + read-package-json "^4.1.1" + semver "^7.3.5" + validate-npm-package-license "^3.0.4" + validate-npm-package-name "^3.0.0" + +into-stream@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" + integrity sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY= + dependencies: + from2 "^2.1.1" + p-is-promise "^1.1.0" + +ip-regex@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" + integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== + +ip@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + integrity sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo= + +ipaddr.js@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-buffer@^1.0.2, is-buffer@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== + +is-cidr@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/is-cidr/-/is-cidr-4.0.2.tgz#94c7585e4c6c77ceabf920f8cde51b8c0fda8814" + integrity sha512-z4a1ENUajDbEl/Q6/pVBpTR1nBjjEE1X7qb7bmWYanNnPoKAvUCPFKeXV6Fe4mgTkWKBqiHIcwsI3SndiO5FeA== + 
dependencies: + cidr-regex "^3.1.1" + +is-core-module@^2.2.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.4.0.tgz#8e9fc8e15027b011418026e98f0e6f4d86305cc1" + integrity sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A== + dependencies: + has "^1.0.3" + +is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-glob@^4.0.0, is-glob@^4.0.1, is-glob@~4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== + dependencies: + is-extglob "^2.1.1" + +is-lambda@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-lambda/-/is-lambda-1.0.1.tgz#3d9877899e6a53efc0160504cde15f82e6f061d5" + integrity sha1-PZh3iZ5qU+/AFgUEzeFfgubwYdU= + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-object@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.2.tgz#a56552e1c665c9e950b4a025461da87e72f86fcf" + integrity sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA== + +is-plain-obj@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= + +is-plain-obj@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + +is-plain-object@^2.0.1: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== + dependencies: + isobject "^3.0.1" + +is-retry-allowed@^1.1.0: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" + integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== + +is-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" + integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== + +is-typedarray@^1.0.0, is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= + +is-unicode-supported@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" + integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== + +is-windows@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" + integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== + +is@^3.2.1: + version "3.3.0" + resolved "https://registry.yarnpkg.com/is/-/is-3.3.0.tgz#61cff6dd3c4193db94a3d62582072b44e5645d79" + integrity sha512-nW24QBoPcFGGHJGUwnfpI7Yc5CdqWNdsyHQszVE/z2pKHXzh7FZ5GWhJqSyaQ9wMkQnsTx+kAI8bHlCX4tKdbg== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= + +istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.0.0-alpha.1: + version "3.0.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz#f5944a37c70b550b02a78a5c3b2055b280cec8ec" + integrity sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg== + +istanbul-lib-hook@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz#8f84c9434888cc6b1d0a9d7092a76d239ebf0cc6" + integrity sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ== + dependencies: + append-transform "^2.0.0" + +istanbul-lib-instrument@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz#873c6fff897450118222774696a3f28902d77c1d" + integrity sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ== + dependencies: + "@babel/core" "^7.7.5" + "@istanbuljs/schema" "^0.1.2" + istanbul-lib-coverage "^3.0.0" + semver "^6.3.0" + +istanbul-lib-processinfo@^2.0.2: + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.2.tgz#e1426514662244b2f25df728e8fd1ba35fe53b9c" + integrity sha512-kOwpa7z9hme+IBPZMzQ5vdQj8srYgAtaRqeI48NGmAQ+/5yKiHLV0QbYqQpxsdEF0+w14SoB8YbnHKcXE2KnYw== + dependencies: + archy "^1.0.0" + cross-spawn "^7.0.0" + istanbul-lib-coverage "^3.0.0-alpha.1" + make-dir "^3.0.0" + p-map "^3.0.0" + rimraf "^3.0.0" + uuid "^3.3.3" + +istanbul-lib-report@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6" + integrity sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw== + dependencies: + istanbul-lib-coverage "^3.0.0" + make-dir "^3.0.0" + supports-color "^7.1.0" + +istanbul-lib-source-maps@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz#75743ce6d96bb86dc7ee4352cf6366a23f0b1ad9" + integrity sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg== + dependencies: + debug "^4.1.1" + istanbul-lib-coverage "^3.0.0" + source-map "^0.6.1" + +istanbul-reports@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.0.2.tgz#d593210e5000683750cb09fc0644e4b6e27fd53b" + integrity sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw== + dependencies: + html-escaper "^2.0.0" + istanbul-lib-report "^3.0.0" + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + integrity sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w== + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + +joi@6.x.x, joi@>=17.4.0: + version "17.4.0" + resolved "https://registry.yarnpkg.com/joi/-/joi-17.4.0.tgz#b5c2277c8519e016316e49ababd41a1908d9ef20" + integrity sha512-F4WiW2xaV6wc1jxete70Rw4V/VuMd6IN+a5ilZsxG4uYtUXWu2kq9W5P2dz30e7Gmw8RCbY/u/uk+dMPma9tAg== + dependencies: + "@hapi/hoek" "^9.0.0" + "@hapi/topo" "^5.0.0" + "@sideway/address" "^4.1.0" + "@sideway/formula" "^3.0.0" + "@sideway/pinpoint" "^2.0.0" + +js-base64@*, js-base64@^3.6.1: + version "3.6.1" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-3.6.1.tgz#555aae398b74694b4037af1f8a5a6209d170efbe" + integrity sha512-Frdq2+tRRGLQUIQOgsIGSCd1VePCS2fsddTG5dTCqR0JHgltXWfsxnY0gIXPoMeRmdom6Oyq+UMOFg5suduOjQ== + +js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-yaml@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + +js-yaml@^3.13.0, js-yaml@^3.13.1: + version "3.14.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= + +json-edm-parser@0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/json-edm-parser/-/json-edm-parser-0.1.2.tgz#1e60b0fef1bc0af67bc0d146dfdde5486cd615b4" + integrity sha1-HmCw/vG8CvZ7wNFG393lSGzWFbQ= + dependencies: + jsonparse "~1.2.0" + +json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + +json-schema@0.4.0, json-schema@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= + +json-stringify-nice@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/json-stringify-nice/-/json-stringify-nice-1.1.4.tgz#2c937962b80181d3f317dd39aa323e14f5a60a67" + integrity sha512-5Z5RFW63yxReJ7vANgW6eZFGWaQvnPE3WNmZoOJrSkGju2etKA2L5rrOa1sm877TVTFt57A80BH1bArcmlLfPw== + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= + +json5@^2.1.2: + version "2.2.0" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.0.tgz#2dfefe720c6ba525d9ebd909950f0515316c89a3" + integrity sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA== + dependencies: + minimist "^1.2.5" + +jsonparse@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.3.1.tgz#3f4dae4a91fac315f71062f8521cc239f1366280" + integrity sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA= + +jsonparse@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.2.0.tgz#5c0c5685107160e72fe7489bddea0b44c2bc67bd" + integrity 
sha1-XAxWhRBxYOcv50ib3eoLRMK8Z70= + +jsprim@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-2.0.2.tgz#77ca23dbcd4135cd364800d22ff82c2185803d4d" + integrity sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ== + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.4.0" + verror "1.10.0" + +just-diff-apply@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/just-diff-apply/-/just-diff-apply-4.0.1.tgz#da89c5a4ccb14aa8873c70e2c3b6695cef45dab5" + integrity sha512-AKOkzB5P6FkfP21UlZVX/OPXx/sC2GagpLX9cBxqHqDuRjwmZ/AJRKSNrB9jHPpRW1W1ONs6gly1gW46t055nQ== + +just-diff@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/just-diff/-/just-diff-5.0.1.tgz#db8fe1cfeea1156f2374bfb289826dca28e7e390" + integrity sha512-X00TokkRIDotUIf3EV4xUm6ELc/IkqhS/vPSHdWnsM5y0HoNMfEqrazizI7g78lpHvnRSRt/PFfKtRqJCOGIuQ== + +keyv@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" + integrity sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA== + dependencies: + json-buffer "3.0.0" + +kind-of@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5" + integrity sha1-AY7HpM5+OobLkUG+UZ0kyPqpgbU= + dependencies: + is-buffer "^1.0.2" + +kind-of@^3.0.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= + dependencies: + is-buffer "^1.1.5" + +kubernetes-client@^6.12.1: + version "6.12.1" + resolved "https://registry.yarnpkg.com/kubernetes-client/-/kubernetes-client-6.12.1.tgz#236c4ed5c0417035a4b5c71bbe81549b3bef7cf2" + integrity sha512-NA5e2zaL+BWJsboPDL/+Uv8AF60inOQjKvzHwRhQWJHlg2deaxguyMU3MKi9NEptltkv23Pm0871wuRcDMYrzA== + dependencies: + deepmerge "^3.2.0" + js-yaml "^3.13.0" + openid-client "^2.4.4" + qs "^6.7.0" + request "^2.88.0" + swagger-fluent "^3.1.2" + url-join "^4.0.0" + ws "^6.2.1" + +lazy-cache@^0.2.3: + version "0.2.7" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65" + integrity sha1-f+3fLctu23fRHvHRF6tf/fCrG2U= + +lazy-cache@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + integrity sha1-odePw6UEdMuAhF07O24dpJpEbo4= + +levn@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade" + integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== + dependencies: + prelude-ls "^1.2.1" + type-check "~0.4.0" + +libnpmaccess@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/libnpmaccess/-/libnpmaccess-4.0.3.tgz#dfb0e5b0a53c315a2610d300e46b4ddeb66e7eec" + integrity sha512-sPeTSNImksm8O2b6/pf3ikv4N567ERYEpeKRPSmqlNt1dTZbvgpJIzg5vAhXHpw2ISBsELFRelk0jEahj1c6nQ== + dependencies: + aproba "^2.0.0" + minipass "^3.1.1" + npm-package-arg "^8.1.2" + npm-registry-fetch "^11.0.0" + +libnpmdiff@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/libnpmdiff/-/libnpmdiff-2.0.4.tgz#bb1687992b1a97a8ea4a32f58ad7c7f92de53b74" + integrity sha512-q3zWePOJLHwsLEUjZw3Kyu/MJMYfl4tWCg78Vl6QGSfm4aXBUSVzMzjJ6jGiyarsT4d+1NH4B1gxfs62/+y9iQ== + dependencies: + "@npmcli/disparity-colors" "^1.0.1" 
+ "@npmcli/installed-package-contents" "^1.0.7" + binary-extensions "^2.2.0" + diff "^5.0.0" + minimatch "^3.0.4" + npm-package-arg "^8.1.1" + pacote "^11.3.0" + tar "^6.1.0" + +libnpmexec@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/libnpmexec/-/libnpmexec-3.0.1.tgz#bc2fddf1b7bd2c1b2c43b4b726ec4cf11920ad0a" + integrity sha512-VUZTpkKBRPv3Z9DIjbsiHhEQXmQ+OwSQ/yLCY9i6CFE8UIczWyE6wVxP5sJ5NSGtSTUs6I98WewQOL45OKMyxA== + dependencies: + "@npmcli/arborist" "^4.0.0" + "@npmcli/ci-detect" "^1.3.0" + "@npmcli/run-script" "^2.0.0" + chalk "^4.1.0" + mkdirp-infer-owner "^2.0.0" + npm-package-arg "^8.1.2" + pacote "^12.0.0" + proc-log "^1.0.0" + read "^1.0.7" + read-package-json-fast "^2.0.2" + walk-up-path "^1.0.0" + +libnpmfund@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/libnpmfund/-/libnpmfund-2.0.1.tgz#3c7e2be61e8c79e22c4918dde91ef57f64faf064" + integrity sha512-OhDbjB3gqdRyuQ56AhUtO49HZ7cZHSM7yCnhQa1lsNpmAmGPnjCImfx8SoWaAkUM7Ov8jngMR5JHKAr1ddjHTQ== + dependencies: + "@npmcli/arborist" "^4.0.0" + +libnpmhook@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/libnpmhook/-/libnpmhook-6.0.3.tgz#1d7f0d7e6a7932fbf7ce0881fdb0ed8bf8748a30" + integrity sha512-3fmkZJibIybzmAvxJ65PeV3NzRc0m4xmYt6scui5msocThbEp4sKFT80FhgrCERYDjlUuFahU6zFNbJDHbQ++g== + dependencies: + aproba "^2.0.0" + npm-registry-fetch "^11.0.0" + +libnpmorg@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/libnpmorg/-/libnpmorg-2.0.3.tgz#4e605d4113dfa16792d75343824a0625c76703bc" + integrity sha512-JSGl3HFeiRFUZOUlGdiNcUZOsUqkSYrg6KMzvPZ1WVZ478i47OnKSS0vkPmX45Pai5mTKuwIqBMcGWG7O8HfdA== + dependencies: + aproba "^2.0.0" + npm-registry-fetch "^11.0.0" + +libnpmpack@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/libnpmpack/-/libnpmpack-3.0.0.tgz#b1cdf182106bc0d25910e79bb5c9b6c23cd71670" + integrity sha512-W6lt4blkR9YXu/qOrFknfnKBajz/1GvAc5q1XcWTGuBJn2DYKDWHtA7x1fuMQdn7hKDBOPlZ/Aqll+ZvAnrM6g== + dependencies: + "@npmcli/run-script" "^2.0.0" + npm-package-arg "^8.1.0" + pacote "^12.0.0" + +libnpmpublish@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-4.0.2.tgz#be77e8bf5956131bcb45e3caa6b96a842dec0794" + integrity sha512-+AD7A2zbVeGRCFI2aO//oUmapCwy7GHqPXFJh3qpToSRNU+tXKJ2YFUgjt04LPPAf2dlEH95s6EhIHM1J7bmOw== + dependencies: + normalize-package-data "^3.0.2" + npm-package-arg "^8.1.2" + npm-registry-fetch "^11.0.0" + semver "^7.1.3" + ssri "^8.0.1" + +libnpmsearch@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/libnpmsearch/-/libnpmsearch-3.1.2.tgz#aee81b9e4768750d842b627a3051abc89fdc15f3" + integrity sha512-BaQHBjMNnsPYk3Bl6AiOeVuFgp72jviShNBw5aHaHNKWqZxNi38iVNoXbo6bG/Ccc/m1To8s0GtMdtn6xZ1HAw== + dependencies: + npm-registry-fetch "^11.0.0" + +libnpmteam@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/libnpmteam/-/libnpmteam-2.0.4.tgz#9dbe2e18ae3cb97551ec07d2a2daf9944f3edc4c" + integrity sha512-FPrVJWv820FZFXaflAEVTLRWZrerCvfe7ZHSMzJ/62EBlho2KFlYKjyNEsPW3JiV7TLSXi3vo8u0gMwIkXSMTw== + dependencies: + aproba "^2.0.0" + npm-registry-fetch "^11.0.0" + +libnpmversion@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/libnpmversion/-/libnpmversion-2.0.1.tgz#20b1425d88cd99c66806a54b458d2d654066b550" + integrity sha512-uFGtNTe/m0GOIBQCE4ryIsgGNJdeShW+qvYtKNLCCuiG7JY3YEslL/maFFZbaO4wlQa/oj1t0Bm9TyjahvtgQQ== + dependencies: + "@npmcli/git" "^2.0.7" + "@npmcli/run-script" "^2.0.0" + json-parse-even-better-errors "^2.3.1" + semver "^7.3.5" + stringify-package 
"^1.0.1" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +locate-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lockfile@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.4.tgz#07f819d25ae48f87e538e6578b6964a4981a5609" + integrity sha512-cvbTwETRfsFh4nHsL1eGWapU1XFi5Ot9E85sWAwia7Y7EgB7vfqcZhTKZ+l7hCGxSPoushMv5GKhT5PdLv03WA== + dependencies: + signal-exit "^3.0.2" + +lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + integrity sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= + +lodash.flattendeep@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz#fb030917f86a3134e5bc9bec0d69e0013ddfedb2" + integrity sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI= + +lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash.truncate@^4.4.2: + version "4.4.2" + resolved "https://registry.yarnpkg.com/lodash.truncate/-/lodash.truncate-4.4.2.tgz#5a350da0b1113b837ecfffd5812cbe58d6eae193" + integrity sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM= + +lodash@^4.17.11, lodash@^4.17.15: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +log-symbols@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" + integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== + dependencies: + chalk "^4.1.0" + is-unicode-supported "^0.1.0" + +long@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" + integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== + +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" + integrity sha1-TjNms55/VFfjXxMkvfb4jQv8cwY= + +lowercase-keys@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== + +lru-cache@^4.0.1: + version "4.1.5" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd" + integrity sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g== + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +lru-cache@^5.1.1: + version "5.1.1" 
+ resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +make-dir@^3.0.0, make-dir@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" + integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== + dependencies: + semver "^6.0.0" + +make-error@^1.1.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== + +make-fetch-happen@^9.0.1: + version "9.0.2" + resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-9.0.2.tgz#aa8c0e4a5e3a5f2be86c54d3abed44fe5a32ad5d" + integrity sha512-UkAWAuXPXSSlVviTjH2We20mtj1NnZW2Qq/oTY2dyMbRQ5CR3Xed3akCDMnM7j6axrMY80lhgM7loNE132PfAw== + dependencies: + agentkeepalive "^4.1.3" + cacache "^15.2.0" + http-cache-semantics "^4.1.0" + http-proxy-agent "^4.0.1" + https-proxy-agent "^5.0.0" + is-lambda "^1.0.1" + lru-cache "^6.0.0" + minipass "^3.1.3" + minipass-collect "^1.0.2" + minipass-fetch "^1.3.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + negotiator "^0.6.2" + promise-retry "^2.0.1" + socks-proxy-agent "^5.0.0" + ssri "^8.0.0" + +make-fetch-happen@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz#53085a09e7971433e6765f7971bf63f4e05cb968" + integrity sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg== + dependencies: + agentkeepalive "^4.1.3" + cacache "^15.2.0" + http-cache-semantics "^4.1.0" + http-proxy-agent "^4.0.1" + https-proxy-agent "^5.0.0" + is-lambda "^1.0.1" + lru-cache "^6.0.0" + minipass "^3.1.3" + minipass-collect "^1.0.2" + minipass-fetch "^1.3.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + negotiator "^0.6.2" + promise-retry "^2.0.1" + socks-proxy-agent "^6.0.0" + ssri "^8.0.0" + +md5.js@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.4.tgz#e9bdbde94a20a5ac18b04340fc5764d5b09d901d" + integrity sha1-6b296UogpawYsENA/Fdk1bCdkB0= + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g= + +merge-deep@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/merge-deep/-/merge-deep-3.0.3.tgz#1a2b2ae926da8b2ae93a0ac15d90cd1922766003" + integrity sha512-qtmzAS6t6grwEkNrunqTBdn0qKwFgNWvlxUbAV8es9M7Ot1EbyApytCnvE0jALPa46ZpKDUo527kKiaWplmlFA== + dependencies: + arr-union "^3.1.0" + clone-deep "^0.2.4" + kind-of "^3.0.2" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E= + 
+merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= + +micromatch@^4.0.2: + version "4.0.4" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.4.tgz#896d519dfe9db25fce94ceb7a500919bf881ebf9" + integrity sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg== + dependencies: + braces "^3.0.1" + picomatch "^2.2.3" + +mime-db@1.48.0: + version "1.48.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.48.0.tgz#e35b31045dd7eada3aaad537ed88a33afbef2d1d" + integrity sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ== + +mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24: + version "2.1.31" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.31.tgz#a00d76b74317c61f9c2db2218b8e9f8e9c5c9e6b" + integrity sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg== + dependencies: + mime-db "1.48.0" + +mime@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +mimic-response@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" + integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== + +minimatch@3.0.4, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimist@^1.2.0, minimist@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" + integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + +minipass-collect@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-1.0.2.tgz#22b813bf745dc6edba2576b940022ad6edc8c617" + integrity sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA== + dependencies: + minipass "^3.0.0" + +minipass-fetch@^1.3.0, minipass-fetch@^1.3.2: + version "1.3.3" + resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-1.3.3.tgz#34c7cea038c817a8658461bf35174551dce17a0a" + integrity sha512-akCrLDWfbdAWkMLBxJEeWTdNsjML+dt5YgOI4gJ53vuO0vrmYQkUPxa6j6V65s9CcePIr2SSWqjT2EcrNseryQ== + dependencies: + minipass "^3.1.0" + minipass-sized "^1.0.3" + minizlib "^2.0.0" + optionalDependencies: + encoding "^0.1.12" + +minipass-flush@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/minipass-flush/-/minipass-flush-1.0.5.tgz#82e7135d7e89a50ffe64610a787953c4c4cbb373" + integrity sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw== + dependencies: + minipass "^3.0.0" + 
+minipass-json-stream@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minipass-json-stream/-/minipass-json-stream-1.0.1.tgz#7edbb92588fbfc2ff1db2fc10397acb7b6b44aa7" + integrity sha512-ODqY18UZt/I8k+b7rl2AENgbWE8IDYam+undIJONvigAz8KR5GWblsFTEfQs0WODsjbSXWlm+JHEv8Gr6Tfdbg== + dependencies: + jsonparse "^1.3.1" + minipass "^3.0.0" + +minipass-pipeline@^1.2.2, minipass-pipeline@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz#68472f79711c084657c067c5c6ad93cddea8214c" + integrity sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A== + dependencies: + minipass "^3.0.0" + +minipass-sized@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/minipass-sized/-/minipass-sized-1.0.3.tgz#70ee5a7c5052070afacfbc22977ea79def353b70" + integrity sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g== + dependencies: + minipass "^3.0.0" + +minipass@^2.6.0, minipass@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6" + integrity sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg== + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minipass@^3.0.0, minipass@^3.1.0, minipass@^3.1.1, minipass@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.3.tgz#7d42ff1f39635482e15f9cdb53184deebd5815fd" + integrity sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg== + dependencies: + yallist "^4.0.0" + +minipass@^3.1.6: + version "3.1.6" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.6.tgz#3b8150aa688a711a1521af5e8779c1d3bb4f45ee" + integrity sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ== + dependencies: + yallist "^4.0.0" + +minizlib@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d" + integrity sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q== + dependencies: + minipass "^2.9.0" + +minizlib@^2.0.0, minizlib@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-2.1.2.tgz#e90d3466ba209b932451508a11ce3d3632145931" + integrity sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg== + dependencies: + minipass "^3.0.0" + yallist "^4.0.0" + +mixin-object@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e" + integrity sha1-T7lJRB2rGCVA8f4DW6YOGUel5X4= + dependencies: + for-in "^0.1.3" + is-extendable "^0.1.1" + +mkdirp-infer-owner@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mkdirp-infer-owner/-/mkdirp-infer-owner-2.0.0.tgz#55d3b368e7d89065c38f32fd38e638f0ab61d316" + integrity sha512-sdqtiFt3lkOaYvTXSRIUjkIdPTcxgv5+fgqYE/5qgwdw12cOrAuzzgzvVExIkH/ul1oeHN3bCLOWSG3XOqbKKw== + dependencies: + chownr "^2.0.0" + infer-owner "^1.0.4" + mkdirp "^1.0.3" + +mkdirp@^0.5.1, mkdirp@^0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" + integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== + dependencies: + minimist "^1.2.5" + 
+mkdirp@^1.0.3, mkdirp@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + +mocha@^9.0.2: + version "9.0.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-9.0.2.tgz#e84849b61f406a680ced85af76425f6f3108d1a0" + integrity sha512-FpspiWU+UT9Sixx/wKimvnpkeW0mh6ROAKkIaPokj3xZgxeRhcna/k5X57jJghEr8X+Cgu/Vegf8zCX5ugSuTA== + dependencies: + "@ungap/promise-all-settled" "1.1.2" + ansi-colors "4.1.1" + browser-stdout "1.3.1" + chokidar "3.5.2" + debug "4.3.1" + diff "5.0.0" + escape-string-regexp "4.0.0" + find-up "5.0.0" + glob "7.1.7" + growl "1.10.5" + he "1.2.0" + js-yaml "4.1.0" + log-symbols "4.1.0" + minimatch "3.0.4" + ms "2.1.3" + nanoid "3.1.23" + serialize-javascript "6.0.0" + strip-json-comments "3.1.1" + supports-color "8.1.1" + which "2.0.2" + wide-align "1.1.3" + workerpool "6.1.5" + yargs "16.2.0" + yargs-parser "20.2.4" + yargs-unparser "2.0.0" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + +ms@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +ms@2.1.3, ms@^2.0.0, ms@^2.1.1, ms@^2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +mute-stream@~0.0.4: + version "0.0.8" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" + integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== + +nan@^2.14.1: + version "2.14.2" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" + integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== + +nan@^2.15.0: + version "2.15.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.15.0.tgz#3f34a473ff18e15c1b5626b62903b5ad6e665fee" + integrity sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ== + +nanoid@3.1.23: + version "3.1.23" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.23.tgz#f744086ce7c2bc47ee0a8472574d5c78e4183a81" + integrity sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw== + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= + +needle@^2.2.1: + version "2.6.0" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.6.0.tgz#24dbb55f2509e2324b4a99d61f413982013ccdbe" + integrity sha512-KKYdza4heMsEfSWD7VPUIz3zX2XDwOyX2d+geb4vrERZMT5RMU6ujjaD+I5Yr54uZxQ2w6XRTAhHBbSCyovZBg== + dependencies: + debug "^3.2.6" + iconv-lite "^0.4.4" + sax "^1.2.4" + 
+negotiator@0.6.2, negotiator@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" + integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== + +node-addon-api@^3.0.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-3.2.1.tgz#81325e0a2117789c0128dab65e7e38f07ceba161" + integrity sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A== + +node-forge@>=0.10.0, node-forge@^0.8.5: + version "0.10.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" + integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA== + +node-gyp@3.x, node-gyp@>=8.4.1, node-gyp@^7.1.0, node-gyp@^8.2.0, node-gyp@^8.4.1: + version "8.4.1" + resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-8.4.1.tgz#3d49308fc31f768180957d6b5746845fbd429937" + integrity sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w== + dependencies: + env-paths "^2.2.0" + glob "^7.1.4" + graceful-fs "^4.2.6" + make-fetch-happen "^9.1.0" + nopt "^5.0.0" + npmlog "^6.0.0" + rimraf "^3.0.2" + semver "^7.3.5" + tar "^6.1.2" + which "^2.0.2" + +node-jose@^1.1.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/node-jose/-/node-jose-1.1.4.tgz#af3f44a392e586d26b123b0e12dc09bef1e9863b" + integrity sha512-L31IFwL3pWWcMHxxidCY51ezqrDXMkvlT/5pLTfNw5sXmmOLJuN6ug7txzF/iuZN55cRpyOmoJrotwBQIoo5Lw== + dependencies: + base64url "^3.0.1" + browserify-zlib "^0.2.0" + buffer "^5.5.0" + es6-promise "^4.2.8" + lodash "^4.17.15" + long "^4.0.0" + node-forge "^0.8.5" + process "^0.11.10" + react-zlib-js "^1.0.4" + uuid "^3.3.3" + +node-pre-gyp@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.11.0.tgz#db1f33215272f692cd38f03238e3e9b47c5dd054" + integrity sha512-TwWAOZb0j7e9eGaf9esRx3ZcLaE5tQ2lvYy1pb5IAaG1a2e2Kv5Lms1Y4hpj+ciXJRofIxxlt5haeQ/2ANeE0Q== + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.1" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +node-preload@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/node-preload/-/node-preload-0.2.1.tgz#c03043bb327f417a18fee7ab7ee57b408a144301" + integrity sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ== + dependencies: + process-on-spawn "^1.0.0" + +node-releases@^1.1.71: + version "1.1.72" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.72.tgz#14802ab6b1039a79a0c7d662b610a5bbd76eacbe" + integrity sha512-LLUo+PpH3dU6XizX3iVoubUNheF/owjXCZZ5yACDxNnPtgFuludV1ZL3ayK1kVep42Rmm0+R9/Y60NQbZ2bifw== + +node-version@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/node-version/-/node-version-1.2.0.tgz#34fde3ffa8e1149bd323983479dda620e1b5060d" + integrity sha512-ma6oU4Sk0qOoKEAymVoTvk8EdXEobdS7m/mAGhDJ8Rouugho48crHBORAmy5BoOcv8wraPM6xumapQp5hl4iIQ== + +node.extend@1.0.8, node.extend@>=1.1.8: + version "2.0.2" + resolved "https://registry.yarnpkg.com/node.extend/-/node.extend-2.0.2.tgz#b4404525494acc99740f3703c496b7d5182cc6cc" + integrity sha512-pDT4Dchl94/+kkgdwyS2PauDFjZG0Hk0IcHIB+LkW27HLDtdoeMxHTxZh39DYbPP8UflWXWj9JcdDozF+YDOpQ== + dependencies: + has "^1.0.3" + is "^3.2.1" + +node.flow@1.2.3: + 
version "1.2.3" + resolved "https://registry.yarnpkg.com/node.flow/-/node.flow-1.2.3.tgz#e1c44a82aeca8d78b458a77fb3dc642f2eba2649" + integrity sha1-4cRKgq7KjXi0WKd/s9xkLy66Jkk= + dependencies: + node.extend "1.0.8" + +nopt@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.3.tgz#a375cad9d02fd921278d954c2254d5aa57e15e48" + integrity sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg== + dependencies: + abbrev "1" + osenv "^0.1.4" + +nopt@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-5.0.0.tgz#530942bb58a512fccafe53fe210f13a25355dc88" + integrity sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ== + dependencies: + abbrev "1" + +normalize-package-data@^3.0.0, normalize-package-data@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-3.0.2.tgz#cae5c410ae2434f9a6c1baa65d5bc3b9366c8699" + integrity sha512-6CdZocmfGaKnIHPVFhJJZ3GuR8SsLKvDANFp47Jmy51aKIr8akjAWTSxtpI+MBgBFdSMRyo4hMpDlT6dTffgZg== + dependencies: + hosted-git-info "^4.0.1" + resolve "^1.20.0" + semver "^7.3.4" + validate-npm-package-license "^3.0.1" + +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" + integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw== + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + +npm-audit-report@^2.1.5: + version "2.1.5" + resolved "https://registry.yarnpkg.com/npm-audit-report/-/npm-audit-report-2.1.5.tgz#a5b8850abe2e8452fce976c8960dd432981737b5" + integrity sha512-YB8qOoEmBhUH1UJgh1xFAv7Jg1d+xoNhsDYiFQlEFThEBui0W1vIz2ZK6FVg4WZjwEdl7uBQlm1jy3MUfyHeEw== + dependencies: + chalk "^4.0.0" + +npm-bundled@^1.0.1, npm-bundled@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.1.2.tgz#944c78789bd739035b70baa2ca5cc32b8d860bc1" + integrity sha512-x5DHup0SuyQcmL3s7Rx/YQ8sbw/Hzg0rj48eN0dV7hf5cmQq5PXIeioroH3raV1QC1yh3uTYuMThvEQF3iKgGQ== + dependencies: + npm-normalize-package-bin "^1.0.1" + +npm-install-checks@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-4.0.0.tgz#a37facc763a2fde0497ef2c6d0ac7c3fbe00d7b4" + integrity sha512-09OmyDkNLYwqKPOnbI8exiOZU2GVVmQp7tgez2BPi5OZC8M82elDAps7sxC4l//uSUtotWqoEIDwjRvWH4qz8w== + dependencies: + semver "^7.1.1" + +npm-normalize-package-bin@^1.0.0, npm-normalize-package-bin@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz#6e79a41f23fd235c0623218228da7d9c23b8f6e2" + integrity sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA== + +npm-package-arg@^8.0.0, npm-package-arg@^8.0.1, npm-package-arg@^8.1.0, npm-package-arg@^8.1.1, npm-package-arg@^8.1.2: + version "8.1.4" + resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-8.1.4.tgz#8001cdbc4363997b8ef6c6cf7aaf543c5805879d" + integrity 
sha512-xLokoCFqj/rPdr3LvcdDL6Kj6ipXGEDHD/QGpzwU6/pibYUOXmp5DBmg76yukFyx4ZDbrXNOTn+BPyd8TD4Jlw== + dependencies: + hosted-git-info "^4.0.1" + semver "^7.3.4" + validate-npm-package-name "^3.0.0" + +npm-package-arg@^8.1.5: + version "8.1.5" + resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-8.1.5.tgz#3369b2d5fe8fdc674baa7f1786514ddc15466e44" + integrity sha512-LhgZrg0n0VgvzVdSm1oiZworPbTxYHUJCgtsJW8mGvlDpxTM1vSJc3m5QZeUkhAHIzbz3VCHd/R4osi1L1Tg/Q== + dependencies: + hosted-git-info "^4.0.1" + semver "^7.3.4" + validate-npm-package-name "^3.0.0" + +npm-packlist@^1.1.6: + version "1.4.8" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.8.tgz#56ee6cc135b9f98ad3d51c1c95da22bbb9b2ef3e" + integrity sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A== + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + npm-normalize-package-bin "^1.0.1" + +npm-packlist@^2.1.4: + version "2.2.2" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-2.2.2.tgz#076b97293fa620f632833186a7a8f65aaa6148c8" + integrity sha512-Jt01acDvJRhJGthnUJVF/w6gumWOZxO7IkpY/lsX9//zqQgnF7OJaxgQXcerd4uQOLu7W5bkb4mChL9mdfm+Zg== + dependencies: + glob "^7.1.6" + ignore-walk "^3.0.3" + npm-bundled "^1.1.1" + npm-normalize-package-bin "^1.0.1" + +npm-packlist@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-3.0.0.tgz#0370df5cfc2fcc8f79b8f42b37798dd9ee32c2a9" + integrity sha512-L/cbzmutAwII5glUcf2DBRNY/d0TFd4e/FnaZigJV6JD85RHZXJFGwCndjMWiiViiWSsWt3tiOLpI3ByTnIdFQ== + dependencies: + glob "^7.1.6" + ignore-walk "^4.0.1" + npm-bundled "^1.1.1" + npm-normalize-package-bin "^1.0.1" + +npm-pick-manifest@^6.0.0, npm-pick-manifest@^6.1.0, npm-pick-manifest@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/npm-pick-manifest/-/npm-pick-manifest-6.1.1.tgz#7b5484ca2c908565f43b7f27644f36bb816f5148" + integrity sha512-dBsdBtORT84S8V8UTad1WlUyKIY9iMsAmqxHbLdeEeBNMLQDlDWWra3wYUx9EBEIiG/YwAy0XyNHDd2goAsfuA== + dependencies: + npm-install-checks "^4.0.0" + npm-normalize-package-bin "^1.0.1" + npm-package-arg "^8.1.2" + semver "^7.3.4" + +npm-profile@^5.0.3: + version "5.0.4" + resolved "https://registry.yarnpkg.com/npm-profile/-/npm-profile-5.0.4.tgz#73e5bd1d808edc2c382d7139049cc367ac43161b" + integrity sha512-OKtU7yoAEBOnc8zJ+/uo5E4ugPp09sopo+6y1njPp+W99P8DvQon3BJYmpvyK2Bf1+3YV5LN1bvgXRoZ1LUJBA== + dependencies: + npm-registry-fetch "^11.0.0" + +npm-registry-fetch@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-11.0.0.tgz#68c1bb810c46542760d62a6a965f85a702d43a76" + integrity sha512-jmlgSxoDNuhAtxUIG6pVwwtz840i994dL14FoNVZisrmZW5kWd63IUTNv1m/hyRSGSqWjCUp/YZlS1BJyNp9XA== + dependencies: + make-fetch-happen "^9.0.1" + minipass "^3.1.3" + minipass-fetch "^1.3.0" + minipass-json-stream "^1.0.1" + minizlib "^2.0.0" + npm-package-arg "^8.0.0" + +npm-user-validate@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-1.0.1.tgz#31428fc5475fe8416023f178c0ab47935ad8c561" + integrity sha512-uQwcd/tY+h1jnEaze6cdX/LrhWhoBxfSknxentoqmIuStxUExxjWd3ULMLFPiFUrZKbOVMowH6Jq2FRWfmhcEw== + +npm@>=8.3.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/npm/-/npm-8.3.0.tgz#03d32b0ddb07a5865726baf7149bb0475023df4d" + integrity sha512-ug4xToae4Dh3yZh8Fp6MOnAPSS3fqCTANpJx1fXP2C4LTUzoZf7rEantHQR/ANPVYDBe5qQT4tGVsoPqqiYZMw== + dependencies: + "@isaacs/string-locale-compare" "^1.1.0" + 
"@npmcli/arborist" "^4.1.1" + "@npmcli/ci-detect" "^1.4.0" + "@npmcli/config" "^2.3.2" + "@npmcli/map-workspaces" "^2.0.0" + "@npmcli/package-json" "^1.0.1" + "@npmcli/run-script" "^2.0.0" + abbrev "~1.1.1" + ansicolors "~0.3.2" + ansistyles "~0.1.3" + archy "~1.0.0" + cacache "^15.3.0" + chalk "^4.1.2" + chownr "^2.0.0" + cli-columns "^4.0.0" + cli-table3 "^0.6.0" + columnify "~1.5.4" + fastest-levenshtein "^1.0.12" + glob "^7.2.0" + graceful-fs "^4.2.8" + hosted-git-info "^4.0.2" + ini "^2.0.0" + init-package-json "^2.0.5" + is-cidr "^4.0.2" + json-parse-even-better-errors "^2.3.1" + libnpmaccess "^4.0.2" + libnpmdiff "^2.0.4" + libnpmexec "^3.0.1" + libnpmfund "^2.0.1" + libnpmhook "^6.0.2" + libnpmorg "^2.0.2" + libnpmpack "^3.0.0" + libnpmpublish "^4.0.1" + libnpmsearch "^3.1.1" + libnpmteam "^2.0.3" + libnpmversion "^2.0.1" + make-fetch-happen "^9.1.0" + minipass "^3.1.6" + minipass-pipeline "^1.2.4" + mkdirp "^1.0.4" + mkdirp-infer-owner "^2.0.0" + ms "^2.1.2" + node-gyp "^8.4.1" + nopt "^5.0.0" + npm-audit-report "^2.1.5" + npm-install-checks "^4.0.0" + npm-package-arg "^8.1.5" + npm-pick-manifest "^6.1.1" + npm-profile "^5.0.3" + npm-registry-fetch "^11.0.0" + npm-user-validate "^1.0.1" + npmlog "^6.0.0" + opener "^1.5.2" + pacote "^12.0.2" + parse-conflict-json "^2.0.1" + proc-log "^1.0.0" + qrcode-terminal "^0.12.0" + read "~1.0.7" + read-package-json "^4.1.1" + read-package-json-fast "^2.0.3" + readdir-scoped-modules "^1.1.0" + rimraf "^3.0.2" + semver "^7.3.5" + ssri "^8.0.1" + tar "^6.1.11" + text-table "~0.2.0" + tiny-relative-date "^1.3.0" + treeverse "^1.0.4" + validate-npm-package-name "~3.0.0" + which "^2.0.2" + write-file-atomic "^3.0.3" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +npmlog@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-6.0.0.tgz#ba9ef39413c3d936ea91553db7be49c34ad0520c" + integrity sha512-03ppFRGlsyUaQFbGC2C8QWJN/C/K7PsfyD9aQdhVKAQIH4sQBc8WASqFBP7O+Ut4d2oo5LoeoboB3cGdBZSp6Q== + dependencies: + are-we-there-yet "^2.0.0" + console-control-strings "^1.1.0" + gauge "^4.0.0" + set-blocking "^2.0.0" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= + +nyc@^15.1.0: + version "15.1.0" + resolved "https://registry.yarnpkg.com/nyc/-/nyc-15.1.0.tgz#1335dae12ddc87b6e249d5a1994ca4bdaea75f02" + integrity sha512-jMW04n9SxKdKi1ZMGhvUTHBN0EICCRkHemEoE5jm6mTYcqcdas0ATzgUgejlQUHMvpnOZqGB5Xxsv9KxJW1j8A== + dependencies: + "@istanbuljs/load-nyc-config" "^1.0.0" + "@istanbuljs/schema" "^0.1.2" + caching-transform "^4.0.0" + convert-source-map "^1.7.0" + decamelize "^1.2.0" + find-cache-dir "^3.2.0" + find-up "^4.1.0" + foreground-child "^2.0.0" + get-package-type "^0.1.0" + glob "^7.1.6" + istanbul-lib-coverage "^3.0.0" + istanbul-lib-hook "^3.0.0" + istanbul-lib-instrument "^4.0.0" + istanbul-lib-processinfo "^2.0.2" + istanbul-lib-report "^3.0.0" + istanbul-lib-source-maps "^4.0.0" + istanbul-reports "^3.0.2" + make-dir "^3.0.0" + node-preload "^0.2.1" + p-map "^3.0.0" + process-on-spawn "^1.0.0" + resolve-from "^5.0.0" + rimraf "^3.0.0" + signal-exit 
"^3.0.2" + spawn-wrap "^2.0.0" + test-exclude "^6.0.0" + yargs "^15.0.2" + +oauth-sign@~0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" + integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== + +object-assign@^4.0.1, object-assign@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +object-hash@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-1.3.1.tgz#fde452098a951cb145f039bb7d455449ddc126df" + integrity sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA== + +object-inspect@^1.9.0: + version "1.10.3" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.10.3.tgz#c2aa7d2d09f50c99375704f7a0adf24c5782d369" + integrity sha512-e5mCJlSH7poANfC8z8S9s9S2IN5/4Zb3aZ33f5s8YqoazCFzNLloLU8r5VCG+G7WoqLvAAZoVMcy3tp/3X0Plw== + +oidc-token-hash@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/oidc-token-hash/-/oidc-token-hash-3.0.2.tgz#5bd4716cc48ad433f4e4e99276811019b165697e" + integrity sha512-dTzp80/y/da+um+i+sOucNqiPpwRL7M/xPwj7pH1TFA2/bqQ+OK2sJahSXbemEoLtPkHcFLyhLhLWZa9yW5+RA== + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc= + dependencies: + ee-first "1.1.1" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +opener@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/opener/-/opener-1.5.2.tgz#5d37e1f35077b9dcac4301372271afdeb2a13598" + integrity sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A== + +openid-client@^2.4.4: + version "2.5.0" + resolved "https://registry.yarnpkg.com/openid-client/-/openid-client-2.5.0.tgz#7d4cf552b30dbad26917d7e2722422eda057ea93" + integrity sha512-t3hFD7xEoW1U25RyBcRFaL19fGGs6hNVTysq9pgmiltH0IVUPzH/bQV9w24pM5Q7MunnGv2/5XjIru6BQcWdxg== + dependencies: + base64url "^3.0.0" + got "^8.3.2" + lodash "^4.17.11" + lru-cache "^5.1.1" + node-jose "^1.1.0" + object-hash "^1.3.1" + oidc-token-hash "^3.0.1" + p-any "^1.1.0" + +optionator@^0.9.1: + version "0.9.1" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.1.tgz#4f236a6373dae0566a6d43e1326674f50c291499" + integrity sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw== + dependencies: + deep-is "^0.1.3" + fast-levenshtein "^2.0.6" + levn "^0.4.1" + prelude-ls "^1.2.1" + type-check "^0.4.0" + word-wrap "^1.2.3" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= + +os-tmpdir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + integrity 
sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-any@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-any/-/p-any-1.1.0.tgz#1d03835c7eed1e34b8e539c47b7b60d0d015d4e1" + integrity sha512-Ef0tVa4CZ5pTAmKn+Cg3w8ABBXh+hHO1aV8281dKOoUHfX+3tjG2EaFcC+aZyagg9b4EYGsHEjz21DnEE8Og2g== + dependencies: + p-some "^2.0.0" + +p-cancelable@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" + integrity sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ== + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= + +p-is-promise@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" + integrity sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4= + +p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" + +p-map@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-3.0.0.tgz#d704d9af8a2ba684e2600d9a215983d4141a979d" + integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ== + dependencies: + aggregate-error "^3.0.0" + +p-map@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" + integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== + dependencies: + aggregate-error "^3.0.0" + +p-some@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-some/-/p-some-2.0.1.tgz#65d87c8b154edbcf5221d167778b6d2e150f6f06" + integrity sha1-Zdh8ixVO289SIdFnd4ttLhUPbwY= + dependencies: + aggregate-error "^1.0.0" + +p-timeout@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" + integrity sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA== + dependencies: + p-finally "^1.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + 
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +package-hash@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/package-hash/-/package-hash-4.0.0.tgz#3537f654665ec3cc38827387fc904c163c54f506" + integrity sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ== + dependencies: + graceful-fs "^4.1.15" + hasha "^5.0.0" + lodash.flattendeep "^4.4.0" + release-zalgo "^1.0.0" + +pacote@^11.3.0: + version "11.3.4" + resolved "https://registry.yarnpkg.com/pacote/-/pacote-11.3.4.tgz#c290b790a5cee3082bb8fa223f3f3e2fdf3d0bfc" + integrity sha512-RfahPCunM9GI7ryJV/zY0bWQiokZyLqaSNHXtbNSoLb7bwTvBbJBEyCJ01KWs4j1Gj7GmX8crYXQ1sNX6P2VKA== + dependencies: + "@npmcli/git" "^2.0.1" + "@npmcli/installed-package-contents" "^1.0.6" + "@npmcli/promise-spawn" "^1.2.0" + "@npmcli/run-script" "^1.8.2" + cacache "^15.0.5" + chownr "^2.0.0" + fs-minipass "^2.1.0" + infer-owner "^1.0.4" + minipass "^3.1.3" + mkdirp "^1.0.3" + npm-package-arg "^8.0.1" + npm-packlist "^2.1.4" + npm-pick-manifest "^6.0.0" + npm-registry-fetch "^11.0.0" + promise-retry "^2.0.1" + read-package-json-fast "^2.0.1" + rimraf "^3.0.2" + ssri "^8.0.1" + tar "^6.1.0" + +pacote@^12.0.0, pacote@^12.0.2: + version "12.0.2" + resolved "https://registry.yarnpkg.com/pacote/-/pacote-12.0.2.tgz#14ae30a81fe62ec4fc18c071150e6763e932527c" + integrity sha512-Ar3mhjcxhMzk+OVZ8pbnXdb0l8+pimvlsqBGRNkble2NVgyqOGE3yrCGi/lAYq7E7NRDMz89R1Wx5HIMCGgeYg== + dependencies: + "@npmcli/git" "^2.1.0" + "@npmcli/installed-package-contents" "^1.0.6" + "@npmcli/promise-spawn" "^1.2.0" + "@npmcli/run-script" "^2.0.0" + cacache "^15.0.5" + chownr "^2.0.0" + fs-minipass "^2.1.0" + infer-owner "^1.0.4" + minipass "^3.1.3" + mkdirp "^1.0.3" + npm-package-arg "^8.0.1" + npm-packlist "^3.0.0" + npm-pick-manifest "^6.0.0" + npm-registry-fetch "^11.0.0" + promise-retry "^2.0.1" + read-package-json-fast "^2.0.1" + rimraf "^3.0.2" + ssri "^8.0.1" + tar "^6.1.0" + +pako@~1.0.5: + version "1.0.11" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" + integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw== + +parent-module@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-0.1.0.tgz#b5292863a1e8c476ecf857e7d75c98920b24b8a6" + integrity sha1-tSkoY6HoxHbs+Ffn11yYkgskuKY= + dependencies: + callsites "^1.0.0" + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-conflict-json@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/parse-conflict-json/-/parse-conflict-json-2.0.1.tgz#76647dd072e6068bcaff20be6ccea68a18e1fb58" + integrity sha512-Y7nYw+QaSGBto1LB9lgwOR05Rtz5SbuTf+Oe7HJ6SYQ/DHsvRjQ8O03oWdJbvkt6GzDWospgyZbGmjDYL0sDgA== + dependencies: + json-parse-even-better-errors "^2.3.1" + just-diff "^5.0.1" + just-diff-apply "^4.0.1" + +parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + +path-exists@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w= + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +pathval@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" + integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= + +picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3: + version "2.3.0" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.0.tgz#f1f061de8f6a4bf022892e2d128234fb98302972" + integrity sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw== + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY= + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pkg-dir@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== + dependencies: + find-up "^4.0.0" + +prelude-ls@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + integrity 
sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= + +proc-log@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-1.0.0.tgz#0d927307401f69ed79341e83a0b2c9a13395eb77" + integrity sha512-aCk8AO51s+4JyuYGg3Q/a6gnrlDO09NpVWePtjp7xwphcoQ04x5WAfCyugcsbLooWcMJ87CLkD4+604IckEdhg== + +process-nextick-args@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" + integrity sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M= + +process-nextick-args@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== + +process-on-spawn@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/process-on-spawn/-/process-on-spawn-1.0.0.tgz#95b05a23073d30a17acfdc92a440efd2baefdc93" + integrity sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg== + dependencies: + fromentries "^1.2.0" + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= + +progress@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" + integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== + +promise-all-reject-late@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-all-reject-late/-/promise-all-reject-late-1.0.1.tgz#f8ebf13483e5ca91ad809ccc2fcf25f26f8643c2" + integrity sha512-vuf0Lf0lOxyQREH7GDIOUMLS7kz+gs8i6B+Yi8dC68a2sychGrHTJYghMBD6k7eUcH0H5P73EckCA48xijWqXw== + +promise-call-limit@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-call-limit/-/promise-call-limit-1.0.1.tgz#4bdee03aeb85674385ca934da7114e9bcd3c6e24" + integrity sha512-3+hgaa19jzCGLuSCbieeRsu5C2joKfYn8pY6JAuXFRVfF4IO+L7UPpFWNTeWT9pM7uhskvbPPd/oEOktCn317Q== + +promise-inflight@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" + integrity sha1-mEcocL8igTL8vdhoEputEsPAKeM= + +promise-polyfill@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/promise-polyfill/-/promise-polyfill-6.1.0.tgz#dfa96943ea9c121fca4de9b5868cb39d3472e057" + integrity sha1-36lpQ+qcEh/KTem1hoyznTRy4Fc= + +promise-retry@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/promise-retry/-/promise-retry-2.0.1.tgz#ff747a13620ab57ba688f5fc67855410c370da22" + integrity sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g== + dependencies: + err-code "^2.0.2" + retry "^0.12.0" + +promzard@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee" + integrity sha1-JqXW7ox97kyxIggwWs+5O6OCqe4= + dependencies: + read "1" + +proxy-addr@~2.0.5: + version "2.0.7" + resolved 
"https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" + integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== + dependencies: + forwarded "0.2.0" + ipaddr.js "1.9.1" + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= + +psl@^1.1.28: + version "1.8.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.8.0.tgz#9326f8bcfb013adcc005fdff056acce020e51c24" + integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== + +punycode@^2.1.0, punycode@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== + +python-shell@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/python-shell/-/python-shell-3.0.0.tgz#4eb04b6e7e8878e715b9ccd782b15194555dd074" + integrity sha512-vlIkpJBwkhtG8d2rBbPEweg+3UXdkoduRZ0jLbIX3efYutBjTdmdmMrEQCQy9tkabH36yUjOhwTPFkH3BvoYZQ== + +qrcode-terminal@^0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/qrcode-terminal/-/qrcode-terminal-0.12.0.tgz#bb5b699ef7f9f0505092a3748be4464fe71b5819" + integrity sha512-EXtzRZmC+YGmGlDFbXKxQiMZNwCLEO6BANKXG4iCtSIM0yqc/pappSx3RIKr4r0uh5JsBckOXeKrB3Iz7mdQpQ== + +qs@6.7.0: + version "6.7.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.7.0.tgz#41dc1a015e3d581f1621776be31afb2876a9b1bc" + integrity sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ== + +qs@^6.7.0: + version "6.10.1" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.1.tgz#4931482fa8d647a5aab799c5271d2133b981fb6a" + integrity sha512-M528Hph6wsSVOBiYUnGf+K/7w0hNshs/duGsNXPUCLH5XAqjEtiPGwNONLV0tBH8NoGb0mvD5JubnUTrujKDTg== + dependencies: + side-channel "^1.0.4" + +qs@~6.5.2: + version "6.5.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" + integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== + +query-string@^5.0.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + integrity sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw== + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +queue-microtask@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +randombytes@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== + dependencies: + safe-buffer "^5.1.0" + +range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + 
+raw-body@2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.0.tgz#a1ce6fb9c9bc356ca52e89256ab59059e13d0332" + integrity sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q== + dependencies: + bytes "3.1.0" + http-errors "1.7.2" + iconv-lite "0.4.24" + unpipe "1.0.0" + +rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-zlib-js@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/react-zlib-js/-/react-zlib-js-1.0.5.tgz#7bb433e1a4ae53a8e6f361b3d36166baf5bbc60f" + integrity sha512-TLcPdmqhIl+ylwOwlfm1WUuI7NVvhAv3L74d1AabhjyaAbmLOROTA/Q4EQ/UMCFCOjIkVim9fT3UZOQSFk/mlA== + +read-cmd-shim@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-cmd-shim/-/read-cmd-shim-2.0.0.tgz#4a50a71d6f0965364938e9038476f7eede3928d9" + integrity sha512-HJpV9bQpkl6KwjxlJcBoqu9Ba0PQg8TqSNIOrulGt54a0uup0HtevreFHzYzkm0lpnleRdNBzXznKrgxglEHQw== + +read-package-json-fast@^2.0.1, read-package-json-fast@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/read-package-json-fast/-/read-package-json-fast-2.0.2.tgz#2dcb24d9e8dd50fb322042c8c35a954e6cc7ac9e" + integrity sha512-5fyFUyO9B799foVk4n6ylcoAktG/FbE3jwRKxvwaeSrIunaoMc0u81dzXxjeAFKOce7O5KncdfwpGvvs6r5PsQ== + dependencies: + json-parse-even-better-errors "^2.3.0" + npm-normalize-package-bin "^1.0.1" + +read-package-json-fast@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/read-package-json-fast/-/read-package-json-fast-2.0.3.tgz#323ca529630da82cb34b36cc0b996693c98c2b83" + integrity sha512-W/BKtbL+dUjTuRL2vziuYhp76s5HZ9qQhd/dKfWIZveD0O40453QNyZhC0e63lqZrAQ4jiOapVoeJ7JrszenQQ== + dependencies: + json-parse-even-better-errors "^2.3.0" + npm-normalize-package-bin "^1.0.1" + +read-package-json@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-4.1.1.tgz#153be72fce801578c1c86b8ef2b21188df1b9eea" + integrity sha512-P82sbZJ3ldDrWCOSKxJT0r/CXMWR0OR3KRh55SgKo3p91GSIEEC32v3lSHAvO/UcH3/IoL7uqhOFBduAnwdldw== + dependencies: + glob "^7.1.1" + json-parse-even-better-errors "^2.3.0" + normalize-package-data "^3.0.0" + npm-normalize-package-bin "^1.0.0" + +read@1, read@^1.0.7, read@~1.0.1, read@~1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4" + integrity sha1-s9oZvQUkMal2cdRKQmNK33ELQMQ= + dependencies: + mute-stream "~0.0.4" + +readable-stream@^2.0.0, readable-stream@^2.0.6: + version "2.3.7" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" + integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readable-stream@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== + dependencies: + inherits 
"^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + +readable-stream@~2.0.0: + version "2.0.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e" + integrity sha1-j5A0HmilPMySh4jaz80Rs265t44= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + string_decoder "~0.10.x" + util-deprecate "~1.0.1" + +readdir-scoped-modules@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.1.0.tgz#8d45407b4f870a0dcaebc0e28670d18e74514309" + integrity sha512-asaikDeqAQg7JifRsZn1NJZXo9E+VwlyCfbkZhwyISinqk5zNS6266HS5kah6P0SaQKGF6SkNnZVHUzHFYxYDw== + dependencies: + debuglog "^1.0.1" + dezalgo "^1.0.0" + graceful-fs "^4.1.2" + once "^1.3.0" + +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== + dependencies: + picomatch "^2.2.1" + +reflect-metadata@^0.1.13: + version "0.1.13" + resolved "https://registry.yarnpkg.com/reflect-metadata/-/reflect-metadata-0.1.13.tgz#67ae3ca57c972a2aa1642b10fe363fe32d49dc08" + integrity sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg== + +regexpp@^3.0.0, regexpp@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.1.0.tgz#206d0ad0a5648cffbdb8ae46438f3dc51c9f78e2" + integrity sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q== + +release-zalgo@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/release-zalgo/-/release-zalgo-1.0.0.tgz#09700b7e5074329739330e535c5a90fb67851730" + integrity sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA= + dependencies: + es6-error "^4.0.1" + +request@^2.86.0, request@^2.88.0, request@^2.88.2: + version "2.88.2" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" + integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.8.0" + caseless "~0.12.0" + combined-stream "~1.0.6" + extend "~3.0.2" + forever-agent "~0.6.1" + form-data "~2.3.2" + har-validator "~5.1.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.19" + oauth-sign "~0.9.0" + performance-now "^2.1.0" + qs "~6.5.2" + safe-buffer "^5.1.2" + tough-cookie "~2.5.0" + tunnel-agent "^0.6.0" + uuid "^3.3.2" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= + +require-from-string@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + +require-glob@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/require-glob/-/require-glob-3.2.0.tgz#90bfe2c8efb4b9f972eb9a3f5e580832e04f64d3" + integrity sha1-kL/iyO+0ufly65o/XlgIMuBPZNM= + dependencies: + glob-parent "^3.0.0" + globby "^6.0.0" + parent-module "^0.1.0" + +require-main-filename@^2.0.0: + version 
"2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve-from@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" + integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== + +resolve@^1.20.0: + version "1.20.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" + integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== + dependencies: + is-core-module "^2.2.0" + path-parse "^1.0.6" + +responselike@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= + dependencies: + lowercase-keys "^1.0.0" + +retry@^0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/retry/-/retry-0.12.0.tgz#1b42a6266a21f07421d1b0b54b7dc167b01c013b" + integrity sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs= + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^2.6.1: + version "2.7.1" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" + integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== + dependencies: + glob "^7.1.3" + +rimraf@^3.0.0, rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +rmdir@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/rmdir/-/rmdir-1.2.0.tgz#4fe0357cb06168c258e73e968093dc4e8a0f3253" + integrity sha1-T+A1fLBhaMJY5z6WgJPcTooPMlM= + dependencies: + node.flow "1.2.3" + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +rx@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/rx/-/rx-4.1.0.tgz#a5f13ff79ef3b740fe30aa803fb09f98805d4782" + integrity sha1-pfE/957zt0D+MKqAP7CfmIBdR4I= + +safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity 
sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +"safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sax@0.5.x: + version "0.5.8" + resolved "https://registry.yarnpkg.com/sax/-/sax-0.5.8.tgz#d472db228eb331c2506b0e8c15524adb939d12c1" + integrity sha1-1HLbIo6zMcJQaw6MFVJK25OdEsE= + +sax@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +semver@^5.3.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +semver@^6.0.0, semver@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + +semver@^7.1.1, semver@^7.1.3, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5: + version "7.3.5" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" + integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== + dependencies: + lru-cache "^6.0.0" + +send@0.17.1: + version "0.17.1" + resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" + integrity sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg== + dependencies: + debug "2.6.9" + depd "~1.1.2" + destroy "~1.0.4" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "~1.7.2" + mime "1.6.0" + ms "2.1.1" + on-finished "~2.3.0" + range-parser "~1.2.1" + statuses "~1.5.0" + +serialize-javascript@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" + integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== + dependencies: + randombytes "^2.1.0" + +serve-static@1.14.1: + version "1.14.1" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.1.tgz#666e636dc4f010f7ef29970a88a674320898b2f9" + integrity sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.17.1" + +set-blocking@^2.0.0, set-blocking@~2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= + +setprototypeof@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683" + integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw== + +shallow-clone@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060" + integrity sha1-WQnodLp3EG1zrEFM/sH/yofZcGA= + dependencies: + is-extendable "^0.1.1" + kind-of "^2.0.1" + lazy-cache "^0.2.3" + mixin-object "^2.0.1" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +side-channel@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" + integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== + dependencies: + call-bind "^1.0.0" + get-intrinsic "^1.0.2" + object-inspect "^1.9.0" + +signal-exit@^3.0.0, signal-exit@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slice-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b" + integrity sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ== + dependencies: + ansi-styles "^4.0.0" + astral-regex "^2.0.0" + is-fullwidth-code-point "^3.0.0" + +smart-buffer@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/smart-buffer/-/smart-buffer-4.1.0.tgz#91605c25d91652f4661ea69ccf45f1b331ca21ba" + integrity sha512-iVICrxOzCynf/SNaBQCw34eM9jROU/s5rzIhpOvzhzuYHfJR/DhZfDkXiZSgKXfgv26HT3Yni3AV/DGw0cGnnw== + +socks-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-5.0.0.tgz#7c0f364e7b1cf4a7a437e71253bed72e9004be60" + integrity sha512-lEpa1zsWCChxiynk+lCycKuC502RxDWLKJZoIhnxrWNjLSDGYRFflHA1/228VkRcnv9TIb8w98derGbpKxJRgA== + dependencies: + agent-base "6" + debug "4" + socks "^2.3.3" + +socks-proxy-agent@^6.0.0: + version "6.1.1" + resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-6.1.1.tgz#e664e8f1aaf4e1fb3df945f09e3d94f911137f87" + integrity sha512-t8J0kG3csjA4g6FTbsMOWws+7R7vuRC8aQ/wy3/1OWmsgwA68zs/+cExQ0koSitUDXqhufF/YJr9wtNMZHw5Ew== + dependencies: + agent-base "^6.0.2" + 
debug "^4.3.1" + socks "^2.6.1" + +socks@^2.3.3, socks@^2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/socks/-/socks-2.6.1.tgz#989e6534a07cf337deb1b1c94aaa44296520d30e" + integrity sha512-kLQ9N5ucj8uIcxrDwjm0Jsqk06xdpBjGNQtpXy4Q8/QY2k+fY7nZH8CARy+hkbG+SGAovmzzuauCpBlb8FrnBA== + dependencies: + ip "^1.1.5" + smart-buffer "^4.1.0" + +sort-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" + integrity sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg= + dependencies: + is-plain-obj "^1.0.0" + +source-map-support@^0.5.17: + version "0.5.19" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" + integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map@^0.5.0: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= + +source-map@^0.6.0, source-map@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +spawn-wrap@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/spawn-wrap/-/spawn-wrap-2.0.0.tgz#103685b8b8f9b79771318827aa78650a610d457e" + integrity sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg== + dependencies: + foreground-child "^2.0.0" + is-windows "^1.0.2" + make-dir "^3.0.0" + rimraf "^3.0.0" + signal-exit "^3.0.2" + which "^2.0.1" + +spdx-correct@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" + integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" + integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== + +spdx-expression-parse@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" + integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.9" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.9.tgz#8a595135def9592bda69709474f1cbeea7c2467f" + integrity sha512-Ki212dKK4ogX+xDo4CtOZBVIwhsKBEfsEEcwmJfLQzirgc2jIWdzg40Unxz/HzEUqM1WFzVlQSMF9kZZ2HboLQ== + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= + +sqlite3@5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/sqlite3/-/sqlite3-5.0.2.tgz#00924adcc001c17686e0a6643b6cbbc2d3965083" + integrity 
sha512-1SdTNo+BVU211Xj1csWa8lV6KM0CtucDwRyA0VHl91wEH1Mgh7RxUpI4rVvG7OhHrzCSGaVyW5g8vKvlrk9DJA== + dependencies: + node-addon-api "^3.0.0" + node-pre-gyp "^0.11.0" + optionalDependencies: + node-gyp "3.x" + +ssh2@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/ssh2/-/ssh2-1.4.0.tgz#e32e8343394364c922bad915a5a7fecd67d0f5c5" + integrity sha512-XvXwcXKvS452DyQvCa6Ct+chpucwc/UyxgliYz+rWXJ3jDHdtBb9xgmxJdMmnIn5bpgGAEV3KaEsH98ZGPHqwg== + dependencies: + asn1 "^0.2.4" + bcrypt-pbkdf "^1.0.2" + optionalDependencies: + cpu-features "0.0.2" + nan "^2.15.0" + +sshpk@^1.14.1: + version "1.16.1" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" + integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg== + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +ssri@^8.0.0, ssri@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-8.0.1.tgz#638e4e439e2ffbd2cd289776d5ca457c4f51a2af" + integrity sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ== + dependencies: + minipass "^3.1.1" + +"statuses@>= 1.5.0 < 2", statuses@~1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= + +stream-buffers@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/stream-buffers/-/stream-buffers-3.0.2.tgz#5249005a8d5c2d00b3a32e6e0a6ea209dc4f3521" + integrity sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ== + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + integrity sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM= + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2": + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string-width@^4.1.0, string-width@^4.2.0: + version "4.2.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.2.tgz#dafd4f9559a7585cfba529c6a0a4f73488ebd4c5" + integrity sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.0" + +string_decoder@^1.1.1: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +string_decoder@~0.10.x: + version "0.10.31" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + integrity sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ= + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +stringify-package@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/stringify-package/-/stringify-package-1.0.1.tgz#e5aa3643e7f74d0f28628b72f3dad5cecfc3ba85" + integrity sha512-sa4DUQsYciMP1xhKWGuFM04fB0LG/9DlluZoSVywUMRNvzid6XucHK0/90xGxRoHrAaROrcHK1aPKaijCtSrhg== + +strip-ansi@=6.0.1, strip-ansi@^3.0.0, strip-ansi@^3.0.1, strip-ansi@^4.0.0, strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-bom@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" + integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w== + +strip-json-comments@3.1.1, strip-json-comments@^3.1.0, strip-json-comments@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +supports-color@8.1.1: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +swagger-fluent@^3.1.2: + version "3.2.1" + resolved "https://registry.yarnpkg.com/swagger-fluent/-/swagger-fluent-3.2.1.tgz#755d4617cd74f4c30c25013c725269bdcbb0c599" + integrity sha512-Ol4wwbc9kKNdOGPS3aoKLEkCFPiEioA9gMUeeRzlugHHu0HYVvftLpK9v4X8ZUm8l2n2yGiLUBp+gKqhQzHspw== + dependencies: + 
merge-deep "^3.0.2" + request "^2.88.0" + +table@^6.0.9: + version "6.7.1" + resolved "https://registry.yarnpkg.com/table/-/table-6.7.1.tgz#ee05592b7143831a8c94f3cee6aae4c1ccef33e2" + integrity sha512-ZGum47Yi6KOOFDE8m223td53ath2enHcYLgOCjGr5ngu8bdIARQk6mN/wRMv4yMRcHnCSnHbCEha4sobQx5yWg== + dependencies: + ajv "^8.0.1" + lodash.clonedeep "^4.5.0" + lodash.truncate "^4.4.2" + slice-ansi "^4.0.0" + string-width "^4.2.0" + strip-ansi "^6.0.0" + +tail-stream@^0.3.4: + version "0.3.4" + resolved "https://registry.yarnpkg.com/tail-stream/-/tail-stream-0.3.4.tgz#bc675a20e92732b1a6a7cc65d6be66f7817fd5c1" + integrity sha1-vGdaIOknMrGmp8xl1r5m94F/1cE= + +tar@^4: + version "4.4.19" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" + integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA== + dependencies: + chownr "^1.1.4" + fs-minipass "^1.2.7" + minipass "^2.9.0" + minizlib "^1.3.3" + mkdirp "^0.5.5" + safe-buffer "^5.2.1" + yallist "^3.1.1" + +tar@^6.0.2, tar@^6.1.0: + version "6.1.9" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.9.tgz#5646ef51342ac55456b2466e44da810439978db1" + integrity sha512-XjLaMNl76o07zqZC/aW4lwegdY07baOH1T8w3AEfrHAdyg/oYO4ctjzEBq9Gy9fEP9oHqLIgvx6zuGDGe+bc8Q== + dependencies: + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^3.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" + +tar@^6.1.11, tar@^6.1.2: + version "6.1.11" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621" + integrity sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA== + dependencies: + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^3.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" + +test-exclude@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" + integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w== + dependencies: + "@istanbuljs/schema" "^0.1.2" + glob "^7.1.4" + minimatch "^3.0.4" + +text-table@^0.2.0, text-table@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ= + +timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= + +tiny-relative-date@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-1.3.0.tgz#fa08aad501ed730f31cc043181d995c39a935e07" + integrity sha512-MOQHpzllWxDCHHaDno30hhLfbouoYlOI8YlMNtvKe1zXbjEVhbcEovQxvZrPvtiYW630GQDoMMarCnjfyfHA+A== + +tmp@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.1.tgz#8457fc3037dcf4719c251367a1af6500ee1ccf14" + integrity sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ== + dependencies: + rimraf "^3.0.0" + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= + +to-regex-range@^5.0.1: + version "5.0.1" + resolved 
"https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +toidentifier@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" + integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw== + +tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + +tree-kill@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/tree-kill/-/tree-kill-1.2.2.tgz#4ca09a9092c88b73a7cdc5e8a01b507b0790a0cc" + integrity sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A== + +treeverse@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/treeverse/-/treeverse-1.0.4.tgz#a6b0ebf98a1bca6846ddc7ecbc900df08cb9cd5f" + integrity sha512-whw60l7r+8ZU8Tu/Uc2yxtc4ZTZbR/PF3u1IPNKGQ6p8EICLb3Z2lAgoqw9bqYd8IkgnsaOcLzYHFckjqNsf0g== + +ts-deferred@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/ts-deferred/-/ts-deferred-1.0.4.tgz#58145ebaeef5b8f2a290b8cec3d060839f9489c7" + integrity sha1-WBReuu71uPKikLjOw9Bgg5+Uicc= + +ts-node@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.0.0.tgz#05f10b9a716b0b624129ad44f0ea05dac84ba3be" + integrity sha512-ROWeOIUvfFbPZkoDis0L/55Fk+6gFQNZwwKPLinacRl6tsxstTF1DbAcLKkovwnpKMVvOMHP1TIbnwXwtLg1gg== + dependencies: + "@tsconfig/node10" "^1.0.7" + "@tsconfig/node12" "^1.0.7" + "@tsconfig/node14" "^1.0.0" + "@tsconfig/node16" "^1.0.1" + arg "^4.1.0" + create-require "^1.1.0" + diff "^4.0.1" + make-error "^1.1.1" + source-map-support "^0.5.17" + yn "3.1.1" + +tslib@^1.8.1: + version "1.14.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tsutils@^3.17.1, tsutils@^3.21.0: + version "3.21.0" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.21.0.tgz#b48717d394cea6c1e096983eed58e9d61715b623" + integrity sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA== + dependencies: + tslib "^1.8.1" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= + +type-check@^0.4.0, type-check@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== + dependencies: + prelude-ls "^1.2.1" + +type-detect@^4.0.0, type-detect@^4.0.5: + version "4.0.8" + resolved 
"https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" + integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== + +type-fest@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" + integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ== + +type-fest@^0.8.0: + version "0.8.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" + integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== + +type-is@~1.6.17, type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== + dependencies: + media-typer "0.3.0" + mime-types "~2.1.24" + +typedarray-to-buffer@^3.1.5: + version "3.1.5" + resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" + integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== + dependencies: + is-typedarray "^1.0.0" + +typescript-ioc@^1.2.6: + version "1.2.6" + resolved "https://registry.yarnpkg.com/typescript-ioc/-/typescript-ioc-1.2.6.tgz#7b535d81f674b2dd8f3b0f33dfc30f1f02906753" + integrity sha512-ksyRctgYtHsjmKBceEgeifV3Zq3tnqLh6/q9HlWC08lnng9ZHA3IwXw8oQlv77TpHbs2J3GVUbxTuhmLLSWCTg== + dependencies: + reflect-metadata "^0.1.13" + require-glob "^3.2.0" + +typescript-string-operations@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/typescript-string-operations/-/typescript-string-operations-1.4.1.tgz#889bd1a9f234346691c00ea5ceaf97b2754d1894" + integrity sha512-c+q+Tb0hxeebohdT9KpGUAm5zwxhU8pHeNOeuLCGFMXKN0OrldoAxtufrGLR3xSPCXDA4A3IBCEdRNNscVqLQg== + +typescript@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.3.2.tgz#399ab18aac45802d6f2498de5054fcbbe716a805" + integrity sha512-zZ4hShnmnoVnAHpVHWpTcxdv7dWP60S2FsydQLV8V5PbS3FifjWFFRiHSWpDJahly88PRyV5teTSLoq4eG7mKw== + +underscore@^1.12.1: + version "1.13.1" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.1.tgz#0c1c6bd2df54b6b69f2314066d65b6cde6fcf9d1" + integrity sha512-hzSoAVtJF+3ZtiFX0VgfFPHEDRm7Y/QPjGyNo4TVdnDTdft3tr8hEkD25a1jC+TjTuE7tkHGKkhwCgs9dgBB2g== + +unique-filename@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" + integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ== + dependencies: + unique-slug "^2.0.0" + +unique-slug@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c" + integrity sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w== + dependencies: + imurmurhash "^0.1.4" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= + +uri-js@^4.2.2: + version "4.4.1" + resolved 
"https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +url-join@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/url-join/-/url-join-4.0.1.tgz#b642e21a2646808ffa178c4c5fda39844e12cde7" + integrity sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA== + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= + dependencies: + prepend-http "^2.0.0" + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= + +util-deprecate@^1.0.1, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= + +uuid@^3.0.0, uuid@^3.3.2, uuid@^3.3.3: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + +v8-compile-cache@^2.0.3: + version "2.3.0" + resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz#2de19618c66dc247dcfb6f99338035d8245a2cee" + integrity sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA== + +validate-npm-package-license@^3.0.1, validate-npm-package-license@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" + integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +validate-npm-package-name@^3.0.0, validate-npm-package-name@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e" + integrity sha1-X6kS2B630MdK/BQN5zF/DKffQ34= + dependencies: + builtins "^1.0.3" + +validator@~13.7.0: + version "13.7.0" + resolved "https://registry.yarnpkg.com/validator/-/validator-13.7.0.tgz#4f9658ba13ba8f3d82ee881d3516489ea85c0857" + integrity sha512-nYXQLCBkpJ8X6ltALua9dRrZDHVYxjJ1wgskNt1lH9fzGjs3tgojGSCBjmEPwkWS1y29+DrizMTW19Pr9uB2nw== + +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +walk-up-path@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/walk-up-path/-/walk-up-path-1.0.0.tgz#d4745e893dd5fd0dbb58dd0a4c6a33d9c9fec53e" + integrity sha512-hwj/qMDUEjCU5h0xr90KGCf0tg0/LgJbmOWgrWKYlcJZM7XvquvUJZ0G/HMGr7F7OQMOUuPHWP9JpriinkAlkg== + +wcwidth@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" + integrity sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g= + dependencies: + defaults "^1.0.3" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= + +which@2.0.2, which@^2.0.1, which@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +which@^1.2.9: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + +wide-align@1.1.3, wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== + dependencies: + string-width "^1.0.2 || 2" + +wide-align@^1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.5.tgz#df1d4c206854369ecf3c9a4898f1b23fbd9d15d3" + integrity sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg== + dependencies: + string-width "^1.0.2 || 2 || 3 || 4" + +word-wrap@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" + integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== + +workerpool@6.1.5: + version "6.1.5" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.1.5.tgz#0f7cf076b6215fd7e1da903ff6f22ddd1886b581" + integrity sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw== + +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +write-file-atomic@^3.0.0, write-file-atomic@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8" + integrity 
sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q== + dependencies: + imurmurhash "^0.1.4" + is-typedarray "^1.0.0" + signal-exit "^3.0.2" + typedarray-to-buffer "^3.1.5" + +ws@^6.2.1: + version "6.2.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.2.tgz#dd5cdbd57a9979916097652d78f1cc5faea0c32e" + integrity sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw== + dependencies: + async-limiter "~1.0.0" + +ws@^7.4.6: + version "7.4.6" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" + integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== + +xml2js@0.2.8: + version "0.2.8" + resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.2.8.tgz#9b81690931631ff09d1957549faf54f4f980b3c2" + integrity sha1-m4FpCTFjH/CdGVdUn69U9PmAs8I= + dependencies: + sax "0.5.x" + +xmlbuilder@^9.0.7: + version "9.0.7" + resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-9.0.7.tgz#132ee63d2ec5565c557e20f4c22df9aca686b10d" + integrity sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0= + +y18n@>=5.0.8, y18n@^4.0.0, y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + +yallist@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI= + +yallist@^3.0.0, yallist@^3.0.2, yallist@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yargs-parser@20.2.4, yargs-parser@>=20.2.7, yargs-parser@^18.1.2, yargs-parser@^20.2.2: + version "20.2.7" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.7.tgz#61df85c113edfb5a7a4e36eb8aa60ef423cbc90a" + integrity sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw== + +yargs-unparser@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" + integrity sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA== + dependencies: + camelcase "^6.0.0" + decamelize "^4.0.0" + flat "^5.0.2" + is-plain-obj "^2.1.0" + +yargs@16.2.0: + version "16.2.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" + integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== + dependencies: + cliui "^7.0.2" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.0" + y18n "^5.0.5" + yargs-parser "^20.2.2" + +yargs@^15.0.2: + version "15.4.1" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity 
sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" + +yn@3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" + integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== diff --git a/ts/webui/.eslintignore b/ts/webui/.eslintignore new file mode 100644 index 0000000000000000000000000000000000000000..c487812d871f073090cc7000feecbed3f91c23a6 --- /dev/null +++ b/ts/webui/.eslintignore @@ -0,0 +1,3 @@ +/build/** +/scripts/** +/src/serviceWorker.ts \ No newline at end of file diff --git a/ts/webui/.eslintrc b/ts/webui/.eslintrc new file mode 100644 index 0000000000000000000000000000000000000000..cc3750e5660875f2ecb9a1e23ed9516c16cfdf6e --- /dev/null +++ b/ts/webui/.eslintrc @@ -0,0 +1,58 @@ +{ + "env": { + "browser": true, + "es6": true + }, + "parser": "@typescript-eslint/parser", + "parserOptions": { + "ecmaVersion": 2018, + "sourceType": "module" + }, + "plugins": [ + "@typescript-eslint", + "eslint-plugin-prettier" + ], + "extends": [ + "eslint:recommended", + "plugin:react/recommended", + "plugin:@typescript-eslint/eslint-recommended", + "plugin:@typescript-eslint/recommended", + "prettier" + ], + "rules": { + "prettier/prettier": 2, + "@typescript-eslint/no-explicit-any": 0, + "@typescript-eslint/no-namespace": 0, + "@typescript-eslint/consistent-type-assertions": 0, + "@typescript-eslint/no-inferrable-types": 0, + "@typescript-eslint/no-use-before-define": [2, "nofunc"], + "@typescript-eslint/no-var-requires": 0, + "@typescript-eslint/no-unused-vars": [2, { "argsIgnorePattern": "^_" }], + "@typescript-eslint/ban-types": [ + "error", + { + "extendDefaults": true, + "types": { + "{}": false, + "object": false, + "Function": false + } + } + ], + "arrow-parens": [2, "as-needed"], + "no-inner-declarations": 0, + "no-empty": 2, + "no-multiple-empty-lines": [2, { "max": 1 }], + "react/display-name": 0 + }, + "settings": { + "react": { + "version": "detect" + }, + "ignorePatterns": [ + "node_modules/", + "build/", + "**/*.js" + ] + } +} diff --git a/ts/webui/.gitignore b/ts/webui/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4d29575de80483b005c29bfcac5061cd2f45313e --- /dev/null +++ b/ts/webui/.gitignore @@ -0,0 +1,23 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# production
+/build
+
+# misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
diff --git a/ts/webui/.stylelintrc.json b/ts/webui/.stylelintrc.json
new file mode 100644
index 0000000000000000000000000000000000000000..59743ae917f4d7c0911f110a1309ef2963e96788
--- /dev/null
+++ b/ts/webui/.stylelintrc.json
@@ -0,0 +1,11 @@
+{
+    "extends": "stylelint-config-standard",
+    "ignoreFiles": [
+        "build/**"
+    ],
+    "rules": {
+        "at-rule-empty-line-before": null,
+        "indentation": 4,
+        "no-descending-specificity": null
+    }
+}
\ No newline at end of file
diff --git a/ts/webui/LICENSE b/ts/webui/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..21071075c24599ee98254f702bcfc504cdc275a6
--- /dev/null
+++ b/ts/webui/LICENSE
@@ -0,0 +1,21 @@
+    MIT License
+
+    Copyright (c) Microsoft Corporation. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
diff --git a/ts/webui/README.md b/ts/webui/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b51f51a60e9977f5e0536b4e0e715d117354f033
--- /dev/null
+++ b/ts/webui/README.md
@@ -0,0 +1,32 @@
+# WebUI
+
+## View summary page
+
+Click the tab "Overview".
+
+* See the experiment parameters.
+* See the best-performing trials.
+* See the search_space JSON (a minimal example appears at the end of this page).
+
+## View job accuracy
+
+Click the tab "Default Metric" to see the point graph of all trials. Hover over a point to see its exact accuracy.
+
+## View hyper-parameters
+
+Click the tab "Hyper Parameter" to see the parallel-coordinates graph.
+
+* You can select a percentage to see only the top trials.
+* Choose two axes to swap their positions.
+
+## View trial status
+
+Click the tab "Trials Detail" to see the status of all trials. Specifically:
+
+* Trial duration: each trial's duration, shown as a bar graph.
+* Trial detail: each trial's id, duration, start time, end time, status, accuracy, and search space file.
+* Kill: you can kill a job whose status is running.
+
+## Feedback
+
+[Known Issues](https://github.com/Microsoft/nni/issues).
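+## Search space example
+
+The search_space JSON shown on the overview page defines which hyper-parameters the experiment tunes and how each one is sampled. A minimal sketch (the parameter names here are hypothetical; the `_type`/`_value` keys follow NNI's search space format):
+
+```json
+{
+    "lr": { "_type": "choice", "_value": [0.0001, 0.001, 0.01] },
+    "batch_size": { "_type": "choice", "_value": [16, 32, 64] },
+    "momentum": { "_type": "uniform", "_value": [0, 1] }
+}
+```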
diff --git a/ts/webui/README_zh_CN.md b/ts/webui/README_zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..6c9917e41ac636d9c9993a5de940a41acc3216db
--- /dev/null
+++ b/ts/webui/README_zh_CN.md
@@ -0,0 +1,32 @@
+# WebUI
+
+## View the overview page
+
+Click the "Overview" tab.
+
+* View the experiment parameters.
+* View the best-performing trials.
+* View the search space JSON file.
+
+## View job accuracy
+
+Click "Default Metric" to see a point graph of all trials. Hover over a point to see its accuracy.
+
+## View hyper-parameters
+
+Click the "Hyper Parameter" tab to see the graph.
+
+* You can select a percentage to see only the best trials.
+* Choose two axes to swap their positions.
+
+## View trial status
+
+Click the "Trials Detail" tab to see the status of all trials. Specifically:
+
+* Trial duration: a bar graph of each trial's execution time.
+* Trial detail: each trial's id, duration, start time, end time, status, accuracy, and search space.
+* Kill: running jobs can be terminated.
+
+## Feedback
+
+[Known Issues](https://github.com/Microsoft/nni/issues)
diff --git a/ts/webui/config/env.js b/ts/webui/config/env.js
new file mode 100644
index 0000000000000000000000000000000000000000..b0344c5a83e6de8a160421d4387ccda5cc8b7456
--- /dev/null
+++ b/ts/webui/config/env.js
@@ -0,0 +1,93 @@
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+const paths = require('./paths');
+
+// Make sure that including paths.js after env.js will read .env variables.
+delete require.cache[require.resolve('./paths')];
+
+const NODE_ENV = process.env.NODE_ENV;
+if (!NODE_ENV) {
+  throw new Error(
+    'The NODE_ENV environment variable is required but was not specified.'
+  );
+}
+
+// https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use
+var dotenvFiles = [
+  `${paths.dotenv}.${NODE_ENV}.local`,
+  `${paths.dotenv}.${NODE_ENV}`,
+  // Don't include `.env.local` for `test` environment
+  // since normally you expect tests to produce the same
+  // results for everyone
+  NODE_ENV !== 'test' && `${paths.dotenv}.local`,
+  paths.dotenv,
+].filter(Boolean);
+
+// Load environment variables from .env* files. Suppress warnings using silent
+// if this file is missing. dotenv will never modify any environment variables
+// that have already been set. Variable expansion is supported in .env files.
+// https://github.com/motdotla/dotenv
+// https://github.com/motdotla/dotenv-expand
+dotenvFiles.forEach(dotenvFile => {
+  if (fs.existsSync(dotenvFile)) {
+    require('dotenv-expand')(
+      require('dotenv').config({
+        path: dotenvFile,
+      })
+    );
+  }
+});
+
+// We support resolving modules according to `NODE_PATH`.
+// This lets you use absolute paths in imports inside large monorepos:
+// https://github.com/facebook/create-react-app/issues/253.
+// It works similarly to `NODE_PATH` in Node itself:
+// https://nodejs.org/api/modules.html#modules_loading_from_the_global_folders
+// Note that unlike in Node, only *relative* paths from `NODE_PATH` are honored.
+// Otherwise, we risk importing Node.js core modules into an app instead of Webpack shims.
+// https://github.com/facebook/create-react-app/issues/1023#issuecomment-265344421
+// We also resolve them to make sure all tools using them work consistently.
+const appDirectory = fs.realpathSync(process.cwd());
+process.env.NODE_PATH = (process.env.NODE_PATH || '')
+  .split(path.delimiter)
+  .filter(folder => folder && !path.isAbsolute(folder))
+  .map(folder => path.resolve(appDirectory, folder))
+  .join(path.delimiter);
+
+// Grab NODE_ENV and REACT_APP_* environment variables and prepare them to be
+// injected into the application via DefinePlugin in Webpack configuration.
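+// A sketch of the intended consumer of this module (hypothetical wiring; the
+// repo's real Webpack config may differ):
+//
+//   const env = getClientEnvironment(publicUrl);
+//   plugins: [new webpack.DefinePlugin(env.stringified)]
+//
+// DefinePlugin performs textual replacement at build time, so every injected
+// value must already be a JSON-stringified literal -- hence the JSON.stringify
+// pass in getClientEnvironment below.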
+const REACT_APP = /^REACT_APP_/i;
+
+function getClientEnvironment(publicUrl) {
+  const raw = Object.keys(process.env)
+    .filter(key => REACT_APP.test(key))
+    .reduce(
+      (env, key) => {
+        env[key] = process.env[key];
+        return env;
+      },
+      {
+        // Useful for determining whether we’re running in production mode.
+        // Most importantly, it switches React into the correct mode.
+        NODE_ENV: process.env.NODE_ENV || 'development',
+        // Useful for resolving the correct path to static assets in `public`.
+        // For example, <img src={process.env.PUBLIC_URL + '/img/logo.png'} />.
+        // This should only be used as an escape hatch. Normally you would put
+        // images into the `src` and `import` them in code to get their paths.
+        PUBLIC_URL: publicUrl,
+      }
+    );
+  // Stringify all values so we can feed into Webpack DefinePlugin
+  const stringified = {
+    'process.env': Object.keys(raw).reduce((env, key) => {
+      env[key] = JSON.stringify(raw[key]);
+      return env;
+    }, {}),
+  };
+
+  return { raw, stringified };
+}
+
+module.exports = getClientEnvironment;
diff --git a/ts/webui/config/jest/cssTransform.js b/ts/webui/config/jest/cssTransform.js
new file mode 100644
index 0000000000000000000000000000000000000000..8f65114812a4e5726d2e4148cd15481c33e1cfec
--- /dev/null
+++ b/ts/webui/config/jest/cssTransform.js
@@ -0,0 +1,14 @@
+'use strict';
+
+// This is a custom Jest transformer turning style imports into empty objects.
+// http://facebook.github.io/jest/docs/en/webpack.html
+
+module.exports = {
+  process() {
+    return 'module.exports = {};';
+  },
+  getCacheKey() {
+    // The output is always the same.
+    return 'cssTransform';
+  },
+};
diff --git a/ts/webui/config/jest/fileTransform.js b/ts/webui/config/jest/fileTransform.js
new file mode 100644
index 0000000000000000000000000000000000000000..4ed6bdb005ded7f2bac9fd85c4924ba6247bcc94
--- /dev/null
+++ b/ts/webui/config/jest/fileTransform.js
@@ -0,0 +1,31 @@
+'use strict';
+
+const path = require('path');
+
+// This is a custom Jest transformer turning file imports into filenames.
+// http://facebook.github.io/jest/docs/en/webpack.html
+
+module.exports = {
+  process(src, filename) {
+    const assetFilename = JSON.stringify(path.basename(filename));
+
+    if (filename.match(/\.svg$/)) {
+      return `const React = require('react');
+      module.exports = {
+        __esModule: true,
+        default: ${assetFilename},
+        ReactComponent: React.forwardRef((props, ref) => ({
+          $$typeof: Symbol.for('react.element'),
+          type: 'svg',
+          ref: ref,
+          key: null,
+          props: Object.assign({}, props, {
+            children: ${assetFilename}
+          })
+        })),
+      };`;
+    }
+
+    return `module.exports = ${assetFilename};`;
+  },
+};
diff --git a/ts/webui/config/modules.js b/ts/webui/config/modules.js
new file mode 100644
index 0000000000000000000000000000000000000000..4646eb05e07975b558f280b41e99fce96f66413c
--- /dev/null
+++ b/ts/webui/config/modules.js
@@ -0,0 +1,84 @@
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+const paths = require('./paths');
+const chalk = require('react-dev-utils/chalk');
+
+/**
+ * Get the baseUrl of a compilerOptions object.
+ *
+ * @param {Object} options
+ */
+function getAdditionalModulePaths(options = {}) {
+  const baseUrl = options.baseUrl;
+
+  // We need to explicitly check for null and undefined (and not a falsy value) because
+  // TypeScript treats an empty string as `.`.
+  if (baseUrl == null) {
+    // If there's no baseUrl set we respect NODE_PATH.
+    // Note that NODE_PATH is deprecated and will be removed
+    // in the next major release of create-react-app.
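+    // For example (hypothetical value): NODE_PATH="src:lib" splits on the
+    // platform delimiter (':' on POSIX, ';' on Windows) into ['src', 'lib'],
+    // which Webpack later treats as additional module resolution roots.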
+
+    const nodePath = process.env.NODE_PATH || '';
+    return nodePath.split(path.delimiter).filter(Boolean);
+  }
+
+  const baseUrlResolved = path.resolve(paths.appPath, baseUrl);
+
+  // We don't need to do anything if `baseUrl` is set to `node_modules`. This is
+  // the default behavior.
+  if (path.relative(paths.appNodeModules, baseUrlResolved) === '') {
+    return null;
+  }
+
+  // Allow the user to set the `baseUrl` to `appSrc`.
+  if (path.relative(paths.appSrc, baseUrlResolved) === '') {
+    return [paths.appSrc];
+  }
+
+  // Otherwise, throw an error.
+  throw new Error(
+    chalk.red.bold(
+      "Your project's `baseUrl` can only be set to `src` or `node_modules`." +
+        ' Create React App does not support other values at this time.'
+    )
+  );
+}
+
+function getModules() {
+  // Check if TypeScript is set up
+  const hasTsConfig = fs.existsSync(paths.appTsConfig);
+  const hasJsConfig = fs.existsSync(paths.appJsConfig);
+
+  if (hasTsConfig && hasJsConfig) {
+    throw new Error(
+      'You have both a tsconfig.json and a jsconfig.json. If you are using TypeScript please remove your jsconfig.json file.'
+    );
+  }
+
+  let config;
+
+  // If there's a tsconfig.json we assume it's a
+  // TypeScript project and set up the config
+  // based on tsconfig.json
+  if (hasTsConfig) {
+    config = require(paths.appTsConfig);
+    // Otherwise we'll check if there is jsconfig.json
+    // for non TS projects.
+  } else if (hasJsConfig) {
+    config = require(paths.appJsConfig);
+  }
+
+  config = config || {};
+  const options = config.compilerOptions || {};
+
+  const additionalModulePaths = getAdditionalModulePaths(options);
+
+  return {
+    additionalModulePaths: additionalModulePaths,
+    hasTsConfig,
+  };
+}
+
+module.exports = getModules();
diff --git a/ts/webui/config/paths.js b/ts/webui/config/paths.js
new file mode 100644
index 0000000000000000000000000000000000000000..f23c121fa33fb2ecb92ccd2831056e777bd5f0d9
--- /dev/null
+++ b/ts/webui/config/paths.js
@@ -0,0 +1,90 @@
+'use strict';
+
+const path = require('path');
+const fs = require('fs');
+const url = require('url');
+
+// Make sure any symlinks in the project folder are resolved:
+// https://github.com/facebook/create-react-app/issues/637
+const appDirectory = fs.realpathSync(process.cwd());
+const resolveApp = relativePath => path.resolve(appDirectory, relativePath);
+
+const envPublicUrl = process.env.PUBLIC_URL;
+
+function ensureSlash(inputPath, needsSlash) {
+  const hasSlash = inputPath.endsWith('/');
+  if (hasSlash && !needsSlash) {
+    return inputPath.substr(0, inputPath.length - 1);
+  } else if (!hasSlash && needsSlash) {
+    return `${inputPath}/`;
+  } else {
+    return inputPath;
+  }
+}
+
+const getPublicUrl = appPackageJson =>
+  envPublicUrl || require(appPackageJson).homepage;
+
+// We use `PUBLIC_URL` environment variable or "homepage" field to infer
+// "public path" at which the app is served.
+// Webpack needs to know it to put the right